/*      $NetBSD: if_qe.c,v 1.48 2001/04/26 20:05:46 ragge Exp $ */
/*
 * Copyright (c) 1999 Ludd, University of Luleå, Sweden. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed at Ludd, University of
 *      Luleå, Sweden and its contributors.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for DEQNA/DELQA ethernet cards.
 * Things that still need to be done:
 *	Handle ubaresets. Does not work at all right now.
 *	Fix ALLMULTI reception. But someone must tell me how...
 *	Collect statistics.
 */

#include "opt_inet.h"
#include "bpfilter.h"

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/systm.h>
#include <sys/sockio.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_dl.h>

#include <netinet/in.h>
#include <netinet/if_inarp.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#include <net/bpfdesc.h>
#endif

#include <machine/bus.h>

#include <dev/qbus/ubavar.h>
#include <dev/qbus/if_qereg.h>

#include "ioconf.h"

#define RXDESCS	30	/* # of receive descriptors */
#define TXDESCS	60	/* # transmit descs */

/*
 * Structure containing the elements that must be in DMA-safe memory.
 */
struct qe_cdata {
	struct qe_ring	qc_recv[RXDESCS+1];	/* Receive descriptors */
	struct qe_ring	qc_xmit[TXDESCS+1];	/* Transmit descriptors */
	u_int8_t	qc_setup[128];		/* Setup packet layout */
};
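
/*
 * Each ring above has one descriptor more than the driver ever hands
 * to the chip: qeattach() turns the extra slot into a QE_CHAIN entry
 * pointing back to the first descriptor, closing the ring.
 */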

struct	qe_softc {
	struct device	sc_dev;		/* Configuration common part	*/
	struct evcnt	sc_intrcnt;	/* Interrupt counting		*/
	struct ethercom sc_ec;		/* Ethernet common part		*/
#define sc_if	sc_ec.ec_if		/* network-visible interface	*/
	bus_space_tag_t sc_iot;
	bus_space_handle_t sc_ioh;
	bus_dma_tag_t	sc_dmat;
	struct qe_cdata *sc_qedata;	/* Descriptor struct		*/
	struct qe_cdata *sc_pqedata;	/* Unibus address of above	*/
	struct mbuf*	sc_txmbuf[TXDESCS];
	struct mbuf*	sc_rxmbuf[RXDESCS];
	bus_dmamap_t	sc_xmtmap[TXDESCS];
	bus_dmamap_t	sc_rcvmap[RXDESCS];
	struct ubinfo	sc_ui;
	int		sc_intvec;	/* Interrupt vector		*/
	int		sc_nexttx;
	int		sc_inq;
	int		sc_lastack;
	int		sc_nextrx;
	int		sc_setup;	/* Setup packet in queue	*/
};
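
/*
 * Transmit-ring bookkeeping, as used by qestart() and qeintr():
 * sc_nexttx is the next free descriptor slot, sc_lastack the oldest
 * descriptor not yet acknowledged by the chip, and sc_inq the number
 * of descriptors currently handed to the chip.
 */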

static	int	qematch(struct device *, struct cfdata *, void *);
static	void	qeattach(struct device *, struct device *, void *);
static	void	qeinit(struct qe_softc *);
static	void	qestart(struct ifnet *);
static	void	qeintr(void *);
static	int	qeioctl(struct ifnet *, u_long, caddr_t);
static	int	qe_add_rxbuf(struct qe_softc *, int);
static	void	qe_setup(struct qe_softc *);
static	void	qetimeout(struct ifnet *);

struct	cfattach qe_ca = {
	sizeof(struct qe_softc), qematch, qeattach
};

#define	QE_WCSR(csr, val) \
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, csr, val)
#define	QE_RCSR(csr) \
	bus_space_read_2(sc->sc_iot, sc->sc_ioh, csr)

#define	LOWORD(x)	((int)(x) & 0xffff)
#define	HIWORD(x)	(((int)(x) >> 16) & 0x3f)
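
/*
 * Qbus DMA addresses are at most 22 bits wide and are handed to the
 * device as two words: the low 16 bits and the upper 6 bits.  As an
 * example, the address 0x3ffffe splits into LOWORD = 0xfffe and
 * HIWORD = 0x3f.
 */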

/*
 * Check for a present DEQNA. Done by sending a fake setup packet
 * and waiting for an interrupt.
 */
int
qematch(struct device *parent, struct cfdata *cf, void *aux)
{
	struct	qe_softc ssc;
	struct	qe_softc *sc = &ssc;
	struct	uba_attach_args *ua = aux;
	struct	uba_softc *ubasc = (struct uba_softc *)parent;
	struct ubinfo ui;

#define	PROBESIZE	(sizeof(struct qe_ring) * 4 + 128)
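	/*
	 * The probe area holds four ring descriptors followed by a
	 * 128-byte buffer; ring[] below is dimensioned generously so
	 * that it covers at least PROBESIZE bytes.
	 */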
	struct	qe_ring ring[15]; /* For diag purposes only */
	struct	qe_ring *rp;
	int error;

	bzero(sc, sizeof(struct qe_softc));
	bzero(ring, PROBESIZE);
	sc->sc_iot = ua->ua_iot;
	sc->sc_ioh = ua->ua_ioh;
	sc->sc_dmat = ua->ua_dmat;

	ubasc->uh_lastiv -= 4;
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	QE_WCSR(QE_CSR_VECTOR, ubasc->uh_lastiv);

	/*
	 * Map the ring area. Actually this is done only to be able to
	 * send and receive an internal packet; some junk is looped back
	 * so that the DEQNA has a reason to interrupt.
	 */
	ui.ui_size = PROBESIZE;
	ui.ui_vaddr = (caddr_t)&ring[0];
	if ((error = uballoc((void *)parent, &ui, UBA_CANTWAIT)))
		return 0;

	/*
	 * Init a simple "fake" receive and transmit descriptor that
	 * points to some unused area. Send a fake setup packet.
	 */
	rp = (void *)ui.ui_baddr;
	ring[0].qe_flag = ring[0].qe_status1 = QE_NOTYET;
	ring[0].qe_addr_lo = LOWORD(&rp[4]);
	ring[0].qe_addr_hi = HIWORD(&rp[4]) | QE_VALID | QE_EOMSG | QE_SETUP;
	ring[0].qe_buf_len = 128;

	ring[2].qe_flag = ring[2].qe_status1 = QE_NOTYET;
	ring[2].qe_addr_lo = LOWORD(&rp[4]);
	ring[2].qe_addr_hi = HIWORD(&rp[4]) | QE_VALID;
	ring[2].qe_buf_len = 128;

	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
	DELAY(1000);

	/*
	 * Start the interface and wait for the packet.
	 */
	QE_WCSR(QE_CSR_CSR, QE_INT_ENABLE|QE_XMIT_INT|QE_RCV_INT);
	QE_WCSR(QE_CSR_RCLL, LOWORD(&rp[2]));
	QE_WCSR(QE_CSR_RCLH, HIWORD(&rp[2]));
	QE_WCSR(QE_CSR_XMTL, LOWORD(rp));
	QE_WCSR(QE_CSR_XMTH, HIWORD(rp));
	DELAY(10000);

	/*
	 * All done with the bus resources.
	 */
	ubfree((void *)parent, &ui);
	return 1;
}

/*
 * Interface exists: make available by filling in network interface
 * record.  System will initialize the interface when it is ready
 * to accept packets.
 */
void
qeattach(struct device *parent, struct device *self, void *aux)
{
	struct	uba_attach_args *ua = aux;
	struct	uba_softc *ubasc = (struct uba_softc *)parent;
	struct	qe_softc *sc = (struct qe_softc *)self;
	struct	ifnet *ifp = (struct ifnet *)&sc->sc_if;
	struct	qe_ring *rp;
	u_int8_t enaddr[ETHER_ADDR_LEN];
	int i, error;

	sc->sc_iot = ua->ua_iot;
	sc->sc_ioh = ua->ua_ioh;
	sc->sc_dmat = ua->ua_dmat;

	/*
	 * Allocate DMA safe memory for descriptors and setup memory.
	 */
	sc->sc_ui.ui_size = sizeof(struct qe_cdata);
	if ((error = ubmemalloc((struct uba_softc *)parent, &sc->sc_ui, 0))) {
		printf(": unable to ubmemalloc(), error = %d\n", error);
		return;
	}
	sc->sc_pqedata = (struct qe_cdata *)sc->sc_ui.ui_baddr;
	sc->sc_qedata = (struct qe_cdata *)sc->sc_ui.ui_vaddr;

	/*
	 * Zero the newly allocated memory.
	 */
	bzero(sc->sc_qedata, sizeof(struct qe_cdata));
	/*
	 * Create the transmit descriptor DMA maps. We take advantage
	 * of the fact that the Qbus address space is big, and therefore
	 * allocate map registers for all transmit descriptors also,
	 * so that we can avoid this each time we send a packet.
	 */
	for (i = 0; i < TXDESCS; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    1, MCLBYTES, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
		    &sc->sc_xmtmap[i]))) {
			printf(": unable to create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create receive buffer DMA maps.
	 */
	for (i = 0; i < RXDESCS; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rcvmap[i]))) {
			printf(": unable to create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail_5;
		}
	}
	/*
	 * Pre-allocate the receive buffers.
	 */
	for (i = 0; i < RXDESCS; i++) {
		if ((error = qe_add_rxbuf(sc, i)) != 0) {
			printf(": unable to allocate or map rx buffer %d,"
			    " error = %d\n", i, error);
			goto fail_6;
		}
	}

	/*
	 * Create ring loops of the buffer chains.
	 * This is only done once.
	 */
	rp = sc->sc_qedata->qc_recv;
	rp[RXDESCS].qe_addr_lo = LOWORD(&sc->sc_pqedata->qc_recv[0]);
	rp[RXDESCS].qe_addr_hi = HIWORD(&sc->sc_pqedata->qc_recv[0]) |
	    QE_VALID | QE_CHAIN;
	rp[RXDESCS].qe_flag = rp[RXDESCS].qe_status1 = QE_NOTYET;

	rp = sc->sc_qedata->qc_xmit;
	rp[TXDESCS].qe_addr_lo = LOWORD(&sc->sc_pqedata->qc_xmit[0]);
	rp[TXDESCS].qe_addr_hi = HIWORD(&sc->sc_pqedata->qc_xmit[0]) |
	    QE_VALID | QE_CHAIN;
	rp[TXDESCS].qe_flag = rp[TXDESCS].qe_status1 = QE_NOTYET;

	/*
	 * Get the vector that was set at match time, and remember it.
	 */
	sc->sc_intvec = ubasc->uh_lastiv;
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	DELAY(1000);
	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);

	/*
	 * Read out the ethernet address and tell which type of card this is.
	 */
	for (i = 0; i < 6; i++)
		enaddr[i] = QE_RCSR(i * 2) & 0xff;

	QE_WCSR(QE_CSR_VECTOR, sc->sc_intvec | 1);
	printf("\n%s: %s, hardware address %s\n", sc->sc_dev.dv_xname,
		QE_RCSR(QE_CSR_VECTOR) & 1 ? "delqa" : "deqna",
		ether_sprintf(enaddr));

	QE_WCSR(QE_CSR_VECTOR, QE_RCSR(QE_CSR_VECTOR) & ~1); /* ??? */

	uba_intr_establish(ua->ua_icookie, ua->ua_cvec, qeintr,
		sc, &sc->sc_intrcnt);
	evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, ua->ua_evcnt,
		sc->sc_dev.dv_xname, "intr");

	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = qestart;
	ifp->if_ioctl = qeioctl;
	ifp->if_watchdog = qetimeout;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	for (i = 0; i < RXDESCS; i++) {
		if (sc->sc_rxmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);
			m_freem(sc->sc_rxmbuf[i]);
		}
	}
 fail_5:
	for (i = 0; i < RXDESCS; i++) {
		if (sc->sc_rcvmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rcvmap[i]);
	}
 fail_4:
	for (i = 0; i < TXDESCS; i++) {
		if (sc->sc_xmtmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_xmtmap[i]);
	}
}

/*
 * Initialization of interface.
 */
void
qeinit(struct qe_softc *sc)
{
	struct ifnet *ifp = (struct ifnet *)&sc->sc_if;
	struct qe_cdata *qc = sc->sc_qedata;
	int i;

	/*
	 * Reset the interface.
	 */
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	DELAY(1000);
	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
	QE_WCSR(QE_CSR_VECTOR, sc->sc_intvec);

	sc->sc_nexttx = sc->sc_inq = sc->sc_lastack = 0;
	/*
	 * Release and init transmit descriptors.
	 */
	for (i = 0; i < TXDESCS; i++) {
		if (sc->sc_txmbuf[i]) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[i]);
			m_freem(sc->sc_txmbuf[i]);
			sc->sc_txmbuf[i] = 0;
		}
		qc->qc_xmit[i].qe_addr_hi = 0; /* Clear valid bit */
		qc->qc_xmit[i].qe_status1 = qc->qc_xmit[i].qe_flag = QE_NOTYET;
	}

	/*
	 * Init receive descriptors.
	 */
	for (i = 0; i < RXDESCS; i++)
		qc->qc_recv[i].qe_status1 = qc->qc_recv[i].qe_flag = QE_NOTYET;
	sc->sc_nextrx = 0;

	/*
	 * Write the descriptor addresses to the device.
	 * Receiving packets will be enabled in the interrupt routine.
	 */
	QE_WCSR(QE_CSR_CSR, QE_INT_ENABLE|QE_XMIT_INT|QE_RCV_INT);
	QE_WCSR(QE_CSR_RCLL, LOWORD(sc->sc_pqedata->qc_recv));
	QE_WCSR(QE_CSR_RCLH, HIWORD(sc->sc_pqedata->qc_recv));

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Send a setup frame.
	 * This will start the transmit machinery as well.
	 */
	qe_setup(sc);
}

/*
 * Start output on interface.
 */
void
qestart(struct ifnet *ifp)
{
	struct qe_softc *sc = ifp->if_softc;
	struct qe_cdata *qc = sc->sc_qedata;
	paddr_t	buffer;
	struct mbuf *m, *m0;
	int idx, len, s, i, totlen, error;
	short orword, csr;

	if ((QE_RCSR(QE_CSR_CSR) & QE_RCV_ENABLE) == 0)
		return;

	s = splnet();
	while (sc->sc_inq < (TXDESCS - 1)) {

		if (sc->sc_setup) {
			qe_setup(sc);
			continue;
		}
		idx = sc->sc_nexttx;
		IFQ_POLL(&ifp->if_snd, m);
		if (m == 0)
			goto out;
		/*
		 * Count the number of mbufs in the chain.
		 * We always DMA directly from the mbufs, which is why
		 * the transmit ring is really big.
		 */
		for (m0 = m, i = 0; m0; m0 = m0->m_next)
			if (m0->m_len)
				i++;
		if (i >= TXDESCS)
			panic("qestart");

		if ((i + sc->sc_inq) >= (TXDESCS - 1)) {
			ifp->if_flags |= IFF_OACTIVE;
			goto out;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		/*
		 * m now points to a mbuf chain that can be loaded.
		 * Loop around and set it.
		 */
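		/*
		 * A note on the descriptor encoding, as this driver
		 * uses it: qe_buf_len holds the negative of the buffer
		 * length in words, and the QE_ODDBEGIN/QE_ODDEND bits
		 * tell the chip that the buffer starts and/or ends on
		 * an odd byte address.
		 */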
		totlen = 0;
		for (m0 = m; m0; m0 = m0->m_next) {
			len = m0->m_len;
			if (len == 0)
				continue;
			error = bus_dmamap_load(sc->sc_dmat, sc->sc_xmtmap[idx],
			    mtod(m0, void *), m0->m_len, 0, 0);
			buffer = sc->sc_xmtmap[idx]->dm_segs[0].ds_addr;

			totlen += len;
			/* Word alignment calc */
			orword = 0;
			if (totlen == m->m_pkthdr.len) {
				if (totlen < ETHER_MIN_LEN)
					len += (ETHER_MIN_LEN - totlen);
				orword |= QE_EOMSG;
				sc->sc_txmbuf[idx] = m;
			}
			if ((buffer & 1) || (len & 1))
				len += 2;
			if (buffer & 1)
				orword |= QE_ODDBEGIN;
			if ((buffer + len) & 1)
				orword |= QE_ODDEND;
			qc->qc_xmit[idx].qe_buf_len = -(len/2);
			qc->qc_xmit[idx].qe_addr_lo = LOWORD(buffer);
			qc->qc_xmit[idx].qe_addr_hi = HIWORD(buffer);
			qc->qc_xmit[idx].qe_flag =
			    qc->qc_xmit[idx].qe_status1 = QE_NOTYET;
			qc->qc_xmit[idx].qe_addr_hi |= (QE_VALID | orword);
			if (++idx == TXDESCS)
				idx = 0;
			sc->sc_inq++;
		}
#ifdef DIAGNOSTIC
		if (totlen != m->m_pkthdr.len)
			panic("qestart: len fault");
#endif

		/*
		 * Kick off the transmit logic, if it is stopped.
		 */
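		/*
		 * QE_XL_INVALID in the CSR indicates that the chip's
		 * transmit descriptor list pointer is no longer valid
		 * (the transmitter is idle), so hand it the address of
		 * the first freshly filled descriptor to restart it.
		 */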
		csr = QE_RCSR(QE_CSR_CSR);
		if (csr & QE_XL_INVALID) {
			QE_WCSR(QE_CSR_XMTL,
			    LOWORD(&sc->sc_pqedata->qc_xmit[sc->sc_nexttx]));
			QE_WCSR(QE_CSR_XMTH,
			    HIWORD(&sc->sc_pqedata->qc_xmit[sc->sc_nexttx]));
		}
		sc->sc_nexttx = idx;
	}
	if (sc->sc_inq == (TXDESCS - 1))
		ifp->if_flags |= IFF_OACTIVE;

out:	if (sc->sc_inq)
		ifp->if_timer = 5; /* If transmit logic dies */
	splx(s);
}

static void
qeintr(void *arg)
{
	struct qe_softc *sc = arg;
	struct qe_cdata *qc = sc->sc_qedata;
	struct ifnet *ifp = &sc->sc_if;
	struct mbuf *m;
	int csr, status1, status2, len;

	csr = QE_RCSR(QE_CSR_CSR);

	QE_WCSR(QE_CSR_CSR, QE_RCV_ENABLE | QE_INT_ENABLE | QE_XMIT_INT |
	    QE_RCV_INT | QE_ILOOP);
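	/*
	 * Writing the interrupt bits back presumably acknowledges
	 * them, and QE_RCV_ENABLE (re)arms the receiver; qeinit()
	 * deliberately leaves receive disabled until this point.
	 */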

	if (csr & QE_RCV_INT)
		while (qc->qc_recv[sc->sc_nextrx].qe_status1 != QE_NOTYET) {
			status1 = qc->qc_recv[sc->sc_nextrx].qe_status1;
			status2 = qc->qc_recv[sc->sc_nextrx].qe_status2;

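			/*
			 * The receive byte-length field apparently
			 * counts bytes beyond the 60-byte minimum
			 * frame size, hence the "+ 60" below.
			 */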
			m = sc->sc_rxmbuf[sc->sc_nextrx];
			len = ((status1 & QE_RBL_HI) |
			    (status2 & QE_RBL_LO)) + 60;
			qe_add_rxbuf(sc, sc->sc_nextrx);
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = len;
			if (++sc->sc_nextrx == RXDESCS)
				sc->sc_nextrx = 0;
#if NBPFILTER > 0
			if (ifp->if_bpf)
				bpf_mtap(ifp->if_bpf, m);
#endif
			if ((status1 & QE_ESETUP) == 0)
				(*ifp->if_input)(ifp, m);
			else
				m_freem(m);
		}

	if (csr & (QE_XMIT_INT|QE_XL_INVALID)) {
		while (qc->qc_xmit[sc->sc_lastack].qe_status1 != QE_NOTYET) {
			int idx = sc->sc_lastack;

			sc->sc_inq--;
			if (++sc->sc_lastack == TXDESCS)
				sc->sc_lastack = 0;

			/* XXX collect statistics */
			qc->qc_xmit[idx].qe_addr_hi &= ~QE_VALID;
			qc->qc_xmit[idx].qe_status1 =
			    qc->qc_xmit[idx].qe_flag = QE_NOTYET;

			if (qc->qc_xmit[idx].qe_addr_hi & QE_SETUP)
				continue;
			bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[idx]);
			if (sc->sc_txmbuf[idx]) {
				m_freem(sc->sc_txmbuf[idx]);
				sc->sc_txmbuf[idx] = 0;
			}
		}
		ifp->if_timer = 0;
		ifp->if_flags &= ~IFF_OACTIVE;
		qestart(ifp); /* Queue up more packets */
	}
	/*
	 * How can the receive list become invalid?
	 * Verified that it happens anyway.
	 */
	if ((qc->qc_recv[sc->sc_nextrx].qe_status1 == QE_NOTYET) &&
	    (QE_RCSR(QE_CSR_CSR) & QE_RL_INVALID)) {
		QE_WCSR(QE_CSR_RCLL,
		    LOWORD(&sc->sc_pqedata->qc_recv[sc->sc_nextrx]));
		QE_WCSR(QE_CSR_RCLH,
		    HIWORD(&sc->sc_pqedata->qc_recv[sc->sc_nextrx]));
	}
}

/*
 * Process an ioctl request.
 */
int
qeioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct qe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s = splnet(), error = 0;

	switch (cmd) {

	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			qeinit(sc);
			arp_ifinit(ifp, ifa);
			break;
#endif
		}
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running,
			 * stop it (by disabling the receive mechanism).
			 */
			QE_WCSR(QE_CSR_CSR,
			    QE_RCSR(QE_CSR_CSR) & ~QE_RCV_ENABLE);
			ifp->if_flags &= ~IFF_RUNNING;
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
			   (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			qeinit(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * Send a new setup packet to match any new changes
			 * (like IFF_PROMISC etc.).
			 */
			qe_setup(sc);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/*
		 * Update our multicast list.
		 */
		error = (cmd == SIOCADDMULTI) ?
			ether_addmulti(ifr, &sc->sc_ec) :
			ether_delmulti(ifr, &sc->sc_ec);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			qe_setup(sc);
			error = 0;
		}
		break;

	default:
		error = EINVAL;
		break;
	}
	splx(s);
	return (error);
}

/*
 * Add a receive buffer to the indicated descriptor.
 */
int
qe_add_rxbuf(struct qe_softc *sc, int i)
{
	struct mbuf *m;
	struct qe_ring *rp;
	vaddr_t addr;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (sc->sc_rxmbuf[i] != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_rcvmap[i],
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
	if (error)
		panic("%s: can't load rx DMA map %d, error = %d",
		    sc->sc_dev.dv_xname, i, error);
	sc->sc_rxmbuf[i] = m;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rcvmap[i], 0,
	    sc->sc_rcvmap[i]->dm_mapsize, BUS_DMASYNC_PREREAD);

	/*
	 * We know that the mbuf cluster is page aligned. Also, be sure
	 * that the IP header will be longword aligned.
	 */
	m->m_data += 2;
	addr = sc->sc_rcvmap[i]->dm_segs[0].ds_addr + 2;
	rp = &sc->sc_qedata->qc_recv[i];
	rp->qe_flag = rp->qe_status1 = QE_NOTYET;
	rp->qe_addr_lo = LOWORD(addr);
	rp->qe_addr_hi = HIWORD(addr) | QE_VALID;
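	/*
	 * As in qestart(), the buffer length is given to the chip as
	 * a negative word count; the two bytes used for alignment
	 * above are subtracted first.
	 */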
	rp->qe_buf_len = -(m->m_ext.ext_size - 2)/2;

	return (0);
}

/*
 * Create a setup packet and put it in the queue for sending.
 */
void
qe_setup(struct qe_softc *sc)
{
	struct ether_multi *enm;
	struct ether_multistep step;
	struct qe_cdata *qc = sc->sc_qedata;
	struct ifnet *ifp = &sc->sc_if;
	u_int8_t *enaddr = LLADDR(ifp->if_sadl);
	int i, j, k, idx, s;

	s = splnet();
	if (sc->sc_inq == (TXDESCS - 1)) {
		sc->sc_setup = 1;
		splx(s);
		return;
	}
	sc->sc_setup = 0;
	/*
	 * Init the setup packet with valid info.
	 */
	memset(qc->qc_setup, 0xff, sizeof(qc->qc_setup)); /* Broadcast */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		qc->qc_setup[i * 8 + 1] = enaddr[i]; /* Own address */
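
	/*
	 * The setup frame stores addresses column-wise: byte i of an
	 * address lives at qc_setup[i * 8 + column]. Column 1 holds
	 * our own address; the loop below fills columns 3-7 and the
	 * second 64-byte half with up to 12 multicast addresses, and
	 * untouched columns stay all-ones (broadcast) from the memset
	 * above. (My reading of the DEQNA setup-frame layout, inferred
	 * from the indexing.)
	 */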

	/*
	 * Multicast handling. The DEQNA can handle up to 12 direct
	 * ethernet addresses.
	 */
	j = 3; k = 0;
	ifp->if_flags &= ~IFF_ALLMULTI;
	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, 6)) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			qc->qc_setup[i * 8 + j + k] = enm->enm_addrlo[i];
		j++;
		if (j == 8) {
			j = 1; k += 64;
		}
		if (k > 64) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	idx = sc->sc_nexttx;
	qc->qc_xmit[idx].qe_buf_len = -64;

	/*
	 * How is the DEQNA put into ALLMULTI mode?
	 * Until someone tells me, fall back to PROMISC when there are
	 * more than 12 ethernet addresses.
	 */
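	/*
	 * The odd buffer length below (-65 rather than -64) is
	 * evidently what flags a promiscuous-mode setup frame to
	 * the chip.
	 */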
	if (ifp->if_flags & IFF_ALLMULTI)
		ifp->if_flags |= IFF_PROMISC;
	else if (ifp->if_pcount == 0)
		ifp->if_flags &= ~IFF_PROMISC;
	if (ifp->if_flags & IFF_PROMISC)
		qc->qc_xmit[idx].qe_buf_len = -65;

	qc->qc_xmit[idx].qe_addr_lo = LOWORD(sc->sc_pqedata->qc_setup);
	qc->qc_xmit[idx].qe_addr_hi =
	    HIWORD(sc->sc_pqedata->qc_setup) | QE_SETUP | QE_EOMSG;
	qc->qc_xmit[idx].qe_status1 = qc->qc_xmit[idx].qe_flag = QE_NOTYET;
	qc->qc_xmit[idx].qe_addr_hi |= QE_VALID;

	if (QE_RCSR(QE_CSR_CSR) & QE_XL_INVALID) {
		QE_WCSR(QE_CSR_XMTL,
		    LOWORD(&sc->sc_pqedata->qc_xmit[idx]));
		QE_WCSR(QE_CSR_XMTH,
		    HIWORD(&sc->sc_pqedata->qc_xmit[idx]));
	}

	sc->sc_inq++;
	if (++sc->sc_nexttx == TXDESCS)
		sc->sc_nexttx = 0;
	splx(s);
}

/*
 * Check for dead transmit logic. Not uncommon.
 */
void
qetimeout(struct ifnet *ifp)
{
	struct qe_softc *sc = ifp->if_softc;

	if (sc->sc_inq == 0)
		return;

	printf("%s: xmit logic died, resetting...\n", sc->sc_dev.dv_xname);
	/*
	 * Do a reset of the interface, to get it going again.
	 * Would it work to just restart the transmit logic?
	 */
	qeinit(sc);
}