1 /*      $NetBSD: if_qe.c,v 1.75 2016/06/10 13:27:15 ozaki-r Exp $ */
2 /*
 * Copyright (c) 1999 Ludd, University of Luleå, Sweden. All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *      This product includes software developed at Ludd, University of
 *      Luleå, Sweden and its contributors.
17  * 4. The name of the author may not be used to endorse or promote products
18  *    derived from this software without specific prior written permission
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*
33  * Driver for DEQNA/DELQA ethernet cards.
 * Things that still need to be done:
 *	Handle ubaresets; this does not work at all right now.
 *	Fix ALLMULTI reception; someone must tell me how...
37  *	Collect statistics.
38  */
39 
40 #include <sys/cdefs.h>
41 __KERNEL_RCSID(0, "$NetBSD: if_qe.c,v 1.75 2016/06/10 13:27:15 ozaki-r Exp $");
42 
43 #include "opt_inet.h"
44 
45 #include <sys/param.h>
46 #include <sys/mbuf.h>
47 #include <sys/socket.h>
48 #include <sys/device.h>
49 #include <sys/systm.h>
50 #include <sys/sockio.h>
51 
52 #include <net/if.h>
53 #include <net/if_ether.h>
54 #include <net/if_dl.h>
55 
56 #include <netinet/in.h>
57 #include <netinet/if_inarp.h>
58 
59 #include <net/bpf.h>
60 #include <net/bpfdesc.h>
61 
62 #include <sys/bus.h>
63 
64 #include <dev/qbus/ubavar.h>
65 #include <dev/qbus/if_qereg.h>
66 
67 #include "ioconf.h"
68 
69 #define RXDESCS	30	/* # of receive descriptors */
70 #define TXDESCS	60	/* # transmit descs */
71 
72 /*
73  * Structure containing the elements that must be in DMA-safe memory.
74  */
75 struct qe_cdata {
76 	struct qe_ring	qc_recv[RXDESCS+1];	/* Receive descriptors */
77 	struct qe_ring	qc_xmit[TXDESCS+1];	/* Transmit descriptors */
78 	u_int8_t	qc_setup[128];		/* Setup packet layout */
79 };
80 
81 struct	qe_softc {
82 	device_t	sc_dev;		/* Configuration common part	*/
83 	struct uba_softc *sc_uh;	/* our parent */
84 	struct evcnt	sc_intrcnt;	/* Interrupt counting		*/
85 	struct ethercom sc_ec;		/* Ethernet common part		*/
86 #define sc_if	sc_ec.ec_if		/* network-visible interface	*/
87 	bus_space_tag_t sc_iot;
88 	bus_addr_t	sc_ioh;
89 	bus_dma_tag_t	sc_dmat;
90 	struct qe_cdata *sc_qedata;	/* Descriptor struct		*/
91 	struct qe_cdata *sc_pqedata;	/* Unibus address of above	*/
92 	struct mbuf*	sc_txmbuf[TXDESCS];
93 	struct mbuf*	sc_rxmbuf[RXDESCS];
94 	bus_dmamap_t	sc_xmtmap[TXDESCS];
95 	bus_dmamap_t	sc_rcvmap[RXDESCS];
96 	bus_dmamap_t	sc_nulldmamap;	/* ethernet padding buffer	*/
97 	struct ubinfo	sc_ui;
98 	int		sc_intvec;	/* Interrupt vector		*/
99 	int		sc_nexttx;
100 	int		sc_inq;
101 	int		sc_lastack;
102 	int		sc_nextrx;
103 	int		sc_setup;	/* Setup packet in queue	*/
104 };
105 
106 static	int	qematch(device_t, cfdata_t, void *);
107 static	void	qeattach(device_t, device_t, void *);
108 static	void	qeinit(struct qe_softc *);
109 static	void	qestart(struct ifnet *);
110 static	void	qeintr(void *);
111 static	int	qeioctl(struct ifnet *, u_long, void *);
112 static	int	qe_add_rxbuf(struct qe_softc *, int);
113 static	void	qe_setup(struct qe_softc *);
114 static	void	qetimeout(struct ifnet *);
115 
116 CFATTACH_DECL_NEW(qe, sizeof(struct qe_softc),
117     qematch, qeattach, NULL, NULL);
118 
119 #define	QE_WCSR(csr, val) \
120 	bus_space_write_2(sc->sc_iot, sc->sc_ioh, csr, val)
121 #define	QE_RCSR(csr) \
122 	bus_space_read_2(sc->sc_iot, sc->sc_ioh, csr)
123 
124 #define	LOWORD(x)	((int)(x) & 0xffff)
125 #define	HIWORD(x)	(((int)(x) >> 16) & 0x3f)
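/*
 * A DMA (Q-bus) address is split across a descriptor as a 16-bit low word
 * plus a high word holding the remaining bits; the 0x3f mask above keeps
 * the upper six bits of a 22-bit Q-bus address.
 */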
126 
127 #define	ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)
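/*
 * Short frames must be padded in software.  ETHER_MIN_LEN includes the
 * CRC, which the hardware is assumed to append itself, so the data only
 * needs to be padded out to ETHER_MIN_LEN - ETHER_CRC_LEN (60) bytes.
 */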
128 
129 /*
 * Check for a present DEQNA. Done by sending a fake setup packet
 * and waiting for an interrupt.
132  */
133 int
134 qematch(device_t parent, cfdata_t cf, void *aux)
135 {
136 	struct	qe_softc ssc;
137 	struct	qe_softc *sc = &ssc;
138 	struct	uba_attach_args *ua = aux;
139 	struct	uba_softc *uh = device_private(parent);
140 	struct ubinfo ui;
141 
142 #define	PROBESIZE	4096
143 	struct qe_ring *ring;
144 	struct	qe_ring *rp;
145 	int error, match;
146 
147 	ring = malloc(PROBESIZE, M_TEMP, M_WAITOK|M_ZERO);
148 	memset(sc, 0, sizeof(*sc));
149 	sc->sc_iot = ua->ua_iot;
150 	sc->sc_ioh = ua->ua_ioh;
151 	sc->sc_dmat = ua->ua_dmat;
152 
153 	uh->uh_lastiv -= 4;
154 	QE_WCSR(QE_CSR_CSR, QE_RESET);
155 	QE_WCSR(QE_CSR_VECTOR, uh->uh_lastiv);
156 
157 	/*
	 * Map the ring area. Actually this is done only to be able to
	 * send and receive an internal packet; some junk is looped back
	 * so that the DEQNA has a reason to interrupt.
161 	 */
162 	ui.ui_size = PROBESIZE;
163 	ui.ui_vaddr = (void *)&ring[0];
164 	if ((error = uballoc(uh, &ui, UBA_CANTWAIT))) {
165 		match = 0;
166 		goto out0;
167 	}
168 
169 	/*
	 * Init simple "fake" receive and transmit descriptors that
	 * point to some unused area. Send a fake setup packet.
172 	 */
173 	rp = (void *)ui.ui_baddr;
174 	ring[0].qe_flag = ring[0].qe_status1 = QE_NOTYET;
175 	ring[0].qe_addr_lo = LOWORD(&rp[4]);
176 	ring[0].qe_addr_hi = HIWORD(&rp[4]) | QE_VALID | QE_EOMSG | QE_SETUP;
177 	ring[0].qe_buf_len = -64;
178 
179 	ring[2].qe_flag = ring[2].qe_status1 = QE_NOTYET;
180 	ring[2].qe_addr_lo = LOWORD(&rp[4]);
181 	ring[2].qe_addr_hi = HIWORD(&rp[4]) | QE_VALID;
182 	ring[2].qe_buf_len = -(1500/2);
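	/*
	 * Note that qe_buf_len holds a negative count of 16-bit words:
	 * -64 above describes a 128-byte (setup-sized) buffer, -(1500/2)
	 * a maximum-sized frame.
	 */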
183 
184 	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
185 	DELAY(1000);
186 
187 	/*
188 	 * Start the interface and wait for the packet.
189 	 */
190 	QE_WCSR(QE_CSR_CSR, QE_INT_ENABLE|QE_XMIT_INT|QE_RCV_INT);
191 	QE_WCSR(QE_CSR_RCLL, LOWORD(&rp[2]));
192 	QE_WCSR(QE_CSR_RCLH, HIWORD(&rp[2]));
193 	QE_WCSR(QE_CSR_XMTL, LOWORD(rp));
194 	QE_WCSR(QE_CSR_XMTH, HIWORD(rp));
195 	DELAY(10000);
196 
197 	match = 1;
198 
199 	/*
200 	 * All done with the bus resources.
201 	 */
202 	ubfree(uh, &ui);
203 out0:	free(ring, M_TEMP);
204 	return match;
205 }
206 
207 /*
208  * Interface exists: make available by filling in network interface
209  * record.  System will initialize the interface when it is ready
210  * to accept packets.
211  */
212 void
213 qeattach(device_t parent, device_t self, void *aux)
214 {
215 	struct uba_attach_args *ua = aux;
216 	struct qe_softc *sc = device_private(self);
217 	struct ifnet *ifp = &sc->sc_if;
218 	struct qe_ring *rp;
219 	u_int8_t enaddr[ETHER_ADDR_LEN];
220 	int i, error;
221 	char *nullbuf;
222 
223 	sc->sc_dev = self;
224 	sc->sc_uh = device_private(parent);
225 	sc->sc_iot = ua->ua_iot;
226 	sc->sc_ioh = ua->ua_ioh;
227 	sc->sc_dmat = ua->ua_dmat;
228 
229 	/*
230 	 * Allocate DMA safe memory for descriptors and setup memory.
231 	 */
232 
233 	sc->sc_ui.ui_size = sizeof(struct qe_cdata) + ETHER_PAD_LEN;
234 	if ((error = ubmemalloc(sc->sc_uh, &sc->sc_ui, 0))) {
235 		aprint_error(": unable to ubmemalloc(), error = %d\n", error);
236 		return;
237 	}
238 	sc->sc_pqedata = (struct qe_cdata *)sc->sc_ui.ui_baddr;
239 	sc->sc_qedata = (struct qe_cdata *)sc->sc_ui.ui_vaddr;
240 
241 	/*
242 	 * Zero the newly allocated memory.
243 	 */
244 	memset(sc->sc_qedata, 0, sizeof(struct qe_cdata) + ETHER_PAD_LEN);
245 	nullbuf = ((char*)sc->sc_qedata) + sizeof(struct qe_cdata);
246 	/*
247 	 * Create the transmit descriptor DMA maps. We take advantage
248 	 * of the fact that the Qbus address space is big, and therefore
	 * allocate map registers for all transmit descriptors up front,
	 * so that we can avoid doing this each time we send a packet.
251 	 */
252 	for (i = 0; i < TXDESCS; i++) {
253 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
254 		    1, MCLBYTES, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
255 		    &sc->sc_xmtmap[i]))) {
256 			aprint_error(
257 			    ": unable to create tx DMA map %d, error = %d\n",
258 			    i, error);
259 			goto fail_4;
260 		}
261 	}
262 
263 	/*
264 	 * Create receive buffer DMA maps.
265 	 */
266 	for (i = 0; i < RXDESCS; i++) {
267 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
268 		    MCLBYTES, 0, BUS_DMA_NOWAIT,
269 		    &sc->sc_rcvmap[i]))) {
270 			aprint_error(
271 			    ": unable to create rx DMA map %d, error = %d\n",
272 			    i, error);
273 			goto fail_5;
274 		}
275 	}
276 	/*
277 	 * Pre-allocate the receive buffers.
278 	 */
279 	for (i = 0; i < RXDESCS; i++) {
280 		if ((error = qe_add_rxbuf(sc, i)) != 0) {
281 			aprint_error(
282 			    ": unable to allocate or map rx buffer %d,"
283 			    " error = %d\n", i, error);
284 			goto fail_6;
285 		}
286 	}
287 
288 	if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_PAD_LEN, 1,
289 	    ETHER_PAD_LEN, 0, BUS_DMA_NOWAIT,&sc->sc_nulldmamap)) != 0) {
290 		aprint_error(
291 		    ": unable to create pad buffer DMA map, error = %d\n",
292 		    error);
293 		goto fail_6;
294 	}
295 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_nulldmamap,
296 	    nullbuf, ETHER_PAD_LEN, NULL, BUS_DMA_NOWAIT)) != 0) {
297 		aprint_error(
298 		    ": unable to load pad buffer DMA map, error = %d\n",
299 		    error);
300 		goto fail_7;
301 	}
302 	bus_dmamap_sync(sc->sc_dmat, sc->sc_nulldmamap, 0, ETHER_PAD_LEN,
303 	    BUS_DMASYNC_PREWRITE);
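	/*
	 * nullbuf is the zeroed ETHER_PAD_LEN area that follows the
	 * descriptors in DMA memory; qestart() borrows it to pad short
	 * frames up to the minimum ethernet length.  It is never written
	 * again, so a single PREWRITE sync here suffices.
	 */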
304 
305 	/*
306 	 * Create ring loops of the buffer chains.
307 	 * This is only done once.
308 	 */
309 
310 	rp = sc->sc_qedata->qc_recv;
311 	rp[RXDESCS].qe_addr_lo = LOWORD(&sc->sc_pqedata->qc_recv[0]);
312 	rp[RXDESCS].qe_addr_hi = HIWORD(&sc->sc_pqedata->qc_recv[0]) |
313 	    QE_VALID | QE_CHAIN;
314 	rp[RXDESCS].qe_flag = rp[RXDESCS].qe_status1 = QE_NOTYET;
315 
316 	rp = sc->sc_qedata->qc_xmit;
317 	rp[TXDESCS].qe_addr_lo = LOWORD(&sc->sc_pqedata->qc_xmit[0]);
318 	rp[TXDESCS].qe_addr_hi = HIWORD(&sc->sc_pqedata->qc_xmit[0]) |
319 	    QE_VALID | QE_CHAIN;
320 	rp[TXDESCS].qe_flag = rp[TXDESCS].qe_status1 = QE_NOTYET;
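	/*
	 * The extra descriptor at the end of each list is a chain
	 * descriptor (QE_CHAIN) pointing back at the first entry, so the
	 * device sees both descriptor lists as rings.
	 */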
321 
322 	/*
	 * Get the vector that was set at match time, and remember it.
324 	 */
325 	sc->sc_intvec = sc->sc_uh->uh_lastiv;
326 	QE_WCSR(QE_CSR_CSR, QE_RESET);
327 	DELAY(1000);
328 	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
329 
330 	/*
	 * Read out the ethernet address and report which type of card this is.
332 	 */
333 	for (i = 0; i < 6; i++)
334 		enaddr[i] = QE_RCSR(i * 2) & 0xff;
335 
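	/*
	 * Board identification: writing the vector register with bit 0 set
	 * and reading it back apparently distinguishes a DELQA (the bit
	 * sticks) from a DEQNA (it reads back as zero).
	 */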
336 	QE_WCSR(QE_CSR_VECTOR, sc->sc_intvec | 1);
337 	aprint_normal(": %s, hardware address %s\n",
338 		QE_RCSR(QE_CSR_VECTOR) & 1 ? "delqa":"deqna",
339 		ether_sprintf(enaddr));
340 
341 	QE_WCSR(QE_CSR_VECTOR, QE_RCSR(QE_CSR_VECTOR) & ~1); /* ??? */
342 
343 	uba_intr_establish(ua->ua_icookie, ua->ua_cvec, qeintr,
344 		sc, &sc->sc_intrcnt);
345 	evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, ua->ua_evcnt,
346 		device_xname(sc->sc_dev), "intr");
347 
348 	strcpy(ifp->if_xname, device_xname(sc->sc_dev));
349 	ifp->if_softc = sc;
350 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
351 	ifp->if_start = qestart;
352 	ifp->if_ioctl = qeioctl;
353 	ifp->if_watchdog = qetimeout;
354 	IFQ_SET_READY(&ifp->if_snd);
355 
356 	/*
357 	 * Attach the interface.
358 	 */
359 	if_attach(ifp);
360 	ether_ifattach(ifp, enaddr);
361 
362 	return;
363 
364 	/*
365 	 * Free any resources we've allocated during the failed attach
366 	 * attempt.  Do this in reverse order and fall through.
367 	 */
368  fail_7:
369 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_nulldmamap);
370  fail_6:
371 	for (i = 0; i < RXDESCS; i++) {
372 		if (sc->sc_rxmbuf[i] != NULL) {
373 			bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);
374 			m_freem(sc->sc_rxmbuf[i]);
375 		}
376 	}
377  fail_5:
378 	for (i = 0; i < RXDESCS; i++) {
379 		if (sc->sc_rcvmap[i] != NULL)
380 			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rcvmap[i]);
381 	}
382  fail_4:
383 	for (i = 0; i < TXDESCS; i++) {
384 		if (sc->sc_xmtmap[i] != NULL)
385 			bus_dmamap_destroy(sc->sc_dmat, sc->sc_xmtmap[i]);
386 	}
387 }
388 
389 /*
390  * Initialization of interface.
391  */
392 void
393 qeinit(struct qe_softc *sc)
394 {
	struct ifnet *ifp = &sc->sc_if;
396 	struct qe_cdata *qc = sc->sc_qedata;
397 	int i;
398 
399 
400 	/*
401 	 * Reset the interface.
402 	 */
403 	QE_WCSR(QE_CSR_CSR, QE_RESET);
404 	DELAY(1000);
405 	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
406 	QE_WCSR(QE_CSR_VECTOR, sc->sc_intvec);
407 
408 	sc->sc_nexttx = sc->sc_inq = sc->sc_lastack = 0;
409 	/*
410 	 * Release and init transmit descriptors.
411 	 */
412 	for (i = 0; i < TXDESCS; i++) {
413 		if (sc->sc_txmbuf[i]) {
414 			bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[i]);
415 			m_freem(sc->sc_txmbuf[i]);
416 			sc->sc_txmbuf[i] = 0;
417 		}
418 		qc->qc_xmit[i].qe_addr_hi = 0; /* Clear valid bit */
419 		qc->qc_xmit[i].qe_status1 = qc->qc_xmit[i].qe_flag = QE_NOTYET;
420 	}
421 
422 
423 	/*
424 	 * Init receive descriptors.
425 	 */
426 	for (i = 0; i < RXDESCS; i++)
427 		qc->qc_recv[i].qe_status1 = qc->qc_recv[i].qe_flag = QE_NOTYET;
428 	sc->sc_nextrx = 0;
429 
430 	/*
431 	 * Write the descriptor addresses to the device.
432 	 * Receiving packets will be enabled in the interrupt routine.
433 	 */
434 	QE_WCSR(QE_CSR_CSR, QE_INT_ENABLE|QE_XMIT_INT|QE_RCV_INT);
435 	QE_WCSR(QE_CSR_RCLL, LOWORD(sc->sc_pqedata->qc_recv));
436 	QE_WCSR(QE_CSR_RCLH, HIWORD(sc->sc_pqedata->qc_recv));
437 
438 	ifp->if_flags |= IFF_RUNNING;
439 	ifp->if_flags &= ~IFF_OACTIVE;
440 
441 	/*
442 	 * Send a setup frame.
443 	 * This will start the transmit machinery as well.
444 	 */
445 	qe_setup(sc);
446 
447 }
448 
449 /*
450  * Start output on interface.
451  */
452 void
453 qestart(struct ifnet *ifp)
454 {
455 	struct qe_softc *sc = ifp->if_softc;
456 	struct qe_cdata *qc = sc->sc_qedata;
457 	paddr_t	buffer;
458 	struct mbuf *m, *m0;
459 	int idx, len, s, i, totlen, buflen;
460 	short orword, csr;
461 
462 	if ((QE_RCSR(QE_CSR_CSR) & QE_RCV_ENABLE) == 0)
463 		return;
464 
465 	s = splnet();
466 	while (sc->sc_inq < (TXDESCS - 1)) {
467 
468 		if (sc->sc_setup) {
469 			qe_setup(sc);
470 			continue;
471 		}
472 		idx = sc->sc_nexttx;
473 		IFQ_POLL(&ifp->if_snd, m);
474 		if (m == 0)
475 			goto out;
476 		/*
		 * Count the number of mbufs in the chain.
		 * We always do DMA directly from the mbufs, which is why the
		 * transmit ring is really big.
480 		 */
481 		for (m0 = m, i = 0; m0; m0 = m0->m_next)
482 			if (m0->m_len)
483 				i++;
484 		if (m->m_pkthdr.len < ETHER_PAD_LEN) {
485 			buflen = ETHER_PAD_LEN;
486 			i++;
487 		} else
488 			buflen = m->m_pkthdr.len;
489 		if (i >= TXDESCS)
490 			panic("qestart");
491 
492 		if ((i + sc->sc_inq) >= (TXDESCS - 1)) {
493 			ifp->if_flags |= IFF_OACTIVE;
494 			goto out;
495 		}
496 
497 		IFQ_DEQUEUE(&ifp->if_snd, m);
498 
499 		bpf_mtap(ifp, m);
500 		/*
		 * m now points to an mbuf chain that can be loaded.
		 * Loop over it and fill in the descriptors.
503 		 */
504 		totlen = 0;
505 		for (m0 = m; ; m0 = m0->m_next) {
506 			if (m0) {
507 				if (m0->m_len == 0)
508 					continue;
509 				bus_dmamap_load(sc->sc_dmat,
510 				    sc->sc_xmtmap[idx], mtod(m0, void *),
511 				    m0->m_len, 0, 0);
512 				buffer = sc->sc_xmtmap[idx]->dm_segs[0].ds_addr;
513 				len = m0->m_len;
514 			} else if (totlen < ETHER_PAD_LEN) {
515 				buffer = sc->sc_nulldmamap->dm_segs[0].ds_addr;
516 				len = ETHER_PAD_LEN - totlen;
517 			} else {
518 				break;
519 			}
520 
521 			totlen += len;
			/* Compute descriptor flags: end-of-message and odd start/end addresses */
523 			orword = 0;
524 			if (totlen == buflen) {
525 				orword |= QE_EOMSG;
526 				sc->sc_txmbuf[idx] = m;
527 			}
528 			if ((buffer & 1) || (len & 1))
529 				len += 2;
530 			if (buffer & 1)
531 				orword |= QE_ODDBEGIN;
532 			if ((buffer + len) & 1)
533 				orword |= QE_ODDEND;
534 			qc->qc_xmit[idx].qe_buf_len = -(len/2);
535 			qc->qc_xmit[idx].qe_addr_lo = LOWORD(buffer);
536 			qc->qc_xmit[idx].qe_addr_hi = HIWORD(buffer);
537 			qc->qc_xmit[idx].qe_flag =
538 			    qc->qc_xmit[idx].qe_status1 = QE_NOTYET;
539 			qc->qc_xmit[idx].qe_addr_hi |= (QE_VALID | orword);
540 			if (++idx == TXDESCS)
541 				idx = 0;
542 			sc->sc_inq++;
543 			if (m0 == NULL)
544 				break;
545 		}
546 #ifdef DIAGNOSTIC
547 		if (totlen != buflen)
548 			panic("qestart: len fault");
549 #endif
550 
551 		/*
552 		 * Kick off the transmit logic, if it is stopped.
553 		 */
554 		csr = QE_RCSR(QE_CSR_CSR);
555 		if (csr & QE_XL_INVALID) {
556 			QE_WCSR(QE_CSR_XMTL,
557 			    LOWORD(&sc->sc_pqedata->qc_xmit[sc->sc_nexttx]));
558 			QE_WCSR(QE_CSR_XMTH,
559 			    HIWORD(&sc->sc_pqedata->qc_xmit[sc->sc_nexttx]));
560 		}
561 		sc->sc_nexttx = idx;
562 	}
563 	if (sc->sc_inq == (TXDESCS - 1))
564 		ifp->if_flags |= IFF_OACTIVE;
565 
566 out:	if (sc->sc_inq)
567 		ifp->if_timer = 5; /* If transmit logic dies */
568 	splx(s);
569 }
570 
571 static void
572 qeintr(void *arg)
573 {
574 	struct qe_softc *sc = arg;
575 	struct qe_cdata *qc = sc->sc_qedata;
576 	struct ifnet *ifp = &sc->sc_if;
577 	struct mbuf *m;
578 	int csr, status1, status2, len;
579 
580 	csr = QE_RCSR(QE_CSR_CSR);
581 
582 	QE_WCSR(QE_CSR_CSR, QE_RCV_ENABLE | QE_INT_ENABLE | QE_XMIT_INT |
583 	    QE_RCV_INT | QE_ILOOP);
584 
585 	if (csr & QE_RCV_INT)
586 		while (qc->qc_recv[sc->sc_nextrx].qe_status1 != QE_NOTYET) {
587 			status1 = qc->qc_recv[sc->sc_nextrx].qe_status1;
588 			status2 = qc->qc_recv[sc->sc_nextrx].qe_status2;
589 
590 			m = sc->sc_rxmbuf[sc->sc_nextrx];
591 			len = ((status1 & QE_RBL_HI) |
592 			    (status2 & QE_RBL_LO)) + 60;
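			/*
			 * The byte count computed above is apparently
			 * reported in two pieces (high bits in status1, low
			 * bits in status2) and biased by 60, the minimum
			 * frame length without CRC.
			 */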
593 			qe_add_rxbuf(sc, sc->sc_nextrx);
594 			m_set_rcvif(m, ifp);
595 			m->m_pkthdr.len = m->m_len = len;
596 			if (++sc->sc_nextrx == RXDESCS)
597 				sc->sc_nextrx = 0;
598 			bpf_mtap(ifp, m);
599 			if ((status1 & QE_ESETUP) == 0)
600 				if_percpuq_enqueue(ifp->if_percpuq, m);
601 			else
602 				m_freem(m);
603 		}
604 
605 	if (csr & (QE_XMIT_INT|QE_XL_INVALID)) {
606 		while (qc->qc_xmit[sc->sc_lastack].qe_status1 != QE_NOTYET) {
607 			int idx = sc->sc_lastack;
608 
609 			sc->sc_inq--;
610 			if (++sc->sc_lastack == TXDESCS)
611 				sc->sc_lastack = 0;
612 
613 			/* XXX collect statistics */
614 			qc->qc_xmit[idx].qe_addr_hi &= ~QE_VALID;
615 			qc->qc_xmit[idx].qe_status1 =
616 			    qc->qc_xmit[idx].qe_flag = QE_NOTYET;
617 
618 			if (qc->qc_xmit[idx].qe_addr_hi & QE_SETUP)
619 				continue;
620 			if (sc->sc_txmbuf[idx] == NULL ||
621 			    sc->sc_txmbuf[idx]->m_pkthdr.len < ETHER_PAD_LEN)
622 				bus_dmamap_unload(sc->sc_dmat,
623 				    sc->sc_xmtmap[idx]);
624 			if (sc->sc_txmbuf[idx]) {
625 				m_freem(sc->sc_txmbuf[idx]);
626 				sc->sc_txmbuf[idx] = NULL;
627 			}
628 		}
629 		ifp->if_timer = 0;
630 		ifp->if_flags &= ~IFF_OACTIVE;
		qestart(ifp); /* Put more in the queue */
632 	}
633 	/*
	 * How can the receive list become invalid?
	 * It has been verified that it happens anyway.
636 	 */
637 	if ((qc->qc_recv[sc->sc_nextrx].qe_status1 == QE_NOTYET) &&
638 	    (QE_RCSR(QE_CSR_CSR) & QE_RL_INVALID)) {
639 		QE_WCSR(QE_CSR_RCLL,
640 		    LOWORD(&sc->sc_pqedata->qc_recv[sc->sc_nextrx]));
641 		QE_WCSR(QE_CSR_RCLH,
642 		    HIWORD(&sc->sc_pqedata->qc_recv[sc->sc_nextrx]));
643 	}
644 }
645 
646 /*
647  * Process an ioctl request.
648  */
649 int
650 qeioctl(struct ifnet *ifp, u_long cmd, void *data)
651 {
652 	struct qe_softc *sc = ifp->if_softc;
653 	struct ifaddr *ifa = (struct ifaddr *)data;
654 	int s = splnet(), error = 0;
655 
656 	switch (cmd) {
657 
658 	case SIOCINITIFADDR:
659 		ifp->if_flags |= IFF_UP;
660 		switch(ifa->ifa_addr->sa_family) {
661 #ifdef INET
662 		case AF_INET:
663 			qeinit(sc);
664 			arp_ifinit(ifp, ifa);
665 			break;
666 #endif
667 		}
668 		break;
669 
670 	case SIOCSIFFLAGS:
671 		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
672 			break;
673 		/* XXX re-use ether_ioctl() */
674 		switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
675 		case IFF_RUNNING:
676 			/*
			 * If the interface is marked down and it is running,
			 * stop it (by disabling the receive mechanism).
679 			 */
680 			QE_WCSR(QE_CSR_CSR,
681 			    QE_RCSR(QE_CSR_CSR) & ~QE_RCV_ENABLE);
682 			ifp->if_flags &= ~IFF_RUNNING;
683 			break;
684 		case IFF_UP:
685 			/*
			 * If the interface is marked up and it is stopped, then
687 			 * start it.
688 			 */
689 			qeinit(sc);
690 			break;
691 		case IFF_UP|IFF_RUNNING:
692 			/*
			 * Send a new setup packet to reflect any changes
			 * (like IFF_PROMISC etc).
695 			 */
696 			qe_setup(sc);
697 			break;
698 		case 0:
699 			break;
700 		}
701 		break;
702 
703 	case SIOCADDMULTI:
704 	case SIOCDELMULTI:
705 		/*
706 		 * Update our multicast list.
707 		 */
708 		if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
709 			/*
710 			 * Multicast list has changed; set the hardware filter
711 			 * accordingly.
712 			 */
713 			if (ifp->if_flags & IFF_RUNNING)
714 				qe_setup(sc);
715 			error = 0;
716 		}
717 		break;
718 
719 	default:
720 		error = ether_ioctl(ifp, cmd, data);
721 	}
722 	splx(s);
723 	return (error);
724 }
725 
726 /*
727  * Add a receive buffer to the indicated descriptor.
728  */
729 int
730 qe_add_rxbuf(struct qe_softc *sc, int i)
731 {
732 	struct mbuf *m;
733 	struct qe_ring *rp;
734 	vaddr_t addr;
735 	int error;
736 
737 	MGETHDR(m, M_DONTWAIT, MT_DATA);
738 	if (m == NULL)
739 		return (ENOBUFS);
740 
741 	MCLGET(m, M_DONTWAIT);
742 	if ((m->m_flags & M_EXT) == 0) {
743 		m_freem(m);
744 		return (ENOBUFS);
745 	}
746 
747 	if (sc->sc_rxmbuf[i] != NULL)
748 		bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);
749 
750 	error = bus_dmamap_load(sc->sc_dmat, sc->sc_rcvmap[i],
751 	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
752 	if (error)
753 		panic("%s: can't load rx DMA map %d, error = %d",
754 		    device_xname(sc->sc_dev), i, error);
755 	sc->sc_rxmbuf[i] = m;
756 
757 	bus_dmamap_sync(sc->sc_dmat, sc->sc_rcvmap[i], 0,
758 	    sc->sc_rcvmap[i]->dm_mapsize, BUS_DMASYNC_PREREAD);
759 
760 	/*
	 * We know that the mbuf cluster is page aligned. Offset the data
	 * by two bytes so that the IP header will be longword aligned.
763 	 */
764 	m->m_data += 2;
765 	addr = sc->sc_rcvmap[i]->dm_segs[0].ds_addr + 2;
766 	rp = &sc->sc_qedata->qc_recv[i];
767 	rp->qe_flag = rp->qe_status1 = QE_NOTYET;
768 	rp->qe_addr_lo = LOWORD(addr);
769 	rp->qe_addr_hi = HIWORD(addr) | QE_VALID;
770 	rp->qe_buf_len = -(m->m_ext.ext_size - 2)/2;
771 
772 	return (0);
773 }
774 
775 /*
776  * Create a setup packet and put in queue for sending.
777  */
778 void
779 qe_setup(struct qe_softc *sc)
780 {
781 	struct ether_multi *enm;
782 	struct ether_multistep step;
783 	struct qe_cdata *qc = sc->sc_qedata;
784 	struct ifnet *ifp = &sc->sc_if;
785 	u_int8_t enaddr[ETHER_ADDR_LEN];
786 	int i, j, k, idx, s;
787 
788 	s = splnet();
789 	if (sc->sc_inq == (TXDESCS - 1)) {
790 		sc->sc_setup = 1;
791 		splx(s);
792 		return;
793 	}
794 	sc->sc_setup = 0;
795 	/*
796 	 * Init the setup packet with valid info.
797 	 */
798 	memset(qc->qc_setup, 0xff, sizeof(qc->qc_setup)); /* Broadcast */
799 	memcpy(enaddr, CLLADDR(ifp->if_sadl), sizeof(enaddr));
800 	for (i = 0; i < ETHER_ADDR_LEN; i++)
801 		qc->qc_setup[i * 8 + 1] = enaddr[i]; /* Own address */
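	/*
	 * Setup-buffer layout as used here: the 128-byte buffer is treated
	 * as two 64-byte halves made up of 8-byte rows; byte i of each
	 * target address goes into row i, one column per address.  Filling
	 * the whole buffer with 0xff above also covers the broadcast
	 * address.
	 */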
802 
803 	/*
804 	 * Multicast handling. The DEQNA can handle up to 12 direct
805 	 * ethernet addresses.
806 	 */
807 	j = 3; k = 0;
808 	ifp->if_flags &= ~IFF_ALLMULTI;
809 	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
810 	while (enm != NULL) {
811 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 6)) {
812 			ifp->if_flags |= IFF_ALLMULTI;
813 			break;
814 		}
815 		for (i = 0; i < ETHER_ADDR_LEN; i++)
816 			qc->qc_setup[i * 8 + j + k] = enm->enm_addrlo[i];
817 		j++;
818 		if (j == 8) {
819 			j = 1; k += 64;
820 		}
821 		if (k > 64) {
822 			ifp->if_flags |= IFF_ALLMULTI;
823 			break;
824 		}
825 		ETHER_NEXT_MULTI(step, enm);
826 	}
827 	idx = sc->sc_nexttx;
828 	qc->qc_xmit[idx].qe_buf_len = -64;
829 
830 	/*
	 * How is the DEQNA put into ALLMULTI mode?
	 * Until someone tells me, fall back to PROMISC when there are more
	 * than 12 ethernet addresses.
834 	 */
835 	if (ifp->if_flags & IFF_ALLMULTI)
836 		ifp->if_flags |= IFF_PROMISC;
837 	else if (ifp->if_pcount == 0)
838 		ifp->if_flags &= ~IFF_PROMISC;
839 	if (ifp->if_flags & IFF_PROMISC)
840 		qc->qc_xmit[idx].qe_buf_len = -65;
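	/*
	 * The transmit length of the setup frame doubles as a mode field:
	 * -64 words is the plain 128-byte setup buffer, while the odd
	 * length -65 apparently switches the device into promiscuous mode.
	 */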
841 
842 	qc->qc_xmit[idx].qe_addr_lo = LOWORD(sc->sc_pqedata->qc_setup);
843 	qc->qc_xmit[idx].qe_addr_hi =
844 	    HIWORD(sc->sc_pqedata->qc_setup) | QE_SETUP | QE_EOMSG;
845 	qc->qc_xmit[idx].qe_status1 = qc->qc_xmit[idx].qe_flag = QE_NOTYET;
846 	qc->qc_xmit[idx].qe_addr_hi |= QE_VALID;
847 
848 	if (QE_RCSR(QE_CSR_CSR) & QE_XL_INVALID) {
849 		QE_WCSR(QE_CSR_XMTL,
850 		    LOWORD(&sc->sc_pqedata->qc_xmit[idx]));
851 		QE_WCSR(QE_CSR_XMTH,
852 		    HIWORD(&sc->sc_pqedata->qc_xmit[idx]));
853 	}
854 
855 	sc->sc_inq++;
856 	if (++sc->sc_nexttx == TXDESCS)
857 		sc->sc_nexttx = 0;
858 	splx(s);
859 }
860 
861 /*
862  * Check for dead transmit logic. Not uncommon.
863  */
864 void
865 qetimeout(struct ifnet *ifp)
866 {
867 	struct qe_softc *sc = ifp->if_softc;
868 
869 	if (sc->sc_inq == 0)
870 		return;
871 
872 	aprint_error_dev(sc->sc_dev, "xmit logic died, resetting...\n");
873 	/*
	 * Reset the interface to get it going again.
	 * Would it work to just restart the transmit logic?
876 	 */
877 	qeinit(sc);
878 }
879