/*      $NetBSD: if_qe.c,v 1.83 2024/07/05 04:31:52 rin Exp $ */
/*
 * Copyright (c) 1999 Ludd, University of Luleå, Sweden. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for DEQNA/DELQA ethernet cards.
 * Things that still need to be done:
 *	Handle ubaresets. This does not work at all right now.
 *	Fix ALLMULTI reception. But someone must tell me how...
 *	Collect statistics.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_qe.c,v 1.83 2024/07/05 04:31:52 rin Exp $");

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/systm.h>
#include <sys/sockio.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_dl.h>
#include <net/bpf.h>

#include <netinet/in.h>
#include <netinet/if_inarp.h>

#include <sys/bus.h>

#include <dev/qbus/ubavar.h>
#include <dev/qbus/if_qereg.h>

#include "ioconf.h"

#define RXDESCS	30	/* # of receive descriptors */
#define TXDESCS	60	/* # of transmit descriptors */

/*
 * Structure containing the elements that must be in DMA-safe memory.
 */
struct qe_cdata {
	struct qe_ring	qc_recv[RXDESCS+1];	/* Receive descriptors */
	struct qe_ring	qc_xmit[TXDESCS+1];	/* Transmit descriptors */
	uint8_t	qc_setup[128];		/* Setup packet layout */
};

struct	qe_softc {
	device_t	sc_dev;		/* Configuration common part	*/
	struct uba_softc *sc_uh;	/* our parent */
	struct evcnt	sc_intrcnt;	/* Interrupt counting		*/
	struct ethercom sc_ec;		/* Ethernet common part		*/
#define sc_if	sc_ec.ec_if		/* network-visible interface	*/
	bus_space_tag_t sc_iot;
	bus_addr_t	sc_ioh;
	bus_dma_tag_t	sc_dmat;
	struct qe_cdata *sc_qedata;	/* Descriptor struct		*/
	struct qe_cdata *sc_pqedata;	/* Unibus address of above	*/
	struct mbuf*	sc_txmbuf[TXDESCS];
	struct mbuf*	sc_rxmbuf[RXDESCS];
	bus_dmamap_t	sc_xmtmap[TXDESCS];
	bus_dmamap_t	sc_rcvmap[RXDESCS];
	bus_dmamap_t	sc_nulldmamap;	/* ethernet padding buffer	*/
	struct ubinfo	sc_ui;
	int		sc_intvec;	/* Interrupt vector		*/
	int		sc_nexttx;
	int		sc_inq;
	int		sc_lastack;
	int		sc_nextrx;
	int		sc_setup;	/* Setup packet in queue	*/
};

static	int	qematch(device_t, cfdata_t, void *);
static	void	qeattach(device_t, device_t, void *);
static	int	qeinit(struct ifnet *);
static	void	qestart(struct ifnet *);
static	void	qeintr(void *);
static	int	qeioctl(struct ifnet *, u_long, void *);
static	int	qe_add_rxbuf(struct qe_softc *, int);
static	void	qe_setup(struct qe_softc *);
static	void	qetimeout(struct ifnet *);

CFATTACH_DECL_NEW(qe, sizeof(struct qe_softc),
    qematch, qeattach, NULL, NULL);

#define	QE_WCSR(csr, val) \
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, csr, val)
#define	QE_RCSR(csr) \
	bus_space_read_2(sc->sc_iot, sc->sc_ioh, csr)

#define	LOWORD(x)	((int)(x) & 0xffff)
#define	HIWORD(x)	(((int)(x) >> 16) & 0x3f)
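/*
 * Qbus DMA addresses are 22 bits wide; LOWORD()/HIWORD() split one into
 * the 16-bit low word and the 6-bit high word that the descriptors expect.
 */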

#define	ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)
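/*
 * Minimum frame length on the wire, excluding the CRC; qestart() pads
 * shorter packets up to this length from the zero-filled pad buffer
 * mapped by sc_nulldmamap.
 */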

/*
 * Check for a present DEQNA. Done by sending a fake setup packet
 * and waiting for an interrupt.
 */
int
qematch(device_t parent, cfdata_t cf, void *aux)
{
	struct	qe_softc ssc;
	struct	qe_softc *sc = &ssc;
	struct	uba_attach_args *ua = aux;
	struct	uba_softc *uh = device_private(parent);
	struct ubinfo ui;

#define	PROBESIZE	4096
	struct qe_ring *ring;
	struct	qe_ring *rp;
	int error, match;

	ring = malloc(PROBESIZE, M_TEMP, M_WAITOK | M_ZERO);
	memset(sc, 0, sizeof(*sc));
	sc->sc_iot = ua->ua_iot;
	sc->sc_ioh = ua->ua_ioh;
	sc->sc_dmat = ua->ua_dmat;

	uh->uh_lastiv -= 4;
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	QE_WCSR(QE_CSR_VECTOR, uh->uh_lastiv);

	/*
	 * Map the ring area. Actually this is done only to be able to
	 * send and receive an internal packet; some junk is looped back
	 * so that the DEQNA has a reason to interrupt.
	 */
	ui.ui_size = PROBESIZE;
	ui.ui_vaddr = (void *)&ring[0];
	if ((error = uballoc(uh, &ui, UBA_CANTWAIT))) {
		match = 0;
		goto out0;
	}

	/*
	 * Init a simple "fake" receive and transmit descriptor that
	 * points to some unused area. Send a fake setup packet.
	 */
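	/* qe_buf_len holds the negated buffer length in 16-bit words. */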
	rp = (void *)ui.ui_baddr;
	ring[0].qe_flag = ring[0].qe_status1 = QE_NOTYET;
	ring[0].qe_addr_lo = LOWORD(&rp[4]);
	ring[0].qe_addr_hi = HIWORD(&rp[4]) | QE_VALID | QE_EOMSG | QE_SETUP;
	ring[0].qe_buf_len = -64;

	ring[2].qe_flag = ring[2].qe_status1 = QE_NOTYET;
	ring[2].qe_addr_lo = LOWORD(&rp[4]);
	ring[2].qe_addr_hi = HIWORD(&rp[4]) | QE_VALID;
	ring[2].qe_buf_len = -(1500/2);

	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
	DELAY(1000);

	/*
	 * Start the interface and wait for the packet.
	 */
	QE_WCSR(QE_CSR_CSR, QE_INT_ENABLE | QE_XMIT_INT | QE_RCV_INT);
	QE_WCSR(QE_CSR_RCLL, LOWORD(&rp[2]));
	QE_WCSR(QE_CSR_RCLH, HIWORD(&rp[2]));
	QE_WCSR(QE_CSR_XMTL, LOWORD(rp));
	QE_WCSR(QE_CSR_XMTH, HIWORD(rp));
	DELAY(10000);

	match = 1;

	/*
	 * All done with the bus resources.
	 */
	ubfree(uh, &ui);
out0:	free(ring, M_TEMP);
	return match;
}

/*
 * Interface exists: make it available by filling in the network interface
 * record. The system will initialize the interface when it is ready to
 * accept packets.
 */
void
qeattach(device_t parent, device_t self, void *aux)
{
	struct uba_attach_args *ua = aux;
	struct qe_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_if;
	struct qe_ring *rp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	int i, error;
	char *nullbuf;

	sc->sc_dev = self;
	sc->sc_uh = device_private(parent);
	sc->sc_iot = ua->ua_iot;
	sc->sc_ioh = ua->ua_ioh;
	sc->sc_dmat = ua->ua_dmat;

	/*
	 * Allocate DMA safe memory for descriptors and setup memory.
	 */

	sc->sc_ui.ui_size = sizeof(struct qe_cdata) + ETHER_PAD_LEN;
	if ((error = ubmemalloc(sc->sc_uh, &sc->sc_ui, 0))) {
		aprint_error(": unable to ubmemalloc(), error = %d\n", error);
		return;
	}
	sc->sc_pqedata = (struct qe_cdata *)sc->sc_ui.ui_baddr;
	sc->sc_qedata = (struct qe_cdata *)sc->sc_ui.ui_vaddr;

	/*
	 * Zero the newly allocated memory.
	 */
	memset(sc->sc_qedata, 0, sizeof(struct qe_cdata) + ETHER_PAD_LEN);
	nullbuf = ((char *)sc->sc_qedata) + sizeof(struct qe_cdata);
	/*
	 * Create the transmit descriptor DMA maps. We take advantage
	 * of the fact that the Qbus address space is big, and therefore
	 * allocate map registers for all transmit descriptors up front,
	 * so that we do not have to do it each time we send a packet.
	 */
	for (i = 0; i < TXDESCS; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    1, MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &sc->sc_xmtmap[i]))) {
			aprint_error(
			    ": unable to create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create receive buffer DMA maps.
	 */
	for (i = 0; i < RXDESCS; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rcvmap[i]))) {
			aprint_error(
			    ": unable to create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail_5;
		}
	}
	/*
	 * Pre-allocate the receive buffers.
	 */
	for (i = 0; i < RXDESCS; i++) {
		if ((error = qe_add_rxbuf(sc, i)) != 0) {
			aprint_error(
			    ": unable to allocate or map rx buffer %d,"
			    " error = %d\n", i, error);
			goto fail_6;
		}
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_PAD_LEN, 1,
	    ETHER_PAD_LEN, 0, BUS_DMA_NOWAIT, &sc->sc_nulldmamap)) != 0) {
		aprint_error(
		    ": unable to create pad buffer DMA map, error = %d\n",
		    error);
		goto fail_6;
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_nulldmamap,
	    nullbuf, ETHER_PAD_LEN, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error(
		    ": unable to load pad buffer DMA map, error = %d\n",
		    error);
		goto fail_7;
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_nulldmamap, 0, ETHER_PAD_LEN,
	    BUS_DMASYNC_PREWRITE);
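	/*
	 * The zero-filled pad buffer stays mapped for the lifetime of the
	 * driver; qestart() chains it onto frames shorter than ETHER_PAD_LEN.
	 */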

	/*
	 * Create ring loops of the buffer chains.
	 * This is only done once.
	 */

	rp = sc->sc_qedata->qc_recv;
	rp[RXDESCS].qe_addr_lo = LOWORD(&sc->sc_pqedata->qc_recv[0]);
	rp[RXDESCS].qe_addr_hi = HIWORD(&sc->sc_pqedata->qc_recv[0]) |
	    QE_VALID | QE_CHAIN;
	rp[RXDESCS].qe_flag = rp[RXDESCS].qe_status1 = QE_NOTYET;

	rp = sc->sc_qedata->qc_xmit;
	rp[TXDESCS].qe_addr_lo = LOWORD(&sc->sc_pqedata->qc_xmit[0]);
	rp[TXDESCS].qe_addr_hi = HIWORD(&sc->sc_pqedata->qc_xmit[0]) |
	    QE_VALID | QE_CHAIN;
	rp[TXDESCS].qe_flag = rp[TXDESCS].qe_status1 = QE_NOTYET;

	/*
	 * Get the vector that was set at match time, and remember it.
	 */
	sc->sc_intvec = sc->sc_uh->uh_lastiv;
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	DELAY(1000);
	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);

	/*
	 * Read out the ethernet address and report which type the card is.
	 */
	for (i = 0; i < 6; i++)
		enaddr[i] = QE_RCSR(i * 2) & 0xff;

	QE_WCSR(QE_CSR_VECTOR, sc->sc_intvec | 1);
	aprint_normal(": %s, hardware address %s\n",
		QE_RCSR(QE_CSR_VECTOR) & 1 ? "delqa":"deqna",
		ether_sprintf(enaddr));

	QE_WCSR(QE_CSR_VECTOR, QE_RCSR(QE_CSR_VECTOR) & ~1); /* ??? */

	uba_intr_establish(ua->ua_icookie, ua->ua_cvec, qeintr,
		sc, &sc->sc_intrcnt);
	evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, ua->ua_evcnt,
		device_xname(sc->sc_dev), "intr");

	strcpy(ifp->if_xname, device_xname(sc->sc_dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = qestart;
	ifp->if_init = qeinit;
	ifp->if_ioctl = qeioctl;
	ifp->if_watchdog = qetimeout;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_7:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_nulldmamap);
 fail_6:
	for (i = 0; i < RXDESCS; i++) {
		if (sc->sc_rxmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);
			m_freem(sc->sc_rxmbuf[i]);
		}
	}
 fail_5:
	for (i = 0; i < RXDESCS; i++) {
		if (sc->sc_rcvmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rcvmap[i]);
	}
 fail_4:
	for (i = 0; i < TXDESCS; i++) {
		if (sc->sc_xmtmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_xmtmap[i]);
	}
}

/*
 * Initialization of interface.
 */
int
qeinit(struct ifnet *ifp)
{
	struct qe_softc *sc = ifp->if_softc;
	struct qe_cdata *qc = sc->sc_qedata;
	int i;

	/*
	 * Reset the interface.
	 */
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	DELAY(1000);
	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
	QE_WCSR(QE_CSR_VECTOR, sc->sc_intvec);

	sc->sc_nexttx = sc->sc_inq = sc->sc_lastack = 0;
	/*
	 * Release and init transmit descriptors.
	 */
	for (i = 0; i < TXDESCS; i++) {
		if (sc->sc_txmbuf[i]) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[i]);
			m_freem(sc->sc_txmbuf[i]);
			sc->sc_txmbuf[i] = NULL;
		}
		qc->qc_xmit[i].qe_addr_hi = 0; /* Clear valid bit */
		qc->qc_xmit[i].qe_status1 = qc->qc_xmit[i].qe_flag = QE_NOTYET;
	}

	/*
	 * Init receive descriptors.
	 */
	for (i = 0; i < RXDESCS; i++)
		qc->qc_recv[i].qe_status1 = qc->qc_recv[i].qe_flag = QE_NOTYET;
	sc->sc_nextrx = 0;

	/*
	 * Write the descriptor addresses to the device.
	 * Receiving packets will be enabled in the interrupt routine.
	 */
	QE_WCSR(QE_CSR_CSR, QE_INT_ENABLE | QE_XMIT_INT | QE_RCV_INT);
	QE_WCSR(QE_CSR_RCLL, LOWORD(sc->sc_pqedata->qc_recv));
	QE_WCSR(QE_CSR_RCLH, HIWORD(sc->sc_pqedata->qc_recv));

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Send a setup frame.
	 * This will start the transmit machinery as well.
	 */
	qe_setup(sc);

	return 0;
}

/*
 * Start output on interface.
 */
void
qestart(struct ifnet *ifp)
{
	struct qe_softc *sc = ifp->if_softc;
	struct qe_cdata *qc = sc->sc_qedata;
	paddr_t	buffer;
	struct mbuf *m, *m0;
	int idx, len, s, i, totlen, buflen;
	short orword, csr;

	if ((QE_RCSR(QE_CSR_CSR) & QE_RCV_ENABLE) == 0)
		return;

	s = splnet();
	while (sc->sc_inq < (TXDESCS - 1)) {

		if (sc->sc_setup) {
			qe_setup(sc);
			continue;
		}
		idx = sc->sc_nexttx;
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			goto out;
		/*
		 * Count the number of mbufs in the chain.
		 * We always DMA directly from mbufs, which is why the
		 * transmit ring is so big.
		 */
		for (m0 = m, i = 0; m0; m0 = m0->m_next)
			if (m0->m_len)
				i++;
		if (m->m_pkthdr.len < ETHER_PAD_LEN) {
			buflen = ETHER_PAD_LEN;
			i++;
		} else
			buflen = m->m_pkthdr.len;
		if (i >= TXDESCS)
			panic("qestart");

		if ((i + sc->sc_inq) >= (TXDESCS - 1)) {
			ifp->if_flags |= IFF_OACTIVE;
			goto out;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);

		bpf_mtap(ifp, m, BPF_D_OUT);
		/*
		 * m now points to an mbuf chain that can be loaded.
		 * Loop around and set it.
		 */
		totlen = 0;
		for (m0 = m; ; m0 = m0->m_next) {
			if (m0) {
				if (m0->m_len == 0)
					continue;
				bus_dmamap_load(sc->sc_dmat,
				    sc->sc_xmtmap[idx], mtod(m0, void *),
				    m0->m_len, 0, 0);
				buffer = sc->sc_xmtmap[idx]->dm_segs[0].ds_addr;
				len = m0->m_len;
			} else if (totlen < ETHER_PAD_LEN) {
				buffer = sc->sc_nulldmamap->dm_segs[0].ds_addr;
				len = ETHER_PAD_LEN - totlen;
			} else {
				break;
			}

			totlen += len;
			/* Word alignment calc */
			orword = 0;
			if (totlen == buflen) {
				orword |= QE_EOMSG;
				sc->sc_txmbuf[idx] = m;
			}
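			/*
			 * The DEQNA transfers 16-bit words; QE_ODDBEGIN and
			 * QE_ODDEND flag buffers that start or end on an odd
			 * byte address, and the word count below is padded to
			 * cover the extra byte(s).
			 */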
			if ((buffer & 1) || (len & 1))
				len += 2;
			if (buffer & 1)
				orword |= QE_ODDBEGIN;
			if ((buffer + len) & 1)
				orword |= QE_ODDEND;
			qc->qc_xmit[idx].qe_buf_len = -(len/2);
			qc->qc_xmit[idx].qe_addr_lo = LOWORD(buffer);
			qc->qc_xmit[idx].qe_addr_hi = HIWORD(buffer);
			qc->qc_xmit[idx].qe_flag =
			    qc->qc_xmit[idx].qe_status1 = QE_NOTYET;
			qc->qc_xmit[idx].qe_addr_hi |= (QE_VALID | orword);
			if (++idx == TXDESCS)
				idx = 0;
			sc->sc_inq++;
			if (m0 == NULL)
				break;
		}
#ifdef DIAGNOSTIC
		if (totlen != buflen)
			panic("qestart: len fault");
#endif

		/*
		 * Kick off the transmit logic, if it is stopped.
		 */
		csr = QE_RCSR(QE_CSR_CSR);
		if (csr & QE_XL_INVALID) {
			QE_WCSR(QE_CSR_XMTL,
			    LOWORD(&sc->sc_pqedata->qc_xmit[sc->sc_nexttx]));
			QE_WCSR(QE_CSR_XMTH,
			    HIWORD(&sc->sc_pqedata->qc_xmit[sc->sc_nexttx]));
		}
		sc->sc_nexttx = idx;
	}
	if (sc->sc_inq == (TXDESCS - 1))
		ifp->if_flags |= IFF_OACTIVE;

out:	if (sc->sc_inq)
		ifp->if_timer = 5; /* If transmit logic dies */
	splx(s);
}

static void
qeintr(void *arg)
{
	struct qe_softc *sc = arg;
	struct qe_cdata *qc = sc->sc_qedata;
	struct ifnet *ifp = &sc->sc_if;
	struct mbuf *m;
	int csr, status1, status2, len;

	csr = QE_RCSR(QE_CSR_CSR);

	QE_WCSR(QE_CSR_CSR, QE_RCV_ENABLE | QE_INT_ENABLE | QE_XMIT_INT |
	    QE_RCV_INT | QE_ILOOP);

	if (csr & QE_RCV_INT)
		while (qc->qc_recv[sc->sc_nextrx].qe_status1 != QE_NOTYET) {
			status1 = qc->qc_recv[sc->sc_nextrx].qe_status1;
			status2 = qc->qc_recv[sc->sc_nextrx].qe_status2;

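			/*
			 * The chip reports the received byte count minus 60,
			 * split across the two status words; add the minimum
			 * frame length back in below.
			 */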
			m = sc->sc_rxmbuf[sc->sc_nextrx];
			len = ((status1 & QE_RBL_HI) |
			    (status2 & QE_RBL_LO)) + 60;
			qe_add_rxbuf(sc, sc->sc_nextrx);
			m_set_rcvif(m, ifp);
			m->m_pkthdr.len = m->m_len = len;
			if (++sc->sc_nextrx == RXDESCS)
				sc->sc_nextrx = 0;
			if ((status1 & QE_ESETUP) == 0)
				if_percpuq_enqueue(ifp->if_percpuq, m);
			else
				m_freem(m);
		}

	if (csr & (QE_XMIT_INT | QE_XL_INVALID)) {
		while (qc->qc_xmit[sc->sc_lastack].qe_status1 != QE_NOTYET) {
			int idx = sc->sc_lastack;

			sc->sc_inq--;
			if (++sc->sc_lastack == TXDESCS)
				sc->sc_lastack = 0;

			/* XXX collect statistics */
			qc->qc_xmit[idx].qe_addr_hi &= ~QE_VALID;
			qc->qc_xmit[idx].qe_status1 =
			    qc->qc_xmit[idx].qe_flag = QE_NOTYET;

			if (qc->qc_xmit[idx].qe_addr_hi & QE_SETUP)
				continue;
			if (sc->sc_txmbuf[idx] == NULL ||
			    sc->sc_txmbuf[idx]->m_pkthdr.len < ETHER_PAD_LEN)
				bus_dmamap_unload(sc->sc_dmat,
				    sc->sc_xmtmap[idx]);
			m_freem(sc->sc_txmbuf[idx]);
			sc->sc_txmbuf[idx] = NULL;
		}
		ifp->if_timer = 0;
		ifp->if_flags &= ~IFF_OACTIVE;
		qestart(ifp); /* Queue up more packets */
	}
	/*
	 * How can the receive list become invalid???
	 * Verified that it happens anyway.
	 */
	if ((qc->qc_recv[sc->sc_nextrx].qe_status1 == QE_NOTYET) &&
	    (QE_RCSR(QE_CSR_CSR) & QE_RL_INVALID)) {
		QE_WCSR(QE_CSR_RCLL,
		    LOWORD(&sc->sc_pqedata->qc_recv[sc->sc_nextrx]));
		QE_WCSR(QE_CSR_RCLH,
		    HIWORD(&sc->sc_pqedata->qc_recv[sc->sc_nextrx]));
	}
}

/*
 * Process an ioctl request.
 */
int
qeioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct qe_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s = splnet(), error = 0;

	switch (cmd) {

	case SIOCINITIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			qeinit(ifp);
			arp_ifinit(ifp, ifa);
			break;
#endif
		}
		break;

	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		/* XXX re-use ether_ioctl() */
		switch (ifp->if_flags & (IFF_UP | IFF_RUNNING)) {
		case IFF_RUNNING:
			/*
			 * If the interface is marked down and it is running,
			 * stop it (by disabling the receive mechanism).
			 */
			QE_WCSR(QE_CSR_CSR,
			    QE_RCSR(QE_CSR_CSR) & ~QE_RCV_ENABLE);
			ifp->if_flags &= ~IFF_RUNNING;
			break;
		case IFF_UP:
			/*
			 * If the interface is marked up and it is stopped,
			 * start it.
			 */
			qeinit(ifp);
			break;
		case IFF_UP | IFF_RUNNING:
			/*
			 * Send a new setup packet to match any new changes
			 * (like IFF_PROMISC etc).
			 */
			qe_setup(sc);
			break;
		case 0:
			break;
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/*
		 * Update our multicast list.
		 */
		if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				qe_setup(sc);
			error = 0;
		}
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
	}
	splx(s);
	return error;
}

/*
 * Add a receive buffer to the indicated descriptor.
 */
int
qe_add_rxbuf(struct qe_softc *sc, int i)
{
	struct mbuf *m;
	struct qe_ring *rp;
	vaddr_t addr;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	if (sc->sc_rxmbuf[i] != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_rcvmap[i],
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
	if (error)
		panic("%s: can't load rx DMA map %d, error = %d",
		    device_xname(sc->sc_dev), i, error);
	sc->sc_rxmbuf[i] = m;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rcvmap[i], 0,
	    sc->sc_rcvmap[i]->dm_mapsize, BUS_DMASYNC_PREREAD);

	/*
	 * We know that the mbuf cluster is page aligned. Also, be sure
	 * that the IP header will be longword aligned.
	 */
	m->m_data += 2;
	addr = sc->sc_rcvmap[i]->dm_segs[0].ds_addr + 2;
	rp = &sc->sc_qedata->qc_recv[i];
	rp->qe_flag = rp->qe_status1 = QE_NOTYET;
	rp->qe_addr_lo = LOWORD(addr);
	rp->qe_addr_hi = HIWORD(addr) | QE_VALID;
	rp->qe_buf_len = -(m->m_ext.ext_size - 2) / 2;

	return 0;
}

/*
 * Create a setup packet and put it in the queue for sending.
 */
void
qe_setup(struct qe_softc *sc)
{
	struct ethercom *ec = &sc->sc_ec;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct qe_cdata *qc = sc->sc_qedata;
	struct ifnet *ifp = &sc->sc_if;
	uint8_t enaddr[ETHER_ADDR_LEN];
	int i, j, k, idx, s;

	s = splnet();
	if (sc->sc_inq == (TXDESCS - 1)) {
		sc->sc_setup = 1;
		splx(s);
		return;
	}
	sc->sc_setup = 0;
	/*
	 * Init the setup packet with valid info.
	 */
	memset(qc->qc_setup, 0xff, sizeof(qc->qc_setup)); /* Broadcast */
	memcpy(enaddr, CLLADDR(ifp->if_sadl), sizeof(enaddr));
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		qc->qc_setup[i * 8 + 1] = enaddr[i]; /* Own address */

	/*
	 * Multicast handling. The DEQNA can handle up to 12 direct
	 * ethernet addresses.
	 */
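	/*
	 * The 128-byte setup frame is two 64-byte halves, each holding up to
	 * seven addresses column-wise: byte i of the address in slot j lives
	 * at offset i * 8 + j.  Slot 1 of the first half carries our own
	 * address (filled in above); multicast addresses go into slots 3-7
	 * of the first half and slots 1-7 of the second, twelve in all.
	 */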
	j = 3; k = 0;
	ifp->if_flags &= ~IFF_ALLMULTI;
	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 6)) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			qc->qc_setup[i * 8 + j + k] = enm->enm_addrlo[i];
		j++;
		if (j == 8) {
			j = 1; k += 64;
		}
		if (k > 64) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);
	idx = sc->sc_nexttx;
	qc->qc_xmit[idx].qe_buf_len = -64;

	/*
	 * How is the DEQNA put into ALLMULTI mode???
	 * Until someone tells me, fall back to PROMISC when there are more
	 * than 12 ethernet addresses.
	 */
	if (ifp->if_flags & IFF_ALLMULTI)
		ifp->if_flags |= IFF_PROMISC;
	else if (ifp->if_pcount == 0)
		ifp->if_flags &= ~IFF_PROMISC;
	if (ifp->if_flags & IFF_PROMISC)
		qc->qc_xmit[idx].qe_buf_len = -65;
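	/*
	 * The longer setup frame length (65 words instead of the normal 64)
	 * is what requests promiscuous reception from the chip.
	 */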

	qc->qc_xmit[idx].qe_addr_lo = LOWORD(sc->sc_pqedata->qc_setup);
	qc->qc_xmit[idx].qe_addr_hi =
	    HIWORD(sc->sc_pqedata->qc_setup) | QE_SETUP | QE_EOMSG;
	qc->qc_xmit[idx].qe_status1 = qc->qc_xmit[idx].qe_flag = QE_NOTYET;
	qc->qc_xmit[idx].qe_addr_hi |= QE_VALID;

	if (QE_RCSR(QE_CSR_CSR) & QE_XL_INVALID) {
		QE_WCSR(QE_CSR_XMTL,
		    LOWORD(&sc->sc_pqedata->qc_xmit[idx]));
		QE_WCSR(QE_CSR_XMTH,
		    HIWORD(&sc->sc_pqedata->qc_xmit[idx]));
	}

	sc->sc_inq++;
	if (++sc->sc_nexttx == TXDESCS)
		sc->sc_nexttx = 0;
	splx(s);
}

/*
 * Check for dead transmit logic. Not uncommon.
 */
void
qetimeout(struct ifnet *ifp)
{
	struct qe_softc *sc = ifp->if_softc;

	if (sc->sc_inq == 0)
		return;

	aprint_error_dev(sc->sc_dev, "xmit logic died, resetting...\n");
	/*
	 * Do a reset of the interface to get it going again.
	 * Would it work to just restart the transmit logic?
	 */
	qeinit(ifp);
}