xref: /netbsd-src/sys/dev/ic/sgec.c (revision b5677b36047b601b9addaaa494a58ceae82c2a6c)
1 /*      $NetBSD: sgec.c,v 1.36 2008/11/07 00:20:03 dyoung Exp $ */
2 /*
3  * Copyright (c) 1999 Ludd, University of Lule}, Sweden. All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *      This product includes software developed at Ludd, University of
16  *      Lule}, Sweden and its contributors.
17  * 4. The name of the author may not be used to endorse or promote products
18  *    derived from this software without specific prior written permission
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*
33  * Driver for the SGEC (Second Generation Ethernet Controller), sitting
34  * on for example the VAX 4000/300 (KA670).
35  *
36  * The SGEC looks like a mixture of the DEQNA and the TULIP. Fun toy.
37  *
 * Even though the chip is capable of using virtual addresses (reading the
 * System Page Table directly), this driver doesn't do so, and there
 * is no benefit in doing it in NetBSD today.
 *
 * Things still to be done:
 *	Collect statistics.
 *	Use imperfect filtering when there are many multicast addresses.
45  */
46 
47 #include <sys/cdefs.h>
48 __KERNEL_RCSID(0, "$NetBSD: sgec.c,v 1.36 2008/11/07 00:20:03 dyoung Exp $");
49 
50 #include "opt_inet.h"
51 #include "bpfilter.h"
52 
53 #include <sys/param.h>
54 #include <sys/mbuf.h>
55 #include <sys/socket.h>
56 #include <sys/device.h>
57 #include <sys/systm.h>
58 #include <sys/sockio.h>
59 
60 #include <uvm/uvm_extern.h>
61 
62 #include <net/if.h>
63 #include <net/if_ether.h>
64 #include <net/if_dl.h>
65 
66 #include <netinet/in.h>
67 #include <netinet/if_inarp.h>
68 
69 #if NBPFILTER > 0
70 #include <net/bpf.h>
71 #include <net/bpfdesc.h>
72 #endif
73 
74 #include <sys/bus.h>
75 
76 #include <dev/ic/sgecreg.h>
77 #include <dev/ic/sgecvar.h>
78 
79 static	void	zeinit(struct ze_softc *);
80 static	void	zestart(struct ifnet *);
81 static	int	zeioctl(struct ifnet *, u_long, void *);
82 static	int	ze_add_rxbuf(struct ze_softc *, int);
83 static	void	ze_setup(struct ze_softc *);
84 static	void	zetimeout(struct ifnet *);
85 static	bool	zereset(struct ze_softc *);
86 
87 #define	ZE_WCSR(csr, val) \
88 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, csr, val)
89 #define	ZE_RCSR(csr) \
90 	bus_space_read_4(sc->sc_iot, sc->sc_ioh, csr)
91 
92 /*
93  * Interface exists: make available by filling in network interface
94  * record.  System will initialize the interface when it is ready
95  * to accept packets.
96  */
97 void
98 sgec_attach(struct ze_softc *sc)
99 {
100 	struct ifnet *ifp = &sc->sc_if;
101 	struct ze_tdes *tp;
102 	struct ze_rdes *rp;
103 	bus_dma_segment_t seg;
104 	int i, rseg, error;
105 
106         /*
107          * Allocate DMA safe memory for descriptors and setup memory.
108          */
109 	error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct ze_cdata),
110 	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
111 	if (error) {
112 		aprint_error(": unable to allocate control data, error = %d\n",
113 		    error);
114 		goto fail_0;
115 	}
116 
117 	error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sizeof(struct ze_cdata),
118 	    (void **)&sc->sc_zedata, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
119 	if (error) {
120 		aprint_error(
121 		    ": unable to map control data, error = %d\n", error);
122 		goto fail_1;
123 	}
124 
125 	error = bus_dmamap_create(sc->sc_dmat, sizeof(struct ze_cdata), 1,
126 	    sizeof(struct ze_cdata), 0, BUS_DMA_NOWAIT, &sc->sc_cmap);
127 	if (error) {
128 		aprint_error(
129 		    ": unable to create control data DMA map, error = %d\n",
130 		    error);
131 		goto fail_2;
132 	}
133 
134 	error = bus_dmamap_load(sc->sc_dmat, sc->sc_cmap, sc->sc_zedata,
135 	    sizeof(struct ze_cdata), NULL, BUS_DMA_NOWAIT);
136 	if (error) {
137 		aprint_error(
138 		    ": unable to load control data DMA map, error = %d\n",
139 		    error);
140 		goto fail_3;
141 	}
142 
143 	/*
144 	 * Zero the newly allocated memory.
145 	 */
146 	memset(sc->sc_zedata, 0, sizeof(struct ze_cdata));
147 
148 	/*
149 	 * Create the transmit descriptor DMA maps.
150 	 */
151 	for (i = 0; error == 0 && i < TXDESCS; i++) {
152 		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
153 		    TXDESCS - 1, MCLBYTES, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
154 		    &sc->sc_xmtmap[i]);
155 	}
156 	if (error) {
157 		aprint_error(": unable to create tx DMA map %d, error = %d\n",
158 		    i, error);
159 		goto fail_4;
160 	}
161 
162 	/*
163 	 * Create receive buffer DMA maps.
164 	 */
165 	for (i = 0; error == 0 && i < RXDESCS; i++) {
166 		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
167 		    MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_rcvmap[i]);
168 	}
169 	if (error) {
170 		aprint_error(": unable to create rx DMA map %d, error = %d\n",
171 		    i, error);
172 		goto fail_5;
173 	}
174 
175 	/*
176 	 * Pre-allocate the receive buffers.
177 	 */
178 	for (i = 0; error == 0 && i < RXDESCS; i++) {
179 		error = ze_add_rxbuf(sc, i);
180 	}
181 
182 	if (error) {
183 		aprint_error(
184 		    ": unable to allocate or map rx buffer %d, error = %d\n",
185 		    i, error);
186 		goto fail_6;
187 	}
188 
189 	/* For vmstat -i
190 	 */
191 	evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, NULL,
192 	    device_xname(sc->sc_dev), "intr");
193 	evcnt_attach_dynamic(&sc->sc_rxintrcnt, EVCNT_TYPE_INTR,
194 	    &sc->sc_intrcnt, device_xname(sc->sc_dev), "rx intr");
195 	evcnt_attach_dynamic(&sc->sc_txintrcnt, EVCNT_TYPE_INTR,
196 	    &sc->sc_intrcnt, device_xname(sc->sc_dev), "tx intr");
197 	evcnt_attach_dynamic(&sc->sc_txdraincnt, EVCNT_TYPE_INTR,
198 	    &sc->sc_intrcnt, device_xname(sc->sc_dev), "tx drain");
199 	evcnt_attach_dynamic(&sc->sc_nobufintrcnt, EVCNT_TYPE_INTR,
200 	    &sc->sc_intrcnt, device_xname(sc->sc_dev), "nobuf intr");
201 	evcnt_attach_dynamic(&sc->sc_nointrcnt, EVCNT_TYPE_INTR,
202 	    &sc->sc_intrcnt, device_xname(sc->sc_dev), "no intr");
203 
204 	/*
205 	 * Create ring loops of the buffer chains.
206 	 * This is only done once.
207 	 */
208 	sc->sc_pzedata = (struct ze_cdata *)sc->sc_cmap->dm_segs[0].ds_addr;
209 
210 	rp = sc->sc_zedata->zc_recv;
211 	rp[RXDESCS].ze_framelen = ZE_FRAMELEN_OW;
212 	rp[RXDESCS].ze_rdes1 = ZE_RDES1_CA;
213 	rp[RXDESCS].ze_bufaddr = (char *)sc->sc_pzedata->zc_recv;
214 
215 	tp = sc->sc_zedata->zc_xmit;
216 	tp[TXDESCS].ze_tdr = ZE_TDR_OW;
217 	tp[TXDESCS].ze_tdes1 = ZE_TDES1_CA;
218 	tp[TXDESCS].ze_bufaddr = (char *)sc->sc_pzedata->zc_xmit;
219 
220 	if (zereset(sc))
221 		return;
222 
223 	strcpy(ifp->if_xname, device_xname(sc->sc_dev));
224 	ifp->if_softc = sc;
225 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
226 	ifp->if_start = zestart;
227 	ifp->if_ioctl = zeioctl;
228 	ifp->if_watchdog = zetimeout;
229 	IFQ_SET_READY(&ifp->if_snd);
230 
231 	/*
232 	 * Attach the interface.
233 	 */
234 	if_attach(ifp);
235 	ether_ifattach(ifp, sc->sc_enaddr);
236 
237 	aprint_normal("\n");
238 	aprint_normal_dev(sc->sc_dev, "hardware address %s\n",
239 	    ether_sprintf(sc->sc_enaddr));
240 	return;
241 
242 	/*
243 	 * Free any resources we've allocated during the failed attach
244 	 * attempt.  Do this in reverse order and fall through.
245 	 */
246  fail_6:
247 	for (i = 0; i < RXDESCS; i++) {
248 		if (sc->sc_rxmbuf[i] != NULL) {
249 			bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[i]);
250 			m_freem(sc->sc_rxmbuf[i]);
251 		}
252 	}
253  fail_5:
254 	for (i = 0; i < RXDESCS; i++) {
255 		if (sc->sc_xmtmap[i] != NULL)
256 			bus_dmamap_destroy(sc->sc_dmat, sc->sc_xmtmap[i]);
257 	}
258  fail_4:
259 	for (i = 0; i < TXDESCS; i++) {
260 		if (sc->sc_rcvmap[i] != NULL)
261 			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rcvmap[i]);
262 	}
263 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cmap);
264  fail_3:
265 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cmap);
266  fail_2:
267 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_zedata,
268 	    sizeof(struct ze_cdata));
269  fail_1:
270 	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
271  fail_0:
272 	return;
273 }
274 
275 /*
276  * Initialization of interface.
277  */
278 void
279 zeinit(struct ze_softc *sc)
280 {
281 	struct ifnet *ifp = &sc->sc_if;
282 	struct ze_cdata *zc = sc->sc_zedata;
283 	int i;
284 
285 	/*
286 	 * Reset the interface.
287 	 */
288 	if (zereset(sc))
289 		return;
290 
291 	sc->sc_nexttx = sc->sc_inq = sc->sc_lastack = sc->sc_txcnt = 0;
292 	/*
293 	 * Release and init transmit descriptors.
294 	 */
295 	for (i = 0; i < TXDESCS; i++) {
296 		if (sc->sc_xmtmap[i]->dm_nsegs > 0)
297 			bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[i]);
298 		if (sc->sc_txmbuf[i]) {
299 			m_freem(sc->sc_txmbuf[i]);
300 			sc->sc_txmbuf[i] = 0;
301 		}
302 		zc->zc_xmit[i].ze_tdr = 0; /* Clear valid bit */
303 	}
304 
305 
306 	/*
307 	 * Init receive descriptors.
308 	 */
309 	for (i = 0; i < RXDESCS; i++)
310 		zc->zc_recv[i].ze_framelen = ZE_FRAMELEN_OW;
311 	sc->sc_nextrx = 0;
312 
313 	ZE_WCSR(ZE_CSR6, ZE_NICSR6_IE|ZE_NICSR6_BL_8|ZE_NICSR6_ST|
314 	    ZE_NICSR6_SR|ZE_NICSR6_DC);
315 
316 	ifp->if_flags |= IFF_RUNNING;
317 	ifp->if_flags &= ~IFF_OACTIVE;
318 
319 	/*
320 	 * Send a setup frame.
321 	 * This will start the transmit machinery as well.
322 	 */
323 	ze_setup(sc);
324 
325 }
326 
327 /*
328  * Start output on interface.
329  */
330 void
331 zestart(struct ifnet *ifp)
332 {
333 	struct ze_softc *sc = ifp->if_softc;
334 	struct ze_cdata *zc = sc->sc_zedata;
335 	paddr_t	buffer;
336 	struct mbuf *m;
337 	int nexttx, starttx;
338 	int len, i, totlen, error;
339 	int old_inq = sc->sc_inq;
340 	uint16_t orword, tdr;
341 	bus_dmamap_t map;
342 
343 	while (sc->sc_inq < (TXDESCS - 1)) {
344 
345 		if (sc->sc_setup) {
346 			ze_setup(sc);
347 			continue;
348 		}
349 		nexttx = sc->sc_nexttx;
350 		IFQ_POLL(&sc->sc_if.if_snd, m);
351 		if (m == 0)
352 			goto out;
353 		/*
354 		 * Count number of mbufs in chain.
355 		 * Always do DMA directly from mbufs, therefore the transmit
356 		 * ring is really big.
357 		 */
358 		map = sc->sc_xmtmap[nexttx];
359 		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
360 		    BUS_DMA_WRITE);
361 		if (error) {
362 			aprint_error_dev(sc->sc_dev,
363 			    "zestart: load_mbuf failed: %d", error);
364 			goto out;
365 		}
366 
367 		if (map->dm_nsegs >= TXDESCS)
368 			panic("zestart"); /* XXX */
369 
370 		if ((map->dm_nsegs + sc->sc_inq) >= (TXDESCS - 1)) {
371 			bus_dmamap_unload(sc->sc_dmat, map);
372 			ifp->if_flags |= IFF_OACTIVE;
373 			goto out;
374 		}
375 
376 		/*
377 		 * m now points to a mbuf chain that can be loaded.
378 		 * Loop around and set it.
379 		 */
380 		totlen = 0;
381 		orword = ZE_TDES1_FS;
382 		starttx = nexttx;
383 		for (i = 0; i < map->dm_nsegs; i++) {
384 			buffer = map->dm_segs[i].ds_addr;
385 			len = map->dm_segs[i].ds_len;
386 
387 			KASSERT(len > 0);
388 
389 			totlen += len;
390 			/* Word alignment calc */
391 			if (totlen == m->m_pkthdr.len) {
392 				sc->sc_txcnt += map->dm_nsegs;
393 				if (sc->sc_txcnt >= TXDESCS * 3 / 4) {
394 					orword |= ZE_TDES1_IC;
395 					sc->sc_txcnt = 0;
396 				}
397 				orword |= ZE_TDES1_LS;
398 				sc->sc_txmbuf[nexttx] = m;
399 			}
400 			zc->zc_xmit[nexttx].ze_bufsize = len;
401 			zc->zc_xmit[nexttx].ze_bufaddr = (char *)buffer;
402 			zc->zc_xmit[nexttx].ze_tdes1 = orword;
403 			zc->zc_xmit[nexttx].ze_tdr = tdr;
404 
405 			if (++nexttx == TXDESCS)
406 				nexttx = 0;
407 			orword = 0;
408 			tdr = ZE_TDR_OW;
409 		}
410 
411 		sc->sc_inq += map->dm_nsegs;
412 
413 		IFQ_DEQUEUE(&ifp->if_snd, m);
414 #ifdef DIAGNOSTIC
415 		if (totlen != m->m_pkthdr.len)
416 			panic("zestart: len fault");
417 #endif
418 		/*
419 		 * Turn ownership of the packet over to the device.
420 		 */
421 		zc->zc_xmit[starttx].ze_tdr = ZE_TDR_OW;
422 
423 		/*
424 		 * Kick off the transmit logic, if it is stopped.
425 		 */
426 		if ((ZE_RCSR(ZE_CSR5) & ZE_NICSR5_TS) != ZE_NICSR5_TS_RUN)
427 			ZE_WCSR(ZE_CSR1, -1);
428 		sc->sc_nexttx = nexttx;
429 	}
430 	if (sc->sc_inq == (TXDESCS - 1))
431 		ifp->if_flags |= IFF_OACTIVE;
432 
433 out:	if (old_inq < sc->sc_inq)
434 		ifp->if_timer = 5; /* If transmit logic dies */
435 }
436 
/*
 * Interrupt service routine.
 *
 * Returns 0 if the interrupt was not raised by this device, 1 otherwise.
 * Handles, in order: receive-overrun accounting, draining of completed
 * receive descriptors, and reclamation of completed transmit chains
 * (which may restart output via zestart()).
 */
int
sgec_intr(struct ze_softc *sc)
{
	struct ze_cdata *zc = sc->sc_zedata;
	struct ifnet *ifp = &sc->sc_if;
	struct mbuf *m;
	int csr, len;

	csr = ZE_RCSR(ZE_CSR5);
	if ((csr & ZE_NICSR5_IS) == 0) { /* Interrupt was not from us */
		sc->sc_nointrcnt.ev_count++;
		return 0;
	}
	/* Write the status bits back to acknowledge/clear them. */
	ZE_WCSR(ZE_CSR5, csr);

	if (csr & ZE_NICSR5_RU)
		sc->sc_nobufintrcnt.ev_count++;

	if (csr & ZE_NICSR5_RI) {
		sc->sc_rxintrcnt.ev_count++;
		/* Process every receive descriptor the chip handed back. */
		while ((zc->zc_recv[sc->sc_nextrx].ze_framelen &
		    ZE_FRAMELEN_OW) == 0) {

			ifp->if_ipackets++;
			m = sc->sc_rxmbuf[sc->sc_nextrx];
			len = zc->zc_recv[sc->sc_nextrx].ze_framelen;
			/* Replace the ring buffer before passing m upward. */
			ze_add_rxbuf(sc, sc->sc_nextrx);
			if (++sc->sc_nextrx == RXDESCS)
				sc->sc_nextrx = 0;
			if (len < ETHER_MIN_LEN) {
				/* Runt frame: count the error and drop it. */
				ifp->if_ierrors++;
				m_freem(m);
			} else {
				m->m_pkthdr.rcvif = ifp;
				m->m_pkthdr.len = m->m_len =
				    len - ETHER_CRC_LEN;
#if NBPFILTER > 0
				if (ifp->if_bpf)
					bpf_mtap(ifp->if_bpf, m);
#endif
				(*ifp->if_input)(ifp, m);
			}
		}
	}

	if (csr & ZE_NICSR5_TI)
		sc->sc_txintrcnt.ev_count++;
	/* Reclaim completed transmit descriptors between lastack and nexttx. */
	if (sc->sc_lastack != sc->sc_nexttx) {
		int lastack;
		for (lastack = sc->sc_lastack; lastack != sc->sc_nexttx; ) {
			bus_dmamap_t map;
			int nlastack;

			/* Stop at the first descriptor still owned by the chip. */
			if ((zc->zc_xmit[lastack].ze_tdr & ZE_TDR_OW) != 0)
				break;

			/* Setup frames occupy one descriptor and carry no mbuf. */
			if ((zc->zc_xmit[lastack].ze_tdes1 & ZE_TDES1_DT) ==
			    ZE_TDES1_DT_SETUP) {
				if (++lastack == TXDESCS)
					lastack = 0;
				sc->sc_inq--;
				continue;
			}

			KASSERT(zc->zc_xmit[lastack].ze_tdes1 & ZE_TDES1_FS);
			map = sc->sc_xmtmap[lastack];
			KASSERT(map->dm_nsegs > 0);
			/* Only reclaim once the whole multi-segment chain is done. */
			nlastack = (lastack + map->dm_nsegs - 1) % TXDESCS;
			if (zc->zc_xmit[nlastack].ze_tdr & ZE_TDR_OW)
				break;
			lastack = nlastack;
			if (sc->sc_txcnt > map->dm_nsegs)
			    sc->sc_txcnt -= map->dm_nsegs;
			else
			    sc->sc_txcnt = 0;
			sc->sc_inq -= map->dm_nsegs;
			KASSERT(zc->zc_xmit[lastack].ze_tdes1 & ZE_TDES1_LS);
			ifp->if_opackets++;
			bus_dmamap_unload(sc->sc_dmat, map);
			KASSERT(sc->sc_txmbuf[lastack]);
#if NBPFILTER > 0
			if (ifp->if_bpf)
				bpf_mtap(ifp->if_bpf, sc->sc_txmbuf[lastack]);
#endif
			m_freem(sc->sc_txmbuf[lastack]);
			sc->sc_txmbuf[lastack] = 0;
			if (++lastack == TXDESCS)
				lastack = 0;
		}
		if (lastack != sc->sc_lastack) {
			sc->sc_txdraincnt.ev_count++;
			sc->sc_lastack = lastack;
			if (sc->sc_inq == 0)
				ifp->if_timer = 0; /* nothing queued: cancel watchdog */
			ifp->if_flags &= ~IFF_OACTIVE;
			zestart(ifp); /* Put in more in queue */
		}
	}
	return 1;
}
537 
538 /*
539  * Process an ioctl request.
540  */
541 int
542 zeioctl(struct ifnet *ifp, u_long cmd, void *data)
543 {
544 	struct ze_softc *sc = ifp->if_softc;
545 	struct ifaddr *ifa = data;
546 	int s = splnet(), error = 0;
547 
548 	switch (cmd) {
549 
550 	case SIOCINITIFADDR:
551 		ifp->if_flags |= IFF_UP;
552 		switch(ifa->ifa_addr->sa_family) {
553 #ifdef INET
554 		case AF_INET:
555 			zeinit(sc);
556 			arp_ifinit(ifp, ifa);
557 			break;
558 #endif
559 		}
560 		break;
561 
562 	case SIOCSIFFLAGS:
563 		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
564 			break;
565 		/* XXX re-use ether_ioctl() */
566 		switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
567 		case IFF_RUNNING:
568 			/*
569 			 * If interface is marked down and it is running,
570 			 * stop it. (by disabling receive mechanism).
571 			 */
572 			ZE_WCSR(ZE_CSR6, ZE_RCSR(ZE_CSR6) &
573 			    ~(ZE_NICSR6_ST|ZE_NICSR6_SR));
574 			ifp->if_flags &= ~IFF_RUNNING;
575 			break;
576 		case IFF_UP:
577 			/*
578 			 * If interface it marked up and it is stopped, then
579 			 * start it.
580 			 */
581 			zeinit(sc);
582 			break;
583 		case IFF_UP|IFF_RUNNING:
584 			/*
585 			 * Send a new setup packet to match any new changes.
586 			 * (Like IFF_PROMISC etc)
587 			 */
588 			ze_setup(sc);
589 			break;
590 		case 0:
591 			break;
592 		}
593 		break;
594 
595 	case SIOCADDMULTI:
596 	case SIOCDELMULTI:
597 		/*
598 		 * Update our multicast list.
599 		 */
600 		if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
601 			/*
602 			 * Multicast list has changed; set the hardware filter
603 			 * accordingly.
604 			 */
605 			if (ifp->if_flags & IFF_RUNNING)
606 				ze_setup(sc);
607 			error = 0;
608 		}
609 		break;
610 
611 	default:
612 		error = ether_ioctl(ifp, cmd, data);
613 
614 	}
615 	splx(s);
616 	return (error);
617 }
618 
619 /*
620  * Add a receive buffer to the indicated descriptor.
621  */
622 int
623 ze_add_rxbuf(struct ze_softc *sc, int i)
624 {
625 	struct mbuf *m;
626 	struct ze_rdes *rp;
627 	int error;
628 
629 	MGETHDR(m, M_DONTWAIT, MT_DATA);
630 	if (m == NULL)
631 		return (ENOBUFS);
632 
633 	MCLAIM(m, &sc->sc_ec.ec_rx_mowner);
634 	MCLGET(m, M_DONTWAIT);
635 	if ((m->m_flags & M_EXT) == 0) {
636 		m_freem(m);
637 		return (ENOBUFS);
638 	}
639 
640 	if (sc->sc_rxmbuf[i] != NULL)
641 		bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);
642 
643 	error = bus_dmamap_load(sc->sc_dmat, sc->sc_rcvmap[i],
644 	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
645 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
646 	if (error)
647 		panic("%s: can't load rx DMA map %d, error = %d",
648 		    device_xname(sc->sc_dev), i, error);
649 	sc->sc_rxmbuf[i] = m;
650 
651 	bus_dmamap_sync(sc->sc_dmat, sc->sc_rcvmap[i], 0,
652 	    sc->sc_rcvmap[i]->dm_mapsize, BUS_DMASYNC_PREREAD);
653 
654 	/*
655 	 * We know that the mbuf cluster is page aligned. Also, be sure
656 	 * that the IP header will be longword aligned.
657 	 */
658 	m->m_data += 2;
659 	rp = &sc->sc_zedata->zc_recv[i];
660 	rp->ze_bufsize = (m->m_ext.ext_size - 2);
661 	rp->ze_bufaddr = (char *)sc->sc_rcvmap[i]->dm_segs[0].ds_addr + 2;
662 	rp->ze_framelen = ZE_FRAMELEN_OW;
663 
664 	return (0);
665 }
666 
/*
 * Create a setup packet and put in queue for sending.
 *
 * Builds the hardware address-filter "setup frame" (station address,
 * broadcast fill, and up to 16 direct multicast addresses), adjusts the
 * chip's address-filtering mode, and, when perfect filtering is in use,
 * queues the setup frame on the transmit ring.
 */
void
ze_setup(struct ze_softc *sc)
{
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ze_cdata *zc = sc->sc_zedata;
	struct ifnet *ifp = &sc->sc_if;
	const u_int8_t *enaddr = CLLADDR(ifp->if_sadl);
	int j, idx, reg;

	/* Ring full: defer; zestart() retries when sc_setup is set. */
	if (sc->sc_inq == (TXDESCS - 1)) {
		sc->sc_setup = 1;
		return;
	}
	sc->sc_setup = 0;
	/*
	 * Init the setup packet with valid info.
	 */
	memset(zc->zc_setup, 0xff, sizeof(zc->zc_setup)); /* Broadcast */
	memcpy(zc->zc_setup, enaddr, ETHER_ADDR_LEN);

	/*
	 * Multicast handling. The SGEC can handle up to 16 direct
	 * ethernet addresses.
	 * Addresses are written at 8-byte strides into the 128-byte
	 * setup buffer, starting at offset 16 (NOTE(review): presumably
	 * the first two slots hold the station and broadcast addresses;
	 * verify against the SGEC setup-frame layout).
	 */
	j = 16;
	ifp->if_flags &= ~IFF_ALLMULTI;
	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
	while (enm != NULL) {
		/* An address range cannot be matched exactly: go allmulti. */
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 6)) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		memcpy(&zc->zc_setup[j], enm->enm_addrlo, ETHER_ADDR_LEN);
		j += 8;
		ETHER_NEXT_MULTI(step, enm);
		/* Filter table full but more addresses remain: go allmulti. */
		if ((enm != NULL)&& (j == 128)) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
	}

	/*
	 * ALLMULTI implies PROMISC in this driver.
	 */
	if (ifp->if_flags & IFF_ALLMULTI)
		ifp->if_flags |= IFF_PROMISC;
	else if (ifp->if_pcount == 0)
		ifp->if_flags &= ~IFF_PROMISC;

	/*
	 * Fiddle with the receive logic: stop rx, update the
	 * address-filtering mode bits, then restore CSR6.
	 */
	reg = ZE_RCSR(ZE_CSR6);
	DELAY(10);
	ZE_WCSR(ZE_CSR6, reg & ~ZE_NICSR6_SR); /* Stop rx */
	reg &= ~ZE_NICSR6_AF;
	if (ifp->if_flags & IFF_PROMISC)
		reg |= ZE_NICSR6_AF_PROM;
	else if (ifp->if_flags & IFF_ALLMULTI)
		reg |= ZE_NICSR6_AF_ALLM;
	DELAY(10);
	ZE_WCSR(ZE_CSR6, reg);
	/*
	 * Only send a setup packet if needed.
	 */
	if ((ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI)) == 0) {
		idx = sc->sc_nexttx;
		zc->zc_xmit[idx].ze_tdes1 = ZE_TDES1_DT_SETUP;
		zc->zc_xmit[idx].ze_bufsize = 128;
		zc->zc_xmit[idx].ze_bufaddr = sc->sc_pzedata->zc_setup;
		zc->zc_xmit[idx].ze_tdr = ZE_TDR_OW;

		/* Kick the transmitter if it is stopped. */
		if ((ZE_RCSR(ZE_CSR5) & ZE_NICSR5_TS) != ZE_NICSR5_TS_RUN)
			ZE_WCSR(ZE_CSR1, -1);

		sc->sc_inq++;
		if (++sc->sc_nexttx == TXDESCS)
			sc->sc_nexttx = 0;
	}
}
751 
752 /*
753  * Check for dead transmit logic.
754  */
755 void
756 zetimeout(struct ifnet *ifp)
757 {
758 	struct ze_softc *sc = ifp->if_softc;
759 
760 	if (sc->sc_inq == 0)
761 		return;
762 
763 	aprint_error_dev(sc->sc_dev, "xmit logic died, resetting...\n");
764 	/*
765 	 * Do a reset of interface, to get it going again.
766 	 * Will it work by just restart the transmit logic?
767 	 */
768 	zeinit(sc);
769 }
770 
/*
 * Reset chip:
 * Set/reset the reset flag.
 *  Write interrupt vector.
 *  Write ring buffer addresses.
 *  Write SBR.
 *
 * Returns true on failure (selftest or CSR0 init), false on success.
 */
bool
zereset(struct ze_softc *sc)
{
	int reg, i;

	/* Assert chip reset and give it time to self-test. */
	ZE_WCSR(ZE_CSR6, ZE_NICSR6_RE);
	DELAY(50000);
	/*
	 * NOTE(review): this reads CSR6 but tests a CSR5-named bit
	 * (ZE_NICSR5_SF).  Check the register definitions in sgecreg.h
	 * to confirm whether the self-test flag really lives in CSR6
	 * or whether CSR5 was intended here.
	 */
	if (ZE_RCSR(ZE_CSR6) & ZE_NICSR5_SF) {
		aprint_error_dev(sc->sc_dev, "selftest failed\n");
		return true;
	}

	/*
	 * Get the vector that were set at match time, and remember it.
	 * WHICH VECTOR TO USE? Take one unused. XXX
	 * Funny way to set vector described in the programmers manual.
	 */
	reg = ZE_NICSR0_IPL14 | sc->sc_intvec | 0x1fff0003; /* SYNC/ASYNC??? */
	i = 10;
	/* The write may not stick immediately; retry up to 10 times. */
	do {
		if (i-- == 0) {
			aprint_error_dev(sc->sc_dev,
			    "failing SGEC CSR0 init\n");
			return true;
		}
		ZE_WCSR(ZE_CSR0, reg);
	} while (ZE_RCSR(ZE_CSR0) != reg);

	/* Tell the chip where the rx and tx descriptor rings live. */
	ZE_WCSR(ZE_CSR3, (vaddr_t)sc->sc_pzedata->zc_recv);
	ZE_WCSR(ZE_CSR4, (vaddr_t)sc->sc_pzedata->zc_xmit);
	return false;
}
810