xref: /netbsd-src/sys/dev/ic/sgec.c (revision 7f21db1c0118155e0dd40b75182e30c589d9f63e)
1 /*      $NetBSD: sgec.c,v 1.37 2010/01/19 22:06:25 pooka Exp $ */
2 /*
3  * Copyright (c) 1999 Ludd, University of Lule}, Sweden. All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *      This product includes software developed at Ludd, University of
16  *      Lule}, Sweden and its contributors.
17  * 4. The name of the author may not be used to endorse or promote products
18  *    derived from this software without specific prior written permission
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*
33  * Driver for the SGEC (Second Generation Ethernet Controller), sitting
34  * on for example the VAX 4000/300 (KA670).
35  *
36  * The SGEC looks like a mixture of the DEQNA and the TULIP. Fun toy.
37  *
 38  * Even though the chip is capable of using virtual addresses (read the
39  * System Page Table directly) this driver doesn't do so, and there
40  * is no benefit in doing it either in NetBSD of today.
41  *
 42  * Things that are still to do:
43  *	Collect statistics.
44  *	Use imperfect filtering when many multicast addresses.
45  */
46 
47 #include <sys/cdefs.h>
48 __KERNEL_RCSID(0, "$NetBSD: sgec.c,v 1.37 2010/01/19 22:06:25 pooka Exp $");
49 
50 #include "opt_inet.h"
51 
52 #include <sys/param.h>
53 #include <sys/mbuf.h>
54 #include <sys/socket.h>
55 #include <sys/device.h>
56 #include <sys/systm.h>
57 #include <sys/sockio.h>
58 
59 #include <uvm/uvm_extern.h>
60 
61 #include <net/if.h>
62 #include <net/if_ether.h>
63 #include <net/if_dl.h>
64 
65 #include <netinet/in.h>
66 #include <netinet/if_inarp.h>
67 
68 #include <net/bpf.h>
69 #include <net/bpfdesc.h>
70 
71 #include <sys/bus.h>
72 
73 #include <dev/ic/sgecreg.h>
74 #include <dev/ic/sgecvar.h>
75 
76 static	void	zeinit(struct ze_softc *);
77 static	void	zestart(struct ifnet *);
78 static	int	zeioctl(struct ifnet *, u_long, void *);
79 static	int	ze_add_rxbuf(struct ze_softc *, int);
80 static	void	ze_setup(struct ze_softc *);
81 static	void	zetimeout(struct ifnet *);
82 static	bool	zereset(struct ze_softc *);
83 
84 #define	ZE_WCSR(csr, val) \
85 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, csr, val)
86 #define	ZE_RCSR(csr) \
87 	bus_space_read_4(sc->sc_iot, sc->sc_ioh, csr)
88 
89 /*
90  * Interface exists: make available by filling in network interface
91  * record.  System will initialize the interface when it is ready
92  * to accept packets.
93  */
94 void
95 sgec_attach(struct ze_softc *sc)
96 {
97 	struct ifnet *ifp = &sc->sc_if;
98 	struct ze_tdes *tp;
99 	struct ze_rdes *rp;
100 	bus_dma_segment_t seg;
101 	int i, rseg, error;
102 
103         /*
104          * Allocate DMA safe memory for descriptors and setup memory.
105          */
106 	error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct ze_cdata),
107 	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
108 	if (error) {
109 		aprint_error(": unable to allocate control data, error = %d\n",
110 		    error);
111 		goto fail_0;
112 	}
113 
114 	error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sizeof(struct ze_cdata),
115 	    (void **)&sc->sc_zedata, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
116 	if (error) {
117 		aprint_error(
118 		    ": unable to map control data, error = %d\n", error);
119 		goto fail_1;
120 	}
121 
122 	error = bus_dmamap_create(sc->sc_dmat, sizeof(struct ze_cdata), 1,
123 	    sizeof(struct ze_cdata), 0, BUS_DMA_NOWAIT, &sc->sc_cmap);
124 	if (error) {
125 		aprint_error(
126 		    ": unable to create control data DMA map, error = %d\n",
127 		    error);
128 		goto fail_2;
129 	}
130 
131 	error = bus_dmamap_load(sc->sc_dmat, sc->sc_cmap, sc->sc_zedata,
132 	    sizeof(struct ze_cdata), NULL, BUS_DMA_NOWAIT);
133 	if (error) {
134 		aprint_error(
135 		    ": unable to load control data DMA map, error = %d\n",
136 		    error);
137 		goto fail_3;
138 	}
139 
140 	/*
141 	 * Zero the newly allocated memory.
142 	 */
143 	memset(sc->sc_zedata, 0, sizeof(struct ze_cdata));
144 
145 	/*
146 	 * Create the transmit descriptor DMA maps.
147 	 */
148 	for (i = 0; error == 0 && i < TXDESCS; i++) {
149 		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
150 		    TXDESCS - 1, MCLBYTES, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
151 		    &sc->sc_xmtmap[i]);
152 	}
153 	if (error) {
154 		aprint_error(": unable to create tx DMA map %d, error = %d\n",
155 		    i, error);
156 		goto fail_4;
157 	}
158 
159 	/*
160 	 * Create receive buffer DMA maps.
161 	 */
162 	for (i = 0; error == 0 && i < RXDESCS; i++) {
163 		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
164 		    MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_rcvmap[i]);
165 	}
166 	if (error) {
167 		aprint_error(": unable to create rx DMA map %d, error = %d\n",
168 		    i, error);
169 		goto fail_5;
170 	}
171 
172 	/*
173 	 * Pre-allocate the receive buffers.
174 	 */
175 	for (i = 0; error == 0 && i < RXDESCS; i++) {
176 		error = ze_add_rxbuf(sc, i);
177 	}
178 
179 	if (error) {
180 		aprint_error(
181 		    ": unable to allocate or map rx buffer %d, error = %d\n",
182 		    i, error);
183 		goto fail_6;
184 	}
185 
186 	/* For vmstat -i
187 	 */
188 	evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, NULL,
189 	    device_xname(sc->sc_dev), "intr");
190 	evcnt_attach_dynamic(&sc->sc_rxintrcnt, EVCNT_TYPE_INTR,
191 	    &sc->sc_intrcnt, device_xname(sc->sc_dev), "rx intr");
192 	evcnt_attach_dynamic(&sc->sc_txintrcnt, EVCNT_TYPE_INTR,
193 	    &sc->sc_intrcnt, device_xname(sc->sc_dev), "tx intr");
194 	evcnt_attach_dynamic(&sc->sc_txdraincnt, EVCNT_TYPE_INTR,
195 	    &sc->sc_intrcnt, device_xname(sc->sc_dev), "tx drain");
196 	evcnt_attach_dynamic(&sc->sc_nobufintrcnt, EVCNT_TYPE_INTR,
197 	    &sc->sc_intrcnt, device_xname(sc->sc_dev), "nobuf intr");
198 	evcnt_attach_dynamic(&sc->sc_nointrcnt, EVCNT_TYPE_INTR,
199 	    &sc->sc_intrcnt, device_xname(sc->sc_dev), "no intr");
200 
201 	/*
202 	 * Create ring loops of the buffer chains.
203 	 * This is only done once.
204 	 */
205 	sc->sc_pzedata = (struct ze_cdata *)sc->sc_cmap->dm_segs[0].ds_addr;
206 
207 	rp = sc->sc_zedata->zc_recv;
208 	rp[RXDESCS].ze_framelen = ZE_FRAMELEN_OW;
209 	rp[RXDESCS].ze_rdes1 = ZE_RDES1_CA;
210 	rp[RXDESCS].ze_bufaddr = (char *)sc->sc_pzedata->zc_recv;
211 
212 	tp = sc->sc_zedata->zc_xmit;
213 	tp[TXDESCS].ze_tdr = ZE_TDR_OW;
214 	tp[TXDESCS].ze_tdes1 = ZE_TDES1_CA;
215 	tp[TXDESCS].ze_bufaddr = (char *)sc->sc_pzedata->zc_xmit;
216 
217 	if (zereset(sc))
218 		return;
219 
220 	strcpy(ifp->if_xname, device_xname(sc->sc_dev));
221 	ifp->if_softc = sc;
222 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
223 	ifp->if_start = zestart;
224 	ifp->if_ioctl = zeioctl;
225 	ifp->if_watchdog = zetimeout;
226 	IFQ_SET_READY(&ifp->if_snd);
227 
228 	/*
229 	 * Attach the interface.
230 	 */
231 	if_attach(ifp);
232 	ether_ifattach(ifp, sc->sc_enaddr);
233 
234 	aprint_normal("\n");
235 	aprint_normal_dev(sc->sc_dev, "hardware address %s\n",
236 	    ether_sprintf(sc->sc_enaddr));
237 	return;
238 
239 	/*
240 	 * Free any resources we've allocated during the failed attach
241 	 * attempt.  Do this in reverse order and fall through.
242 	 */
243  fail_6:
244 	for (i = 0; i < RXDESCS; i++) {
245 		if (sc->sc_rxmbuf[i] != NULL) {
246 			bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[i]);
247 			m_freem(sc->sc_rxmbuf[i]);
248 		}
249 	}
250  fail_5:
251 	for (i = 0; i < RXDESCS; i++) {
252 		if (sc->sc_xmtmap[i] != NULL)
253 			bus_dmamap_destroy(sc->sc_dmat, sc->sc_xmtmap[i]);
254 	}
255  fail_4:
256 	for (i = 0; i < TXDESCS; i++) {
257 		if (sc->sc_rcvmap[i] != NULL)
258 			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rcvmap[i]);
259 	}
260 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cmap);
261  fail_3:
262 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cmap);
263  fail_2:
264 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_zedata,
265 	    sizeof(struct ze_cdata));
266  fail_1:
267 	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
268  fail_0:
269 	return;
270 }
271 
272 /*
273  * Initialization of interface.
274  */
275 void
276 zeinit(struct ze_softc *sc)
277 {
278 	struct ifnet *ifp = &sc->sc_if;
279 	struct ze_cdata *zc = sc->sc_zedata;
280 	int i;
281 
282 	/*
283 	 * Reset the interface.
284 	 */
285 	if (zereset(sc))
286 		return;
287 
288 	sc->sc_nexttx = sc->sc_inq = sc->sc_lastack = sc->sc_txcnt = 0;
289 	/*
290 	 * Release and init transmit descriptors.
291 	 */
292 	for (i = 0; i < TXDESCS; i++) {
293 		if (sc->sc_xmtmap[i]->dm_nsegs > 0)
294 			bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[i]);
295 		if (sc->sc_txmbuf[i]) {
296 			m_freem(sc->sc_txmbuf[i]);
297 			sc->sc_txmbuf[i] = 0;
298 		}
299 		zc->zc_xmit[i].ze_tdr = 0; /* Clear valid bit */
300 	}
301 
302 
303 	/*
304 	 * Init receive descriptors.
305 	 */
306 	for (i = 0; i < RXDESCS; i++)
307 		zc->zc_recv[i].ze_framelen = ZE_FRAMELEN_OW;
308 	sc->sc_nextrx = 0;
309 
310 	ZE_WCSR(ZE_CSR6, ZE_NICSR6_IE|ZE_NICSR6_BL_8|ZE_NICSR6_ST|
311 	    ZE_NICSR6_SR|ZE_NICSR6_DC);
312 
313 	ifp->if_flags |= IFF_RUNNING;
314 	ifp->if_flags &= ~IFF_OACTIVE;
315 
316 	/*
317 	 * Send a setup frame.
318 	 * This will start the transmit machinery as well.
319 	 */
320 	ze_setup(sc);
321 
322 }
323 
324 /*
325  * Start output on interface.
326  */
327 void
328 zestart(struct ifnet *ifp)
329 {
330 	struct ze_softc *sc = ifp->if_softc;
331 	struct ze_cdata *zc = sc->sc_zedata;
332 	paddr_t	buffer;
333 	struct mbuf *m;
334 	int nexttx, starttx;
335 	int len, i, totlen, error;
336 	int old_inq = sc->sc_inq;
337 	uint16_t orword, tdr;
338 	bus_dmamap_t map;
339 
340 	while (sc->sc_inq < (TXDESCS - 1)) {
341 
342 		if (sc->sc_setup) {
343 			ze_setup(sc);
344 			continue;
345 		}
346 		nexttx = sc->sc_nexttx;
347 		IFQ_POLL(&sc->sc_if.if_snd, m);
348 		if (m == 0)
349 			goto out;
350 		/*
351 		 * Count number of mbufs in chain.
352 		 * Always do DMA directly from mbufs, therefore the transmit
353 		 * ring is really big.
354 		 */
355 		map = sc->sc_xmtmap[nexttx];
356 		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
357 		    BUS_DMA_WRITE);
358 		if (error) {
359 			aprint_error_dev(sc->sc_dev,
360 			    "zestart: load_mbuf failed: %d", error);
361 			goto out;
362 		}
363 
364 		if (map->dm_nsegs >= TXDESCS)
365 			panic("zestart"); /* XXX */
366 
367 		if ((map->dm_nsegs + sc->sc_inq) >= (TXDESCS - 1)) {
368 			bus_dmamap_unload(sc->sc_dmat, map);
369 			ifp->if_flags |= IFF_OACTIVE;
370 			goto out;
371 		}
372 
373 		/*
374 		 * m now points to a mbuf chain that can be loaded.
375 		 * Loop around and set it.
376 		 */
377 		totlen = 0;
378 		orword = ZE_TDES1_FS;
379 		starttx = nexttx;
380 		for (i = 0; i < map->dm_nsegs; i++) {
381 			buffer = map->dm_segs[i].ds_addr;
382 			len = map->dm_segs[i].ds_len;
383 
384 			KASSERT(len > 0);
385 
386 			totlen += len;
387 			/* Word alignment calc */
388 			if (totlen == m->m_pkthdr.len) {
389 				sc->sc_txcnt += map->dm_nsegs;
390 				if (sc->sc_txcnt >= TXDESCS * 3 / 4) {
391 					orword |= ZE_TDES1_IC;
392 					sc->sc_txcnt = 0;
393 				}
394 				orword |= ZE_TDES1_LS;
395 				sc->sc_txmbuf[nexttx] = m;
396 			}
397 			zc->zc_xmit[nexttx].ze_bufsize = len;
398 			zc->zc_xmit[nexttx].ze_bufaddr = (char *)buffer;
399 			zc->zc_xmit[nexttx].ze_tdes1 = orword;
400 			zc->zc_xmit[nexttx].ze_tdr = tdr;
401 
402 			if (++nexttx == TXDESCS)
403 				nexttx = 0;
404 			orword = 0;
405 			tdr = ZE_TDR_OW;
406 		}
407 
408 		sc->sc_inq += map->dm_nsegs;
409 
410 		IFQ_DEQUEUE(&ifp->if_snd, m);
411 #ifdef DIAGNOSTIC
412 		if (totlen != m->m_pkthdr.len)
413 			panic("zestart: len fault");
414 #endif
415 		/*
416 		 * Turn ownership of the packet over to the device.
417 		 */
418 		zc->zc_xmit[starttx].ze_tdr = ZE_TDR_OW;
419 
420 		/*
421 		 * Kick off the transmit logic, if it is stopped.
422 		 */
423 		if ((ZE_RCSR(ZE_CSR5) & ZE_NICSR5_TS) != ZE_NICSR5_TS_RUN)
424 			ZE_WCSR(ZE_CSR1, -1);
425 		sc->sc_nexttx = nexttx;
426 	}
427 	if (sc->sc_inq == (TXDESCS - 1))
428 		ifp->if_flags |= IFF_OACTIVE;
429 
430 out:	if (old_inq < sc->sc_inq)
431 		ifp->if_timer = 5; /* If transmit logic dies */
432 }
433 
/*
 * Interrupt service routine.  Acknowledges the chip, drains completed
 * receive descriptors into the network stack and reclaims completed
 * transmit descriptors.  Returns 1 if the interrupt was ours, 0 if not.
 */
int
sgec_intr(struct ze_softc *sc)
{
	struct ze_cdata *zc = sc->sc_zedata;
	struct ifnet *ifp = &sc->sc_if;
	struct mbuf *m;
	int csr, len;

	csr = ZE_RCSR(ZE_CSR5);
	if ((csr & ZE_NICSR5_IS) == 0) { /* Not our interrupt */
		sc->sc_nointrcnt.ev_count++;
		return 0;
	}
	/* Write the status bits back to acknowledge them. */
	ZE_WCSR(ZE_CSR5, csr);

	if (csr & ZE_NICSR5_RU)
		sc->sc_nobufintrcnt.ev_count++;

	if (csr & ZE_NICSR5_RI) {
		sc->sc_rxintrcnt.ev_count++;
		/* Walk descriptors until we find one the chip still owns. */
		while ((zc->zc_recv[sc->sc_nextrx].ze_framelen &
		    ZE_FRAMELEN_OW) == 0) {

			ifp->if_ipackets++;
			m = sc->sc_rxmbuf[sc->sc_nextrx];
			len = zc->zc_recv[sc->sc_nextrx].ze_framelen;
			/* Replace the buffer before handing m upstream. */
			ze_add_rxbuf(sc, sc->sc_nextrx);
			if (++sc->sc_nextrx == RXDESCS)
				sc->sc_nextrx = 0;
			/* Runt frames are counted as errors and dropped. */
			if (len < ETHER_MIN_LEN) {
				ifp->if_ierrors++;
				m_freem(m);
			} else {
				m->m_pkthdr.rcvif = ifp;
				m->m_pkthdr.len = m->m_len =
				    len - ETHER_CRC_LEN;
				if (ifp->if_bpf)
					bpf_ops->bpf_mtap(ifp->if_bpf, m);
				(*ifp->if_input)(ifp, m);
			}
		}
	}

	if (csr & ZE_NICSR5_TI)
		sc->sc_txintrcnt.ev_count++;
	if (sc->sc_lastack != sc->sc_nexttx) {
		int lastack;
		/*
		 * Reclaim transmit descriptors between the last
		 * acknowledged one and the next one to be queued.
		 */
		for (lastack = sc->sc_lastack; lastack != sc->sc_nexttx; ) {
			bus_dmamap_t map;
			int nlastack;

			/* Still owned by the chip: stop here. */
			if ((zc->zc_xmit[lastack].ze_tdr & ZE_TDR_OW) != 0)
				break;

			/* Setup frames carry no mbuf or DMA map. */
			if ((zc->zc_xmit[lastack].ze_tdes1 & ZE_TDES1_DT) ==
			    ZE_TDES1_DT_SETUP) {
				if (++lastack == TXDESCS)
					lastack = 0;
				sc->sc_inq--;
				continue;
			}

			KASSERT(zc->zc_xmit[lastack].ze_tdes1 & ZE_TDES1_FS);
			map = sc->sc_xmtmap[lastack];
			KASSERT(map->dm_nsegs > 0);
			/* Only ack the packet once its last segment is done. */
			nlastack = (lastack + map->dm_nsegs - 1) % TXDESCS;
			if (zc->zc_xmit[nlastack].ze_tdr & ZE_TDR_OW)
				break;
			lastack = nlastack;
			if (sc->sc_txcnt > map->dm_nsegs)
			    sc->sc_txcnt -= map->dm_nsegs;
			else
			    sc->sc_txcnt = 0;
			sc->sc_inq -= map->dm_nsegs;
			KASSERT(zc->zc_xmit[lastack].ze_tdes1 & ZE_TDES1_LS);
			ifp->if_opackets++;
			bus_dmamap_unload(sc->sc_dmat, map);
			KASSERT(sc->sc_txmbuf[lastack]);
			if (ifp->if_bpf)
				bpf_ops->bpf_mtap(ifp->if_bpf, sc->sc_txmbuf[lastack]);
			m_freem(sc->sc_txmbuf[lastack]);
			sc->sc_txmbuf[lastack] = 0;
			if (++lastack == TXDESCS)
				lastack = 0;
		}
		/* If anything was reclaimed, try to queue more packets. */
		if (lastack != sc->sc_lastack) {
			sc->sc_txdraincnt.ev_count++;
			sc->sc_lastack = lastack;
			if (sc->sc_inq == 0)
				ifp->if_timer = 0;
			ifp->if_flags &= ~IFF_OACTIVE;
			zestart(ifp); /* Put in more in queue */
		}
	}
	return 1;
}
530 
531 /*
532  * Process an ioctl request.
533  */
534 int
535 zeioctl(struct ifnet *ifp, u_long cmd, void *data)
536 {
537 	struct ze_softc *sc = ifp->if_softc;
538 	struct ifaddr *ifa = data;
539 	int s = splnet(), error = 0;
540 
541 	switch (cmd) {
542 
543 	case SIOCINITIFADDR:
544 		ifp->if_flags |= IFF_UP;
545 		switch(ifa->ifa_addr->sa_family) {
546 #ifdef INET
547 		case AF_INET:
548 			zeinit(sc);
549 			arp_ifinit(ifp, ifa);
550 			break;
551 #endif
552 		}
553 		break;
554 
555 	case SIOCSIFFLAGS:
556 		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
557 			break;
558 		/* XXX re-use ether_ioctl() */
559 		switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
560 		case IFF_RUNNING:
561 			/*
562 			 * If interface is marked down and it is running,
563 			 * stop it. (by disabling receive mechanism).
564 			 */
565 			ZE_WCSR(ZE_CSR6, ZE_RCSR(ZE_CSR6) &
566 			    ~(ZE_NICSR6_ST|ZE_NICSR6_SR));
567 			ifp->if_flags &= ~IFF_RUNNING;
568 			break;
569 		case IFF_UP:
570 			/*
571 			 * If interface it marked up and it is stopped, then
572 			 * start it.
573 			 */
574 			zeinit(sc);
575 			break;
576 		case IFF_UP|IFF_RUNNING:
577 			/*
578 			 * Send a new setup packet to match any new changes.
579 			 * (Like IFF_PROMISC etc)
580 			 */
581 			ze_setup(sc);
582 			break;
583 		case 0:
584 			break;
585 		}
586 		break;
587 
588 	case SIOCADDMULTI:
589 	case SIOCDELMULTI:
590 		/*
591 		 * Update our multicast list.
592 		 */
593 		if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
594 			/*
595 			 * Multicast list has changed; set the hardware filter
596 			 * accordingly.
597 			 */
598 			if (ifp->if_flags & IFF_RUNNING)
599 				ze_setup(sc);
600 			error = 0;
601 		}
602 		break;
603 
604 	default:
605 		error = ether_ioctl(ifp, cmd, data);
606 
607 	}
608 	splx(s);
609 	return (error);
610 }
611 
612 /*
613  * Add a receive buffer to the indicated descriptor.
614  */
615 int
616 ze_add_rxbuf(struct ze_softc *sc, int i)
617 {
618 	struct mbuf *m;
619 	struct ze_rdes *rp;
620 	int error;
621 
622 	MGETHDR(m, M_DONTWAIT, MT_DATA);
623 	if (m == NULL)
624 		return (ENOBUFS);
625 
626 	MCLAIM(m, &sc->sc_ec.ec_rx_mowner);
627 	MCLGET(m, M_DONTWAIT);
628 	if ((m->m_flags & M_EXT) == 0) {
629 		m_freem(m);
630 		return (ENOBUFS);
631 	}
632 
633 	if (sc->sc_rxmbuf[i] != NULL)
634 		bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);
635 
636 	error = bus_dmamap_load(sc->sc_dmat, sc->sc_rcvmap[i],
637 	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
638 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
639 	if (error)
640 		panic("%s: can't load rx DMA map %d, error = %d",
641 		    device_xname(sc->sc_dev), i, error);
642 	sc->sc_rxmbuf[i] = m;
643 
644 	bus_dmamap_sync(sc->sc_dmat, sc->sc_rcvmap[i], 0,
645 	    sc->sc_rcvmap[i]->dm_mapsize, BUS_DMASYNC_PREREAD);
646 
647 	/*
648 	 * We know that the mbuf cluster is page aligned. Also, be sure
649 	 * that the IP header will be longword aligned.
650 	 */
651 	m->m_data += 2;
652 	rp = &sc->sc_zedata->zc_recv[i];
653 	rp->ze_bufsize = (m->m_ext.ext_size - 2);
654 	rp->ze_bufaddr = (char *)sc->sc_rcvmap[i]->dm_segs[0].ds_addr + 2;
655 	rp->ze_framelen = ZE_FRAMELEN_OW;
656 
657 	return (0);
658 }
659 
660 /*
661  * Create a setup packet and put in queue for sending.
662  */
/*
 * Build a setup packet describing our station address and multicast
 * filter and queue it on the transmit ring.  If the ring is full,
 * the request is deferred via sc_setup and retried from zestart().
 */
void
ze_setup(struct ze_softc *sc)
{
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ze_cdata *zc = sc->sc_zedata;
	struct ifnet *ifp = &sc->sc_if;
	const u_int8_t *enaddr = CLLADDR(ifp->if_sadl);
	int j, idx, reg;

	if (sc->sc_inq == (TXDESCS - 1)) {
		sc->sc_setup = 1;
		return;
	}
	sc->sc_setup = 0;
	/*
	 * Init the setup packet with valid info.
	 * Fill with 0xff (broadcast), then place our own address first.
	 */
	memset(zc->zc_setup, 0xff, sizeof(zc->zc_setup)); /* Broadcast */
	memcpy(zc->zc_setup, enaddr, ETHER_ADDR_LEN);

	/*
	 * Multicast handling. The SGEC can handle up to 16 direct
	 * ethernet addresses.
	 * NOTE(review): entries appear to be stored at 8-byte strides
	 * starting at offset 16 of the 128-byte setup buffer -- confirm
	 * against the SGEC setup-frame layout before changing.
	 */
	j = 16;
	ifp->if_flags &= ~IFF_ALLMULTI;
	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
	while (enm != NULL) {
		/* Address ranges can't be matched exactly; go allmulti. */
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 6)) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		memcpy(&zc->zc_setup[j], enm->enm_addrlo, ETHER_ADDR_LEN);
		j += 8;
		ETHER_NEXT_MULTI(step, enm);
		/* Filter table full but more addresses remain. */
		if ((enm != NULL)&& (j == 128)) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
	}

	/*
	 * ALLMULTI implies PROMISC in this driver.
	 */
	if (ifp->if_flags & IFF_ALLMULTI)
		ifp->if_flags |= IFF_PROMISC;
	else if (ifp->if_pcount == 0)
		ifp->if_flags &= ~IFF_PROMISC;

	/*
	 * Fiddle with the receive logic: stop the receiver, update the
	 * address-filtering mode bits, and restart it.
	 */
	reg = ZE_RCSR(ZE_CSR6);
	DELAY(10);
	ZE_WCSR(ZE_CSR6, reg & ~ZE_NICSR6_SR); /* Stop rx */
	reg &= ~ZE_NICSR6_AF;
	if (ifp->if_flags & IFF_PROMISC)
		reg |= ZE_NICSR6_AF_PROM;
	else if (ifp->if_flags & IFF_ALLMULTI)
		reg |= ZE_NICSR6_AF_ALLM;
	DELAY(10);
	ZE_WCSR(ZE_CSR6, reg);
	/*
	 * Only send a setup packet if needed.
	 * (Promiscuous/allmulti modes are handled purely by CSR6 above.)
	 */
	if ((ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI)) == 0) {
		idx = sc->sc_nexttx;
		zc->zc_xmit[idx].ze_tdes1 = ZE_TDES1_DT_SETUP;
		zc->zc_xmit[idx].ze_bufsize = 128;
		zc->zc_xmit[idx].ze_bufaddr = sc->sc_pzedata->zc_setup;
		zc->zc_xmit[idx].ze_tdr = ZE_TDR_OW;

		/* Poke the transmitter if it is idle. */
		if ((ZE_RCSR(ZE_CSR5) & ZE_NICSR5_TS) != ZE_NICSR5_TS_RUN)
			ZE_WCSR(ZE_CSR1, -1);

		sc->sc_inq++;
		if (++sc->sc_nexttx == TXDESCS)
			sc->sc_nexttx = 0;
	}
}
744 
745 /*
746  * Check for dead transmit logic.
747  */
748 void
749 zetimeout(struct ifnet *ifp)
750 {
751 	struct ze_softc *sc = ifp->if_softc;
752 
753 	if (sc->sc_inq == 0)
754 		return;
755 
756 	aprint_error_dev(sc->sc_dev, "xmit logic died, resetting...\n");
757 	/*
758 	 * Do a reset of interface, to get it going again.
759 	 * Will it work by just restart the transmit logic?
760 	 */
761 	zeinit(sc);
762 }
763 
764 /*
765  * Reset chip:
766  * Set/reset the reset flag.
767  *  Write interrupt vector.
768  *  Write ring buffer addresses.
769  *  Write SBR.
770  */
/*
 * Reset chip:
 * Set/reset the reset flag.
 *  Write interrupt vector.
 *  Write ring buffer addresses.
 *  Write SBR.
 * Returns true on failure (selftest or CSR0 init), false on success.
 */
bool
zereset(struct ze_softc *sc)
{
	int reg, i;

	/* Request a reset and give the on-chip selftest time to finish. */
	ZE_WCSR(ZE_CSR6, ZE_NICSR6_RE);
	DELAY(50000);
	/*
	 * NOTE(review): this reads CSR6 but masks with a CSR5-named bit
	 * (ZE_NICSR5_SF, selftest failed).  Looks suspicious -- verify
	 * against the SGEC register definitions before changing.
	 */
	if (ZE_RCSR(ZE_CSR6) & ZE_NICSR5_SF) {
		aprint_error_dev(sc->sc_dev, "selftest failed\n");
		return true;
	}

	/*
	 * Get the vector that were set at match time, and remember it.
	 * WHICH VECTOR TO USE? Take one unused. XXX
	 * Funny way to set vector described in the programmers manual.
	 * Retry the write a bounded number of times until it sticks.
	 */
	reg = ZE_NICSR0_IPL14 | sc->sc_intvec | 0x1fff0003; /* SYNC/ASYNC??? */
	i = 10;
	do {
		if (i-- == 0) {
			aprint_error_dev(sc->sc_dev,
			    "failing SGEC CSR0 init\n");
			return true;
		}
		ZE_WCSR(ZE_CSR0, reg);
	} while (ZE_RCSR(ZE_CSR0) != reg);

	/* Hand the (bus-address) descriptor ring bases to the chip. */
	ZE_WCSR(ZE_CSR3, (vaddr_t)sc->sc_pzedata->zc_recv);
	ZE_WCSR(ZE_CSR4, (vaddr_t)sc->sc_pzedata->zc_xmit);
	return false;
}
803