xref: /netbsd-src/sys/dev/ic/sgec.c (revision de1dfb1250df962f1ff3a011772cf58e605aed11)
1 /*      $NetBSD: sgec.c,v 1.22 2003/02/26 06:31:10 matt Exp $ */
2 /*
3  * Copyright (c) 1999 Ludd, University of Luleå, Sweden. All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *      This product includes software developed at Ludd, University of
16  *      Luleå, Sweden and its contributors.
17  * 4. The name of the author may not be used to endorse or promote products
18  *    derived from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*
33  * Driver for the SGEC (Second Generation Ethernet Controller), sitting
34  * on for example the VAX 4000/300 (KA670).
35  *
36  * The SGEC looks like a mixture of the DEQNA and the TULIP. Fun toy.
37  *
38  * Even though the chip is capable of using virtual addresses (it can read
39  * the System Page Table directly), this driver doesn't do so; there is
40  * no benefit in doing it in today's NetBSD.
41  *
42  * Things still to do:
43  *	Collect statistics.
44  *	Use imperfect filtering when there are too many multicast addresses.
45  */
46 
47 #include <sys/cdefs.h>
48 __KERNEL_RCSID(0, "$NetBSD: sgec.c,v 1.22 2003/02/26 06:31:10 matt Exp $");
49 
50 #include "opt_inet.h"
51 #include "bpfilter.h"
52 
53 #include <sys/param.h>
54 #include <sys/mbuf.h>
55 #include <sys/socket.h>
56 #include <sys/device.h>
57 #include <sys/systm.h>
58 #include <sys/sockio.h>
59 
60 #include <uvm/uvm_extern.h>
61 
62 #include <net/if.h>
63 #include <net/if_ether.h>
64 #include <net/if_dl.h>
65 
66 #include <netinet/in.h>
67 #include <netinet/if_inarp.h>
68 
69 #if NBPFILTER > 0
70 #include <net/bpf.h>
71 #include <net/bpfdesc.h>
72 #endif
73 
74 #include <machine/bus.h>
75 
76 #include <dev/ic/sgecreg.h>
77 #include <dev/ic/sgecvar.h>
78 
79 static	void	zeinit __P((struct ze_softc *));
80 static	void	zestart __P((struct ifnet *));
81 static	int	zeioctl __P((struct ifnet *, u_long, caddr_t));
82 static	int	ze_add_rxbuf __P((struct ze_softc *, int));
83 static	void	ze_setup __P((struct ze_softc *));
84 static	void	zetimeout __P((struct ifnet *));
85 static	int	zereset __P((struct ze_softc *));
86 
87 #define	ZE_WCSR(csr, val) \
88 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, csr, val)
89 #define	ZE_RCSR(csr) \
90 	bus_space_read_4(sc->sc_iot, sc->sc_ioh, csr)
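/*
 * CSR access helpers.  Both macros assume a "struct ze_softc *sc" is in
 * scope; every chip register read/write in this driver goes through them.
 */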
91 
92 /*
93  * Interface exists: make available by filling in network interface
94  * record.  System will initialize the interface when it is ready
95  * to accept packets.
96  */
97 void
98 sgec_attach(sc)
99 	struct ze_softc *sc;
100 {
101 	struct	ifnet *ifp = (struct ifnet *)&sc->sc_if;
102 	struct	ze_tdes *tp;
103 	struct	ze_rdes *rp;
104 	bus_dma_segment_t seg;
105 	int i, rseg, error;
106 
107         /*
108          * Allocate DMA safe memory for descriptors and setup memory.
109          */
110 	if ((error = bus_dmamem_alloc(sc->sc_dmat,
111 	    sizeof(struct ze_cdata), PAGE_SIZE, 0, &seg, 1, &rseg,
112 	    BUS_DMA_NOWAIT)) != 0) {
113 		printf(": unable to allocate control data, error = %d\n",
114 		    error);
115 		goto fail_0;
116 	}
117 
118 	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
119 	    sizeof(struct ze_cdata), (caddr_t *)&sc->sc_zedata,
120 	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
121 		printf(": unable to map control data, error = %d\n", error);
122 		goto fail_1;
123 	}
124 
125 	if ((error = bus_dmamap_create(sc->sc_dmat,
126 	    sizeof(struct ze_cdata), 1,
127 	    sizeof(struct ze_cdata), 0, BUS_DMA_NOWAIT,
128 	    &sc->sc_cmap)) != 0) {
129 		printf(": unable to create control data DMA map, error = %d\n",
130 		    error);
131 		goto fail_2;
132 	}
133 
134 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cmap,
135 	    sc->sc_zedata, sizeof(struct ze_cdata), NULL,
136 	    BUS_DMA_NOWAIT)) != 0) {
137 		printf(": unable to load control data DMA map, error = %d\n",
138 		    error);
139 		goto fail_3;
140 	}
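	/*
	 * The standard bus_dma(9) sequence is now complete:
	 * bus_dmamem_alloc() provided DMA-able memory, bus_dmamem_map()
	 * mapped it into kernel virtual space as sc_zedata, and
	 * bus_dmamap_create()/bus_dmamap_load() produced sc_cmap, whose
	 * first segment holds the bus address the chip will use for the
	 * descriptor and setup-packet area (saved as sc_pzedata below).
	 */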
141 
142 	/*
143 	 * Zero the newly allocated memory.
144 	 */
145 	memset(sc->sc_zedata, 0, sizeof(struct ze_cdata));
146 	/*
147 	 * Create the transmit descriptor DMA maps.
148 	 */
149 	for (i = 0; i < TXDESCS; i++) {
150 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
151 		    1, MCLBYTES, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
152 		    &sc->sc_xmtmap[i]))) {
153 			printf(": unable to create tx DMA map %d, error = %d\n",
154 			    i, error);
155 			goto fail_4;
156 		}
157 	}
158 
159 	/*
160 	 * Create receive buffer DMA maps.
161 	 */
162 	for (i = 0; i < RXDESCS; i++) {
163 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
164 		    MCLBYTES, 0, BUS_DMA_NOWAIT,
165 		    &sc->sc_rcvmap[i]))) {
166 			printf(": unable to create rx DMA map %d, error = %d\n",
167 			    i, error);
168 			goto fail_5;
169 		}
170 	}
171 	/*
172 	 * Pre-allocate the receive buffers.
173 	 */
174 	for (i = 0; i < RXDESCS; i++) {
175 		if ((error = ze_add_rxbuf(sc, i)) != 0) {
176 		printf(": unable to allocate or map rx buffer %d,"
177 		    " error = %d\n", i, error);
178 			goto fail_6;
179 		}
180 	}
181 
182 	/* For vmstat -i
183 	 */
184 	evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, NULL,
185 		sc->sc_dev.dv_xname, "intr");
186 
187 	/*
188 	 * Create ring loops of the buffer chains.
189 	 * This is only done once.
190 	 */
191 	sc->sc_pzedata = (struct ze_cdata *)sc->sc_cmap->dm_segs[0].ds_addr;
192 
193 	rp = sc->sc_zedata->zc_recv;
194 	rp[RXDESCS].ze_framelen = ZE_FRAMELEN_OW;
195 	rp[RXDESCS].ze_rdes1 = ZE_RDES1_CA;
196 	rp[RXDESCS].ze_bufaddr = (char *)sc->sc_pzedata->zc_recv;
197 
198 	tp = sc->sc_zedata->zc_xmit;
199 	tp[TXDESCS].ze_tdr = ZE_TDR_OW;
200 	tp[TXDESCS].ze_tdes1 = ZE_TDES1_CA;
201 	tp[TXDESCS].ze_bufaddr = (char *)sc->sc_pzedata->zc_xmit;
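	/*
	 * Descriptor ring sketch (inferred from the code above, not from
	 * chip documentation): zc_recv[] and zc_xmit[] each have one extra
	 * descriptor past the last real one, flagged ZE_RDES1_CA /
	 * ZE_TDES1_CA (presumably "chain address") with its buffer address
	 * pointing back at the start of the array.  The chip thus wraps
	 * from the last descriptor to the first by itself, so the rings
	 * never have to be re-linked at run time.
	 */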
202 
203 	if (zereset(sc))
204 		return;
205 
206 	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
207 	ifp->if_softc = sc;
208 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
209 	ifp->if_start = zestart;
210 	ifp->if_ioctl = zeioctl;
211 	ifp->if_watchdog = zetimeout;
212 	IFQ_SET_READY(&ifp->if_snd);
213 
214 	/*
215 	 * Attach the interface.
216 	 */
217 	if_attach(ifp);
218 	ether_ifattach(ifp, sc->sc_enaddr);
219 
220 	printf("\n%s: hardware address %s\n", sc->sc_dev.dv_xname,
221 	    ether_sprintf(sc->sc_enaddr));
222 	return;
223 
224 	/*
225 	 * Free any resources we've allocated during the failed attach
226 	 * attempt.  Do this in reverse order and fall through.
227 	 */
228  fail_6:
229 	for (i = 0; i < RXDESCS; i++) {
230 		if (sc->sc_rxmbuf[i] != NULL) {
231 			bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);
232 			m_freem(sc->sc_rxmbuf[i]);
233 		}
234 	}
235  fail_5:
236 	for (i = 0; i < RXDESCS; i++) {
237 		if (sc->sc_rcvmap[i] != NULL)
238 			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rcvmap[i]);
239 	}
240  fail_4:
241 	for (i = 0; i < TXDESCS; i++) {
242 		if (sc->sc_xmtmap[i] != NULL)
243 			bus_dmamap_destroy(sc->sc_dmat, sc->sc_xmtmap[i]);
244 	}
245 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cmap);
246  fail_3:
247 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cmap);
248  fail_2:
249 	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_zedata,
250 	    sizeof(struct ze_cdata));
251  fail_1:
252 	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
253  fail_0:
254 	return;
255 }
256 
257 /*
258  * Initialization of interface.
259  */
260 void
261 zeinit(sc)
262 	struct ze_softc *sc;
263 {
264 	struct ifnet *ifp = (struct ifnet *)&sc->sc_if;
265 	struct ze_cdata *zc = sc->sc_zedata;
266 	int i;
267 
268 	/*
269 	 * Reset the interface.
270 	 */
271 	if (zereset(sc))
272 		return;
273 
274 	sc->sc_nexttx = sc->sc_inq = sc->sc_lastack = 0;
275 	/*
276 	 * Release and init transmit descriptors.
277 	 */
278 	for (i = 0; i < TXDESCS; i++) {
279 		if (sc->sc_txmbuf[i]) {
280 			bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[i]);
281 			m_freem(sc->sc_txmbuf[i]);
282 			sc->sc_txmbuf[i] = 0;
283 		}
284 		zc->zc_xmit[i].ze_tdr = 0; /* Clear valid bit */
285 	}
286 
287 
288 	/*
289 	 * Init receive descriptors.
290 	 */
291 	for (i = 0; i < RXDESCS; i++)
292 		zc->zc_recv[i].ze_framelen = ZE_FRAMELEN_OW;
293 	sc->sc_nextrx = 0;
294 
295 	ZE_WCSR(ZE_CSR6, ZE_NICSR6_IE|ZE_NICSR6_BL_8|ZE_NICSR6_ST|
296 	    ZE_NICSR6_SR|ZE_NICSR6_DC);
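	/*
	 * Note on the CSR6 write above (bit meanings guessed from their
	 * names): ZE_NICSR6_IE enables interrupts, ZE_NICSR6_BL_8 selects
	 * a DMA burst length of 8, and ZE_NICSR6_ST/ZE_NICSR6_SR start the
	 * transmit and receive processes.  zeioctl() later stops the
	 * interface by clearing ST and SR, and ze_setup() briefly clears
	 * SR while it changes the address filtering mode.
	 */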
297 
298 	ifp->if_flags |= IFF_RUNNING;
299 	ifp->if_flags &= ~IFF_OACTIVE;
300 
301 	/*
302 	 * Send a setup frame.
303 	 * This will start the transmit machinery as well.
304 	 */
305 	ze_setup(sc);
306 
307 }
308 
309 /*
310  * Start output on interface.
311  */
312 void
313 zestart(ifp)
314 	struct ifnet *ifp;
315 {
316 	struct ze_softc *sc = ifp->if_softc;
317 	struct ze_cdata *zc = sc->sc_zedata;
318 	paddr_t	buffer;
319 	struct mbuf *m, *m0;
320 	int idx, len, i, totlen, error;
321 	int old_inq = sc->sc_inq;
322 	short orword;
323 
324 	while (sc->sc_inq < (TXDESCS - 1)) {
325 
326 		if (sc->sc_setup) {
327 			ze_setup(sc);
328 			continue;
329 		}
330 		idx = sc->sc_nexttx;
331 		IFQ_POLL(&sc->sc_if.if_snd, m);
332 		if (m == 0)
333 			goto out;
334 		/*
335 		 * Count number of mbufs in chain.
336 		 * Always do DMA directly from mbufs, therefore the transmit
337 		 * We always DMA directly from the mbufs, so the transmit
338 		 * ring must be big enough for one descriptor per segment.
339 		for (m0 = m, i = 0; m0; m0 = m0->m_next)
340 			if (m0->m_len)
341 				i++;
342 		if (i >= TXDESCS)
343 			panic("zestart"); /* XXX */
344 
345 		if ((i + sc->sc_inq) >= (TXDESCS - 1)) {
346 			ifp->if_flags |= IFF_OACTIVE;
347 			goto out;
348 		}
349 
350 #if NBPFILTER > 0
351 		if (ifp->if_bpf)
352 			bpf_mtap(ifp->if_bpf, m);
353 #endif
354 		/*
355 		 * m now points to a mbuf chain that can be loaded.
356 		 * Loop around and set it.
357 		 */
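		/*
		 * Each non-empty mbuf gets its own transmit descriptor.
		 * Going by the flag names: ZE_TDES1_FS/ZE_TDES1_LS mark the
		 * first and last segment of the packet, ZE_TDES1_IC asks
		 * for an interrupt on completion, and setting ZE_TDR_OW
		 * last hands the descriptor over to the chip.
		 */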
358 		totlen = 0;
359 		for (m0 = m; m0; m0 = m0->m_next) {
360 			error = bus_dmamap_load(sc->sc_dmat, sc->sc_xmtmap[idx],
361 			    mtod(m0, void *), m0->m_len, 0, BUS_DMA_WRITE);
362 			buffer = sc->sc_xmtmap[idx]->dm_segs[0].ds_addr;
363 			len = m0->m_len;
364 			if (len == 0)
365 				continue;
366 
367 			totlen += len;
368 			/* Mark first/last segment of this packet */
369 			orword = 0;
370 			if (totlen == len)
371 				orword = ZE_TDES1_FS;
372 			if (totlen == m->m_pkthdr.len) {
373 				orword |= ZE_TDES1_LS;
374 				sc->sc_txmbuf[idx] = m;
375 			}
376 			zc->zc_xmit[idx].ze_bufsize = len;
377 			zc->zc_xmit[idx].ze_bufaddr = (char *)buffer;
378 			zc->zc_xmit[idx].ze_tdes1 = orword | ZE_TDES1_IC;
379 			zc->zc_xmit[idx].ze_tdr = ZE_TDR_OW;
380 
381 			if (++idx == TXDESCS)
382 				idx = 0;
383 			sc->sc_inq++;
384 		}
385 		IFQ_DEQUEUE(&ifp->if_snd, m);
386 #ifdef DIAGNOSTIC
387 		if (totlen != m->m_pkthdr.len)
388 			panic("zestart: len fault");
389 #endif
390 
391 		/*
392 		 * Kick off the transmit logic, if it is stopped.
393 		 */
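		/*
		 * Writing any value to CSR1 appears to act as a transmit
		 * poll demand, much like on the TULIP: it makes the chip
		 * rescan the transmit list once its transmit state machine
		 * has gone idle.
		 */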
394 		if ((ZE_RCSR(ZE_CSR5) & ZE_NICSR5_TS) != ZE_NICSR5_TS_RUN)
395 			ZE_WCSR(ZE_CSR1, -1);
396 		sc->sc_nexttx = idx;
397 	}
398 	if (sc->sc_inq == (TXDESCS - 1))
399 		ifp->if_flags |= IFF_OACTIVE;
400 
401 out:	if (old_inq < sc->sc_inq)
402 		ifp->if_timer = 5; /* If transmit logic dies */
403 }
404 
405 int
406 sgec_intr(sc)
407 	struct ze_softc *sc;
408 {
409 	struct ze_cdata *zc = sc->sc_zedata;
410 	struct ifnet *ifp = &sc->sc_if;
411 	struct mbuf *m;
412 	int csr, len;
413 
414 	csr = ZE_RCSR(ZE_CSR5);
415 	if ((csr & ZE_NICSR5_IS) == 0) /* Wasn't us */
416 		return 0;
417 	ZE_WCSR(ZE_CSR5, csr);
418 
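	/*
	 * The status bits checked below are ZE_NICSR5_RI and ZE_NICSR5_TI,
	 * which (from the way they are handled here) signal receive and
	 * transmit completion.  In both rings a cleared ownership bit
	 * (ZE_FRAMELEN_OW / ZE_TDR_OW) means the chip has handed the
	 * descriptor back to the driver.
	 */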
419 	if (csr & ZE_NICSR5_RI)
420 		while ((zc->zc_recv[sc->sc_nextrx].ze_framelen &
421 		    ZE_FRAMELEN_OW) == 0) {
422 
423 			ifp->if_ipackets++;
424 			m = sc->sc_rxmbuf[sc->sc_nextrx];
425 			len = zc->zc_recv[sc->sc_nextrx].ze_framelen;
426 			ze_add_rxbuf(sc, sc->sc_nextrx);
427 			m->m_pkthdr.rcvif = ifp;
428 			m->m_pkthdr.len = m->m_len = len;
429 			m->m_flags |= M_HASFCS;
430 			if (++sc->sc_nextrx == RXDESCS)
431 				sc->sc_nextrx = 0;
432 #if NBPFILTER > 0
433 			if (ifp->if_bpf)
434 				bpf_mtap(ifp->if_bpf, m);
435 #endif
436 			(*ifp->if_input)(ifp, m);
437 		}
438 
439 	if (csr & ZE_NICSR5_TI) {
440 		while ((zc->zc_xmit[sc->sc_lastack].ze_tdr & ZE_TDR_OW) == 0) {
441 			int idx = sc->sc_lastack;
442 
443 			if (sc->sc_lastack == sc->sc_nexttx)
444 				break;
445 			sc->sc_inq--;
446 			if (++sc->sc_lastack == TXDESCS)
447 				sc->sc_lastack = 0;
448 
449 			if ((zc->zc_xmit[idx].ze_tdes1 & ZE_TDES1_DT) ==
450 			    ZE_TDES1_DT_SETUP)
451 				continue;
452 			/* XXX collect statistics */
453 			if (zc->zc_xmit[idx].ze_tdes1 & ZE_TDES1_LS)
454 				ifp->if_opackets++;
455 			bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[idx]);
456 			if (sc->sc_txmbuf[idx]) {
457 				m_freem(sc->sc_txmbuf[idx]);
458 				sc->sc_txmbuf[idx] = 0;
459 			}
460 		}
461 		if (sc->sc_inq == 0)
462 			ifp->if_timer = 0;
463 		ifp->if_flags &= ~IFF_OACTIVE;
464 		zestart(ifp); /* Queue up more packets */
465 	}
466 	return 1;
467 }
468 
469 /*
470  * Process an ioctl request.
471  */
472 int
473 zeioctl(ifp, cmd, data)
474 	struct ifnet *ifp;
475 	u_long cmd;
476 	caddr_t data;
477 {
478 	struct ze_softc *sc = ifp->if_softc;
479 	struct ifreq *ifr = (struct ifreq *)data;
480 	struct ifaddr *ifa = (struct ifaddr *)data;
481 	int s = splnet(), error = 0;
482 
483 	switch (cmd) {
484 
485 	case SIOCSIFADDR:
486 		ifp->if_flags |= IFF_UP;
487 		switch(ifa->ifa_addr->sa_family) {
488 #ifdef INET
489 		case AF_INET:
490 			zeinit(sc);
491 			arp_ifinit(ifp, ifa);
492 			break;
493 #endif
494 		}
495 		break;
496 
497 	case SIOCSIFFLAGS:
498 		if ((ifp->if_flags & IFF_UP) == 0 &&
499 		    (ifp->if_flags & IFF_RUNNING) != 0) {
500 			/*
501 			 * If the interface is marked down and it is running,
502 			 * stop it (by disabling the receive mechanism).
503 			 */
504 			ZE_WCSR(ZE_CSR6, ZE_RCSR(ZE_CSR6) &
505 			    ~(ZE_NICSR6_ST|ZE_NICSR6_SR));
506 			ifp->if_flags &= ~IFF_RUNNING;
507 		} else if ((ifp->if_flags & IFF_UP) != 0 &&
508 			   (ifp->if_flags & IFF_RUNNING) == 0) {
509 			/*
510 			 * If the interface is marked up and it is stopped,
511 			 * then start it.
512 			 */
513 			zeinit(sc);
514 		} else if ((ifp->if_flags & IFF_UP) != 0) {
515 			/*
516 			 * Send a new setup packet to match any new changes.
517 			 * (like IFF_PROMISC, etc.)
518 			 */
519 			ze_setup(sc);
520 		}
521 		break;
522 
523 	case SIOCADDMULTI:
524 	case SIOCDELMULTI:
525 		/*
526 		 * Update our multicast list.
527 		 */
528 		error = (cmd == SIOCADDMULTI) ?
529 			ether_addmulti(ifr, &sc->sc_ec):
530 			ether_delmulti(ifr, &sc->sc_ec);
531 
532 		if (error == ENETRESET) {
533 			/*
534 			 * Multicast list has changed; set the hardware filter
535 			 * accordingly.
536 			 */
537 			ze_setup(sc);
538 			error = 0;
539 		}
540 		break;
541 
542 	default:
543 		error = EINVAL;
544 
545 	}
546 	splx(s);
547 	return (error);
548 }
549 
550 /*
551  * Add a receive buffer to the indicated descriptor.
552  */
553 int
554 ze_add_rxbuf(sc, i)
555 	struct ze_softc *sc;
556 	int i;
557 {
558 	struct mbuf *m;
559 	struct ze_rdes *rp;
560 	int error;
561 
562 	MGETHDR(m, M_DONTWAIT, MT_DATA);
563 	if (m == NULL)
564 		return (ENOBUFS);
565 
566 	MCLAIM(m, &sc->sc_ec.ec_rx_mowner);
567 	MCLGET(m, M_DONTWAIT);
568 	if ((m->m_flags & M_EXT) == 0) {
569 		m_freem(m);
570 		return (ENOBUFS);
571 	}
572 
573 	if (sc->sc_rxmbuf[i] != NULL)
574 		bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);
575 
576 	error = bus_dmamap_load(sc->sc_dmat, sc->sc_rcvmap[i],
577 	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
578 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
579 	if (error)
580 		panic("%s: can't load rx DMA map %d, error = %d",
581 		    sc->sc_dev.dv_xname, i, error);
582 	sc->sc_rxmbuf[i] = m;
583 
584 	bus_dmamap_sync(sc->sc_dmat, sc->sc_rcvmap[i], 0,
585 	    sc->sc_rcvmap[i]->dm_mapsize, BUS_DMASYNC_PREREAD);
586 
587 	/*
588 	 * We know that the mbuf cluster is page aligned. Also, be sure
589 	 * that the IP header will be longword aligned.
590 	 */
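	/*
	 * With the data pointer offset by 2, the 14-byte Ethernet header
	 * ends at offset 16 into the page-aligned cluster, so the IP
	 * header that follows starts on a longword boundary (2 + 14 = 16).
	 */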
591 	m->m_data += 2;
592 	rp = &sc->sc_zedata->zc_recv[i];
593 	rp->ze_bufsize = (m->m_ext.ext_size - 2);
594 	rp->ze_bufaddr = (char *)sc->sc_rcvmap[i]->dm_segs[0].ds_addr + 2;
595 	rp->ze_framelen = ZE_FRAMELEN_OW;
596 
597 	return (0);
598 }
599 
600 /*
601  * Create a setup packet and put in queue for sending.
602  */
603 void
604 ze_setup(sc)
605 	struct ze_softc *sc;
606 {
607 	struct ether_multi *enm;
608 	struct ether_multistep step;
609 	struct ze_cdata *zc = sc->sc_zedata;
610 	struct ifnet *ifp = &sc->sc_if;
611 	u_int8_t *enaddr = LLADDR(ifp->if_sadl);
612 	int j, idx, reg;
613 
614 	if (sc->sc_inq == (TXDESCS - 1)) {
615 		sc->sc_setup = 1;
616 		return;
617 	}
618 	sc->sc_setup = 0;
619 	/*
620 	 * Init the setup packet with valid info.
621 	 */
622 	memset(zc->zc_setup, 0xff, sizeof(zc->zc_setup)); /* Broadcast */
623 	memcpy(zc->zc_setup, enaddr, ETHER_ADDR_LEN);
624 
625 	/*
626 	 * Multicast handling. The SGEC can handle up to 16 direct
627 	 * ethernet addresses.
628 	 */
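	/*
	 * Setup-packet layout, as inferred from the code below (not from
	 * chip documentation): zc_setup is a 128-byte buffer of 16 slots,
	 * 8 bytes each, with a 6-byte Ethernet address at the start of
	 * every slot.  Slot 0 gets our own address, the 0xff fill above
	 * supplies the broadcast address, and multicast addresses are
	 * copied in starting at byte offset 16.
	 */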
629 	j = 16;
630 	ifp->if_flags &= ~IFF_ALLMULTI;
631 	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
632 	while (enm != NULL) {
633 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 6)) {
634 			ifp->if_flags |= IFF_ALLMULTI;
635 			break;
636 		}
637 		memcpy(&zc->zc_setup[j], enm->enm_addrlo, ETHER_ADDR_LEN);
638 		j += 8;
639 		ETHER_NEXT_MULTI(step, enm);
640 		if ((enm != NULL) && (j == 128)) {
641 			ifp->if_flags |= IFF_ALLMULTI;
642 			break;
643 		}
644 	}
645 
646 	/*
647 	 * ALLMULTI implies PROMISC in this driver.
648 	 */
649 	if (ifp->if_flags & IFF_ALLMULTI)
650 		ifp->if_flags |= IFF_PROMISC;
651 	else if (ifp->if_pcount == 0)
652 		ifp->if_flags &= ~IFF_PROMISC;
653 
654 	/*
655 	 * Fiddle with the receive logic.
656 	 */
657 	reg = ZE_RCSR(ZE_CSR6);
658 	DELAY(10);
659 	ZE_WCSR(ZE_CSR6, reg & ~ZE_NICSR6_SR); /* Stop rx */
660 	reg &= ~ZE_NICSR6_AF;
661 	if (ifp->if_flags & IFF_PROMISC)
662 		reg |= ZE_NICSR6_AF_PROM;
663 	else if (ifp->if_flags & IFF_ALLMULTI)
664 		reg |= ZE_NICSR6_AF_ALLM;
665 	DELAY(10);
666 	ZE_WCSR(ZE_CSR6, reg);
667 	/*
668 	 * Only send a setup packet if needed.
669 	 */
670 	if ((ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI)) == 0) {
671 		idx = sc->sc_nexttx;
672 		zc->zc_xmit[idx].ze_tdes1 = ZE_TDES1_DT_SETUP;
673 		zc->zc_xmit[idx].ze_bufsize = 128;
674 		zc->zc_xmit[idx].ze_bufaddr = sc->sc_pzedata->zc_setup;
675 		zc->zc_xmit[idx].ze_tdr = ZE_TDR_OW;
676 
677 		if ((ZE_RCSR(ZE_CSR5) & ZE_NICSR5_TS) != ZE_NICSR5_TS_RUN)
678 			ZE_WCSR(ZE_CSR1, -1);
679 
680 		sc->sc_inq++;
681 		if (++sc->sc_nexttx == TXDESCS)
682 			sc->sc_nexttx = 0;
683 	}
684 }
685 
686 /*
687  * Check for dead transmit logic.
688  */
689 void
690 zetimeout(ifp)
691 	struct ifnet *ifp;
692 {
693 	struct ze_softc *sc = ifp->if_softc;
694 
695 	if (sc->sc_inq == 0)
696 		return;
697 
698 	printf("%s: xmit logic died, resetting...\n", sc->sc_dev.dv_xname);
699 	/*
700 	 * Do a reset of the interface to get it going again.
701 	 * Would restarting just the transmit logic be enough?
702 	 */
703 	zeinit(sc);
704 }
705 
706 /*
707  * Reset chip:
708  *  Set/reset the reset flag.
709  *  Write interrupt vector.
710  *  Write ring buffer addresses.
711  *  Write SBR.
712  */
713 int
714 zereset(sc)
715 	struct ze_softc *sc;
716 {
717 	int reg, i;
718 
719 	ZE_WCSR(ZE_CSR6, ZE_NICSR6_RE);
720 	DELAY(50000);
721 	if (ZE_RCSR(ZE_CSR6) & ZE_NICSR5_SF) {
722 		printf("%s: selftest failed\n", sc->sc_dev.dv_xname);
723 		return 1;
724 	}
725 
726 	/*
727 	 * Get the vector that was set at match time, and remember it.
728 	 * WHICH VECTOR TO USE? Take one unused. XXX
729 	 * Funny way to set the vector, as described in the programmer's manual.
730 	 */
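	/*
	 * That "funny way" is the write-and-verify loop below: CSR0 is
	 * rewritten until it reads back the value we wrote, and we give up
	 * (failing the reset) after ten attempts.
	 */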
731 	reg = ZE_NICSR0_IPL14 | sc->sc_intvec | 0x1fff0003; /* SYNC/ASYNC??? */
732 	i = 10;
733 	do {
734 		if (i-- == 0) {
735 			printf("Failing SGEC CSR0 init\n");
736 			return 1;
737 		}
738 		ZE_WCSR(ZE_CSR0, reg);
739 	} while (ZE_RCSR(ZE_CSR0) != reg);
740 
741 	ZE_WCSR(ZE_CSR3, (vaddr_t)sc->sc_pzedata->zc_recv);
742 	ZE_WCSR(ZE_CSR4, (vaddr_t)sc->sc_pzedata->zc_xmit);
743 	return 0;
744 }
745