/*      $NetBSD: sgec.c,v 1.46 2017/05/22 17:23:49 ragge Exp $ */
/*
 * Copyright (c) 1999 Ludd, University of Lule}, Sweden. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for the SGEC (Second Generation Ethernet Controller), sitting
 * on, for example, the VAX 4000/300 (KA670).
 *
 * The SGEC looks like a mixture of the DEQNA and the TULIP. Fun toy.
 *
 * Even though the chip is capable of using virtual addresses (it can
 * read the System Page Table directly), this driver doesn't do so, and
 * there is no benefit in doing it in today's NetBSD either.
 *
 * Things still to do:
 *	Collect statistics.
 *	Use imperfect filtering when there are many multicast addresses.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sgec.c,v 1.46 2017/05/22 17:23:49 ragge Exp $");

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/systm.h>
#include <sys/sockio.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_dl.h>

#include <netinet/in.h>
#include <netinet/if_inarp.h>

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <sys/bus.h>

#include <dev/ic/sgecreg.h>
#include <dev/ic/sgecvar.h>

static	void	zeinit(struct ze_softc *);
static	void	zestart(struct ifnet *);
static	int	zeioctl(struct ifnet *, u_long, void *);
static	int	ze_add_rxbuf(struct ze_softc *, int);
static	void	ze_setup(struct ze_softc *);
static	void	zetimeout(struct ifnet *);
static	bool	zereset(struct ze_softc *);

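/* CSR read/write helpers; they expect a softc pointer named 'sc' in scope. */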
#define	ZE_WCSR(csr, val) \
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, csr, val)
#define	ZE_RCSR(csr) \
	bus_space_read_4(sc->sc_iot, sc->sc_ioh, csr)

/*
 * Interface exists: make available by filling in network interface
 * record.  System will initialize the interface when it is ready
 * to accept packets.
 */
void
sgec_attach(struct ze_softc *sc)
{
	struct ifnet *ifp = &sc->sc_if;
	struct ze_tdes *tp;
	struct ze_rdes *rp;
	bus_dma_segment_t seg;
	int i, rseg, error;

	/*
	 * Allocate DMA safe memory for descriptors and setup memory.
	 */
	error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct ze_cdata),
	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error(": unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sizeof(struct ze_cdata),
	    (void **)&sc->sc_zedata, BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	if (error) {
		aprint_error(
		    ": unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	error = bus_dmamap_create(sc->sc_dmat, sizeof(struct ze_cdata), 1,
	    sizeof(struct ze_cdata), 0, BUS_DMA_NOWAIT, &sc->sc_cmap);
	if (error) {
		aprint_error(
		    ": unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_cmap, sc->sc_zedata,
	    sizeof(struct ze_cdata), NULL, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error(
		    ": unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	/*
	 * Zero the newly allocated memory.
	 */
	memset(sc->sc_zedata, 0, sizeof(struct ze_cdata));

	/*
	 * Create the transmit descriptor DMA maps.
	 */
	for (i = 0; error == 0 && i < TXDESCS; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    TXDESCS - 1, MCLBYTES, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
		    &sc->sc_xmtmap[i]);
	}
	if (error) {
		aprint_error(": unable to create tx DMA map %d, error = %d\n",
		    i, error);
		goto fail_4;
	}

	/*
	 * Create receive buffer DMA maps.
	 */
	for (i = 0; error == 0 && i < RXDESCS; i++) {
		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_rcvmap[i]);
	}
	if (error) {
		aprint_error(": unable to create rx DMA map %d, error = %d\n",
		    i, error);
		goto fail_5;
	}

	/*
	 * Pre-allocate the receive buffers.
	 */
	for (i = 0; error == 0 && i < RXDESCS; i++) {
		error = ze_add_rxbuf(sc, i);
	}

	if (error) {
		aprint_error(
		    ": unable to allocate or map rx buffer %d, error = %d\n",
		    i, error);
		goto fail_6;
	}

	/* For vmstat -i. */
	evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, NULL,
	    device_xname(sc->sc_dev), "intr");
	evcnt_attach_dynamic(&sc->sc_rxintrcnt, EVCNT_TYPE_INTR,
	    &sc->sc_intrcnt, device_xname(sc->sc_dev), "rx intr");
	evcnt_attach_dynamic(&sc->sc_txintrcnt, EVCNT_TYPE_INTR,
	    &sc->sc_intrcnt, device_xname(sc->sc_dev), "tx intr");
	evcnt_attach_dynamic(&sc->sc_txdraincnt, EVCNT_TYPE_INTR,
	    &sc->sc_intrcnt, device_xname(sc->sc_dev), "tx drain");
	evcnt_attach_dynamic(&sc->sc_nobufintrcnt, EVCNT_TYPE_INTR,
	    &sc->sc_intrcnt, device_xname(sc->sc_dev), "nobuf intr");
	evcnt_attach_dynamic(&sc->sc_nointrcnt, EVCNT_TYPE_INTR,
	    &sc->sc_intrcnt, device_xname(sc->sc_dev), "no intr");

	/*
	 * Create ring loops of the buffer chains.
	 * This is only done once.
	 */
	sc->sc_pzedata = (struct ze_cdata *)sc->sc_cmap->dm_segs[0].ds_addr;

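	/*
	 * The extra descriptor at the end of each ring is a chain-address
	 * (CA) descriptor whose buffer address points back to the start of
	 * the ring, so the chip wraps around by itself.
	 */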
	rp = sc->sc_zedata->zc_recv;
	rp[RXDESCS].ze_framelen = ZE_FRAMELEN_OW;
	rp[RXDESCS].ze_rdes1 = ZE_RDES1_CA;
	rp[RXDESCS].ze_bufaddr = (char *)sc->sc_pzedata->zc_recv;

	tp = sc->sc_zedata->zc_xmit;
	tp[TXDESCS].ze_tdr = ZE_TDR_OW;
	tp[TXDESCS].ze_tdes1 = ZE_TDES1_CA;
	tp[TXDESCS].ze_bufaddr = (char *)sc->sc_pzedata->zc_xmit;

	if (zereset(sc))
		return;

	strcpy(ifp->if_xname, device_xname(sc->sc_dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = zestart;
	ifp->if_ioctl = zeioctl;
	ifp->if_watchdog = zetimeout;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

	aprint_normal("\n");
	aprint_normal_dev(sc->sc_dev, "hardware address %s\n",
	    ether_sprintf(sc->sc_enaddr));
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	for (i = 0; i < RXDESCS; i++) {
		if (sc->sc_rxmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);
			m_freem(sc->sc_rxmbuf[i]);
		}
	}
 fail_5:
	for (i = 0; i < TXDESCS; i++) {
		if (sc->sc_xmtmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_xmtmap[i]);
	}
 fail_4:
	for (i = 0; i < RXDESCS; i++) {
		if (sc->sc_rcvmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rcvmap[i]);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cmap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cmap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_zedata,
	    sizeof(struct ze_cdata));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * Initialization of interface.
 */
void
zeinit(struct ze_softc *sc)
{
	struct ifnet *ifp = &sc->sc_if;
	struct ze_cdata *zc = sc->sc_zedata;
	int i;

	/*
	 * Reset the interface.
	 */
	if (zereset(sc))
		return;

	sc->sc_nexttx = sc->sc_inq = sc->sc_lastack = sc->sc_txcnt = 0;
	/*
	 * Release and init transmit descriptors.
	 */
	for (i = 0; i < TXDESCS; i++) {
		if (sc->sc_xmtmap[i]->dm_nsegs > 0)
			bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[i]);
		if (sc->sc_txmbuf[i]) {
			m_freem(sc->sc_txmbuf[i]);
			sc->sc_txmbuf[i] = NULL;
		}
		zc->zc_xmit[i].ze_tdr = 0; /* Clear valid bit */
	}

	/*
	 * Init receive descriptors.
	 */
	for (i = 0; i < RXDESCS; i++)
		zc->zc_recv[i].ze_framelen = ZE_FRAMELEN_OW;
	sc->sc_nextrx = 0;

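	/*
	 * Enable interrupts and start the transmit and receive engines.
	 */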
	ZE_WCSR(ZE_CSR6, ZE_NICSR6_IE|ZE_NICSR6_BL_8|ZE_NICSR6_ST|
	    ZE_NICSR6_SR|ZE_NICSR6_DC);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Send a setup frame.
	 * This will start the transmit machinery as well.
	 */
	ze_setup(sc);
}

/*
 * Start output on interface.
 */
void
zestart(struct ifnet *ifp)
{
	struct ze_softc *sc = ifp->if_softc;
	struct ze_cdata *zc = sc->sc_zedata;
	paddr_t	buffer;
	struct mbuf *m;
	int nexttx, starttx;
	int len, i, totlen, error;
	int old_inq = sc->sc_inq;
	uint16_t orword, tdr = 0;
	bus_dmamap_t map;

	while (sc->sc_inq < (TXDESCS - 1)) {

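		/* A deferred setup frame takes priority over data packets. */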
		if (sc->sc_setup) {
			ze_setup(sc);
			continue;
		}
		nexttx = sc->sc_nexttx;
		IFQ_POLL(&sc->sc_if.if_snd, m);
		if (m == NULL)
			goto out;
		/*
		 * DMA directly from the mbufs; each chain segment gets its
		 * own descriptor, which is why the transmit ring is so big.
		 */
		map = sc->sc_xmtmap[nexttx];
		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
		    BUS_DMA_WRITE);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "zestart: load_mbuf failed: %d", error);
			goto out;
		}

		if (map->dm_nsegs >= TXDESCS)
			panic("zestart"); /* XXX */

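		/*
		 * If there are not enough free descriptors for this chain,
		 * back off and let the transmit interrupt drain the ring.
		 */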
		if ((map->dm_nsegs + sc->sc_inq) >= (TXDESCS - 1)) {
			bus_dmamap_unload(sc->sc_dmat, map);
			ifp->if_flags |= IFF_OACTIVE;
			goto out;
		}

		/*
		 * The mbuf chain is now loaded into the DMA map.
		 * Walk the segments and fill in the descriptors.
		 */
		totlen = 0;
		orword = ZE_TDES1_FS;
		starttx = nexttx;
		for (i = 0; i < map->dm_nsegs; i++) {
			buffer = map->dm_segs[i].ds_addr;
			len = map->dm_segs[i].ds_len;

			KASSERT(len > 0);

			totlen += len;
			/* Last segment of the packet? */
			if (totlen == m->m_pkthdr.len) {
				sc->sc_txcnt += map->dm_nsegs;
				if (sc->sc_txcnt >= TXDESCS * 3 / 4) {
					orword |= ZE_TDES1_IC;
					sc->sc_txcnt = 0;
				}
				orword |= ZE_TDES1_LS;
				sc->sc_txmbuf[nexttx] = m;
			}
			zc->zc_xmit[nexttx].ze_bufsize = len;
			zc->zc_xmit[nexttx].ze_bufaddr = (char *)buffer;
			zc->zc_xmit[nexttx].ze_tdes1 = orword;
			zc->zc_xmit[nexttx].ze_tdr = tdr;

			if (++nexttx == TXDESCS)
				nexttx = 0;
			orword = 0;
			tdr = ZE_TDR_OW;
		}

		sc->sc_inq += map->dm_nsegs;

		IFQ_DEQUEUE(&ifp->if_snd, m);
#ifdef DIAGNOSTIC
		if (totlen != m->m_pkthdr.len)
			panic("zestart: len fault");
#endif
		/*
		 * Turn ownership of the packet over to the device.
		 */
		zc->zc_xmit[starttx].ze_tdr = ZE_TDR_OW;

		/*
		 * Kick off the transmit logic, if it is stopped.
		 */
		if ((ZE_RCSR(ZE_CSR5) & ZE_NICSR5_TS) != ZE_NICSR5_TS_RUN)
			ZE_WCSR(ZE_CSR1, -1);
		sc->sc_nexttx = nexttx;

		bpf_mtap(ifp, m);
	}
	if (sc->sc_inq == (TXDESCS - 1))
		ifp->if_flags |= IFF_OACTIVE;

out:	if (old_inq < sc->sc_inq)
		ifp->if_timer = 5; /* If transmit logic dies */
}

int
sgec_intr(struct ze_softc *sc)
{
	struct ze_cdata *zc = sc->sc_zedata;
	struct ifnet *ifp = &sc->sc_if;
	struct mbuf *m;
	int csr, len;

	csr = ZE_RCSR(ZE_CSR5);
	if ((csr & ZE_NICSR5_IS) == 0) { /* Not our interrupt */
		sc->sc_nointrcnt.ev_count++;
		return 0;
	}
	ZE_WCSR(ZE_CSR5, csr);

	if (csr & ZE_NICSR5_RU)
		sc->sc_nobufintrcnt.ev_count++;

	if (csr & ZE_NICSR5_RI) {
		sc->sc_rxintrcnt.ev_count++;
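		/*
		 * Process every receive descriptor the chip has handed back
		 * to us (ownership bit cleared), refilling each one with a
		 * fresh mbuf cluster.
		 */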
		while ((zc->zc_recv[sc->sc_nextrx].ze_framelen &
		    ZE_FRAMELEN_OW) == 0) {

			m = sc->sc_rxmbuf[sc->sc_nextrx];
			len = zc->zc_recv[sc->sc_nextrx].ze_framelen;
			ze_add_rxbuf(sc, sc->sc_nextrx);
			if (++sc->sc_nextrx == RXDESCS)
				sc->sc_nextrx = 0;
			if (len < ETHER_MIN_LEN) {
				ifp->if_ierrors++;
				m_freem(m);
			} else {
				m_set_rcvif(m, ifp);
				m->m_pkthdr.len = m->m_len =
				    len - ETHER_CRC_LEN;
				if_percpuq_enqueue(ifp->if_percpuq, m);
			}
		}
	}

	if (csr & ZE_NICSR5_TI)
		sc->sc_txintrcnt.ev_count++;
	if (sc->sc_lastack != sc->sc_nexttx) {
		int lastack;
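		/*
		 * Acknowledge completed transmits: walk from the last
		 * acknowledged descriptor towards sc_nexttx, stopping at the
		 * first descriptor still owned by the chip, and free the
		 * mbufs and DMA maps of the finished packets.
		 */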
		for (lastack = sc->sc_lastack; lastack != sc->sc_nexttx; ) {
			bus_dmamap_t map;
			int nlastack;

			if ((zc->zc_xmit[lastack].ze_tdr & ZE_TDR_OW) != 0)
				break;

			if ((zc->zc_xmit[lastack].ze_tdes1 & ZE_TDES1_DT) ==
			    ZE_TDES1_DT_SETUP) {
				if (++lastack == TXDESCS)
					lastack = 0;
				sc->sc_inq--;
				continue;
			}

			KASSERT(zc->zc_xmit[lastack].ze_tdes1 & ZE_TDES1_FS);
			map = sc->sc_xmtmap[lastack];
			KASSERT(map->dm_nsegs > 0);
			nlastack = (lastack + map->dm_nsegs - 1) % TXDESCS;
			if (zc->zc_xmit[nlastack].ze_tdr & ZE_TDR_OW)
				break;
			lastack = nlastack;
			if (sc->sc_txcnt > map->dm_nsegs)
				sc->sc_txcnt -= map->dm_nsegs;
			else
				sc->sc_txcnt = 0;
			sc->sc_inq -= map->dm_nsegs;
			KASSERT(zc->zc_xmit[lastack].ze_tdes1 & ZE_TDES1_LS);
			ifp->if_opackets++;
			bus_dmamap_unload(sc->sc_dmat, map);
			KASSERT(sc->sc_txmbuf[lastack]);
			m_freem(sc->sc_txmbuf[lastack]);
			sc->sc_txmbuf[lastack] = NULL;
			if (++lastack == TXDESCS)
				lastack = 0;
		}
		if (lastack != sc->sc_lastack) {
			sc->sc_txdraincnt.ev_count++;
			sc->sc_lastack = lastack;
			if (sc->sc_inq == 0)
				ifp->if_timer = 0;
			ifp->if_flags &= ~IFF_OACTIVE;
			zestart(ifp); /* Queue up more packets */
		}
	}
	return 1;
}

/*
 * Process an ioctl request.
 */
int
zeioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct ze_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = data;
	int s = splnet(), error = 0;

	switch (cmd) {

	case SIOCINITIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			zeinit(sc);
			arp_ifinit(ifp, ifa);
			break;
#endif
		}
		break;

	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		/* XXX re-use ether_ioctl() */
		switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
		case IFF_RUNNING:
			/*
			 * If the interface is marked down and it is running,
			 * stop it by disabling the receive and transmit logic.
			 */
			ZE_WCSR(ZE_CSR6, ZE_RCSR(ZE_CSR6) &
			    ~(ZE_NICSR6_ST|ZE_NICSR6_SR));
			ifp->if_flags &= ~IFF_RUNNING;
			break;
		case IFF_UP:
			/*
			 * If the interface is marked up and it is stopped,
			 * then start it.
			 */
			zeinit(sc);
			break;
		case IFF_UP|IFF_RUNNING:
			/*
			 * Send a new setup packet to match any new changes
			 * (like IFF_PROMISC, etc.).
			 */
			ze_setup(sc);
			break;
		case 0:
			break;
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/*
		 * Update our multicast list.
		 */
		if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				ze_setup(sc);
			error = 0;
		}
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
	}
	splx(s);
	return (error);
}

/*
 * Add a receive buffer to the indicated descriptor.
 */
int
ze_add_rxbuf(struct ze_softc *sc, int i)
{
	struct mbuf *m;
	struct ze_rdes *rp;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLAIM(m, &sc->sc_ec.ec_rx_mowner);
	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (sc->sc_rxmbuf[i] != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_rcvmap[i],
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error)
		panic("%s: can't load rx DMA map %d, error = %d",
		    device_xname(sc->sc_dev), i, error);
	sc->sc_rxmbuf[i] = m;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rcvmap[i], 0,
	    sc->sc_rcvmap[i]->dm_mapsize, BUS_DMASYNC_PREREAD);

	/*
	 * We know that the mbuf cluster is page aligned. Also, be sure
	 * that the IP header will be longword aligned: the 2-byte offset
	 * plus the 14-byte Ethernet header puts the IP header on a
	 * longword boundary.
	 */
	m->m_data += 2;
	rp = &sc->sc_zedata->zc_recv[i];
	rp->ze_bufsize = (m->m_ext.ext_size - 2);
	rp->ze_bufaddr = (char *)sc->sc_rcvmap[i]->dm_segs[0].ds_addr + 2;
	rp->ze_framelen = ZE_FRAMELEN_OW;

	return (0);
}

/*
 * Create a setup packet and put it in the queue for sending.
 */
void
ze_setup(struct ze_softc *sc)
{
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ze_cdata *zc = sc->sc_zedata;
	struct ifnet *ifp = &sc->sc_if;
	const u_int8_t *enaddr = CLLADDR(ifp->if_sadl);
	int j, idx, reg;

	if (sc->sc_inq == (TXDESCS - 1)) {
		sc->sc_setup = 1;
		return;
	}
	sc->sc_setup = 0;
	/*
	 * Init the setup packet with valid info.
	 */
	memset(zc->zc_setup, 0xff, sizeof(zc->zc_setup)); /* Broadcast */
	memcpy(zc->zc_setup, enaddr, ETHER_ADDR_LEN);

	/*
	 * Multicast handling. The SGEC can handle up to 16 direct
	 * Ethernet addresses.
	 */
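	/*
	 * The 128-byte setup buffer is laid out as sixteen 8-byte address
	 * slots.  It is pre-filled with 0xff (the broadcast address), our
	 * own station address goes into the first slot, and multicast
	 * addresses are copied in from offset 16 onwards, 8 bytes apart.
	 */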
	j = 16;
	ifp->if_flags &= ~IFF_ALLMULTI;
	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 6)) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		memcpy(&zc->zc_setup[j], enm->enm_addrlo, ETHER_ADDR_LEN);
		j += 8;
		ETHER_NEXT_MULTI(step, enm);
		if ((enm != NULL) && (j == 128)) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
	}

	/*
	 * ALLMULTI implies PROMISC in this driver.
	 */
	if (ifp->if_flags & IFF_ALLMULTI)
		ifp->if_flags |= IFF_PROMISC;
	else if (ifp->if_pcount == 0)
		ifp->if_flags &= ~IFF_PROMISC;

	/*
	 * Fiddle with the receive logic.
	 */
	reg = ZE_RCSR(ZE_CSR6);
	DELAY(10);
	ZE_WCSR(ZE_CSR6, reg & ~ZE_NICSR6_SR); /* Stop rx */
	reg &= ~ZE_NICSR6_AF;
	if (ifp->if_flags & IFF_PROMISC)
		reg |= ZE_NICSR6_AF_PROM;
	else if (ifp->if_flags & IFF_ALLMULTI)
		reg |= ZE_NICSR6_AF_ALLM;
	DELAY(10);
	ZE_WCSR(ZE_CSR6, reg);
	/*
	 * Only send a setup packet if needed; in promiscuous mode the
	 * address filter passes everything anyway.
	 */
	if ((ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI)) == 0) {
		idx = sc->sc_nexttx;
		zc->zc_xmit[idx].ze_tdes1 = ZE_TDES1_DT_SETUP;
		zc->zc_xmit[idx].ze_bufsize = 128;
		zc->zc_xmit[idx].ze_bufaddr = sc->sc_pzedata->zc_setup;
		zc->zc_xmit[idx].ze_tdr = ZE_TDR_OW;

		if ((ZE_RCSR(ZE_CSR5) & ZE_NICSR5_TS) != ZE_NICSR5_TS_RUN)
			ZE_WCSR(ZE_CSR1, -1);

		sc->sc_inq++;
		if (++sc->sc_nexttx == TXDESCS)
			sc->sc_nexttx = 0;
	}
}

/*
 * Check for dead transmit logic.
 */
void
zetimeout(struct ifnet *ifp)
{
	struct ze_softc *sc = ifp->if_softc;

	if (sc->sc_inq == 0)
		return;

	aprint_error_dev(sc->sc_dev, "xmit logic died, resetting...\n");
	/*
	 * Reset the interface to get it going again.
	 * Would it be enough to just restart the transmit logic?
	 */
	zeinit(sc);
}

/*
 * Reset chip:
 *  Set/reset the reset flag.
 *  Write interrupt vector.
 *  Write ring buffer addresses.
 *  Write SBR.
 */
bool
zereset(struct ze_softc *sc)
{
	int reg, i;

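	/*
	 * Reset the chip and give its self-test time to finish before
	 * checking the result.
	 */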
	ZE_WCSR(ZE_CSR6, ZE_NICSR6_RE);
	DELAY(50000);
	if (ZE_RCSR(ZE_CSR6) & ZE_NICSR5_SF) {
		aprint_error_dev(sc->sc_dev, "selftest failed\n");
		return true;
	}

	/*
	 * Get the vector that was set at match time, and remember it.
	 * WHICH VECTOR TO USE? Take one unused. XXX
	 * Funny way to set the vector, described in the programmer's manual.
	 */
	reg = ZE_NICSR0_IPL14 | sc->sc_intvec | 0x1fff0003; /* SYNC/ASYNC??? */
	i = 10;
	do {
		if (i-- == 0) {
			aprint_error_dev(sc->sc_dev,
			    "failing SGEC CSR0 init\n");
			return true;
		}
		ZE_WCSR(ZE_CSR0, reg);
	} while (ZE_RCSR(ZE_CSR0) != reg);

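	/* Give the chip the physical addresses of the descriptor rings. */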
	ZE_WCSR(ZE_CSR3, (vaddr_t)sc->sc_pzedata->zc_recv);
	ZE_WCSR(ZE_CSR4, (vaddr_t)sc->sc_pzedata->zc_xmit);
	return false;
}