/*	$NetBSD: if_vr.c,v 1.61 2003/04/10 01:58:21 christos Exp $	*/

/*-
 * Copyright (c) 1998, 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1997, 1998
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 *	$FreeBSD: if_vr.c,v 1.7 1999/01/10 18:51:49 wpaul Exp $
 */

/*
 * VIA Rhine fast ethernet PCI NIC driver
 *
 * Supports various network adapters based on the VIA Rhine
 * and Rhine II PCI controllers, including the D-Link DFE530TX.
 * Datasheets are available at http://www.via.com.tw.
 *
 * Written by Bill Paul <wpaul@ctr.columbia.edu>
 * Electrical Engineering Department
 * Columbia University, New York City
 */
/*
 * The VIA Rhine controllers are similar in some respects to the
 * DEC Tulip chips, though less complicated.  The controller
 * uses an MII bus and an external physical layer interface.  The
 * receiver has a one entry perfect filter and a 64-bit hash table
 * multicast filter.  Transmit and receive descriptors are similar
 * to the Tulip.
 *
 * The Rhine has a serious flaw in its transmit DMA mechanism:
 * transmit buffers must be longword aligned.  Unfortunately,
 * the kernel doesn't guarantee that mbufs will be filled in starting
 * at longword boundaries, so we have to do a buffer copy before
 * transmission.
 *
 * Apparently, the receive DMA mechanism has the same flaw.  This
 * means that on systems with strict alignment requirements, incoming
 * frames must be copied to a new buffer which shifts the data forward
 * 2 bytes so that the payload is aligned on a 4-byte boundary.
 */
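
/*
 * Illustrative sketch (not driver logic, just the standard shape of
 * the workaround described above): received frames must land at a
 * 4-byte-aligned address, so the 14-byte Ethernet header leaves the
 * IP header 2 bytes short of alignment.  Copying into a fresh mbuf
 * whose data pointer has been advanced 2 bytes puts the payload back
 * on a 4-byte boundary:
 *
 *	m->m_data += 2;
 *	memcpy(mtod(m, caddr_t), <received frame>, total_len);
 *
 * vr_rxeof() below does exactly this when __NO_STRICT_ALIGNMENT is
 * not defined.
 */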

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_vr.c,v 1.61 2003/04/10 01:58:21 christos Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include "bpfilter.h"
#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_vrreg.h>

#define	VR_USEIOSPACE

/*
 * Various supported device vendors/types and their names.
 */
static struct vr_type {
	pci_vendor_id_t		vr_vid;
	pci_product_id_t	vr_did;
	const char		*vr_name;
} vr_devs[] = {
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT3043,
		"VIA VT3043 (Rhine) 10/100" },
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT6102,
		"VIA VT6102 (Rhine II) 10/100" },
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT86C100A,
		"VIA VT86C100A (Rhine-II) 10/100" },
	{ 0, 0, NULL }
};

/*
 * Transmit descriptor list size.
 */
#define	VR_NTXDESC		64
#define	VR_NTXDESC_MASK		(VR_NTXDESC - 1)
#define	VR_NEXTTX(x)		(((x) + 1) & VR_NTXDESC_MASK)

/*
 * Receive descriptor list size.
 */
#define	VR_NRXDESC		64
#define	VR_NRXDESC_MASK		(VR_NRXDESC - 1)
#define	VR_NEXTRX(x)		(((x) + 1) & VR_NRXDESC_MASK)
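
/*
 * A small compile-time guard (a sketch of one way to enforce the
 * power-of-two assumption the wrap macros above depend on): the
 * array types below are only valid when the ring sizes have a single
 * bit set.  E.g. VR_NEXTTX(63) wraps to 0 only because the mask is
 * 0x3f; the branchless modulo breaks for non-power-of-two sizes.
 */
typedef char vr_ntxdesc_pow2_check[(VR_NTXDESC & (VR_NTXDESC - 1)) == 0 ? 1 : -1];
typedef char vr_nrxdesc_pow2_check[(VR_NRXDESC & (VR_NRXDESC - 1)) == 0 ? 1 : -1];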

/*
 * Control data structures that are DMA'd to the Rhine chip.  We allocate
 * them in a single clump that maps to a single DMA segment to make several
 * things easier.
 *
 * Note that since we always copy outgoing packets to aligned transmit
 * buffers, we can reduce the transmit descriptors to one per packet.
 */
struct vr_control_data {
	struct vr_desc		vr_txdescs[VR_NTXDESC];
	struct vr_desc		vr_rxdescs[VR_NRXDESC];
};

#define	VR_CDOFF(x)		offsetof(struct vr_control_data, x)
#define	VR_CDTXOFF(x)		VR_CDOFF(vr_txdescs[(x)])
#define	VR_CDRXOFF(x)		VR_CDOFF(vr_rxdescs[(x)])

/*
 * Software state of transmit and receive descriptors.
 */
struct vr_descsoft {
	struct mbuf		*ds_mbuf;	/* head of mbuf chain */
	bus_dmamap_t		ds_dmamap;	/* our DMA map */
};

struct vr_softc {
	struct device		vr_dev;		/* generic device glue */
	void			*vr_ih;		/* interrupt cookie */
	void			*vr_ats;	/* shutdown hook */
	bus_space_tag_t		vr_bst;		/* bus space tag */
	bus_space_handle_t	vr_bsh;		/* bus space handle */
	bus_dma_tag_t		vr_dmat;	/* bus DMA tag */
	pci_chipset_tag_t	vr_pc;		/* PCI chipset info */
	struct ethercom		vr_ec;		/* Ethernet common info */
	u_int8_t		vr_enaddr[ETHER_ADDR_LEN];
	struct mii_data		vr_mii;		/* MII/media info */

	u_int8_t		vr_revid;	/* Rhine chip revision */

	struct callout		vr_tick_ch;	/* tick callout */

	bus_dmamap_t		vr_cddmamap;	/* control data DMA map */
#define	vr_cddma	vr_cddmamap->dm_segs[0].ds_addr

	/*
	 * Software state for transmit and receive descriptors.
	 */
	struct vr_descsoft	vr_txsoft[VR_NTXDESC];
	struct vr_descsoft	vr_rxsoft[VR_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct vr_control_data	*vr_control_data;

	int	vr_txpending;		/* number of TX requests pending */
	int	vr_txdirty;		/* first dirty TX descriptor */
	int	vr_txlast;		/* last used TX descriptor */

	int	vr_rxptr;		/* next ready RX descriptor */
};
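
/*
 * TX ring bookkeeping (an illustration of the invariant the fields
 * above maintain): descriptors vr_txdirty .. vr_txlast, inclusive and
 * modulo VR_NTXDESC, are in flight or awaiting vr_txeof(), and
 * vr_txpending counts them.  For example, txdirty == 2 and
 * txlast == 5 means slots 2, 3, 4 and 5 are outstanding, so
 * txpending == 4.
 */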

#define	VR_CDTXADDR(sc, x)	((sc)->vr_cddma + VR_CDTXOFF((x)))
#define	VR_CDRXADDR(sc, x)	((sc)->vr_cddma + VR_CDRXOFF((x)))

#define	VR_CDTX(sc, x)		(&(sc)->vr_control_data->vr_txdescs[(x)])
#define	VR_CDRX(sc, x)		(&(sc)->vr_control_data->vr_rxdescs[(x)])

#define	VR_DSTX(sc, x)		(&(sc)->vr_txsoft[(x)])
#define	VR_DSRX(sc, x)		(&(sc)->vr_rxsoft[(x)])

#define	VR_CDTXSYNC(sc, x, ops)						\
	bus_dmamap_sync((sc)->vr_dmat, (sc)->vr_cddmamap,		\
	    VR_CDTXOFF((x)), sizeof(struct vr_desc), (ops))

#define	VR_CDRXSYNC(sc, x, ops)						\
	bus_dmamap_sync((sc)->vr_dmat, (sc)->vr_cddmamap,		\
	    VR_CDRXOFF((x)), sizeof(struct vr_desc), (ops))

/*
 * Note we rely on MCLBYTES being a power of two below.
 */
#define	VR_INIT_RXDESC(sc, i)						\
do {									\
	struct vr_desc *__d = VR_CDRX((sc), (i));			\
	struct vr_descsoft *__ds = VR_DSRX((sc), (i));			\
									\
	__d->vr_next = htole32(VR_CDRXADDR((sc), VR_NEXTRX((i))));	\
	__d->vr_status = htole32(VR_RXSTAT_FIRSTFRAG |			\
	    VR_RXSTAT_LASTFRAG | VR_RXSTAT_OWN);			\
	__d->vr_data = htole32(__ds->ds_dmamap->dm_segs[0].ds_addr);	\
	__d->vr_ctl = htole32(VR_RXCTL_CHAIN | VR_RXCTL_RX_INTR |	\
	    ((MCLBYTES - 1) & VR_RXCTL_BUFLEN));			\
	VR_CDRXSYNC((sc), (i), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
} while (0)
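
/*
 * Why VR_INIT_RXDESC syncs with both PREREAD and PREWRITE: the
 * descriptor carries traffic in both directions.  The chip reads
 * vr_next, vr_data and vr_ctl (hence PREWRITE, to flush our stores)
 * and later writes vr_status (hence PREREAD, before we inspect it in
 * vr_rxeof()).
 */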

/*
 * register space access macros
 */
#define	CSR_WRITE_4(sc, reg, val)					\
	bus_space_write_4(sc->vr_bst, sc->vr_bsh, reg, val)
#define	CSR_WRITE_2(sc, reg, val)					\
	bus_space_write_2(sc->vr_bst, sc->vr_bsh, reg, val)
#define	CSR_WRITE_1(sc, reg, val)					\
	bus_space_write_1(sc->vr_bst, sc->vr_bsh, reg, val)

#define	CSR_READ_4(sc, reg)						\
	bus_space_read_4(sc->vr_bst, sc->vr_bsh, reg)
#define	CSR_READ_2(sc, reg)						\
	bus_space_read_2(sc->vr_bst, sc->vr_bsh, reg)
#define	CSR_READ_1(sc, reg)						\
	bus_space_read_1(sc->vr_bst, sc->vr_bsh, reg)

#define	VR_TIMEOUT		1000

static int vr_add_rxbuf		__P((struct vr_softc *, int));

static void vr_rxeof		__P((struct vr_softc *));
static void vr_rxeoc		__P((struct vr_softc *));
static void vr_txeof		__P((struct vr_softc *));
static int vr_intr		__P((void *));
static void vr_start		__P((struct ifnet *));
static int vr_ioctl		__P((struct ifnet *, u_long, caddr_t));
static int vr_init		__P((struct ifnet *));
static void vr_stop		__P((struct ifnet *, int));
static void vr_rxdrain		__P((struct vr_softc *));
static void vr_watchdog	__P((struct ifnet *));
static void vr_tick		__P((void *));

static int vr_ifmedia_upd	__P((struct ifnet *));
static void vr_ifmedia_sts	__P((struct ifnet *, struct ifmediareq *));

static int vr_mii_readreg	__P((struct device *, int, int));
static void vr_mii_writereg	__P((struct device *, int, int, int));
static void vr_mii_statchg	__P((struct device *));

static void vr_setmulti	__P((struct vr_softc *));
static void vr_reset		__P((struct vr_softc *));

int	vr_copy_small = 0;

#define	VR_SETBIT(sc, reg, x)				\
	CSR_WRITE_1(sc, reg,				\
		CSR_READ_1(sc, reg) | x)

#define	VR_CLRBIT(sc, reg, x)				\
	CSR_WRITE_1(sc, reg,				\
		CSR_READ_1(sc, reg) & ~x)

#define	VR_SETBIT16(sc, reg, x)				\
	CSR_WRITE_2(sc, reg,				\
		CSR_READ_2(sc, reg) | x)

#define	VR_CLRBIT16(sc, reg, x)				\
	CSR_WRITE_2(sc, reg,				\
		CSR_READ_2(sc, reg) & ~x)

#define	VR_SETBIT32(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) | x)

#define	VR_CLRBIT32(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) & ~x)

/*
 * MII bit-bang glue.
 */
u_int32_t vr_mii_bitbang_read __P((struct device *));
void vr_mii_bitbang_write __P((struct device *, u_int32_t));

const struct mii_bitbang_ops vr_mii_bitbang_ops = {
	vr_mii_bitbang_read,
	vr_mii_bitbang_write,
	{
		VR_MIICMD_DATAOUT,	/* MII_BIT_MDO */
		VR_MIICMD_DATAIN,	/* MII_BIT_MDI */
		VR_MIICMD_CLK,		/* MII_BIT_MDC */
		VR_MIICMD_DIR,		/* MII_BIT_DIR_HOST_PHY */
		0,			/* MII_BIT_DIR_PHY_HOST */
	}
};
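
/*
 * Rough sketch of how mii_bitbang_readreg() uses the ops above
 * (simplified; the real loop lives in dev/mii/mii_bitbang.c):
 *
 *	for each frame bit to send:
 *		write(MDO bit | VR_MIICMD_DIR);		drive data
 *		write(same | VR_MIICMD_CLK);		clock high
 *		write(same without VR_MIICMD_CLK);	clock low
 *
 * then release the bus (DIR bit clear) and sample VR_MIICMD_DATAIN
 * on each clock to collect the 16 result bits.
 */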

u_int32_t
vr_mii_bitbang_read(self)
	struct device *self;
{
	struct vr_softc *sc = (void *) self;

	return (CSR_READ_1(sc, VR_MIICMD));
}

void
vr_mii_bitbang_write(self, val)
	struct device *self;
	u_int32_t val;
{
	struct vr_softc *sc = (void *) self;

	CSR_WRITE_1(sc, VR_MIICMD, (val & 0xff) | VR_MIICMD_DIRECTPGM);
}

/*
 * Read a PHY register through the MII.
 */
static int
vr_mii_readreg(self, phy, reg)
	struct device *self;
	int phy, reg;
{
	struct vr_softc *sc = (void *) self;

	CSR_WRITE_1(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
	return (mii_bitbang_readreg(self, &vr_mii_bitbang_ops, phy, reg));
}

/*
 * Write to a PHY register through the MII.
 */
static void
vr_mii_writereg(self, phy, reg, val)
	struct device *self;
	int phy, reg, val;
{
	struct vr_softc *sc = (void *) self;

	CSR_WRITE_1(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
	mii_bitbang_writereg(self, &vr_mii_bitbang_ops, phy, reg, val);
}

static void
vr_mii_statchg(self)
	struct device *self;
{
	struct vr_softc *sc = (struct vr_softc *)self;

	/*
	 * In order to fiddle with the 'full-duplex' bit in the netconfig
	 * register, we first have to put the transmit and/or receive logic
	 * in the idle state.
	 */
	VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON));

	if (sc->vr_mii.mii_media_active & IFM_FDX)
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
	else
		VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);

	if (sc->vr_ec.ec_if.if_flags & IFF_RUNNING)
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);
}

#define	vr_calchash(addr) \
	(ether_crc32_be((addr), ETHER_ADDR_LEN) >> 26)
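
/*
 * The hash above keeps the top 6 bits of the big-endian CRC, yielding
 * a bit index 0..63 into the 64-bit filter formed by VR_MAR0
 * (bits 0-31) and VR_MAR1 (bits 32-63).  For example (illustrative
 * values only), an address hashing to 37 sets bit 5 of VR_MAR1 in
 * vr_setmulti() below:
 *
 *	h = 37;  hashes[1] |= (1 << (h - 32));
 */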

/*
 * Program the 64-bit multicast hash filter.
 */
static void
vr_setmulti(sc)
	struct vr_softc *sc;
{
	struct ifnet *ifp;
	int h = 0;
	u_int32_t hashes[2] = { 0, 0 };
	struct ether_multistep step;
	struct ether_multi *enm;
	int mcnt = 0;
	u_int8_t rxfilt;

	ifp = &sc->vr_ec.ec_if;

	rxfilt = CSR_READ_1(sc, VR_RXCFG);

	if (ifp->if_flags & IFF_PROMISC) {
allmulti:
		ifp->if_flags |= IFF_ALLMULTI;
		rxfilt |= VR_RXCFG_RX_MULTI;
		CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
		CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
		return;
	}

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, VR_MAR0, 0);
	CSR_WRITE_4(sc, VR_MAR1, 0);

	/* now program new ones */
	ETHER_FIRST_MULTI(step, &sc->vr_ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0)
			goto allmulti;

		h = vr_calchash(enm->enm_addrlo);

		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		ETHER_NEXT_MULTI(step, enm);
		mcnt++;
	}

	ifp->if_flags &= ~IFF_ALLMULTI;

	if (mcnt)
		rxfilt |= VR_RXCFG_RX_MULTI;
	else
		rxfilt &= ~VR_RXCFG_RX_MULTI;

	CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
	CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
	CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
}

static void
vr_reset(sc)
	struct vr_softc *sc;
{
	int i;

	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);

	for (i = 0; i < VR_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
			break;
	}
	if (i == VR_TIMEOUT) {
		if (sc->vr_revid < REV_ID_VT3065_A) {
			printf("%s: reset never completed!\n",
			    sc->vr_dev.dv_xname);
		} else {
			/* Use newer force reset command */
			printf("%s: using force reset command.\n",
			    sc->vr_dev.dv_xname);
			VR_SETBIT(sc, VR_MISC_CR1, VR_MISCCR1_FORSRST);
		}
	}

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 * Note: the length fields are only 11 bits wide, which means the
 * largest size we can specify is 2047.  This is important because
 * MCLBYTES is 2048, so we have to subtract one; otherwise we'd
 * overflow the field and make a mess.
 */
static int
vr_add_rxbuf(sc, i)
	struct vr_softc *sc;
	int i;
{
	struct vr_descsoft *ds = VR_DSRX(sc, i);
	struct mbuf *m_new;
	int error;

	MGETHDR(m_new, M_DONTWAIT, MT_DATA);
	if (m_new == NULL)
		return (ENOBUFS);

	MCLGET(m_new, M_DONTWAIT);
	if ((m_new->m_flags & M_EXT) == 0) {
		m_freem(m_new);
		return (ENOBUFS);
	}

	if (ds->ds_mbuf != NULL)
		bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);

	ds->ds_mbuf = m_new;

	error = bus_dmamap_load(sc->vr_dmat, ds->ds_dmamap,
	    m_new->m_ext.ext_buf, m_new->m_ext.ext_size, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: unable to load rx DMA map %d, error = %d\n",
		    sc->vr_dev.dv_xname, i, error);
		panic("vr_add_rxbuf");		/* XXX */
	}

	bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	VR_INIT_RXDESC(sc, i);

	return (0);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
static void
vr_rxeof(sc)
	struct vr_softc *sc;
{
	struct mbuf *m;
	struct ifnet *ifp;
	struct vr_desc *d;
	struct vr_descsoft *ds;
	int i, total_len;
	u_int32_t rxstat;

	ifp = &sc->vr_ec.ec_if;

	for (i = sc->vr_rxptr;; i = VR_NEXTRX(i)) {
		d = VR_CDRX(sc, i);
		ds = VR_DSRX(sc, i);

		VR_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		rxstat = le32toh(d->vr_status);

		if (rxstat & VR_RXSTAT_OWN) {
			/*
			 * We have processed all of the receive buffers.
			 */
			break;
		}

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (rxstat & VR_RXSTAT_RXERR) {
			const char *errstr;

			ifp->if_ierrors++;
			switch (rxstat & 0x000000FF) {
			case VR_RXSTAT_CRCERR:
				errstr = "crc error";
				break;
			case VR_RXSTAT_FRAMEALIGNERR:
				errstr = "frame alignment error";
				break;
			case VR_RXSTAT_FIFOOFLOW:
				errstr = "FIFO overflow";
				break;
			case VR_RXSTAT_GIANT:
				errstr = "received giant packet";
				break;
			case VR_RXSTAT_RUNT:
				errstr = "received runt packet";
				break;
			case VR_RXSTAT_BUSERR:
				errstr = "system bus error";
				break;
			case VR_RXSTAT_BUFFERR:
				errstr = "rx buffer error";
				break;
			default:
				errstr = "unknown rx error";
				break;
			}
			printf("%s: receive error: %s\n", sc->vr_dev.dv_xname,
			    errstr);

			VR_INIT_RXDESC(sc, i);

			continue;
		}

		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/* No errors; receive the packet. */
		total_len = VR_RXBYTES(le32toh(d->vr_status));

#ifdef __NO_STRICT_ALIGNMENT
		/*
		 * If the packet is small enough to fit in a
		 * single header mbuf, allocate one and copy
		 * the data into it.  This greatly reduces
		 * memory consumption when we receive lots
		 * of small packets.
		 *
		 * Otherwise, we add a new buffer to the receive
		 * chain.  If this fails, we drop the packet and
		 * recycle the old buffer.
		 */
		if (vr_copy_small != 0 && total_len <= MHLEN) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				goto dropit;
			memcpy(mtod(m, caddr_t),
			    mtod(ds->ds_mbuf, caddr_t), total_len);
			VR_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
		} else {
			m = ds->ds_mbuf;
			if (vr_add_rxbuf(sc, i) == ENOBUFS) {
 dropit:
				ifp->if_ierrors++;
				VR_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->vr_dmat,
				    ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}
		}
#else
		/*
		 * The Rhine's packet buffers must be 4-byte aligned.
		 * But this means that the data after the Ethernet header
		 * is misaligned.  We must allocate a new buffer and
		 * copy the data, shifted forward 2 bytes.
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
 dropit:
			ifp->if_ierrors++;
			VR_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
		if (total_len > (MHLEN - 2)) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(m);
				goto dropit;
			}
		}
		m->m_data += 2;

		/*
		 * Note that we use clusters for incoming frames, so the
		 * buffer is virtually contiguous.
		 */
		memcpy(mtod(m, caddr_t), mtod(ds->ds_mbuf, caddr_t),
		    total_len);

		/* Allow the receive descriptor to continue using its mbuf. */
		VR_INIT_RXDESC(sc, i);
		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
#endif /* __NO_STRICT_ALIGNMENT */

		/*
		 * The Rhine chip includes the FCS with every
		 * received packet.
		 */
		m->m_flags |= M_HASFCS;

		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = total_len;
#if NBPFILTER > 0
		/*
		 * Handle BPF listeners.  Let the BPF user see the packet, but
		 * don't pass it up to the ether_input() layer unless it's
		 * a broadcast packet, multicast packet, matches our ethernet
		 * address or the interface is in promiscuous mode.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->vr_rxptr = i;
}

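/*
 * RX error recovery: reap whatever completed, then stop the receiver,
 * point VR_RXADDR at the next clean descriptor, and restart.  The
 * receiver is stopped around the VR_RXADDR reload so the chip doesn't
 * pick up a stale ring pointer.
 */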
static void
vr_rxeoc(sc)
	struct vr_softc *sc;
{

	vr_rxeof(sc);
	VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
	CSR_WRITE_4(sc, VR_RXADDR, VR_CDRXADDR(sc, sc->vr_rxptr));
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO);
}

/*
 * A frame was downloaded to the chip.  It's safe for us to clean up
 * the list buffers.
 */
static void
vr_txeof(sc)
	struct vr_softc *sc;
{
	struct ifnet *ifp = &sc->vr_ec.ec_if;
	struct vr_desc *d;
	struct vr_descsoft *ds;
	u_int32_t txstat;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	for (i = sc->vr_txdirty; sc->vr_txpending != 0;
	     i = VR_NEXTTX(i), sc->vr_txpending--) {
		d = VR_CDTX(sc, i);
		ds = VR_DSTX(sc, i);

		VR_CDTXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		txstat = le32toh(d->vr_status);
		if (txstat & VR_TXSTAT_OWN)
			break;

		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap,
		    0, ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);
		m_freem(ds->ds_mbuf);
		ds->ds_mbuf = NULL;

		if (txstat & VR_TXSTAT_ERRSUM) {
			ifp->if_oerrors++;
			if (txstat & VR_TXSTAT_DEFER)
				ifp->if_collisions++;
			if (txstat & VR_TXSTAT_LATECOLL)
				ifp->if_collisions++;
		}

		ifp->if_collisions += (txstat & VR_TXSTAT_COLLCNT) >> 3;
		ifp->if_opackets++;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->vr_txdirty = i;

	/*
	 * Cancel the watchdog timer if there are no pending
	 * transmissions.
	 */
	if (sc->vr_txpending == 0)
		ifp->if_timer = 0;
}

static int
vr_intr(arg)
	void *arg;
{
	struct vr_softc *sc;
	struct ifnet *ifp;
	u_int16_t status;
	int handled = 0, dotx = 0;

	sc = arg;
	ifp = &sc->vr_ec.ec_if;

	/* Suppress unwanted interrupts. */
	if ((ifp->if_flags & IFF_UP) == 0) {
		vr_stop(ifp, 1);
		return (0);
	}

	/* Disable interrupts. */
	CSR_WRITE_2(sc, VR_IMR, 0x0000);

	for (;;) {
		status = CSR_READ_2(sc, VR_ISR);
		if (status)
			CSR_WRITE_2(sc, VR_ISR, status);

		if ((status & VR_INTRS) == 0)
			break;

		handled = 1;

		if (status & VR_ISR_RX_OK)
			vr_rxeof(sc);

		if (status &
		    (VR_ISR_RX_ERR | VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW |
		     VR_ISR_RX_DROPPED))
			vr_rxeoc(sc);

		if (status & VR_ISR_TX_OK) {
			dotx = 1;
			vr_txeof(sc);
		}

		if (status & (VR_ISR_TX_UNDERRUN | VR_ISR_TX_ABRT)) {
			if (status & VR_ISR_TX_UNDERRUN)
				printf("%s: transmit underrun\n",
				    sc->vr_dev.dv_xname);
			if (status & VR_ISR_TX_ABRT)
				printf("%s: transmit aborted\n",
				    sc->vr_dev.dv_xname);
			ifp->if_oerrors++;
			dotx = 1;
			vr_txeof(sc);
			if (sc->vr_txpending) {
				VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON);
				VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO);
			}
			/*
			 * Unfortunately many cards get stuck after
			 * aborted transmits, so we reset them.
			 */
			if (status & VR_ISR_TX_ABRT) {
				printf("%s: restarting\n", sc->vr_dev.dv_xname);
				dotx = 0;
				(void) vr_init(ifp);
			}
		}

		if (status & VR_ISR_BUSERR) {
			printf("%s: PCI bus error\n", sc->vr_dev.dv_xname);
			/* vr_init() calls vr_start() */
			dotx = 0;
			(void) vr_init(ifp);
		}
	}

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

	if (dotx)
		vr_start(ifp);

	return (handled);
}

/*
 * Main transmit routine.  Where possible we put pointers to the mbuf
 * data regions directly in the transmit descriptors to avoid copies;
 * unaligned, fragmented or undersized packets are first copied into a
 * fresh, aligned mbuf.  We also keep a software copy of each mbuf
 * pointer, since the descriptor fragment pointers are physical
 * addresses.
 */
static void
vr_start(ifp)
	struct ifnet *ifp;
{
	struct vr_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct vr_desc *d;
	struct vr_descsoft *ds;
	int error, firsttx, nexttx, opending;

	/*
	 * Remember the previous txpending and the first transmit
	 * descriptor we use.
	 */
	opending = sc->vr_txpending;
	firsttx = VR_NEXTTX(sc->vr_txlast);

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->vr_txpending < VR_NTXDESC) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		/*
		 * Get the next available transmit descriptor.
		 */
		nexttx = VR_NEXTTX(sc->vr_txlast);
		d = VR_CDTX(sc, nexttx);
		ds = VR_DSTX(sc, nexttx);

		/*
		 * Load the DMA map.  If this fails, the packet didn't
		 * fit in one DMA segment, and we need to copy.  Note
		 * that the packet must also be aligned.  If the packet
		 * is too small, copy it as well, so that we're sure to
		 * have enough room for the pad buffer.
		 */
		if ((mtod(m0, uintptr_t) & 3) != 0 ||
		    m0->m_pkthdr.len < VR_MIN_FRAMELEN ||
		    bus_dmamap_load_mbuf(sc->vr_dmat, ds->ds_dmamap, m0,
		     BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->vr_dev.dv_xname);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->vr_dev.dv_xname);
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			/*
			 * The Rhine doesn't auto-pad, so we have to do this
			 * ourselves.
			 */
			if (m0->m_pkthdr.len < VR_MIN_FRAMELEN) {
				memset(mtod(m, caddr_t) + m0->m_pkthdr.len,
				    0, VR_MIN_FRAMELEN - m0->m_pkthdr.len);
				m->m_pkthdr.len = m->m_len = VR_MIN_FRAMELEN;
			}
			error = bus_dmamap_load_mbuf(sc->vr_dmat,
			    ds->ds_dmamap, m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->vr_dev.dv_xname, error);
				break;
			}
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif

		/*
		 * Fill in the transmit descriptor.
		 */
		d->vr_data = htole32(ds->ds_dmamap->dm_segs[0].ds_addr);
		d->vr_ctl = htole32(m0->m_pkthdr.len);
		d->vr_ctl |=
		    htole32(VR_TXCTL_TLINK|VR_TXCTL_FIRSTFRAG|
		    VR_TXCTL_LASTFRAG);

		/*
		 * If this is the first descriptor we're enqueuing,
		 * don't give it to the Rhine yet.  That could cause
		 * a race condition.  We'll do it below.
		 */
		if (nexttx == firsttx)
			d->vr_status = 0;
		else
			d->vr_status = htole32(VR_TXSTAT_OWN);

		VR_CDTXSYNC(sc, nexttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Advance the tx pointer. */
		sc->vr_txpending++;
		sc->vr_txlast = nexttx;
	}

	if (sc->vr_txpending == VR_NTXDESC) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->vr_txpending != opending) {
		/*
		 * We enqueued packets.  If the transmitter was idle,
		 * reset the txdirty pointer.
		 */
		if (opending == 0)
			sc->vr_txdirty = firsttx;

		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued.
		 */
		VR_CDTX(sc, sc->vr_txlast)->vr_ctl |= htole32(VR_TXCTL_FINT);
		VR_CDTXSYNC(sc, sc->vr_txlast,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * The entire packet chain is set up.  Give the
		 * first descriptor to the Rhine now.
		 */
		VR_CDTX(sc, firsttx)->vr_status = htole32(VR_TXSTAT_OWN);
		VR_CDTXSYNC(sc, firsttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Start the transmitter. */
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_TX_GO);

		/* Set the watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * Initialize the interface.  Must be called at splnet.
 */
static int
vr_init(ifp)
	struct ifnet *ifp;
{
	struct vr_softc *sc = ifp->if_softc;
	struct vr_desc *d;
	struct vr_descsoft *ds;
	int i, error = 0;

	/* Cancel pending I/O. */
	vr_stop(ifp, 0);

	/* Reset the Rhine to a known state. */
	vr_reset(sc);

	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_STORENFWD);

	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
	VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD);

	/*
	 * Initialize the transmit descriptor ring.  txlast is initialized
	 * to the end of the list so that it will wrap around to the first
	 * descriptor when the first packet is transmitted.
	 */
	for (i = 0; i < VR_NTXDESC; i++) {
		d = VR_CDTX(sc, i);
		memset(d, 0, sizeof(struct vr_desc));
		d->vr_next = htole32(VR_CDTXADDR(sc, VR_NEXTTX(i)));
		VR_CDTXSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->vr_txpending = 0;
	sc->vr_txdirty = 0;
	sc->vr_txlast = VR_NTXDESC - 1;

	/*
	 * Initialize the receive descriptor ring.
	 */
	for (i = 0; i < VR_NRXDESC; i++) {
		ds = VR_DSRX(sc, i);
		if (ds->ds_mbuf == NULL) {
			if ((error = vr_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    sc->vr_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				vr_rxdrain(sc);
				goto out;
			}
		} else
			VR_INIT_RXDESC(sc, i);
	}
	sc->vr_rxptr = 0;

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
	else
		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);

	/* Set capture broadcast bit to capture broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST)
		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
	else
		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);

	/* Program the multicast filter, if necessary. */
	vr_setmulti(sc);

	/* Give the transmit and receive rings to the Rhine. */
	CSR_WRITE_4(sc, VR_RXADDR, VR_CDRXADDR(sc, sc->vr_rxptr));
	CSR_WRITE_4(sc, VR_TXADDR, VR_CDTXADDR(sc, VR_NEXTTX(sc->vr_txlast)));

	/* Set current media. */
	mii_mediachg(&sc->vr_mii);

	/* Enable receiver and transmitter. */
	CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START|
				    VR_CMD_TX_ON|VR_CMD_RX_ON|
				    VR_CMD_RX_GO);

	/* Enable interrupts. */
	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Start one second timer. */
	callout_reset(&sc->vr_tick_ch, hz, vr_tick, sc);

	/* Attempt to start output on the interface. */
	vr_start(ifp);

 out:
	if (error)
		printf("%s: interface not running\n", sc->vr_dev.dv_xname);
	return (error);
}

/*
 * Set media options.
 */
static int
vr_ifmedia_upd(ifp)
	struct ifnet *ifp;
{
	struct vr_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_UP)
		mii_mediachg(&sc->vr_mii);
	return (0);
}

/*
 * Report current media status.
 */
static void
vr_ifmedia_sts(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct vr_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->vr_mii);
	ifmr->ifm_status = sc->vr_mii.mii_media_status;
	ifmr->ifm_active = sc->vr_mii.mii_media_active;
}

static int
vr_ioctl(ifp, command, data)
	struct ifnet *ifp;
	u_long command;
	caddr_t data;
{
	struct vr_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (command) {
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->vr_mii.mii_media, command);
		break;

	default:
		error = ether_ioctl(ifp, command, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			vr_setmulti(sc);
			error = 0;
		}
		break;
	}

	splx(s);
	return (error);
}

static void
vr_watchdog(ifp)
	struct ifnet *ifp;
{
	struct vr_softc *sc = ifp->if_softc;

	printf("%s: device timeout\n", sc->vr_dev.dv_xname);
	ifp->if_oerrors++;

	(void) vr_init(ifp);
}

/*
 * One second timer, used to tick MII.
 */
static void
vr_tick(arg)
	void *arg;
{
	struct vr_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->vr_mii);
	splx(s);

	callout_reset(&sc->vr_tick_ch, hz, vr_tick, sc);
}

/*
 * Drain the receive queue.
 */
static void
vr_rxdrain(sc)
	struct vr_softc *sc;
{
	struct vr_descsoft *ds;
	int i;

	for (i = 0; i < VR_NRXDESC; i++) {
		ds = VR_DSRX(sc, i);
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * transmit lists.
 */
static void
vr_stop(ifp, disable)
	struct ifnet *ifp;
	int disable;
{
	struct vr_softc *sc = ifp->if_softc;
	struct vr_descsoft *ds;
	int i;

	/* Cancel one second timer. */
	callout_stop(&sc->vr_tick_ch);

	/* Down the MII. */
	mii_down(&sc->vr_mii);

	ifp = &sc->vr_ec.ec_if;
	ifp->if_timer = 0;

	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP);
	VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON));
	CSR_WRITE_2(sc, VR_IMR, 0x0000);
	CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
	CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < VR_NTXDESC; i++) {
		ds = VR_DSTX(sc, i);
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}

	if (disable)
		vr_rxdrain(sc);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

static struct vr_type *vr_lookup __P((struct pci_attach_args *));
static int vr_probe __P((struct device *, struct cfdata *, void *));
static void vr_attach __P((struct device *, struct device *, void *));
static void vr_shutdown __P((void *));

CFATTACH_DECL(vr, sizeof (struct vr_softc),
    vr_probe, vr_attach, NULL, NULL);

static struct vr_type *
vr_lookup(pa)
	struct pci_attach_args *pa;
{
	struct vr_type *vrt;

	for (vrt = vr_devs; vrt->vr_name != NULL; vrt++) {
		if (PCI_VENDOR(pa->pa_id) == vrt->vr_vid &&
		    PCI_PRODUCT(pa->pa_id) == vrt->vr_did)
			return (vrt);
	}
	return (NULL);
}

static int
vr_probe(parent, match, aux)
	struct device *parent;
	struct cfdata *match;
	void *aux;
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;

	if (vr_lookup(pa) != NULL)
		return (1);

	return (0);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
vr_shutdown(arg)
	void *arg;
{
	struct vr_softc *sc = (struct vr_softc *)arg;

	vr_stop(&sc->vr_ec.ec_if, 1);
}

/*
 * Attach the interface.  Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static void
vr_attach(parent, self, aux)
	struct device *parent;
	struct device *self;
	void *aux;
{
	struct vr_softc *sc = (struct vr_softc *) self;
	struct pci_attach_args *pa = (struct pci_attach_args *) aux;
	bus_dma_segment_t seg;
	struct vr_type *vrt;
	u_int32_t command;
	struct ifnet *ifp;
	u_char eaddr[ETHER_ADDR_LEN];
	int i, rseg, error;

#define	PCI_CONF_WRITE(r, v)	pci_conf_write(pa->pa_pc, pa->pa_tag, (r), (v))
#define	PCI_CONF_READ(r)	pci_conf_read(pa->pa_pc, pa->pa_tag, (r))

	callout_init(&sc->vr_tick_ch);

	vrt = vr_lookup(pa);
	if (vrt == NULL) {
		printf("\n");
		panic("vr_attach: impossible");
	}

	printf(": %s Ethernet\n", vrt->vr_name);

	/*
	 * Handle power management nonsense.
	 */

	command = PCI_CONF_READ(VR_PCI_CAPID) & 0x000000FF;
	if (command == 0x01) {
		command = PCI_CONF_READ(VR_PCI_PWRMGMTCTRL);
		if (command & VR_PSTATE_MASK) {
			u_int32_t iobase, membase, irq;

			/* Save important PCI config data. */
			iobase = PCI_CONF_READ(VR_PCI_LOIO);
			membase = PCI_CONF_READ(VR_PCI_LOMEM);
			irq = PCI_CONF_READ(VR_PCI_INTLINE);

			/* Reset the power state. */
			printf("%s: chip is in D%d power mode "
				"-- setting to D0\n",
				sc->vr_dev.dv_xname, command & VR_PSTATE_MASK);
			command &= 0xFFFFFFFC;
			PCI_CONF_WRITE(VR_PCI_PWRMGMTCTRL, command);

			/* Restore PCI config data. */
			PCI_CONF_WRITE(VR_PCI_LOIO, iobase);
			PCI_CONF_WRITE(VR_PCI_LOMEM, membase);
			PCI_CONF_WRITE(VR_PCI_INTLINE, irq);
		}
	}

	/* Make sure bus mastering is enabled. */
	command = PCI_CONF_READ(PCI_COMMAND_STATUS_REG);
	command |= PCI_COMMAND_MASTER_ENABLE;
	PCI_CONF_WRITE(PCI_COMMAND_STATUS_REG, command);

	/* Get revision */
	sc->vr_revid = PCI_CONF_READ(VR_PCI_REVID) & 0x000000FF;

	/*
	 * Map control/status registers.
	 */
	{
		bus_space_tag_t iot, memt;
		bus_space_handle_t ioh, memh;
		int ioh_valid, memh_valid;
		pci_intr_handle_t intrhandle;
		const char *intrstr;

		ioh_valid = (pci_mapreg_map(pa, VR_PCI_LOIO,
			PCI_MAPREG_TYPE_IO, 0,
			&iot, &ioh, NULL, NULL) == 0);
		memh_valid = (pci_mapreg_map(pa, VR_PCI_LOMEM,
			PCI_MAPREG_TYPE_MEM |
			PCI_MAPREG_MEM_TYPE_32BIT,
			0, &memt, &memh, NULL, NULL) == 0);
#if defined(VR_USEIOSPACE)
		if (ioh_valid) {
			sc->vr_bst = iot;
			sc->vr_bsh = ioh;
		} else if (memh_valid) {
			sc->vr_bst = memt;
			sc->vr_bsh = memh;
		}
#else
		if (memh_valid) {
			sc->vr_bst = memt;
			sc->vr_bsh = memh;
		} else if (ioh_valid) {
			sc->vr_bst = iot;
			sc->vr_bsh = ioh;
		}
#endif
		else {
			printf(": unable to map device registers\n");
			return;
		}

		/* Allocate interrupt */
		if (pci_intr_map(pa, &intrhandle)) {
			printf("%s: couldn't map interrupt\n",
				sc->vr_dev.dv_xname);
			return;
		}
		intrstr = pci_intr_string(pa->pa_pc, intrhandle);
		sc->vr_ih = pci_intr_establish(pa->pa_pc, intrhandle, IPL_NET,
						vr_intr, sc);
		if (sc->vr_ih == NULL) {
			printf("%s: couldn't establish interrupt",
				sc->vr_dev.dv_xname);
			if (intrstr != NULL)
				printf(" at %s", intrstr);
			printf("\n");
			return;
		}
		printf("%s: interrupting at %s\n",
			sc->vr_dev.dv_xname, intrstr);
	}

	/*
	 * Windows may put the chip in suspend mode when it
	 * shuts down.  Be sure to kick it in the head to wake it
	 * up again.
	 */
	VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1));

	/* Reset the adapter. */
	vr_reset(sc);

	/*
	 * Get station address.  The way the Rhine chips work,
	 * you're not allowed to directly access the EEPROM once
	 * they've been programmed a special way.  Consequently,
	 * we need to read the node address from the PAR0 and PAR1
	 * registers.
	 */
	VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
	DELAY(200);
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);

	/*
	 * A Rhine chip was detected.  Inform the world.
	 */
	printf("%s: Ethernet address: %s\n",
		sc->vr_dev.dv_xname, ether_sprintf(eaddr));

	memcpy(sc->vr_enaddr, eaddr, ETHER_ADDR_LEN);

	sc->vr_dmat = pa->pa_dmat;

	/*
	 * Allocate the control data structures, and create and load
	 * the DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->vr_dmat,
	    sizeof(struct vr_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->vr_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->vr_dmat, &seg, rseg,
	    sizeof(struct vr_control_data), (caddr_t *)&sc->vr_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->vr_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->vr_dmat,
	    sizeof(struct vr_control_data), 1,
	    sizeof(struct vr_control_data), 0, 0,
	    &sc->vr_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->vr_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->vr_dmat, sc->vr_cddmamap,
	    sc->vr_control_data, sizeof(struct vr_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->vr_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < VR_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->vr_dmat, MCLBYTES,
		    1, MCLBYTES, 0, 0,
		    &VR_DSTX(sc, i)->ds_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->vr_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < VR_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->vr_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0,
		    &VR_DSRX(sc, i)->ds_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->vr_dev.dv_xname, i, error);
			goto fail_5;
		}
		VR_DSRX(sc, i)->ds_mbuf = NULL;
	}

	ifp = &sc->vr_ec.ec_if;
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vr_ioctl;
	ifp->if_start = vr_start;
	ifp->if_watchdog = vr_watchdog;
	ifp->if_init = vr_init;
	ifp->if_stop = vr_stop;
	IFQ_SET_READY(&ifp->if_snd);

	strcpy(ifp->if_xname, sc->vr_dev.dv_xname);

	/*
	 * Initialize MII/media info.
	 */
	sc->vr_mii.mii_ifp = ifp;
	sc->vr_mii.mii_readreg = vr_mii_readreg;
	sc->vr_mii.mii_writereg = vr_mii_writereg;
	sc->vr_mii.mii_statchg = vr_mii_statchg;
	ifmedia_init(&sc->vr_mii.mii_media, IFM_IMASK, vr_ifmedia_upd,
		vr_ifmedia_sts);
	mii_attach(&sc->vr_dev, &sc->vr_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_FORCEANEG);
	if (LIST_FIRST(&sc->vr_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->vr_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->vr_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->vr_mii.mii_media, IFM_ETHER|IFM_AUTO);

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, sc->vr_enaddr);

	sc->vr_ats = shutdownhook_establish(vr_shutdown, sc);
	if (sc->vr_ats == NULL)
		printf("%s: warning: couldn't establish shutdown hook\n",
			sc->vr_dev.dv_xname);
	return;

 fail_5:
	for (i = 0; i < VR_NRXDESC; i++) {
		if (sc->vr_rxsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->vr_dmat,
			    sc->vr_rxsoft[i].ds_dmamap);
	}
 fail_4:
	for (i = 0; i < VR_NTXDESC; i++) {
		if (sc->vr_txsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->vr_dmat,
			    sc->vr_txsoft[i].ds_dmamap);
	}
	bus_dmamap_unload(sc->vr_dmat, sc->vr_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->vr_dmat, sc->vr_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->vr_dmat, (caddr_t)sc->vr_control_data,
	    sizeof(struct vr_control_data));
 fail_1:
	bus_dmamem_free(sc->vr_dmat, &seg, rseg);
 fail_0:
	return;
}
1699