/*	$NetBSD: if_vr.c,v 1.67 2003/11/14 22:33:29 jmcneill Exp $	*/

/*-
 * Copyright (c) 1998, 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1997, 1998
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 *	$FreeBSD: if_vr.c,v 1.7 1999/01/10 18:51:49 wpaul Exp $
 */

/*
 * VIA Rhine fast ethernet PCI NIC driver
 *
 * Supports various network adapters based on the VIA Rhine
 * and Rhine II PCI controllers, including the D-Link DFE530TX.
 * Datasheets are available at http://www.via.com.tw.
 *
 * Written by Bill Paul <wpaul@ctr.columbia.edu>
 * Electrical Engineering Department
 * Columbia University, New York City
 */

/*
 * The VIA Rhine controllers are similar in some respects to the
 * DEC tulip chips, except less complicated. The controller
 * uses an MII bus and an external physical layer interface. The
 * receiver has a one entry perfect filter and a 64-bit hash table
 * multicast filter. Transmit and receive descriptors are similar
 * to the tulip.
 *
 * The Rhine has a serious flaw in its transmit DMA mechanism:
 * transmit buffers must be longword aligned. Unfortunately,
 * the kernel doesn't guarantee that mbufs will be filled in starting
 * at longword boundaries, so we have to do a buffer copy before
 * transmission.
 *
 * Apparently, the receive DMA mechanism also has the same flaw.  This
 * means that on systems with struct alignment requirements, incoming
 * frames must be copied to a new buffer which shifts the data forward
 * 2 bytes so that the payload is aligned on a 4-byte boundary.
 */
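
/*
 * Illustrative sketch (not compiled): with a 14-byte Ethernet header, a
 * frame received at a 4-byte boundary leaves its IP payload only 2-byte
 * aligned.  Copying the frame 2 bytes into a fresh mbuf restores 4-byte
 * payload alignment, which is what vr_rxeof() below does on
 * strict-alignment machines.
 */
#if 0
	MGETHDR(m, M_DONTWAIT, MT_DATA);	/* fresh, writable mbuf */
	m->m_data += 2;				/* 2 + 14 == 16, longword aligned */
	memcpy(mtod(m, caddr_t), mtod(ds->ds_mbuf, caddr_t), total_len);
#endif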

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_vr.c,v 1.67 2003/11/14 22:33:29 jmcneill Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include "bpfilter.h"
#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_vrreg.h>

#define	VR_USEIOSPACE

/*
 * Various supported device vendors/types and their names.
 */
static struct vr_type {
	pci_vendor_id_t		vr_vid;
	pci_product_id_t	vr_did;
	const char		*vr_name;
} vr_devs[] = {
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT3043,
		"VIA VT3043 (Rhine) 10/100" },
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT6102,
		"VIA VT6102 (Rhine II) 10/100" },
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT6105,
		"VIA VT6105 (Rhine III) 10/100" },
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT86C100A,
		"VIA VT86C100A (Rhine-II) 10/100" },
	{ 0, 0, NULL }
};

/*
 * Transmit descriptor list size.
 */
#define	VR_NTXDESC		64
#define	VR_NTXDESC_MASK		(VR_NTXDESC - 1)
#define	VR_NEXTTX(x)		(((x) + 1) & VR_NTXDESC_MASK)

/*
 * Receive descriptor list size.
 */
#define	VR_NRXDESC		64
#define	VR_NRXDESC_MASK		(VR_NRXDESC - 1)
#define	VR_NEXTRX(x)		(((x) + 1) & VR_NRXDESC_MASK)
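
/*
 * Sketch of the ring arithmetic above (illustrative only): the ring
 * sizes are powers of two, so masking with (size - 1) wraps an index
 * without a modulo, e.g. VR_NEXTTX(63) == (63 + 1) & 63 == 0.
 */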

/*
 * Control data structures that are DMA'd to the Rhine chip.  We allocate
 * them in a single clump that maps to a single DMA segment to make several
 * things easier.
 *
 * Note that since we always copy outgoing packets to aligned transmit
 * buffers, we can reduce the transmit descriptors to one per packet.
 */
struct vr_control_data {
	struct vr_desc		vr_txdescs[VR_NTXDESC];
	struct vr_desc		vr_rxdescs[VR_NRXDESC];
};

#define	VR_CDOFF(x)		offsetof(struct vr_control_data, x)
#define	VR_CDTXOFF(x)		VR_CDOFF(vr_txdescs[(x)])
#define	VR_CDRXOFF(x)		VR_CDOFF(vr_rxdescs[(x)])
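
/*
 * Worked example (illustrative only): these offsetof() macros yield the
 * byte offset of one descriptor within the clump, so the single DMA map
 * can be synced one descriptor at a time, e.g.
 * VR_CDTXOFF(2) == 2 * sizeof(struct vr_desc), and
 * VR_CDRXOFF(0) == VR_NTXDESC * sizeof(struct vr_desc).
 */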

/*
 * Software state of transmit and receive descriptors.
 */
struct vr_descsoft {
	struct mbuf		*ds_mbuf;	/* head of mbuf chain */
	bus_dmamap_t		ds_dmamap;	/* our DMA map */
};

struct vr_softc {
	struct device		vr_dev;		/* generic device glue */
	void			*vr_ih;		/* interrupt cookie */
	void			*vr_ats;	/* shutdown hook */
	bus_space_tag_t		vr_bst;		/* bus space tag */
	bus_space_handle_t	vr_bsh;		/* bus space handle */
	bus_dma_tag_t		vr_dmat;	/* bus DMA tag */
	pci_chipset_tag_t	vr_pc;		/* PCI chipset info */
	struct ethercom		vr_ec;		/* Ethernet common info */
	u_int8_t 		vr_enaddr[ETHER_ADDR_LEN];
	struct mii_data		vr_mii;		/* MII/media info */

	u_int8_t		vr_revid;	/* Rhine chip revision */

	struct callout		vr_tick_ch;	/* tick callout */

	bus_dmamap_t		vr_cddmamap;	/* control data DMA map */
#define	vr_cddma	vr_cddmamap->dm_segs[0].ds_addr

	/*
	 * Software state for transmit and receive descriptors.
	 */
	struct vr_descsoft	vr_txsoft[VR_NTXDESC];
	struct vr_descsoft	vr_rxsoft[VR_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct vr_control_data	*vr_control_data;

	int	vr_txpending;		/* number of TX requests pending */
	int	vr_txdirty;		/* first dirty TX descriptor */
	int	vr_txlast;		/* last used TX descriptor */

	int	vr_rxptr;		/* next ready RX descriptor */
};

#define	VR_CDTXADDR(sc, x)	((sc)->vr_cddma + VR_CDTXOFF((x)))
#define	VR_CDRXADDR(sc, x)	((sc)->vr_cddma + VR_CDRXOFF((x)))

#define	VR_CDTX(sc, x)		(&(sc)->vr_control_data->vr_txdescs[(x)])
#define	VR_CDRX(sc, x)		(&(sc)->vr_control_data->vr_rxdescs[(x)])

#define	VR_DSTX(sc, x)		(&(sc)->vr_txsoft[(x)])
#define	VR_DSRX(sc, x)		(&(sc)->vr_rxsoft[(x)])

#define	VR_CDTXSYNC(sc, x, ops)						\
	bus_dmamap_sync((sc)->vr_dmat, (sc)->vr_cddmamap,		\
	    VR_CDTXOFF((x)), sizeof(struct vr_desc), (ops))

#define	VR_CDRXSYNC(sc, x, ops)						\
	bus_dmamap_sync((sc)->vr_dmat, (sc)->vr_cddmamap,		\
	    VR_CDRXOFF((x)), sizeof(struct vr_desc), (ops))

/*
 * Note we rely on MCLBYTES being a power of two below.
 */
#define	VR_INIT_RXDESC(sc, i)						\
do {									\
	struct vr_desc *__d = VR_CDRX((sc), (i));			\
	struct vr_descsoft *__ds = VR_DSRX((sc), (i));			\
									\
	__d->vr_next = htole32(VR_CDRXADDR((sc), VR_NEXTRX((i))));	\
	__d->vr_status = htole32(VR_RXSTAT_FIRSTFRAG |			\
	    VR_RXSTAT_LASTFRAG | VR_RXSTAT_OWN);			\
	__d->vr_data = htole32(__ds->ds_dmamap->dm_segs[0].ds_addr);	\
	__d->vr_ctl = htole32(VR_RXCTL_CHAIN | VR_RXCTL_RX_INTR |	\
	    ((MCLBYTES - 1) & VR_RXCTL_BUFLEN));			\
	VR_CDRXSYNC((sc), (i), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
} while (/* CONSTCOND */ 0)
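
/*
 * Worked example (illustrative only): the Rhine's receive buffer length
 * field is 11 bits wide, so the largest programmable length is 2047.
 * Since MCLBYTES is 2048, (MCLBYTES - 1) & VR_RXCTL_BUFLEN == 2047 above,
 * which fits the field; see also the comment before vr_add_rxbuf().
 */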

/*
 * register space access macros
 */
#define	CSR_WRITE_4(sc, reg, val)					\
	bus_space_write_4(sc->vr_bst, sc->vr_bsh, reg, val)
#define	CSR_WRITE_2(sc, reg, val)					\
	bus_space_write_2(sc->vr_bst, sc->vr_bsh, reg, val)
#define	CSR_WRITE_1(sc, reg, val)					\
	bus_space_write_1(sc->vr_bst, sc->vr_bsh, reg, val)

#define	CSR_READ_4(sc, reg)						\
	bus_space_read_4(sc->vr_bst, sc->vr_bsh, reg)
#define	CSR_READ_2(sc, reg)						\
	bus_space_read_2(sc->vr_bst, sc->vr_bsh, reg)
#define	CSR_READ_1(sc, reg)						\
	bus_space_read_1(sc->vr_bst, sc->vr_bsh, reg)

#define	VR_TIMEOUT		1000

static int vr_add_rxbuf		__P((struct vr_softc *, int));

static void vr_rxeof		__P((struct vr_softc *));
static void vr_rxeoc		__P((struct vr_softc *));
static void vr_txeof		__P((struct vr_softc *));
static int vr_intr		__P((void *));
static void vr_start		__P((struct ifnet *));
static int vr_ioctl		__P((struct ifnet *, u_long, caddr_t));
static int vr_init		__P((struct ifnet *));
static void vr_stop		__P((struct ifnet *, int));
static void vr_rxdrain		__P((struct vr_softc *));
static void vr_watchdog		__P((struct ifnet *));
static void vr_tick		__P((void *));

static int vr_ifmedia_upd	__P((struct ifnet *));
static void vr_ifmedia_sts	__P((struct ifnet *, struct ifmediareq *));

static int vr_mii_readreg	__P((struct device *, int, int));
static void vr_mii_writereg	__P((struct device *, int, int, int));
static void vr_mii_statchg	__P((struct device *));

static void vr_setmulti		__P((struct vr_softc *));
static void vr_reset		__P((struct vr_softc *));

int	vr_copy_small = 0;

#define	VR_SETBIT(sc, reg, x)				\
	CSR_WRITE_1(sc, reg,				\
	    CSR_READ_1(sc, reg) | (x))

#define	VR_CLRBIT(sc, reg, x)				\
	CSR_WRITE_1(sc, reg,				\
	    CSR_READ_1(sc, reg) & ~(x))

#define	VR_SETBIT16(sc, reg, x)				\
	CSR_WRITE_2(sc, reg,				\
	    CSR_READ_2(sc, reg) | (x))

#define	VR_CLRBIT16(sc, reg, x)				\
	CSR_WRITE_2(sc, reg,				\
	    CSR_READ_2(sc, reg) & ~(x))

#define	VR_SETBIT32(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
	    CSR_READ_4(sc, reg) | (x))

#define	VR_CLRBIT32(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
	    CSR_READ_4(sc, reg) & ~(x))

/*
 * MII bit-bang glue.
 */
u_int32_t vr_mii_bitbang_read __P((struct device *));
void vr_mii_bitbang_write __P((struct device *, u_int32_t));

const struct mii_bitbang_ops vr_mii_bitbang_ops = {
	vr_mii_bitbang_read,
	vr_mii_bitbang_write,
	{
		VR_MIICMD_DATAOUT,	/* MII_BIT_MDO */
		VR_MIICMD_DATAIN,	/* MII_BIT_MDI */
		VR_MIICMD_CLK,		/* MII_BIT_MDC */
		VR_MIICMD_DIR,		/* MII_BIT_DIR_HOST_PHY */
		0,			/* MII_BIT_DIR_PHY_HOST */
	}
};
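
/*
 * Sketch (illustrative only): the common mii_bitbang code clocks MDIO
 * frames through the ops above; each bit is driven by writing the
 * MDO/MDC bits of VR_MIICMD via vr_mii_bitbang_write() below, and
 * input bits are sampled from MDI via vr_mii_bitbang_read().
 */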

u_int32_t
vr_mii_bitbang_read(self)
	struct device *self;
{
	struct vr_softc *sc = (void *) self;

	return (CSR_READ_1(sc, VR_MIICMD));
}

void
vr_mii_bitbang_write(self, val)
	struct device *self;
	u_int32_t val;
{
	struct vr_softc *sc = (void *) self;

	CSR_WRITE_1(sc, VR_MIICMD, (val & 0xff) | VR_MIICMD_DIRECTPGM);
}

/*
 * Read a PHY register through the MII.
 */
static int
vr_mii_readreg(self, phy, reg)
	struct device *self;
	int phy, reg;
{
	struct vr_softc *sc = (void *) self;

	CSR_WRITE_1(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
	return (mii_bitbang_readreg(self, &vr_mii_bitbang_ops, phy, reg));
}

/*
 * Write to a PHY register through the MII.
 */
static void
vr_mii_writereg(self, phy, reg, val)
	struct device *self;
	int phy, reg, val;
{
	struct vr_softc *sc = (void *) self;

	CSR_WRITE_1(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
	mii_bitbang_writereg(self, &vr_mii_bitbang_ops, phy, reg, val);
}

static void
vr_mii_statchg(self)
	struct device *self;
{
	struct vr_softc *sc = (struct vr_softc *)self;

	/*
	 * In order to fiddle with the 'full-duplex' bit in the netconfig
	 * register, we first have to put the transmit and/or receive logic
	 * in the idle state.
	 */
	VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON));

	if (sc->vr_mii.mii_media_active & IFM_FDX)
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
	else
		VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);

	if (sc->vr_ec.ec_if.if_flags & IFF_RUNNING)
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);
}

#define	vr_calchash(addr) \
	(ether_crc32_be((addr), ETHER_ADDR_LEN) >> 26)
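
/*
 * Worked example (illustrative only): vr_calchash() keeps the top 6 bits
 * of the big-endian CRC32 of the 6-byte address, giving a bit index in
 * the range 0..63; indices 0-31 select a bit in VR_MAR0 and 32-63 a bit
 * in VR_MAR1, which is exactly how vr_setmulti() programs the filter.
 */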

/*
 * Program the 64-bit multicast hash filter.
 */
static void
vr_setmulti(sc)
	struct vr_softc *sc;
{
	struct ifnet *ifp;
	int h = 0;
	u_int32_t hashes[2] = { 0, 0 };
	struct ether_multistep step;
	struct ether_multi *enm;
	int mcnt = 0;
	u_int8_t rxfilt;

	ifp = &sc->vr_ec.ec_if;

	rxfilt = CSR_READ_1(sc, VR_RXCFG);

	if (ifp->if_flags & IFF_PROMISC) {
allmulti:
		ifp->if_flags |= IFF_ALLMULTI;
		rxfilt |= VR_RXCFG_RX_MULTI;
		CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
		CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
		return;
	}

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, VR_MAR0, 0);
	CSR_WRITE_4(sc, VR_MAR1, 0);

	/* now program new ones */
	ETHER_FIRST_MULTI(step, &sc->vr_ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0)
			goto allmulti;

		h = vr_calchash(enm->enm_addrlo);

		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		ETHER_NEXT_MULTI(step, enm);
		mcnt++;
	}

	ifp->if_flags &= ~IFF_ALLMULTI;

	if (mcnt)
		rxfilt |= VR_RXCFG_RX_MULTI;
	else
		rxfilt &= ~VR_RXCFG_RX_MULTI;

	CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
	CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
	CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
}

static void
vr_reset(sc)
	struct vr_softc *sc;
{
	int i;

	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);

	for (i = 0; i < VR_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
			break;
	}
	if (i == VR_TIMEOUT) {
		if (sc->vr_revid < REV_ID_VT3065_A) {
			printf("%s: reset never completed!\n",
			    sc->vr_dev.dv_xname);
		} else {
			/* Use newer force reset command */
			printf("%s: using force reset command.\n",
			    sc->vr_dev.dv_xname);
			VR_SETBIT(sc, VR_MISC_CR1, VR_MISCCR1_FORSRST);
		}
	}

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 * Note: the length fields are only 11 bits wide, which means the
 * largest size we can specify is 2047. This is important because
 * MCLBYTES is 2048, so we have to subtract one otherwise we'll
 * overflow the field and make a mess.
 */
static int
vr_add_rxbuf(sc, i)
	struct vr_softc *sc;
	int i;
{
	struct vr_descsoft *ds = VR_DSRX(sc, i);
	struct mbuf *m_new;
	int error;

	MGETHDR(m_new, M_DONTWAIT, MT_DATA);
	if (m_new == NULL)
		return (ENOBUFS);

	MCLGET(m_new, M_DONTWAIT);
	if ((m_new->m_flags & M_EXT) == 0) {
		m_freem(m_new);
		return (ENOBUFS);
	}

	if (ds->ds_mbuf != NULL)
		bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);

	ds->ds_mbuf = m_new;

	error = bus_dmamap_load(sc->vr_dmat, ds->ds_dmamap,
	    m_new->m_ext.ext_buf, m_new->m_ext.ext_size, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: unable to load rx DMA map %d, error = %d\n",
		    sc->vr_dev.dv_xname, i, error);
		panic("vr_add_rxbuf");		/* XXX */
	}

	bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	VR_INIT_RXDESC(sc, i);

	return (0);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
static void
vr_rxeof(sc)
	struct vr_softc *sc;
{
	struct mbuf *m;
	struct ifnet *ifp;
	struct vr_desc *d;
	struct vr_descsoft *ds;
	int i, total_len;
	u_int32_t rxstat;

	ifp = &sc->vr_ec.ec_if;

	for (i = sc->vr_rxptr;; i = VR_NEXTRX(i)) {
		d = VR_CDRX(sc, i);
		ds = VR_DSRX(sc, i);

		VR_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		rxstat = le32toh(d->vr_status);

		if (rxstat & VR_RXSTAT_OWN) {
			/*
			 * We have processed all of the receive buffers.
			 */
			break;
		}

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (rxstat & VR_RXSTAT_RXERR) {
			const char *errstr;

			ifp->if_ierrors++;
			switch (rxstat & 0x000000FF) {
			case VR_RXSTAT_CRCERR:
				errstr = "crc error";
				break;
			case VR_RXSTAT_FRAMEALIGNERR:
				errstr = "frame alignment error";
				break;
			case VR_RXSTAT_FIFOOFLOW:
				errstr = "FIFO overflow";
				break;
			case VR_RXSTAT_GIANT:
				errstr = "received giant packet";
				break;
			case VR_RXSTAT_RUNT:
				errstr = "received runt packet";
				break;
			case VR_RXSTAT_BUSERR:
				errstr = "system bus error";
				break;
			case VR_RXSTAT_BUFFERR:
				errstr = "rx buffer error";
				break;
			default:
				errstr = "unknown rx error";
				break;
			}
			printf("%s: receive error: %s\n", sc->vr_dev.dv_xname,
			    errstr);

			VR_INIT_RXDESC(sc, i);

			continue;
		}

		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/* No errors; receive the packet. */
		total_len = VR_RXBYTES(le32toh(d->vr_status));

#ifdef __NO_STRICT_ALIGNMENT
		/*
		 * If the packet is small enough to fit in a
		 * single header mbuf, allocate one and copy
		 * the data into it.  This greatly reduces
		 * memory consumption when we receive lots
		 * of small packets.
		 *
		 * Otherwise, we add a new buffer to the receive
		 * chain.  If this fails, we drop the packet and
		 * recycle the old buffer.
		 */
		if (vr_copy_small != 0 && total_len <= MHLEN) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				goto dropit;
			memcpy(mtod(m, caddr_t),
			    mtod(ds->ds_mbuf, caddr_t), total_len);
			VR_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
		} else {
			m = ds->ds_mbuf;
			if (vr_add_rxbuf(sc, i) == ENOBUFS) {
 dropit:
				ifp->if_ierrors++;
				VR_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->vr_dmat,
				    ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}
		}
#else
		/*
		 * The Rhine's packet buffers must be 4-byte aligned.
		 * But this means that the data after the Ethernet header
		 * is misaligned.  We must allocate a new buffer and
		 * copy the data, shifted forward 2 bytes.
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
 dropit:
			ifp->if_ierrors++;
			VR_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
		if (total_len > (MHLEN - 2)) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(m);
				goto dropit;
			}
		}
		m->m_data += 2;

		/*
		 * Note that we use clusters for incoming frames, so the
		 * buffer is virtually contiguous.
		 */
		memcpy(mtod(m, caddr_t), mtod(ds->ds_mbuf, caddr_t),
		    total_len);

		/* Allow the receive descriptor to continue using its mbuf. */
		VR_INIT_RXDESC(sc, i);
		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
#endif /* __NO_STRICT_ALIGNMENT */

		/*
		 * The Rhine chip includes the FCS with every
		 * received packet.
		 */
		m->m_flags |= M_HASFCS;

		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = total_len;
#if NBPFILTER > 0
		/*
		 * Handle BPF listeners. Let the BPF user see the packet, but
		 * don't pass it up to the ether_input() layer unless it's
		 * a broadcast packet, multicast packet, matches our ethernet
		 * address or the interface is in promiscuous mode.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->vr_rxptr = i;
}

static void
vr_rxeoc(sc)
	struct vr_softc *sc;
{

	vr_rxeof(sc);
	VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
	CSR_WRITE_4(sc, VR_RXADDR, VR_CDRXADDR(sc, sc->vr_rxptr));
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO);
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */
static void
vr_txeof(sc)
	struct vr_softc *sc;
{
	struct ifnet *ifp = &sc->vr_ec.ec_if;
	struct vr_desc *d;
	struct vr_descsoft *ds;
	u_int32_t txstat;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	for (i = sc->vr_txdirty; sc->vr_txpending != 0;
	     i = VR_NEXTTX(i), sc->vr_txpending--) {
		d = VR_CDTX(sc, i);
		ds = VR_DSTX(sc, i);

		VR_CDTXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		txstat = le32toh(d->vr_status);
		if (txstat & VR_TXSTAT_OWN)
			break;

		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap,
		    0, ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);
		m_freem(ds->ds_mbuf);
		ds->ds_mbuf = NULL;

		if (txstat & VR_TXSTAT_ERRSUM) {
			ifp->if_oerrors++;
			if (txstat & VR_TXSTAT_DEFER)
				ifp->if_collisions++;
			if (txstat & VR_TXSTAT_LATECOLL)
				ifp->if_collisions++;
		}

		ifp->if_collisions += (txstat & VR_TXSTAT_COLLCNT) >> 3;
		ifp->if_opackets++;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->vr_txdirty = i;

	/*
	 * Cancel the watchdog timer if there are no pending
	 * transmissions.
	 */
	if (sc->vr_txpending == 0)
		ifp->if_timer = 0;
}

static int
vr_intr(arg)
	void *arg;
{
	struct vr_softc *sc;
	struct ifnet *ifp;
	u_int16_t status;
	int handled = 0, dotx = 0;

	sc = arg;
	ifp = &sc->vr_ec.ec_if;

	/* Suppress unwanted interrupts. */
	if ((ifp->if_flags & IFF_UP) == 0) {
		vr_stop(ifp, 1);
		return (0);
	}

	/* Disable interrupts. */
	CSR_WRITE_2(sc, VR_IMR, 0x0000);

	for (;;) {
		status = CSR_READ_2(sc, VR_ISR);
		if (status)
			CSR_WRITE_2(sc, VR_ISR, status);

		if ((status & VR_INTRS) == 0)
			break;

		handled = 1;

		if (status & VR_ISR_RX_OK)
			vr_rxeof(sc);

		if (status &
		    (VR_ISR_RX_ERR | VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW |
		     VR_ISR_RX_DROPPED))
			vr_rxeoc(sc);

		if (status & VR_ISR_TX_OK) {
			dotx = 1;
			vr_txeof(sc);
		}

		if (status & (VR_ISR_TX_UNDERRUN | VR_ISR_TX_ABRT)) {
			if (status & VR_ISR_TX_UNDERRUN)
				printf("%s: transmit underrun\n",
				    sc->vr_dev.dv_xname);
			if (status & VR_ISR_TX_ABRT)
				printf("%s: transmit aborted\n",
				    sc->vr_dev.dv_xname);
			ifp->if_oerrors++;
			dotx = 1;
			vr_txeof(sc);
			if (sc->vr_txpending) {
				VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON);
				VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO);
			}
			/*
			 * Unfortunately many cards get stuck after
			 * aborted transmits, so we reset them.
			 */
			if (status & VR_ISR_TX_ABRT) {
				printf("%s: restarting\n", sc->vr_dev.dv_xname);
				dotx = 0;
				(void) vr_init(ifp);
			}
		}

		if (status & VR_ISR_BUSERR) {
			printf("%s: PCI bus error\n", sc->vr_dev.dv_xname);
			/* vr_init() calls vr_start() */
			dotx = 0;
			(void) vr_init(ifp);
		}
	}

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

	if (dotx)
		vr_start(ifp);

	return (handled);
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */
static void
vr_start(ifp)
	struct ifnet *ifp;
{
	struct vr_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct vr_desc *d;
	struct vr_descsoft *ds;
	int error, firsttx, nexttx, opending;

	/*
	 * Remember the previous txpending and the first transmit
	 * descriptor we use.
	 */
	opending = sc->vr_txpending;
	firsttx = VR_NEXTTX(sc->vr_txlast);

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->vr_txpending < VR_NTXDESC) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		/*
		 * Get the next available transmit descriptor.
		 */
		nexttx = VR_NEXTTX(sc->vr_txlast);
		d = VR_CDTX(sc, nexttx);
		ds = VR_DSTX(sc, nexttx);

		/*
		 * Load the DMA map.  If this fails, the packet didn't
		 * fit in one DMA segment, and we need to copy.  Note,
		 * the packet must also be aligned.
		 * If the packet is too small, copy it too, so we're sure
		 * to have enough room for the pad buffer.
		 */
		if ((mtod(m0, uintptr_t) & 3) != 0 ||
		    m0->m_pkthdr.len < VR_MIN_FRAMELEN ||
		    bus_dmamap_load_mbuf(sc->vr_dmat, ds->ds_dmamap, m0,
		     BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->vr_dev.dv_xname);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->vr_dev.dv_xname);
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			/*
			 * The Rhine doesn't auto-pad, so we have to do this
			 * ourselves.
			 */
			if (m0->m_pkthdr.len < VR_MIN_FRAMELEN) {
				memset(mtod(m, caddr_t) + m0->m_pkthdr.len,
				    0, VR_MIN_FRAMELEN - m0->m_pkthdr.len);
				m->m_pkthdr.len = m->m_len = VR_MIN_FRAMELEN;
			}
			error = bus_dmamap_load_mbuf(sc->vr_dmat,
			    ds->ds_dmamap, m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->vr_dev.dv_xname, error);
				break;
			}
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif

		/*
		 * Fill in the transmit descriptor.
		 */
		d->vr_data = htole32(ds->ds_dmamap->dm_segs[0].ds_addr);
		d->vr_ctl = htole32(m0->m_pkthdr.len);
		d->vr_ctl |= htole32(VR_TXCTL_FIRSTFRAG | VR_TXCTL_LASTFRAG);

		/*
		 * If this is the first descriptor we're enqueuing,
		 * don't give it to the Rhine yet.  That could cause
		 * a race condition.  We'll do it below.
		 */
		if (nexttx == firsttx)
			d->vr_status = 0;
		else
			d->vr_status = htole32(VR_TXSTAT_OWN);

		VR_CDTXSYNC(sc, nexttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Advance the tx pointer. */
		sc->vr_txpending++;
		sc->vr_txlast = nexttx;
	}

	if (sc->vr_txpending == VR_NTXDESC) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->vr_txpending != opending) {
		/*
		 * We enqueued packets.  If the transmitter was idle,
		 * reset the txdirty pointer.
		 */
		if (opending == 0)
			sc->vr_txdirty = firsttx;

		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued.
		 */
		VR_CDTX(sc, sc->vr_txlast)->vr_ctl |= htole32(VR_TXCTL_FINT);
		VR_CDTXSYNC(sc, sc->vr_txlast,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * The entire packet chain is set up.  Give the
		 * first descriptor to the Rhine now.
		 */
		VR_CDTX(sc, firsttx)->vr_status = htole32(VR_TXSTAT_OWN);
		VR_CDTXSYNC(sc, firsttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Start the transmitter. */
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO);

		/* Set the watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
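
/*
 * Sketch of the descriptor handoff above (illustrative only): every
 * descriptor except the first is marked VR_TXSTAT_OWN as it is filled
 * in; the first descriptor's OWN bit is set only once the whole chain
 * is built, so the chip can never race into a half-constructed list.
 */
#if 0
	if (nexttx == firsttx)
		d->vr_status = 0;			/* hold back the gate */
	else
		d->vr_status = htole32(VR_TXSTAT_OWN);	/* chip may take it */
	/* ... after the loop, the chain is complete ... */
	VR_CDTX(sc, firsttx)->vr_status = htole32(VR_TXSTAT_OWN);
#endif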

/*
 * Initialize the interface.  Must be called at splnet.
 */
static int
vr_init(ifp)
	struct ifnet *ifp;
{
	struct vr_softc *sc = ifp->if_softc;
	struct vr_desc *d;
	struct vr_descsoft *ds;
	int i, error = 0;

	/* Cancel pending I/O. */
	vr_stop(ifp, 0);

	/* Reset the Rhine to a known state. */
	vr_reset(sc);

	/* set DMA length in BCR0 and BCR1 */
	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_DMA_LENGTH);
	VR_SETBIT(sc, VR_BCR0, VR_BCR0_DMA_STORENFWD);

	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_RX_THRESH);
	VR_SETBIT(sc, VR_BCR0, VR_BCR0_RXTH_128BYTES);

	VR_CLRBIT(sc, VR_BCR1, VR_BCR1_TX_THRESH);
	VR_SETBIT(sc, VR_BCR1, VR_BCR1_TXTH_STORENFWD);

	/* set DMA threshold length in RXCFG and TXCFG */
	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_128BYTES);

	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
	VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD);

	/*
	 * Initialize the transmit descriptor ring.  txlast is initialized
	 * to the end of the list so that it will wrap around to the first
	 * descriptor when the first packet is transmitted.
	 */
	for (i = 0; i < VR_NTXDESC; i++) {
		d = VR_CDTX(sc, i);
		memset(d, 0, sizeof(struct vr_desc));
		d->vr_next = htole32(VR_CDTXADDR(sc, VR_NEXTTX(i)));
		VR_CDTXSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->vr_txpending = 0;
	sc->vr_txdirty = 0;
	sc->vr_txlast = VR_NTXDESC - 1;

	/*
	 * Initialize the receive descriptor ring.
	 */
	for (i = 0; i < VR_NRXDESC; i++) {
		ds = VR_DSRX(sc, i);
		if (ds->ds_mbuf == NULL) {
			if ((error = vr_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    sc->vr_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				vr_rxdrain(sc);
				goto out;
			}
		} else
			VR_INIT_RXDESC(sc, i);
	}
	sc->vr_rxptr = 0;

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
	else
		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);

	/* Set capture broadcast bit to capture broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST)
		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
	else
		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);

	/* Program the multicast filter, if necessary. */
	vr_setmulti(sc);

	/* Give the transmit and receive rings to the Rhine. */
	CSR_WRITE_4(sc, VR_RXADDR, VR_CDRXADDR(sc, sc->vr_rxptr));
	CSR_WRITE_4(sc, VR_TXADDR, VR_CDTXADDR(sc, VR_NEXTTX(sc->vr_txlast)));

	/* Set current media. */
	mii_mediachg(&sc->vr_mii);

	/* Enable receiver and transmitter. */
	CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START|
				    VR_CMD_TX_ON|VR_CMD_RX_ON|
				    VR_CMD_RX_GO);

	/* Enable interrupts. */
	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Start one second timer. */
	callout_reset(&sc->vr_tick_ch, hz, vr_tick, sc);

	/* Attempt to start output on the interface. */
	vr_start(ifp);

 out:
	if (error)
		printf("%s: interface not running\n", sc->vr_dev.dv_xname);
	return (error);
}

/*
 * Set media options.
 */
static int
vr_ifmedia_upd(ifp)
	struct ifnet *ifp;
{
	struct vr_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_UP)
		mii_mediachg(&sc->vr_mii);
	return (0);
}

/*
 * Report current media status.
 */
static void
vr_ifmedia_sts(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct vr_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->vr_mii);
	ifmr->ifm_status = sc->vr_mii.mii_media_status;
	ifmr->ifm_active = sc->vr_mii.mii_media_active;
}

static int
vr_ioctl(ifp, command, data)
	struct ifnet *ifp;
	u_long command;
	caddr_t data;
{
	struct vr_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (command) {
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->vr_mii.mii_media, command);
		break;

	default:
		error = ether_ioctl(ifp, command, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			vr_setmulti(sc);
			error = 0;
		}
		break;
	}

	splx(s);
	return (error);
}

static void
vr_watchdog(ifp)
	struct ifnet *ifp;
{
	struct vr_softc *sc = ifp->if_softc;

	printf("%s: device timeout\n", sc->vr_dev.dv_xname);
	ifp->if_oerrors++;

	(void) vr_init(ifp);
}

/*
 * One second timer, used to tick MII.
 */
static void
vr_tick(arg)
	void *arg;
{
	struct vr_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->vr_mii);
	splx(s);

	callout_reset(&sc->vr_tick_ch, hz, vr_tick, sc);
}

/*
 * Drain the receive queue.
 */
static void
vr_rxdrain(sc)
	struct vr_softc *sc;
{
	struct vr_descsoft *ds;
	int i;

	for (i = 0; i < VR_NRXDESC; i++) {
		ds = VR_DSRX(sc, i);
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * transmit lists.
 */
static void
vr_stop(ifp, disable)
	struct ifnet *ifp;
	int disable;
{
	struct vr_softc *sc = ifp->if_softc;
	struct vr_descsoft *ds;
	int i;

	/* Cancel one second timer. */
	callout_stop(&sc->vr_tick_ch);

	/* Down the MII. */
	mii_down(&sc->vr_mii);

	ifp = &sc->vr_ec.ec_if;
	ifp->if_timer = 0;

	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP);
	VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON));
	CSR_WRITE_2(sc, VR_IMR, 0x0000);
	CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
	CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < VR_NTXDESC; i++) {
		ds = VR_DSTX(sc, i);
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}

	if (disable)
		vr_rxdrain(sc);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

static struct vr_type *vr_lookup __P((struct pci_attach_args *));
static int vr_probe __P((struct device *, struct cfdata *, void *));
static void vr_attach __P((struct device *, struct device *, void *));
static void vr_shutdown __P((void *));

CFATTACH_DECL(vr, sizeof (struct vr_softc),
    vr_probe, vr_attach, NULL, NULL);

static struct vr_type *
vr_lookup(pa)
	struct pci_attach_args *pa;
{
	struct vr_type *vrt;

	for (vrt = vr_devs; vrt->vr_name != NULL; vrt++) {
		if (PCI_VENDOR(pa->pa_id) == vrt->vr_vid &&
		    PCI_PRODUCT(pa->pa_id) == vrt->vr_did)
			return (vrt);
	}
	return (NULL);
}

static int
vr_probe(parent, match, aux)
	struct device *parent;
	struct cfdata *match;
	void *aux;
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;

	if (vr_lookup(pa) != NULL)
		return (1);

	return (0);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
vr_shutdown(arg)
	void *arg;
{
	struct vr_softc *sc = (struct vr_softc *)arg;

	vr_stop(&sc->vr_ec.ec_if, 1);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static void
vr_attach(parent, self, aux)
	struct device *parent;
	struct device *self;
	void *aux;
{
	struct vr_softc *sc = (struct vr_softc *) self;
	struct pci_attach_args *pa = (struct pci_attach_args *) aux;
	bus_dma_segment_t seg;
	struct vr_type *vrt;
	u_int32_t pmreg, reg;
	struct ifnet *ifp;
	u_char eaddr[ETHER_ADDR_LEN];
	int i, rseg, error;

#define	PCI_CONF_WRITE(r, v)	pci_conf_write(pa->pa_pc, pa->pa_tag, (r), (v))
#define	PCI_CONF_READ(r)	pci_conf_read(pa->pa_pc, pa->pa_tag, (r))

	callout_init(&sc->vr_tick_ch);

	vrt = vr_lookup(pa);
	if (vrt == NULL) {
		printf("\n");
		panic("vr_attach: impossible");
	}

	printf(": %s Ethernet\n", vrt->vr_name);

	/*
	 * Handle power management nonsense.
	 */

	if (pci_get_capability(pa->pa_pc, pa->pa_tag,
	    PCI_CAP_PWRMGMT, &pmreg, 0)) {
		reg = PCI_CONF_READ(pmreg + PCI_PMCSR);
		if ((reg & PCI_PMCSR_STATE_MASK) != PCI_PMCSR_STATE_D0) {
			u_int32_t iobase, membase, irq;

			/* Save important PCI config data. */
			iobase = PCI_CONF_READ(VR_PCI_LOIO);
			membase = PCI_CONF_READ(VR_PCI_LOMEM);
			irq = PCI_CONF_READ(PCI_INTERRUPT_REG);

			/* Reset the power state. */
			printf("%s: chip is in D%d power mode "
			    "-- setting to D0\n",
			    sc->vr_dev.dv_xname, reg & PCI_PMCSR_STATE_MASK);
			reg = (reg & ~PCI_PMCSR_STATE_MASK) |
			    PCI_PMCSR_STATE_D0;
			PCI_CONF_WRITE(pmreg + PCI_PMCSR, reg);

			/* Restore PCI config data. */
			PCI_CONF_WRITE(VR_PCI_LOIO, iobase);
			PCI_CONF_WRITE(VR_PCI_LOMEM, membase);
			PCI_CONF_WRITE(PCI_INTERRUPT_REG, irq);
		}
	}

	/* Make sure bus mastering is enabled. */
	reg = PCI_CONF_READ(PCI_COMMAND_STATUS_REG);
	reg |= PCI_COMMAND_MASTER_ENABLE;
	PCI_CONF_WRITE(PCI_COMMAND_STATUS_REG, reg);

	/* Get revision */
	sc->vr_revid = PCI_REVISION(pa->pa_class);

	/*
	 * Map control/status registers.
	 */
	{
		bus_space_tag_t iot, memt;
		bus_space_handle_t ioh, memh;
		int ioh_valid, memh_valid;
		pci_intr_handle_t intrhandle;
		const char *intrstr;

		ioh_valid = (pci_mapreg_map(pa, VR_PCI_LOIO,
			PCI_MAPREG_TYPE_IO, 0,
			&iot, &ioh, NULL, NULL) == 0);
		memh_valid = (pci_mapreg_map(pa, VR_PCI_LOMEM,
			PCI_MAPREG_TYPE_MEM |
			PCI_MAPREG_MEM_TYPE_32BIT,
			0, &memt, &memh, NULL, NULL) == 0);
#if defined(VR_USEIOSPACE)
		if (ioh_valid) {
			sc->vr_bst = iot;
			sc->vr_bsh = ioh;
		} else if (memh_valid) {
			sc->vr_bst = memt;
			sc->vr_bsh = memh;
		}
#else
		if (memh_valid) {
			sc->vr_bst = memt;
			sc->vr_bsh = memh;
		} else if (ioh_valid) {
			sc->vr_bst = iot;
			sc->vr_bsh = ioh;
		}
#endif
		else {
			printf(": unable to map device registers\n");
			return;
		}

		/* Allocate interrupt */
		if (pci_intr_map(pa, &intrhandle)) {
			printf("%s: couldn't map interrupt\n",
				sc->vr_dev.dv_xname);
			return;
		}
		intrstr = pci_intr_string(pa->pa_pc, intrhandle);
		sc->vr_ih = pci_intr_establish(pa->pa_pc, intrhandle, IPL_NET,
						vr_intr, sc);
		if (sc->vr_ih == NULL) {
			printf("%s: couldn't establish interrupt",
				sc->vr_dev.dv_xname);
			if (intrstr != NULL)
				printf(" at %s", intrstr);
			printf("\n");
			return;
		}
		printf("%s: interrupting at %s\n",
			sc->vr_dev.dv_xname, intrstr);
	}

	/*
	 * Windows may put the chip in suspend mode when it
	 * shuts down. Be sure to kick it in the head to wake it
	 * up again.
	 */
	VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1));

	/* Reset the adapter. */
	vr_reset(sc);

	/*
	 * Get station address. The way the Rhine chips work,
	 * you're not allowed to directly access the EEPROM once
	 * they've been programmed a special way. Consequently,
	 * we need to read the node address from the PAR0 and PAR1
	 * registers.
	 *
	 * XXXSCW: On the Rhine III, setting VR_EECSR_LOAD forces a reload
	 *         of the *whole* EEPROM, not just the MAC address. This is
	 *         pretty pointless since the chip does this automatically
	 *         at powerup/reset.
	 *         I suspect the same thing applies to the other Rhine
	 *         variants, but in the absence of a data sheet for those
	 *         (and the lack of anyone else noticing the problems this
	 *         causes) I'm going to retain the old behaviour for the
	 *         other parts.
	 */
	if (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_VIATECH_VT6105 &&
	    PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_VIATECH_VT6102) {
		VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
		DELAY(200);
	}
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);

	/*
	 * A Rhine chip was detected. Inform the world.
	 */
	printf("%s: Ethernet address: %s\n",
		sc->vr_dev.dv_xname, ether_sprintf(eaddr));

	memcpy(sc->vr_enaddr, eaddr, ETHER_ADDR_LEN);

	sc->vr_dmat = pa->pa_dmat;

	/*
	 * Allocate the control data structures, and create and load
	 * the DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->vr_dmat,
	    sizeof(struct vr_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->vr_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->vr_dmat, &seg, rseg,
	    sizeof(struct vr_control_data), (caddr_t *)&sc->vr_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->vr_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->vr_dmat,
	    sizeof(struct vr_control_data), 1,
	    sizeof(struct vr_control_data), 0, 0,
	    &sc->vr_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->vr_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->vr_dmat, sc->vr_cddmamap,
	    sc->vr_control_data, sizeof(struct vr_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->vr_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < VR_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->vr_dmat, MCLBYTES,
		    1, MCLBYTES, 0, 0,
		    &VR_DSTX(sc, i)->ds_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->vr_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < VR_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->vr_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0,
		    &VR_DSRX(sc, i)->ds_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->vr_dev.dv_xname, i, error);
			goto fail_5;
		}
		VR_DSRX(sc, i)->ds_mbuf = NULL;
	}

	ifp = &sc->vr_ec.ec_if;
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vr_ioctl;
	ifp->if_start = vr_start;
	ifp->if_watchdog = vr_watchdog;
	ifp->if_init = vr_init;
	ifp->if_stop = vr_stop;
	IFQ_SET_READY(&ifp->if_snd);

	strcpy(ifp->if_xname, sc->vr_dev.dv_xname);

	/*
	 * Initialize MII/media info.
	 */
	sc->vr_mii.mii_ifp = ifp;
	sc->vr_mii.mii_readreg = vr_mii_readreg;
	sc->vr_mii.mii_writereg = vr_mii_writereg;
	sc->vr_mii.mii_statchg = vr_mii_statchg;
	ifmedia_init(&sc->vr_mii.mii_media, IFM_IMASK, vr_ifmedia_upd,
		vr_ifmedia_sts);
	mii_attach(&sc->vr_dev, &sc->vr_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_FORCEANEG);
	if (LIST_FIRST(&sc->vr_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->vr_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->vr_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->vr_mii.mii_media, IFM_ETHER|IFM_AUTO);

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, sc->vr_enaddr);

	sc->vr_ats = shutdownhook_establish(vr_shutdown, sc);
	if (sc->vr_ats == NULL)
		printf("%s: warning: couldn't establish shutdown hook\n",
			sc->vr_dev.dv_xname);
	return;

 fail_5:
	for (i = 0; i < VR_NRXDESC; i++) {
		if (sc->vr_rxsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->vr_dmat,
			    sc->vr_rxsoft[i].ds_dmamap);
	}
 fail_4:
	for (i = 0; i < VR_NTXDESC; i++) {
		if (sc->vr_txsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->vr_dmat,
			    sc->vr_txsoft[i].ds_dmamap);
	}
	bus_dmamap_unload(sc->vr_dmat, sc->vr_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->vr_dmat, sc->vr_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->vr_dmat, (caddr_t)sc->vr_control_data,
	    sizeof(struct vr_control_data));
 fail_1:
	bus_dmamem_free(sc->vr_dmat, &seg, rseg);
 fail_0:
	return;
}