1 /*	$NetBSD: if_vr.c,v 1.35 2000/06/28 16:08:46 mrg Exp $	*/
2 
3 /*-
4  * Copyright (c) 1998, 1999 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9  * NASA Ames Research Center.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. All advertising materials mentioning features or use of this software
20  *    must display the following acknowledgement:
21  *	This product includes software developed by the NetBSD
22  *	Foundation, Inc. and its contributors.
23  * 4. Neither the name of The NetBSD Foundation nor the names of its
24  *    contributors may be used to endorse or promote products derived
25  *    from this software without specific prior written permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37  * POSSIBILITY OF SUCH DAMAGE.
38  */
39 
40 /*
41  * Copyright (c) 1997, 1998
42  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
43  *
44  * Redistribution and use in source and binary forms, with or without
45  * modification, are permitted provided that the following conditions
46  * are met:
47  * 1. Redistributions of source code must retain the above copyright
48  *    notice, this list of conditions and the following disclaimer.
49  * 2. Redistributions in binary form must reproduce the above copyright
50  *    notice, this list of conditions and the following disclaimer in the
51  *    documentation and/or other materials provided with the distribution.
52  * 3. All advertising materials mentioning features or use of this software
53  *    must display the following acknowledgement:
54  *	This product includes software developed by Bill Paul.
55  * 4. Neither the name of the author nor the names of any co-contributors
56  *    may be used to endorse or promote products derived from this software
57  *    without specific prior written permission.
58  *
59  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
60  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
61  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
62  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
63  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
64  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
65  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
66  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
67  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
68  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
69  * THE POSSIBILITY OF SUCH DAMAGE.
70  *
71  *	$FreeBSD: if_vr.c,v 1.7 1999/01/10 18:51:49 wpaul Exp $
72  */
73 
74 /*
75  * VIA Rhine fast ethernet PCI NIC driver
76  *
77  * Supports various network adapters based on the VIA Rhine
78  * and Rhine II PCI controllers, including the D-Link DFE530TX.
79  * Datasheets are available at http://www.via.com.tw.
80  *
81  * Written by Bill Paul <wpaul@ctr.columbia.edu>
82  * Electrical Engineering Department
83  * Columbia University, New York City
84  */
85 
86 /*
87  * The VIA Rhine controllers are similar in some respects to the
88  * DEC tulip chips, except less complicated. The controller
89  * uses an MII bus and an external physical layer interface. The
90  * receiver has a one entry perfect filter and a 64-bit hash table
91  * multicast filter. Transmit and receive descriptors are similar
92  * to the tulip.
93  *
94  * The Rhine has a serious flaw in its transmit DMA mechanism:
95  * transmit buffers must be longword aligned. Unfortunately,
96  * the kernel doesn't guarantee that mbufs will be filled in starting
97  * at longword boundaries, so we have to do a buffer copy before
98  * transmission.
99  *
100  * Apparently, the receive DMA mechanism also has the same flaw.  This
101  * means that on systems with strict alignment requirements, incoming
102  * frames must be copied to a new buffer which shifts the data forward
103  * 2 bytes so that the payload is aligned on a 4-byte boundary.
104  */
105 
106 #include "opt_inet.h"
107 
108 #include <sys/param.h>
109 #include <sys/systm.h>
110 #include <sys/callout.h>
111 #include <sys/sockio.h>
112 #include <sys/mbuf.h>
113 #include <sys/malloc.h>
114 #include <sys/kernel.h>
115 #include <sys/socket.h>
116 #include <sys/device.h>
117 
118 #include <uvm/uvm_extern.h>		/* for PAGE_SIZE */
119 
120 #include <net/if.h>
121 #include <net/if_arp.h>
122 #include <net/if_dl.h>
123 #include <net/if_media.h>
124 #include <net/if_ether.h>
125 
126 #if defined(INET)
127 #include <netinet/in.h>
128 #include <netinet/if_inarp.h>
129 #endif
130 
131 #include "bpfilter.h"
132 #if NBPFILTER > 0
133 #include <net/bpf.h>
134 #endif
135 
136 #include <machine/bus.h>
137 #include <machine/intr.h>
138 #include <machine/endian.h>
139 
140 #include <dev/mii/mii.h>
141 #include <dev/mii/miivar.h>
142 #include <dev/mii/mii_bitbang.h>
143 
144 #include <dev/pci/pcireg.h>
145 #include <dev/pci/pcivar.h>
146 #include <dev/pci/pcidevs.h>
147 
148 #include <dev/pci/if_vrreg.h>
149 
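/*
 * Prefer I/O space over memory space for register access; see the
 * register mapping logic in vr_attach().
 */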
150 #define	VR_USEIOSPACE
151 
152 /*
153  * Various supported device vendors/types and their names.
154  */
155 static struct vr_type {
156 	pci_vendor_id_t		vr_vid;
157 	pci_product_id_t	vr_did;
158 	const char		*vr_name;
159 } vr_devs[] = {
160 	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT3043,
161 		"VIA VT3043 (Rhine) 10/100" },
162 	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT86C100A,
163 		"VIA VT86C100A (Rhine-II) 10/100" },
164 	{ 0, 0, NULL }
165 };
166 
167 /*
168  * Transmit descriptor list size.
169  */
170 #define	VR_NTXDESC		64
171 #define	VR_NTXDESC_MASK		(VR_NTXDESC - 1)
172 #define	VR_NEXTTX(x)		(((x) + 1) & VR_NTXDESC_MASK)
173 
174 /*
175  * Receive descriptor list size.
176  */
177 #define	VR_NRXDESC		64
178 #define	VR_NRXDESC_MASK		(VR_NRXDESC - 1)
179 #define	VR_NEXTRX(x)		(((x) + 1) & VR_NRXDESC_MASK)
180 
181 /*
182  * Control data structures that are DMA'd to the Rhine chip.  We allocate
183  * them in a single clump that maps to a single DMA segment to make several
184  * things easier.
185  *
186  * Note that since we always copy outgoing packets to aligned transmit
187  * buffers, we can reduce the transmit descriptors to one per packet.
188  */
189 struct vr_control_data {
190 	struct vr_desc		vr_txdescs[VR_NTXDESC];
191 	struct vr_desc		vr_rxdescs[VR_NRXDESC];
192 };
193 
194 #define	VR_CDOFF(x)		offsetof(struct vr_control_data, x)
195 #define	VR_CDTXOFF(x)		VR_CDOFF(vr_txdescs[(x)])
196 #define	VR_CDRXOFF(x)		VR_CDOFF(vr_rxdescs[(x)])
197 
198 /*
199  * Software state of transmit and receive descriptors.
200  */
201 struct vr_descsoft {
202 	struct mbuf		*ds_mbuf;	/* head of mbuf chain */
203 	bus_dmamap_t		ds_dmamap;	/* our DMA map */
204 };
205 
206 struct vr_softc {
207 	struct device		vr_dev;		/* generic device glue */
208 	void			*vr_ih;		/* interrupt cookie */
209 	void			*vr_ats;	/* shutdown hook */
210 	bus_space_tag_t		vr_bst;		/* bus space tag */
211 	bus_space_handle_t	vr_bsh;		/* bus space handle */
212 	bus_dma_tag_t		vr_dmat;	/* bus DMA tag */
213 	pci_chipset_tag_t	vr_pc;		/* PCI chipset info */
214 	struct ethercom		vr_ec;		/* Ethernet common info */
215 	u_int8_t 		vr_enaddr[ETHER_ADDR_LEN];
216 	struct mii_data		vr_mii;		/* MII/media info */
217 
218 	struct callout		vr_tick_ch;	/* tick callout */
219 
220 	bus_dmamap_t		vr_cddmamap;	/* control data DMA map */
221 #define	vr_cddma	vr_cddmamap->dm_segs[0].ds_addr
222 
223 	/*
224 	 * Software state for transmit and receive descriptors.
225 	 */
226 	struct vr_descsoft	vr_txsoft[VR_NTXDESC];
227 	struct vr_descsoft	vr_rxsoft[VR_NRXDESC];
228 
229 	/*
230 	 * Control data structures.
231 	 */
232 	struct vr_control_data	*vr_control_data;
233 
234 	int	vr_txpending;		/* number of TX requests pending */
235 	int	vr_txdirty;		/* first dirty TX descriptor */
236 	int	vr_txlast;		/* last used TX descriptor */
237 
238 	int	vr_rxptr;		/* next ready RX descriptor */
239 };
240 
241 #define	VR_CDTXADDR(sc, x)	((sc)->vr_cddma + VR_CDTXOFF((x)))
242 #define	VR_CDRXADDR(sc, x)	((sc)->vr_cddma + VR_CDRXOFF((x)))
243 
244 #define	VR_CDTX(sc, x)		(&(sc)->vr_control_data->vr_txdescs[(x)])
245 #define	VR_CDRX(sc, x)		(&(sc)->vr_control_data->vr_rxdescs[(x)])
246 
247 #define	VR_DSTX(sc, x)		(&(sc)->vr_txsoft[(x)])
248 #define	VR_DSRX(sc, x)		(&(sc)->vr_rxsoft[(x)])
249 
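/*
 * Sync a single transmit or receive descriptor within the control-data
 * DMA map.
 */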
250 #define	VR_CDTXSYNC(sc, x, ops)						\
251 	bus_dmamap_sync((sc)->vr_dmat, (sc)->vr_cddmamap,		\
252 	    VR_CDTXOFF((x)), sizeof(struct vr_desc), (ops))
253 
254 #define	VR_CDRXSYNC(sc, x, ops)						\
255 	bus_dmamap_sync((sc)->vr_dmat, (sc)->vr_cddmamap,		\
256 	    VR_CDRXOFF((x)), sizeof(struct vr_desc), (ops))
257 
258 /*
259  * Note we rely on MCLBYTES being a power of two below.
260  */
261 #define	VR_INIT_RXDESC(sc, i)						\
262 do {									\
263 	struct vr_desc *__d = VR_CDRX((sc), (i));			\
264 	struct vr_descsoft *__ds = VR_DSRX((sc), (i));			\
265 									\
266 	__d->vr_next = htole32(VR_CDRXADDR((sc), VR_NEXTRX((i))));	\
267 	__d->vr_status = htole32(VR_RXSTAT_FIRSTFRAG |			\
268 	    VR_RXSTAT_LASTFRAG | VR_RXSTAT_OWN);			\
269 	__d->vr_data = htole32(__ds->ds_dmamap->dm_segs[0].ds_addr);	\
270 	__d->vr_ctl = htole32(VR_RXCTL_CHAIN | VR_RXCTL_RX_INTR |	\
271 	    ((MCLBYTES - 1) & VR_RXCTL_BUFLEN));			\
272 	VR_CDRXSYNC((sc), (i), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
273 } while (0)
274 
275 /*
276  * register space access macros
277  */
278 #define	CSR_WRITE_4(sc, reg, val)					\
279 	bus_space_write_4(sc->vr_bst, sc->vr_bsh, reg, val)
280 #define	CSR_WRITE_2(sc, reg, val)					\
281 	bus_space_write_2(sc->vr_bst, sc->vr_bsh, reg, val)
282 #define	CSR_WRITE_1(sc, reg, val)					\
283 	bus_space_write_1(sc->vr_bst, sc->vr_bsh, reg, val)
284 
285 #define	CSR_READ_4(sc, reg)						\
286 	bus_space_read_4(sc->vr_bst, sc->vr_bsh, reg)
287 #define	CSR_READ_2(sc, reg)						\
288 	bus_space_read_2(sc->vr_bst, sc->vr_bsh, reg)
289 #define	CSR_READ_1(sc, reg)						\
290 	bus_space_read_1(sc->vr_bst, sc->vr_bsh, reg)
291 
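/* Maximum number of 10us polls to wait for a chip reset to complete. */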
292 #define	VR_TIMEOUT		1000
293 
294 static int vr_add_rxbuf		__P((struct vr_softc *, int));
295 
296 static void vr_rxeof		__P((struct vr_softc *));
297 static void vr_rxeoc		__P((struct vr_softc *));
298 static void vr_txeof		__P((struct vr_softc *));
299 static int vr_intr		__P((void *));
300 static void vr_start		__P((struct ifnet *));
301 static int vr_ioctl		__P((struct ifnet *, u_long, caddr_t));
302 static int vr_init		__P((struct vr_softc *));
303 static void vr_stop		__P((struct vr_softc *, int));
304 static void vr_rxdrain		__P((struct vr_softc *));
305 static void vr_watchdog		__P((struct ifnet *));
306 static void vr_tick		__P((void *));
307 
308 static int vr_ifmedia_upd	__P((struct ifnet *));
309 static void vr_ifmedia_sts	__P((struct ifnet *, struct ifmediareq *));
310 
311 static int vr_mii_readreg	__P((struct device *, int, int));
312 static void vr_mii_writereg	__P((struct device *, int, int, int));
313 static void vr_mii_statchg	__P((struct device *));
314 
315 static u_int8_t vr_calchash	__P((u_int8_t *));
316 static void vr_setmulti		__P((struct vr_softc *));
317 static void vr_reset		__P((struct vr_softc *));
318 
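/*
 * When non-zero, received frames that fit in a header mbuf are copied out
 * of their cluster in vr_rxeof() (on machines without strict alignment
 * requirements), trading a copy for lower mbuf cluster consumption under
 * small-packet loads.
 */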
319 int	vr_copy_small = 0;
320 
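/*
 * Read-modify-write helpers for setting and clearing bits in 8-, 16-,
 * and 32-bit CSRs.
 */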
321 #define	VR_SETBIT(sc, reg, x)				\
322 	CSR_WRITE_1(sc, reg,				\
323 		CSR_READ_1(sc, reg) | x)
324 
325 #define	VR_CLRBIT(sc, reg, x)				\
326 	CSR_WRITE_1(sc, reg,				\
327 		CSR_READ_1(sc, reg) & ~x)
328 
329 #define	VR_SETBIT16(sc, reg, x)				\
330 	CSR_WRITE_2(sc, reg,				\
331 		CSR_READ_2(sc, reg) | x)
332 
333 #define	VR_CLRBIT16(sc, reg, x)				\
334 	CSR_WRITE_2(sc, reg,				\
335 		CSR_READ_2(sc, reg) & ~x)
336 
337 #define	VR_SETBIT32(sc, reg, x)				\
338 	CSR_WRITE_4(sc, reg,				\
339 		CSR_READ_4(sc, reg) | x)
340 
341 #define	VR_CLRBIT32(sc, reg, x)				\
342 	CSR_WRITE_4(sc, reg,				\
343 		CSR_READ_4(sc, reg) & ~x)
344 
345 /*
346  * MII bit-bang glue.
347  */
348 u_int32_t vr_mii_bitbang_read __P((struct device *));
349 void vr_mii_bitbang_write __P((struct device *, u_int32_t));
350 
351 const struct mii_bitbang_ops vr_mii_bitbang_ops = {
352 	vr_mii_bitbang_read,
353 	vr_mii_bitbang_write,
354 	{
355 		VR_MIICMD_DATAOUT,	/* MII_BIT_MDO */
356 		VR_MIICMD_DATAIN,	/* MII_BIT_MDI */
357 		VR_MIICMD_CLK,		/* MII_BIT_MDC */
358 		VR_MIICMD_DIR,		/* MII_BIT_DIR_HOST_PHY */
359 		0,			/* MII_BIT_DIR_PHY_HOST */
360 	}
361 };
362 
363 u_int32_t
364 vr_mii_bitbang_read(self)
365 	struct device *self;
366 {
367 	struct vr_softc *sc = (void *) self;
368 
369 	return (CSR_READ_1(sc, VR_MIICMD));
370 }
371 
372 void
373 vr_mii_bitbang_write(self, val)
374 	struct device *self;
375 	u_int32_t val;
376 {
377 	struct vr_softc *sc = (void *) self;
378 
379 	CSR_WRITE_1(sc, VR_MIICMD, (val & 0xff) | VR_MIICMD_DIRECTPGM);
380 }
381 
382 /*
383  * Read a PHY register through the MII.
384  */
385 static int
386 vr_mii_readreg(self, phy, reg)
387 	struct device *self;
388 	int phy, reg;
389 {
390 	struct vr_softc *sc = (void *) self;
391 
392 	CSR_WRITE_1(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
393 	return (mii_bitbang_readreg(self, &vr_mii_bitbang_ops, phy, reg));
394 }
395 
396 /*
397  * Write to a PHY register through the MII.
398  */
399 static void
400 vr_mii_writereg(self, phy, reg, val)
401 	struct device *self;
402 	int phy, reg, val;
403 {
404 	struct vr_softc *sc = (void *) self;
405 
406 	CSR_WRITE_1(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
407 	mii_bitbang_writereg(self, &vr_mii_bitbang_ops, phy, reg, val);
408 }
409 
410 static void
411 vr_mii_statchg(self)
412 	struct device *self;
413 {
414 	struct vr_softc *sc = (struct vr_softc *)self;
415 
416 	/*
417 	 * In order to fiddle with the 'full-duplex' bit in the netconfig
418 	 * register, we first have to put the transmit and/or receive logic
419 	 * in the idle state.
420 	 */
421 	VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON));
422 
423 	if (sc->vr_mii.mii_media_active & IFM_FDX)
424 		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
425 	else
426 		VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
427 
428 	if (sc->vr_ec.ec_if.if_flags & IFF_RUNNING)
429 		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);
430 }
431 
432 /*
433  * Calculate CRC of a multicast group address, return the upper 6 bits.
434  */
435 static u_int8_t
436 vr_calchash(addr)
437 	u_int8_t *addr;
438 {
439 	u_int32_t crc, carry;
440 	int i, j;
441 	u_int8_t c;
442 
443 	/* Compute CRC for the address value. */
444 	crc = 0xFFFFFFFF; /* initial value */
445 
446 	for (i = 0; i < 6; i++) {
447 		c = *(addr + i);
448 		for (j = 0; j < 8; j++) {
449 			carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
450 			crc <<= 1;
451 			c >>= 1;
452 			if (carry)
453 				crc = (crc ^ 0x04c11db6) | carry;
454 		}
455 	}
456 
457 	/* return the filter bit position */
458 	return ((crc >> 26) & 0x0000003F);
459 }
460 
461 /*
462  * Program the 64-bit multicast hash filter.
463  */
464 static void
465 vr_setmulti(sc)
466 	struct vr_softc *sc;
467 {
468 	struct ifnet *ifp;
469 	int h = 0;
470 	u_int32_t hashes[2] = { 0, 0 };
471 	struct ether_multistep step;
472 	struct ether_multi *enm;
473 	int mcnt = 0;
474 	u_int8_t rxfilt;
475 
476 	ifp = &sc->vr_ec.ec_if;
477 
478 	rxfilt = CSR_READ_1(sc, VR_RXCFG);
479 
480 	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
481 		rxfilt |= VR_RXCFG_RX_MULTI;
482 		CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
483 		CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
484 		CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
485 		return;
486 	}
487 
488 	/* first, zot all the existing hash bits */
489 	CSR_WRITE_4(sc, VR_MAR0, 0);
490 	CSR_WRITE_4(sc, VR_MAR1, 0);
491 
492 	/* now program new ones */
493 	ETHER_FIRST_MULTI(step, &sc->vr_ec, enm);
494 	while (enm != NULL) {
495 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 6) != 0) {
496 			/* Can't hash a range; skip it, don't loop forever. */
497 			ETHER_NEXT_MULTI(step, enm);
			continue;
		}
498 		h = vr_calchash(enm->enm_addrlo);
499 
500 		if (h < 32)
501 			hashes[0] |= (1 << h);
502 		else
503 			hashes[1] |= (1 << (h - 32));
504 		ETHER_NEXT_MULTI(step, enm);
505 		mcnt++;
506 	}
507 
508 	if (mcnt)
509 		rxfilt |= VR_RXCFG_RX_MULTI;
510 	else
511 		rxfilt &= ~VR_RXCFG_RX_MULTI;
512 
513 	CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
514 	CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
515 	CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
516 }
517 
518 static void
519 vr_reset(sc)
520 	struct vr_softc *sc;
521 {
522 	int i;
523 
524 	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);
525 
526 	for (i = 0; i < VR_TIMEOUT; i++) {
527 		DELAY(10);
528 		if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
529 			break;
530 	}
531 	if (i == VR_TIMEOUT)
532 		printf("%s: reset never completed!\n",
533 			sc->vr_dev.dv_xname);
534 
535 	/* Wait a little while for the chip to get its brains in order. */
536 	DELAY(1000);
537 }
538 
539 /*
540  * Initialize an RX descriptor and attach an MBUF cluster.
541  * Note: the length fields are only 11 bits wide, which means the
542  * largest size we can specify is 2047. This is important because
543  * MCLBYTES is 2048, so we have to subtract one; otherwise we'll
544  * overflow the field and make a mess.
545  */
546 static int
547 vr_add_rxbuf(sc, i)
548 	struct vr_softc *sc;
549 	int i;
550 {
551 	struct vr_descsoft *ds = VR_DSRX(sc, i);
552 	struct mbuf *m_new;
553 	int error;
554 
555 	MGETHDR(m_new, M_DONTWAIT, MT_DATA);
556 	if (m_new == NULL)
557 		return (ENOBUFS);
558 
559 	MCLGET(m_new, M_DONTWAIT);
560 	if ((m_new->m_flags & M_EXT) == 0) {
561 		m_freem(m_new);
562 		return (ENOBUFS);
563 	}
564 
565 	if (ds->ds_mbuf != NULL)
566 		bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);
567 
568 	ds->ds_mbuf = m_new;
569 
570 	error = bus_dmamap_load(sc->vr_dmat, ds->ds_dmamap,
571 	    m_new->m_ext.ext_buf, m_new->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
572 	if (error) {
573 		printf("%s: unable to load rx DMA map %d, error = %d\n",
574 		    sc->vr_dev.dv_xname, i, error);
575 		panic("vr_add_rxbuf");		/* XXX */
576 	}
577 
578 	bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
579 	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
580 
581 	VR_INIT_RXDESC(sc, i);
582 
583 	return (0);
584 }
585 
586 /*
587  * A frame has been uploaded: pass the resulting mbuf chain up to
588  * the higher level protocols.
589  */
590 static void
591 vr_rxeof(sc)
592 	struct vr_softc *sc;
593 {
594 	struct ether_header *eh;
595 	struct mbuf *m;
596 	struct ifnet *ifp;
597 	struct vr_desc *d;
598 	struct vr_descsoft *ds;
599 	int i, total_len;
600 	u_int32_t rxstat;
601 
602 	ifp = &sc->vr_ec.ec_if;
603 
604 	for (i = sc->vr_rxptr;; i = VR_NEXTRX(i)) {
605 		d = VR_CDRX(sc, i);
606 		ds = VR_DSRX(sc, i);
607 
608 		VR_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
609 
610 		rxstat = le32toh(d->vr_status);
611 
612 		if (rxstat & VR_RXSTAT_OWN) {
613 			/*
614 			 * We have processed all of the receive buffers.
615 			 */
616 			break;
617 		}
618 
619 		/*
620 		 * If an error occurs, update stats, clear the
621 		 * status word and leave the mbuf cluster in place:
622 		 * it should simply get re-used next time this descriptor
623 		 * comes up in the ring.
624 		 */
625 		if (rxstat & VR_RXSTAT_RXERR) {
626 			const char *errstr;
627 
628 			ifp->if_ierrors++;
629 			switch (rxstat & 0x000000FF) {
630 			case VR_RXSTAT_CRCERR:
631 				errstr = "crc error";
632 				break;
633 			case VR_RXSTAT_FRAMEALIGNERR:
634 				errstr = "frame alignment error";
635 				break;
636 			case VR_RXSTAT_FIFOOFLOW:
637 				errstr = "FIFO overflow";
638 				break;
639 			case VR_RXSTAT_GIANT:
640 				errstr = "received giant packet";
641 				break;
642 			case VR_RXSTAT_RUNT:
643 				errstr = "received runt packet";
644 				break;
645 			case VR_RXSTAT_BUSERR:
646 				errstr = "system bus error";
647 				break;
648 			case VR_RXSTAT_BUFFERR:
649 				errstr = "rx buffer error";
650 				break;
651 			default:
652 				errstr = "unknown rx error";
653 				break;
654 			}
655 			printf("%s: receive error: %s\n", sc->vr_dev.dv_xname,
656 			    errstr);
657 
658 			VR_INIT_RXDESC(sc, i);
659 
660 			continue;
661 		}
662 
663 		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
664 		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
665 
666 		/* No errors; receive the packet. */
667 		total_len = VR_RXBYTES(le32toh(d->vr_status));
668 
669 		/*
670 		 * XXX The VIA Rhine chip includes the CRC with every
671 		 * received frame, and there's no way to turn this
672 		 * behavior off (at least, I can't find anything in
673 		 * the manual that explains how to do it) so we have
674 		 * to trim off the CRC manually.
675 		 */
676 		total_len -= ETHER_CRC_LEN;
677 
678 #ifdef __NO_STRICT_ALIGNMENT
679 		/*
680 		 * If the packet is small enough to fit in a
681 		 * single header mbuf, allocate one and copy
682 		 * the data into it.  This greatly reduces
683 		 * memory consumption when we receive lots
684 		 * of small packets.
685 		 *
686 		 * Otherwise, we add a new buffer to the receive
687 		 * chain.  If this fails, we drop the packet and
688 		 * recycle the old buffer.
689 		 */
690 		if (vr_copy_small != 0 && total_len <= MHLEN) {
691 			MGETHDR(m, M_DONTWAIT, MT_DATA);
692 			if (m == NULL)
693 				goto dropit;
694 			memcpy(mtod(m, caddr_t),
695 			    mtod(ds->ds_mbuf, caddr_t), total_len);
696 			VR_INIT_RXDESC(sc, i);
697 			bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
698 			    ds->ds_dmamap->dm_mapsize,
699 			    BUS_DMASYNC_PREREAD);
700 		} else {
701 			m = ds->ds_mbuf;
702 			if (vr_add_rxbuf(sc, i) == ENOBUFS) {
703  dropit:
704 				ifp->if_ierrors++;
705 				VR_INIT_RXDESC(sc, i);
706 				bus_dmamap_sync(sc->vr_dmat,
707 				    ds->ds_dmamap, 0,
708 				    ds->ds_dmamap->dm_mapsize,
709 				    BUS_DMASYNC_PREREAD);
710 				continue;
711 			}
712 		}
713 #else
714 		/*
715 		 * The Rhine's packet buffers must be 4-byte aligned.
716 		 * But this means that the data after the Ethernet header
717 		 * is misaligned.  We must allocate a new buffer and
718 		 * copy the data, shifted forward 2 bytes.
719 		 */
720 		MGETHDR(m, M_DONTWAIT, MT_DATA);
721 		if (m == NULL) {
722  dropit:
723 			ifp->if_ierrors++;
724 			VR_INIT_RXDESC(sc, i);
725 			bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
726 			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
727 			continue;
728 		}
729 		if (total_len > (MHLEN - 2)) {
730 			MCLGET(m, M_DONTWAIT);
731 			if ((m->m_flags & M_EXT) == 0) {
732 				m_freem(m);
733 				goto dropit;
734 			}
735 		}
736 		m->m_data += 2;
737 
738 		/*
739 		 * Note that we use clusters for incoming frames, so the
740 		 * buffer is virtually contiguous.
741 		 */
742 		memcpy(mtod(m, caddr_t), mtod(ds->ds_mbuf, caddr_t),
743 		    total_len);
744 
745 		/* Allow the receive descriptor to continue using its mbuf. */
746 		VR_INIT_RXDESC(sc, i);
747 		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
748 		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
749 #endif /* __NO_STRICT_ALIGNMENT */
750 
751 		ifp->if_ipackets++;
752 		eh = mtod(m, struct ether_header *);
753 		m->m_pkthdr.rcvif = ifp;
754 		m->m_pkthdr.len = m->m_len = total_len;
755 #if NBPFILTER > 0
756 		/*
757 		 * Handle BPF listeners.  Let the BPF user see the packet, but
758 		 * in promiscuous mode don't pass it up to the ether_input()
759 		 * layer unless it is a broadcast or multicast packet or it
760 		 * matches our ethernet address.
761 		 */
762 		if (ifp->if_bpf) {
763 			bpf_mtap(ifp->if_bpf, m);
764 			if ((ifp->if_flags & IFF_PROMISC) != 0 &&
765 			    ETHER_IS_MULTICAST(eh->ether_dhost) == 0 &&
766 			    memcmp(eh->ether_dhost, LLADDR(ifp->if_sadl),
767 				   ETHER_ADDR_LEN) != 0) {
768 				m_freem(m);
769 				continue;
770 			}
771 		}
772 #endif
773 		/* Pass it on. */
774 		(*ifp->if_input)(ifp, m);
775 	}
776 
777 	/* Update the receive pointer. */
778 	sc->vr_rxptr = i;
779 }
780 
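/*
 * Receiver end-of-channel: the receiver stopped due to an error or lack
 * of buffers.  Reap any completed frames, point the chip at the current
 * receive descriptor, and restart reception.
 */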
781 static void
782 vr_rxeoc(sc)
783 	struct vr_softc *sc;
784 {
785 
786 	vr_rxeof(sc);
787 	VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
788 	CSR_WRITE_4(sc, VR_RXADDR, VR_CDRXADDR(sc, sc->vr_rxptr));
789 	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
790 	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO);
791 }
792 
793 /*
794  * A frame was downloaded to the chip. It's safe for us to clean up
795  * the list buffers.
796  */
797 static void
798 vr_txeof(sc)
799 	struct vr_softc *sc;
800 {
801 	struct ifnet *ifp = &sc->vr_ec.ec_if;
802 	struct vr_desc *d;
803 	struct vr_descsoft *ds;
804 	u_int32_t txstat;
805 	int i;
806 
807 	ifp->if_flags &= ~IFF_OACTIVE;
808 
809 	/*
810 	 * Go through our tx list and free mbufs for those
811 	 * frames that have been transmitted.
812 	 */
813 	for (i = sc->vr_txdirty; sc->vr_txpending != 0;
814 	     i = VR_NEXTTX(i), sc->vr_txpending--) {
815 		d = VR_CDTX(sc, i);
816 		ds = VR_DSTX(sc, i);
817 
818 		VR_CDTXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
819 
820 		txstat = le32toh(d->vr_status);
821 		if (txstat & VR_TXSTAT_OWN)
822 			break;
823 
824 		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap,
825 		    0, ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
826 		bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);
827 		m_freem(ds->ds_mbuf);
828 		ds->ds_mbuf = NULL;
829 
830 		if (txstat & VR_TXSTAT_ERRSUM) {
831 			ifp->if_oerrors++;
832 			if (txstat & VR_TXSTAT_DEFER)
833 				ifp->if_collisions++;
834 			if (txstat & VR_TXSTAT_LATECOLL)
835 				ifp->if_collisions++;
836 		}
837 
838 		ifp->if_collisions += (txstat & VR_TXSTAT_COLLCNT) >> 3;
839 		ifp->if_opackets++;
840 	}
841 
842 	/* Update the dirty transmit buffer pointer. */
843 	sc->vr_txdirty = i;
844 
845 	/*
846 	 * Cancel the watchdog timer if there are no pending
847 	 * transmissions.
848 	 */
849 	if (sc->vr_txpending == 0)
850 		ifp->if_timer = 0;
851 }
852 
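/*
 * Interrupt handler: acknowledge and service all pending interrupt causes,
 * then restart output if any transmit descriptors were reclaimed.
 */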
853 static int
854 vr_intr(arg)
855 	void *arg;
856 {
857 	struct vr_softc *sc;
858 	struct ifnet *ifp;
859 	u_int16_t status;
860 	int handled = 0, dotx = 0;
861 
862 	sc = arg;
863 	ifp = &sc->vr_ec.ec_if;
864 
865 	/* Suppress unwanted interrupts. */
866 	if ((ifp->if_flags & IFF_UP) == 0) {
867 		vr_stop(sc, 1);
868 		return (0);
869 	}
870 
871 	/* Disable interrupts. */
872 	CSR_WRITE_2(sc, VR_IMR, 0x0000);
873 
874 	for (;;) {
875 		status = CSR_READ_2(sc, VR_ISR);
876 		if (status)
877 			CSR_WRITE_2(sc, VR_ISR, status);
878 
879 		if ((status & VR_INTRS) == 0)
880 			break;
881 
882 		handled = 1;
883 
884 		if (status & VR_ISR_RX_OK)
885 			vr_rxeof(sc);
886 
887 		if (status &
888 		    (VR_ISR_RX_ERR | VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW |
889 		     VR_ISR_RX_DROPPED))
890 			vr_rxeoc(sc);
891 
892 		if (status & VR_ISR_TX_OK) {
893 			dotx = 1;
894 			vr_txeof(sc);
895 		}
896 
897 		if (status & (VR_ISR_TX_UNDERRUN | VR_ISR_TX_ABRT)) {
898 			if (status & VR_ISR_TX_UNDERRUN)
899 				printf("%s: transmit underrun\n",
900 				    sc->vr_dev.dv_xname);
901 			if (status & VR_ISR_TX_ABRT)
902 				printf("%s: transmit aborted\n",
903 				    sc->vr_dev.dv_xname);
904 			ifp->if_oerrors++;
905 			dotx = 1;
906 			vr_txeof(sc);
907 			if (sc->vr_txpending) {
908 				VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON);
909 				VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO);
910 			}
911 		}
912 
913 		if (status & VR_ISR_BUSERR) {
914 			printf("%s: PCI bus error\n", sc->vr_dev.dv_xname);
915 			/* vr_init() calls vr_start() */
916 			dotx = 0;
917 			(void) vr_init(sc);
918 		}
919 	}
920 
921 	/* Re-enable interrupts. */
922 	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
923 
924 	if (dotx)
925 		vr_start(ifp);
926 
927 	return (handled);
928 }
929 
930 /*
931  * Main transmit routine.  We map packets for DMA straight from their
932  * mbufs when we can; misaligned or multi-segment packets are first copied
933  * into a fresh, aligned mbuf, since the Rhine needs longword-aligned
934  * buffers and we use a single descriptor per packet.
935  */
936 static void
937 vr_start(ifp)
938 	struct ifnet *ifp;
939 {
940 	struct vr_softc *sc = ifp->if_softc;
941 	struct mbuf *m0, *m;
942 	struct vr_desc *d;
943 	struct vr_descsoft *ds;
944 	int error, firsttx, nexttx, opending;
945 
946 	/*
947 	 * Remember the previous txpending and the first transmit
948 	 * descriptor we use.
949 	 */
950 	opending = sc->vr_txpending;
951 	firsttx = VR_NEXTTX(sc->vr_txlast);
952 
953 	/*
954 	 * Loop through the send queue, setting up transmit descriptors
955 	 * until we drain the queue, or use up all available transmit
956 	 * descriptors.
957 	 */
958 	while (sc->vr_txpending < VR_NTXDESC) {
959 		/*
960 		 * Grab a packet off the queue.
961 		 */
962 		IF_DEQUEUE(&ifp->if_snd, m0);
963 		if (m0 == NULL)
964 			break;
965 
966 		/*
967 		 * Get the next available transmit descriptor.
968 		 */
969 		nexttx = VR_NEXTTX(sc->vr_txlast);
970 		d = VR_CDTX(sc, nexttx);
971 		ds = VR_DSTX(sc, nexttx);
972 
973 		/*
974 		 * Load the DMA map.  If this fails, the packet didn't
975 		 * fit in one DMA segment, and we need to copy.  Note,
976 		 * the packet must also be aligned.
977 		 */
978 		if ((mtod(m0, bus_addr_t) & 3) != 0 ||
979 		    bus_dmamap_load_mbuf(sc->vr_dmat, ds->ds_dmamap, m0,
980 		     BUS_DMA_NOWAIT) != 0) {
981 			MGETHDR(m, M_DONTWAIT, MT_DATA);
982 			if (m == NULL) {
983 				printf("%s: unable to allocate Tx mbuf\n",
984 				    sc->vr_dev.dv_xname);
985 				IF_PREPEND(&ifp->if_snd, m0);
986 				break;
987 			}
988 			if (m0->m_pkthdr.len > MHLEN) {
989 				MCLGET(m, M_DONTWAIT);
990 				if ((m->m_flags & M_EXT) == 0) {
991 					printf("%s: unable to allocate Tx "
992 					    "cluster\n", sc->vr_dev.dv_xname);
993 					m_freem(m);
994 					IF_PREPEND(&ifp->if_snd, m0);
995 					break;
996 				}
997 			}
998 			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
999 			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
1000 			m_freem(m0);
1001 			m0 = m;
1002 			error = bus_dmamap_load_mbuf(sc->vr_dmat,
1003 			    ds->ds_dmamap, m0, BUS_DMA_NOWAIT);
1004 			if (error) {
1005 				printf("%s: unable to load Tx buffer, "
1006 				    "error = %d\n", sc->vr_dev.dv_xname, error);
1007 				IF_PREPEND(&ifp->if_snd, m0);
1008 				break;
1009 			}
1010 		}
1011 
1012 		/* Sync the DMA map. */
1013 		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
1014 		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1015 
1016 		/*
1017 		 * Store a pointer to the packet so we can free it later.
1018 		 */
1019 		ds->ds_mbuf = m0;
1020 
1021 #if NBPFILTER > 0
1022 		/*
1023 		 * If there's a BPF listener, bounce a copy of this frame
1024 		 * to him.
1025 		 */
1026 		if (ifp->if_bpf)
1027 			bpf_mtap(ifp->if_bpf, m0);
1028 #endif
1029 
1030 		/*
1031 		 * Fill in the transmit descriptor.  The Rhine
1032 		 * doesn't auto-pad, so we have to do this ourselves.
1033 		 */
1034 		d->vr_data = htole32(ds->ds_dmamap->dm_segs[0].ds_addr);
1035 		d->vr_ctl = htole32(m0->m_pkthdr.len < VR_MIN_FRAMELEN ?
1036 		    VR_MIN_FRAMELEN : m0->m_pkthdr.len);
1037 		d->vr_ctl |=
1038 		    htole32(VR_TXCTL_TLINK|VR_TXCTL_FIRSTFRAG|
1039 		    VR_TXCTL_LASTFRAG);
1040 
1041 		/*
1042 		 * If this is the first descriptor we're enqueuing,
1043 		 * don't give it to the Rhine yet.  That could cause
1044 		 * a race condition.  We'll do it below.
1045 		 */
1046 		if (nexttx == firsttx)
1047 			d->vr_status = 0;
1048 		else
1049 			d->vr_status = htole32(VR_TXSTAT_OWN);
1050 
1051 		VR_CDTXSYNC(sc, nexttx,
1052 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1053 
1054 		/* Advance the tx pointer. */
1055 		sc->vr_txpending++;
1056 		sc->vr_txlast = nexttx;
1057 	}
1058 
1059 	if (sc->vr_txpending == VR_NTXDESC) {
1060 		/* No more slots left; notify upper layer. */
1061 		ifp->if_flags |= IFF_OACTIVE;
1062 	}
1063 
1064 	if (sc->vr_txpending != opending) {
1065 		/*
1066 		 * We enqueued packets.  If the transmitter was idle,
1067 		 * reset the txdirty pointer.
1068 		 */
1069 		if (opending == 0)
1070 			sc->vr_txdirty = firsttx;
1071 
1072 		/*
1073 		 * Cause a transmit interrupt to happen on the
1074 		 * last packet we enqueued.
1075 		 */
1076 		VR_CDTX(sc, sc->vr_txlast)->vr_ctl |= htole32(VR_TXCTL_FINT);
1077 		VR_CDTXSYNC(sc, sc->vr_txlast,
1078 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1079 
1080 		/*
1081 		 * The entire packet chain is set up.  Give the
1082 		 * first descriptor to the Rhine now.
1083 		 */
1084 		VR_CDTX(sc, firsttx)->vr_status = htole32(VR_TXSTAT_OWN);
1085 		VR_CDTXSYNC(sc, firsttx,
1086 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1087 
1088 		/* Start the transmitter. */
1089 		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_TX_GO);
1090 
1091 		/* Set the watchdog timer in case the chip flakes out. */
1092 		ifp->if_timer = 5;
1093 	}
1094 }
1095 
1096 /*
1097  * Initialize the interface.  Must be called at splnet.
1098  */
1099 static int
1100 vr_init(sc)
1101 	struct vr_softc *sc;
1102 {
1103 	struct ifnet *ifp = &sc->vr_ec.ec_if;
1104 	struct vr_desc *d;
1105 	struct vr_descsoft *ds;
1106 	int i, error = 0;
1107 
1108 	/* Cancel pending I/O. */
1109 	vr_stop(sc, 0);
1110 
1111 	/* Reset the Rhine to a known state. */
1112 	vr_reset(sc);
1113 
1114 	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
1115 	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_STORENFWD);
1116 
1117 	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
1118 	VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD);
1119 
1120 	/*
1121 	 * Initialize the transmit descriptor ring.  txlast is initialized
1122 	 * to the end of the list so that it will wrap around to the first
1123 	 * descriptor when the first packet is transmitted.
1124 	 */
1125 	for (i = 0; i < VR_NTXDESC; i++) {
1126 		d = VR_CDTX(sc, i);
1127 		memset(d, 0, sizeof(struct vr_desc));
1128 		d->vr_next = htole32(VR_CDTXADDR(sc, VR_NEXTTX(i)));
1129 		VR_CDTXSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1130 	}
1131 	sc->vr_txpending = 0;
1132 	sc->vr_txdirty = 0;
1133 	sc->vr_txlast = VR_NTXDESC - 1;
1134 
1135 	/*
1136 	 * Initialize the receive descriptor ring.
1137 	 */
1138 	for (i = 0; i < VR_NRXDESC; i++) {
1139 		ds = VR_DSRX(sc, i);
1140 		if (ds->ds_mbuf == NULL) {
1141 			if ((error = vr_add_rxbuf(sc, i)) != 0) {
1142 				printf("%s: unable to allocate or map rx "
1143 				    "buffer %d, error = %d\n",
1144 				    sc->vr_dev.dv_xname, i, error);
1145 				/*
1146 				 * XXX Should attempt to run with fewer receive
1147 				 * XXX buffers instead of just failing.
1148 				 */
1149 				vr_rxdrain(sc);
1150 				goto out;
1151 			}
1152 		}
1153 	}
1154 	sc->vr_rxptr = 0;
1155 
1156 	/* If we want promiscuous mode, set the allframes bit. */
1157 	if (ifp->if_flags & IFF_PROMISC)
1158 		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
1159 	else
1160 		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
1161 
1162 	/* Set capture broadcast bit to capture broadcast frames. */
1163 	if (ifp->if_flags & IFF_BROADCAST)
1164 		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
1165 	else
1166 		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
1167 
1168 	/* Program the multicast filter, if necessary. */
1169 	vr_setmulti(sc);
1170 
1171 	/* Give the transmit and receive rings to the Rhine. */
1172 	CSR_WRITE_4(sc, VR_RXADDR, VR_CDRXADDR(sc, sc->vr_rxptr));
1173 	CSR_WRITE_4(sc, VR_TXADDR, VR_CDTXADDR(sc, VR_NEXTTX(sc->vr_txlast)));
1174 
1175 	/* Set current media. */
1176 	mii_mediachg(&sc->vr_mii);
1177 
1178 	/* Enable receiver and transmitter. */
1179 	CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START|
1180 				    VR_CMD_TX_ON|VR_CMD_RX_ON|
1181 				    VR_CMD_RX_GO);
1182 
1183 	/* Enable interrupts. */
1184 	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
1185 	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
1186 
1187 	ifp->if_flags |= IFF_RUNNING;
1188 	ifp->if_flags &= ~IFF_OACTIVE;
1189 
1190 	/* Start one second timer. */
1191 	callout_reset(&sc->vr_tick_ch, hz, vr_tick, sc);
1192 
1193 	/* Attempt to start output on the interface. */
1194 	vr_start(ifp);
1195 
1196  out:
1197 	if (error)
1198 		printf("%s: interface not running\n", sc->vr_dev.dv_xname);
1199 	return (error);
1200 }
1201 
1202 /*
1203  * Set media options.
1204  */
1205 static int
1206 vr_ifmedia_upd(ifp)
1207 	struct ifnet *ifp;
1208 {
1209 	struct vr_softc *sc = ifp->if_softc;
1210 
1211 	if (ifp->if_flags & IFF_UP)
1212 		mii_mediachg(&sc->vr_mii);
1213 	return (0);
1214 }
1215 
1216 /*
1217  * Report current media status.
1218  */
1219 static void
1220 vr_ifmedia_sts(ifp, ifmr)
1221 	struct ifnet *ifp;
1222 	struct ifmediareq *ifmr;
1223 {
1224 	struct vr_softc *sc = ifp->if_softc;
1225 
1226 	mii_pollstat(&sc->vr_mii);
1227 	ifmr->ifm_status = sc->vr_mii.mii_media_status;
1228 	ifmr->ifm_active = sc->vr_mii.mii_media_active;
1229 }
1230 
1231 static int
1232 vr_ioctl(ifp, command, data)
1233 	struct ifnet *ifp;
1234 	u_long command;
1235 	caddr_t data;
1236 {
1237 	struct vr_softc *sc = ifp->if_softc;
1238 	struct ifreq *ifr = (struct ifreq *)data;
1239 	struct ifaddr *ifa = (struct ifaddr *)data;
1240 	int s, error = 0;
1241 
1242 	s = splnet();
1243 
1244 	switch (command) {
1245 	case SIOCSIFADDR:
1246 		ifp->if_flags |= IFF_UP;
1247 
1248 		switch (ifa->ifa_addr->sa_family) {
1249 #ifdef INET
1250 		case AF_INET:
1251 			if ((error = vr_init(sc)) != 0)
1252 				break;
1253 			arp_ifinit(ifp, ifa);
1254 			break;
1255 #endif /* INET */
1256 		default:
1257 			error = vr_init(sc);
1258 			break;
1259 		}
1260 		break;
1261 
1262 	case SIOCGIFADDR:
1263 		bcopy((caddr_t) sc->vr_enaddr,
1264 			(caddr_t) ((struct sockaddr *)&ifr->ifr_data)->sa_data,
1265 			ETHER_ADDR_LEN);
1266 		break;
1267 
1268 	case SIOCSIFMTU:
1269 		if (ifr->ifr_mtu > ETHERMTU)
1270 			error = EINVAL;
1271 		else
1272 			ifp->if_mtu = ifr->ifr_mtu;
1273 		break;
1274 
1275 	case SIOCSIFFLAGS:
1276 		if ((ifp->if_flags & IFF_UP) == 0 &&
1277 		    (ifp->if_flags & IFF_RUNNING) != 0) {
1278 			/*
1279 			 * If interface is marked down and it is running, then
1280 			 * stop it.
1281 			 */
1282 			vr_stop(sc, 1);
1283 		} else if ((ifp->if_flags & IFF_UP) != 0 &&
1284 			   (ifp->if_flags & IFF_RUNNING) == 0) {
1285 			/*
1286 			 * If interface is marked up and it is stopped, then
1287 			 * start it.
1288 			 */
1289 			error = vr_init(sc);
1290 		} else if ((ifp->if_flags & IFF_UP) != 0) {
1291 			/*
1292 			 * Reset the interface to pick up changes in any other
1293 			 * flags that affect the hardware state.
1294 			 */
1295 			error = vr_init(sc);
1296 		}
1297 		break;
1298 
1299 	case SIOCADDMULTI:
1300 	case SIOCDELMULTI:
1301 		if (command == SIOCADDMULTI)
1302 			error = ether_addmulti(ifr, &sc->vr_ec);
1303 		else
1304 			error = ether_delmulti(ifr, &sc->vr_ec);
1305 
1306 		if (error == ENETRESET) {
1307 			/*
1308 			 * Multicast list has changed; set the hardware filter
1309 			 * accordingly.
1310 			 */
1311 			vr_setmulti(sc);
1312 			error = 0;
1313 		}
1314 		break;
1315 
1316 	case SIOCGIFMEDIA:
1317 	case SIOCSIFMEDIA:
1318 		error = ifmedia_ioctl(ifp, ifr, &sc->vr_mii.mii_media, command);
1319 		break;
1320 
1321 	default:
1322 		error = EINVAL;
1323 		break;
1324 	}
1325 
1326 	splx(s);
1327 	return (error);
1328 }
1329 
1330 static void
1331 vr_watchdog(ifp)
1332 	struct ifnet *ifp;
1333 {
1334 	struct vr_softc *sc = ifp->if_softc;
1335 
1336 	printf("%s: device timeout\n", sc->vr_dev.dv_xname);
1337 	ifp->if_oerrors++;
1338 
1339 	(void) vr_init(sc);
1340 }
1341 
1342 /*
1343  * One second timer, used to tick MII.
1344  */
1345 static void
1346 vr_tick(arg)
1347 	void *arg;
1348 {
1349 	struct vr_softc *sc = arg;
1350 	int s;
1351 
1352 	s = splnet();
1353 	mii_tick(&sc->vr_mii);
1354 	splx(s);
1355 
1356 	callout_reset(&sc->vr_tick_ch, hz, vr_tick, sc);
1357 }
1358 
1359 /*
1360  * Drain the receive queue.
1361  */
1362 static void
1363 vr_rxdrain(sc)
1364 	struct vr_softc *sc;
1365 {
1366 	struct vr_descsoft *ds;
1367 	int i;
1368 
1369 	for (i = 0; i < VR_NRXDESC; i++) {
1370 		ds = VR_DSRX(sc, i);
1371 		if (ds->ds_mbuf != NULL) {
1372 			bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);
1373 			m_freem(ds->ds_mbuf);
1374 			ds->ds_mbuf = NULL;
1375 		}
1376 	}
1377 }
1378 
1379 /*
1380  * Stop the adapter and free any mbufs allocated to the
1381  * transmit lists.
1382  */
1383 static void
1384 vr_stop(sc, drain)
1385 	struct vr_softc *sc;
1386 	int drain;
1387 {
1388 	struct vr_descsoft *ds;
1389 	struct ifnet *ifp;
1390 	int i;
1391 
1392 	/* Cancel one second timer. */
1393 	callout_stop(&sc->vr_tick_ch);
1394 
1395 	/* Down the MII. */
1396 	mii_down(&sc->vr_mii);
1397 
1398 	ifp = &sc->vr_ec.ec_if;
1399 	ifp->if_timer = 0;
1400 
1401 	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP);
1402 	VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON));
1403 	CSR_WRITE_2(sc, VR_IMR, 0x0000);
1404 	CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
1405 	CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);
1406 
1407 	/*
1408 	 * Release any queued transmit buffers.
1409 	 */
1410 	for (i = 0; i < VR_NTXDESC; i++) {
1411 		ds = VR_DSTX(sc, i);
1412 		if (ds->ds_mbuf != NULL) {
1413 			bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);
1414 			m_freem(ds->ds_mbuf);
1415 			ds->ds_mbuf = NULL;
1416 		}
1417 	}
1418 
1419 	if (drain) {
1420 		/*
1421 		 * Release the receive buffers.
1422 		 */
1423 		vr_rxdrain(sc);
1424 	}
1425 
1426 	/*
1427 	 * Mark the interface down and cancel the watchdog timer.
1428 	 */
1429 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1430 	ifp->if_timer = 0;
1431 }
1432 
1433 static struct vr_type *vr_lookup __P((struct pci_attach_args *));
1434 static int vr_probe __P((struct device *, struct cfdata *, void *));
1435 static void vr_attach __P((struct device *, struct device *, void *));
1436 static void vr_shutdown __P((void *));
1437 
1438 struct cfattach vr_ca = {
1439 	sizeof (struct vr_softc), vr_probe, vr_attach
1440 };
1441 
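/*
 * Look up a device in the vr_devs table; returns NULL if it is not a
 * supported Rhine variant.
 */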
1442 static struct vr_type *
1443 vr_lookup(pa)
1444 	struct pci_attach_args *pa;
1445 {
1446 	struct vr_type *vrt;
1447 
1448 	for (vrt = vr_devs; vrt->vr_name != NULL; vrt++) {
1449 		if (PCI_VENDOR(pa->pa_id) == vrt->vr_vid &&
1450 		    PCI_PRODUCT(pa->pa_id) == vrt->vr_did)
1451 			return (vrt);
1452 	}
1453 	return (NULL);
1454 }
1455 
1456 static int
1457 vr_probe(parent, match, aux)
1458 	struct device *parent;
1459 	struct cfdata *match;
1460 	void *aux;
1461 {
1462 	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
1463 
1464 	if (vr_lookup(pa) != NULL)
1465 		return (1);
1466 
1467 	return (0);
1468 }
1469 
1470 /*
1471  * Stop all chip I/O so that the kernel's probe routines don't
1472  * get confused by errant DMAs when rebooting.
1473  */
1474 static void
1475 vr_shutdown(arg)
1476 	void *arg;
1477 {
1478 	struct vr_softc *sc = (struct vr_softc *)arg;
1479 
1480 	vr_stop(sc, 1);
1481 }
1482 
1483 /*
1484  * Attach the interface. Allocate softc structures, do ifmedia
1485  * setup and ethernet/BPF attach.
1486  */
1487 static void
1488 vr_attach(parent, self, aux)
1489 	struct device *parent;
1490 	struct device *self;
1491 	void *aux;
1492 {
1493 	struct vr_softc *sc = (struct vr_softc *) self;
1494 	struct pci_attach_args *pa = (struct pci_attach_args *) aux;
1495 	bus_dma_segment_t seg;
1496 	struct vr_type *vrt;
1497 	u_int32_t command;
1498 	struct ifnet *ifp;
1499 	u_char eaddr[ETHER_ADDR_LEN];
1500 	int i, rseg, error;
1501 
1502 #define	PCI_CONF_WRITE(r, v)	pci_conf_write(pa->pa_pc, pa->pa_tag, (r), (v))
1503 #define	PCI_CONF_READ(r)	pci_conf_read(pa->pa_pc, pa->pa_tag, (r))
1504 
1505 	callout_init(&sc->vr_tick_ch);
1506 
1507 	vrt = vr_lookup(pa);
1508 	if (vrt == NULL) {
1509 		printf("\n");
1510 		panic("vr_attach: impossible");
1511 	}
1512 
1513 	printf(": %s Ethernet\n", vrt->vr_name);
1514 
1515 	/*
1516 	 * Handle power management nonsense.
1517 	 */
1518 
1519 	command = PCI_CONF_READ(VR_PCI_CAPID) & 0x000000FF;
1520 	if (command == 0x01) {
1521 		command = PCI_CONF_READ(VR_PCI_PWRMGMTCTRL);
1522 		if (command & VR_PSTATE_MASK) {
1523 			u_int32_t iobase, membase, irq;
1524 
1525 			/* Save important PCI config data. */
1526 			iobase = PCI_CONF_READ(VR_PCI_LOIO);
1527 			membase = PCI_CONF_READ(VR_PCI_LOMEM);
1528 			irq = PCI_CONF_READ(VR_PCI_INTLINE);
1529 
1530 			/* Reset the power state. */
1531 			printf("%s: chip is in D%d power mode "
1532 				"-- setting to D0\n",
1533 				sc->vr_dev.dv_xname, command & VR_PSTATE_MASK);
1534 			command &= 0xFFFFFFFC;
1535 			PCI_CONF_WRITE(VR_PCI_PWRMGMTCTRL, command);
1536 
1537 			/* Restore PCI config data. */
1538 			PCI_CONF_WRITE(VR_PCI_LOIO, iobase);
1539 			PCI_CONF_WRITE(VR_PCI_LOMEM, membase);
1540 			PCI_CONF_WRITE(VR_PCI_INTLINE, irq);
1541 		}
1542 	}
1543 
1544 	/* Make sure bus mastering is enabled. */
1545 	command = PCI_CONF_READ(PCI_COMMAND_STATUS_REG);
1546 	command |= PCI_COMMAND_MASTER_ENABLE;
1547 	PCI_CONF_WRITE(PCI_COMMAND_STATUS_REG, command);
1548 
1549 	/*
1550 	 * Map control/status registers.
1551 	 */
1552 	{
1553 		bus_space_tag_t iot, memt;
1554 		bus_space_handle_t ioh, memh;
1555 		int ioh_valid, memh_valid;
1556 		pci_intr_handle_t intrhandle;
1557 		const char *intrstr;
1558 
1559 		ioh_valid = (pci_mapreg_map(pa, VR_PCI_LOIO,
1560 			PCI_MAPREG_TYPE_IO, 0,
1561 			&iot, &ioh, NULL, NULL) == 0);
1562 		memh_valid = (pci_mapreg_map(pa, VR_PCI_LOMEM,
1563 			PCI_MAPREG_TYPE_MEM |
1564 			PCI_MAPREG_MEM_TYPE_32BIT,
1565 			0, &memt, &memh, NULL, NULL) == 0);
1566 #if defined(VR_USEIOSPACE)
1567 		if (ioh_valid) {
1568 			sc->vr_bst = iot;
1569 			sc->vr_bsh = ioh;
1570 		} else if (memh_valid) {
1571 			sc->vr_bst = memt;
1572 			sc->vr_bsh = memh;
1573 		}
1574 #else
1575 		if (memh_valid) {
1576 			sc->vr_bst = memt;
1577 			sc->vr_bsh = memh;
1578 		} else if (ioh_valid) {
1579 			sc->vr_bst = iot;
1580 			sc->vr_bsh = ioh;
1581 		}
1582 #endif
1583 		else {
1584 			printf(": unable to map device registers\n");
1585 			return;
1586 		}
1587 
1588 		/* Allocate interrupt */
1589 		if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
1590 				pa->pa_intrline, &intrhandle)) {
1591 			printf("%s: couldn't map interrupt\n",
1592 				sc->vr_dev.dv_xname);
1593 			return;
1594 		}
1595 		intrstr = pci_intr_string(pa->pa_pc, intrhandle);
1596 		sc->vr_ih = pci_intr_establish(pa->pa_pc, intrhandle, IPL_NET,
1597 						vr_intr, sc);
1598 		if (sc->vr_ih == NULL) {
1599 			printf("%s: couldn't establish interrupt",
1600 				sc->vr_dev.dv_xname);
1601 			if (intrstr != NULL)
1602 				printf(" at %s", intrstr);
1603 			printf("\n");
			return;
1604 		}
1605 		printf("%s: interrupting at %s\n",
1606 			sc->vr_dev.dv_xname, intrstr);
1607 	}
1608 
1609 	/* Reset the adapter. */
1610 	vr_reset(sc);
1611 
1612 	/*
1613 	 * Get station address. The way the Rhine chips work,
1614 	 * you're not allowed to directly access the EEPROM once
1615 	 * they've been programmed a special way. Consequently,
1616 	 * we need to read the node address from the PAR0 and PAR1
1617 	 * registers.
1618 	 */
1619 	VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
1620 	DELAY(200);
1621 	for (i = 0; i < ETHER_ADDR_LEN; i++)
1622 		eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);
1623 
1624 	/*
1625 	 * A Rhine chip was detected. Inform the world.
1626 	 */
1627 	printf("%s: Ethernet address: %s\n",
1628 		sc->vr_dev.dv_xname, ether_sprintf(eaddr));
1629 
1630 	bcopy(eaddr, sc->vr_enaddr, ETHER_ADDR_LEN);
1631 
1632 	sc->vr_dmat = pa->pa_dmat;
1633 
1634 	/*
1635 	 * Allocate the control data structures, and create and load
1636 	 * the DMA map for it.
1637 	 */
1638 	if ((error = bus_dmamem_alloc(sc->vr_dmat,
1639 	    sizeof(struct vr_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
1640 	    0)) != 0) {
1641 		printf("%s: unable to allocate control data, error = %d\n",
1642 		    sc->vr_dev.dv_xname, error);
1643 		goto fail_0;
1644 	}
1645 
1646 	if ((error = bus_dmamem_map(sc->vr_dmat, &seg, rseg,
1647 	    sizeof(struct vr_control_data), (caddr_t *)&sc->vr_control_data,
1648 	    BUS_DMA_COHERENT)) != 0) {
1649 		printf("%s: unable to map control data, error = %d\n",
1650 		    sc->vr_dev.dv_xname, error);
1651 		goto fail_1;
1652 	}
1653 
1654 	if ((error = bus_dmamap_create(sc->vr_dmat,
1655 	    sizeof(struct vr_control_data), 1,
1656 	    sizeof(struct vr_control_data), 0, 0,
1657 	    &sc->vr_cddmamap)) != 0) {
1658 		printf("%s: unable to create control data DMA map, "
1659 		    "error = %d\n", sc->vr_dev.dv_xname, error);
1660 		goto fail_2;
1661 	}
1662 
1663 	if ((error = bus_dmamap_load(sc->vr_dmat, sc->vr_cddmamap,
1664 	    sc->vr_control_data, sizeof(struct vr_control_data), NULL,
1665 	    0)) != 0) {
1666 		printf("%s: unable to load control data DMA map, error = %d\n",
1667 		    sc->vr_dev.dv_xname, error);
1668 		goto fail_3;
1669 	}
1670 
1671 	/*
1672 	 * Create the transmit buffer DMA maps.
1673 	 */
1674 	for (i = 0; i < VR_NTXDESC; i++) {
1675 		if ((error = bus_dmamap_create(sc->vr_dmat, MCLBYTES,
1676 		    1, MCLBYTES, 0, 0,
1677 		    &VR_DSTX(sc, i)->ds_dmamap)) != 0) {
1678 			printf("%s: unable to create tx DMA map %d, "
1679 			    "error = %d\n", sc->vr_dev.dv_xname, i, error);
1680 			goto fail_4;
1681 		}
1682 	}
1683 
1684 	/*
1685 	 * Create the receive buffer DMA maps.
1686 	 */
1687 	for (i = 0; i < VR_NRXDESC; i++) {
1688 		if ((error = bus_dmamap_create(sc->vr_dmat, MCLBYTES, 1,
1689 		    MCLBYTES, 0, 0,
1690 		    &VR_DSRX(sc, i)->ds_dmamap)) != 0) {
1691 			printf("%s: unable to create rx DMA map %d, "
1692 			    "error = %d\n", sc->vr_dev.dv_xname, i, error);
1693 			goto fail_5;
1694 		}
1695 		VR_DSRX(sc, i)->ds_mbuf = NULL;
1696 	}
1697 
1698 	ifp = &sc->vr_ec.ec_if;
1699 	ifp->if_softc = sc;
1700 	ifp->if_mtu = ETHERMTU;
1701 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1702 	ifp->if_ioctl = vr_ioctl;
1703 	ifp->if_start = vr_start;
1704 	ifp->if_watchdog = vr_watchdog;
1705 	bcopy(sc->vr_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
1706 
1707 	/*
1708 	 * Initialize MII/media info.
1709 	 */
1710 	sc->vr_mii.mii_ifp = ifp;
1711 	sc->vr_mii.mii_readreg = vr_mii_readreg;
1712 	sc->vr_mii.mii_writereg = vr_mii_writereg;
1713 	sc->vr_mii.mii_statchg = vr_mii_statchg;
1714 	ifmedia_init(&sc->vr_mii.mii_media, 0, vr_ifmedia_upd, vr_ifmedia_sts);
1715 	mii_attach(&sc->vr_dev, &sc->vr_mii, 0xffffffff, MII_PHY_ANY,
1716 	    MII_OFFSET_ANY, 0);
1717 	if (LIST_FIRST(&sc->vr_mii.mii_phys) == NULL) {
1718 		ifmedia_add(&sc->vr_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
1719 		ifmedia_set(&sc->vr_mii.mii_media, IFM_ETHER|IFM_NONE);
1720 	} else
1721 		ifmedia_set(&sc->vr_mii.mii_media, IFM_ETHER|IFM_AUTO);
1722 
1723 	/*
1724 	 * Call MI attach routines.
1725 	 */
1726 	if_attach(ifp);
1727 	ether_ifattach(ifp, sc->vr_enaddr);
1728 
1729 #if NBPFILTER > 0
1730 	bpfattach(&sc->vr_ec.ec_if.if_bpf,
1731 		ifp, DLT_EN10MB, sizeof (struct ether_header));
1732 #endif
1733 
1734 	sc->vr_ats = shutdownhook_establish(vr_shutdown, sc);
1735 	if (sc->vr_ats == NULL)
1736 		printf("%s: warning: couldn't establish shutdown hook\n",
1737 			sc->vr_dev.dv_xname);
1738 	return;
1739 
1740  fail_5:
1741 	for (i = 0; i < VR_NRXDESC; i++) {
1742 		if (sc->vr_rxsoft[i].ds_dmamap != NULL)
1743 			bus_dmamap_destroy(sc->vr_dmat,
1744 			    sc->vr_rxsoft[i].ds_dmamap);
1745 	}
1746  fail_4:
1747 	for (i = 0; i < VR_NTXDESC; i++) {
1748 		if (sc->vr_txsoft[i].ds_dmamap != NULL)
1749 			bus_dmamap_destroy(sc->vr_dmat,
1750 			    sc->vr_txsoft[i].ds_dmamap);
1751 	}
1752 	bus_dmamap_unload(sc->vr_dmat, sc->vr_cddmamap);
1753  fail_3:
1754 	bus_dmamap_destroy(sc->vr_dmat, sc->vr_cddmamap);
1755  fail_2:
1756 	bus_dmamem_unmap(sc->vr_dmat, (caddr_t)sc->vr_control_data,
1757 	    sizeof(struct vr_control_data));
1758  fail_1:
1759 	bus_dmamem_free(sc->vr_dmat, &seg, rseg);
1760  fail_0:
1761 	return;
1762 }
1763