xref: /netbsd-src/sys/dev/pci/if_vr.c (revision 7cc2f76925f078d01ddc9e640a98f4ccfc9f8c3b)
1 /*	$NetBSD: if_vr.c,v 1.42 2000/12/14 06:42:57 thorpej Exp $	*/
2 
3 /*-
4  * Copyright (c) 1998, 1999 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9  * NASA Ames Research Center.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. All advertising materials mentioning features or use of this software
20  *    must display the following acknowledgement:
21  *	This product includes software developed by the NetBSD
22  *	Foundation, Inc. and its contributors.
23  * 4. Neither the name of The NetBSD Foundation nor the names of its
24  *    contributors may be used to endorse or promote products derived
25  *    from this software without specific prior written permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37  * POSSIBILITY OF SUCH DAMAGE.
38  */
39 
40 /*
41  * Copyright (c) 1997, 1998
42  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
43  *
44  * Redistribution and use in source and binary forms, with or without
45  * modification, are permitted provided that the following conditions
46  * are met:
47  * 1. Redistributions of source code must retain the above copyright
48  *    notice, this list of conditions and the following disclaimer.
49  * 2. Redistributions in binary form must reproduce the above copyright
50  *    notice, this list of conditions and the following disclaimer in the
51  *    documentation and/or other materials provided with the distribution.
52  * 3. All advertising materials mentioning features or use of this software
53  *    must display the following acknowledgement:
54  *	This product includes software developed by Bill Paul.
55  * 4. Neither the name of the author nor the names of any co-contributors
56  *    may be used to endorse or promote products derived from this software
57  *    without specific prior written permission.
58  *
59  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
60  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
61  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
62  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
63  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
64  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
65  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
66  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
67  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
68  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
69  * THE POSSIBILITY OF SUCH DAMAGE.
70  *
71  *	$FreeBSD: if_vr.c,v 1.7 1999/01/10 18:51:49 wpaul Exp $
72  */
73 
74 /*
75  * VIA Rhine fast ethernet PCI NIC driver
76  *
77  * Supports various network adapters based on the VIA Rhine
78  * and Rhine II PCI controllers, including the D-Link DFE530TX.
79  * Datasheets are available at http://www.via.com.tw.
80  *
81  * Written by Bill Paul <wpaul@ctr.columbia.edu>
82  * Electrical Engineering Department
83  * Columbia University, New York City
84  */
85 
86 /*
87  * The VIA Rhine controllers are similar in some respects to the
88  * DEC tulip chips, though less complicated. The controller
89  * uses an MII bus and an external physical layer interface. The
90  * receiver has a one entry perfect filter and a 64-bit hash table
91  * multicast filter. Transmit and receive descriptors are similar
92  * to the tulip.
93  *
94  * The Rhine has a serious flaw in its transmit DMA mechanism:
95  * transmit buffers must be longword aligned. Unfortunately,
96  * the kernel doesn't guarantee that mbufs will be filled in starting
97  * at longword boundaries, so we have to do a buffer copy before
98  * transmission.
99  *
100  * Apparently, the receive DMA mechanism has the same flaw.  This
101  * means that on systems with strict alignment requirements, incoming
102  * frames must be copied to a new buffer which shifts the data forward
103  * 2 bytes so that the payload is aligned on a 4-byte boundary.
104  */
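
/*
 * Illustration: the Ethernet header is 14 bytes, so a frame received
 * into a 4-byte aligned buffer leaves its IP header only 2-byte
 * aligned.  Copying into a new mbuf whose m_data has been advanced
 * 2 bytes (see vr_rxeof()) puts the IP header on a 4-byte boundary.
 */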
105 
106 #include "opt_inet.h"
107 
108 #include <sys/param.h>
109 #include <sys/systm.h>
110 #include <sys/callout.h>
111 #include <sys/sockio.h>
112 #include <sys/mbuf.h>
113 #include <sys/malloc.h>
114 #include <sys/kernel.h>
115 #include <sys/socket.h>
116 #include <sys/device.h>
117 
118 #include <uvm/uvm_extern.h>		/* for PAGE_SIZE */
119 
120 #include <net/if.h>
121 #include <net/if_arp.h>
122 #include <net/if_dl.h>
123 #include <net/if_media.h>
124 #include <net/if_ether.h>
125 
126 #if defined(INET)
127 #include <netinet/in.h>
128 #include <netinet/if_inarp.h>
129 #endif
130 
131 #include "bpfilter.h"
132 #if NBPFILTER > 0
133 #include <net/bpf.h>
134 #endif
135 
136 #include <machine/bus.h>
137 #include <machine/intr.h>
138 #include <machine/endian.h>
139 
140 #include <dev/mii/mii.h>
141 #include <dev/mii/miivar.h>
142 #include <dev/mii/mii_bitbang.h>
143 
144 #include <dev/pci/pcireg.h>
145 #include <dev/pci/pcivar.h>
146 #include <dev/pci/pcidevs.h>
147 
148 #include <dev/pci/if_vrreg.h>
149 
150 #define	VR_USEIOSPACE
151 
152 /*
153  * Various supported device vendors/types and their names.
154  */
155 static struct vr_type {
156 	pci_vendor_id_t		vr_vid;
157 	pci_product_id_t	vr_did;
158 	const char		*vr_name;
159 } vr_devs[] = {
160 	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT3043,
161 		"VIA VT3043 (Rhine) 10/100" },
162 	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT6102,
163 		"VIA VT6102 (Rhine II) 10/100" },
164 	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT86C100A,
165 		"VIA VT86C100A (Rhine-II) 10/100" },
166 	{ 0, 0, NULL }
167 };
168 
169 /*
170  * Transmit descriptor list size.
171  */
172 #define	VR_NTXDESC		64
173 #define	VR_NTXDESC_MASK		(VR_NTXDESC - 1)
174 #define	VR_NEXTTX(x)		(((x) + 1) & VR_NTXDESC_MASK)
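
/*
 * VR_NEXTTX() wraps by masking, which relies on VR_NTXDESC being a
 * power of two; VR_NEXTRX() below works the same way.
 */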
175 
176 /*
177  * Receive descriptor list size.
178  */
179 #define	VR_NRXDESC		64
180 #define	VR_NRXDESC_MASK		(VR_NRXDESC - 1)
181 #define	VR_NEXTRX(x)		(((x) + 1) & VR_NRXDESC_MASK)
182 
183 /*
184  * Control data structures that are DMA'd to the Rhine chip.  We allocate
185  * them in a single clump that maps to a single DMA segment to make several
186  * things easier.
187  *
188  * Note that since each outgoing packet occupies a single, aligned DMA
189  * segment (copied if necessary), one transmit descriptor per packet suffices.
190  */
191 struct vr_control_data {
192 	struct vr_desc		vr_txdescs[VR_NTXDESC];
193 	struct vr_desc		vr_rxdescs[VR_NRXDESC];
194 };
195 
196 #define	VR_CDOFF(x)		offsetof(struct vr_control_data, x)
197 #define	VR_CDTXOFF(x)		VR_CDOFF(vr_txdescs[(x)])
198 #define	VR_CDRXOFF(x)		VR_CDOFF(vr_rxdescs[(x)])
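
/*
 * Example: VR_CDTXOFF(3) is the byte offset of the fourth transmit
 * descriptor within the clump; VR_CDTXADDR() (defined below) adds it
 * to the clump's DMA address to form the bus address given to the chip.
 */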
199 
200 /*
201  * Software state of transmit and receive descriptors.
202  */
203 struct vr_descsoft {
204 	struct mbuf		*ds_mbuf;	/* head of mbuf chain */
205 	bus_dmamap_t		ds_dmamap;	/* our DMA map */
206 };
207 
208 struct vr_softc {
209 	struct device		vr_dev;		/* generic device glue */
210 	void			*vr_ih;		/* interrupt cookie */
211 	void			*vr_ats;	/* shutdown hook */
212 	bus_space_tag_t		vr_bst;		/* bus space tag */
213 	bus_space_handle_t	vr_bsh;		/* bus space handle */
214 	bus_dma_tag_t		vr_dmat;	/* bus DMA tag */
215 	pci_chipset_tag_t	vr_pc;		/* PCI chipset info */
216 	struct ethercom		vr_ec;		/* Ethernet common info */
217 	u_int8_t 		vr_enaddr[ETHER_ADDR_LEN];
218 	struct mii_data		vr_mii;		/* MII/media info */
219 
220 	struct callout		vr_tick_ch;	/* tick callout */
221 
222 	bus_dmamap_t		vr_cddmamap;	/* control data DMA map */
223 #define	vr_cddma	vr_cddmamap->dm_segs[0].ds_addr
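/* vr_cddma is the bus address of the control data's single DMA segment. */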
224 
225 	/*
226 	 * Software state for transmit and receive descriptors.
227 	 */
228 	struct vr_descsoft	vr_txsoft[VR_NTXDESC];
229 	struct vr_descsoft	vr_rxsoft[VR_NRXDESC];
230 
231 	/*
232 	 * Control data structures.
233 	 */
234 	struct vr_control_data	*vr_control_data;
235 
236 	int	vr_txpending;		/* number of TX requests pending */
237 	int	vr_txdirty;		/* first dirty TX descriptor */
238 	int	vr_txlast;		/* last used TX descriptor */
239 
240 	int	vr_rxptr;		/* next ready RX descriptor */
241 };
242 
243 #define	VR_CDTXADDR(sc, x)	((sc)->vr_cddma + VR_CDTXOFF((x)))
244 #define	VR_CDRXADDR(sc, x)	((sc)->vr_cddma + VR_CDRXOFF((x)))
245 
246 #define	VR_CDTX(sc, x)		(&(sc)->vr_control_data->vr_txdescs[(x)])
247 #define	VR_CDRX(sc, x)		(&(sc)->vr_control_data->vr_rxdescs[(x)])
248 
249 #define	VR_DSTX(sc, x)		(&(sc)->vr_txsoft[(x)])
250 #define	VR_DSRX(sc, x)		(&(sc)->vr_rxsoft[(x)])
251 
252 #define	VR_CDTXSYNC(sc, x, ops)						\
253 	bus_dmamap_sync((sc)->vr_dmat, (sc)->vr_cddmamap,		\
254 	    VR_CDTXOFF((x)), sizeof(struct vr_desc), (ops))
255 
256 #define	VR_CDRXSYNC(sc, x, ops)						\
257 	bus_dmamap_sync((sc)->vr_dmat, (sc)->vr_cddmamap,		\
258 	    VR_CDRXOFF((x)), sizeof(struct vr_desc), (ops))
259 
260 /*
261  * Note we rely on MCLBYTES being a power of two below: with MCLBYTES
 * at 2048, (MCLBYTES - 1) is 0x7ff, which exactly fills the 11-bit
 * VR_RXCTL_BUFLEN field.
262  */
263 #define	VR_INIT_RXDESC(sc, i)						\
264 do {									\
265 	struct vr_desc *__d = VR_CDRX((sc), (i));			\
266 	struct vr_descsoft *__ds = VR_DSRX((sc), (i));			\
267 									\
268 	__d->vr_next = htole32(VR_CDRXADDR((sc), VR_NEXTRX((i))));	\
269 	__d->vr_status = htole32(VR_RXSTAT_FIRSTFRAG |			\
270 	    VR_RXSTAT_LASTFRAG | VR_RXSTAT_OWN);			\
271 	__d->vr_data = htole32(__ds->ds_dmamap->dm_segs[0].ds_addr);	\
272 	__d->vr_ctl = htole32(VR_RXCTL_CHAIN | VR_RXCTL_RX_INTR |	\
273 	    ((MCLBYTES - 1) & VR_RXCTL_BUFLEN));			\
274 	VR_CDRXSYNC((sc), (i), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
275 } while (0)
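
/*
 * The chip both reads the descriptor and writes status back into it,
 * hence the combined PREREAD|PREWRITE sync in VR_INIT_RXDESC().
 */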
276 
277 /*
278  * register space access macros
279  */
280 #define	CSR_WRITE_4(sc, reg, val)					\
281 	bus_space_write_4(sc->vr_bst, sc->vr_bsh, reg, val)
282 #define	CSR_WRITE_2(sc, reg, val)					\
283 	bus_space_write_2(sc->vr_bst, sc->vr_bsh, reg, val)
284 #define	CSR_WRITE_1(sc, reg, val)					\
285 	bus_space_write_1(sc->vr_bst, sc->vr_bsh, reg, val)
286 
287 #define	CSR_READ_4(sc, reg)						\
288 	bus_space_read_4(sc->vr_bst, sc->vr_bsh, reg)
289 #define	CSR_READ_2(sc, reg)						\
290 	bus_space_read_2(sc->vr_bst, sc->vr_bsh, reg)
291 #define	CSR_READ_1(sc, reg)						\
292 	bus_space_read_1(sc->vr_bst, sc->vr_bsh, reg)
293 
294 #define	VR_TIMEOUT		1000
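/* Paired with DELAY(10) in vr_reset(), this bounds the reset wait at ~10ms. */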
295 
296 static int vr_add_rxbuf		__P((struct vr_softc *, int));
297 
298 static void vr_rxeof		__P((struct vr_softc *));
299 static void vr_rxeoc		__P((struct vr_softc *));
300 static void vr_txeof		__P((struct vr_softc *));
301 static int vr_intr		__P((void *));
302 static void vr_start		__P((struct ifnet *));
303 static int vr_ioctl		__P((struct ifnet *, u_long, caddr_t));
304 static int vr_init		__P((struct ifnet *));
305 static void vr_stop		__P((struct ifnet *, int));
306 static void vr_rxdrain		__P((struct vr_softc *));
307 static void vr_watchdog		__P((struct ifnet *));
308 static void vr_tick		__P((void *));
309 
310 static int vr_ifmedia_upd	__P((struct ifnet *));
311 static void vr_ifmedia_sts	__P((struct ifnet *, struct ifmediareq *));
312 
313 static int vr_mii_readreg	__P((struct device *, int, int));
314 static void vr_mii_writereg	__P((struct device *, int, int, int));
315 static void vr_mii_statchg	__P((struct device *));
316 
317 static u_int8_t vr_calchash	__P((u_int8_t *));
318 static void vr_setmulti		__P((struct vr_softc *));
319 static void vr_reset		__P((struct vr_softc *));
320 
321 int	vr_copy_small = 0;
322 
323 #define	VR_SETBIT(sc, reg, x)				\
324 	CSR_WRITE_1(sc, reg,				\
325 		CSR_READ_1(sc, reg) | x)
326 
327 #define	VR_CLRBIT(sc, reg, x)				\
328 	CSR_WRITE_1(sc, reg,				\
329 		CSR_READ_1(sc, reg) & ~x)
330 
331 #define	VR_SETBIT16(sc, reg, x)				\
332 	CSR_WRITE_2(sc, reg,				\
333 		CSR_READ_2(sc, reg) | x)
334 
335 #define	VR_CLRBIT16(sc, reg, x)				\
336 	CSR_WRITE_2(sc, reg,				\
337 		CSR_READ_2(sc, reg) & ~x)
338 
339 #define	VR_SETBIT32(sc, reg, x)				\
340 	CSR_WRITE_4(sc, reg,				\
341 		CSR_READ_4(sc, reg) | x)
342 
343 #define	VR_CLRBIT32(sc, reg, x)				\
344 	CSR_WRITE_4(sc, reg,				\
345 		CSR_READ_4(sc, reg) & ~x)
346 
347 /*
348  * MII bit-bang glue.
349  */
350 u_int32_t vr_mii_bitbang_read __P((struct device *));
351 void vr_mii_bitbang_write __P((struct device *, u_int32_t));
352 
353 const struct mii_bitbang_ops vr_mii_bitbang_ops = {
354 	vr_mii_bitbang_read,
355 	vr_mii_bitbang_write,
356 	{
357 		VR_MIICMD_DATAOUT,	/* MII_BIT_MDO */
358 		VR_MIICMD_DATAIN,	/* MII_BIT_MDI */
359 		VR_MIICMD_CLK,		/* MII_BIT_MDC */
360 		VR_MIICMD_DIR,		/* MII_BIT_DIR_HOST_PHY */
361 		0,			/* MII_BIT_DIR_PHY_HOST */
362 	}
363 };
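
/*
 * The generic mii_bitbang_readreg()/mii_bitbang_writereg() helpers used
 * below drive the VR_MIICMD bits according to this table.
 */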
364 
365 u_int32_t
366 vr_mii_bitbang_read(self)
367 	struct device *self;
368 {
369 	struct vr_softc *sc = (void *) self;
370 
371 	return (CSR_READ_1(sc, VR_MIICMD));
372 }
373 
374 void
375 vr_mii_bitbang_write(self, val)
376 	struct device *self;
377 	u_int32_t val;
378 {
379 	struct vr_softc *sc = (void *) self;
380 
381 	CSR_WRITE_1(sc, VR_MIICMD, (val & 0xff) | VR_MIICMD_DIRECTPGM);
382 }
383 
384 /*
385  * Read a PHY register through the MII.
386  */
387 static int
388 vr_mii_readreg(self, phy, reg)
389 	struct device *self;
390 	int phy, reg;
391 {
392 	struct vr_softc *sc = (void *) self;
393 
394 	CSR_WRITE_1(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
395 	return (mii_bitbang_readreg(self, &vr_mii_bitbang_ops, phy, reg));
396 }
397 
398 /*
399  * Write to a PHY register through the MII.
400  */
401 static void
402 vr_mii_writereg(self, phy, reg, val)
403 	struct device *self;
404 	int phy, reg, val;
405 {
406 	struct vr_softc *sc = (void *) self;
407 
408 	CSR_WRITE_1(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
409 	mii_bitbang_writereg(self, &vr_mii_bitbang_ops, phy, reg, val);
410 }
411 
412 static void
413 vr_mii_statchg(self)
414 	struct device *self;
415 {
416 	struct vr_softc *sc = (struct vr_softc *)self;
417 
418 	/*
419 	 * In order to fiddle with the 'full-duplex' bit in the netconfig
420 	 * register, we first have to put the transmit and/or receive logic
421 	 * in the idle state.
422 	 */
423 	VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON));
424 
425 	if (sc->vr_mii.mii_media_active & IFM_FDX)
426 		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
427 	else
428 		VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
429 
430 	if (sc->vr_ec.ec_if.if_flags & IFF_RUNNING)
431 		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);
432 }
433 
434 /*
435  * Calculate CRC of a multicast group address, return the lower 6 bits.
436  */
437 static u_int8_t
438 vr_calchash(addr)
439 	u_int8_t *addr;
440 {
441 	u_int32_t crc, carry;
442 	int i, j;
443 	u_int8_t c;
444 
445 	/* Compute CRC for the address value. */
446 	crc = 0xFFFFFFFF; /* initial value */
447 
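	/*
	 * Note: bit 0 of crc is always clear after the shift below, so
	 * when carry is set, (crc ^ 0x04c11db6) | carry is equivalent to
	 * XORing with the standard Ethernet polynomial, 0x04c11db7.
	 */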
448 	for (i = 0; i < 6; i++) {
449 		c = *(addr + i);
450 		for (j = 0; j < 8; j++) {
451 			carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
452 			crc <<= 1;
453 			c >>= 1;
454 			if (carry)
455 				crc = (crc ^ 0x04c11db6) | carry;
456 		}
457 	}
458 
459 	/* return the filter bit position */
460 	return ((crc >> 26) & 0x0000003F);
461 }
462 
463 /*
464  * Program the 64-bit multicast hash filter.
465  */
466 static void
467 vr_setmulti(sc)
468 	struct vr_softc *sc;
469 {
470 	struct ifnet *ifp;
471 	int h = 0;
472 	u_int32_t hashes[2] = { 0, 0 };
473 	struct ether_multistep step;
474 	struct ether_multi *enm;
475 	int mcnt = 0;
476 	u_int8_t rxfilt;
477 
478 	ifp = &sc->vr_ec.ec_if;
479 
480 	rxfilt = CSR_READ_1(sc, VR_RXCFG);
481 
482 	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
483 		rxfilt |= VR_RXCFG_RX_MULTI;
484 		CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
485 		CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
486 		CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
487 		return;
488 	}
489 
490 	/* first, zot all the existing hash bits */
491 	CSR_WRITE_4(sc, VR_MAR0, 0);
492 	CSR_WRITE_4(sc, VR_MAR1, 0);
493 
494 	/* now program new ones */
495 	ETHER_FIRST_MULTI(step, &sc->vr_ec, enm);
496 	while (enm != NULL) {
497 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 6) != 0) {
			/*
			 * A range of multicast addresses can't be
			 * expressed in the 64-bit hash filter, so
			 * accept all multicast frames.
			 */
			hashes[0] = hashes[1] = 0xFFFFFFFF;
			mcnt++;
			break;
		}
499 
500 		h = vr_calchash(enm->enm_addrlo);
501 
502 		if (h < 32)
503 			hashes[0] |= (1 << h);
504 		else
505 			hashes[1] |= (1 << (h - 32));
506 		ETHER_NEXT_MULTI(step, enm);
507 		mcnt++;
508 	}
509 
510 	if (mcnt)
511 		rxfilt |= VR_RXCFG_RX_MULTI;
512 	else
513 		rxfilt &= ~VR_RXCFG_RX_MULTI;
514 
515 	CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
516 	CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
517 	CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
518 }
519 
520 static void
521 vr_reset(sc)
522 	struct vr_softc *sc;
523 {
524 	int i;
525 
526 	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);
527 
528 	for (i = 0; i < VR_TIMEOUT; i++) {
529 		DELAY(10);
530 		if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
531 			break;
532 	}
533 	if (i == VR_TIMEOUT)
534 		printf("%s: reset never completed!\n",
535 			sc->vr_dev.dv_xname);
536 
537 	/* Wait a little while for the chip to get its brains in order. */
538 	DELAY(1000);
539 }
540 
541 /*
542  * Initialize an RX descriptor and attach an MBUF cluster.
543  * Note: the length fields are only 11 bits wide, which means the
544  * largest size we can specify is 2047. This is important because
545  * MCLBYTES is 2048, so we have to subtract one, otherwise we'll
546  * overflow the field and make a mess.
547  */
548 static int
549 vr_add_rxbuf(sc, i)
550 	struct vr_softc *sc;
551 	int i;
552 {
553 	struct vr_descsoft *ds = VR_DSRX(sc, i);
554 	struct mbuf *m_new;
555 	int error;
556 
557 	MGETHDR(m_new, M_DONTWAIT, MT_DATA);
558 	if (m_new == NULL)
559 		return (ENOBUFS);
560 
561 	MCLGET(m_new, M_DONTWAIT);
562 	if ((m_new->m_flags & M_EXT) == 0) {
563 		m_freem(m_new);
564 		return (ENOBUFS);
565 	}
566 
567 	if (ds->ds_mbuf != NULL)
568 		bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);
569 
570 	ds->ds_mbuf = m_new;
571 
572 	error = bus_dmamap_load(sc->vr_dmat, ds->ds_dmamap,
573 	    m_new->m_ext.ext_buf, m_new->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
574 	if (error) {
575 		printf("%s: unable to load rx DMA map %d, error = %d\n",
576 		    sc->vr_dev.dv_xname, i, error);
577 		panic("vr_add_rxbuf");		/* XXX */
578 	}
579 
580 	bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
581 	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
582 
583 	VR_INIT_RXDESC(sc, i);
584 
585 	return (0);
586 }
587 
588 /*
589  * A frame has been uploaded: pass the resulting mbuf chain up to
590  * the higher level protocols.
591  */
592 static void
593 vr_rxeof(sc)
594 	struct vr_softc *sc;
595 {
596 	struct mbuf *m;
597 	struct ifnet *ifp;
598 	struct vr_desc *d;
599 	struct vr_descsoft *ds;
600 	int i, total_len;
601 	u_int32_t rxstat;
602 
603 	ifp = &sc->vr_ec.ec_if;
604 
605 	for (i = sc->vr_rxptr;; i = VR_NEXTRX(i)) {
606 		d = VR_CDRX(sc, i);
607 		ds = VR_DSRX(sc, i);
608 
609 		VR_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
610 
611 		rxstat = le32toh(d->vr_status);
612 
613 		if (rxstat & VR_RXSTAT_OWN) {
614 			/*
615 			 * We have processed all of the receive buffers.
616 			 */
617 			break;
618 		}
619 
620 		/*
621 		 * If an error occurs, update stats, clear the
622 		 * status word and leave the mbuf cluster in place:
623 		 * it should simply get re-used next time this descriptor
624 		 * comes up in the ring.
625 		 */
626 		if (rxstat & VR_RXSTAT_RXERR) {
627 			const char *errstr;
628 
629 			ifp->if_ierrors++;
630 			switch (rxstat & 0x000000FF) {
631 			case VR_RXSTAT_CRCERR:
632 				errstr = "crc error";
633 				break;
634 			case VR_RXSTAT_FRAMEALIGNERR:
635 				errstr = "frame alignment error";
636 				break;
637 			case VR_RXSTAT_FIFOOFLOW:
638 				errstr = "FIFO overflow";
639 				break;
640 			case VR_RXSTAT_GIANT:
641 				errstr = "received giant packet";
642 				break;
643 			case VR_RXSTAT_RUNT:
644 				errstr = "received runt packet";
645 				break;
646 			case VR_RXSTAT_BUSERR:
647 				errstr = "system bus error";
648 				break;
649 			case VR_RXSTAT_BUFFERR:
650 				errstr = "rx buffer error";
651 				break;
652 			default:
653 				errstr = "unknown rx error";
654 				break;
655 			}
656 			printf("%s: receive error: %s\n", sc->vr_dev.dv_xname,
657 			    errstr);
658 
659 			VR_INIT_RXDESC(sc, i);
660 
661 			continue;
662 		}
663 
664 		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
665 		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
666 
667 		/* No errors; receive the packet. */
668 		total_len = VR_RXBYTES(le32toh(d->vr_status));
669 
670 #ifdef __NO_STRICT_ALIGNMENT
671 		/*
672 		 * If the packet is small enough to fit in a
673 		 * single header mbuf, allocate one and copy
674 		 * the data into it.  This greatly reduces
675 		 * memory consumption when we receive lots
676 		 * of small packets.
677 		 *
678 		 * Otherwise, we add a new buffer to the receive
679 		 * chain.  If this fails, we drop the packet and
680 		 * recycle the old buffer.
681 		 */
682 		if (vr_copy_small != 0 && total_len <= MHLEN) {
683 			MGETHDR(m, M_DONTWAIT, MT_DATA);
684 			if (m == NULL)
685 				goto dropit;
686 			memcpy(mtod(m, caddr_t),
687 			    mtod(ds->ds_mbuf, caddr_t), total_len);
688 			VR_INIT_RXDESC(sc, i);
689 			bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
690 			    ds->ds_dmamap->dm_mapsize,
691 			    BUS_DMASYNC_PREREAD);
692 		} else {
693 			m = ds->ds_mbuf;
694 			if (vr_add_rxbuf(sc, i) == ENOBUFS) {
695  dropit:
696 				ifp->if_ierrors++;
697 				VR_INIT_RXDESC(sc, i);
698 				bus_dmamap_sync(sc->vr_dmat,
699 				    ds->ds_dmamap, 0,
700 				    ds->ds_dmamap->dm_mapsize,
701 				    BUS_DMASYNC_PREREAD);
702 				continue;
703 			}
704 		}
705 #else
706 		/*
707 		 * The Rhine's packet buffers must be 4-byte aligned.
708 		 * But this means that the data after the Ethernet header
709 		 * is misaligned.  We must allocate a new buffer and
710 		 * copy the data, shifted forward 2 bytes.
711 		 */
712 		MGETHDR(m, M_DONTWAIT, MT_DATA);
713 		if (m == NULL) {
714  dropit:
715 			ifp->if_ierrors++;
716 			VR_INIT_RXDESC(sc, i);
717 			bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
718 			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
719 			continue;
720 		}
721 		if (total_len > (MHLEN - 2)) {
722 			MCLGET(m, M_DONTWAIT);
723 			if ((m->m_flags & M_EXT) == 0) {
724 				m_freem(m);
725 				goto dropit;
726 			}
727 		}
728 		m->m_data += 2;
729 
730 		/*
731 		 * Note that we use clusters for incoming frames, so the
732 		 * buffer is virtually contiguous.
733 		 */
734 		memcpy(mtod(m, caddr_t), mtod(ds->ds_mbuf, caddr_t),
735 		    total_len);
736 
737 		/* Allow the receive descriptor to continue using its mbuf. */
738 		VR_INIT_RXDESC(sc, i);
739 		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
740 		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
741 #endif /* __NO_STRICT_ALIGNMENT */
742 
743 		/*
744 		 * The Rhine chip includes the FCS with every
745 		 * received packet.
746 		 */
747 		m->m_flags |= M_HASFCS;
748 
749 		ifp->if_ipackets++;
750 		m->m_pkthdr.rcvif = ifp;
751 		m->m_pkthdr.len = m->m_len = total_len;
752 #if NBPFILTER > 0
753 		/*
754 		 * Handle BPF listeners. Let the BPF user see the packet, but
755 		 * don't pass it up to the ether_input() layer unless it's
756 		 * a broadcast packet, multicast packet, matches our ethernet
757 		 * address or the interface is in promiscuous mode.
758 		 */
759 		if (ifp->if_bpf)
760 			bpf_mtap(ifp->if_bpf, m);
761 #endif
762 		/* Pass it on. */
763 		(*ifp->if_input)(ifp, m);
764 	}
765 
766 	/* Update the receive pointer. */
767 	sc->vr_rxptr = i;
768 }
769 
770 static void
771 vr_rxeoc(sc)
772 	struct vr_softc *sc;
773 {
774 
775 	vr_rxeof(sc);
776 	VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
777 	CSR_WRITE_4(sc, VR_RXADDR, VR_CDRXADDR(sc, sc->vr_rxptr));
778 	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
779 	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO);
780 }
781 
782 /*
783  * A frame was downloaded to the chip. It's safe for us to clean up
784  * the list buffers.
785  */
786 static void
787 vr_txeof(sc)
788 	struct vr_softc *sc;
789 {
790 	struct ifnet *ifp = &sc->vr_ec.ec_if;
791 	struct vr_desc *d;
792 	struct vr_descsoft *ds;
793 	u_int32_t txstat;
794 	int i;
795 
796 	ifp->if_flags &= ~IFF_OACTIVE;
797 
798 	/*
799 	 * Go through our tx list and free mbufs for those
800 	 * frames that have been transmitted.
801 	 */
802 	for (i = sc->vr_txdirty; sc->vr_txpending != 0;
803 	     i = VR_NEXTTX(i), sc->vr_txpending--) {
804 		d = VR_CDTX(sc, i);
805 		ds = VR_DSTX(sc, i);
806 
807 		VR_CDTXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
808 
809 		txstat = le32toh(d->vr_status);
810 		if (txstat & VR_TXSTAT_OWN)
811 			break;
812 
813 		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap,
814 		    0, ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
815 		bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);
816 		m_freem(ds->ds_mbuf);
817 		ds->ds_mbuf = NULL;
818 
819 		if (txstat & VR_TXSTAT_ERRSUM) {
820 			ifp->if_oerrors++;
821 			if (txstat & VR_TXSTAT_DEFER)
822 				ifp->if_collisions++;
823 			if (txstat & VR_TXSTAT_LATECOLL)
824 				ifp->if_collisions++;
825 		}
826 
827 		ifp->if_collisions += (txstat & VR_TXSTAT_COLLCNT) >> 3;
828 		ifp->if_opackets++;
829 	}
830 
831 	/* Update the dirty transmit buffer pointer. */
832 	sc->vr_txdirty = i;
833 
834 	/*
835 	 * Cancel the watchdog timer if there are no pending
836 	 * transmissions.
837 	 */
838 	if (sc->vr_txpending == 0)
839 		ifp->if_timer = 0;
840 }
841 
842 static int
843 vr_intr(arg)
844 	void *arg;
845 {
846 	struct vr_softc *sc;
847 	struct ifnet *ifp;
848 	u_int16_t status;
849 	int handled = 0, dotx = 0;
850 
851 	sc = arg;
852 	ifp = &sc->vr_ec.ec_if;
853 
854 	/* Suppress unwanted interrupts. */
855 	if ((ifp->if_flags & IFF_UP) == 0) {
856 		vr_stop(ifp, 1);
857 		return (0);
858 	}
859 
860 	/* Disable interrupts. */
861 	CSR_WRITE_2(sc, VR_IMR, 0x0000);
862 
863 	for (;;) {
864 		status = CSR_READ_2(sc, VR_ISR);
865 		if (status)
866 			CSR_WRITE_2(sc, VR_ISR, status);
867 
868 		if ((status & VR_INTRS) == 0)
869 			break;
870 
871 		handled = 1;
872 
873 		if (status & VR_ISR_RX_OK)
874 			vr_rxeof(sc);
875 
876 		if (status &
877 		    (VR_ISR_RX_ERR | VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW |
878 		     VR_ISR_RX_DROPPED))
879 			vr_rxeoc(sc);
880 
881 		if (status & VR_ISR_TX_OK) {
882 			dotx = 1;
883 			vr_txeof(sc);
884 		}
885 
886 		if (status & (VR_ISR_TX_UNDERRUN | VR_ISR_TX_ABRT)) {
887 			if (status & VR_ISR_TX_UNDERRUN)
888 				printf("%s: transmit underrun\n",
889 				    sc->vr_dev.dv_xname);
890 			if (status & VR_ISR_TX_ABRT)
891 				printf("%s: transmit aborted\n",
892 				    sc->vr_dev.dv_xname);
893 			ifp->if_oerrors++;
894 			dotx = 1;
895 			vr_txeof(sc);
896 			if (sc->vr_txpending) {
897 				VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON);
898 				VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO);
899 			}
900 		}
901 
902 		if (status & VR_ISR_BUSERR) {
903 			printf("%s: PCI bus error\n", sc->vr_dev.dv_xname);
904 			/* vr_init() calls vr_start() */
905 			dotx = 0;
906 			(void) vr_init(ifp);
907 		}
908 	}
909 
910 	/* Re-enable interrupts. */
911 	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
912 
913 	if (dotx)
914 		vr_start(ifp);
915 
916 	return (handled);
917 }
918 
919 /*
920  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
921  * to the mbuf data regions directly in the transmit lists. We also save a
922  * copy of the pointers since the transmit list fragment pointers are
923  * physical addresses.
924  */
925 static void
926 vr_start(ifp)
927 	struct ifnet *ifp;
928 {
929 	struct vr_softc *sc = ifp->if_softc;
930 	struct mbuf *m0, *m;
931 	struct vr_desc *d;
932 	struct vr_descsoft *ds;
933 	int error, firsttx, nexttx, opending;
934 
935 	/*
936 	 * Remember the previous txpending and the first transmit
937 	 * descriptor we use.
938 	 */
939 	opending = sc->vr_txpending;
940 	firsttx = VR_NEXTTX(sc->vr_txlast);
941 
942 	/*
943 	 * Loop through the send queue, setting up transmit descriptors
944 	 * until we drain the queue, or use up all available transmit
945 	 * descriptors.
946 	 */
947 	while (sc->vr_txpending < VR_NTXDESC) {
948 		/*
949 		 * Grab a packet off the queue.
950 		 */
951 		IFQ_POLL(&ifp->if_snd, m0);
952 		if (m0 == NULL)
953 			break;
954 
955 		/*
956 		 * Get the next available transmit descriptor.
957 		 */
958 		nexttx = VR_NEXTTX(sc->vr_txlast);
959 		d = VR_CDTX(sc, nexttx);
960 		ds = VR_DSTX(sc, nexttx);
961 
962 		/*
963 		 * Load the DMA map.  If this fails, the packet didn't
964 		 * fit in one DMA segment, and we need to copy.  Note
965 		 * that the packet must also be 4-byte aligned.
966 		 */
967 		if ((mtod(m0, bus_addr_t) & 3) != 0 ||
968 		    bus_dmamap_load_mbuf(sc->vr_dmat, ds->ds_dmamap, m0,
969 		     BUS_DMA_NOWAIT) != 0) {
970 			MGETHDR(m, M_DONTWAIT, MT_DATA);
971 			if (m == NULL) {
972 				printf("%s: unable to allocate Tx mbuf\n",
973 				    sc->vr_dev.dv_xname);
974 				break;
975 			}
976 			if (m0->m_pkthdr.len > MHLEN) {
977 				MCLGET(m, M_DONTWAIT);
978 				if ((m->m_flags & M_EXT) == 0) {
979 					printf("%s: unable to allocate Tx "
980 					    "cluster\n", sc->vr_dev.dv_xname);
981 					m_freem(m);
982 					break;
983 				}
984 			}
985 			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
986 			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
987 			m_freem(m0);
988 			m0 = m;
989 			error = bus_dmamap_load_mbuf(sc->vr_dmat,
990 			    ds->ds_dmamap, m0, BUS_DMA_NOWAIT);
991 			if (error) {
992 				printf("%s: unable to load Tx buffer, "
993 				    "error = %d\n", sc->vr_dev.dv_xname, error);
994 				break;
995 			}
996 		}
997 
998 		IFQ_DEQUEUE(&ifp->if_snd, m0);
999 
1000 		/* Sync the DMA map. */
1001 		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
1002 		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1003 
1004 		/*
1005 		 * Store a pointer to the packet so we can free it later.
1006 		 */
1007 		ds->ds_mbuf = m0;
1008 
1009 #if NBPFILTER > 0
1010 		/*
1011 		 * If there's a BPF listener, bounce a copy of this frame
1012 		 * to him.
1013 		 */
1014 		if (ifp->if_bpf)
1015 			bpf_mtap(ifp->if_bpf, m0);
1016 #endif
1017 
1018 		/*
1019 		 * Fill in the transmit descriptor.  The Rhine
1020 		 * doesn't auto-pad, so we have to do this ourselves.
1021 		 */
1022 		d->vr_data = htole32(ds->ds_dmamap->dm_segs[0].ds_addr);
1023 		d->vr_ctl = htole32(m0->m_pkthdr.len < VR_MIN_FRAMELEN ?
1024 		    VR_MIN_FRAMELEN : m0->m_pkthdr.len);
1025 		d->vr_ctl |=
1026 		    htole32(VR_TXCTL_TLINK|VR_TXCTL_FIRSTFRAG|
1027 		    VR_TXCTL_LASTFRAG);
1028 
1029 		/*
1030 		 * If this is the first descriptor we're enqueuing,
1031 		 * don't give it to the Rhine yet.  That could cause
1032 		 * a race condition.  We'll do it below.
1033 		 */
1034 		if (nexttx == firsttx)
1035 			d->vr_status = 0;
1036 		else
1037 			d->vr_status = htole32(VR_TXSTAT_OWN);
1038 
1039 		VR_CDTXSYNC(sc, nexttx,
1040 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1041 
1042 		/* Advance the tx pointer. */
1043 		sc->vr_txpending++;
1044 		sc->vr_txlast = nexttx;
1045 	}
1046 
1047 	if (sc->vr_txpending == VR_NTXDESC) {
1048 		/* No more slots left; notify upper layer. */
1049 		ifp->if_flags |= IFF_OACTIVE;
1050 	}
1051 
1052 	if (sc->vr_txpending != opending) {
1053 		/*
1054 		 * We enqueued packets.  If the transmitter was idle,
1055 		 * reset the txdirty pointer.
1056 		 */
1057 		if (opending == 0)
1058 			sc->vr_txdirty = firsttx;
1059 
1060 		/*
1061 		 * Cause a transmit interrupt to happen on the
1062 		 * last packet we enqueued.
1063 		 */
1064 		VR_CDTX(sc, sc->vr_txlast)->vr_ctl |= htole32(VR_TXCTL_FINT);
1065 		VR_CDTXSYNC(sc, sc->vr_txlast,
1066 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1067 
1068 		/*
1069 		 * The entire packet chain is set up.  Give the
1070 		 * first descriptor to the Rhine now.
1071 		 */
1072 		VR_CDTX(sc, firsttx)->vr_status = htole32(VR_TXSTAT_OWN);
1073 		VR_CDTXSYNC(sc, firsttx,
1074 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1075 
1076 		/* Start the transmitter. */
1077 		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_TX_GO);
1078 
1079 		/* Set the watchdog timer in case the chip flakes out. */
1080 		ifp->if_timer = 5;
1081 	}
1082 }
1083 
1084 /*
1085  * Initialize the interface.  Must be called at splnet.
1086  */
1087 static int
1088 vr_init(ifp)
1089 	struct ifnet *ifp;
1090 {
1091 	struct vr_softc *sc = ifp->if_softc;
1092 	struct vr_desc *d;
1093 	struct vr_descsoft *ds;
1094 	int i, error = 0;
1095 
1096 	/* Cancel pending I/O. */
1097 	vr_stop(ifp, 0);
1098 
1099 	/* Reset the Rhine to a known state. */
1100 	vr_reset(sc);
1101 
1102 	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
1103 	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_STORENFWD);
1104 
1105 	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
1106 	VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD);
1107 
1108 	/*
1109 	 * Initialize the transmit descriptor ring.  txlast is initialized
1110 	 * to the end of the list so that it will wrap around to the first
1111 	 * descriptor when the first packet is transmitted.
1112 	 */
1113 	for (i = 0; i < VR_NTXDESC; i++) {
1114 		d = VR_CDTX(sc, i);
1115 		memset(d, 0, sizeof(struct vr_desc));
1116 		d->vr_next = htole32(VR_CDTXADDR(sc, VR_NEXTTX(i)));
1117 		VR_CDTXSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1118 	}
1119 	sc->vr_txpending = 0;
1120 	sc->vr_txdirty = 0;
1121 	sc->vr_txlast = VR_NTXDESC - 1;
1122 
1123 	/*
1124 	 * Initialize the receive descriptor ring.
1125 	 */
1126 	for (i = 0; i < VR_NRXDESC; i++) {
1127 		ds = VR_DSRX(sc, i);
1128 		if (ds->ds_mbuf == NULL) {
1129 			if ((error = vr_add_rxbuf(sc, i)) != 0) {
1130 				printf("%s: unable to allocate or map rx "
1131 				    "buffer %d, error = %d\n",
1132 				    sc->vr_dev.dv_xname, i, error);
1133 				/*
1134 				 * XXX Should attempt to run with fewer receive
1135 				 * XXX buffers instead of just failing.
1136 				 */
1137 				vr_rxdrain(sc);
1138 				goto out;
1139 			}
1140 		}
1141 	}
1142 	sc->vr_rxptr = 0;
1143 
1144 	/* If we want promiscuous mode, set the allframes bit. */
1145 	if (ifp->if_flags & IFF_PROMISC)
1146 		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
1147 	else
1148 		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
1149 
1150 	/* Set capture broadcast bit to capture broadcast frames. */
1151 	if (ifp->if_flags & IFF_BROADCAST)
1152 		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
1153 	else
1154 		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
1155 
1156 	/* Program the multicast filter, if necessary. */
1157 	vr_setmulti(sc);
1158 
1159 	/* Give the transmit and receive rings to the Rhine. */
1160 	CSR_WRITE_4(sc, VR_RXADDR, VR_CDRXADDR(sc, sc->vr_rxptr));
1161 	CSR_WRITE_4(sc, VR_TXADDR, VR_CDTXADDR(sc, VR_NEXTTX(sc->vr_txlast)));
1162 
1163 	/* Set current media. */
1164 	mii_mediachg(&sc->vr_mii);
1165 
1166 	/* Enable receiver and transmitter. */
1167 	CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START|
1168 				    VR_CMD_TX_ON|VR_CMD_RX_ON|
1169 				    VR_CMD_RX_GO);
1170 
1171 	/* Enable interrupts. */
1172 	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
1173 	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
1174 
1175 	ifp->if_flags |= IFF_RUNNING;
1176 	ifp->if_flags &= ~IFF_OACTIVE;
1177 
1178 	/* Start one second timer. */
1179 	callout_reset(&sc->vr_tick_ch, hz, vr_tick, sc);
1180 
1181 	/* Attempt to start output on the interface. */
1182 	vr_start(ifp);
1183 
1184  out:
1185 	if (error)
1186 		printf("%s: interface not running\n", sc->vr_dev.dv_xname);
1187 	return (error);
1188 }
1189 
1190 /*
1191  * Set media options.
1192  */
1193 static int
1194 vr_ifmedia_upd(ifp)
1195 	struct ifnet *ifp;
1196 {
1197 	struct vr_softc *sc = ifp->if_softc;
1198 
1199 	if (ifp->if_flags & IFF_UP)
1200 		mii_mediachg(&sc->vr_mii);
1201 	return (0);
1202 }
1203 
1204 /*
1205  * Report current media status.
1206  */
1207 static void
1208 vr_ifmedia_sts(ifp, ifmr)
1209 	struct ifnet *ifp;
1210 	struct ifmediareq *ifmr;
1211 {
1212 	struct vr_softc *sc = ifp->if_softc;
1213 
1214 	mii_pollstat(&sc->vr_mii);
1215 	ifmr->ifm_status = sc->vr_mii.mii_media_status;
1216 	ifmr->ifm_active = sc->vr_mii.mii_media_active;
1217 }
1218 
1219 static int
1220 vr_ioctl(ifp, command, data)
1221 	struct ifnet *ifp;
1222 	u_long command;
1223 	caddr_t data;
1224 {
1225 	struct vr_softc *sc = ifp->if_softc;
1226 	struct ifreq *ifr = (struct ifreq *)data;
1227 	int s, error = 0;
1228 
1229 	s = splnet();
1230 
1231 	switch (command) {
1232 	case SIOCGIFMEDIA:
1233 	case SIOCSIFMEDIA:
1234 		error = ifmedia_ioctl(ifp, ifr, &sc->vr_mii.mii_media, command);
1235 		break;
1236 
1237 	default:
1238 		error = ether_ioctl(ifp, command, data);
1239 		if (error == ENETRESET) {
1240 			/*
1241 			 * Multicast list has changed; set the hardware filter
1242 			 * accordingly.
1243 			 */
1244 			vr_setmulti(sc);
1245 			error = 0;
1246 		}
1247 		break;
1248 	}
1249 
1250 	splx(s);
1251 	return (error);
1252 }
1253 
1254 static void
1255 vr_watchdog(ifp)
1256 	struct ifnet *ifp;
1257 {
1258 	struct vr_softc *sc = ifp->if_softc;
1259 
1260 	printf("%s: device timeout\n", sc->vr_dev.dv_xname);
1261 	ifp->if_oerrors++;
1262 
1263 	(void) vr_init(ifp);
1264 }
1265 
1266 /*
1267  * One second timer, used to tick MII.
1268  */
1269 static void
1270 vr_tick(arg)
1271 	void *arg;
1272 {
1273 	struct vr_softc *sc = arg;
1274 	int s;
1275 
1276 	s = splnet();
1277 	mii_tick(&sc->vr_mii);
1278 	splx(s);
1279 
1280 	callout_reset(&sc->vr_tick_ch, hz, vr_tick, sc);
1281 }
1282 
1283 /*
1284  * Drain the receive queue.
1285  */
1286 static void
1287 vr_rxdrain(sc)
1288 	struct vr_softc *sc;
1289 {
1290 	struct vr_descsoft *ds;
1291 	int i;
1292 
1293 	for (i = 0; i < VR_NRXDESC; i++) {
1294 		ds = VR_DSRX(sc, i);
1295 		if (ds->ds_mbuf != NULL) {
1296 			bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);
1297 			m_freem(ds->ds_mbuf);
1298 			ds->ds_mbuf = NULL;
1299 		}
1300 	}
1301 }
1302 
1303 /*
1304  * Stop the adapter and free any mbufs allocated to the
1305  * transmit lists.
1306  */
1307 static void
1308 vr_stop(ifp, disable)
1309 	struct ifnet *ifp;
1310 	int disable;
1311 {
1312 	struct vr_softc *sc = ifp->if_softc;
1313 	struct vr_descsoft *ds;
1314 	int i;
1315 
1316 	/* Cancel one second timer. */
1317 	callout_stop(&sc->vr_tick_ch);
1318 
1319 	/* Down the MII. */
1320 	mii_down(&sc->vr_mii);
1321 
1322 	ifp = &sc->vr_ec.ec_if;
1323 	ifp->if_timer = 0;
1324 
1325 	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP);
1326 	VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON));
1327 	CSR_WRITE_2(sc, VR_IMR, 0x0000);
1328 	CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
1329 	CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);
1330 
1331 	/*
1332 	 * Release any queued transmit buffers.
1333 	 */
1334 	for (i = 0; i < VR_NTXDESC; i++) {
1335 		ds = VR_DSTX(sc, i);
1336 		if (ds->ds_mbuf != NULL) {
1337 			bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);
1338 			m_freem(ds->ds_mbuf);
1339 			ds->ds_mbuf = NULL;
1340 		}
1341 	}
1342 
1343 	if (disable)
1344 		vr_rxdrain(sc);
1345 
1346 	/*
1347 	 * Mark the interface down and cancel the watchdog timer.
1348 	 */
1349 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1350 	ifp->if_timer = 0;
1351 }
1352 
1353 static struct vr_type *vr_lookup __P((struct pci_attach_args *));
1354 static int vr_probe __P((struct device *, struct cfdata *, void *));
1355 static void vr_attach __P((struct device *, struct device *, void *));
1356 static void vr_shutdown __P((void *));
1357 
1358 struct cfattach vr_ca = {
1359 	sizeof (struct vr_softc), vr_probe, vr_attach
1360 };
1361 
1362 static struct vr_type *
1363 vr_lookup(pa)
1364 	struct pci_attach_args *pa;
1365 {
1366 	struct vr_type *vrt;
1367 
1368 	for (vrt = vr_devs; vrt->vr_name != NULL; vrt++) {
1369 		if (PCI_VENDOR(pa->pa_id) == vrt->vr_vid &&
1370 		    PCI_PRODUCT(pa->pa_id) == vrt->vr_did)
1371 			return (vrt);
1372 	}
1373 	return (NULL);
1374 }
1375 
1376 static int
1377 vr_probe(parent, match, aux)
1378 	struct device *parent;
1379 	struct cfdata *match;
1380 	void *aux;
1381 {
1382 	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
1383 
1384 	if (vr_lookup(pa) != NULL)
1385 		return (1);
1386 
1387 	return (0);
1388 }
1389 
1390 /*
1391  * Stop all chip I/O so that the kernel's probe routines don't
1392  * get confused by errant DMAs when rebooting.
1393  */
1394 static void
1395 vr_shutdown(arg)
1396 	void *arg;
1397 {
1398 	struct vr_softc *sc = (struct vr_softc *)arg;
1399 
1400 	vr_stop(&sc->vr_ec.ec_if, 1);
1401 }
1402 
1403 /*
1404  * Attach the interface. Allocate softc structures, do ifmedia
1405  * setup and ethernet/BPF attach.
1406  */
1407 static void
1408 vr_attach(parent, self, aux)
1409 	struct device *parent;
1410 	struct device *self;
1411 	void *aux;
1412 {
1413 	struct vr_softc *sc = (struct vr_softc *) self;
1414 	struct pci_attach_args *pa = (struct pci_attach_args *) aux;
1415 	bus_dma_segment_t seg;
1416 	struct vr_type *vrt;
1417 	u_int32_t command;
1418 	struct ifnet *ifp;
1419 	u_char eaddr[ETHER_ADDR_LEN];
1420 	int i, rseg, error;
1421 
1422 #define	PCI_CONF_WRITE(r, v)	pci_conf_write(pa->pa_pc, pa->pa_tag, (r), (v))
1423 #define	PCI_CONF_READ(r)	pci_conf_read(pa->pa_pc, pa->pa_tag, (r))
1424 
1425 	callout_init(&sc->vr_tick_ch);
1426 
1427 	vrt = vr_lookup(pa);
1428 	if (vrt == NULL) {
1429 		printf("\n");
1430 		panic("vr_attach: impossible");
1431 	}
1432 
1433 	printf(": %s Ethernet\n", vrt->vr_name);
1434 
1435 	/*
1436 	 * Handle power management nonsense.
1437 	 */
1438 
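	/* A PCI capability ID of 0x01 indicates power management. */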
1439 	command = PCI_CONF_READ(VR_PCI_CAPID) & 0x000000FF;
1440 	if (command == 0x01) {
1441 		command = PCI_CONF_READ(VR_PCI_PWRMGMTCTRL);
1442 		if (command & VR_PSTATE_MASK) {
1443 			u_int32_t iobase, membase, irq;
1444 
1445 			/* Save important PCI config data. */
1446 			iobase = PCI_CONF_READ(VR_PCI_LOIO);
1447 			membase = PCI_CONF_READ(VR_PCI_LOMEM);
1448 			irq = PCI_CONF_READ(VR_PCI_INTLINE);
1449 
1450 			/* Reset the power state. */
1451 			printf("%s: chip is in D%d power mode "
1452 				"-- setting to D0\n",
1453 				sc->vr_dev.dv_xname, command & VR_PSTATE_MASK);
1454 			command &= 0xFFFFFFFC;
1455 			PCI_CONF_WRITE(VR_PCI_PWRMGMTCTRL, command);
1456 
1457 			/* Restore PCI config data. */
1458 			PCI_CONF_WRITE(VR_PCI_LOIO, iobase);
1459 			PCI_CONF_WRITE(VR_PCI_LOMEM, membase);
1460 			PCI_CONF_WRITE(VR_PCI_INTLINE, irq);
1461 		}
1462 	}
1463 
1464 	/* Make sure bus mastering is enabled. */
1465 	command = PCI_CONF_READ(PCI_COMMAND_STATUS_REG);
1466 	command |= PCI_COMMAND_MASTER_ENABLE;
1467 	PCI_CONF_WRITE(PCI_COMMAND_STATUS_REG, command);
1468 
1469 	/*
1470 	 * Map control/status registers.
1471 	 */
1472 	{
1473 		bus_space_tag_t iot, memt;
1474 		bus_space_handle_t ioh, memh;
1475 		int ioh_valid, memh_valid;
1476 		pci_intr_handle_t intrhandle;
1477 		const char *intrstr;
1478 
1479 		ioh_valid = (pci_mapreg_map(pa, VR_PCI_LOIO,
1480 			PCI_MAPREG_TYPE_IO, 0,
1481 			&iot, &ioh, NULL, NULL) == 0);
1482 		memh_valid = (pci_mapreg_map(pa, VR_PCI_LOMEM,
1483 			PCI_MAPREG_TYPE_MEM |
1484 			PCI_MAPREG_MEM_TYPE_32BIT,
1485 			0, &memt, &memh, NULL, NULL) == 0);
1486 #if defined(VR_USEIOSPACE)
1487 		if (ioh_valid) {
1488 			sc->vr_bst = iot;
1489 			sc->vr_bsh = ioh;
1490 		} else if (memh_valid) {
1491 			sc->vr_bst = memt;
1492 			sc->vr_bsh = memh;
1493 		}
1494 #else
1495 		if (memh_valid) {
1496 			sc->vr_bst = memt;
1497 			sc->vr_bsh = memh;
1498 		} else if (ioh_valid) {
1499 			sc->vr_bst = iot;
1500 			sc->vr_bsh = ioh;
1501 		}
1502 #endif
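		/* This "else" binds to whichever if-chain was compiled above. */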
1503 		else {
1504 			printf(": unable to map device registers\n");
1505 			return;
1506 		}
1507 
1508 		/* Allocate interrupt */
1509 		if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
1510 				pa->pa_intrline, &intrhandle)) {
1511 			printf("%s: couldn't map interrupt\n",
1512 				sc->vr_dev.dv_xname);
1513 			return;
1514 		}
1515 		intrstr = pci_intr_string(pa->pa_pc, intrhandle);
1516 		sc->vr_ih = pci_intr_establish(pa->pa_pc, intrhandle, IPL_NET,
1517 						vr_intr, sc);
1518 		if (sc->vr_ih == NULL) {
1519 			printf("%s: couldn't establish interrupt",
1520 				sc->vr_dev.dv_xname);
1521 			if (intrstr != NULL)
1522 				printf(" at %s", intrstr);
1523 			printf("\n");
			return;
1524 		}
1525 		printf("%s: interrupting at %s\n",
1526 			sc->vr_dev.dv_xname, intrstr);
1527 	}
1528 
1529 	/* Reset the adapter. */
1530 	vr_reset(sc);
1531 
1532 	/*
1533 	 * Get station address. The way the Rhine chips work,
1534 	 * you're not allowed to directly access the EEPROM once
1535 	 * they've been programmed a special way. Consequently,
1536 	 * we need to read the node address from the PAR0 and PAR1
1537 	 * registers.
1538 	 */
1539 	VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
1540 	DELAY(200);
1541 	for (i = 0; i < ETHER_ADDR_LEN; i++)
1542 		eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);
1543 
1544 	/*
1545 	 * A Rhine chip was detected. Inform the world.
1546 	 */
1547 	printf("%s: Ethernet address: %s\n",
1548 		sc->vr_dev.dv_xname, ether_sprintf(eaddr));
1549 
1550 	bcopy(eaddr, sc->vr_enaddr, ETHER_ADDR_LEN);
1551 
1552 	sc->vr_dmat = pa->pa_dmat;
1553 
1554 	/*
1555 	 * Allocate the control data structures, and create and load
1556 	 * the DMA map for it.
1557 	 */
1558 	if ((error = bus_dmamem_alloc(sc->vr_dmat,
1559 	    sizeof(struct vr_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
1560 	    0)) != 0) {
1561 		printf("%s: unable to allocate control data, error = %d\n",
1562 		    sc->vr_dev.dv_xname, error);
1563 		goto fail_0;
1564 	}
1565 
1566 	if ((error = bus_dmamem_map(sc->vr_dmat, &seg, rseg,
1567 	    sizeof(struct vr_control_data), (caddr_t *)&sc->vr_control_data,
1568 	    BUS_DMA_COHERENT)) != 0) {
1569 		printf("%s: unable to map control data, error = %d\n",
1570 		    sc->vr_dev.dv_xname, error);
1571 		goto fail_1;
1572 	}
1573 
1574 	if ((error = bus_dmamap_create(sc->vr_dmat,
1575 	    sizeof(struct vr_control_data), 1,
1576 	    sizeof(struct vr_control_data), 0, 0,
1577 	    &sc->vr_cddmamap)) != 0) {
1578 		printf("%s: unable to create control data DMA map, "
1579 		    "error = %d\n", sc->vr_dev.dv_xname, error);
1580 		goto fail_2;
1581 	}
1582 
1583 	if ((error = bus_dmamap_load(sc->vr_dmat, sc->vr_cddmamap,
1584 	    sc->vr_control_data, sizeof(struct vr_control_data), NULL,
1585 	    0)) != 0) {
1586 		printf("%s: unable to load control data DMA map, error = %d\n",
1587 		    sc->vr_dev.dv_xname, error);
1588 		goto fail_3;
1589 	}
1590 
1591 	/*
1592 	 * Create the transmit buffer DMA maps.
1593 	 */
1594 	for (i = 0; i < VR_NTXDESC; i++) {
1595 		if ((error = bus_dmamap_create(sc->vr_dmat, MCLBYTES,
1596 		    1, MCLBYTES, 0, 0,
1597 		    &VR_DSTX(sc, i)->ds_dmamap)) != 0) {
1598 			printf("%s: unable to create tx DMA map %d, "
1599 			    "error = %d\n", sc->vr_dev.dv_xname, i, error);
1600 			goto fail_4;
1601 		}
1602 	}
1603 
1604 	/*
1605 	 * Create the receive buffer DMA maps.
1606 	 */
1607 	for (i = 0; i < VR_NRXDESC; i++) {
1608 		if ((error = bus_dmamap_create(sc->vr_dmat, MCLBYTES, 1,
1609 		    MCLBYTES, 0, 0,
1610 		    &VR_DSRX(sc, i)->ds_dmamap)) != 0) {
1611 			printf("%s: unable to create rx DMA map %d, "
1612 			    "error = %d\n", sc->vr_dev.dv_xname, i, error);
1613 			goto fail_5;
1614 		}
1615 		VR_DSRX(sc, i)->ds_mbuf = NULL;
1616 	}
1617 
1618 	ifp = &sc->vr_ec.ec_if;
1619 	ifp->if_softc = sc;
1620 	ifp->if_mtu = ETHERMTU;
1621 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1622 	ifp->if_ioctl = vr_ioctl;
1623 	ifp->if_start = vr_start;
1624 	ifp->if_watchdog = vr_watchdog;
1625 	ifp->if_init = vr_init;
1626 	ifp->if_stop = vr_stop;
1627 	IFQ_SET_READY(&ifp->if_snd);
1628 
1629 	bcopy(sc->vr_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
1630 
1631 	/*
1632 	 * Initialize MII/media info.
1633 	 */
1634 	sc->vr_mii.mii_ifp = ifp;
1635 	sc->vr_mii.mii_readreg = vr_mii_readreg;
1636 	sc->vr_mii.mii_writereg = vr_mii_writereg;
1637 	sc->vr_mii.mii_statchg = vr_mii_statchg;
1638 	ifmedia_init(&sc->vr_mii.mii_media, 0, vr_ifmedia_upd, vr_ifmedia_sts);
1639 	mii_attach(&sc->vr_dev, &sc->vr_mii, 0xffffffff, MII_PHY_ANY,
1640 	    MII_OFFSET_ANY, 0);
1641 	if (LIST_FIRST(&sc->vr_mii.mii_phys) == NULL) {
1642 		ifmedia_add(&sc->vr_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
1643 		ifmedia_set(&sc->vr_mii.mii_media, IFM_ETHER|IFM_NONE);
1644 	} else
1645 		ifmedia_set(&sc->vr_mii.mii_media, IFM_ETHER|IFM_AUTO);
1646 
1647 	/*
1648 	 * Call MI attach routines.
1649 	 */
1650 	if_attach(ifp);
1651 	ether_ifattach(ifp, sc->vr_enaddr);
1652 
1653 	sc->vr_ats = shutdownhook_establish(vr_shutdown, sc);
1654 	if (sc->vr_ats == NULL)
1655 		printf("%s: warning: couldn't establish shutdown hook\n",
1656 			sc->vr_dev.dv_xname);
1657 	return;
1658 
1659  fail_5:
1660 	for (i = 0; i < VR_NRXDESC; i++) {
1661 		if (sc->vr_rxsoft[i].ds_dmamap != NULL)
1662 			bus_dmamap_destroy(sc->vr_dmat,
1663 			    sc->vr_rxsoft[i].ds_dmamap);
1664 	}
1665  fail_4:
1666 	for (i = 0; i < VR_NTXDESC; i++) {
1667 		if (sc->vr_txsoft[i].ds_dmamap != NULL)
1668 			bus_dmamap_destroy(sc->vr_dmat,
1669 			    sc->vr_txsoft[i].ds_dmamap);
1670 	}
1671 	bus_dmamap_unload(sc->vr_dmat, sc->vr_cddmamap);
1672  fail_3:
1673 	bus_dmamap_destroy(sc->vr_dmat, sc->vr_cddmamap);
1674  fail_2:
1675 	bus_dmamem_unmap(sc->vr_dmat, (caddr_t)sc->vr_control_data,
1676 	    sizeof(struct vr_control_data));
1677  fail_1:
1678 	bus_dmamem_free(sc->vr_dmat, &seg, rseg);
1679  fail_0:
1680 	return;
1681 }
1682