/*	$NetBSD: if_vr.c,v 1.46 2001/01/29 12:04:10 tsutsui Exp $	*/

/*-
 * Copyright (c) 1998, 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1997, 1998
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 *	$FreeBSD: if_vr.c,v 1.7 1999/01/10 18:51:49 wpaul Exp $
 */

/*
 * VIA Rhine fast ethernet PCI NIC driver
 *
 * Supports various network adapters based on the VIA Rhine
 * and Rhine II PCI controllers, including the D-Link DFE530TX.
 * Datasheets are available at http://www.via.com.tw.
 *
 * Written by Bill Paul <wpaul@ctr.columbia.edu>
 * Electrical Engineering Department
 * Columbia University, New York City
 */

/*
 * The VIA Rhine controllers are similar in some respects to
 * the DEC tulip chips, except less complicated. The controller
 * uses an MII bus and an external physical layer interface. The
 * receiver has a one entry perfect filter and a 64-bit hash table
 * multicast filter. Transmit and receive descriptors are similar
 * to the tulip.
 *
 * The Rhine has a serious flaw in its transmit DMA mechanism:
 * transmit buffers must be longword aligned. Unfortunately,
 * the kernel doesn't guarantee that mbufs will be filled in starting
 * at longword boundaries, so we have to do a buffer copy before
 * transmission.
 *
 * Apparently, the receive DMA mechanism also has the same flaw.  This
 * means that on systems with struct alignment requirements, incoming
 * frames must be copied to a new buffer which shifts the data forward
 * 2 bytes so that the payload is aligned on a 4-byte boundary.
 */
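
/*
 * A note on the 2-byte shift: the Ethernet header is 14 bytes long,
 * so placing the start of the frame 2 bytes past a longword boundary
 * leaves the payload that follows the header longword aligned:
 *
 *	offset  0	2 bytes of pad
 *	offset  2	Ethernet header (ETHER_HDR_LEN == 14)
 *	offset 16	IP header, now on a 4-byte boundary
 */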

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if defined(INET)
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#endif

#include "bpfilter.h"
#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_vrreg.h>

#define	VR_USEIOSPACE

/*
 * Various supported device vendors/types and their names.
 */
static struct vr_type {
	pci_vendor_id_t		vr_vid;
	pci_product_id_t	vr_did;
	const char		*vr_name;
} vr_devs[] = {
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT3043,
		"VIA VT3043 (Rhine) 10/100" },
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT6102,
		"VIA VT6102 (Rhine II) 10/100" },
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT86C100A,
		"VIA VT86C100A (Rhine-II) 10/100" },
	{ 0, 0, NULL }
};

/*
 * Transmit descriptor list size.
 */
#define	VR_NTXDESC		64
#define	VR_NTXDESC_MASK		(VR_NTXDESC - 1)
#define	VR_NEXTTX(x)		(((x) + 1) & VR_NTXDESC_MASK)

/*
 * Receive descriptor list size.
 */
#define	VR_NRXDESC		64
#define	VR_NRXDESC_MASK		(VR_NRXDESC - 1)
#define	VR_NEXTRX(x)		(((x) + 1) & VR_NRXDESC_MASK)
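
/*
 * Since the ring sizes are powers of two, the `& ..._MASK' in
 * VR_NEXTTX() and VR_NEXTRX() is equivalent to a modulus: e.g.
 * VR_NEXTTX(5) == 6 and VR_NEXTTX(63) == 0, so ring indices wrap
 * around without a division.
 */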

/*
 * Control data structures that are DMA'd to the Rhine chip.  We allocate
 * them in a single clump that maps to a single DMA segment to make several
 * things easier.
 *
 * Note that since we always copy outgoing packets to aligned transmit
 * buffers, we can reduce the transmit descriptors to one per packet.
 */
struct vr_control_data {
	struct vr_desc		vr_txdescs[VR_NTXDESC];
	struct vr_desc		vr_rxdescs[VR_NRXDESC];
};

#define	VR_CDOFF(x)		offsetof(struct vr_control_data, x)
#define	VR_CDTXOFF(x)		VR_CDOFF(vr_txdescs[(x)])
#define	VR_CDRXOFF(x)		VR_CDOFF(vr_rxdescs[(x)])
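
/*
 * For example, VR_CDTXOFF(5) expands to
 * offsetof(struct vr_control_data, vr_txdescs[5]), the byte offset
 * of the sixth transmit descriptor within the clump; VR_CDTXADDR()
 * below adds that offset to the clump's DMA address to obtain the
 * descriptor's bus address.
 */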

/*
 * Software state of transmit and receive descriptors.
 */
struct vr_descsoft {
	struct mbuf		*ds_mbuf;	/* head of mbuf chain */
	bus_dmamap_t		ds_dmamap;	/* our DMA map */
};

struct vr_softc {
	struct device		vr_dev;		/* generic device glue */
	void			*vr_ih;		/* interrupt cookie */
	void			*vr_ats;	/* shutdown hook */
	bus_space_tag_t		vr_bst;		/* bus space tag */
	bus_space_handle_t	vr_bsh;		/* bus space handle */
	bus_dma_tag_t		vr_dmat;	/* bus DMA tag */
	pci_chipset_tag_t	vr_pc;		/* PCI chipset info */
	struct ethercom		vr_ec;		/* Ethernet common info */
	u_int8_t 		vr_enaddr[ETHER_ADDR_LEN];
	struct mii_data		vr_mii;		/* MII/media info */

	struct callout		vr_tick_ch;	/* tick callout */

	bus_dmamap_t		vr_cddmamap;	/* control data DMA map */
#define	vr_cddma	vr_cddmamap->dm_segs[0].ds_addr
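/*
 * vr_cddma is the bus address of the control-data clump; since the
 * clump is allocated as a single DMA segment (see vr_attach()),
 * dm_segs[0] covers all of it.
 */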

	/*
	 * Software state for transmit and receive descriptors.
	 */
	struct vr_descsoft	vr_txsoft[VR_NTXDESC];
	struct vr_descsoft	vr_rxsoft[VR_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct vr_control_data	*vr_control_data;

	int	vr_txpending;		/* number of TX requests pending */
	int	vr_txdirty;		/* first dirty TX descriptor */
	int	vr_txlast;		/* last used TX descriptor */

	int	vr_rxptr;		/* next ready RX descriptor */
};

#define	VR_CDTXADDR(sc, x)	((sc)->vr_cddma + VR_CDTXOFF((x)))
#define	VR_CDRXADDR(sc, x)	((sc)->vr_cddma + VR_CDRXOFF((x)))

#define	VR_CDTX(sc, x)		(&(sc)->vr_control_data->vr_txdescs[(x)])
#define	VR_CDRX(sc, x)		(&(sc)->vr_control_data->vr_rxdescs[(x)])

#define	VR_DSTX(sc, x)		(&(sc)->vr_txsoft[(x)])
#define	VR_DSRX(sc, x)		(&(sc)->vr_rxsoft[(x)])

#define	VR_CDTXSYNC(sc, x, ops)						\
	bus_dmamap_sync((sc)->vr_dmat, (sc)->vr_cddmamap,		\
	    VR_CDTXOFF((x)), sizeof(struct vr_desc), (ops))

#define	VR_CDRXSYNC(sc, x, ops)						\
	bus_dmamap_sync((sc)->vr_dmat, (sc)->vr_cddmamap,		\
	    VR_CDRXOFF((x)), sizeof(struct vr_desc), (ops))

/*
 * Note we rely on MCLBYTES being a power of two below.
 */
#define	VR_INIT_RXDESC(sc, i)						\
do {									\
	struct vr_desc *__d = VR_CDRX((sc), (i));			\
	struct vr_descsoft *__ds = VR_DSRX((sc), (i));			\
									\
	__d->vr_next = htole32(VR_CDRXADDR((sc), VR_NEXTRX((i))));	\
	__d->vr_status = htole32(VR_RXSTAT_FIRSTFRAG |			\
	    VR_RXSTAT_LASTFRAG | VR_RXSTAT_OWN);			\
	__d->vr_data = htole32(__ds->ds_dmamap->dm_segs[0].ds_addr);	\
	__d->vr_ctl = htole32(VR_RXCTL_CHAIN | VR_RXCTL_RX_INTR |	\
	    ((MCLBYTES - 1) & VR_RXCTL_BUFLEN));			\
	VR_CDRXSYNC((sc), (i), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
} while (0)
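
/*
 * With MCLBYTES == 2048, (MCLBYTES - 1) is 2047 (0x7ff), the largest
 * value the Rhine's 11-bit buffer length field can hold; see the
 * block comment at vr_add_rxbuf() below.
 */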

/*
 * register space access macros
 */
#define	CSR_WRITE_4(sc, reg, val)					\
	bus_space_write_4(sc->vr_bst, sc->vr_bsh, reg, val)
#define	CSR_WRITE_2(sc, reg, val)					\
	bus_space_write_2(sc->vr_bst, sc->vr_bsh, reg, val)
#define	CSR_WRITE_1(sc, reg, val)					\
	bus_space_write_1(sc->vr_bst, sc->vr_bsh, reg, val)

#define	CSR_READ_4(sc, reg)						\
	bus_space_read_4(sc->vr_bst, sc->vr_bsh, reg)
#define	CSR_READ_2(sc, reg)						\
	bus_space_read_2(sc->vr_bst, sc->vr_bsh, reg)
#define	CSR_READ_1(sc, reg)						\
	bus_space_read_1(sc->vr_bst, sc->vr_bsh, reg)

#define	VR_TIMEOUT		1000

static int vr_add_rxbuf		__P((struct vr_softc *, int));

static void vr_rxeof		__P((struct vr_softc *));
static void vr_rxeoc		__P((struct vr_softc *));
static void vr_txeof		__P((struct vr_softc *));
static int vr_intr		__P((void *));
static void vr_start		__P((struct ifnet *));
static int vr_ioctl		__P((struct ifnet *, u_long, caddr_t));
static int vr_init		__P((struct ifnet *));
static void vr_stop		__P((struct ifnet *, int));
static void vr_rxdrain		__P((struct vr_softc *));
static void vr_watchdog	__P((struct ifnet *));
static void vr_tick		__P((void *));

static int vr_ifmedia_upd	__P((struct ifnet *));
static void vr_ifmedia_sts	__P((struct ifnet *, struct ifmediareq *));

static int vr_mii_readreg	__P((struct device *, int, int));
static void vr_mii_writereg	__P((struct device *, int, int, int));
static void vr_mii_statchg	__P((struct device *));

static void vr_setmulti	__P((struct vr_softc *));
static void vr_reset		__P((struct vr_softc *));

int	vr_copy_small = 0;

#define	VR_SETBIT(sc, reg, x)				\
	CSR_WRITE_1(sc, reg,				\
		CSR_READ_1(sc, reg) | x)

#define	VR_CLRBIT(sc, reg, x)				\
	CSR_WRITE_1(sc, reg,				\
		CSR_READ_1(sc, reg) & ~x)

#define	VR_SETBIT16(sc, reg, x)				\
	CSR_WRITE_2(sc, reg,				\
		CSR_READ_2(sc, reg) | x)

#define	VR_CLRBIT16(sc, reg, x)				\
	CSR_WRITE_2(sc, reg,				\
		CSR_READ_2(sc, reg) & ~x)

#define	VR_SETBIT32(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) | x)

#define	VR_CLRBIT32(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) & ~x)
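
/*
 * The SETBIT/CLRBIT macros above are read-modify-write sequences;
 * callers are expected to run at splnet() or from the interrupt
 * handler so the sequence is not interleaved with another update
 * of the same register.
 */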

/*
 * MII bit-bang glue.
 */
u_int32_t vr_mii_bitbang_read __P((struct device *));
void vr_mii_bitbang_write __P((struct device *, u_int32_t));

const struct mii_bitbang_ops vr_mii_bitbang_ops = {
	vr_mii_bitbang_read,
	vr_mii_bitbang_write,
	{
		VR_MIICMD_DATAOUT,	/* MII_BIT_MDO */
		VR_MIICMD_DATAIN,	/* MII_BIT_MDI */
		VR_MIICMD_CLK,		/* MII_BIT_MDC */
		VR_MIICMD_DIR,		/* MII_BIT_DIR_HOST_PHY */
		0,			/* MII_BIT_DIR_PHY_HOST */
	}
};

u_int32_t
vr_mii_bitbang_read(self)
	struct device *self;
{
	struct vr_softc *sc = (void *) self;

	return (CSR_READ_1(sc, VR_MIICMD));
}

void
vr_mii_bitbang_write(self, val)
	struct device *self;
	u_int32_t val;
{
	struct vr_softc *sc = (void *) self;

	CSR_WRITE_1(sc, VR_MIICMD, (val & 0xff) | VR_MIICMD_DIRECTPGM);
}

/*
 * Read a PHY register through the MII.
 */
static int
vr_mii_readreg(self, phy, reg)
	struct device *self;
	int phy, reg;
{
	struct vr_softc *sc = (void *) self;

	CSR_WRITE_1(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
	return (mii_bitbang_readreg(self, &vr_mii_bitbang_ops, phy, reg));
}

/*
 * Write to a PHY register through the MII.
 */
static void
vr_mii_writereg(self, phy, reg, val)
	struct device *self;
	int phy, reg, val;
{
	struct vr_softc *sc = (void *) self;

	CSR_WRITE_1(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
	mii_bitbang_writereg(self, &vr_mii_bitbang_ops, phy, reg, val);
}

static void
vr_mii_statchg(self)
	struct device *self;
{
	struct vr_softc *sc = (struct vr_softc *)self;

	/*
	 * In order to fiddle with the 'full-duplex' bit in the netconfig
	 * register, we first have to put the transmit and/or receive logic
	 * in the idle state.
	 */
	VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON));

	if (sc->vr_mii.mii_media_active & IFM_FDX)
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
	else
		VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);

	if (sc->vr_ec.ec_if.if_flags & IFF_RUNNING)
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);
}

#define	vr_calchash(addr) \
	(ether_crc32_be((addr), ETHER_ADDR_LEN) >> 26)
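
/*
 * vr_calchash() keeps the top 6 bits of the big-endian CRC32 of the
 * station address, selecting one of the 64 bits in the hash filter:
 * values below 32 land in MAR0, the rest in MAR1 (see vr_setmulti()
 * below).
 */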

/*
 * Program the 64-bit multicast hash filter.
 */
static void
vr_setmulti(sc)
	struct vr_softc *sc;
{
	struct ifnet *ifp;
	int h = 0;
	u_int32_t hashes[2] = { 0, 0 };
	struct ether_multistep step;
	struct ether_multi *enm;
	int mcnt = 0;
	u_int8_t rxfilt;

	ifp = &sc->vr_ec.ec_if;

	rxfilt = CSR_READ_1(sc, VR_RXCFG);

	if (ifp->if_flags & IFF_PROMISC) {
allmulti:
		ifp->if_flags |= IFF_ALLMULTI;
		rxfilt |= VR_RXCFG_RX_MULTI;
		CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
		CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
		return;
	}

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, VR_MAR0, 0);
	CSR_WRITE_4(sc, VR_MAR1, 0);

	/* now program new ones */
	ETHER_FIRST_MULTI(step, &sc->vr_ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0)
			goto allmulti;

		h = vr_calchash(enm->enm_addrlo);

		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		ETHER_NEXT_MULTI(step, enm);
		mcnt++;
	}

	ifp->if_flags &= ~IFF_ALLMULTI;

	if (mcnt)
		rxfilt |= VR_RXCFG_RX_MULTI;
	else
		rxfilt &= ~VR_RXCFG_RX_MULTI;

	CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
	CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
	CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
}

static void
vr_reset(sc)
	struct vr_softc *sc;
{
	int i;

	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);

	for (i = 0; i < VR_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
			break;
	}
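	/*
	 * The loop above polls in 10us steps for up to VR_TIMEOUT
	 * (1000) iterations, a worst case of roughly 10ms before we
	 * give up on the reset.
	 */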
	if (i == VR_TIMEOUT)
		printf("%s: reset never completed!\n",
			sc->vr_dev.dv_xname);

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 * Note: the length fields are only 11 bits wide, which means the
 * largest size we can specify is 2047. This is important because
 * MCLBYTES is 2048, so we have to subtract one otherwise we'll
 * overflow the field and make a mess.
 */
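/*
 * Note that the whole cluster (ext_buf, for ext_size bytes) is
 * loaded below rather than just the mbuf data area, so that
 * VR_INIT_RXDESC() can always program the cluster's base address
 * into the descriptor.
 */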
static int
vr_add_rxbuf(sc, i)
	struct vr_softc *sc;
	int i;
{
	struct vr_descsoft *ds = VR_DSRX(sc, i);
	struct mbuf *m_new;
	int error;

	MGETHDR(m_new, M_DONTWAIT, MT_DATA);
	if (m_new == NULL)
		return (ENOBUFS);

	MCLGET(m_new, M_DONTWAIT);
	if ((m_new->m_flags & M_EXT) == 0) {
		m_freem(m_new);
		return (ENOBUFS);
	}

	if (ds->ds_mbuf != NULL)
		bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);

	ds->ds_mbuf = m_new;

	error = bus_dmamap_load(sc->vr_dmat, ds->ds_dmamap,
	    m_new->m_ext.ext_buf, m_new->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: unable to load rx DMA map %d, error = %d\n",
		    sc->vr_dev.dv_xname, i, error);
		panic("vr_add_rxbuf");		/* XXX */
	}

	bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	VR_INIT_RXDESC(sc, i);

	return (0);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
static void
vr_rxeof(sc)
	struct vr_softc *sc;
{
	struct mbuf *m;
	struct ifnet *ifp;
	struct vr_desc *d;
	struct vr_descsoft *ds;
	int i, total_len;
	u_int32_t rxstat;

	ifp = &sc->vr_ec.ec_if;

	for (i = sc->vr_rxptr;; i = VR_NEXTRX(i)) {
		d = VR_CDRX(sc, i);
		ds = VR_DSRX(sc, i);

		VR_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		rxstat = le32toh(d->vr_status);

		if (rxstat & VR_RXSTAT_OWN) {
			/*
			 * We have processed all of the receive buffers.
			 */
			break;
		}

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (rxstat & VR_RXSTAT_RXERR) {
			const char *errstr;

			ifp->if_ierrors++;
			switch (rxstat & 0x000000FF) {
			case VR_RXSTAT_CRCERR:
				errstr = "crc error";
				break;
			case VR_RXSTAT_FRAMEALIGNERR:
				errstr = "frame alignment error";
				break;
			case VR_RXSTAT_FIFOOFLOW:
				errstr = "FIFO overflow";
				break;
			case VR_RXSTAT_GIANT:
				errstr = "received giant packet";
				break;
			case VR_RXSTAT_RUNT:
				errstr = "received runt packet";
				break;
			case VR_RXSTAT_BUSERR:
				errstr = "system bus error";
				break;
			case VR_RXSTAT_BUFFERR:
				errstr = "rx buffer error";
				break;
			default:
				errstr = "unknown rx error";
				break;
			}
			printf("%s: receive error: %s\n", sc->vr_dev.dv_xname,
			    errstr);

			VR_INIT_RXDESC(sc, i);

			continue;
		}

		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/* No errors; receive the packet. */
		total_len = VR_RXBYTES(le32toh(d->vr_status));

#ifdef __NO_STRICT_ALIGNMENT
		/*
		 * If the packet is small enough to fit in a
		 * single header mbuf, allocate one and copy
		 * the data into it.  This greatly reduces
		 * memory consumption when we receive lots
		 * of small packets.
		 *
		 * Otherwise, we add a new buffer to the receive
		 * chain.  If this fails, we drop the packet and
		 * recycle the old buffer.
		 */
		if (vr_copy_small != 0 && total_len <= MHLEN) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				goto dropit;
			memcpy(mtod(m, caddr_t),
			    mtod(ds->ds_mbuf, caddr_t), total_len);
			VR_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
		} else {
			m = ds->ds_mbuf;
			if (vr_add_rxbuf(sc, i) == ENOBUFS) {
 dropit:
				ifp->if_ierrors++;
				VR_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->vr_dmat,
				    ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}
		}
#else
		/*
		 * The Rhine's packet buffers must be 4-byte aligned.
		 * But this means that the data after the Ethernet header
		 * is misaligned.  We must allocate a new buffer and
		 * copy the data, shifted forward 2 bytes.
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
 dropit:
			ifp->if_ierrors++;
			VR_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
		if (total_len > (MHLEN - 2)) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(m);
				goto dropit;
			}
		}
		m->m_data += 2;

		/*
		 * Note that we use clusters for incoming frames, so the
		 * buffer is virtually contiguous.
		 */
		memcpy(mtod(m, caddr_t), mtod(ds->ds_mbuf, caddr_t),
		    total_len);

		/* Allow the receive descriptor to continue using its mbuf. */
		VR_INIT_RXDESC(sc, i);
		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
#endif /* __NO_STRICT_ALIGNMENT */

		/*
		 * The Rhine chip includes the FCS with every
		 * received packet.
		 */
		m->m_flags |= M_HASFCS;

		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = total_len;
#if NBPFILTER > 0
		/*
		 * Handle BPF listeners. Let the BPF user see the packet, but
		 * don't pass it up to the ether_input() layer unless it's
		 * a broadcast packet, multicast packet, matches our ethernet
		 * address or the interface is in promiscuous mode.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->vr_rxptr = i;
}

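/*
 * Receiver restart path, used by vr_intr() when the receiver stops
 * on an error: drain any completed frames, then stop the receiver,
 * point VR_RXADDR at the next clean descriptor and turn reception
 * back on.
 */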
void
vr_rxeoc(sc)
	struct vr_softc *sc;
{

	vr_rxeof(sc);
	VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
	CSR_WRITE_4(sc, VR_RXADDR, VR_CDRXADDR(sc, sc->vr_rxptr));
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO);
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */
static void
vr_txeof(sc)
	struct vr_softc *sc;
{
	struct ifnet *ifp = &sc->vr_ec.ec_if;
	struct vr_desc *d;
	struct vr_descsoft *ds;
	u_int32_t txstat;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	for (i = sc->vr_txdirty; sc->vr_txpending != 0;
	     i = VR_NEXTTX(i), sc->vr_txpending--) {
		d = VR_CDTX(sc, i);
		ds = VR_DSTX(sc, i);

		VR_CDTXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		txstat = le32toh(d->vr_status);
		if (txstat & VR_TXSTAT_OWN)
			break;

		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap,
		    0, ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);
		m_freem(ds->ds_mbuf);
		ds->ds_mbuf = NULL;

		if (txstat & VR_TXSTAT_ERRSUM) {
			ifp->if_oerrors++;
			if (txstat & VR_TXSTAT_DEFER)
				ifp->if_collisions++;
			if (txstat & VR_TXSTAT_LATECOLL)
				ifp->if_collisions++;
		}

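		/*
		 * VR_TXSTAT_COLLCNT is the chip's per-packet collision
		 * count field; the shift below moves it down to its
		 * numeric value before adding it to if_collisions.
		 */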
		ifp->if_collisions += (txstat & VR_TXSTAT_COLLCNT) >> 3;
		ifp->if_opackets++;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->vr_txdirty = i;

	/*
	 * Cancel the watchdog timer if there are no pending
	 * transmissions.
	 */
	if (sc->vr_txpending == 0)
		ifp->if_timer = 0;
}

static int
vr_intr(arg)
	void *arg;
{
	struct vr_softc *sc;
	struct ifnet *ifp;
	u_int16_t status;
	int handled = 0, dotx = 0;

	sc = arg;
	ifp = &sc->vr_ec.ec_if;

	/* Suppress unwanted interrupts. */
	if ((ifp->if_flags & IFF_UP) == 0) {
		vr_stop(ifp, 1);
		return (0);
	}

	/* Disable interrupts. */
	CSR_WRITE_2(sc, VR_IMR, 0x0000);

	for (;;) {
		status = CSR_READ_2(sc, VR_ISR);
		if (status)
			CSR_WRITE_2(sc, VR_ISR, status);

		if ((status & VR_INTRS) == 0)
			break;

		handled = 1;

		if (status & VR_ISR_RX_OK)
			vr_rxeof(sc);

		if (status &
		    (VR_ISR_RX_ERR | VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW |
		     VR_ISR_RX_DROPPED))
			vr_rxeoc(sc);

		if (status & VR_ISR_TX_OK) {
			dotx = 1;
			vr_txeof(sc);
		}

		if (status & (VR_ISR_TX_UNDERRUN | VR_ISR_TX_ABRT)) {
			if (status & VR_ISR_TX_UNDERRUN)
				printf("%s: transmit underrun\n",
				    sc->vr_dev.dv_xname);
			if (status & VR_ISR_TX_ABRT)
				printf("%s: transmit aborted\n",
				    sc->vr_dev.dv_xname);
			ifp->if_oerrors++;
			dotx = 1;
			vr_txeof(sc);
			if (sc->vr_txpending) {
				VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON);
				VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO);
			}
		}

		if (status & VR_ISR_BUSERR) {
			printf("%s: PCI bus error\n", sc->vr_dev.dv_xname);
			/* vr_init() calls vr_start() */
			dotx = 0;
			(void) vr_init(ifp);
		}
	}

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

	if (dotx)
		vr_start(ifp);

	return (handled);
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */
static void
vr_start(ifp)
	struct ifnet *ifp;
{
	struct vr_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct vr_desc *d;
	struct vr_descsoft *ds;
	int error, firsttx, nexttx, opending;

	/*
	 * Remember the previous txpending and the first transmit
	 * descriptor we use.
	 */
	opending = sc->vr_txpending;
	firsttx = VR_NEXTTX(sc->vr_txlast);

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->vr_txpending < VR_NTXDESC) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		/*
		 * Get the next available transmit descriptor.
		 */
		nexttx = VR_NEXTTX(sc->vr_txlast);
		d = VR_CDTX(sc, nexttx);
		ds = VR_DSTX(sc, nexttx);

		/*
		 * Load the DMA map.  If this fails, the packet didn't
		 * fit in one DMA segment, and we need to copy.  Note,
		 * the packet must also be aligned.
		 */
		if ((mtod(m0, bus_addr_t) & 3) != 0 ||
		    bus_dmamap_load_mbuf(sc->vr_dmat, ds->ds_dmamap, m0,
		     BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->vr_dev.dv_xname);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->vr_dev.dv_xname);
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->vr_dmat,
			    ds->ds_dmamap, m, BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->vr_dev.dv_xname, error);
				break;
			}
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif

		/*
		 * Fill in the transmit descriptor.  The Rhine
		 * doesn't auto-pad, so we have to do this ourselves.
		 */
		d->vr_data = htole32(ds->ds_dmamap->dm_segs[0].ds_addr);
		d->vr_ctl = htole32(m0->m_pkthdr.len < VR_MIN_FRAMELEN ?
		    VR_MIN_FRAMELEN : m0->m_pkthdr.len);
		d->vr_ctl |=
		    htole32(VR_TXCTL_TLINK|VR_TXCTL_FIRSTFRAG|
		    VR_TXCTL_LASTFRAG);

		/*
		 * If this is the first descriptor we're enqueuing,
		 * don't give it to the Rhine yet.  That could cause
		 * a race condition.  We'll do it below.
		 */
		if (nexttx == firsttx)
			d->vr_status = 0;
		else
			d->vr_status = htole32(VR_TXSTAT_OWN);

		VR_CDTXSYNC(sc, nexttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Advance the tx pointer. */
		sc->vr_txpending++;
		sc->vr_txlast = nexttx;
	}

	if (sc->vr_txpending == VR_NTXDESC) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->vr_txpending != opending) {
		/*
		 * We enqueued packets.  If the transmitter was idle,
		 * reset the txdirty pointer.
		 */
		if (opending == 0)
			sc->vr_txdirty = firsttx;

		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued.
		 */
		VR_CDTX(sc, sc->vr_txlast)->vr_ctl |= htole32(VR_TXCTL_FINT);
		VR_CDTXSYNC(sc, sc->vr_txlast,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * The entire packet chain is set up.  Give the
		 * first descriptor to the Rhine now.
		 */
		VR_CDTX(sc, firsttx)->vr_status = htole32(VR_TXSTAT_OWN);
		VR_CDTXSYNC(sc, firsttx,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Start the transmitter. */
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_TX_GO);

		/* Set the watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * Initialize the interface.  Must be called at splnet.
 */
static int
vr_init(ifp)
	struct ifnet *ifp;
{
	struct vr_softc *sc = ifp->if_softc;
	struct vr_desc *d;
	struct vr_descsoft *ds;
	int i, error = 0;

	/* Cancel pending I/O. */
	vr_stop(ifp, 0);

	/* Reset the Rhine to a known state. */
	vr_reset(sc);

	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_STORENFWD);

	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
	VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD);

	/*
	 * Initialize the transmit descriptor ring.  txlast is initialized
	 * to the end of the list so that it will wrap around to the first
	 * descriptor when the first packet is transmitted.
	 */
	for (i = 0; i < VR_NTXDESC; i++) {
		d = VR_CDTX(sc, i);
		memset(d, 0, sizeof(struct vr_desc));
		d->vr_next = htole32(VR_CDTXADDR(sc, VR_NEXTTX(i)));
		VR_CDTXSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->vr_txpending = 0;
	sc->vr_txdirty = 0;
	sc->vr_txlast = VR_NTXDESC - 1;

	/*
	 * Initialize the receive descriptor ring.
	 */
	for (i = 0; i < VR_NRXDESC; i++) {
		ds = VR_DSRX(sc, i);
		if (ds->ds_mbuf == NULL) {
			if ((error = vr_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    sc->vr_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				vr_rxdrain(sc);
				goto out;
			}
		}
	}
	sc->vr_rxptr = 0;

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
	else
		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);

	/* Set capture broadcast bit to capture broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST)
		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
	else
		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);

	/* Program the multicast filter, if necessary. */
	vr_setmulti(sc);

	/* Give the transmit and receive rings to the Rhine. */
	CSR_WRITE_4(sc, VR_RXADDR, VR_CDRXADDR(sc, sc->vr_rxptr));
	CSR_WRITE_4(sc, VR_TXADDR, VR_CDTXADDR(sc, VR_NEXTTX(sc->vr_txlast)));

	/* Set current media. */
	mii_mediachg(&sc->vr_mii);

	/* Enable receiver and transmitter. */
	CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START|
				    VR_CMD_TX_ON|VR_CMD_RX_ON|
				    VR_CMD_RX_GO);

	/* Enable interrupts. */
	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Start one second timer. */
	callout_reset(&sc->vr_tick_ch, hz, vr_tick, sc);

	/* Attempt to start output on the interface. */
	vr_start(ifp);

 out:
	if (error)
		printf("%s: interface not running\n", sc->vr_dev.dv_xname);
	return (error);
}

/*
 * Set media options.
 */
static int
vr_ifmedia_upd(ifp)
	struct ifnet *ifp;
{
	struct vr_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_UP)
		mii_mediachg(&sc->vr_mii);
	return (0);
}

/*
 * Report current media status.
 */
static void
vr_ifmedia_sts(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct vr_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->vr_mii);
	ifmr->ifm_status = sc->vr_mii.mii_media_status;
	ifmr->ifm_active = sc->vr_mii.mii_media_active;
}

static int
vr_ioctl(ifp, command, data)
	struct ifnet *ifp;
	u_long command;
	caddr_t data;
{
	struct vr_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (command) {
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->vr_mii.mii_media, command);
		break;

	default:
		error = ether_ioctl(ifp, command, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			vr_setmulti(sc);
			error = 0;
		}
		break;
	}

	splx(s);
	return (error);
}

static void
vr_watchdog(ifp)
	struct ifnet *ifp;
{
	struct vr_softc *sc = ifp->if_softc;

	printf("%s: device timeout\n", sc->vr_dev.dv_xname);
	ifp->if_oerrors++;

	(void) vr_init(ifp);
}

/*
 * One second timer, used to tick MII.
 */
static void
vr_tick(arg)
	void *arg;
{
	struct vr_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->vr_mii);
	splx(s);

	callout_reset(&sc->vr_tick_ch, hz, vr_tick, sc);
}

/*
 * Drain the receive queue.
 */
static void
vr_rxdrain(sc)
	struct vr_softc *sc;
{
	struct vr_descsoft *ds;
	int i;

	for (i = 0; i < VR_NRXDESC; i++) {
		ds = VR_DSRX(sc, i);
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * transmit lists.
 */
static void
vr_stop(ifp, disable)
	struct ifnet *ifp;
	int disable;
{
	struct vr_softc *sc = ifp->if_softc;
	struct vr_descsoft *ds;
	int i;

	/* Cancel one second timer. */
	callout_stop(&sc->vr_tick_ch);

	/* Down the MII. */
	mii_down(&sc->vr_mii);

	ifp = &sc->vr_ec.ec_if;
	ifp->if_timer = 0;

	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP);
	VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON));
	CSR_WRITE_2(sc, VR_IMR, 0x0000);
	CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
	CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < VR_NTXDESC; i++) {
		ds = VR_DSTX(sc, i);
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}

	if (disable)
		vr_rxdrain(sc);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

static struct vr_type *vr_lookup __P((struct pci_attach_args *));
static int vr_probe __P((struct device *, struct cfdata *, void *));
static void vr_attach __P((struct device *, struct device *, void *));
static void vr_shutdown __P((void *));

struct cfattach vr_ca = {
	sizeof (struct vr_softc), vr_probe, vr_attach
};

static struct vr_type *
vr_lookup(pa)
	struct pci_attach_args *pa;
{
	struct vr_type *vrt;

	for (vrt = vr_devs; vrt->vr_name != NULL; vrt++) {
		if (PCI_VENDOR(pa->pa_id) == vrt->vr_vid &&
		    PCI_PRODUCT(pa->pa_id) == vrt->vr_did)
			return (vrt);
	}
	return (NULL);
}

static int
vr_probe(parent, match, aux)
	struct device *parent;
	struct cfdata *match;
	void *aux;
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;

	if (vr_lookup(pa) != NULL)
		return (1);

	return (0);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
vr_shutdown(arg)
	void *arg;
{
	struct vr_softc *sc = (struct vr_softc *)arg;

	vr_stop(&sc->vr_ec.ec_if, 1);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static void
vr_attach(parent, self, aux)
	struct device *parent;
	struct device *self;
	void *aux;
{
	struct vr_softc *sc = (struct vr_softc *) self;
	struct pci_attach_args *pa = (struct pci_attach_args *) aux;
	bus_dma_segment_t seg;
	struct vr_type *vrt;
	u_int32_t command;
	struct ifnet *ifp;
	u_char eaddr[ETHER_ADDR_LEN];
	int i, rseg, error;

#define	PCI_CONF_WRITE(r, v)	pci_conf_write(pa->pa_pc, pa->pa_tag, (r), (v))
#define	PCI_CONF_READ(r)	pci_conf_read(pa->pa_pc, pa->pa_tag, (r))

	callout_init(&sc->vr_tick_ch);

	vrt = vr_lookup(pa);
	if (vrt == NULL) {
		printf("\n");
		panic("vr_attach: impossible");
	}

	printf(": %s Ethernet\n", vrt->vr_name);

	/*
	 * Handle power management nonsense.
	 */
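	/*
	 * Bringing the chip out of a low-power state can reset its
	 * BARs and interrupt line, so the code below saves those
	 * registers, forces the device into D0, and then puts them
	 * back.
	 */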

	command = PCI_CONF_READ(VR_PCI_CAPID) & 0x000000FF;
	if (command == 0x01) {
		command = PCI_CONF_READ(VR_PCI_PWRMGMTCTRL);
		if (command & VR_PSTATE_MASK) {
			u_int32_t iobase, membase, irq;

			/* Save important PCI config data. */
			iobase = PCI_CONF_READ(VR_PCI_LOIO);
			membase = PCI_CONF_READ(VR_PCI_LOMEM);
			irq = PCI_CONF_READ(VR_PCI_INTLINE);

			/* Reset the power state. */
			printf("%s: chip is in D%d power mode "
				"-- setting to D0\n",
				sc->vr_dev.dv_xname, command & VR_PSTATE_MASK);
			command &= 0xFFFFFFFC;
			PCI_CONF_WRITE(VR_PCI_PWRMGMTCTRL, command);

			/* Restore PCI config data. */
			PCI_CONF_WRITE(VR_PCI_LOIO, iobase);
			PCI_CONF_WRITE(VR_PCI_LOMEM, membase);
			PCI_CONF_WRITE(VR_PCI_INTLINE, irq);
		}
	}

	/* Make sure bus mastering is enabled. */
	command = PCI_CONF_READ(PCI_COMMAND_STATUS_REG);
	command |= PCI_COMMAND_MASTER_ENABLE;
	PCI_CONF_WRITE(PCI_COMMAND_STATUS_REG, command);

	/*
	 * Map control/status registers.
	 */
	{
		bus_space_tag_t iot, memt;
		bus_space_handle_t ioh, memh;
		int ioh_valid, memh_valid;
		pci_intr_handle_t intrhandle;
		const char *intrstr;

		ioh_valid = (pci_mapreg_map(pa, VR_PCI_LOIO,
			PCI_MAPREG_TYPE_IO, 0,
			&iot, &ioh, NULL, NULL) == 0);
		memh_valid = (pci_mapreg_map(pa, VR_PCI_LOMEM,
			PCI_MAPREG_TYPE_MEM |
			PCI_MAPREG_MEM_TYPE_32BIT,
			0, &memt, &memh, NULL, NULL) == 0);
#if defined(VR_USEIOSPACE)
		if (ioh_valid) {
			sc->vr_bst = iot;
			sc->vr_bsh = ioh;
		} else if (memh_valid) {
			sc->vr_bst = memt;
			sc->vr_bsh = memh;
		}
#else
		if (memh_valid) {
			sc->vr_bst = memt;
			sc->vr_bsh = memh;
		} else if (ioh_valid) {
			sc->vr_bst = iot;
			sc->vr_bsh = ioh;
		}
#endif
		else {
			printf(": unable to map device registers\n");
			return;
		}

		/* Allocate interrupt */
		if (pci_intr_map(pa, &intrhandle)) {
			printf("%s: couldn't map interrupt\n",
				sc->vr_dev.dv_xname);
			return;
		}
		intrstr = pci_intr_string(pa->pa_pc, intrhandle);
		sc->vr_ih = pci_intr_establish(pa->pa_pc, intrhandle, IPL_NET,
						vr_intr, sc);
		if (sc->vr_ih == NULL) {
			printf("%s: couldn't establish interrupt",
				sc->vr_dev.dv_xname);
			if (intrstr != NULL)
				printf(" at %s", intrstr);
			printf("\n");
			return;
		}
		printf("%s: interrupting at %s\n",
			sc->vr_dev.dv_xname, intrstr);
	}

	/* Reset the adapter. */
	vr_reset(sc);

	/*
	 * Get station address. The way the Rhine chips work,
	 * you're not allowed to directly access the EEPROM once
	 * they've been programmed a special way. Consequently,
	 * we need to read the node address from the PAR0 and PAR1
	 * registers.
	 */
	VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
	DELAY(200);
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);

	/*
	 * A Rhine chip was detected. Inform the world.
	 */
	printf("%s: Ethernet address: %s\n",
		sc->vr_dev.dv_xname, ether_sprintf(eaddr));

	bcopy(eaddr, sc->vr_enaddr, ETHER_ADDR_LEN);

	sc->vr_dmat = pa->pa_dmat;

	/*
	 * Allocate the control data structures, and create and load
	 * the DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->vr_dmat,
	    sizeof(struct vr_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->vr_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->vr_dmat, &seg, rseg,
	    sizeof(struct vr_control_data), (caddr_t *)&sc->vr_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->vr_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->vr_dmat,
	    sizeof(struct vr_control_data), 1,
	    sizeof(struct vr_control_data), 0, 0,
	    &sc->vr_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->vr_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->vr_dmat, sc->vr_cddmamap,
	    sc->vr_control_data, sizeof(struct vr_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->vr_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < VR_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->vr_dmat, MCLBYTES,
		    1, MCLBYTES, 0, 0,
		    &VR_DSTX(sc, i)->ds_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->vr_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < VR_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->vr_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0,
		    &VR_DSRX(sc, i)->ds_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->vr_dev.dv_xname, i, error);
			goto fail_5;
		}
		VR_DSRX(sc, i)->ds_mbuf = NULL;
	}

	ifp = &sc->vr_ec.ec_if;
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vr_ioctl;
	ifp->if_start = vr_start;
	ifp->if_watchdog = vr_watchdog;
	ifp->if_init = vr_init;
	ifp->if_stop = vr_stop;
	IFQ_SET_READY(&ifp->if_snd);

	bcopy(sc->vr_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	/*
	 * Initialize MII/media info.
	 */
	sc->vr_mii.mii_ifp = ifp;
	sc->vr_mii.mii_readreg = vr_mii_readreg;
	sc->vr_mii.mii_writereg = vr_mii_writereg;
	sc->vr_mii.mii_statchg = vr_mii_statchg;
	ifmedia_init(&sc->vr_mii.mii_media, 0, vr_ifmedia_upd, vr_ifmedia_sts);
	mii_attach(&sc->vr_dev, &sc->vr_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->vr_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->vr_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->vr_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->vr_mii.mii_media, IFM_ETHER|IFM_AUTO);

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, sc->vr_enaddr);

	sc->vr_ats = shutdownhook_establish(vr_shutdown, sc);
	if (sc->vr_ats == NULL)
		printf("%s: warning: couldn't establish shutdown hook\n",
			sc->vr_dev.dv_xname);
	return;

 fail_5:
	for (i = 0; i < VR_NRXDESC; i++) {
		if (sc->vr_rxsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->vr_dmat,
			    sc->vr_rxsoft[i].ds_dmamap);
	}
 fail_4:
	for (i = 0; i < VR_NTXDESC; i++) {
		if (sc->vr_txsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->vr_dmat,
			    sc->vr_txsoft[i].ds_dmamap);
	}
	bus_dmamap_unload(sc->vr_dmat, sc->vr_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->vr_dmat, sc->vr_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->vr_dmat, (caddr_t)sc->vr_control_data,
	    sizeof(struct vr_control_data));
 fail_1:
	bus_dmamem_free(sc->vr_dmat, &seg, rseg);
 fail_0:
	return;
}