1 /*	$NetBSD: if_vr.c,v 1.121 2016/12/15 09:28:05 ozaki-r Exp $	*/
2 
3 /*-
4  * Copyright (c) 1998, 1999 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9  * NASA Ames Research Center.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /*
34  * Copyright (c) 1997, 1998
35  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
36  *
37  * Redistribution and use in source and binary forms, with or without
38  * modification, are permitted provided that the following conditions
39  * are met:
40  * 1. Redistributions of source code must retain the above copyright
41  *    notice, this list of conditions and the following disclaimer.
42  * 2. Redistributions in binary form must reproduce the above copyright
43  *    notice, this list of conditions and the following disclaimer in the
44  *    documentation and/or other materials provided with the distribution.
45  * 3. All advertising materials mentioning features or use of this software
46  *    must display the following acknowledgement:
47  *	This product includes software developed by Bill Paul.
48  * 4. Neither the name of the author nor the names of any co-contributors
49  *    may be used to endorse or promote products derived from this software
50  *    without specific prior written permission.
51  *
52  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
53  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
56  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
57  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
58  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
59  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
60  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
61  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
62  * THE POSSIBILITY OF SUCH DAMAGE.
63  *
64  *	$FreeBSD: if_vr.c,v 1.7 1999/01/10 18:51:49 wpaul Exp $
65  */
66 
67 /*
68  * VIA Rhine fast ethernet PCI NIC driver
69  *
70  * Supports various network adapters based on the VIA Rhine
71  * and Rhine II PCI controllers, including the D-Link DFE530TX.
72  * Datasheets are available at http://www.via.com.tw.
73  *
74  * Written by Bill Paul <wpaul@ctr.columbia.edu>
75  * Electrical Engineering Department
76  * Columbia University, New York City
77  */
78 
79 /*
80  * The VIA Rhine controllers are similar in some respects to the
81  * DEC tulip chips, except less complicated. The controller
82  * uses an MII bus and an external physical layer interface. The
83  * receiver has a one entry perfect filter and a 64-bit hash table
84  * multicast filter. Transmit and receive descriptors are similar
85  * to the tulip.
86  *
87  * The Rhine has a serious flaw in its transmit DMA mechanism:
88  * transmit buffers must be longword aligned. Unfortunately,
89  * the kernel doesn't guarantee that mbufs will be filled in starting
90  * at longword boundaries, so we have to do a buffer copy before
91  * transmission.
92  *
93  * Apparently, the receive DMA mechanism also has the same flaw.  This
94  * means that on systems with struct alignment requirements, incoming
95  * frames must be copied into a new buffer, shifting the data forward
96  * 2 bytes so that the payload is aligned on a 4-byte boundary.
97  */
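/*
 * In sketch form (see vr_rxeof() below), the strict-alignment receive
 * path does roughly the following for each completed frame:
 *
 *	MGETHDR(m, M_DONTWAIT, MT_DATA);
 *	m->m_data += 2;			(re-align the payload)
 *	memcpy(mtod(m, void *), mtod(ds->ds_mbuf, void *), total_len);
 */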
98 
99 #include <sys/cdefs.h>
100 __KERNEL_RCSID(0, "$NetBSD: if_vr.c,v 1.121 2016/12/15 09:28:05 ozaki-r Exp $");
101 
104 #include <sys/param.h>
105 #include <sys/systm.h>
106 #include <sys/callout.h>
107 #include <sys/sockio.h>
108 #include <sys/mbuf.h>
109 #include <sys/malloc.h>
110 #include <sys/kernel.h>
111 #include <sys/socket.h>
112 #include <sys/device.h>
113 
114 #include <sys/rndsource.h>
115 
116 #include <net/if.h>
117 #include <net/if_arp.h>
118 #include <net/if_dl.h>
119 #include <net/if_media.h>
120 #include <net/if_ether.h>
121 
122 #include <net/bpf.h>
123 
124 #include <sys/bus.h>
125 #include <sys/intr.h>
126 #include <machine/endian.h>
127 
128 #include <dev/mii/mii.h>
129 #include <dev/mii/miivar.h>
130 #include <dev/mii/mii_bitbang.h>
131 
132 #include <dev/pci/pcireg.h>
133 #include <dev/pci/pcivar.h>
134 #include <dev/pci/pcidevs.h>
135 
136 #include <dev/pci/if_vrreg.h>
137 
138 #define	VR_USEIOSPACE
139 
140 /*
141  * Various supported device vendors/types.
142  */
143 static const struct vr_type {
144 	pci_vendor_id_t		vr_vid;
145 	pci_product_id_t	vr_did;
146 } vr_devs[] = {
147 	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT3043 },
148 	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT6102 },
149 	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT6105 },
150 	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT6105M },
151 	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT86C100A }
152 };
153 
154 /*
155  * Transmit descriptor list size.
156  */
157 #define	VR_NTXDESC		64
158 #define	VR_NTXDESC_MASK		(VR_NTXDESC - 1)
159 #define	VR_NEXTTX(x)		(((x) + 1) & VR_NTXDESC_MASK)
160 
161 /*
162  * Receive descriptor list size.
163  */
164 #define	VR_NRXDESC		64
165 #define	VR_NRXDESC_MASK		(VR_NRXDESC - 1)
166 #define	VR_NEXTRX(x)		(((x) + 1) & VR_NRXDESC_MASK)
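/*
 * Both ring sizes are powers of two, so VR_NEXTTX() and VR_NEXTRX()
 * can wrap around the ring with a cheap AND mask instead of a modulo.
 */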
167 
168 /*
169  * Control data structures that are DMA'd to the Rhine chip.  We allocate
170  * them in a single clump that maps to a single DMA segment to make several
171  * things easier.
172  *
173  * Note that since outgoing packets are copied into aligned transmit buffers
174  * when necessary, we can get away with one transmit descriptor per packet.
175  */
176 struct vr_control_data {
177 	struct vr_desc		vr_txdescs[VR_NTXDESC];
178 	struct vr_desc		vr_rxdescs[VR_NRXDESC];
179 };
180 
181 #define	VR_CDOFF(x)		offsetof(struct vr_control_data, x)
182 #define	VR_CDTXOFF(x)		VR_CDOFF(vr_txdescs[(x)])
183 #define	VR_CDRXOFF(x)		VR_CDOFF(vr_rxdescs[(x)])
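/*
 * These yield the byte offset of descriptor x within the control-data
 * clump; they are used both for partial bus_dmamap_sync() calls and for
 * computing the device-visible descriptor addresses below.
 */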
184 
185 /*
186  * Software state of transmit and receive descriptors.
187  */
188 struct vr_descsoft {
189 	struct mbuf		*ds_mbuf;	/* head of mbuf chain */
190 	bus_dmamap_t		ds_dmamap;	/* our DMA map */
191 };
192 
193 struct vr_softc {
194 	device_t		vr_dev;
195 	void			*vr_ih;		/* interrupt cookie */
196 	bus_space_tag_t		vr_bst;		/* bus space tag */
197 	bus_space_handle_t	vr_bsh;		/* bus space handle */
198 	bus_dma_tag_t		vr_dmat;	/* bus DMA tag */
199 	pci_chipset_tag_t	vr_pc;		/* PCI chipset info */
200 	pcitag_t		vr_tag;		/* PCI tag */
201 	struct ethercom		vr_ec;		/* Ethernet common info */
202 	uint8_t 		vr_enaddr[ETHER_ADDR_LEN];
203 	struct mii_data		vr_mii;		/* MII/media info */
204 
205 	pcireg_t		vr_id;		/* vendor/product ID */
206 	uint8_t			vr_revid;	/* Rhine chip revision */
207 
208 	callout_t		vr_tick_ch;	/* tick callout */
209 
210 	bus_dmamap_t		vr_cddmamap;	/* control data DMA map */
211 #define	vr_cddma	vr_cddmamap->dm_segs[0].ds_addr
212 
213 	/*
214 	 * Software state for transmit and receive descriptors.
215 	 */
216 	struct vr_descsoft	vr_txsoft[VR_NTXDESC];
217 	struct vr_descsoft	vr_rxsoft[VR_NRXDESC];
218 
219 	/*
220 	 * Control data structures.
221 	 */
222 	struct vr_control_data	*vr_control_data;
223 
224 	int	vr_txpending;		/* number of TX requests pending */
225 	int	vr_txdirty;		/* first dirty TX descriptor */
226 	int	vr_txlast;		/* last used TX descriptor */
227 
228 	int	vr_rxptr;		/* next ready RX descriptor */
229 
230 	uint32_t	vr_save_iobase;
231 	uint32_t	vr_save_membase;
232 	uint32_t	vr_save_irq;
233 
234 	bool		vr_link;
235 	int		vr_flags;
236 #define VR_F_RESTART	0x1		/* restart on next tick */
237 	int		vr_if_flags;
238 
239 	krndsource_t rnd_source;	/* random source */
240 };
241 
242 #define	VR_CDTXADDR(sc, x)	((sc)->vr_cddma + VR_CDTXOFF((x)))
243 #define	VR_CDRXADDR(sc, x)	((sc)->vr_cddma + VR_CDRXOFF((x)))
244 
245 #define	VR_CDTX(sc, x)		(&(sc)->vr_control_data->vr_txdescs[(x)])
246 #define	VR_CDRX(sc, x)		(&(sc)->vr_control_data->vr_rxdescs[(x)])
247 
248 #define	VR_DSTX(sc, x)		(&(sc)->vr_txsoft[(x)])
249 #define	VR_DSRX(sc, x)		(&(sc)->vr_rxsoft[(x)])
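/*
 * VR_CDTXADDR()/VR_CDRXADDR() give the DMA (bus) address of a descriptor,
 * VR_CDTX()/VR_CDRX() a kernel pointer to the descriptor itself, and
 * VR_DSTX()/VR_DSRX() the matching software state.
 */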
250 
251 #define	VR_CDTXSYNC(sc, x, ops)						\
252 	bus_dmamap_sync((sc)->vr_dmat, (sc)->vr_cddmamap,		\
253 	    VR_CDTXOFF((x)), sizeof(struct vr_desc), (ops))
254 
255 #define	VR_CDRXSYNC(sc, x, ops)						\
256 	bus_dmamap_sync((sc)->vr_dmat, (sc)->vr_cddmamap,		\
257 	    VR_CDRXOFF((x)), sizeof(struct vr_desc), (ops))
258 
259 /*
260  * Note we rely on MCLBYTES being a power of two below.
261  */
262 #define	VR_INIT_RXDESC(sc, i)						\
263 do {									\
264 	struct vr_desc *__d = VR_CDRX((sc), (i));			\
265 	struct vr_descsoft *__ds = VR_DSRX((sc), (i));			\
266 									\
267 	__d->vr_next = htole32(VR_CDRXADDR((sc), VR_NEXTRX((i))));	\
268 	__d->vr_data = htole32(__ds->ds_dmamap->dm_segs[0].ds_addr);	\
269 	__d->vr_ctl = htole32(VR_RXCTL_CHAIN | VR_RXCTL_RX_INTR |	\
270 	    ((MCLBYTES - 1) & VR_RXCTL_BUFLEN));			\
271 	__d->vr_status = htole32(VR_RXSTAT_FIRSTFRAG |			\
272 	    VR_RXSTAT_LASTFRAG | VR_RXSTAT_OWN);			\
273 	VR_CDRXSYNC((sc), (i), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
274 } while (/* CONSTCOND */ 0)
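/*
 * The buffer length field is only 11 bits wide, so with 2048-byte
 * clusters (MCLBYTES - 1) is the largest length we can program (see
 * the note above vr_add_rxbuf()).  Setting VR_RXSTAT_OWN hands the
 * descriptor to the chip, which is why the descriptor sync is the
 * last thing VR_INIT_RXDESC() does.
 */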
275 
276 /*
277  * register space access macros
278  */
279 #define	CSR_WRITE_4(sc, reg, val)					\
280 	bus_space_write_4(sc->vr_bst, sc->vr_bsh, reg, val)
281 #define	CSR_WRITE_2(sc, reg, val)					\
282 	bus_space_write_2(sc->vr_bst, sc->vr_bsh, reg, val)
283 #define	CSR_WRITE_1(sc, reg, val)					\
284 	bus_space_write_1(sc->vr_bst, sc->vr_bsh, reg, val)
285 
286 #define	CSR_READ_4(sc, reg)						\
287 	bus_space_read_4(sc->vr_bst, sc->vr_bsh, reg)
288 #define	CSR_READ_2(sc, reg)						\
289 	bus_space_read_2(sc->vr_bst, sc->vr_bsh, reg)
290 #define	CSR_READ_1(sc, reg)						\
291 	bus_space_read_1(sc->vr_bst, sc->vr_bsh, reg)
292 
293 #define	VR_TIMEOUT		1000
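/*
 * VR_TIMEOUT is the number of 10 usec polls (roughly 10 ms in total)
 * allowed when waiting for a reset to complete or for the TX/RX
 * engines to go idle.
 */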
294 
295 static int	vr_add_rxbuf(struct vr_softc *, int);
296 
297 static void	vr_rxeof(struct vr_softc *);
298 static void	vr_rxeoc(struct vr_softc *);
299 static void	vr_txeof(struct vr_softc *);
300 static int	vr_intr(void *);
301 static void	vr_start(struct ifnet *);
302 static int	vr_ioctl(struct ifnet *, u_long, void *);
303 static int	vr_init(struct ifnet *);
304 static void	vr_stop(struct ifnet *, int);
305 static void	vr_rxdrain(struct vr_softc *);
306 static void	vr_watchdog(struct ifnet *);
307 static void	vr_tick(void *);
308 
309 static int	vr_mii_readreg(device_t, int, int);
310 static void	vr_mii_writereg(device_t, int, int, int);
311 static void	vr_mii_statchg(struct ifnet *);
312 
313 static void	vr_setmulti(struct vr_softc *);
314 static void	vr_reset(struct vr_softc *);
315 static int	vr_restore_state(pci_chipset_tag_t, pcitag_t, device_t,
316     pcireg_t);
317 static bool	vr_resume(device_t, const pmf_qual_t *);
318 
319 int	vr_copy_small = 0;
320 
321 #define	VR_SETBIT(sc, reg, x)				\
322 	CSR_WRITE_1(sc, reg,				\
323 	    CSR_READ_1(sc, reg) | (x))
324 
325 #define	VR_CLRBIT(sc, reg, x)				\
326 	CSR_WRITE_1(sc, reg,				\
327 	    CSR_READ_1(sc, reg) & ~(x))
328 
329 #define	VR_SETBIT16(sc, reg, x)				\
330 	CSR_WRITE_2(sc, reg,				\
331 	    CSR_READ_2(sc, reg) | (x))
332 
333 #define	VR_CLRBIT16(sc, reg, x)				\
334 	CSR_WRITE_2(sc, reg,				\
335 	    CSR_READ_2(sc, reg) & ~(x))
336 
337 #define	VR_SETBIT32(sc, reg, x)				\
338 	CSR_WRITE_4(sc, reg,				\
339 	    CSR_READ_4(sc, reg) | (x))
340 
341 #define	VR_CLRBIT32(sc, reg, x)				\
342 	CSR_WRITE_4(sc, reg,				\
343 	    CSR_READ_4(sc, reg) & ~(x))
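/*
 * The VR_SETBIT*()/VR_CLRBIT*() macros above are simple read-modify-write
 * helpers for the 8-, 16- and 32-bit CSRs.
 */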
344 
345 /*
346  * MII bit-bang glue.
347  */
348 static uint32_t vr_mii_bitbang_read(device_t);
349 static void	vr_mii_bitbang_write(device_t, uint32_t);
350 
351 static const struct mii_bitbang_ops vr_mii_bitbang_ops = {
352 	vr_mii_bitbang_read,
353 	vr_mii_bitbang_write,
354 	{
355 		VR_MIICMD_DATAOUT,	/* MII_BIT_MDO */
356 		VR_MIICMD_DATAIN,	/* MII_BIT_MDI */
357 		VR_MIICMD_CLK,		/* MII_BIT_MDC */
358 		VR_MIICMD_DIR,		/* MII_BIT_DIR_HOST_PHY */
359 		0,			/* MII_BIT_DIR_PHY_HOST */
360 	}
361 };
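/*
 * The table above tells the generic MII bit-bang code which VR_MIICMD
 * bits drive MDO, MDI, MDC and the bus direction; the write callback
 * below keeps VR_MIICMD_DIRECTPGM set so the MII port stays in
 * direct-program (bit-bang) mode.
 */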
362 
363 static uint32_t
364 vr_mii_bitbang_read(device_t self)
365 {
366 	struct vr_softc *sc = device_private(self);
367 
368 	return (CSR_READ_1(sc, VR_MIICMD));
369 }
370 
371 static void
372 vr_mii_bitbang_write(device_t self, uint32_t val)
373 {
374 	struct vr_softc *sc = device_private(self);
375 
376 	CSR_WRITE_1(sc, VR_MIICMD, (val & 0xff) | VR_MIICMD_DIRECTPGM);
377 }
378 
379 /*
380  * Read a PHY register through the MII.
381  */
382 static int
383 vr_mii_readreg(device_t self, int phy, int reg)
384 {
385 	struct vr_softc *sc = device_private(self);
386 
387 	CSR_WRITE_1(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
388 	return (mii_bitbang_readreg(self, &vr_mii_bitbang_ops, phy, reg));
389 }
390 
391 /*
392  * Write to a PHY register through the MII.
393  */
394 static void
395 vr_mii_writereg(device_t self, int phy, int reg, int val)
396 {
397 	struct vr_softc *sc = device_private(self);
398 
399 	CSR_WRITE_1(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);
400 	mii_bitbang_writereg(self, &vr_mii_bitbang_ops, phy, reg, val);
401 }
402 
403 static void
404 vr_mii_statchg(struct ifnet *ifp)
405 {
406 	struct vr_softc *sc = ifp->if_softc;
407 	int i;
408 
409 	/*
410 	 * In order to fiddle with the 'full-duplex' bit in the netconfig
411 	 * register, we first have to put the transmit and/or receive logic
412 	 * in the idle state.
413 	 */
414 	if ((sc->vr_mii.mii_media_status & IFM_ACTIVE) &&
415 	    IFM_SUBTYPE(sc->vr_mii.mii_media_active) != IFM_NONE) {
416 		sc->vr_link = true;
417 
418 		if (CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON))
419 			VR_CLRBIT16(sc, VR_COMMAND,
420 			    (VR_CMD_TX_ON|VR_CMD_RX_ON));
421 
422 		if (sc->vr_mii.mii_media_active & IFM_FDX)
423 			VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
424 		else
425 			VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
426 
427 		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);
428 	} else {
429 		sc->vr_link = false;
430 		VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);
431 		for (i = VR_TIMEOUT; i > 0; i--) {
432 			delay(10);
433 			if (!(CSR_READ_2(sc, VR_COMMAND) &
434 			    (VR_CMD_TX_ON|VR_CMD_RX_ON)))
435 				break;
436 		}
437 		if (i == 0) {
438 #ifdef VR_DEBUG
439 			printf("%s: rx shutdown error!\n",
440 			    device_xname(sc->vr_dev));
441 #endif
442 			sc->vr_flags |= VR_F_RESTART;
443 		}
444 	}
445 }
446 
447 #define	vr_calchash(addr) \
448 	(ether_crc32_be((addr), ETHER_ADDR_LEN) >> 26)
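/*
 * vr_calchash() takes the top 6 bits of the big-endian CRC-32 of the
 * station address, i.e. a value from 0 to 63: values 0-31 select a bit
 * in VR_MAR0 and values 32-63 a bit in VR_MAR1 (see vr_setmulti() below).
 */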
449 
450 /*
451  * Program the 64-bit multicast hash filter.
452  */
453 static void
454 vr_setmulti(struct vr_softc *sc)
455 {
456 	struct ifnet *ifp;
457 	int h = 0;
458 	uint32_t hashes[2] = { 0, 0 };
459 	struct ether_multistep step;
460 	struct ether_multi *enm;
461 	int mcnt = 0;
462 	uint8_t rxfilt;
463 
464 	ifp = &sc->vr_ec.ec_if;
465 
466 	rxfilt = CSR_READ_1(sc, VR_RXCFG);
467 
468 	if (ifp->if_flags & IFF_PROMISC) {
469 allmulti:
470 		ifp->if_flags |= IFF_ALLMULTI;
471 		rxfilt |= VR_RXCFG_RX_MULTI;
472 		CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
473 		CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
474 		CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
475 		return;
476 	}
477 
478 	/* first, zot all the existing hash bits */
479 	CSR_WRITE_4(sc, VR_MAR0, 0);
480 	CSR_WRITE_4(sc, VR_MAR1, 0);
481 
482 	/* now program new ones */
483 	ETHER_FIRST_MULTI(step, &sc->vr_ec, enm);
484 	while (enm != NULL) {
485 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
486 		    ETHER_ADDR_LEN) != 0)
487 			goto allmulti;
488 
489 		h = vr_calchash(enm->enm_addrlo);
490 
491 		if (h < 32)
492 			hashes[0] |= (1 << h);
493 		else
494 			hashes[1] |= (1 << (h - 32));
495 		ETHER_NEXT_MULTI(step, enm);
496 		mcnt++;
497 	}
498 
499 	ifp->if_flags &= ~IFF_ALLMULTI;
500 
501 	if (mcnt)
502 		rxfilt |= VR_RXCFG_RX_MULTI;
503 	else
504 		rxfilt &= ~VR_RXCFG_RX_MULTI;
505 
506 	CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
507 	CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
508 	CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
509 }
510 
511 static void
512 vr_reset(struct vr_softc *sc)
513 {
514 	int i;
515 
516 	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);
517 
518 	for (i = 0; i < VR_TIMEOUT; i++) {
519 		DELAY(10);
520 		if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
521 			break;
522 	}
523 	if (i == VR_TIMEOUT) {
524 		if (sc->vr_revid < REV_ID_VT3065_A) {
525 			printf("%s: reset never completed!\n",
526 			    device_xname(sc->vr_dev));
527 		} else {
528 			/* Use newer force reset command */
529 			printf("%s: using force reset command.\n",
530 			    device_xname(sc->vr_dev));
531 			VR_SETBIT(sc, VR_MISC_CR1, VR_MISCCR1_FORSRST);
532 		}
533 	}
534 
535 	/* Wait a little while for the chip to get its brains in order. */
536 	DELAY(1000);
537 }
538 
539 /*
540  * Initialize an RX descriptor and attach an MBUF cluster.
541  * Note: the length fields are only 11 bits wide, which means the
542  * largest size we can specify is 2047. This is important because
543  * MCLBYTES is 2048, so we have to subtract one; otherwise we'll
544  * overflow the field and make a mess.
545  */
546 static int
547 vr_add_rxbuf(struct vr_softc *sc, int i)
548 {
549 	struct vr_descsoft *ds = VR_DSRX(sc, i);
550 	struct mbuf *m_new;
551 	int error;
552 
553 	MGETHDR(m_new, M_DONTWAIT, MT_DATA);
554 	if (m_new == NULL)
555 		return (ENOBUFS);
556 
557 	MCLGET(m_new, M_DONTWAIT);
558 	if ((m_new->m_flags & M_EXT) == 0) {
559 		m_freem(m_new);
560 		return (ENOBUFS);
561 	}
562 
563 	if (ds->ds_mbuf != NULL)
564 		bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);
565 
566 	ds->ds_mbuf = m_new;
567 
568 	error = bus_dmamap_load(sc->vr_dmat, ds->ds_dmamap,
569 	    m_new->m_ext.ext_buf, m_new->m_ext.ext_size, NULL,
570 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
571 	if (error) {
572 		aprint_error_dev(sc->vr_dev,
573 		    "unable to load rx DMA map %d, error = %d\n", i, error);
574 		panic("vr_add_rxbuf");		/* XXX */
575 	}
576 
577 	bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
578 	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
579 
580 	VR_INIT_RXDESC(sc, i);
581 
582 	return (0);
583 }
584 
585 /*
586  * A frame has been uploaded: pass the resulting mbuf chain up to
587  * the higher level protocols.
588  */
589 static void
590 vr_rxeof(struct vr_softc *sc)
591 {
592 	struct mbuf *m;
593 	struct ifnet *ifp;
594 	struct vr_desc *d;
595 	struct vr_descsoft *ds;
596 	int i, total_len;
597 	uint32_t rxstat;
598 
599 	ifp = &sc->vr_ec.ec_if;
600 
601 	for (i = sc->vr_rxptr;; i = VR_NEXTRX(i)) {
602 		d = VR_CDRX(sc, i);
603 		ds = VR_DSRX(sc, i);
604 
605 		VR_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
606 
607 		rxstat = le32toh(d->vr_status);
608 
609 		if (rxstat & VR_RXSTAT_OWN) {
610 			/*
611 			 * We have processed all of the receive buffers.
612 			 */
613 			break;
614 		}
615 
616 		/*
617 		 * If an error occurs, update stats, clear the
618 		 * status word and leave the mbuf cluster in place:
619 		 * it should simply get re-used next time this descriptor
620 		 * comes up in the ring.
621 		 */
622 		if (rxstat & VR_RXSTAT_RXERR) {
623 			const char *errstr;
624 
625 			ifp->if_ierrors++;
626 			switch (rxstat & 0x000000FF) {
627 			case VR_RXSTAT_CRCERR:
628 				errstr = "crc error";
629 				break;
630 			case VR_RXSTAT_FRAMEALIGNERR:
631 				errstr = "frame alignment error";
632 				break;
633 			case VR_RXSTAT_FIFOOFLOW:
634 				errstr = "FIFO overflow";
635 				break;
636 			case VR_RXSTAT_GIANT:
637 				errstr = "received giant packet";
638 				break;
639 			case VR_RXSTAT_RUNT:
640 				errstr = "received runt packet";
641 				break;
642 			case VR_RXSTAT_BUSERR:
643 				errstr = "system bus error";
644 				break;
645 			case VR_RXSTAT_BUFFERR:
646 				errstr = "rx buffer error";
647 				break;
648 			default:
649 				errstr = "unknown rx error";
650 				break;
651 			}
652 			printf("%s: receive error: %s\n", device_xname(sc->vr_dev),
653 			    errstr);
654 
655 			VR_INIT_RXDESC(sc, i);
656 
657 			continue;
658 		} else if (!(rxstat & VR_RXSTAT_FIRSTFRAG) ||
659 		           !(rxstat & VR_RXSTAT_LASTFRAG)) {
660 			/*
661 			 * This driver expects to receive whole packets every
662 			 * time.  If we receive a fragment that is not
663 			 * a complete packet, we discard it.
664 			 */
665 			ifp->if_ierrors++;
666 
667 			printf("%s: receive error: incomplete frame; "
668 			       "size = %d, status = 0x%x\n",
669 			       device_xname(sc->vr_dev),
670 			       VR_RXBYTES(le32toh(d->vr_status)), rxstat);
671 
672 			VR_INIT_RXDESC(sc, i);
673 
674 			continue;
675 		}
676 
677 		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
678 		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
679 
680 		/* No errors; receive the packet. */
681 		total_len = VR_RXBYTES(le32toh(d->vr_status));
682 #ifdef DIAGNOSTIC
683 		if (total_len == 0) {
684 			/*
685 			 * If we receive a zero-length packet, we probably
686 			 * failed to handle an error condition above.
687 			 * Discard it to avoid a later crash.
688 			 */
689 			ifp->if_ierrors++;
690 
691 			printf("%s: receive error: zero-length packet; "
692 			       "status = 0x%x\n",
693 			       device_xname(sc->vr_dev), rxstat);
694 
695 			VR_INIT_RXDESC(sc, i);
696 
697 			continue;
698 		}
699 #endif
700 
701 		/*
702 		 * The Rhine chip includes the CRC with every packet.
703 		 * Trim it off here.
704 		 */
705 		total_len -= ETHER_CRC_LEN;
706 
707 #ifdef __NO_STRICT_ALIGNMENT
708 		/*
709 		 * If the packet is small enough to fit in a
710 		 * single header mbuf, allocate one and copy
711 		 * the data into it.  This greatly reduces
712 		 * memory consumption when we receive lots
713 		 * of small packets.
714 		 *
715 		 * Otherwise, we add a new buffer to the receive
716 		 * chain.  If this fails, we drop the packet and
717 		 * recycle the old buffer.
718 		 */
719 		if (vr_copy_small != 0 && total_len <= MHLEN) {
720 			MGETHDR(m, M_DONTWAIT, MT_DATA);
721 			if (m == NULL)
722 				goto dropit;
723 			memcpy(mtod(m, void *),
724 			    mtod(ds->ds_mbuf, void *), total_len);
725 			VR_INIT_RXDESC(sc, i);
726 			bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
727 			    ds->ds_dmamap->dm_mapsize,
728 			    BUS_DMASYNC_PREREAD);
729 		} else {
730 			m = ds->ds_mbuf;
731 			if (vr_add_rxbuf(sc, i) == ENOBUFS) {
732  dropit:
733 				ifp->if_ierrors++;
734 				VR_INIT_RXDESC(sc, i);
735 				bus_dmamap_sync(sc->vr_dmat,
736 				    ds->ds_dmamap, 0,
737 				    ds->ds_dmamap->dm_mapsize,
738 				    BUS_DMASYNC_PREREAD);
739 				continue;
740 			}
741 		}
742 #else
743 		/*
744 		 * The Rhine's packet buffers must be 4-byte aligned.
745 		 * But this means that the data after the Ethernet header
746 		 * is misaligned.  We must allocate a new buffer and
747 		 * copy the data, shifted forward 2 bytes.
748 		 */
749 		MGETHDR(m, M_DONTWAIT, MT_DATA);
750 		if (m == NULL) {
751  dropit:
752 			ifp->if_ierrors++;
753 			VR_INIT_RXDESC(sc, i);
754 			bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
755 			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
756 			continue;
757 		}
758 		if (total_len > (MHLEN - 2)) {
759 			MCLGET(m, M_DONTWAIT);
760 			if ((m->m_flags & M_EXT) == 0) {
761 				m_freem(m);
762 				goto dropit;
763 			}
764 		}
765 		m->m_data += 2;
766 
767 		/*
768 		 * Note that we use clusters for incoming frames, so the
769 		 * buffer is virtually contiguous.
770 		 */
771 		memcpy(mtod(m, void *), mtod(ds->ds_mbuf, void *),
772 		    total_len);
773 
774 		/* Allow the receive descriptor to continue using its mbuf. */
775 		VR_INIT_RXDESC(sc, i);
776 		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
777 		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
778 #endif /* __NO_STRICT_ALIGNMENT */
779 
780 		m_set_rcvif(m, ifp);
781 		m->m_pkthdr.len = m->m_len = total_len;
782 		/* Pass it on. */
783 		if_percpuq_enqueue(ifp->if_percpuq, m);
784 	}
785 
786 	/* Update the receive pointer. */
787 	sc->vr_rxptr = i;
788 }
789 
790 static void
791 vr_rxeoc(struct vr_softc *sc)
792 {
793 	struct ifnet *ifp;
794 	int i;
795 
796 	ifp = &sc->vr_ec.ec_if;
797 
798 	ifp->if_ierrors++;
799 
800 	VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
801 	for (i = 0; i < VR_TIMEOUT; i++) {
802 		DELAY(10);
803 		if ((CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RX_ON) == 0)
804 			break;
805 	}
806 	if (i == VR_TIMEOUT) {
807 		/* XXX need reset? */
808 		printf("%s: RX shutdown never complete\n",
809 		    device_xname(sc->vr_dev));
810 	}
811 
812 	vr_rxeof(sc);
813 
814 	CSR_WRITE_4(sc, VR_RXADDR, VR_CDRXADDR(sc, sc->vr_rxptr));
815 	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
816 	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO);
817 }
818 
819 /*
820  * A frame was downloaded to the chip. It's safe for us to clean up
821  * the list buffers.
822  */
823 static void
824 vr_txeof(struct vr_softc *sc)
825 {
826 	struct ifnet *ifp = &sc->vr_ec.ec_if;
827 	struct vr_desc *d;
828 	struct vr_descsoft *ds;
829 	uint32_t txstat;
830 	int i, j;
831 
832 	ifp->if_flags &= ~IFF_OACTIVE;
833 
834 	/*
835 	 * Go through our tx list and free mbufs for those
836 	 * frames that have been transmitted.
837 	 */
838 	for (i = sc->vr_txdirty; sc->vr_txpending != 0;
839 	     i = VR_NEXTTX(i), sc->vr_txpending--) {
840 		d = VR_CDTX(sc, i);
841 		ds = VR_DSTX(sc, i);
842 
843 		VR_CDTXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
844 
845 		txstat = le32toh(d->vr_status);
846 
847 		if (txstat & (VR_TXSTAT_ABRT | VR_TXSTAT_UDF)) {
848 			VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_TX_ON);
849 			for (j = 0; j < VR_TIMEOUT; j++) {
850 				DELAY(10);
851 				if ((CSR_READ_2(sc, VR_COMMAND) &
852 				    VR_CMD_TX_ON) == 0)
853 					break;
854 			}
855 			if (j == VR_TIMEOUT) {
856 				/* XXX need reset? */
857 				printf("%s: TX shutdown never complete\n",
858 				    device_xname(sc->vr_dev));
859 			}
860 			d->vr_status = htole32(VR_TXSTAT_OWN);
861 			CSR_WRITE_4(sc, VR_TXADDR, VR_CDTXADDR(sc, i));
862 			break;
863 		}
864 
865 		if (txstat & VR_TXSTAT_OWN)
866 			break;
867 
868 		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap,
869 		    0, ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
870 		bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);
871 		m_freem(ds->ds_mbuf);
872 		ds->ds_mbuf = NULL;
873 
874 		if (txstat & VR_TXSTAT_ERRSUM) {
875 			ifp->if_oerrors++;
876 			if (txstat & VR_TXSTAT_DEFER)
877 				ifp->if_collisions++;
878 			if (txstat & VR_TXSTAT_LATECOLL)
879 				ifp->if_collisions++;
880 		}
881 
882 		ifp->if_collisions += (txstat & VR_TXSTAT_COLLCNT) >> 3;
883 		ifp->if_opackets++;
884 	}
885 
886 	/* Update the dirty transmit buffer pointer. */
887 	sc->vr_txdirty = i;
888 
889 	/*
890 	 * Cancel the watchdog timer if there are no pending
891 	 * transmissions.
892 	 */
893 	if (sc->vr_txpending == 0)
894 		ifp->if_timer = 0;
895 }
896 
897 static int
898 vr_intr(void *arg)
899 {
900 	struct vr_softc *sc;
901 	struct ifnet *ifp;
902 	uint16_t status;
903 	int handled = 0, dotx = 0;
904 
905 	sc = arg;
906 	ifp = &sc->vr_ec.ec_if;
907 
908 	/* Suppress unwanted interrupts. */
909 	if ((ifp->if_flags & IFF_UP) == 0) {
910 		vr_stop(ifp, 1);
911 		return (0);
912 	}
913 
914 	/* Disable interrupts. */
915 	CSR_WRITE_2(sc, VR_IMR, 0x0000);
916 
917 	for (;;) {
918 		status = CSR_READ_2(sc, VR_ISR);
919 		if (status)
920 			CSR_WRITE_2(sc, VR_ISR, status);
921 
922 		if ((status & VR_INTRS) == 0)
923 			break;
924 
925 		handled = 1;
926 
927 		rnd_add_uint32(&sc->rnd_source, status);
928 
929 		if (status & VR_ISR_RX_OK)
930 			vr_rxeof(sc);
931 
932 		if (status & VR_ISR_RX_DROPPED) {
933 			printf("%s: rx packet lost\n", device_xname(sc->vr_dev));
934 			ifp->if_ierrors++;
935 		}
936 
937 		if (status &
938 		    (VR_ISR_RX_ERR | VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW))
939 			vr_rxeoc(sc);
940 
942 		if (status & (VR_ISR_BUSERR | VR_ISR_TX_UNDERRUN)) {
943 			if (status & VR_ISR_BUSERR)
944 				printf("%s: PCI bus error\n",
945 				    device_xname(sc->vr_dev));
946 			if (status & VR_ISR_TX_UNDERRUN)
947 				printf("%s: transmit underrun\n",
948 				    device_xname(sc->vr_dev));
949 			/* vr_init() calls vr_start() */
950 			dotx = 0;
951 			(void)vr_init(ifp);
952 
953 		}
954 
955 		if (status & VR_ISR_TX_OK) {
956 			dotx = 1;
957 			vr_txeof(sc);
958 		}
959 
960 		if (status &
961 		    (VR_ISR_TX_ABRT | VR_ISR_TX_ABRT2 | VR_ISR_TX_UDFI)) {
962 			if (status & (VR_ISR_TX_ABRT | VR_ISR_TX_ABRT2))
963 				printf("%s: transmit aborted\n",
964 				    device_xname(sc->vr_dev));
965 			if (status & VR_ISR_TX_UDFI)
966 				printf("%s: transmit underflow\n",
967 				    device_xname(sc->vr_dev));
968 			ifp->if_oerrors++;
969 			dotx = 1;
970 			vr_txeof(sc);
971 			if (sc->vr_txpending) {
972 				VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON);
973 				VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO);
974 			}
975 		}
976 	}
977 
978 	/* Re-enable interrupts. */
979 	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
980 
981 	if (dotx)
982 		if_schedule_deferred_start(ifp);
983 
984 	return (handled);
985 }
986 
987 /*
988  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
989  * to the mbuf data regions directly in the transmit lists. We also save a
990  * copy of the pointers since the transmit list fragment pointers are
991  * physical addresses.
992  */
993 static void
994 vr_start(struct ifnet *ifp)
995 {
996 	struct vr_softc *sc = ifp->if_softc;
997 	struct mbuf *m0, *m;
998 	struct vr_desc *d;
999 	struct vr_descsoft *ds;
1000 	int error, firsttx, nexttx, opending;
1001 
1002 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
1003 		return;
1004 	if (sc->vr_link == false)
1005 		return;
1006 
1007 	/*
1008 	 * Remember the previous txpending and the first transmit
1009 	 * descriptor we use.
1010 	 */
1011 	opending = sc->vr_txpending;
1012 	firsttx = VR_NEXTTX(sc->vr_txlast);
1013 
1014 	/*
1015 	 * Loop through the send queue, setting up transmit descriptors
1016 	 * until we drain the queue, or use up all available transmit
1017 	 * descriptors.
1018 	 */
1019 	while (sc->vr_txpending < VR_NTXDESC) {
1020 		/*
1021 		 * Grab a packet off the queue.
1022 		 */
1023 		IFQ_POLL(&ifp->if_snd, m0);
1024 		if (m0 == NULL)
1025 			break;
1026 		m = NULL;
1027 
1028 		/*
1029 		 * Get the next available transmit descriptor.
1030 		 */
1031 		nexttx = VR_NEXTTX(sc->vr_txlast);
1032 		d = VR_CDTX(sc, nexttx);
1033 		ds = VR_DSTX(sc, nexttx);
1034 
1035 		/*
1036 		 * Load the DMA map.  If this fails, the packet didn't
1037 		 * fit in one DMA segment, and we need to copy.  Note,
1038 		 * the packet must also be aligned.
1039 		 * If the packet is too small, copy it too, so we're sure
1040 		 * we have enough room for the pad buffer.
1041 		 */
1042 		if ((mtod(m0, uintptr_t) & 3) != 0 ||
1043 		    m0->m_pkthdr.len < VR_MIN_FRAMELEN ||
1044 		    bus_dmamap_load_mbuf(sc->vr_dmat, ds->ds_dmamap, m0,
1045 		     BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
1046 			MGETHDR(m, M_DONTWAIT, MT_DATA);
1047 			if (m == NULL) {
1048 				printf("%s: unable to allocate Tx mbuf\n",
1049 				    device_xname(sc->vr_dev));
1050 				break;
1051 			}
1052 			if (m0->m_pkthdr.len > MHLEN) {
1053 				MCLGET(m, M_DONTWAIT);
1054 				if ((m->m_flags & M_EXT) == 0) {
1055 					printf("%s: unable to allocate Tx "
1056 					    "cluster\n", device_xname(sc->vr_dev));
1057 					m_freem(m);
1058 					break;
1059 				}
1060 			}
1061 			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
1062 			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
1063 			/*
1064 			 * The Rhine doesn't auto-pad, so we have to do this
1065 			 * ourselves.
1066 			 */
1067 			if (m0->m_pkthdr.len < VR_MIN_FRAMELEN) {
1068 				memset(mtod(m, char *) + m0->m_pkthdr.len,
1069 				    0, VR_MIN_FRAMELEN - m0->m_pkthdr.len);
1070 				m->m_pkthdr.len = m->m_len = VR_MIN_FRAMELEN;
1071 			}
1072 			error = bus_dmamap_load_mbuf(sc->vr_dmat,
1073 			    ds->ds_dmamap, m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
1074 			if (error) {
1075 				m_freem(m);
1076 				printf("%s: unable to load Tx buffer, "
1077 				    "error = %d\n", device_xname(sc->vr_dev), error);
1078 				break;
1079 			}
1080 		}
1081 
1082 		IFQ_DEQUEUE(&ifp->if_snd, m0);
1083 		if (m != NULL) {
1084 			m_freem(m0);
1085 			m0 = m;
1086 		}
1087 
1088 		/* Sync the DMA map. */
1089 		bus_dmamap_sync(sc->vr_dmat, ds->ds_dmamap, 0,
1090 		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1091 
1092 		/*
1093 		 * Store a pointer to the packet so we can free it later.
1094 		 */
1095 		ds->ds_mbuf = m0;
1096 
1097 		/*
1098 		 * If there's a BPF listener, bounce a copy of this frame
1099 		 * to him.
1100 		 */
1101 		bpf_mtap(ifp, m0);
1102 
1103 		/*
1104 		 * Fill in the transmit descriptor.
1105 		 */
1106 		d->vr_data = htole32(ds->ds_dmamap->dm_segs[0].ds_addr);
1107 		d->vr_ctl = htole32(m0->m_pkthdr.len);
1108 		d->vr_ctl |= htole32(VR_TXCTL_FIRSTFRAG | VR_TXCTL_LASTFRAG);
1109 
1110 		/*
1111 		 * If this is the first descriptor we're enqueuing,
1112 		 * don't give it to the Rhine yet.  That could cause
1113 		 * a race condition.  We'll do it below.
1114 		 */
1115 		if (nexttx == firsttx)
1116 			d->vr_status = 0;
1117 		else
1118 			d->vr_status = htole32(VR_TXSTAT_OWN);
1119 
1120 		VR_CDTXSYNC(sc, nexttx,
1121 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1122 
1123 		/* Advance the tx pointer. */
1124 		sc->vr_txpending++;
1125 		sc->vr_txlast = nexttx;
1126 	}
1127 
1128 	if (sc->vr_txpending == VR_NTXDESC) {
1129 		/* No more slots left; notify upper layer. */
1130 		ifp->if_flags |= IFF_OACTIVE;
1131 	}
1132 
1133 	if (sc->vr_txpending != opending) {
1134 		/*
1135 		 * We enqueued packets.  If the transmitter was idle,
1136 		 * reset the txdirty pointer.
1137 		 */
1138 		if (opending == 0)
1139 			sc->vr_txdirty = firsttx;
1140 
1141 		/*
1142 		 * Cause a transmit interrupt to happen on the
1143 		 * last packet we enqueued.
1144 		 */
1145 		VR_CDTX(sc, sc->vr_txlast)->vr_ctl |= htole32(VR_TXCTL_FINT);
1146 		VR_CDTXSYNC(sc, sc->vr_txlast,
1147 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1148 
1149 		/*
1150 		 * The entire packet chain is set up.  Give the
1151 		 * first descriptor to the Rhine now.
1152 		 */
1153 		VR_CDTX(sc, firsttx)->vr_status = htole32(VR_TXSTAT_OWN);
1154 		VR_CDTXSYNC(sc, firsttx,
1155 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1156 
1157 		/* Start the transmitter. */
1158 		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO);
1159 
1160 		/* Set the watchdog timer in case the chip flakes out. */
1161 		ifp->if_timer = 5;
1162 	}
1163 }
1164 
1165 /*
1166  * Initialize the interface.  Must be called at splnet.
1167  */
1168 static int
1169 vr_init(struct ifnet *ifp)
1170 {
1171 	struct vr_softc *sc = ifp->if_softc;
1172 	struct vr_desc *d;
1173 	struct vr_descsoft *ds;
1174 	int i, error = 0;
1175 
1176 	/* Cancel pending I/O. */
1177 	vr_stop(ifp, 0);
1178 
1179 	/* Reset the Rhine to a known state. */
1180 	vr_reset(sc);
1181 
1182 	/* set DMA length in BCR0 and BCR1 */
1183 	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_DMA_LENGTH);
1184 	VR_SETBIT(sc, VR_BCR0, VR_BCR0_DMA_STORENFWD);
1185 
1186 	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_RX_THRESH);
1187 	VR_SETBIT(sc, VR_BCR0, VR_BCR0_RXTH_128BYTES);
1188 
1189 	VR_CLRBIT(sc, VR_BCR1, VR_BCR1_TX_THRESH);
1190 	VR_SETBIT(sc, VR_BCR1, VR_BCR1_TXTH_STORENFWD);
1191 
1192 	/* set DMA threshold length in RXCFG and TXCFG */
1193 	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
1194 	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_128BYTES);
1195 
1196 	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
1197 	VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD);
1198 
1199 	/*
1200 	 * Initialize the transmit descriptor ring.  txlast is initialized
1201 	 * to the end of the list so that it will wrap around to the first
1202 	 * descriptor when the first packet is transmitted.
1203 	 */
1204 	for (i = 0; i < VR_NTXDESC; i++) {
1205 		d = VR_CDTX(sc, i);
1206 		memset(d, 0, sizeof(struct vr_desc));
1207 		d->vr_next = htole32(VR_CDTXADDR(sc, VR_NEXTTX(i)));
1208 		VR_CDTXSYNC(sc, i, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1209 	}
1210 	sc->vr_txpending = 0;
1211 	sc->vr_txdirty = 0;
1212 	sc->vr_txlast = VR_NTXDESC - 1;
1213 
1214 	/*
1215 	 * Initialize the receive descriptor ring.
1216 	 */
1217 	for (i = 0; i < VR_NRXDESC; i++) {
1218 		ds = VR_DSRX(sc, i);
1219 		if (ds->ds_mbuf == NULL) {
1220 			if ((error = vr_add_rxbuf(sc, i)) != 0) {
1221 				printf("%s: unable to allocate or map rx "
1222 				    "buffer %d, error = %d\n",
1223 				    device_xname(sc->vr_dev), i, error);
1224 				/*
1225 				 * XXX Should attempt to run with fewer receive
1226 				 * XXX buffers instead of just failing.
1227 				 */
1228 				vr_rxdrain(sc);
1229 				goto out;
1230 			}
1231 		} else
1232 			VR_INIT_RXDESC(sc, i);
1233 	}
1234 	sc->vr_rxptr = 0;
1235 
1236 	/* If we want promiscuous mode, set the allframes bit. */
1237 	if (ifp->if_flags & IFF_PROMISC)
1238 		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
1239 	else
1240 		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
1241 
1242 	/* Set capture broadcast bit to capture broadcast frames. */
1243 	if (ifp->if_flags & IFF_BROADCAST)
1244 		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
1245 	else
1246 		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
1247 
1248 	/* Program the multicast filter, if necessary. */
1249 	vr_setmulti(sc);
1250 
1251 	/* Give the transmit and receive rings to the Rhine. */
1252 	CSR_WRITE_4(sc, VR_RXADDR, VR_CDRXADDR(sc, sc->vr_rxptr));
1253 	CSR_WRITE_4(sc, VR_TXADDR, VR_CDTXADDR(sc, VR_NEXTTX(sc->vr_txlast)));
1254 
1255 	/* Set current media. */
1256 	sc->vr_link = true;
1257 	if ((error = ether_mediachange(ifp)) != 0)
1258 		goto out;
1259 
1260 	/* Enable receiver and transmitter. */
1261 	CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START|
1262 				    VR_CMD_TX_ON|VR_CMD_RX_ON|
1263 				    VR_CMD_RX_GO);
1264 
1265 	/* Enable interrupts. */
1266 	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
1267 	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
1268 
1269 	ifp->if_flags |= IFF_RUNNING;
1270 	ifp->if_flags &= ~IFF_OACTIVE;
1271 
1272 	/* Start one second timer. */
1273 	callout_reset(&sc->vr_tick_ch, hz, vr_tick, sc);
1274 
1275 	/* Attempt to start output on the interface. */
1276 	vr_start(ifp);
1277 
1278  out:
1279 	if (error)
1280 		printf("%s: interface not running\n", device_xname(sc->vr_dev));
1281 	return (error);
1282 }
1283 
1284 static int
1285 vr_ioctl(struct ifnet *ifp, u_long command, void *data)
1286 {
1287 	struct vr_softc *sc = ifp->if_softc;
1288 	int s, error = 0;
1289 
1290 	s = splnet();
1291 
1292 	switch (command) {
1293 	case SIOCSIFFLAGS:
1294 		if ((error = ifioctl_common(ifp, command, data)) != 0)
1295 			break;
1296 
1297 		switch (ifp->if_flags & (IFF_UP | IFF_RUNNING)) {
1298 		case IFF_RUNNING:
1299 			vr_stop(ifp, 1);
1300 			break;
1301 		case IFF_UP:
1302 			vr_init(ifp);
1303 			break;
1304 		case IFF_UP | IFF_RUNNING:
1305 			if ((ifp->if_flags ^ sc->vr_if_flags) == IFF_PROMISC)
1306 				vr_setmulti(sc);
1307 			else
1308 				vr_init(ifp);
1309 			break;
1310 		}
1311 		sc->vr_if_flags = ifp->if_flags;
1312 		break;
1313 	default:
1314 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
1315 			break;
1316 		error = 0;
1317 		if (command == SIOCADDMULTI || command == SIOCDELMULTI)
1318 			vr_setmulti(sc);
1319 	}
1320 	splx(s);
1321 
1322 	return error;
1323 }
1324 
1325 static void
1326 vr_watchdog(struct ifnet *ifp)
1327 {
1328 	struct vr_softc *sc = ifp->if_softc;
1329 
1330 	printf("%s: device timeout\n", device_xname(sc->vr_dev));
1331 	ifp->if_oerrors++;
1332 
1333 	(void) vr_init(ifp);
1334 }
1335 
1336 /*
1337  * One second timer, used to tick MII.
1338  */
1339 static void
1340 vr_tick(void *arg)
1341 {
1342 	struct vr_softc *sc = arg;
1343 	int s;
1344 
1345 	s = splnet();
1346 	if (sc->vr_flags & VR_F_RESTART) {
1347 		printf("%s: restarting\n", device_xname(sc->vr_dev));
1348 		vr_init(&sc->vr_ec.ec_if);
1349 		sc->vr_flags &= ~VR_F_RESTART;
1350 	}
1351 	mii_tick(&sc->vr_mii);
1352 	splx(s);
1353 
1354 	callout_reset(&sc->vr_tick_ch, hz, vr_tick, sc);
1355 }
1356 
1357 /*
1358  * Drain the receive queue.
1359  */
1360 static void
1361 vr_rxdrain(struct vr_softc *sc)
1362 {
1363 	struct vr_descsoft *ds;
1364 	int i;
1365 
1366 	for (i = 0; i < VR_NRXDESC; i++) {
1367 		ds = VR_DSRX(sc, i);
1368 		if (ds->ds_mbuf != NULL) {
1369 			bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);
1370 			m_freem(ds->ds_mbuf);
1371 			ds->ds_mbuf = NULL;
1372 		}
1373 	}
1374 }
1375 
1376 /*
1377  * Stop the adapter and free any mbufs allocated to the
1378  * transmit lists.
1379  */
1380 static void
1381 vr_stop(struct ifnet *ifp, int disable)
1382 {
1383 	struct vr_softc *sc = ifp->if_softc;
1384 	struct vr_descsoft *ds;
1385 	int i;
1386 
1387 	/* Cancel one second timer. */
1388 	callout_stop(&sc->vr_tick_ch);
1389 
1390 	/* Down the MII. */
1391 	mii_down(&sc->vr_mii);
1392 
1393 	ifp = &sc->vr_ec.ec_if;
1394 	ifp->if_timer = 0;
1395 
1396 	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP);
1397 	VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON));
1398 	CSR_WRITE_2(sc, VR_IMR, 0x0000);
1399 	CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
1400 	CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);
1401 
1402 	/*
1403 	 * Release any queued transmit buffers.
1404 	 */
1405 	for (i = 0; i < VR_NTXDESC; i++) {
1406 		ds = VR_DSTX(sc, i);
1407 		if (ds->ds_mbuf != NULL) {
1408 			bus_dmamap_unload(sc->vr_dmat, ds->ds_dmamap);
1409 			m_freem(ds->ds_mbuf);
1410 			ds->ds_mbuf = NULL;
1411 		}
1412 	}
1413 
1414 	/*
1415 	 * Mark the interface down and cancel the watchdog timer.
1416 	 */
1417 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1418 	ifp->if_timer = 0;
1419 
1420 	if (disable)
1421 		vr_rxdrain(sc);
1422 }
1423 
1424 static int	vr_probe(device_t, cfdata_t, void *);
1425 static void	vr_attach(device_t, device_t, void *);
1426 static bool	vr_shutdown(device_t, int);
1427 
1428 CFATTACH_DECL_NEW(vr, sizeof (struct vr_softc),
1429     vr_probe, vr_attach, NULL, NULL);
1430 
1431 static const struct vr_type *
1432 vr_lookup(struct pci_attach_args *pa)
1433 {
1434 	const struct vr_type *vrt;
1435 	int i;
1436 
1437 	for (i = 0; i < __arraycount(vr_devs); i++) {
1438 		vrt = &vr_devs[i];
1439 		if (PCI_VENDOR(pa->pa_id) == vrt->vr_vid &&
1440 		    PCI_PRODUCT(pa->pa_id) == vrt->vr_did)
1441 			return (vrt);
1442 	}
1443 	return (NULL);
1444 }
1445 
1446 static int
1447 vr_probe(device_t parent, cfdata_t match, void *aux)
1448 {
1449 	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
1450 
1451 	if (vr_lookup(pa) != NULL)
1452 		return (1);
1453 
1454 	return (0);
1455 }
1456 
1457 /*
1458  * Stop all chip I/O so that the kernel's probe routines don't
1459  * get confused by errant DMAs when rebooting.
1460  */
1461 static bool
1462 vr_shutdown(device_t self, int howto)
1463 {
1464 	struct vr_softc *sc = device_private(self);
1465 
1466 	vr_stop(&sc->vr_ec.ec_if, 1);
1467 
1468 	return true;
1469 }
1470 
1471 /*
1472  * Attach the interface. Allocate softc structures, do ifmedia
1473  * setup and ethernet/BPF attach.
1474  */
1475 static void
1476 vr_attach(device_t parent, device_t self, void *aux)
1477 {
1478 	struct vr_softc *sc = device_private(self);
1479 	struct pci_attach_args *pa = (struct pci_attach_args *) aux;
1480 	bus_dma_segment_t seg;
1481 	uint32_t reg;
1482 	struct ifnet *ifp;
1483 	uint8_t eaddr[ETHER_ADDR_LEN], mac;
1484 	int i, rseg, error;
1485 	char intrbuf[PCI_INTRSTR_LEN];
1486 
1487 #define	PCI_CONF_WRITE(r, v)	pci_conf_write(sc->vr_pc, sc->vr_tag, (r), (v))
1488 #define	PCI_CONF_READ(r)	pci_conf_read(sc->vr_pc, sc->vr_tag, (r))
1489 
1490 	sc->vr_dev = self;
1491 	sc->vr_pc = pa->pa_pc;
1492 	sc->vr_tag = pa->pa_tag;
1493 	sc->vr_id = pa->pa_id;
1494 	callout_init(&sc->vr_tick_ch, 0);
1495 
1496 	pci_aprint_devinfo(pa, NULL);
1497 
1498 	/*
1499 	 * Handle power management nonsense.
1500 	 */
1501 
1502 	sc->vr_save_iobase = PCI_CONF_READ(VR_PCI_LOIO);
1503 	sc->vr_save_membase = PCI_CONF_READ(VR_PCI_LOMEM);
1504 	sc->vr_save_irq = PCI_CONF_READ(PCI_INTERRUPT_REG);
1505 
1506 	/* power up chip */
1507 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
1508 	    vr_restore_state)) && error != EOPNOTSUPP) {
1509 		aprint_error_dev(self, "cannot activate %d\n", error);
1510 		return;
1511 	}
1512 
1513 	/* Make sure bus mastering is enabled. */
1514 	reg = PCI_CONF_READ(PCI_COMMAND_STATUS_REG);
1515 	reg |= PCI_COMMAND_MASTER_ENABLE;
1516 	PCI_CONF_WRITE(PCI_COMMAND_STATUS_REG, reg);
1517 
1518 	/* Get revision */
1519 	sc->vr_revid = PCI_REVISION(pa->pa_class);
1520 
1521 	/*
1522 	 * Map control/status registers.
1523 	 */
1524 	{
1525 		bus_space_tag_t iot, memt;
1526 		bus_space_handle_t ioh, memh;
1527 		int ioh_valid, memh_valid;
1528 		pci_intr_handle_t intrhandle;
1529 		const char *intrstr;
1530 
1531 		ioh_valid = (pci_mapreg_map(pa, VR_PCI_LOIO,
1532 			PCI_MAPREG_TYPE_IO, 0,
1533 			&iot, &ioh, NULL, NULL) == 0);
1534 		memh_valid = (pci_mapreg_map(pa, VR_PCI_LOMEM,
1535 			PCI_MAPREG_TYPE_MEM |
1536 			PCI_MAPREG_MEM_TYPE_32BIT,
1537 			0, &memt, &memh, NULL, NULL) == 0);
1538 #if defined(VR_USEIOSPACE)
1539 		if (ioh_valid) {
1540 			sc->vr_bst = iot;
1541 			sc->vr_bsh = ioh;
1542 		} else if (memh_valid) {
1543 			sc->vr_bst = memt;
1544 			sc->vr_bsh = memh;
1545 		}
1546 #else
1547 		if (memh_valid) {
1548 			sc->vr_bst = memt;
1549 			sc->vr_bsh = memh;
1550 		} else if (ioh_valid) {
1551 			sc->vr_bst = iot;
1552 			sc->vr_bsh = ioh;
1553 		}
1554 #endif
1555 		else {
1556 			aprint_error(": unable to map device registers\n");
1557 			return;
1558 		}
1559 
1560 		/* Allocate interrupt */
1561 		if (pci_intr_map(pa, &intrhandle)) {
1562 			aprint_error_dev(self, "couldn't map interrupt\n");
1563 			return;
1564 		}
1565 		intrstr = pci_intr_string(pa->pa_pc, intrhandle, intrbuf,
1566 		    sizeof(intrbuf));
1567 		sc->vr_ih = pci_intr_establish(pa->pa_pc, intrhandle, IPL_NET,
1568 						vr_intr, sc);
1569 		if (sc->vr_ih == NULL) {
1570 			aprint_error_dev(self, "couldn't establish interrupt");
1571 			if (intrstr != NULL)
1572 				aprint_error(" at %s", intrstr);
1573 			aprint_error("\n");
			return;
1574 		}
1575 		aprint_normal_dev(self, "interrupting at %s\n", intrstr);
1576 	}
1577 
1578 	/*
1579 	 * Windows may put the chip in suspend mode when it
1580 	 * shuts down. Be sure to kick it in the head to wake it
1581 	 * up again.
1582 	 *
1583 	 * Don't touch this register on VT3043 since it causes
1584 	 * kernel MCHK trap on macppc.
1585 	 * (Note some VT86C100A chip returns a product ID of VT3043)
1586 	 * (Note that some VT86C100A chips return a product ID of VT3043.)
1587 	if (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_VIATECH_VT3043)
1588 		VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1));
1589 
1590 	/* Reset the adapter. */
1591 	vr_reset(sc);
1592 
1593 	/*
1594 	 * Get station address. The way the Rhine chips work,
1595 	 * you're not allowed to directly access the EEPROM once
1596 	 * they've been programmed a special way. Consequently,
1597 	 * we need to read the node address from the PAR0 and PAR1
1598 	 * registers.
1599 	 *
1600 	 * XXXSCW: On the Rhine III, setting VR_EECSR_LOAD forces a reload
1601 	 *         of the *whole* EEPROM, not just the MAC address. This is
1602 	 *         pretty pointless since the chip does this automatically
1603 	 *         at powerup/reset.
1604 	 *         I suspect the same thing applies to the other Rhine
1605 	 *         variants, but in the absence of a data sheet for those
1606 	 *         (and the lack of anyone else noticing the problems this
1607 	 *         causes) I'm going to retain the old behaviour for the
1608 	 *         other parts.
1609 	 *         In some cases, the chip really does startup without having
1610 	 *         In some cases, the chip really does start up without having
1611 	 *         a reload if we see an all-zeroes MAC address.
1612 	 */
1613 	for (mac = 0, i = 0; i < ETHER_ADDR_LEN; i++)
1614 		mac |= (eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i));
1615 
1616 	if (mac == 0 || (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_VIATECH_VT6105 &&
1617 	    PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_VIATECH_VT6102)) {
1618 		VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
1619 		DELAY(200);
1620 		for (i = 0; i < ETHER_ADDR_LEN; i++)
1621 			eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);
1622 	}
1623 
1624 	/*
1625 	 * A Rhine chip was detected. Inform the world.
1626 	 */
1627 	aprint_normal("%s: Ethernet address: %s\n",
1628 		device_xname(self), ether_sprintf(eaddr));
1629 
1630 	memcpy(sc->vr_enaddr, eaddr, ETHER_ADDR_LEN);
1631 
1632 	sc->vr_dmat = pa->pa_dmat;
1633 
1634 	/*
1635 	 * Allocate the control data structures, and create and load
1636 	 * the DMA map for it.
1637 	 */
1638 	if ((error = bus_dmamem_alloc(sc->vr_dmat,
1639 	    sizeof(struct vr_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
1640 	    0)) != 0) {
1641 		aprint_error_dev(self,
1642 		    "unable to allocate control data, error = %d\n", error);
1643 		goto fail_0;
1644 	}
1645 
1646 	if ((error = bus_dmamem_map(sc->vr_dmat, &seg, rseg,
1647 	    sizeof(struct vr_control_data), (void **)&sc->vr_control_data,
1648 	    BUS_DMA_COHERENT)) != 0) {
1649 		aprint_error_dev(self,
1650 		    "unable to map control data, error = %d\n", error);
1651 		goto fail_1;
1652 	}
1653 
1654 	if ((error = bus_dmamap_create(sc->vr_dmat,
1655 	    sizeof(struct vr_control_data), 1,
1656 	    sizeof(struct vr_control_data), 0, 0,
1657 	    &sc->vr_cddmamap)) != 0) {
1658 		aprint_error_dev(self,
1659 		    "unable to create control data DMA map, error = %d\n",
1660 		    error);
1661 		goto fail_2;
1662 	}
1663 
1664 	if ((error = bus_dmamap_load(sc->vr_dmat, sc->vr_cddmamap,
1665 	    sc->vr_control_data, sizeof(struct vr_control_data), NULL,
1666 	    0)) != 0) {
1667 		aprint_error_dev(self,
1668 		    "unable to load control data DMA map, error = %d\n",
1669 		    error);
1670 		goto fail_3;
1671 	}
1672 
1673 	/*
1674 	 * Create the transmit buffer DMA maps.
1675 	 */
1676 	for (i = 0; i < VR_NTXDESC; i++) {
1677 		if ((error = bus_dmamap_create(sc->vr_dmat, MCLBYTES,
1678 		    1, MCLBYTES, 0, 0,
1679 		    &VR_DSTX(sc, i)->ds_dmamap)) != 0) {
1680 			aprint_error_dev(self,
1681 			    "unable to create tx DMA map %d, error = %d\n", i,
1682 			    error);
1683 			goto fail_4;
1684 		}
1685 	}
1686 
1687 	/*
1688 	 * Create the receive buffer DMA maps.
1689 	 */
1690 	for (i = 0; i < VR_NRXDESC; i++) {
1691 		if ((error = bus_dmamap_create(sc->vr_dmat, MCLBYTES, 1,
1692 		    MCLBYTES, 0, 0,
1693 		    &VR_DSRX(sc, i)->ds_dmamap)) != 0) {
1694 			aprint_error_dev(self,
1695 			    "unable to create rx DMA map %d, error = %d\n", i,
1696 			    error);
1697 			goto fail_5;
1698 		}
1699 		VR_DSRX(sc, i)->ds_mbuf = NULL;
1700 	}
1701 
1702 	ifp = &sc->vr_ec.ec_if;
1703 	ifp->if_softc = sc;
1704 	ifp->if_mtu = ETHERMTU;
1705 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1706 	ifp->if_ioctl = vr_ioctl;
1707 	ifp->if_start = vr_start;
1708 	ifp->if_watchdog = vr_watchdog;
1709 	ifp->if_init = vr_init;
1710 	ifp->if_stop = vr_stop;
1711 	IFQ_SET_READY(&ifp->if_snd);
1712 
1713 	strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
1714 
1715 	/*
1716 	 * Initialize MII/media info.
1717 	 */
1718 	sc->vr_mii.mii_ifp = ifp;
1719 	sc->vr_mii.mii_readreg = vr_mii_readreg;
1720 	sc->vr_mii.mii_writereg = vr_mii_writereg;
1721 	sc->vr_mii.mii_statchg = vr_mii_statchg;
1722 
1723 	sc->vr_ec.ec_mii = &sc->vr_mii;
1724 	ifmedia_init(&sc->vr_mii.mii_media, IFM_IMASK, ether_mediachange,
1725 		ether_mediastatus);
1726 	mii_attach(self, &sc->vr_mii, 0xffffffff, MII_PHY_ANY,
1727 	    MII_OFFSET_ANY, MIIF_FORCEANEG);
1728 	if (LIST_FIRST(&sc->vr_mii.mii_phys) == NULL) {
1729 		ifmedia_add(&sc->vr_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
1730 		ifmedia_set(&sc->vr_mii.mii_media, IFM_ETHER|IFM_NONE);
1731 	} else
1732 		ifmedia_set(&sc->vr_mii.mii_media, IFM_ETHER|IFM_AUTO);
1733 
1734 	sc->vr_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
1735 
1736 	/*
1737 	 * Call MI attach routines.
1738 	 */
1739 	if_attach(ifp);
1740 	if_deferred_start_init(ifp, NULL);
1741 	ether_ifattach(ifp, sc->vr_enaddr);
1742 
1743 	rnd_attach_source(&sc->rnd_source, device_xname(self),
1744 	    RND_TYPE_NET, RND_FLAG_DEFAULT);
1745 
1746 	if (pmf_device_register1(self, NULL, vr_resume, vr_shutdown))
1747 		pmf_class_network_register(self, ifp);
1748 	else
1749 		aprint_error_dev(self, "couldn't establish power handler\n");
1750 
1751 	return;
1752 
1753  fail_5:
1754 	for (i = 0; i < VR_NRXDESC; i++) {
1755 		if (sc->vr_rxsoft[i].ds_dmamap != NULL)
1756 			bus_dmamap_destroy(sc->vr_dmat,
1757 			    sc->vr_rxsoft[i].ds_dmamap);
1758 	}
1759  fail_4:
1760 	for (i = 0; i < VR_NTXDESC; i++) {
1761 		if (sc->vr_txsoft[i].ds_dmamap != NULL)
1762 			bus_dmamap_destroy(sc->vr_dmat,
1763 			    sc->vr_txsoft[i].ds_dmamap);
1764 	}
1765 	bus_dmamap_unload(sc->vr_dmat, sc->vr_cddmamap);
1766  fail_3:
1767 	bus_dmamap_destroy(sc->vr_dmat, sc->vr_cddmamap);
1768  fail_2:
1769 	bus_dmamem_unmap(sc->vr_dmat, (void *)sc->vr_control_data,
1770 	    sizeof(struct vr_control_data));
1771  fail_1:
1772 	bus_dmamem_free(sc->vr_dmat, &seg, rseg);
1773  fail_0:
1774 	return;
1775 }
1776 
1777 static int
1778 vr_restore_state(pci_chipset_tag_t pc, pcitag_t tag, device_t self,
1779     pcireg_t state)
1780 {
1781 	struct vr_softc *sc = device_private(self);
1782 	int error;
1783 
1784 	if (state == PCI_PMCSR_STATE_D0)
1785 		return 0;
1786 	if ((error = pci_set_powerstate(pc, tag, PCI_PMCSR_STATE_D0)))
1787 		return error;
1788 
1789 	/* Restore PCI config data. */
1790 	PCI_CONF_WRITE(VR_PCI_LOIO, sc->vr_save_iobase);
1791 	PCI_CONF_WRITE(VR_PCI_LOMEM, sc->vr_save_membase);
1792 	PCI_CONF_WRITE(PCI_INTERRUPT_REG, sc->vr_save_irq);
1793 	return 0;
1794 }
1795 
1796 static bool
1797 vr_resume(device_t self, const pmf_qual_t *qual)
1798 {
1799 	struct vr_softc *sc = device_private(self);
1800 
1801 	if (PCI_PRODUCT(sc->vr_id) != PCI_PRODUCT_VIATECH_VT3043)
1802 		VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1));
1803 
1804 	return true;
1805 }
1806