xref: /openbsd-src/sys/dev/pci/if_vr.c (revision 50b7afb2c2c0993b0894d4e34bf857cb13ed9c80)
1 /*	$OpenBSD: if_vr.c,v 1.134 2014/07/08 05:35:19 dlg Exp $	*/
2 
3 /*
4  * Copyright (c) 1997, 1998
5  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by Bill Paul.
18  * 4. Neither the name of the author nor the names of any co-contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  * $FreeBSD: src/sys/pci/if_vr.c,v 1.73 2003/08/22 07:13:22 imp Exp $
35  */
36 
37 /*
38  * VIA Rhine fast ethernet PCI NIC driver
39  *
40  * Supports various network adapters based on the VIA Rhine
41  * and Rhine II PCI controllers, including the D-Link DFE530TX.
42  * Datasheets are available at ftp://ftp.vtbridge.org/Docs/LAN/.
43  *
44  * Written by Bill Paul <wpaul@ctr.columbia.edu>
45  * Electrical Engineering Department
46  * Columbia University, New York City
47  */
48 
49 /*
 * The VIA Rhine controllers are similar in some respects to
 * the DEC tulip chips, except less complicated. The controller
52  * uses an MII bus and an external physical layer interface. The
53  * receiver has a one entry perfect filter and a 64-bit hash table
54  * multicast filter. Transmit and receive descriptors are similar
55  * to the tulip.
56  *
57  * Early Rhine has a serious flaw in its transmit DMA mechanism:
58  * transmit buffers must be longword aligned. Unfortunately,
59  * OpenBSD doesn't guarantee that mbufs will be filled in starting
60  * at longword boundaries, so we have to do a buffer copy before
61  * transmission.
62  */
63 
64 #include "bpfilter.h"
65 #include "vlan.h"
66 
67 #include <sys/param.h>
68 #include <sys/systm.h>
69 #include <sys/sockio.h>
70 #include <sys/mbuf.h>
71 #include <sys/kernel.h>
72 #include <sys/timeout.h>
73 #include <sys/socket.h>
74 
75 #include <net/if.h>
76 #include <sys/device.h>
77 #ifdef INET
78 #include <netinet/in.h>
79 #include <netinet/in_systm.h>
80 #include <netinet/ip.h>
81 #include <netinet/if_ether.h>
82 #endif	/* INET */
83 #include <net/if_dl.h>
84 #include <net/if_media.h>
85 
86 #if NVLAN > 0
87 #include <net/if_types.h>
88 #include <net/if_vlan_var.h>
89 #endif
90 
91 #if NBPFILTER > 0
92 #include <net/bpf.h>
93 #endif
94 
95 #include <machine/bus.h>
96 
97 #include <dev/mii/mii.h>
98 #include <dev/mii/miivar.h>
99 
100 #include <dev/pci/pcireg.h>
101 #include <dev/pci/pcivar.h>
102 #include <dev/pci/pcidevs.h>
103 
104 #define VR_USEIOSPACE
105 
106 #include <dev/pci/if_vrreg.h>
107 
108 int vr_probe(struct device *, void *, void *);
109 int vr_quirks(struct pci_attach_args *);
110 void vr_attach(struct device *, struct device *, void *);
111 int vr_activate(struct device *, int);
112 
/* Autoconf attachment: softc size, match/attach hooks, suspend/resume hook. */
struct cfattach vr_ca = {
	sizeof(struct vr_softc), vr_probe, vr_attach, NULL,
	vr_activate
};
/* Driver definition: all vr(4) instances are network interfaces. */
struct cfdriver vr_cd = {
	NULL, "vr", DV_IFNET
};
120 
121 int vr_encap(struct vr_softc *, struct vr_chain **, struct mbuf *);
122 void vr_rxeof(struct vr_softc *);
123 void vr_rxeoc(struct vr_softc *);
124 void vr_txeof(struct vr_softc *);
125 void vr_tick(void *);
126 void vr_rxtick(void *);
127 int vr_intr(void *);
128 int vr_dmamem_alloc(struct vr_softc *, struct vr_dmamem *,
129     bus_size_t, u_int);
130 void vr_dmamem_free(struct vr_softc *, struct vr_dmamem *);
131 void vr_start(struct ifnet *);
132 int vr_ioctl(struct ifnet *, u_long, caddr_t);
133 void vr_chipinit(struct vr_softc *);
134 void vr_init(void *);
135 void vr_stop(struct vr_softc *);
136 void vr_watchdog(struct ifnet *);
137 int vr_ifmedia_upd(struct ifnet *);
138 void vr_ifmedia_sts(struct ifnet *, struct ifmediareq *);
139 
140 int vr_mii_readreg(struct vr_softc *, struct vr_mii_frame *);
141 int vr_mii_writereg(struct vr_softc *, struct vr_mii_frame *);
142 int vr_miibus_readreg(struct device *, int, int);
143 void vr_miibus_writereg(struct device *, int, int, int);
144 void vr_miibus_statchg(struct device *);
145 
146 void vr_setcfg(struct vr_softc *, int);
147 void vr_iff(struct vr_softc *);
148 void vr_reset(struct vr_softc *);
149 int vr_list_rx_init(struct vr_softc *);
150 void vr_fill_rx_ring(struct vr_softc *);
151 int vr_list_tx_init(struct vr_softc *);
152 #ifndef SMALL_KERNEL
153 int vr_wol(struct ifnet *, int);
154 #endif
155 
156 int vr_alloc_mbuf(struct vr_softc *, struct vr_chain_onefrag *);
157 
/*
 * Supported devices & quirks
 */
#define	VR_Q_NEEDALIGN		(1<<0)	/* tx DMA needs longword-aligned buffers */
#define	VR_Q_CSUM		(1<<1)	/* hardware IP/TCP/UDP rx checksum */
#define	VR_Q_CAM		(1<<2)	/* CAM filter (set for VT6105M only) */
#define	VR_Q_HWTAG		(1<<3)	/* hardware VLAN tagging */
#define	VR_Q_INTDISABLE		(1<<4)	/* separate interrupt-disable control */
#define	VR_Q_BABYJUMBO		(1<<5) /* others may work too */

/* PCI id -> quirk table, consulted by vr_probe() and vr_quirks(). */
struct vr_type {
	pci_vendor_id_t		vr_vid;
	pci_product_id_t	vr_pid;
	int			vr_quirks;
} vr_devices[] = {
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_RHINE,
	    VR_Q_NEEDALIGN },
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_RHINEII,
	    VR_Q_NEEDALIGN },
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_RHINEII_2,
	    VR_Q_BABYJUMBO },
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT6105,
	    VR_Q_BABYJUMBO },
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT6105M,
	    VR_Q_CSUM | VR_Q_CAM | VR_Q_HWTAG | VR_Q_INTDISABLE |
	    VR_Q_BABYJUMBO },
	{ PCI_VENDOR_DELTA, PCI_PRODUCT_DELTA_RHINEII,
	    VR_Q_NEEDALIGN },
	{ PCI_VENDOR_ADDTRON, PCI_PRODUCT_ADDTRON_RHINEII,
	    VR_Q_NEEDALIGN }
};
189 
190 #define VR_SETBIT(sc, reg, x)				\
191 	CSR_WRITE_1(sc, reg,				\
192 		CSR_READ_1(sc, reg) | (x))
193 
194 #define VR_CLRBIT(sc, reg, x)				\
195 	CSR_WRITE_1(sc, reg,				\
196 		CSR_READ_1(sc, reg) & ~(x))
197 
198 #define VR_SETBIT16(sc, reg, x)				\
199 	CSR_WRITE_2(sc, reg,				\
200 		CSR_READ_2(sc, reg) | (x))
201 
202 #define VR_CLRBIT16(sc, reg, x)				\
203 	CSR_WRITE_2(sc, reg,				\
204 		CSR_READ_2(sc, reg) & ~(x))
205 
206 #define VR_SETBIT32(sc, reg, x)				\
207 	CSR_WRITE_4(sc, reg,				\
208 		CSR_READ_4(sc, reg) | (x))
209 
210 #define VR_CLRBIT32(sc, reg, x)				\
211 	CSR_WRITE_4(sc, reg,				\
212 		CSR_READ_4(sc, reg) & ~(x))
213 
214 #define SIO_SET(x)					\
215 	CSR_WRITE_1(sc, VR_MIICMD,			\
216 		CSR_READ_1(sc, VR_MIICMD) | (x))
217 
218 #define SIO_CLR(x)					\
219 	CSR_WRITE_1(sc, VR_MIICMD,			\
220 		CSR_READ_1(sc, VR_MIICMD) & ~(x))
221 
/*
 * Read a PHY register through the MII.
 * Polls for command completion for up to 10ms; on timeout the stale
 * contents of VR_MIIDATA are returned without any error indication.
 * Always returns 0.
 */
int
vr_mii_readreg(struct vr_softc *sc, struct vr_mii_frame *frame)
{
	int			s, i;

	s = splnet();

	/* Set the PHY-address (low 5 bits; preserve the control bits) */
	CSR_WRITE_1(sc, VR_PHYADDR, (CSR_READ_1(sc, VR_PHYADDR)& 0xe0)|
	    frame->mii_phyaddr);

	/* Set the register-address and kick off the read */
	CSR_WRITE_1(sc, VR_MIIADDR, frame->mii_regaddr);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_READ_ENB);

	/* The chip clears READ_ENB when the transaction completes. */
	for (i = 0; i < 10000; i++) {
		if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_READ_ENB) == 0)
			break;
		DELAY(1);
	}

	frame->mii_data = CSR_READ_2(sc, VR_MIIDATA);

	splx(s);

	return(0);
}
252 
/*
 * Write to a PHY register through the MII.
 * Polls for command completion for up to 10ms; a timeout is silently
 * ignored.  Always returns 0.
 */
int
vr_mii_writereg(struct vr_softc *sc, struct vr_mii_frame *frame)
{
	int			s, i;

	s = splnet();

	/* Set the PHY-address (low 5 bits; preserve the control bits) */
	CSR_WRITE_1(sc, VR_PHYADDR, (CSR_READ_1(sc, VR_PHYADDR)& 0xe0)|
	    frame->mii_phyaddr);

	/* Set the register-address and data to write */
	CSR_WRITE_1(sc, VR_MIIADDR, frame->mii_regaddr);
	CSR_WRITE_2(sc, VR_MIIDATA, frame->mii_data);

	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_WRITE_ENB);

	/* The chip clears WRITE_ENB when the transaction completes. */
	for (i = 0; i < 10000; i++) {
		if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_WRITE_ENB) == 0)
			break;
		DELAY(1);
	}

	splx(s);

	return(0);
}
283 
/*
 * MII bus glue: read PHY register "reg" on PHY "phy".
 * VT6102/VT6103 only answer on PHY address 1; other addresses read as 0
 * to keep mii_attach() from finding ghost PHYs.
 */
int
vr_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct vr_softc *sc = (struct vr_softc *)dev;
	struct vr_mii_frame frame;

	switch (sc->vr_revid) {
	case REV_ID_VT6102_APOLLO:
	case REV_ID_VT6103:
		if (phy != 1)
			return 0;
		/* FALLTHROUGH */
	default:
		break;
	}

	bzero(&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	vr_mii_readreg(sc, &frame);

	return(frame.mii_data);
}
307 
/*
 * MII bus glue: write "data" to PHY register "reg" on PHY "phy".
 * As with reads, VT6102/VT6103 accesses are restricted to PHY address 1.
 */
void
vr_miibus_writereg(struct device *dev, int phy, int reg, int data)
{
	struct vr_softc *sc = (struct vr_softc *)dev;
	struct vr_mii_frame frame;

	switch (sc->vr_revid) {
	case REV_ID_VT6102_APOLLO:
	case REV_ID_VT6103:
		if (phy != 1)
			return;
		/* FALLTHROUGH */
	default:
		break;
	}

	bzero(&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	frame.mii_data = data;

	vr_mii_writereg(sc, &frame);
}
331 
332 void
333 vr_miibus_statchg(struct device *dev)
334 {
335 	struct vr_softc *sc = (struct vr_softc *)dev;
336 
337 	vr_setcfg(sc, sc->sc_mii.mii_media_active);
338 }
339 
/*
 * Program the receive filter: broadcast always on, plus either
 * promiscuous/all-multicast (hash filter wide open) or a 64-bit
 * multicast hash built from the current multicast list.
 */
void
vr_iff(struct vr_softc *sc)
{
	struct arpcom		*ac = &sc->arpcom;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	int			h = 0;
	u_int32_t		hashes[2];
	struct ether_multi	*enm;
	struct ether_multistep	step;
	u_int8_t		rxfilt;

	rxfilt = CSR_READ_1(sc, VR_RXCFG);
	rxfilt &= ~(VR_RXCFG_RX_BROAD | VR_RXCFG_RX_MULTI |
	    VR_RXCFG_RX_PROMISC);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept broadcast frames.
	 */
	rxfilt |= VR_RXCFG_RX_BROAD;

	/* Multicast ranges can't be expressed in the hash; go allmulti. */
	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		rxfilt |= VR_RXCFG_RX_MULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= VR_RXCFG_RX_PROMISC;
		hashes[0] = hashes[1] = 0xFFFFFFFF;
	} else {
		/* Program new filter. */
		rxfilt |= VR_RXCFG_RX_MULTI;
		bzero(hashes, sizeof(hashes));

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			/* Top 6 bits of the big-endian CRC select the bit. */
			h = ether_crc32_be(enm->enm_addrlo,
			    ETHER_ADDR_LEN) >> 26;

			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
	CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
	CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
}
390 
/*
 * In order to fiddle with the
 * 'full-duplex' and '100Mbps' bits in the netconfig register, we
 * first have to put the transmit and/or receive logic in the idle state.
 *
 * With link up: stop tx/rx, set/clear the full-duplex bit to match
 * "media", then restart tx/rx.  With link down: stop tx/rx and wait up
 * to VR_TIMEOUT polls for them to go idle; if they never do, schedule a
 * full chip restart via VR_F_RESTART (picked up by vr_tick()).
 */
void
vr_setcfg(struct vr_softc *sc, int media)
{
	int i;

	if (sc->sc_mii.mii_media_status & IFM_ACTIVE &&
	    IFM_SUBTYPE(sc->sc_mii.mii_media_active) != IFM_NONE) {
		sc->vr_link = 1;

		if (CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON))
			VR_CLRBIT16(sc, VR_COMMAND,
			    (VR_CMD_TX_ON|VR_CMD_RX_ON));

		if ((media & IFM_GMASK) == IFM_FDX)
			VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
		else
			VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);

		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);
	} else {
		sc->vr_link = 0;
		VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON));
		for (i = VR_TIMEOUT; i > 0; i--) {
			DELAY(10);
			if (!(CSR_READ_2(sc, VR_COMMAND) &
			    (VR_CMD_TX_ON|VR_CMD_RX_ON)))
				break;
		}
		if (i == 0) {
#ifdef VR_DEBUG
			printf("%s: rx shutdown error!\n", sc->sc_dev.dv_xname);
#endif
			sc->vr_flags |= VR_F_RESTART;
		}
	}
}
432 
/*
 * Soft-reset the chip and wait up to VR_TIMEOUT polls for the reset
 * bit to self-clear.  On timeout, newer chips (>= VT3065 rev A) get a
 * force-reset via MISC_CR1; older ones can only be complained about.
 */
void
vr_reset(struct vr_softc *sc)
{
	int			i;

	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);

	for (i = 0; i < VR_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
			break;
	}
	if (i == VR_TIMEOUT) {
		if (sc->vr_revid < REV_ID_VT3065_A)
			printf("%s: reset never completed!\n",
			    sc->sc_dev.dv_xname);
		else {
#ifdef VR_DEBUG
			/* Use newer force reset command */
			printf("%s: Using force reset command.\n",
			    sc->sc_dev.dv_xname);
#endif
			VR_SETBIT(sc, VR_MISC_CR1, VR_MISCCR1_FORSRST);
		}
	}

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);
}
462 
463 /*
464  * Probe for a VIA Rhine chip.
465  */
466 int
467 vr_probe(struct device *parent, void *match, void *aux)
468 {
469 	const struct vr_type *vr;
470 	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
471 	int i, nent = nitems(vr_devices);
472 
473 	for (i = 0, vr = vr_devices; i < nent; i++, vr++)
474 		if (PCI_VENDOR(pa->pa_id) == vr->vr_vid &&
475 		   PCI_PRODUCT(pa->pa_id) == vr->vr_pid)
476 			return(1);
477 
478 	return(0);
479 }
480 
481 int
482 vr_quirks(struct pci_attach_args *pa)
483 {
484 	const struct vr_type *vr;
485 	int i, nent = nitems(vr_devices);
486 
487 	for (i = 0, vr = vr_devices; i < nent; i++, vr++)
488 		if (PCI_VENDOR(pa->pa_id) == vr->vr_vid &&
489 		   PCI_PRODUCT(pa->pa_id) == vr->vr_pid)
490 			return(vr->vr_quirks);
491 
492 	return(0);
493 }
494 
/*
 * Allocate "size" bytes of DMA-safe memory in a single physically
 * contiguous segment, map it into kernel VA, and load it into a DMA
 * map.  Returns 0 on success; on failure returns 1 after unwinding any
 * partially acquired resources (goto-based cleanup).
 */
int
vr_dmamem_alloc(struct vr_softc *sc, struct vr_dmamem *vrm,
    bus_size_t size, u_int align)
{
	vrm->vrm_size = size;

	/* One segment only: descriptor rings must be contiguous. */
	if (bus_dmamap_create(sc->sc_dmat, vrm->vrm_size, 1,
	    vrm->vrm_size, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
	    &vrm->vrm_map) != 0)
		return (1);
	if (bus_dmamem_alloc(sc->sc_dmat, vrm->vrm_size,
	    align, 0, &vrm->vrm_seg, 1, &vrm->vrm_nsegs,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
		goto destroy;
	if (bus_dmamem_map(sc->sc_dmat, &vrm->vrm_seg, vrm->vrm_nsegs,
	    vrm->vrm_size, &vrm->vrm_kva, BUS_DMA_WAITOK) != 0)
		goto free;
	if (bus_dmamap_load(sc->sc_dmat, vrm->vrm_map, vrm->vrm_kva,
	    vrm->vrm_size, NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	return (0);
 unmap:
	bus_dmamem_unmap(sc->sc_dmat, vrm->vrm_kva, vrm->vrm_size);
 free:
	bus_dmamem_free(sc->sc_dmat, &vrm->vrm_seg, 1);
 destroy:
	bus_dmamap_destroy(sc->sc_dmat, vrm->vrm_map);
	return (1);
}
525 
/*
 * Release everything vr_dmamem_alloc() acquired, in reverse order:
 * unload, unmap, free the segment, destroy the map.
 */
void
vr_dmamem_free(struct vr_softc *sc, struct vr_dmamem *vrm)
{
	bus_dmamap_unload(sc->sc_dmat, vrm->vrm_map);
	bus_dmamem_unmap(sc->sc_dmat, vrm->vrm_kva, vrm->vrm_size);
	bus_dmamem_free(sc->sc_dmat, &vrm->vrm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, vrm->vrm_map);
}
534 
535 /*
536  * Attach the interface. Allocate softc structures, do ifmedia
537  * setup and ethernet/BPF attach.
538  */
539 void
540 vr_attach(struct device *parent, struct device *self, void *aux)
541 {
542 	int			i;
543 	struct vr_softc		*sc = (struct vr_softc *)self;
544 	struct pci_attach_args	*pa = aux;
545 	pci_chipset_tag_t	pc = pa->pa_pc;
546 	pci_intr_handle_t	ih;
547 	const char		*intrstr = NULL;
548 	struct ifnet		*ifp = &sc->arpcom.ac_if;
549 	bus_size_t		size;
550 
551 	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);
552 
553 	/*
554 	 * Map control/status registers.
555 	 */
556 
557 #ifdef VR_USEIOSPACE
558 	if (pci_mapreg_map(pa, VR_PCI_LOIO, PCI_MAPREG_TYPE_IO, 0,
559 	    &sc->vr_btag, &sc->vr_bhandle, NULL, &size, 0)) {
560 		printf(": can't map i/o space\n");
561 		return;
562 	}
563 #else
564 	if (pci_mapreg_map(pa, VR_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0,
565 	    &sc->vr_btag, &sc->vr_bhandle, NULL, &size, 0)) {
566 		printf(": can't map mem space\n");
567 		return;
568 	}
569 #endif
570 
571 	/* Allocate interrupt */
572 	if (pci_intr_map(pa, &ih)) {
573 		printf(": can't map interrupt\n");
574 		goto fail;
575 	}
576 	intrstr = pci_intr_string(pc, ih);
577 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, vr_intr, sc,
578 				       self->dv_xname);
579 	if (sc->sc_ih == NULL) {
580 		printf(": can't establish interrupt");
581 		if (intrstr != NULL)
582 			printf(" at %s", intrstr);
583 		printf("\n");
584 		goto fail;
585 	}
586 	printf(": %s", intrstr);
587 
588 	sc->vr_revid = PCI_REVISION(pa->pa_class);
589 	sc->sc_pc = pa->pa_pc;
590 	sc->sc_tag = pa->pa_tag;
591 
592 	vr_chipinit(sc);
593 
594 	/*
595 	 * Get station address. The way the Rhine chips work,
596 	 * you're not allowed to directly access the EEPROM once
597 	 * they've been programmed a special way. Consequently,
598 	 * we need to read the node address from the PAR0 and PAR1
599 	 * registers.
600 	 */
601 	VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
602 	DELAY(1000);
603 	for (i = 0; i < ETHER_ADDR_LEN; i++)
604 		sc->arpcom.ac_enaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);
605 
606 	/*
607 	 * A Rhine chip was detected. Inform the world.
608 	 */
609 	printf(", address %s\n", ether_sprintf(sc->arpcom.ac_enaddr));
610 
611 	sc->sc_dmat = pa->pa_dmat;
612 	if (vr_dmamem_alloc(sc, &sc->sc_zeromap, 64, PAGE_SIZE) != 0) {
613 		printf(": failed to allocate zero pad memory\n");
614 		return;
615 	}
616 	bzero(sc->sc_zeromap.vrm_kva, 64);
617 	bus_dmamap_sync(sc->sc_dmat, sc->sc_zeromap.vrm_map, 0,
618 	    sc->sc_zeromap.vrm_map->dm_mapsize, BUS_DMASYNC_PREREAD);
619 	if (vr_dmamem_alloc(sc, &sc->sc_listmap, sizeof(struct vr_list_data),
620 	    PAGE_SIZE) != 0) {
621 		printf(": failed to allocate dma map\n");
622 		goto free_zero;
623 	}
624 
625 	sc->vr_ldata = (struct vr_list_data *)sc->sc_listmap.vrm_kva;
626 	sc->vr_quirks = vr_quirks(pa);
627 
628 	ifp = &sc->arpcom.ac_if;
629 	ifp->if_softc = sc;
630 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
631 	ifp->if_ioctl = vr_ioctl;
632 	ifp->if_start = vr_start;
633 	ifp->if_watchdog = vr_watchdog;
634 	if (sc->vr_quirks & VR_Q_BABYJUMBO)
635 		ifp->if_hardmtu = VR_RXLEN_BABYJUMBO -
636 		    ETHER_HDR_LEN - ETHER_CRC_LEN;
637 	IFQ_SET_READY(&ifp->if_snd);
638 	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
639 
640 	ifp->if_capabilities = IFCAP_VLAN_MTU;
641 
642 	if (sc->vr_quirks & VR_Q_CSUM)
643 		ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
644 					IFCAP_CSUM_UDPv4;
645 
646 #if NVLAN > 0
647 	/* if the hardware can do VLAN tagging, say so. */
648 	if (sc->vr_quirks & VR_Q_HWTAG)
649 		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
650 #endif
651 
652 #ifndef SMALL_KERNEL
653 	if (sc->vr_revid >= REV_ID_VT3065_A) {
654 		ifp->if_capabilities |= IFCAP_WOL;
655 		ifp->if_wol = vr_wol;
656 		vr_wol(ifp, 0);
657 	}
658 #endif
659 
660 	/*
661 	 * Do MII setup.
662 	 */
663 	sc->sc_mii.mii_ifp = ifp;
664 	sc->sc_mii.mii_readreg = vr_miibus_readreg;
665 	sc->sc_mii.mii_writereg = vr_miibus_writereg;
666 	sc->sc_mii.mii_statchg = vr_miibus_statchg;
667 	ifmedia_init(&sc->sc_mii.mii_media, 0, vr_ifmedia_upd, vr_ifmedia_sts);
668 	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
669 	    0);
670 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
671 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
672 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
673 	} else
674 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
675 	timeout_set(&sc->sc_to, vr_tick, sc);
676 	timeout_set(&sc->sc_rxto, vr_rxtick, sc);
677 
678 	/*
679 	 * Call MI attach routines.
680 	 */
681 	if_attach(ifp);
682 	ether_ifattach(ifp);
683 	return;
684 
685 free_zero:
686 	bus_dmamap_sync(sc->sc_dmat, sc->sc_zeromap.vrm_map, 0,
687 	    sc->sc_zeromap.vrm_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
688 	vr_dmamem_free(sc, &sc->sc_zeromap);
689 fail:
690 	bus_space_unmap(sc->vr_btag, sc->vr_bhandle, size);
691 }
692 
693 int
694 vr_activate(struct device *self, int act)
695 {
696 	struct vr_softc *sc = (struct vr_softc *)self;
697 	struct ifnet *ifp = &sc->arpcom.ac_if;
698 	int rv = 0;
699 
700 	switch (act) {
701 	case DVACT_SUSPEND:
702 		if (ifp->if_flags & IFF_RUNNING)
703 			vr_stop(sc);
704 		rv = config_activate_children(self, act);
705 		break;
706 	case DVACT_RESUME:
707 		if (ifp->if_flags & IFF_UP)
708 			vr_init(sc);
709 		break;
710 	default:
711 		rv = config_activate_children(self, act);
712 		break;
713 	}
714 	return (rv);
715 }
716 
717 /*
718  * Initialize the transmit descriptors.
719  */
720 int
721 vr_list_tx_init(struct vr_softc *sc)
722 {
723 	struct vr_chain_data	*cd;
724 	struct vr_list_data	*ld;
725 	int			i;
726 
727 	cd = &sc->vr_cdata;
728 	ld = sc->vr_ldata;
729 
730 	cd->vr_tx_cnt = cd->vr_tx_pkts = 0;
731 
732 	for (i = 0; i < VR_TX_LIST_CNT; i++) {
733 		cd->vr_tx_chain[i].vr_ptr = &ld->vr_tx_list[i];
734 		cd->vr_tx_chain[i].vr_paddr =
735 		    sc->sc_listmap.vrm_map->dm_segs[0].ds_addr +
736 		    offsetof(struct vr_list_data, vr_tx_list[i]);
737 
738 		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, VR_MAXFRAGS,
739 		    MCLBYTES, 0, BUS_DMA_NOWAIT, &cd->vr_tx_chain[i].vr_map))
740 			return (ENOBUFS);
741 
742 		if (i == (VR_TX_LIST_CNT - 1))
743 			cd->vr_tx_chain[i].vr_nextdesc =
744 				&cd->vr_tx_chain[0];
745 		else
746 			cd->vr_tx_chain[i].vr_nextdesc =
747 				&cd->vr_tx_chain[i + 1];
748 	}
749 
750 	cd->vr_tx_cons = cd->vr_tx_prod = &cd->vr_tx_chain[0];
751 
752 	return (0);
753 }
754 
/*
 * Initialize the RX descriptors and allocate mbufs for them. Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 * Returns 0 on success, ENOBUFS if a DMA map can't be created.
 */
int
vr_list_rx_init(struct vr_softc *sc)
{
	struct vr_chain_data	*cd;
	struct vr_list_data	*ld;
	struct vr_desc		*d;
	int			 i, nexti;

	cd = &sc->vr_cdata;
	ld = sc->vr_ldata;

	for (i = 0; i < VR_RX_LIST_CNT; i++) {
		/* One map per slot, single segment, read (device->host). */
		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT | BUS_DMA_READ,
		    &cd->vr_rx_chain[i].vr_map))
			return (ENOBUFS);

		d = (struct vr_desc *)&ld->vr_rx_list[i];
		cd->vr_rx_chain[i].vr_ptr = d;
		cd->vr_rx_chain[i].vr_paddr =
		    sc->sc_listmap.vrm_map->dm_segs[0].ds_addr +
		    offsetof(struct vr_list_data, vr_rx_list[i]);

		if (i == (VR_RX_LIST_CNT - 1))
			nexti = 0;
		else
			nexti = i + 1;

		/* Link both the software chain and the hardware next pointer. */
		cd->vr_rx_chain[i].vr_nextdesc = &cd->vr_rx_chain[nexti];
		ld->vr_rx_list[i].vr_next =
		    htole32(sc->sc_listmap.vrm_map->dm_segs[0].ds_addr +
		    offsetof(struct vr_list_data, vr_rx_list[nexti]));
	}

	cd->vr_rx_prod = cd->vr_rx_cons = &cd->vr_rx_chain[0];
	/* Keep at least 2 slots in reserve; then populate with mbufs. */
	if_rxr_init(&sc->sc_rxring, 2, VR_RX_LIST_CNT - 1);
	vr_fill_rx_ring(sc);

	return (0);
}
/*
 * Refill empty rx ring slots with fresh mbufs, advancing the producer
 * for each successful allocation.  If the ring ends up completely
 * empty (allocation starvation), arm sc_rxto so vr_rxtick() retries.
 */
void
vr_fill_rx_ring(struct vr_softc *sc)
{
	struct vr_chain_data	*cd;
	struct vr_list_data	*ld;
	u_int			slots;

	cd = &sc->vr_cdata;
	ld = sc->vr_ldata;

	for (slots = if_rxr_get(&sc->sc_rxring, VR_RX_LIST_CNT);
	    slots > 0; slots--) {
		if (vr_alloc_mbuf(sc, cd->vr_rx_prod))
			break;

		cd->vr_rx_prod = cd->vr_rx_prod->vr_nextdesc;
	}

	/* Return the slots we could not fill to the ring accounting. */
	if_rxr_put(&sc->sc_rxring, slots);
	if (if_rxr_inuse(&sc->sc_rxring) == 0)
		timeout_add(&sc->sc_rxto, 0);
}
824 
/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 * Walks the rx ring from the consumer until a descriptor still owned
 * by the chip is found, then refills the ring.
 */
void
vr_rxeof(struct vr_softc *sc)
{
	struct mbuf		*m;
	struct ifnet		*ifp;
	struct vr_chain_onefrag	*cur_rx;
	int			total_len = 0;
	u_int32_t		rxstat, rxctl;

	ifp = &sc->arpcom.ac_if;

	while (if_rxr_inuse(&sc->sc_rxring) > 0) {
		/* Sync the whole descriptor list before peeking at it. */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap.vrm_map,
		    0, sc->sc_listmap.vrm_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		rxstat = letoh32(sc->vr_cdata.vr_rx_cons->vr_ptr->vr_status);
		if (rxstat & VR_RXSTAT_OWN)
			break;	/* still owned by the chip */

		rxctl = letoh32(sc->vr_cdata.vr_rx_cons->vr_ptr->vr_ctl);

		/* Take the mbuf out of the slot and advance the consumer. */
		cur_rx = sc->vr_cdata.vr_rx_cons;
		m = cur_rx->vr_mbuf;
		cur_rx->vr_mbuf = NULL;
		sc->vr_cdata.vr_rx_cons = cur_rx->vr_nextdesc;
		if_rxr_put(&sc->sc_rxring, 1);

		/*
		 * If an error occurred, count it and discard the mbuf;
		 * the slot will be repopulated by vr_fill_rx_ring()
		 * below.
		 */
		if ((rxstat & VR_RXSTAT_RX_OK) == 0) {
			ifp->if_ierrors++;
#ifdef VR_DEBUG
			printf("%s: rx error (%02x):",
			    sc->sc_dev.dv_xname, rxstat & 0x000000ff);
			if (rxstat & VR_RXSTAT_CRCERR)
				printf(" crc error");
			if (rxstat & VR_RXSTAT_FRAMEALIGNERR)
				printf(" frame alignment error");
			if (rxstat & VR_RXSTAT_FIFOOFLOW)
				printf(" FIFO overflow");
			if (rxstat & VR_RXSTAT_GIANT)
				printf(" received giant packet");
			if (rxstat & VR_RXSTAT_RUNT)
				printf(" received runt packet");
			if (rxstat & VR_RXSTAT_BUSERR)
				printf(" system bus error");
			if (rxstat & VR_RXSTAT_BUFFERR)
				printf(" rx buffer error");
			printf("\n");
#endif

			m_freem(m);
			continue;
		}

		/* No errors; receive the packet. */
		total_len = VR_RXBYTES(letoh32(cur_rx->vr_ptr->vr_status));

		bus_dmamap_sync(sc->sc_dmat, cur_rx->vr_map, 0,
		    cur_rx->vr_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, cur_rx->vr_map);

		/*
		 * The VIA Rhine chip includes the CRC with every
		 * received frame, and there's no way to turn this
		 * behavior off so trim the CRC manually.
		 */
		total_len -= ETHER_CRC_LEN;

#ifdef __STRICT_ALIGNMENT
		/* Copy into a freshly aligned mbuf chain. */
		{
			struct mbuf *m0;
			m0 = m_devget(mtod(m, caddr_t), total_len,
			    ETHER_ALIGN, ifp);
			m_freem(m);
			if (m0 == NULL) {
				ifp->if_ierrors++;
				continue;
			}
			m = m0;
		}
#else
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = total_len;
#endif

		ifp->if_ipackets++;

		if (sc->vr_quirks & VR_Q_CSUM &&
		    (rxstat & VR_RXSTAT_FRAG) == 0 &&
		    (rxctl & VR_RXCTL_IP) != 0) {
			/* Checksum is valid for non-fragmented IP packets. */
			if ((rxctl & VR_RXCTL_IPOK) == VR_RXCTL_IPOK)
				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
			if (rxctl & (VR_RXCTL_TCP | VR_RXCTL_UDP) &&
			    ((rxctl & VR_RXCTL_TCPUDPOK) != 0))
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK |
				    M_UDP_CSUM_IN_OK;
		}

#if NVLAN > 0
		/*
		 * If there's a tagged packet, the 802.1q header will be at the
		 * 4-byte boundary following the CRC.  There will be 2 bytes
		 * TPID (0x8100) and 2 bytes TCI (including VLAN ID).
		 * This isn't in the data sheet.
		 */
		if (rxctl & VR_RXCTL_TAG) {
			int offset = ((total_len + 3) & ~3) + ETHER_CRC_LEN + 2;
			m->m_pkthdr.ether_vtag = htons(*(u_int16_t *)
			    ((u_int8_t *)m->m_data + offset));
			m->m_flags |= M_VLANTAG;
		}
#endif

#if NBPFILTER > 0
		/*
		 * Handle BPF listeners. Let the BPF user see the packet.
		 */
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif
		/* pass it on. */
		ether_input_mbuf(ifp, m);
	}

	/* Replace the mbufs we consumed, then hand the ring back to DMA. */
	vr_fill_rx_ring(sc);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap.vrm_map,
	    0, sc->sc_listmap.vrm_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
/*
 * Recover from an rx error condition: stop the receiver, wait for it
 * to actually idle, drain any completed frames, then point the chip at
 * the current consumer descriptor and restart reception.  If the
 * receiver refuses to stop, schedule a full restart via VR_F_RESTART.
 */
void
vr_rxeoc(struct vr_softc *sc)
{
	struct ifnet		*ifp;
	int			i;

	ifp = &sc->arpcom.ac_if;

	ifp->if_ierrors++;

	VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
	DELAY(10000);

	for (i = 0x400;
	    i && (CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RX_ON);
	    i--)
		;       /* Wait for receiver to stop */

	if (!i) {
		printf("%s: rx shutdown error!\n", sc->sc_dev.dv_xname);
		sc->vr_flags |= VR_F_RESTART;
		return;
	}

	vr_rxeof(sc);

	/* Reload the rx descriptor pointer and turn the receiver back on. */
	CSR_WRITE_4(sc, VR_RXADDR, sc->vr_cdata.vr_rx_cons->vr_paddr);
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO);
}
996 
/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 * Walks tx descriptors from consumer to producer, retiring completed
 * frames.  On abort/underflow the descriptor is handed back to the
 * chip and transmission is re-kicked from that descriptor.
 */

void
vr_txeof(struct vr_softc *sc)
{
	struct vr_chain		*cur_tx;
	struct ifnet		*ifp;

	ifp = &sc->arpcom.ac_if;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	cur_tx = sc->vr_cdata.vr_tx_cons;
	while (cur_tx != sc->vr_cdata.vr_tx_prod) {
		u_int32_t		txstat, txctl;
		int			i;

		txstat = letoh32(cur_tx->vr_ptr->vr_status);
		txctl = letoh32(cur_tx->vr_ptr->vr_ctl);

		if ((txstat & VR_TXSTAT_ABRT) ||
		    (txstat & VR_TXSTAT_UDF)) {
			for (i = 0x400;
			    i && (CSR_READ_2(sc, VR_COMMAND) & VR_CMD_TX_ON);
			    i--)
				;	/* Wait for chip to shutdown */
			if (!i) {
				printf("%s: tx shutdown timeout\n",
				    sc->sc_dev.dv_xname);
				sc->vr_flags |= VR_F_RESTART;
				break;
			}
			/* Give the descriptor back and restart from it. */
			cur_tx->vr_ptr->vr_status = htole32(VR_TXSTAT_OWN);
			CSR_WRITE_4(sc, VR_TXADDR, cur_tx->vr_paddr);
			break;
		}

		if (txstat & VR_TXSTAT_OWN)
			break;	/* chip hasn't finished this one yet */

		sc->vr_cdata.vr_tx_cnt--;
		/* Only the first descriptor in the chain is valid. */
		if ((txctl & VR_TXCTL_FIRSTFRAG) == 0)
			goto next;

		if (txstat & VR_TXSTAT_ERRSUM) {
			ifp->if_oerrors++;
			if (txstat & VR_TXSTAT_DEFER)
				ifp->if_collisions++;
			if (txstat & VR_TXSTAT_LATECOLL)
				ifp->if_collisions++;
		}

		/* Collision count lives in bits 3.. of the status word. */
		ifp->if_collisions +=(txstat & VR_TXSTAT_COLLCNT) >> 3;

		ifp->if_opackets++;
		if (cur_tx->vr_map != NULL && cur_tx->vr_map->dm_nsegs > 0)
			bus_dmamap_unload(sc->sc_dmat, cur_tx->vr_map);

		m_freem(cur_tx->vr_mbuf);
		cur_tx->vr_mbuf = NULL;
		/* A slot freed up, so transmission may proceed again. */
		ifp->if_flags &= ~IFF_OACTIVE;

next:
		cur_tx = cur_tx->vr_nextdesc;
	}

	sc->vr_cdata.vr_tx_cons = cur_tx;
	if (sc->vr_cdata.vr_tx_cnt == 0)
		ifp->if_timer = 0;
}
1073 
1074 void
1075 vr_tick(void *xsc)
1076 {
1077 	struct vr_softc *sc = xsc;
1078 	int s;
1079 
1080 	s = splnet();
1081 	if (sc->vr_flags & VR_F_RESTART) {
1082 		printf("%s: restarting\n", sc->sc_dev.dv_xname);
1083 		vr_init(sc);
1084 		sc->vr_flags &= ~VR_F_RESTART;
1085 	}
1086 
1087 	mii_tick(&sc->sc_mii);
1088 	timeout_add_sec(&sc->sc_to, 1);
1089 	splx(s);
1090 }
1091 
1092 void
1093 vr_rxtick(void *xsc)
1094 {
1095 	struct vr_softc *sc = xsc;
1096 	int s;
1097 
1098 	s = splnet();
1099 	if (if_rxr_inuse(&sc->sc_rxring) == 0) {
1100 		vr_fill_rx_ring(sc);
1101 		if (if_rxr_inuse(&sc->sc_rxring) == 0)
1102 			timeout_add(&sc->sc_rxto, 1);
1103 	}
1104 	splx(s);
1105 }
1106 
/*
 * Interrupt service routine.  Reads and acknowledges all pending
 * causes in one shot, dispatches RX/TX/error handling, and returns
 * nonzero iff the interrupt was ours.
 */
int
vr_intr(void *arg)
{
	struct vr_softc		*sc;
	struct ifnet		*ifp;
	u_int16_t		status;
	int claimed = 0;

	sc = arg;
	ifp = &sc->arpcom.ac_if;

	/* Suppress unwanted interrupts. */
	if (!(ifp->if_flags & IFF_UP)) {
		vr_stop(sc);
		return 0;
	}

	/* Reading ISR then writing the same bits back acks them. */
	status = CSR_READ_2(sc, VR_ISR);
	if (status)
		CSR_WRITE_2(sc, VR_ISR, status);

	if (status & VR_INTRS) {
		claimed = 1;

		if (status & VR_ISR_RX_OK)
			vr_rxeof(sc);

		if (status & VR_ISR_RX_DROPPED) {
#ifdef VR_DEBUG
			printf("%s: rx packet lost\n", sc->sc_dev.dv_xname);
#endif
			ifp->if_ierrors++;
		}

		/* RX error conditions: restart the receiver via vr_rxeoc(). */
		if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_NOBUF) ||
		    (status & VR_ISR_RX_OFLOW)) {
#ifdef VR_DEBUG
			printf("%s: receive error (%04x)",
			    sc->sc_dev.dv_xname, status);
			if (status & VR_ISR_RX_NOBUF)
				printf(" no buffers");
			if (status & VR_ISR_RX_OFLOW)
				printf(" overflow");
			printf("\n");
#endif
			vr_rxeoc(sc);
		}

		/* Fatal conditions: reinitialize the whole chip. */
		if ((status & VR_ISR_BUSERR) || (status & VR_ISR_TX_UNDERRUN)) {
			if (status & VR_ISR_BUSERR)
				printf("%s: PCI bus error\n",
				    sc->sc_dev.dv_xname);
			if (status & VR_ISR_TX_UNDERRUN)
				printf("%s: transmit underrun\n",
				    sc->sc_dev.dv_xname);
			vr_init(sc);
			/* The reinit invalidates the remaining causes. */
			status = 0;
		}

		/* TX completion or abort: reclaim finished descriptors. */
		if ((status & VR_ISR_TX_OK) || (status & VR_ISR_TX_ABRT) ||
		    (status & VR_ISR_TX_ABRT2) || (status & VR_ISR_UDFI)) {
			vr_txeof(sc);
			if ((status & VR_ISR_UDFI) ||
			    (status & VR_ISR_TX_ABRT2) ||
			    (status & VR_ISR_TX_ABRT)) {
#ifdef VR_DEBUG
				if (status & (VR_ISR_TX_ABRT | VR_ISR_TX_ABRT2))
					printf("%s: transmit aborted\n",
					    sc->sc_dev.dv_xname);
				if (status & VR_ISR_UDFI)
					printf("%s: transmit underflow\n",
					    sc->sc_dev.dv_xname);
#endif
				ifp->if_oerrors++;
				/* Restart TX if work remains on the ring. */
				if (sc->vr_cdata.vr_tx_cons->vr_mbuf != NULL) {
					VR_SETBIT16(sc, VR_COMMAND,
					    VR_CMD_TX_ON);
					VR_SETBIT16(sc, VR_COMMAND,
					    VR_CMD_TX_GO);
				}
			}
		}
	}

	/* Push out anything queued while we were servicing the chip. */
	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		vr_start(ifp);

	return (claimed);
}
1196 
1197 /*
1198  * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1199  * pointers to the fragment pointers.
1200  */
1201 int
1202 vr_encap(struct vr_softc *sc, struct vr_chain **cp, struct mbuf *m_head)
1203 {
1204 	struct vr_chain		*c = *cp;
1205 	struct vr_desc		*f = NULL;
1206 	struct mbuf		*m_new = NULL;
1207 	u_int32_t		vr_ctl = 0, vr_status = 0, intdisable = 0;
1208 	bus_dmamap_t		txmap;
1209 	int			i, runt = 0;
1210 
1211 	if (sc->vr_quirks & VR_Q_CSUM) {
1212 		if (m_head->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
1213 			vr_ctl |= VR_TXCTL_IPCSUM;
1214 		if (m_head->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
1215 			vr_ctl |= VR_TXCTL_TCPCSUM;
1216 		if (m_head->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
1217 			vr_ctl |= VR_TXCTL_UDPCSUM;
1218 	}
1219 
1220 	/* Deep copy for chips that need alignment, or too many segments */
1221 	if (sc->vr_quirks & VR_Q_NEEDALIGN ||
1222 	    bus_dmamap_load_mbuf(sc->sc_dmat, c->vr_map, m_head,
1223 				 BUS_DMA_NOWAIT | BUS_DMA_WRITE)) {
1224 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1225 		if (m_new == NULL)
1226 			return (1);
1227 		if (m_head->m_pkthdr.len > MHLEN) {
1228 			MCLGET(m_new, M_DONTWAIT);
1229 			if (!(m_new->m_flags & M_EXT)) {
1230 				m_freem(m_new);
1231 				return (1);
1232 			}
1233 		}
1234 		m_copydata(m_head, 0, m_head->m_pkthdr.len,
1235 		    mtod(m_new, caddr_t));
1236 		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
1237 
1238 		if (bus_dmamap_load_mbuf(sc->sc_dmat, c->vr_map, m_new,
1239 		    BUS_DMA_NOWAIT | BUS_DMA_WRITE)) {
1240 			m_freem(m_new);
1241 			return(1);
1242 		}
1243 	}
1244 
1245 	bus_dmamap_sync(sc->sc_dmat, c->vr_map, 0, c->vr_map->dm_mapsize,
1246 	    BUS_DMASYNC_PREWRITE);
1247 	if (c->vr_map->dm_mapsize < VR_MIN_FRAMELEN)
1248 		runt = 1;
1249 
1250 	/* Check number of available descriptors */
1251 	if (sc->vr_cdata.vr_tx_cnt + c->vr_map->dm_nsegs + runt >=
1252 	    (VR_TX_LIST_CNT - 1)) {
1253 		if (m_new)
1254 			m_freem(m_new);
1255 		return(1);
1256 	}
1257 
1258 #if NVLAN > 0
1259 	/* Tell chip to insert VLAN tag if needed. */
1260 	if (m_head->m_flags & M_VLANTAG) {
1261 		u_int32_t vtag = m_head->m_pkthdr.ether_vtag;
1262 		vtag = (vtag << VR_TXSTAT_PQSHIFT) & VR_TXSTAT_PQMASK;
1263 		vr_status |= vtag;
1264 		vr_ctl |= htole32(VR_TXCTL_INSERTTAG);
1265 	}
1266 #endif
1267 
1268 	/*
1269 	 * We only want TX completion interrupts on every Nth packet.
1270 	 * We need to set VR_TXNEXT_INTDISABLE on every descriptor except
1271 	 * for the last discriptor of every Nth packet, where we set
1272 	 * VR_TXCTL_FINT.  The former is in the specs for only some chips.
1273 	 * present: VT6102 VT6105M VT8235M
1274 	 * not present: VT86C100 6105LOM
1275 	 */
1276 	if (++sc->vr_cdata.vr_tx_pkts % VR_TX_INTR_THRESH != 0 &&
1277 	    sc->vr_quirks & VR_Q_INTDISABLE)
1278 		intdisable = VR_TXNEXT_INTDISABLE;
1279 
1280 	if (m_new != NULL) {
1281 		m_freem(m_head);
1282 
1283 		c->vr_mbuf = m_new;
1284 	} else
1285 		c->vr_mbuf = m_head;
1286 	txmap = c->vr_map;
1287 	for (i = 0; i < txmap->dm_nsegs; i++) {
1288 		if (i != 0)
1289 			*cp = c = c->vr_nextdesc;
1290 		f = c->vr_ptr;
1291 		f->vr_ctl = htole32(txmap->dm_segs[i].ds_len | VR_TXCTL_TLINK |
1292 		    vr_ctl);
1293 		if (i == 0)
1294 			f->vr_ctl |= htole32(VR_TXCTL_FIRSTFRAG);
1295 		f->vr_status = htole32(vr_status);
1296 		f->vr_data = htole32(txmap->dm_segs[i].ds_addr);
1297 		f->vr_next = htole32(c->vr_nextdesc->vr_paddr | intdisable);
1298 		sc->vr_cdata.vr_tx_cnt++;
1299 	}
1300 
1301 	/* Pad runt frames */
1302 	if (runt) {
1303 		*cp = c = c->vr_nextdesc;
1304 		f = c->vr_ptr;
1305 		f->vr_ctl = htole32((VR_MIN_FRAMELEN - txmap->dm_mapsize) |
1306 		    VR_TXCTL_TLINK | vr_ctl);
1307 		f->vr_status = htole32(vr_status);
1308 		f->vr_data = htole32(sc->sc_zeromap.vrm_map->dm_segs[0].ds_addr);
1309 		f->vr_next = htole32(c->vr_nextdesc->vr_paddr | intdisable);
1310 		sc->vr_cdata.vr_tx_cnt++;
1311 	}
1312 
1313 	/* Set EOP on the last descriptor */
1314 	f->vr_ctl |= htole32(VR_TXCTL_LASTFRAG);
1315 
1316 	if (sc->vr_cdata.vr_tx_pkts % VR_TX_INTR_THRESH == 0)
1317 		f->vr_ctl |= htole32(VR_TXCTL_FINT);
1318 
1319 	return (0);
1320 }
1321 
1322 /*
1323  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1324  * to the mbuf data regions directly in the transmit lists. We also save a
1325  * copy of the pointers since the transmit list fragment pointers are
1326  * physical addresses.
1327  */
1328 
void
vr_start(struct ifnet *ifp)
{
	struct vr_softc		*sc;
	struct mbuf		*m_head;
	struct vr_chain		*cur_tx, *head_tx;
	unsigned int		 queued = 0;

	sc = ifp->if_softc;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/* Don't queue anything while there is no link. */
	if (sc->vr_link == 0)
		return;

	cur_tx = sc->vr_cdata.vr_tx_prod;
	/* A NULL vr_mbuf marks a free producer slot. */
	while (cur_tx->vr_mbuf == NULL) {
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/* Pack the data into the descriptor. */
		head_tx = cur_tx;
		if (vr_encap(sc, &cur_tx, m_head)) {
			/* Rollback, send what we were able to encap. */
			IF_PREPEND(&ifp->if_snd, m_head);
			break;
		}
		queued++;

		/* Only set ownership bit on first descriptor */
		head_tx->vr_ptr->vr_status |= htole32(VR_TXSTAT_OWN);

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, head_tx->vr_mbuf,
			BPF_DIRECTION_OUT);
#endif
		cur_tx = cur_tx->vr_nextdesc;
	}
	if (queued > 0) {
		sc->vr_cdata.vr_tx_prod = cur_tx;

		/* Flush descriptor writes before poking the chip. */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap.vrm_map, 0,
		    sc->sc_listmap.vrm_map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

		/* Tell the chip to start transmitting. */
		VR_SETBIT16(sc, VR_COMMAND, /*VR_CMD_TX_ON|*/VR_CMD_TX_GO);

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = 5;

		/* Ring full: stall the queue until vr_txeof() frees slots. */
		if (cur_tx->vr_mbuf != NULL)
			ifp->if_flags |= IFF_OACTIVE;
	}
}
1391 
/*
 * Low-level chip setup: wake the chip from power-down, reset it, and
 * configure direct (non-autopolled) MII access.
 */
void
vr_chipinit(struct vr_softc *sc)
{
	/*
	 * Make sure it isn't suspended.
	 */
	if (pci_get_capability(sc->sc_pc, sc->sc_tag,
	    PCI_CAP_PWRMGMT, NULL, NULL))
		VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1));

	/* Reset the adapter. */
	vr_reset(sc);

	/*
	 * Turn on bit2 (MIION) in PCI configuration register 0x53 during
	 * initialization and disable AUTOPOLL.
	 */
	pci_conf_write(sc->sc_pc, sc->sc_tag, VR_PCI_MODE,
	    pci_conf_read(sc->sc_pc, sc->sc_tag, VR_PCI_MODE) |
	    (VR_MODE3_MIION << 24));
	VR_CLRBIT(sc, VR_MIICMD, VR_MIICMD_AUTOPOLL);
}
1414 
/*
 * (Re)initialize the hardware and mark the interface running.  Also
 * serves as the recovery path for bus errors, TX lockups and the
 * watchdog (see vr_intr(), vr_tick() and vr_watchdog()).
 */
void
vr_init(void *xsc)
{
	struct vr_softc		*sc = xsc;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	struct mii_data		*mii = &sc->sc_mii;
	int			s, i;

	s = splnet();

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	vr_stop(sc);
	vr_chipinit(sc);

	/*
	 * Set our station address.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VR_PAR0 + i, sc->arpcom.ac_enaddr[i]);

	/* Set DMA size */
	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_DMA_LENGTH);
	VR_SETBIT(sc, VR_BCR0, VR_BCR0_DMA_STORENFWD);

	/*
	 * BCR0 and BCR1 can override the RXCFG and TXCFG registers,
	 * so we must set both.
	 */
	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_RX_THRESH);
	VR_SETBIT(sc, VR_BCR0, VR_BCR0_RXTHRESH128BYTES);

	VR_CLRBIT(sc, VR_BCR1, VR_BCR1_TX_THRESH);
	VR_SETBIT(sc, VR_BCR1, VR_BCR1_TXTHRESHSTORENFWD);

	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_128BYTES);

	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
	VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD);

	/* Enable hardware VLAN tag insertion if the chip supports it. */
	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		VR_SETBIT(sc, VR_TXCFG, VR_TXCFG_TXTAGEN);

	/* Init circular RX list. */
	if (vr_list_rx_init(sc) == ENOBUFS) {
		printf("%s: initialization failed: no memory for rx buffers\n",
		    sc->sc_dev.dv_xname);
		vr_stop(sc);
		splx(s);
		return;
	}

	/*
	 * Init tx descriptors.
	 */
	if (vr_list_tx_init(sc) == ENOBUFS) {
		printf("%s: initialization failed: no memory for tx buffers\n",
		    sc->sc_dev.dv_xname);
		vr_stop(sc);
		splx(s);
		return;
	}

	/*
	 * Program promiscuous mode and multicast filters.
	 */
	vr_iff(sc);

	/*
	 * Load the address of the RX list.
	 */
	CSR_WRITE_4(sc, VR_RXADDR, sc->vr_cdata.vr_rx_cons->vr_paddr);

	/* Enable receiver and transmitter. */
	CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START|
				    VR_CMD_TX_ON|VR_CMD_RX_ON|
				    VR_CMD_RX_GO);

	/* Point the chip at the start of the TX descriptor ring. */
	CSR_WRITE_4(sc, VR_TXADDR, sc->sc_listmap.vrm_map->dm_segs[0].ds_addr +
	    offsetof(struct vr_list_data, vr_tx_list[0]));

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

	/* Restore state of BMCR */
	sc->vr_link = 1;
	mii_mediachg(mii);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Start the housekeeping timer if it isn't already running. */
	if (!timeout_pending(&sc->sc_to))
		timeout_add_sec(&sc->sc_to, 1);

	splx(s);
}
1516 
1517 /*
1518  * Set media options.
1519  */
1520 int
1521 vr_ifmedia_upd(struct ifnet *ifp)
1522 {
1523 	struct vr_softc		*sc = ifp->if_softc;
1524 
1525 	if (ifp->if_flags & IFF_UP)
1526 		vr_init(sc);
1527 
1528 	return (0);
1529 }
1530 
1531 /*
1532  * Report current media status.
1533  */
1534 void
1535 vr_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1536 {
1537 	struct vr_softc		*sc = ifp->if_softc;
1538 	struct mii_data		*mii = &sc->sc_mii;
1539 
1540 	mii_pollstat(mii);
1541 	ifmr->ifm_active = mii->mii_media_active;
1542 	ifmr->ifm_status = mii->mii_media_status;
1543 }
1544 
/*
 * Handle socket ioctls: interface up/down, media selection, and
 * multicast filter changes.
 */
int
vr_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct vr_softc		*sc = ifp->if_softc;
	struct ifaddr		*ifa = (struct ifaddr *) data;
	struct ifreq		*ifr = (struct ifreq *) data;
	int			s, error = 0;

	s = splnet();

	switch(command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			vr_init(sc);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->arpcom, ifa);
#endif
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/* Already running: only the RX filter may change. */
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				vr_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				vr_stop(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
		break;

	default:
		error = ether_ioctl(ifp, &sc->arpcom, command, data);
	}

	/* ENETRESET means only the promisc/multicast filter needs a reload. */
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			vr_iff(sc);
		error = 0;
	}

	splx(s);
	return(error);
}
1596 
1597 void
1598 vr_watchdog(struct ifnet *ifp)
1599 {
1600 	struct vr_softc		*sc;
1601 
1602 	sc = ifp->if_softc;
1603 
1604 	/*
1605 	 * Since we're only asking for completion interrupts only every
1606 	 * few packets, occasionally the watchdog will fire when we have
1607 	 * some TX descriptors to reclaim, so check for that first.
1608 	 */
1609 	vr_txeof(sc);
1610 	if (sc->vr_cdata.vr_tx_cnt == 0)
1611 		return;
1612 
1613 	ifp->if_oerrors++;
1614 	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
1615 	vr_init(sc);
1616 
1617 	if (!IFQ_IS_EMPTY(&ifp->if_snd))
1618 		vr_start(ifp);
1619 }
1620 
1621 /*
1622  * Stop the adapter and free any mbufs allocated to the
1623  * RX and TX lists.
1624  */
void
vr_stop(struct vr_softc *sc)
{
	int		i;
	struct ifnet	*ifp;
	bus_dmamap_t	map;

	ifp = &sc->arpcom.ac_if;
	ifp->if_timer = 0;

	/*
	 * NOTE(review): only sc_to is cancelled here; sc_rxto is left
	 * pending, so vr_rxtick() may still run after the rings below
	 * are torn down -- confirm that is safe or intended.
	 */
	timeout_del(&sc->sc_to);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	/* Command the chip to stop and drop the RX/TX enables. */
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP);
	VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON));

	/* wait for xfers to shutdown */
	for (i = VR_TIMEOUT; i > 0; i--) {
		DELAY(10);
		if (!(CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON)))
			break;
	}
#ifdef VR_DEBUG
	if (i == 0)
		printf("%s: rx shutdown error!\n", sc->sc_dev.dv_xname);
#endif
	/* Mask all interrupts and clear the descriptor base registers. */
	CSR_WRITE_2(sc, VR_IMR, 0x0000);
	CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
	CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < VR_RX_LIST_CNT; i++) {
		if (sc->vr_cdata.vr_rx_chain[i].vr_mbuf != NULL) {
			m_freem(sc->vr_cdata.vr_rx_chain[i].vr_mbuf);
			sc->vr_cdata.vr_rx_chain[i].vr_mbuf = NULL;
		}
		map = sc->vr_cdata.vr_rx_chain[i].vr_map;
		if (map != NULL) {
			if (map->dm_nsegs > 0)
				bus_dmamap_unload(sc->sc_dmat, map);
			bus_dmamap_destroy(sc->sc_dmat, map);
			sc->vr_cdata.vr_rx_chain[i].vr_map = NULL;
		}
	}
	bzero(&sc->vr_ldata->vr_rx_list, sizeof(sc->vr_ldata->vr_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < VR_TX_LIST_CNT; i++) {
		if (sc->vr_cdata.vr_tx_chain[i].vr_mbuf != NULL) {
			m_freem(sc->vr_cdata.vr_tx_chain[i].vr_mbuf);
			sc->vr_cdata.vr_tx_chain[i].vr_mbuf = NULL;
			/* Anything still on the ring was never transmitted. */
			ifp->if_oerrors++;
		}
		map = sc->vr_cdata.vr_tx_chain[i].vr_map;
		if (map != NULL) {
			if (map->dm_nsegs > 0)
				bus_dmamap_unload(sc->sc_dmat, map);
			bus_dmamap_destroy(sc->sc_dmat, map);
			sc->vr_cdata.vr_tx_chain[i].vr_map = NULL;
		}
	}
	bzero(&sc->vr_ldata->vr_tx_list, sizeof(sc->vr_ldata->vr_tx_list));
}
1693 
#ifndef SMALL_KERNEL
/*
 * Configure Wake-on-LAN: always clear stale WOL state first, then
 * optionally arm magic-packet wakeup with PME# assertion.
 */
int
vr_wol(struct ifnet *ifp, int enable)
{
	struct vr_softc *sc = ifp->if_softc;

	/* Clear WOL configuration */
	CSR_WRITE_1(sc, VR_WOLCRCLR, 0xFF);

	/* Clear event status bits. */
	CSR_WRITE_1(sc, VR_PWRCSRCLR, 0xFF);

	/* Disable PME# assertion upon wake event. */
	VR_CLRBIT(sc, VR_STICKHW, VR_STICKHW_WOL_ENB);
	VR_SETBIT(sc, VR_WOLCFGCLR, VR_WOLCFG_PMEOVR);

	if (enable) {
		VR_SETBIT(sc, VR_WOLCRSET, VR_WOLCR_MAGIC);

		/* Enable PME# assertion upon wake event. */
		VR_SETBIT(sc, VR_STICKHW, VR_STICKHW_WOL_ENB);
		VR_SETBIT(sc, VR_WOLCFGSET, VR_WOLCFG_PMEOVR);
	}

	return (0);
}
#endif
1721 
/*
 * Allocate a cluster mbuf for RX ring slot 'r', load it into the
 * slot's DMA map, and rewrite the descriptor, writing the status
 * word last.  Returns 0 on success or an errno.
 */
int
vr_alloc_mbuf(struct vr_softc *sc, struct vr_chain_onefrag *r)
{
	struct vr_desc	*d;
	struct mbuf	*m;

	if (r == NULL)
		return (EINVAL);

	m = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
	if (!m)
		return (ENOBUFS);

	m->m_len = m->m_pkthdr.len = MCLBYTES;
	/* Trim 8 bytes off the front -- presumably for payload alignment;
	 * TODO confirm against the chip's RX buffer requirements. */
	m_adj(m, sizeof(u_int64_t));

	if (bus_dmamap_load_mbuf(sc->sc_dmat, r->vr_map, m, BUS_DMA_NOWAIT)) {
		m_free(m);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->sc_dmat, r->vr_map, 0, r->vr_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/* Reinitialize the RX descriptor */
	r->vr_mbuf = m;
	d = r->vr_ptr;
	d->vr_data = htole32(r->vr_map->dm_segs[0].ds_addr);
	if (sc->vr_quirks & VR_Q_BABYJUMBO)
		d->vr_ctl = htole32(VR_RXCTL | VR_RXLEN_BABYJUMBO);
	else
		d->vr_ctl = htole32(VR_RXCTL | VR_RXLEN);

	/* Flush the data/ctl words before the status word is written. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap.vrm_map, 0,
	    sc->sc_listmap.vrm_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	d->vr_status = htole32(VR_RXSTAT);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap.vrm_map, 0,
	    sc->sc_listmap.vrm_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	return (0);
}
1766