xref: /openbsd-src/sys/dev/pci/if_vr.c (revision 8500990981f885cbe5e6a4958549cacc238b5ae6)
1 /*	$OpenBSD: if_vr.c,v 1.40 2003/10/14 05:04:00 drahn Exp $	*/
2 
3 /*
4  * Copyright (c) 1997, 1998
5  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by Bill Paul.
18  * 4. Neither the name of the author nor the names of any co-contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  * $FreeBSD: src/sys/pci/if_vr.c,v 1.73 2003/08/22 07:13:22 imp Exp $
35  */
36 
37 /*
38  * VIA Rhine fast ethernet PCI NIC driver
39  *
40  * Supports various network adapters based on the VIA Rhine
41  * and Rhine II PCI controllers, including the D-Link DFE530TX.
42  * Datasheets are available at http://www.via.com.tw.
43  *
44  * Written by Bill Paul <wpaul@ctr.columbia.edu>
45  * Electrical Engineering Department
46  * Columbia University, New York City
47  */
48 
49 /*
 * The VIA Rhine controllers are similar in some respects to
 * the DEC tulip chips, except less complicated. The controller
52  * uses an MII bus and an external physical layer interface. The
53  * receiver has a one entry perfect filter and a 64-bit hash table
54  * multicast filter. Transmit and receive descriptors are similar
55  * to the tulip.
56  *
57  * The Rhine has a serious flaw in its transmit DMA mechanism:
58  * transmit buffers must be longword aligned. Unfortunately,
59  * FreeBSD doesn't guarantee that mbufs will be filled in starting
60  * at longword boundaries, so we have to do a buffer copy before
61  * transmission.
62  */
63 
64 #include "bpfilter.h"
65 
66 #include <sys/param.h>
67 #include <sys/systm.h>
68 #include <sys/sockio.h>
69 #include <sys/mbuf.h>
70 #include <sys/malloc.h>
71 #include <sys/kernel.h>
72 #include <sys/socket.h>
73 
74 #include <net/if.h>
75 #include <sys/device.h>
76 #ifdef INET
77 #include <netinet/in.h>
78 #include <netinet/in_systm.h>
79 #include <netinet/in_var.h>
80 #include <netinet/ip.h>
81 #include <netinet/if_ether.h>
82 #endif	/* INET */
83 #include <net/if_dl.h>
84 #include <net/if_media.h>
85 
86 #if NBPFILTER > 0
87 #include <net/bpf.h>
88 #endif
89 
90 #include <machine/bus.h>
91 
92 #include <uvm/uvm_extern.h>			/* for vtophys */
93 
94 #include <dev/mii/mii.h>
95 #include <dev/mii/miivar.h>
96 
97 #include <dev/pci/pcireg.h>
98 #include <dev/pci/pcivar.h>
99 #include <dev/pci/pcidevs.h>
100 
101 #define VR_USEIOSPACE
102 #undef VR_USESWSHIFT
103 
104 #include <dev/pci/if_vrreg.h>
105 
106 int vr_probe(struct device *, void *, void *);
107 void vr_attach(struct device *, struct device *, void *);
108 
/* Autoconf glue: match/attach entry points and the driver descriptor. */
struct cfattach vr_ca = {
	sizeof(struct vr_softc), vr_probe, vr_attach
};
struct cfdriver vr_cd = {
	0, "vr", DV_IFNET
};
115 
116 int vr_encap(struct vr_softc *, struct vr_chain *, struct mbuf *);
117 void vr_rxeof(struct vr_softc *);
118 void vr_rxeoc(struct vr_softc *);
119 void vr_txeof(struct vr_softc *);
120 void vr_txeoc(struct vr_softc *);
121 void vr_tick(void *);
122 int vr_intr(void *);
123 void vr_start(struct ifnet *);
124 int vr_ioctl(struct ifnet *, u_long, caddr_t);
125 void vr_init(void *);
126 void vr_stop(struct vr_softc *);
127 void vr_watchdog(struct ifnet *);
128 void vr_shutdown(void *);
129 int vr_ifmedia_upd(struct ifnet *);
130 void vr_ifmedia_sts(struct ifnet *, struct ifmediareq *);
131 
132 void vr_mii_sync(struct vr_softc *);
133 void vr_mii_send(struct vr_softc *, u_int32_t, int);
134 int vr_mii_readreg(struct vr_softc *, struct vr_mii_frame *);
135 int vr_mii_writereg(struct vr_softc *, struct vr_mii_frame *);
136 int vr_miibus_readreg(struct device *, int, int);
137 void vr_miibus_writereg(struct device *, int, int, int);
138 void vr_miibus_statchg(struct device *);
139 
140 void vr_setcfg(struct vr_softc *, int);
141 u_int8_t vr_calchash(u_int8_t *);
142 void vr_setmulti(struct vr_softc *);
143 void vr_reset(struct vr_softc *);
144 int vr_list_rx_init(struct vr_softc *);
145 int vr_list_tx_init(struct vr_softc *);
146 
/*
 * Read-modify-write helpers for 8-, 16- and 32-bit CSRs: set or clear
 * the bits in "x" while leaving the rest of the register untouched.
 */
#define VR_SETBIT(sc, reg, x)				\
	CSR_WRITE_1(sc, reg,				\
		CSR_READ_1(sc, reg) | (x))

#define VR_CLRBIT(sc, reg, x)				\
	CSR_WRITE_1(sc, reg,				\
		CSR_READ_1(sc, reg) & ~(x))

#define VR_SETBIT16(sc, reg, x)				\
	CSR_WRITE_2(sc, reg,				\
		CSR_READ_2(sc, reg) | (x))

#define VR_CLRBIT16(sc, reg, x)				\
	CSR_WRITE_2(sc, reg,				\
		CSR_READ_2(sc, reg) & ~(x))

#define VR_SETBIT32(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) | (x))

#define VR_CLRBIT32(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) & ~(x))

/*
 * Shorthand used by the bit-bang MII code (VR_USESWSHIFT) to raise
 * and lower individual lines in the MII command register.
 */
#define SIO_SET(x)					\
	CSR_WRITE_1(sc, VR_MIICMD,			\
		CSR_READ_1(sc, VR_MIICMD) | (x))

#define SIO_CLR(x)					\
	CSR_WRITE_1(sc, VR_MIICMD,			\
		CSR_READ_1(sc, VR_MIICMD) & ~(x))
178 
179 #ifdef VR_USESWSHIFT
180 /*
181  * Sync the PHYs by setting data bit and strobing the clock 32 times.
182  */
183 void
184 vr_mii_sync(sc)
185 	struct vr_softc		*sc;
186 {
187 	register int		i;
188 
189 	SIO_SET(VR_MIICMD_DIR|VR_MIICMD_DATAIN);
190 
191 	for (i = 0; i < 32; i++) {
192 		SIO_SET(VR_MIICMD_CLK);
193 		DELAY(1);
194 		SIO_CLR(VR_MIICMD_CLK);
195 		DELAY(1);
196 	}
197 
198 	return;
199 }
200 
201 /*
202  * Clock a series of bits through the MII.
203  */
204 void
205 vr_mii_send(sc, bits, cnt)
206 	struct vr_softc		*sc;
207 	u_int32_t		bits;
208 	int			cnt;
209 {
210 	int			i;
211 
212 	SIO_CLR(VR_MIICMD_CLK);
213 
214 	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
215 		if (bits & i) {
216 			SIO_SET(VR_MIICMD_DATAIN);
217 		} else {
218 			SIO_CLR(VR_MIICMD_DATAIN);
219 		}
220 		DELAY(1);
221 		SIO_CLR(VR_MIICMD_CLK);
222 		DELAY(1);
223 		SIO_SET(VR_MIICMD_CLK);
224 	}
225 }
226 #endif
227 
228 /*
 * Read a PHY register through the MII.
230  */
/*
 * Two implementations follow, selected at compile time: with
 * VR_USESWSHIFT the frame is bit-banged through the MII command
 * register; otherwise the chip's own MII read engine is used.
 * Returns 0 on success; the bit-bang variant returns 1 if the PHY
 * did not acknowledge the read.
 */
int
vr_mii_readreg(sc, frame)
	struct vr_softc		*sc;
	struct vr_mii_frame	*frame;

#ifdef VR_USESWSHIFT
{
	int			i, ack, s;

	s = splimp();

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = VR_MII_STARTDELIM;
	frame->mii_opcode = VR_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	/* Put the MII interface under direct (software) control. */
	CSR_WRITE_1(sc, VR_MIICMD, 0);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);

	/*
 	 * Turn on data xmit.
	 */
	SIO_SET(VR_MIICMD_DIR);

	vr_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	vr_mii_send(sc, frame->mii_stdelim, 2);
	vr_mii_send(sc, frame->mii_opcode, 2);
	vr_mii_send(sc, frame->mii_phyaddr, 5);
	vr_mii_send(sc, frame->mii_regaddr, 5);

	/* Idle bit */
	SIO_CLR((VR_MIICMD_CLK|VR_MIICMD_DATAIN));
	DELAY(1);
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);

	/* Turn off xmit. */
	SIO_CLR(VR_MIICMD_DIR);

	/* Check for ack */
	SIO_CLR(VR_MIICMD_CLK);
	DELAY(1);
	ack = CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT;
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			SIO_CLR(VR_MIICMD_CLK);
			DELAY(1);
			SIO_SET(VR_MIICMD_CLK);
			DELAY(1);
		}
		goto fail;
	}

	/* Clock in the 16 data bits, MSB first. */
	for (i = 0x8000; i; i >>= 1) {
		SIO_CLR(VR_MIICMD_CLK);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT)
				frame->mii_data |= i;
			DELAY(1);
		}
		SIO_SET(VR_MIICMD_CLK);
		DELAY(1);
	}

fail:

	/* One final clock pulse to leave the bus idle. */
	SIO_CLR(VR_MIICMD_CLK);
	DELAY(1);
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);

	splx(s);

	if (ack)
		return(1);
	return(0);
}
#else
{
	int			s, i;

	s = splimp();

	/* Set the PHY-address */
	CSR_WRITE_1(sc, VR_PHYADDR, (CSR_READ_1(sc, VR_PHYADDR)& 0xe0)|
	    frame->mii_phyaddr);

	/* Set the register-address */
	CSR_WRITE_1(sc, VR_MIIADDR, frame->mii_regaddr);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_READ_ENB);

	/*
	 * Poll up to ~10ms for the chip to clear READ_ENB.
	 * NOTE(review): a timeout here is not reported to the caller;
	 * the (possibly stale) data register is read regardless.
	 */
	for (i = 0; i < 10000; i++) {
		if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_READ_ENB) == 0)
			break;
		DELAY(1);
	}

	frame->mii_data = CSR_READ_2(sc, VR_MIIDATA);

	(void)splx(s);

	return(0);
}
#endif
350 
351 
352 /*
353  * Write to a PHY register through the MII.
354  */
/*
 * Two implementations, selected at compile time like vr_mii_readreg():
 * bit-banged write under VR_USESWSHIFT, otherwise via the chip's own
 * MII write engine. Always returns 0.
 */
int
vr_mii_writereg(sc, frame)
	struct vr_softc		*sc;
	struct vr_mii_frame	*frame;

#ifdef VR_USESWSHIFT
{
	int			s;

	s = splimp();

	/* Put the MII interface under direct (software) control. */
	CSR_WRITE_1(sc, VR_MIICMD, 0);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);

	/*
	 * Set up frame for TX.
	 */

	frame->mii_stdelim = VR_MII_STARTDELIM;
	frame->mii_opcode = VR_MII_WRITEOP;
	frame->mii_turnaround = VR_MII_TURNAROUND;

	/*
 	 * Turn on data output.
	 */
	SIO_SET(VR_MIICMD_DIR);

	vr_mii_sync(sc);

	/* Clock out the whole management frame, MSB first. */
	vr_mii_send(sc, frame->mii_stdelim, 2);
	vr_mii_send(sc, frame->mii_opcode, 2);
	vr_mii_send(sc, frame->mii_phyaddr, 5);
	vr_mii_send(sc, frame->mii_regaddr, 5);
	vr_mii_send(sc, frame->mii_turnaround, 2);
	vr_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);
	SIO_CLR(VR_MIICMD_CLK);
	DELAY(1);

	/*
	 * Turn off xmit.
	 */
	SIO_CLR(VR_MIICMD_DIR);

	splx(s);

	return(0);
}
#else
{
	int			s, i;

	s = splimp();

	/* Set the PHY-address */
	CSR_WRITE_1(sc, VR_PHYADDR, (CSR_READ_1(sc, VR_PHYADDR)& 0xe0)|
	    frame->mii_phyaddr);

	/* Set the register-address and data to write */
	CSR_WRITE_1(sc, VR_MIIADDR, frame->mii_regaddr);
	CSR_WRITE_2(sc, VR_MIIDATA, frame->mii_data);

	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_WRITE_ENB);

	/*
	 * Poll up to ~10ms for the chip to clear WRITE_ENB.
	 * NOTE(review): a timeout here is silently ignored.
	 */
	for (i = 0; i < 10000; i++) {
		if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_WRITE_ENB) == 0)
			break;
		DELAY(1);
	}

	(void)splx(s);

	return(0);
}
#endif
433 
434 int
435 vr_miibus_readreg(dev, phy, reg)
436 	struct device *dev;
437 	int phy, reg;
438 {
439 	struct vr_softc *sc = (struct vr_softc *)dev;
440 	struct vr_mii_frame frame;
441 
442 	switch (sc->vr_revid) {
443 	case REV_ID_VT6102_APOLLO:
444 		if (phy != 1)
445 			return 0;
446 	default:
447 		break;
448 	}
449 
450 	bzero((char *)&frame, sizeof(frame));
451 
452 	frame.mii_phyaddr = phy;
453 	frame.mii_regaddr = reg;
454 	vr_mii_readreg(sc, &frame);
455 
456 	return(frame.mii_data);
457 }
458 
459 void
460 vr_miibus_writereg(dev, phy, reg, data)
461 	struct device *dev;
462 	int phy, reg, data;
463 {
464 	struct vr_softc *sc = (struct vr_softc *)dev;
465 	struct vr_mii_frame frame;
466 
467 	switch (sc->vr_revid) {
468 	case REV_ID_VT6102_APOLLO:
469 		if (phy != 1)
470 			return;
471 	default:
472 		break;
473 	}
474 
475 	bzero((char *)&frame, sizeof(frame));
476 
477 	frame.mii_phyaddr = phy;
478 	frame.mii_regaddr = reg;
479 	frame.mii_data = data;
480 
481 	vr_mii_writereg(sc, &frame);
482 
483 	return;
484 }
485 
486 void
487 vr_miibus_statchg(dev)
488 	struct device *dev;
489 {
490 	struct vr_softc *sc = (struct vr_softc *)dev;
491 
492 	vr_setcfg(sc, sc->sc_mii.mii_media_active);
493 }
494 
495 /*
496  * Calculate CRC of a multicast group address, return the lower 6 bits.
497  */
/*
 * Run the Ethernet CRC over the six address octets (LSB of each byte
 * first) and return the top six bits of the result, which the chip
 * uses as the index into its 64-bit multicast hash filter.
 */
u_int8_t
vr_calchash(u_int8_t *addr)
{
	u_int32_t	crc, feed;
	int		byte, bit;
	u_int8_t	octet;

	crc = 0xFFFFFFFF; /* initial value */

	for (byte = 0; byte < 6; byte++) {
		octet = addr[byte];
		for (bit = 0; bit < 8; bit++) {
			/* Feedback is MSB of the register XOR next input bit. */
			feed = (crc >> 31) ^ (octet & 0x01);
			crc <<= 1;
			octet >>= 1;
			if (feed)
				crc = (crc ^ 0x04c11db6) | feed;
		}
	}

	return ((crc >> 26) & 0x0000003F);
}
523 
524 /*
525  * Program the 64-bit multicast hash filter.
526  */
527 void
528 vr_setmulti(sc)
529 	struct vr_softc		*sc;
530 {
531 	struct ifnet		*ifp;
532 	int			h = 0;
533 	u_int32_t		hashes[2] = { 0, 0 };
534 	struct arpcom *ac = &sc->arpcom;
535 	struct ether_multi *enm;
536 	struct ether_multistep step;
537 	u_int8_t		rxfilt;
538 	int			mcnt = 0;
539 
540 	ifp = &sc->arpcom.ac_if;
541 
542 	rxfilt = CSR_READ_1(sc, VR_RXCFG);
543 
544 	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
545 		rxfilt |= VR_RXCFG_RX_MULTI;
546 		CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
547 		CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
548 		CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
549 		return;
550 	}
551 
552 	/* first, zot all the existing hash bits */
553 	CSR_WRITE_4(sc, VR_MAR0, 0);
554 	CSR_WRITE_4(sc, VR_MAR1, 0);
555 
556 	/* now program new ones */
557 	ETHER_FIRST_MULTI(step, ac, enm);
558 	while (enm != NULL) {
559 		h = vr_calchash(enm->enm_addrlo);
560 		if (h < 32)
561 			hashes[0] |= (1 << h);
562 		else
563 			hashes[1] |= (1 << (h - 32));
564 		mcnt++;
565 
566 		ETHER_NEXT_MULTI(step, enm);
567 	}
568 
569 	if (mcnt)
570 		rxfilt |= VR_RXCFG_RX_MULTI;
571 	else
572 		rxfilt &= ~VR_RXCFG_RX_MULTI;
573 
574 	CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
575 	CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
576 	CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
577 
578 	return;
579 }
580 
581 /*
582  * In order to fiddle with the
583  * 'full-duplex' and '100Mbps' bits in the netconfig register, we
584  * first have to put the transmit and/or receive logic in the idle state.
585  */
586 void
587 vr_setcfg(sc, media)
588 	struct vr_softc *sc;
589 	int media;
590 {
591 	int restart = 0;
592 
593 	if (CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON)) {
594 		restart = 1;
595 		VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON));
596 	}
597 
598 	if ((media & IFM_GMASK) == IFM_FDX)
599 		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
600 	else
601 		VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
602 
603 	if (restart)
604 		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);
605 
606 	return;
607 }
608 
609 void
610 vr_reset(sc)
611 	struct vr_softc		*sc;
612 {
613 	register int		i;
614 
615 	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);
616 
617 	for (i = 0; i < VR_TIMEOUT; i++) {
618 		DELAY(10);
619 		if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
620 			break;
621 	}
622 	if (i == VR_TIMEOUT) {
623 		if (sc->vr_revid < REV_ID_VT3065_A)
624 			printf("%s: reset never completed!\n",
625 			    sc->sc_dev.dv_xname);
626 		else {
627 			/* Use newer force reset command */
628 			printf("%s: Using force reset command.\n",
629 			    sc->sc_dev.dv_xname);
630 			VR_SETBIT(sc, VR_MISC_CR1, VR_MISCCR1_FORSRST);
631 		}
632 	}
633 
634 	/* Wait a little while for the chip to get its brains in order. */
635 	DELAY(1000);
636 }
637 
/* PCI vendor/product pairs recognized by vr_probe(). */
const struct pci_matchid vr_devices[] = {
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_RHINE },
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_RHINEII },
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_RHINEII_2 },
	{ PCI_VENDOR_VIATECH, PCI_PRODUCT_VIATECH_VT6105 },
	{ PCI_VENDOR_DELTA, PCI_PRODUCT_DELTA_RHINEII },
	{ PCI_VENDOR_ADDTRON, PCI_PRODUCT_ADDTRON_RHINEII },
};
646 
647 /*
648  * Probe for a VIA Rhine chip.
649  */
650 int
651 vr_probe(parent, match, aux)
652 	struct device *parent;
653 	void *match, *aux;
654 {
655 	return (pci_matchbyid((struct pci_attach_args *)aux, vr_devices,
656 	    sizeof(vr_devices)/sizeof(vr_devices[0])));
657 }
658 
659 /*
660  * Attach the interface. Allocate softc structures, do ifmedia
661  * setup and ethernet/BPF attach.
662  */
void
vr_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	int			s, i;
	u_int32_t		command;
	struct vr_softc		*sc = (struct vr_softc *)self;
	struct pci_attach_args 	*pa = aux;
	pci_chipset_tag_t	pc = pa->pa_pc;
	pci_intr_handle_t	ih;
	const char		*intrstr = NULL;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	bus_addr_t		iobase;
	bus_size_t		iosize;
	int rseg;
	caddr_t kva;

	s = splimp();

	/*
	 * Handle power management nonsense: if the chip is in a
	 * non-D0 power state, put it back in D0 while preserving the
	 * BAR and interrupt-line config registers across the change.
	 */
	command = pci_conf_read(pa->pa_pc, pa->pa_tag,
	    VR_PCI_CAPID) & 0x000000ff;
	if (command == 0x01) {
		command = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    VR_PCI_PWRMGMTCTRL);
		if (command & VR_PSTATE_MASK) {
			u_int32_t		iobase, membase, irq;

			/* Save important PCI config data. */
			iobase = pci_conf_read(pa->pa_pc, pa->pa_tag,
						VR_PCI_LOIO);
			membase = pci_conf_read(pa->pa_pc, pa->pa_tag,
						VR_PCI_LOMEM);
			irq = pci_conf_read(pa->pa_pc, pa->pa_tag,
						VR_PCI_INTLINE);

			/* Reset the power state. */
			command &= 0xFFFFFFFC;
			pci_conf_write(pa->pa_pc, pa->pa_tag,
						VR_PCI_PWRMGMTCTRL, command);

			/* Restore PCI config data. */
			pci_conf_write(pa->pa_pc, pa->pa_tag,
						VR_PCI_LOIO, iobase);
			pci_conf_write(pa->pa_pc, pa->pa_tag,
						VR_PCI_LOMEM, membase);
			pci_conf_write(pa->pa_pc, pa->pa_tag,
						VR_PCI_INTLINE, irq);
		}
	}

	/*
	 * Map control/status registers, using I/O or memory space
	 * depending on VR_USEIOSPACE.
	 */
	command = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	sc->vr_revid = PCI_REVISION(pa->pa_class);

#ifdef VR_USEIOSPACE
	if (!(command & PCI_COMMAND_IO_ENABLE)) {
		printf(": failed to enable I/O ports\n");
		goto fail;
	}
	if (pci_io_find(pc, pa->pa_tag, VR_PCI_LOIO, &iobase, &iosize)) {
		printf(": failed to find i/o space\n");
		goto fail;
	}
	if (bus_space_map(pa->pa_iot, iobase, iosize, 0, &sc->vr_bhandle)) {
		printf(": failed map i/o space\n");
		goto fail;
	}
	sc->vr_btag = pa->pa_iot;
#else
	if (!(command & PCI_COMMAND_MEM_ENABLE)) {
		printf(": failed to enable memory mapping\n");
		goto fail;
	}
	if (pci_mem_find(pc, pa->pa_tag, VR_PCI_LOMEM, &iobase, &iosize)) {
		printf(": failed to find memory space\n");
		goto fail;
	}
	if (bus_space_map(pa->pa_memt, iobase, iosize, 0, &sc->vr_bhandle)) {
		printf(": failed map memory space\n");
		goto fail;
	}
	sc->vr_btag = pa->pa_memt;
#endif

	/* Allocate interrupt */
	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		goto fail;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, vr_intr, sc,
				       self->dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": could not establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail;
	}
	printf(": %s", intrstr);

	/*
	 * Windows may put the chip in suspend mode when it
	 * shuts down. Be sure to kick it in the head to wake it
	 * up again.
	 */
	VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1));

	/* Reset the adapter. */
	vr_reset(sc);

	/*
	 * Turn on bit2 (MIION) in PCI configuration register 0x53 during
	 * initialization and disable AUTOPOLL.
	 */
	pci_conf_write(pa->pa_pc, pa->pa_tag, VR_PCI_MODE,
	    pci_conf_read(pa->pa_pc, pa->pa_tag, VR_PCI_MODE) |
	    (VR_MODE3_MIION << 24));
	VR_CLRBIT(sc, VR_MIICMD, VR_MIICMD_AUTOPOLL);

	/*
	 * Get station address. The way the Rhine chips work,
	 * you're not allowed to directly access the EEPROM once
	 * they've been programmed a special way. Consequently,
	 * we need to read the node address from the PAR0 and PAR1
	 * registers.
	 */
	VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
	DELAY(1000);
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		sc->arpcom.ac_enaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);

	/*
	 * A Rhine chip was detected. Inform the world.
	 */
	printf(" address %s\n", ether_sprintf(sc->arpcom.ac_enaddr));

	/*
	 * Allocate, map and load one contiguous DMA region holding
	 * both the TX and RX descriptor lists; each failure path
	 * unwinds the steps taken so far.
	 */
	sc->sc_dmat = pa->pa_dmat;
	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct vr_list_data),
	    PAGE_SIZE, 0, &sc->sc_listseg, 1, &rseg, BUS_DMA_NOWAIT)) {
		printf("%s: can't alloc list\n", sc->sc_dev.dv_xname);
		goto fail;
	}
	/*
	 * NOTE(review): "%d" does not match sizeof()'s size_t/unsigned
	 * long type on 64-bit platforms — should be "%lu" with a cast.
	 */
	if (bus_dmamem_map(sc->sc_dmat, &sc->sc_listseg, rseg,
	    sizeof(struct vr_list_data), &kva, BUS_DMA_NOWAIT)) {
		printf("%s: can't map dma buffers (%d bytes)\n",
		    sc->sc_dev.dv_xname, sizeof(struct vr_list_data));
		bus_dmamem_free(sc->sc_dmat, &sc->sc_listseg, rseg);
		goto fail;
	}
	if (bus_dmamap_create(sc->sc_dmat, sizeof(struct vr_list_data), 1,
	    sizeof(struct vr_list_data), 0, BUS_DMA_NOWAIT, &sc->sc_listmap)) {
		printf("%s: can't create dma map\n", sc->sc_dev.dv_xname);
		bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(struct vr_list_data));
		bus_dmamem_free(sc->sc_dmat, &sc->sc_listseg, rseg);
		goto fail;
	}
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_listmap, kva,
	    sizeof(struct vr_list_data), NULL, BUS_DMA_NOWAIT)) {
		printf("%s: can't load dma map\n", sc->sc_dev.dv_xname);
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_listmap);
		bus_dmamem_unmap(sc->sc_dmat, kva, sizeof(struct vr_list_data));
		bus_dmamem_free(sc->sc_dmat, &sc->sc_listseg, rseg);
		goto fail;
	}
	sc->vr_ldata = (struct vr_list_data *)kva;
	bzero(sc->vr_ldata, sizeof(struct vr_list_data));

	/* Initialize the generic network interface structure. */
	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vr_ioctl;
	ifp->if_output = ether_output;
	ifp->if_start = vr_start;
	ifp->if_watchdog = vr_watchdog;
	ifp->if_baudrate = 10000000;
	IFQ_SET_READY(&ifp->if_snd);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	/*
	 * Do MII setup.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = vr_miibus_readreg;
	sc->sc_mii.mii_writereg = vr_miibus_writereg;
	sc->sc_mii.mii_statchg = vr_miibus_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, vr_ifmedia_upd, vr_ifmedia_sts);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY,
	    0);
	/* With no PHY found, pin the media to "none"; else autoselect. */
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
	timeout_set(&sc->sc_to, vr_tick, sc);

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);

	shutdownhook_establish(vr_shutdown, sc);

	/* The success path deliberately falls through to fail: to splx(). */
fail:
	splx(s);
	return;
}
878 
879 /*
880  * Initialize the transmit descriptors.
881  */
882 int
883 vr_list_tx_init(sc)
884 	struct vr_softc		*sc;
885 {
886 	struct vr_chain_data	*cd;
887 	struct vr_list_data	*ld;
888 	int			i;
889 
890 	cd = &sc->vr_cdata;
891 	ld = sc->vr_ldata;
892 	for (i = 0; i < VR_TX_LIST_CNT; i++) {
893 		cd->vr_tx_chain[i].vr_ptr = &ld->vr_tx_list[i];
894 		cd->vr_tx_chain[i].vr_paddr =
895 		    sc->sc_listmap->dm_segs[0].ds_addr +
896 		    offsetof(struct vr_list_data, vr_tx_list[i]);
897 
898 		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
899 		    MCLBYTES, 0, BUS_DMA_NOWAIT, &cd->vr_tx_chain[i].vr_map))
900 			return (ENOBUFS);
901 
902 		if (i == (VR_TX_LIST_CNT - 1))
903 			cd->vr_tx_chain[i].vr_nextdesc =
904 				&cd->vr_tx_chain[0];
905 		else
906 			cd->vr_tx_chain[i].vr_nextdesc =
907 				&cd->vr_tx_chain[i + 1];
908 	}
909 
910 	cd->vr_tx_free = &cd->vr_tx_chain[0];
911 	cd->vr_tx_tail = cd->vr_tx_head = NULL;
912 
913 	return (0);
914 }
915 
916 
917 /*
918  * Initialize the RX descriptors and allocate mbufs for them. Note that
919  * we arrange the descriptors in a closed ring, so that the last descriptor
920  * points back to the first.
921  */
int
vr_list_rx_init(sc)
	struct vr_softc		*sc;
{
	struct vr_chain_data	*cd;
	struct vr_list_data	*ld;
	int			i;
	struct vr_desc		*d;

	cd = &sc->vr_cdata;
	ld = sc->vr_ldata;

	/*
	 * For each RX slot: bind the software descriptor to its
	 * hardware slot, allocate a cluster-sized receive buffer,
	 * and create/load a DMA map for it.
	 * NOTE(review): on a mid-loop failure the buffers and maps
	 * created by earlier iterations are not freed here — confirm
	 * the caller tears them down.
	 */
	for (i = 0; i < VR_RX_LIST_CNT; i++) {
		d = (struct vr_desc *)&ld->vr_rx_list[i];
		cd->vr_rx_chain[i].vr_ptr = d;
		cd->vr_rx_chain[i].vr_paddr =
		    sc->sc_listmap->dm_segs[0].ds_addr +
		    offsetof(struct vr_list_data, vr_rx_list[i]);
		cd->vr_rx_chain[i].vr_buf =
		    (u_int8_t *)malloc(MCLBYTES, M_DEVBUF, M_NOWAIT);
		if (cd->vr_rx_chain[i].vr_buf == NULL)
			return (ENOBUFS);

		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT | BUS_DMA_READ,
		    &cd->vr_rx_chain[i].vr_map))
			return (ENOBUFS);

		if (bus_dmamap_load(sc->sc_dmat, cd->vr_rx_chain[i].vr_map,
		    cd->vr_rx_chain[i].vr_buf, MCLBYTES, NULL, BUS_DMA_NOWAIT))
			return (ENOBUFS);
		bus_dmamap_sync(sc->sc_dmat, cd->vr_rx_chain[i].vr_map,
		    0, cd->vr_rx_chain[i].vr_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);

		/*
		 * Hand the descriptor to the chip. The DMA address
		 * skips the first sizeof(u_int64_t) bytes of the
		 * buffer (vr_rxeof() accounts for the same offset).
		 */
		d->vr_status = htole32(VR_RXSTAT);
		d->vr_data =
		    htole32(cd->vr_rx_chain[i].vr_map->dm_segs[0].ds_addr +
		    sizeof(u_int64_t));
		d->vr_ctl = htole32(VR_RXCTL | VR_RXLEN);

		/* Close the ring: the last descriptor points at the first. */
		if (i == (VR_RX_LIST_CNT - 1)) {
			cd->vr_rx_chain[i].vr_nextdesc =
			    &cd->vr_rx_chain[0];
			ld->vr_rx_list[i].vr_next =
			    htole32(sc->sc_listmap->dm_segs[0].ds_addr +
			    offsetof(struct vr_list_data, vr_rx_list[0]));
		} else {
			cd->vr_rx_chain[i].vr_nextdesc =
			    &cd->vr_rx_chain[i + 1];
			ld->vr_rx_list[i].vr_next =
			    htole32(sc->sc_listmap->dm_segs[0].ds_addr +
			    offsetof(struct vr_list_data, vr_rx_list[i + 1]));
		}
	}

	cd->vr_rx_head = &cd->vr_rx_chain[0];

	/* Flush the descriptor list out to the device. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap, 0,
	    sc->sc_listmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	return(0);
}
986 
987 /*
988  * A frame has been uploaded: pass the resulting mbuf chain up to
989  * the higher level protocols.
990  */
void
vr_rxeof(sc)
	struct vr_softc		*sc;
{
	struct ifnet		*ifp;
	struct vr_chain_onefrag	*cur_rx;
	int			total_len = 0;
	u_int32_t		rxstat;

	ifp = &sc->arpcom.ac_if;

	/* Walk the RX ring until we hit a descriptor still owned by the chip. */
	for (;;) {
		struct mbuf		*m0 = NULL;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
		    0, sc->sc_listmap->dm_mapsize,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		rxstat = letoh32(sc->vr_cdata.vr_rx_head->vr_ptr->vr_status);
		if (rxstat & VR_RXSTAT_OWN)
			break;

		cur_rx = sc->vr_cdata.vr_rx_head;
		sc->vr_cdata.vr_rx_head = cur_rx->vr_nextdesc;

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
	 	 * comes up in the ring.
		 */
		if (rxstat & VR_RXSTAT_RXERR) {
			ifp->if_ierrors++;
			printf("%s: rx error (%02x):",
			    sc->sc_dev.dv_xname, rxstat & 0x000000ff);
			if (rxstat & VR_RXSTAT_CRCERR)
				printf(" crc error");
			if (rxstat & VR_RXSTAT_FRAMEALIGNERR)
				printf(" frame alignment error");
			if (rxstat & VR_RXSTAT_FIFOOFLOW)
				printf(" FIFO overflow");
			if (rxstat & VR_RXSTAT_GIANT)
				printf(" received giant packet");
			if (rxstat & VR_RXSTAT_RUNT)
				printf(" received runt packet");
			if (rxstat & VR_RXSTAT_BUSERR)
				printf(" system bus error");
			if (rxstat & VR_RXSTAT_BUFFERR)
				printf(" rx buffer error");
			printf("\n");

			/* Reinitialize descriptor */
			cur_rx->vr_ptr->vr_status = htole32(VR_RXSTAT);
			cur_rx->vr_ptr->vr_data =
			    htole32(cur_rx->vr_map->dm_segs[0].ds_addr +
			    sizeof(u_int64_t));
			cur_rx->vr_ptr->vr_ctl = htole32(VR_RXCTL | VR_RXLEN);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
			    0, sc->sc_listmap->dm_mapsize,
			    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
			continue;
		}

		/* No errors; receive the packet. */
		total_len = VR_RXBYTES(letoh32(cur_rx->vr_ptr->vr_status));

		/*
		 * XXX The VIA Rhine chip includes the CRC with every
		 * received frame, and there's no way to turn this
		 * behavior off (at least, I can't find anything in
	 	 * the manual that explains how to do it) so we have
		 * to trim off the CRC manually.
		 */
		total_len -= ETHER_CRC_LEN;

		/*
		 * Copy the frame out of the DMA buffer into a fresh
		 * mbuf chain. The DMA'd data starts sizeof(u_int64_t)
		 * into vr_buf (see vr_list_rx_init()); we start
		 * ETHER_ALIGN bytes earlier and m_adj() below so the
		 * payload ends up longword aligned.
		 */
		bus_dmamap_sync(sc->sc_dmat, cur_rx->vr_map, 0,
		    cur_rx->vr_map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		m0 = m_devget(cur_rx->vr_buf + sizeof(u_int64_t) - ETHER_ALIGN,
		    total_len + ETHER_ALIGN, 0, ifp, NULL);
		bus_dmamap_sync(sc->sc_dmat, cur_rx->vr_map, 0,
		    cur_rx->vr_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);

		/* Reinitialize descriptor */
		cur_rx->vr_ptr->vr_status = htole32(VR_RXSTAT);
		cur_rx->vr_ptr->vr_data =
		    htole32(cur_rx->vr_map->dm_segs[0].ds_addr +
		    sizeof(u_int64_t));
		cur_rx->vr_ptr->vr_ctl = htole32(VR_RXCTL | VR_RXLEN);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap, 0,
		    sc->sc_listmap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

		if (m0 == NULL) {
			ifp->if_ierrors++;
			continue;
		}
		m_adj(m0, ETHER_ALIGN);

		ifp->if_ipackets++;

#if NBPFILTER > 0
		/*
		 * Handle BPF listeners. Let the BPF user see the packet.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif
		/* pass it on. */
		ether_input_mbuf(ifp, m0);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap,
	    0, sc->sc_listmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return;
}
1109 
/*
 * RX 'end of channel' / receive-error recovery: stop the receiver,
 * drain any completed frames, then restart reception from the head
 * of the RX ring.  Called from the interrupt handler on RX errors.
 */
void
vr_rxeoc(sc)
	struct vr_softc		*sc;
{
	struct ifnet		*ifp;
	int			i;

	ifp = &sc->arpcom.ac_if;

	/* Whatever got us here counts as one input error. */
	ifp->if_ierrors++;

	/* Ask the chip to stop receiving and give it time to comply. */
	VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
	DELAY(10000);

	/* Bounded busy-wait (0x400 polls) for RX_ON to clear. */
	for (i = 0x400;
	    i && (CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RX_ON);
	    i--)
		;       /* Wait for receiver to stop */

	if (!i) {
		/*
		 * Receiver refused to stop; flag the interface for a
		 * full restart, performed later from vr_tick().
		 */
		printf("%s: rx shutdown error!\n", sc->sc_dev.dv_xname);
		sc->vr_flags |= VR_F_RESTART;
		return;
	}

	/* Harvest frames that completed before the stop. */
	vr_rxeof(sc);

	/* Point the chip back at the ring head and restart reception. */
	CSR_WRITE_4(sc, VR_RXADDR, sc->vr_cdata.vr_rx_head->vr_paddr);
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO);

	return;
}
1143 
1144 /*
1145  * A frame was downloaded to the chip. It's safe for us to clean up
1146  * the list buffers.
1147  */
1148 
1149 void
1150 vr_txeof(sc)
1151 	struct vr_softc		*sc;
1152 {
1153 	struct vr_chain		*cur_tx;
1154 	struct ifnet		*ifp;
1155 
1156 	ifp = &sc->arpcom.ac_if;
1157 
1158 	/* Reset the timeout timer; if_txeoc will clear it. */
1159 	ifp->if_timer = 5;
1160 
1161 	/* Sanity check. */
1162 	if (sc->vr_cdata.vr_tx_head == NULL)
1163 		return;
1164 
1165 	/*
1166 	 * Go through our tx list and free mbufs for those
1167 	 * frames that have been transmitted.
1168 	 */
1169 	while(sc->vr_cdata.vr_tx_head->vr_mbuf != NULL) {
1170 		u_int32_t		txstat;
1171 		int			i;
1172 
1173 		cur_tx = sc->vr_cdata.vr_tx_head;
1174 		txstat = letoh32(cur_tx->vr_ptr->vr_status);
1175 
1176 		if ((txstat & VR_TXSTAT_ABRT) ||
1177 		    (txstat & VR_TXSTAT_UDF)) {
1178 			for (i = 0x400;
1179 			    i && (CSR_READ_2(sc, VR_COMMAND) & VR_CMD_TX_ON);
1180 			    i--)
1181 				;	/* Wait for chip to shutdown */
1182 			if (!i) {
1183 				printf("%s: tx shutdown timeout\n",
1184 				    sc->sc_dev.dv_xname);
1185 				sc->vr_flags |= VR_F_RESTART;
1186 				break;
1187 			}
1188 			VR_TXOWN(cur_tx) = htole32(VR_TXSTAT_OWN);
1189 			CSR_WRITE_4(sc, VR_TXADDR, cur_tx->vr_paddr);
1190 			break;
1191 		}
1192 
1193 		if (txstat & VR_TXSTAT_OWN)
1194 			break;
1195 
1196 		if (txstat & VR_TXSTAT_ERRSUM) {
1197 			ifp->if_oerrors++;
1198 			if (txstat & VR_TXSTAT_DEFER)
1199 				ifp->if_collisions++;
1200 			if (txstat & VR_TXSTAT_LATECOLL)
1201 				ifp->if_collisions++;
1202 		}
1203 
1204 		ifp->if_collisions +=(txstat & VR_TXSTAT_COLLCNT) >> 3;
1205 
1206 		ifp->if_opackets++;
1207 		if (cur_tx->vr_map != NULL && cur_tx->vr_map->dm_segs > 0)
1208 			bus_dmamap_unload(sc->sc_dmat, cur_tx->vr_map);
1209 		if (cur_tx->vr_mbuf != NULL) {
1210 			m_freem(cur_tx->vr_mbuf);
1211 			cur_tx->vr_mbuf = NULL;
1212 		}
1213 
1214 		if (sc->vr_cdata.vr_tx_head == sc->vr_cdata.vr_tx_tail) {
1215 			sc->vr_cdata.vr_tx_head = NULL;
1216 			sc->vr_cdata.vr_tx_tail = NULL;
1217 			break;
1218 		}
1219 
1220 		sc->vr_cdata.vr_tx_head = cur_tx->vr_nextdesc;
1221 	}
1222 
1223 	return;
1224 }
1225 
1226 /*
1227  * TX 'end of channel' interrupt handler.
1228  */
1229 void
1230 vr_txeoc(sc)
1231 	struct vr_softc		*sc;
1232 {
1233 	struct ifnet		*ifp;
1234 
1235 	ifp = &sc->arpcom.ac_if;
1236 
1237 	if (sc->vr_cdata.vr_tx_head == NULL) {
1238 		ifp->if_flags &= ~IFF_OACTIVE;
1239 		sc->vr_cdata.vr_tx_tail = NULL;
1240 		ifp->if_timer = 0;
1241 	}
1242 
1243 	return;
1244 }
1245 
/*
 * Once-a-second timer: perform any deferred chip restart requested by
 * the interrupt/error paths, tick the MII, and re-arm the timeout.
 */
void
vr_tick(xsc)
	void *xsc;
{
	struct vr_softc *sc = xsc;
	int s;

	/* Block network interrupts while we poke the chip. */
	s = splimp();
	if (sc->vr_flags & VR_F_RESTART) {
		/* Error paths set VR_F_RESTART when the chip wedged. */
		printf("%s: restarting\n", sc->sc_dev.dv_xname);
		vr_stop(sc);
		vr_reset(sc);
		vr_init(sc);
		sc->vr_flags &= ~VR_F_RESTART;
	}

	mii_tick(&sc->sc_mii);
	/* Reschedule ourselves one second from now. */
	timeout_add(&sc->sc_to, hz);
	splx(s);
}
1266 
1267 int
1268 vr_intr(arg)
1269 	void			*arg;
1270 {
1271 	struct vr_softc		*sc;
1272 	struct ifnet		*ifp;
1273 	u_int16_t		status;
1274 	int claimed = 0;
1275 
1276 	sc = arg;
1277 	ifp = &sc->arpcom.ac_if;
1278 
1279 	/* Supress unwanted interrupts. */
1280 	if (!(ifp->if_flags & IFF_UP)) {
1281 		vr_stop(sc);
1282 		return 0;
1283 	}
1284 
1285 	/* Disable interrupts. */
1286 	CSR_WRITE_2(sc, VR_IMR, 0x0000);
1287 
1288 	for (;;) {
1289 
1290 		status = CSR_READ_2(sc, VR_ISR);
1291 		if (status)
1292 			CSR_WRITE_2(sc, VR_ISR, status);
1293 
1294 		if ((status & VR_INTRS) == 0)
1295 			break;
1296 
1297 		claimed = 1;
1298 
1299 		if (status & VR_ISR_RX_OK)
1300 			vr_rxeof(sc);
1301 
1302 		if (status & VR_ISR_RX_DROPPED) {
1303 			printf("%s: rx packet lost\n", sc->sc_dev.dv_xname);
1304 			ifp->if_ierrors++;
1305 		}
1306 
1307 		if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_NOBUF) ||
1308 		    (status & VR_ISR_RX_NOBUF) || (status & VR_ISR_RX_OFLOW)) {
1309 			printf("%s: receive error (%04x)",
1310 			    sc->sc_dev.dv_xname, status);
1311 			if (status & VR_ISR_RX_NOBUF)
1312 				printf(" no buffers");
1313 			if (status & VR_ISR_RX_OFLOW)
1314 				printf(" overflow");
1315 			if (status & VR_ISR_RX_DROPPED)
1316 				printf(" packet lost");
1317 			printf("\n");
1318 			vr_rxeoc(sc);
1319 		}
1320 
1321 		if ((status & VR_ISR_BUSERR) || (status & VR_ISR_TX_UNDERRUN)) {
1322 			vr_reset(sc);
1323 			vr_init(sc);
1324 			break;
1325 		}
1326 
1327 		if ((status & VR_ISR_TX_OK) || (status & VR_ISR_TX_ABRT) ||
1328 		    (status & VR_ISR_TX_ABRT2) || (status & VR_ISR_UDFI)) {
1329 			vr_txeof(sc);
1330 			if ((status & VR_ISR_UDFI) ||
1331 			    (status & VR_ISR_TX_ABRT2) ||
1332 			    (status & VR_ISR_TX_ABRT)) {
1333 				ifp->if_oerrors++;
1334 				if (sc->vr_cdata.vr_tx_head != NULL) {
1335 					VR_SETBIT16(sc, VR_COMMAND,
1336 					    VR_CMD_TX_ON);
1337 					VR_SETBIT16(sc, VR_COMMAND,
1338 					    VR_CMD_TX_GO);
1339 				}
1340 			} else
1341 				vr_txeoc(sc);
1342 		}
1343 	}
1344 
1345 	/* Re-enable interrupts. */
1346 	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
1347 
1348 	if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
1349 		vr_start(ifp);
1350 	}
1351 
1352 	return (claimed);
1353 }
1354 
1355 /*
1356  * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
1357  * pointers to the fragment pointers.
1358  */
1359 int
1360 vr_encap(sc, c, m_head)
1361 	struct vr_softc		*sc;
1362 	struct vr_chain		*c;
1363 	struct mbuf		*m_head;
1364 {
1365 	struct vr_desc		*f = NULL;
1366 	struct mbuf		*m = m_head;
1367 	struct mbuf		*m_new = NULL;
1368 
1369 	m = m_head;
1370 
1371 	MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1372 	if (m_new == NULL)
1373 		return (1);
1374 	if (m_head->m_pkthdr.len > MHLEN) {
1375 		MCLGET(m_new, M_DONTWAIT);
1376 		if (!(m_new->m_flags & M_EXT)) {
1377 			m_freem(m_new);
1378 			return (1);
1379 		}
1380 	}
1381 	m_copydata(m_head, 0, m_head->m_pkthdr.len, mtod(m_new, caddr_t));
1382 	m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
1383 
1384 	/*
1385 	 * The Rhine chip doesn't auto-pad, so we have to make
1386 	 * sure to pad short frames out to the minimum frame length
1387 	 * ourselves.
1388 	 */
1389 	if (m_new->m_len < VR_MIN_FRAMELEN) {
1390 		/* data field should be padded with octets of zero */
1391 		bzero(&m_new->m_data[m_new->m_len],
1392 		    VR_MIN_FRAMELEN-m_new->m_len);
1393 		m_new->m_pkthdr.len += VR_MIN_FRAMELEN - m_new->m_len;
1394 		m_new->m_len = m_new->m_pkthdr.len;
1395 	}
1396 
1397 	if (bus_dmamap_load_mbuf(sc->sc_dmat, c->vr_map, m_new,
1398 	    BUS_DMA_NOWAIT | BUS_DMA_WRITE)) {
1399 		m_freem(m_new);
1400 		return (1);
1401 	}
1402 	bus_dmamap_sync(sc->sc_dmat, c->vr_map, 0, c->vr_map->dm_mapsize,
1403 	    BUS_DMASYNC_PREWRITE);
1404 
1405 	m_freem(m_head);
1406 
1407 	f = c->vr_ptr;
1408 	f->vr_data = htole32(c->vr_map->dm_segs[0].ds_addr);
1409 	f->vr_ctl = htole32(c->vr_map->dm_mapsize);
1410 	f->vr_ctl |= htole32(VR_TXCTL_TLINK|VR_TXCTL_FIRSTFRAG);
1411 	f->vr_status = htole32(0);
1412 
1413 	c->vr_mbuf = m_new;
1414 	c->vr_ptr->vr_ctl |= htole32(VR_TXCTL_LASTFRAG|VR_TXCTL_FINT);
1415 	c->vr_ptr->vr_next = htole32(c->vr_nextdesc->vr_paddr);
1416 
1417 	return (0);
1418 }
1419 
1420 /*
1421  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
1422  * to the mbuf data regions directly in the transmit lists. We also save a
1423  * copy of the pointers since the transmit list fragment pointers are
1424  * physical addresses.
1425  */
1426 
void
vr_start(ifp)
	struct ifnet		*ifp;
{
	struct vr_softc		*sc;
	struct mbuf		*m_head = NULL;
	struct vr_chain		*cur_tx = NULL, *start_tx, *prev_tx;

	sc = ifp->if_softc;

	/*
	 * Check for an available queue slot. If there are none,
	 * punt.
	 */
	if (sc->vr_cdata.vr_tx_free->vr_mbuf != NULL) {
		return;
	}

	/* Remember the first descriptor we fill; its OWN bit is set last. */
	start_tx = sc->vr_cdata.vr_tx_free;

	/* Fill free descriptors from the send queue until either runs out. */
	while(sc->vr_cdata.vr_tx_free->vr_mbuf == NULL) {
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/* Pick a descriptor off the free list. */
		prev_tx = cur_tx;
		cur_tx = sc->vr_cdata.vr_tx_free;
		sc->vr_cdata.vr_tx_free = cur_tx->vr_nextdesc;

		/* Pack the data into the descriptor. */
		if (vr_encap(sc, cur_tx, m_head)) {
			/* Rollback, send what we were able to encap. */
			if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
				/* Can't requeue under ALTQ; drop it. */
				m_freem(m_head);
			} else {
				IF_PREPEND(&ifp->if_snd, m_head);
			}
			/* Return the unused descriptor to the free list. */
			sc->vr_cdata.vr_tx_free = cur_tx;
			cur_tx = prev_tx;
			break;
		}

		/*
		 * Hand every descriptor except the first over to the
		 * chip now; start_tx's OWN bit is set below only after
		 * the whole batch is linked, so the chip can't race us.
		 */
		if (cur_tx != start_tx)
			VR_TXOWN(cur_tx) = htole32(VR_TXSTAT_OWN);

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, cur_tx->vr_mbuf);
#endif
		VR_TXOWN(cur_tx) = htole32(VR_TXSTAT_OWN);
	}

	/*
	 * If there are no frames queued, bail.
	 */
	if (cur_tx == NULL)
		return;

	sc->vr_cdata.vr_tx_tail = cur_tx;

	if (sc->vr_cdata.vr_tx_head == NULL)
		sc->vr_cdata.vr_tx_head = start_tx;

	/* Flush the descriptor writes before telling the chip to look. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_listmap, 0,
	    sc->sc_listmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	/* Tell the chip to start transmitting. */
	VR_SETBIT16(sc, VR_COMMAND, /*VR_CMD_TX_ON|*/VR_CMD_TX_GO);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}
1507 
/*
 * Initialize the adapter: reset, program station address and DMA/FIFO
 * thresholds, build the RX/TX rings, set the RX filter, then start
 * the receiver/transmitter and enable interrupts.
 */
void
vr_init(xsc)
	void			*xsc;
{
	struct vr_softc		*sc = xsc;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	struct mii_data		*mii = &sc->sc_mii;
	int			s, i;

	s = splimp();

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	vr_stop(sc);
	vr_reset(sc);

	/*
	 * Set our station address.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VR_PAR0 + i, sc->arpcom.ac_enaddr[i]);

	/* Set DMA size */
	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_DMA_LENGTH);
	VR_SETBIT(sc, VR_BCR0, VR_BCR0_DMA_STORENFWD);

	/*
	 * BCR0 and BCR1 can override the RXCFG and TXCFG registers,
	 * so we must set both.
	 */
	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_RX_THRESH);
	VR_SETBIT(sc, VR_BCR0, VR_BCR0_RXTHRESH128BYTES);

	VR_CLRBIT(sc, VR_BCR1, VR_BCR1_TX_THRESH);
	VR_SETBIT(sc, VR_BCR1, VR_BCR1_TXTHRESHSTORENFWD);

	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_128BYTES);

	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
	VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD);

	/* Init circular RX list. */
	if (vr_list_rx_init(sc) == ENOBUFS) {
		printf("%s: initialization failed: no memory for rx buffers\n",
		    sc->sc_dev.dv_xname);
		vr_stop(sc);
		splx(s);
		return;
	}

	/*
	 * Init tx descriptors.
	 */
	if (vr_list_tx_init(sc) == ENOBUFS) {
		printf("%s: initialization failed: no memory for tx buffers\n",
		    sc->sc_dev.dv_xname);
		vr_stop(sc);
		splx(s);
		return;
	}

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
	else
		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);

	/* Set capture broadcast bit to capture broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST)
		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
	else
		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);

	/*
	 * Program the multicast filter, if necessary.
	 */
	vr_setmulti(sc);

	/*
	 * Load the address of the RX list.
	 */
	CSR_WRITE_4(sc, VR_RXADDR, sc->vr_cdata.vr_rx_head->vr_paddr);

	/* Enable receiver and transmitter. */
	CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START|
				    VR_CMD_TX_ON|VR_CMD_RX_ON|
				    VR_CMD_RX_GO);

	/* Point the chip at the start of the TX descriptor ring. */
	CSR_WRITE_4(sc, VR_TXADDR, sc->sc_listmap->dm_segs[0].ds_addr +
	    offsetof(struct vr_list_data, vr_tx_list[0]));

	/*
	 * Enable interrupts.
	 */
	/* Clear any stale pending causes first. */
	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

	/* Restore state of BMCR */
	mii_mediachg(mii);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Start the once-a-second tick if it isn't already running. */
	if (!timeout_pending(&sc->sc_to))
		timeout_add(&sc->sc_to, hz);

	splx(s);
}
1618 
1619 /*
1620  * Set media options.
1621  */
1622 int
1623 vr_ifmedia_upd(ifp)
1624 	struct ifnet		*ifp;
1625 {
1626 	struct vr_softc		*sc = ifp->if_softc;
1627 
1628 	if (ifp->if_flags & IFF_UP)
1629 		vr_init(sc);
1630 
1631 	return(0);
1632 }
1633 
1634 /*
1635  * Report current media status.
1636  */
1637 void
1638 vr_ifmedia_sts(ifp, ifmr)
1639 	struct ifnet		*ifp;
1640 	struct ifmediareq	*ifmr;
1641 {
1642 	struct vr_softc		*sc = ifp->if_softc;
1643 	struct mii_data		*mii = &sc->sc_mii;
1644 
1645 	mii_pollstat(mii);
1646 	ifmr->ifm_active = mii->mii_media_active;
1647 	ifmr->ifm_status = mii->mii_media_status;
1648 }
1649 
/*
 * Handle socket ioctls for the interface: address assignment, up/down
 * flag changes, multicast list updates and media selection.  Runs at
 * splimp() to exclude the interrupt handler.
 */
int
vr_ioctl(ifp, command, data)
	struct ifnet		*ifp;
	u_long			command;
	caddr_t			data;
{
	struct vr_softc		*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	int			s, error = 0;
	struct ifaddr *ifa = (struct ifaddr *)data;

	s = splimp();

	/* Let the generic ethernet layer have first crack. */
	if ((error = ether_ioctl(ifp, &sc->arpcom, command, data)) > 0) {
		splx(s);
		return error;
	}

	switch(command) {
	case SIOCSIFADDR:
		/* Assigning an address implicitly brings the interface up. */
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			vr_init(sc);
			arp_ifinit(&sc->arpcom, ifa);
			break;
#endif	/* INET */
		default:
			vr_init(sc);
			break;
		}
		break;
	case SIOCSIFFLAGS:
		/* Start or stop the chip to match the IFF_UP flag. */
		if (ifp->if_flags & IFF_UP) {
			vr_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				vr_stop(sc);
		}
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (command == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->arpcom) :
		    ether_delmulti(ifr, &sc->arpcom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware
			 * filter accordingly.
			 */
			vr_setmulti(sc);
			error = 0;
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		/* Media requests are delegated to the MII layer. */
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
		break;
	default:
		error = EINVAL;
		break;
	}

	splx(s);

	return(error);
}
1720 
1721 void
1722 vr_watchdog(ifp)
1723 	struct ifnet		*ifp;
1724 {
1725 	struct vr_softc		*sc;
1726 
1727 	sc = ifp->if_softc;
1728 
1729 	ifp->if_oerrors++;
1730 	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
1731 
1732 	vr_stop(sc);
1733 	vr_reset(sc);
1734 	vr_init(sc);
1735 
1736 	if (!IFQ_IS_EMPTY(&ifp->if_snd))
1737 		vr_start(ifp);
1738 
1739 	return;
1740 }
1741 
1742 /*
1743  * Stop the adapter and free any mbufs allocated to the
1744  * RX and TX lists.
1745  */
1746 void
1747 vr_stop(sc)
1748 	struct vr_softc		*sc;
1749 {
1750 	int		i;
1751 	struct ifnet	*ifp;
1752 	bus_dmamap_t	map;
1753 
1754 	ifp = &sc->arpcom.ac_if;
1755 	ifp->if_timer = 0;
1756 
1757 	if (timeout_pending(&sc->sc_to))
1758 		timeout_del(&sc->sc_to);
1759 
1760 	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP);
1761 	VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON));
1762 	CSR_WRITE_2(sc, VR_IMR, 0x0000);
1763 	CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
1764 	CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);
1765 
1766 	/*
1767 	 * Free data in the RX lists.
1768 	 */
1769 	for (i = 0; i < VR_RX_LIST_CNT; i++) {
1770 
1771 		if (sc->vr_cdata.vr_rx_chain[i].vr_buf != NULL) {
1772 			free(sc->vr_cdata.vr_rx_chain[i].vr_buf, M_DEVBUF);
1773 			sc->vr_cdata.vr_rx_chain[i].vr_buf = NULL;
1774 		}
1775 
1776 		map = sc->vr_cdata.vr_rx_chain[i].vr_map;
1777 		if (map != NULL) {
1778 			if (map->dm_segs > 0)
1779 				bus_dmamap_unload(sc->sc_dmat, map);
1780 			bus_dmamap_destroy(sc->sc_dmat, map);
1781 			sc->vr_cdata.vr_rx_chain[i].vr_map = NULL;
1782 		}
1783 	}
1784 	bzero((char *)&sc->vr_ldata->vr_rx_list,
1785 		sizeof(sc->vr_ldata->vr_rx_list));
1786 
1787 	/*
1788 	 * Free the TX list buffers.
1789 	 */
1790 	for (i = 0; i < VR_TX_LIST_CNT; i++) {
1791 		bus_dmamap_t map;
1792 
1793 		if (sc->vr_cdata.vr_tx_chain[i].vr_mbuf != NULL) {
1794 			m_freem(sc->vr_cdata.vr_tx_chain[i].vr_mbuf);
1795 			sc->vr_cdata.vr_tx_chain[i].vr_mbuf = NULL;
1796 		}
1797 		map = sc->vr_cdata.vr_tx_chain[i].vr_map;
1798 		if (map != NULL) {
1799 			if (map->dm_nsegs > 0)
1800 				bus_dmamap_unload(sc->sc_dmat, map);
1801 			bus_dmamap_destroy(sc->sc_dmat, map);
1802 			sc->vr_cdata.vr_tx_chain[i].vr_map = NULL;
1803 		}
1804 	}
1805 
1806 	bzero((char *)&sc->vr_ldata->vr_tx_list,
1807 		sizeof(sc->vr_ldata->vr_tx_list));
1808 
1809 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1810 
1811 	return;
1812 }
1813 
1814 /*
1815  * Stop all chip I/O so that the kernel's probe routines don't
1816  * get confused by errant DMAs when rebooting.
1817  */
/*
 * Shutdown hook: quiesce all chip I/O so errant DMA can't confuse the
 * kernel's probe routines across a reboot.
 */
void
vr_shutdown(arg)
	void			*arg;
{
	vr_stop((struct vr_softc *)arg);
}
1826