1 /* $NetBSD: if_vge.c,v 1.40 2008/02/07 01:21:58 dyoung Exp $ */
2 
3 /*-
4  * Copyright (c) 2004
5  *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by Bill Paul.
18  * 4. Neither the name of the author nor the names of any co-contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  * FreeBSD: src/sys/dev/vge/if_vge.c,v 1.5 2005/02/07 19:39:29 glebius Exp
35  */
36 
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: if_vge.c,v 1.40 2008/02/07 01:21:58 dyoung Exp $");
39 
40 /*
41  * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver.
42  *
43  * Written by Bill Paul <wpaul@windriver.com>
44  * Senior Networking Software Engineer
45  * Wind River Systems
46  */
47 
48 /*
49  * The VIA Networking VT6122 is a 32bit, 33/66 MHz PCI device that
50  * combines a tri-speed ethernet MAC and PHY, with the following
51  * features:
52  *
53  *	o Jumbo frame support up to 16K
54  *	o Transmit and receive flow control
55  *	o IPv4 checksum offload
56  *	o VLAN tag insertion and stripping
57  *	o TCP large send
58  *	o 64-bit multicast hash table filter
59  *	o 64 entry CAM filter
60  *	o 16K RX FIFO and 48K TX FIFO memory
61  *	o Interrupt moderation
62  *
63  * The VT6122 supports up to four transmit DMA queues. The descriptors
64  * in the transmit ring can address up to 7 data fragments; frames which
65  * span more than 7 data buffers must be coalesced, but in general the
66  * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments
67  * long. The receive descriptors address only a single buffer.
68  *
69  * There are two peculiar design issues with the VT6122. One is that
70  * receive data buffers must be aligned on a 32-bit boundary. This is
71  * not a problem where the VT6122 is used as a LOM device in x86-based
72  * systems, but on architectures that generate unaligned access traps, we
73  * have to do some copying.
74  *
75  * The other issue has to do with the way 64-bit addresses are handled.
76  * The DMA descriptors only allow you to specify 48 bits of addressing
77  * information. The remaining 16 bits are specified using one of the
78  * I/O registers. If you only have a 32-bit system, then this isn't
79  * an issue, but if you have a 64-bit system and more than 4GB of
80  * memory, you have to make sure your network data buffers reside
81  * in the same 48-bit 'segment.'
82  *
83  * Special thanks to Ryan Fu at VIA Networking for providing documentation
84  * and sample NICs for testing.
85  */
86 
87 #include "bpfilter.h"
88 
89 #include <sys/param.h>
90 #include <sys/endian.h>
91 #include <sys/systm.h>
92 #include <sys/device.h>
93 #include <sys/sockio.h>
94 #include <sys/mbuf.h>
95 #include <sys/malloc.h>
96 #include <sys/kernel.h>
97 #include <sys/socket.h>
98 
99 #include <net/if.h>
100 #include <net/if_arp.h>
101 #include <net/if_ether.h>
102 #include <net/if_dl.h>
103 #include <net/if_media.h>
104 
105 #include <net/bpf.h>
106 
107 #include <sys/bus.h>
108 
109 #include <dev/mii/mii.h>
110 #include <dev/mii/miivar.h>
111 
112 #include <dev/pci/pcireg.h>
113 #include <dev/pci/pcivar.h>
114 #include <dev/pci/pcidevs.h>
115 
116 #include <dev/pci/if_vgereg.h>
117 
118 #define VGE_JUMBO_MTU		9000
119 
120 #define VGE_IFQ_MAXLEN		64
121 
122 #define VGE_RING_ALIGN		256
123 
124 #define VGE_NTXDESC		256
125 #define VGE_NTXDESC_MASK	(VGE_NTXDESC - 1)
126 #define VGE_NEXT_TXDESC(x)	((x + 1) & VGE_NTXDESC_MASK)
127 #define VGE_PREV_TXDESC(x)	((x - 1) & VGE_NTXDESC_MASK)
128 
129 #define VGE_NRXDESC		256	/* Must be a multiple of 4!! */
130 #define VGE_NRXDESC_MASK	(VGE_NRXDESC - 1)
131 #define VGE_NEXT_RXDESC(x)	((x + 1) & VGE_NRXDESC_MASK)
132 #define VGE_PREV_RXDESC(x)	((x - 1) & VGE_NRXDESC_MASK)
133 
134 #define VGE_ADDR_LO(y)		((uint64_t)(y) & 0xFFFFFFFF)
135 #define VGE_ADDR_HI(y)		((uint64_t)(y) >> 32)
136 #define VGE_BUFLEN(y)		((y) & 0x7FFF)
137 #define ETHER_PAD_LEN		(ETHER_MIN_LEN - ETHER_CRC_LEN)
138 
139 #define VGE_POWER_MANAGEMENT	0	/* disabled for now */
140 
141 /*
142  * Mbuf adjust factor to force 32-bit alignment of IP header.
143  * Drivers should pad ETHER_ALIGN bytes when setting up a
144  * RX mbuf so the upper layers get the IP header properly aligned
145  * past the 14-byte Ethernet header.
146  *
147  * See also comment in vge_encap().
148  */
149 #define ETHER_ALIGN		2
150 
151 #ifdef __NO_STRICT_ALIGNMENT
152 #define VGE_RX_BUFSIZE		MCLBYTES
153 #else
154 #define VGE_RX_PAD		sizeof(uint32_t)
155 #define VGE_RX_BUFSIZE		(MCLBYTES - VGE_RX_PAD)
156 #endif
157 
158 /*
159  * Control structures are DMA'd to the vge chip. We allocate them in
160  * a single clump that maps to a single DMA segment to make several things
161  * easier.
162  */
163 struct vge_control_data {
164 	/* TX descriptors */
165 	struct vge_txdesc	vcd_txdescs[VGE_NTXDESC];
166 	/* RX descriptors */
167 	struct vge_rxdesc	vcd_rxdescs[VGE_NRXDESC];
168 	/* dummy data for TX padding */
169 	uint8_t			vcd_pad[ETHER_PAD_LEN];
170 };
171 
172 #define VGE_CDOFF(x)	offsetof(struct vge_control_data, x)
173 #define VGE_CDTXOFF(x)	VGE_CDOFF(vcd_txdescs[(x)])
174 #define VGE_CDRXOFF(x)	VGE_CDOFF(vcd_rxdescs[(x)])
175 #define VGE_CDPADOFF()	VGE_CDOFF(vcd_pad[0])
176 
177 /*
178  * Software state for TX jobs.
179  */
180 struct vge_txsoft {
181 	struct mbuf	*txs_mbuf;		/* head of our mbuf chain */
182 	bus_dmamap_t	txs_dmamap;		/* our DMA map */
183 };
184 
185 /*
186  * Software state for RX jobs.
187  */
188 struct vge_rxsoft {
189 	struct mbuf	*rxs_mbuf;		/* head of our mbuf chain */
190 	bus_dmamap_t	rxs_dmamap;		/* our DMA map */
191 };
192 
193 
194 struct vge_softc {
195 	struct device		sc_dev;
196 
197 	bus_space_tag_t		sc_bst;		/* bus space tag */
198 	bus_space_handle_t	sc_bsh;		/* bus space handle */
199 	bus_dma_tag_t		sc_dmat;
200 
201 	struct ethercom		sc_ethercom;	/* interface info */
202 	uint8_t			sc_eaddr[ETHER_ADDR_LEN];
203 
204 	void			*sc_intrhand;
205 	struct mii_data		sc_mii;
206 	uint8_t			sc_type;
207 	int			sc_if_flags;
208 	int			sc_link;
209 	int			sc_camidx;
210 	callout_t		sc_timeout;
211 
212 	bus_dmamap_t		sc_cddmamap;
213 #define sc_cddma		sc_cddmamap->dm_segs[0].ds_addr
214 
215 	struct vge_txsoft	sc_txsoft[VGE_NTXDESC];
216 	struct vge_rxsoft	sc_rxsoft[VGE_NRXDESC];
217 	struct vge_control_data	*sc_control_data;
218 #define sc_txdescs		sc_control_data->vcd_txdescs
219 #define sc_rxdescs		sc_control_data->vcd_rxdescs
220 
221 	int			sc_tx_prodidx;
222 	int			sc_tx_considx;
223 	int			sc_tx_free;
224 
225 	struct mbuf		*sc_rx_mhead;
226 	struct mbuf		*sc_rx_mtail;
227 	int			sc_rx_prodidx;
228 	int			sc_rx_consumed;
229 
230 	int			sc_suspended;	/* 0 = normal  1 = suspended */
231 	uint32_t		sc_saved_maps[5];	/* pci data */
232 	uint32_t		sc_saved_biosaddr;
233 	uint8_t			sc_saved_intline;
234 	uint8_t			sc_saved_cachelnsz;
235 	uint8_t			sc_saved_lattimer;
236 };
237 
238 #define VGE_CDTXADDR(sc, x)	((sc)->sc_cddma + VGE_CDTXOFF(x))
239 #define VGE_CDRXADDR(sc, x)	((sc)->sc_cddma + VGE_CDRXOFF(x))
240 #define VGE_CDPADADDR(sc)	((sc)->sc_cddma + VGE_CDPADOFF())
241 
242 #define VGE_TXDESCSYNC(sc, idx, ops)					\
243 	bus_dmamap_sync((sc)->sc_dmat,(sc)->sc_cddmamap,		\
244 	    VGE_CDTXOFF(idx),						\
245 	    offsetof(struct vge_txdesc, td_frag[0]),			\
246 	    (ops))
247 #define VGE_TXFRAGSYNC(sc, idx, nsegs, ops)				\
248 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
249 	    VGE_CDTXOFF(idx) +						\
250 	    offsetof(struct vge_txdesc, td_frag[0]),			\
251 	    sizeof(struct vge_txfrag) * (nsegs),			\
252 	    (ops))
253 #define VGE_RXDESCSYNC(sc, idx, ops)					\
254 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
255 	    VGE_CDRXOFF(idx),						\
256 	    sizeof(struct vge_rxdesc),					\
257 	    (ops))
258 
259 /*
260  * register space access macros
261  */
262 #define CSR_WRITE_4(sc, reg, val)	\
263 	bus_space_write_4((sc)->sc_bst, (sc)->sc_bsh, (reg), (val))
264 #define CSR_WRITE_2(sc, reg, val)	\
265 	bus_space_write_2((sc)->sc_bst, (sc)->sc_bsh, (reg), (val))
266 #define CSR_WRITE_1(sc, reg, val)	\
267 	bus_space_write_1((sc)->sc_bst, (sc)->sc_bsh, (reg), (val))
268 
269 #define CSR_READ_4(sc, reg)		\
270 	bus_space_read_4((sc)->sc_bst, (sc)->sc_bsh, (reg))
271 #define CSR_READ_2(sc, reg)		\
272 	bus_space_read_2((sc)->sc_bst, (sc)->sc_bsh, (reg))
273 #define CSR_READ_1(sc, reg)		\
274 	bus_space_read_1((sc)->sc_bst, (sc)->sc_bsh, (reg))
275 
276 #define CSR_SETBIT_1(sc, reg, x)	\
277 	CSR_WRITE_1((sc), (reg), CSR_READ_1((sc), (reg)) | (x))
278 #define CSR_SETBIT_2(sc, reg, x)	\
279 	CSR_WRITE_2((sc), (reg), CSR_READ_2((sc), (reg)) | (x))
280 #define CSR_SETBIT_4(sc, reg, x)	\
281 	CSR_WRITE_4((sc), (reg), CSR_READ_4((sc), (reg)) | (x))
282 
283 #define CSR_CLRBIT_1(sc, reg, x)	\
284 	CSR_WRITE_1((sc), (reg), CSR_READ_1((sc), (reg)) & ~(x))
285 #define CSR_CLRBIT_2(sc, reg, x)	\
286 	CSR_WRITE_2((sc), (reg), CSR_READ_2((sc), (reg)) & ~(x))
287 #define CSR_CLRBIT_4(sc, reg, x)	\
288 	CSR_WRITE_4((sc), (reg), CSR_READ_4((sc), (reg)) & ~(x))
289 
290 #define VGE_TIMEOUT		10000
291 
292 #define VGE_PCI_LOIO             0x10
293 #define VGE_PCI_LOMEM            0x14
294 
295 static inline void vge_set_txaddr(struct vge_txfrag *, bus_addr_t);
296 static inline void vge_set_rxaddr(struct vge_rxdesc *, bus_addr_t);
297 
298 static int vge_match(struct device *, struct cfdata *, void *);
299 static void vge_attach(struct device *, struct device *, void *);
300 
301 static int vge_encap(struct vge_softc *, struct mbuf *, int);
302 
303 static int vge_allocmem(struct vge_softc *);
304 static int vge_newbuf(struct vge_softc *, int, struct mbuf *);
305 #ifndef __NO_STRICT_ALIGNMENT
306 static inline void vge_fixup_rx(struct mbuf *);
307 #endif
308 static void vge_rxeof(struct vge_softc *);
309 static void vge_txeof(struct vge_softc *);
310 static int vge_intr(void *);
311 static void vge_tick(void *);
312 static void vge_start(struct ifnet *);
313 static int vge_ioctl(struct ifnet *, u_long, void *);
314 static int vge_init(struct ifnet *);
315 static void vge_stop(struct vge_softc *);
316 static void vge_watchdog(struct ifnet *);
317 #if VGE_POWER_MANAGEMENT
318 static int vge_suspend(struct device *);
319 static int vge_resume(struct device *);
320 #endif
321 static void vge_shutdown(void *);
322 
323 static uint16_t vge_read_eeprom(struct vge_softc *, int);
324 
325 static void vge_miipoll_start(struct vge_softc *);
326 static void vge_miipoll_stop(struct vge_softc *);
327 static int vge_miibus_readreg(struct device *, int, int);
328 static void vge_miibus_writereg(struct device *, int, int, int);
329 static void vge_miibus_statchg(struct device *);
330 
331 static void vge_cam_clear(struct vge_softc *);
332 static int vge_cam_set(struct vge_softc *, uint8_t *);
333 static void vge_setmulti(struct vge_softc *);
334 static void vge_reset(struct vge_softc *);
335 
336 CFATTACH_DECL(vge, sizeof(struct vge_softc),
337     vge_match, vge_attach, NULL, NULL);
338 
339 static inline void
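/*
 * Load a DMA address into a TX fragment or RX descriptor.  The
 * descriptors carry 48-bit DMA addresses: the low 32 bits go in the
 * 'addrlo' field and the next 16 bits in 'addrhi'.
 */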
340 vge_set_txaddr(struct vge_txfrag *f, bus_addr_t daddr)
341 {
342 
343 	f->tf_addrlo = htole32((uint32_t)daddr);
344 	if (sizeof(bus_addr_t) == sizeof(uint64_t))
345 		f->tf_addrhi = htole16(((uint64_t)daddr >> 32) & 0xFFFF);
346 	else
347 		f->tf_addrhi = 0;
348 }
349 
350 static inline void
351 vge_set_rxaddr(struct vge_rxdesc *rxd, bus_addr_t daddr)
352 {
353 
354 	rxd->rd_addrlo = htole32((uint32_t)daddr);
355 	if (sizeof(bus_addr_t) == sizeof(uint64_t))
356 		rxd->rd_addrhi = htole16(((uint64_t)daddr >> 32) & 0xFFFF);
357 	else
358 		rxd->rd_addrhi = 0;
359 }
360 
361 /*
362  * Defragment mbuf chain contents to be as linear as possible.
363  * Returns a new mbuf chain on success, NULL on failure. The old
364  * mbuf chain is not freed; the caller remains responsible for it.
365  * XXX temporary until there is a generic function doing this.
366  */
367 #define m_defrag	vge_m_defrag
368 struct mbuf * vge_m_defrag(struct mbuf *, int);
369 
370 struct mbuf *
371 vge_m_defrag(struct mbuf *mold, int flags)
372 {
373 	struct mbuf *m0, *mn, *n;
374 	size_t sz = mold->m_pkthdr.len;
375 
376 #ifdef DIAGNOSTIC
377 	if ((mold->m_flags & M_PKTHDR) == 0)
378 		panic("m_defrag: not a mbuf chain header");
379 #endif
380 
381 	MGETHDR(m0, flags, MT_DATA);
382 	if (m0 == NULL)
383 		return NULL;
384 	m0->m_pkthdr.len = mold->m_pkthdr.len;
385 	mn = m0;
386 
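	/*
	 * Copy the old chain into as few new mbufs as possible,
	 * attaching a cluster whenever the remaining data won't fit
	 * in an ordinary mbuf.
	 */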
387 	do {
388 		if (sz > MHLEN) {
389 			MCLGET(mn, flags);
390 			if ((mn->m_flags & M_EXT) == 0) {
391 				m_freem(m0);
392 				return NULL;
393 			}
394 		}
395 
396 		mn->m_len = MIN(sz, MCLBYTES);
397 
398 		m_copydata(mold, mold->m_pkthdr.len - sz, mn->m_len,
399 		     mtod(mn, void *));
400 
401 		sz -= mn->m_len;
402 
403 		if (sz > 0) {
404 			/* need more mbufs */
405 			MGET(n, flags, MT_DATA);
406 			if (n == NULL) {
407 				m_freem(m0);
408 				return NULL;
409 			}
410 
411 			mn->m_next = n;
412 			mn = n;
413 		}
414 	} while (sz > 0);
415 
416 	return m0;
417 }
418 
419 /*
420  * Read a word of data stored in the EEPROM at address 'addr.'
421  */
422 static uint16_t
423 vge_read_eeprom(struct vge_softc *sc, int addr)
424 {
425 	int i;
426 	uint16_t word = 0;
427 
428 	/*
429 	 * Enter EEPROM embedded programming mode. In order to
430 	 * access the EEPROM at all, we first have to set the
431 	 * EELOAD bit in the CHIPCFG2 register.
432 	 */
433 	CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
434 	CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
435 
436 	/* Select the address of the word we want to read */
437 	CSR_WRITE_1(sc, VGE_EEADDR, addr);
438 
439 	/* Issue read command */
440 	CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD);
441 
442 	/* Wait for the done bit to be set. */
443 	for (i = 0; i < VGE_TIMEOUT; i++) {
444 		if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE)
445 			break;
446 	}
447 
448 	if (i == VGE_TIMEOUT) {
449 		aprint_error("%s: EEPROM read timed out\n",
450 		    sc->sc_dev.dv_xname);
451 		return 0;
452 	}
453 
454 	/* Read the result */
455 	word = CSR_READ_2(sc, VGE_EERDDAT);
456 
457 	/* Turn off EEPROM access mode. */
458 	CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/);
459 	CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD);
460 
461 	return word;
462 }
463 
464 static void
465 vge_miipoll_stop(struct vge_softc *sc)
466 {
467 	int i;
468 
469 	CSR_WRITE_1(sc, VGE_MIICMD, 0);
470 
471 	for (i = 0; i < VGE_TIMEOUT; i++) {
472 		DELAY(1);
473 		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
474 			break;
475 	}
476 
477 	if (i == VGE_TIMEOUT) {
478 		aprint_error("%s: failed to idle MII autopoll\n",
479 		    sc->sc_dev.dv_xname);
480 	}
481 }
482 
483 static void
484 vge_miipoll_start(struct vge_softc *sc)
485 {
486 	int i;
487 
488 	/* First, make sure we're idle. */
489 
490 	CSR_WRITE_1(sc, VGE_MIICMD, 0);
491 	CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL);
492 
493 	for (i = 0; i < VGE_TIMEOUT; i++) {
494 		DELAY(1);
495 		if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL)
496 			break;
497 	}
498 
499 	if (i == VGE_TIMEOUT) {
500 		aprint_error("%s: failed to idle MII autopoll\n",
501 		    sc->sc_dev.dv_xname);
502 		return;
503 	}
504 
505 	/* Now enable auto poll mode. */
506 
507 	CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO);
508 
509 	/* And make sure it started. */
510 
511 	for (i = 0; i < VGE_TIMEOUT; i++) {
512 		DELAY(1);
513 		if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0)
514 			break;
515 	}
516 
517 	if (i == VGE_TIMEOUT) {
518 		aprint_error("%s: failed to start MII autopoll\n",
519 		    sc->sc_dev.dv_xname);
520 	}
521 }
522 
523 static int
524 vge_miibus_readreg(struct device *dev, int phy, int reg)
525 {
526 	struct vge_softc *sc;
527 	int i, s;
528 	uint16_t rval;
529 
530 	sc = (void *)dev;
531 	rval = 0;
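	/* The chip services only the PHY address latched in VGE_MIICFG. */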
532 	if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
533 		return 0;
534 
535 	s = splnet();
536 	vge_miipoll_stop(sc);
537 
538 	/* Specify the register we want to read. */
539 	CSR_WRITE_1(sc, VGE_MIIADDR, reg);
540 
541 	/* Issue read command. */
542 	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD);
543 
544 	/* Wait for the read command bit to self-clear. */
545 	for (i = 0; i < VGE_TIMEOUT; i++) {
546 		DELAY(1);
547 		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0)
548 			break;
549 	}
550 
551 	if (i == VGE_TIMEOUT)
552 		aprint_error("%s: MII read timed out\n", sc->sc_dev.dv_xname);
553 	else
554 		rval = CSR_READ_2(sc, VGE_MIIDATA);
555 
556 	vge_miipoll_start(sc);
557 	splx(s);
558 
559 	return rval;
560 }
561 
562 static void
563 vge_miibus_writereg(struct device *dev, int phy, int reg, int data)
564 {
565 	struct vge_softc *sc;
566 	int i, s;
567 
568 	sc = (void *)dev;
569 	if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
570 		return;
571 
572 	s = splnet();
573 	vge_miipoll_stop(sc);
574 
575 	/* Specify the register we want to write. */
576 	CSR_WRITE_1(sc, VGE_MIIADDR, reg);
577 
578 	/* Specify the data we want to write. */
579 	CSR_WRITE_2(sc, VGE_MIIDATA, data);
580 
581 	/* Issue write command. */
582 	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD);
583 
584 	/* Wait for the write command bit to self-clear. */
585 	for (i = 0; i < VGE_TIMEOUT; i++) {
586 		DELAY(1);
587 		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0)
588 			break;
589 	}
590 
591 	if (i == VGE_TIMEOUT) {
592 		aprint_error("%s: MII write timed out\n", sc->sc_dev.dv_xname);
593 	}
594 
595 	vge_miipoll_start(sc);
596 	splx(s);
597 }
598 
599 static void
600 vge_cam_clear(struct vge_softc *sc)
601 {
602 	int i;
603 
604 	/*
605 	 * Turn off all the mask bits. This tells the chip
606 	 * that none of the entries in the CAM filter are valid.
607 	 * Desired entries will be enabled as we fill the filter in.
608 	 */
609 
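	/* Select the CAM mask page. */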
610 	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
611 	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
612 	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE);
613 	for (i = 0; i < 8; i++)
614 		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);
615 
616 	/* Clear the VLAN filter too. */
617 
618 	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0);
619 	for (i = 0; i < 8; i++)
620 		CSR_WRITE_1(sc, VGE_CAM0 + i, 0);
621 
622 	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
623 	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
624 	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
625 
626 	sc->sc_camidx = 0;
627 }
628 
629 static int
630 vge_cam_set(struct vge_softc *sc, uint8_t *addr)
631 {
632 	int i, error;
633 
634 	error = 0;
635 
636 	if (sc->sc_camidx == VGE_CAM_MAXADDRS)
637 		return ENOSPC;
638 
639 	/* Select the CAM data page. */
640 	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
641 	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA);
642 
643 	/* Set the filter entry we want to update and enable writing. */
644 	CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE | sc->sc_camidx);
645 
646 	/* Write the address to the CAM registers */
647 	for (i = 0; i < ETHER_ADDR_LEN; i++)
648 		CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]);
649 
650 	/* Issue a write command. */
651 	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE);
652 
653 	/* Wait for it to clear. */
654 	for (i = 0; i < VGE_TIMEOUT; i++) {
655 		DELAY(1);
656 		if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0)
657 			break;
658 	}
659 
660 	if (i == VGE_TIMEOUT) {
661 		aprint_error("%s: setting CAM filter failed\n",
662 		    sc->sc_dev.dv_xname);
663 		error = EIO;
664 		goto fail;
665 	}
666 
667 	/* Select the CAM mask page. */
668 	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
669 	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK);
670 
671 	/* Set the mask bit that enables this filter. */
672 	CSR_SETBIT_1(sc, VGE_CAM0 + (sc->sc_camidx / 8),
673 	    1 << (sc->sc_camidx & 7));
674 
675 	sc->sc_camidx++;
676 
677  fail:
678 	/* Turn off access to CAM. */
679 	CSR_WRITE_1(sc, VGE_CAMADDR, 0);
680 	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
681 	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
682 
683 	return error;
684 }
685 
686 /*
687  * Program the multicast filter. We use the 64-entry CAM filter
688  * for perfect filtering. If there are more than 64 multicast addresses,
689  * we use the hash filter instead.
690  */
691 static void
692 vge_setmulti(struct vge_softc *sc)
693 {
694 	struct ifnet *ifp;
695 	int error;
696 	uint32_t h, hashes[2] = { 0, 0 };
697 	struct ether_multi *enm;
698 	struct ether_multistep step;
699 
700 	error = 0;
701 	ifp = &sc->sc_ethercom.ec_if;
702 
703 	/* First, zot all the multicast entries. */
704 	vge_cam_clear(sc);
705 	CSR_WRITE_4(sc, VGE_MAR0, 0);
706 	CSR_WRITE_4(sc, VGE_MAR1, 0);
707 	ifp->if_flags &= ~IFF_ALLMULTI;
708 
709 	/*
710 	 * If the user wants allmulti or promisc mode, enable reception
711 	 * of all multicast frames.
712 	 */
713 	if (ifp->if_flags & IFF_PROMISC) {
714  allmulti:
715 		CSR_WRITE_4(sc, VGE_MAR0, 0xFFFFFFFF);
716 		CSR_WRITE_4(sc, VGE_MAR1, 0xFFFFFFFF);
717 		ifp->if_flags |= IFF_ALLMULTI;
718 		return;
719 	}
720 
721 	/* Now program new ones */
722 	ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
723 	while (enm != NULL) {
724 		/*
725 		 * If multicast range, fall back to ALLMULTI.
726 		 */
727 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
728 				ETHER_ADDR_LEN) != 0)
729 			goto allmulti;
730 
731 		error = vge_cam_set(sc, enm->enm_addrlo);
732 		if (error)
733 			break;
734 
735 		ETHER_NEXT_MULTI(step, enm);
736 	}
737 
738 	/* If there were too many addresses, use the hash filter. */
739 	if (error) {
740 		vge_cam_clear(sc);
741 
742 		ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm);
743 		while (enm != NULL) {
744 			/*
745 			 * If multicast range, fall back to ALLMULTI.
746 			 */
747 			if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
748 					ETHER_ADDR_LEN) != 0)
749 				goto allmulti;
750 
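			/*
			 * Hash on the upper 6 bits of the big-endian CRC:
			 * bit 5 selects MAR0 vs. MAR1 and the low 5 bits
			 * select the bit within that register.
			 */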
751 			h = ether_crc32_be(enm->enm_addrlo,
752 			    ETHER_ADDR_LEN) >> 26;
753 			hashes[h >> 5] |= 1 << (h & 0x1f);
754 
755 			ETHER_NEXT_MULTI(step, enm);
756 		}
757 
758 		CSR_WRITE_4(sc, VGE_MAR0, hashes[0]);
759 		CSR_WRITE_4(sc, VGE_MAR1, hashes[1]);
760 	}
761 }
762 
763 static void
764 vge_reset(struct vge_softc *sc)
765 {
766 	int i;
767 
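	/* Issue a soft reset and wait for the chip to complete it. */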
768 	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET);
769 
770 	for (i = 0; i < VGE_TIMEOUT; i++) {
771 		DELAY(5);
772 		if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0)
773 			break;
774 	}
775 
776 	if (i == VGE_TIMEOUT) {
777 		aprint_error("%s: soft reset timed out\n", sc->sc_dev.dv_xname);
778 		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE);
779 		DELAY(2000);
780 	}
781 
782 	DELAY(5000);
783 
784 	CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_RELOAD);
785 
786 	for (i = 0; i < VGE_TIMEOUT; i++) {
787 		DELAY(5);
788 		if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0)
789 			break;
790 	}
791 
792 	if (i == VGE_TIMEOUT) {
793 		aprint_error("%s: EEPROM reload timed out\n",
794 		    sc->sc_dev.dv_xname);
795 		return;
796 	}
797 
798 	/*
799 	 * On some machines, the first word read from the EEPROM can be
800 	 * corrupted, so read a dummy word here to work around it.
801 	 */
802 	(void)vge_read_eeprom(sc, 0);
803 
804 	CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI);
805 }
806 
807 /*
808  * Probe for a VIA gigabit chip. Check the PCI vendor and device
809  * IDs against our list and return a match if we find one.
810  */
811 static int
812 vge_match(struct device *parent, struct cfdata *match, void *aux)
813 {
814 	struct pci_attach_args *pa = aux;
815 
816 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_VIATECH
817 	    && PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_VIATECH_VT612X)
818 		return 1;
819 
820 	return 0;
821 }
822 
823 static int
824 vge_allocmem(struct vge_softc *sc)
825 {
826 	int error;
827 	int nseg;
828 	int i;
829 	bus_dma_segment_t seg;
830 
831 	/*
832 	 * Allocate memory for control data.
833 	 */
834 
835 	error = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct vge_control_data),
836 	     VGE_RING_ALIGN, 0, &seg, 1, &nseg, BUS_DMA_NOWAIT);
837 	if (error) {
838 		aprint_error("%s: could not allocate control data dma memory\n",
839 		    sc->sc_dev.dv_xname);
840 		goto fail_1;
841 	}
842 
843 	/* Map the memory to kernel VA space */
844 
845 	error = bus_dmamem_map(sc->sc_dmat, &seg, nseg,
846 	    sizeof(struct vge_control_data), (void **)&sc->sc_control_data,
847 	    BUS_DMA_NOWAIT);
848 	if (error) {
849 		aprint_error("%s: could not map control data dma memory\n",
850 		    sc->sc_dev.dv_xname);
851 		goto fail_2;
852 	}
853 	memset(sc->sc_control_data, 0, sizeof(struct vge_control_data));
854 
855 	/*
856 	 * Create map for control data.
857 	 */
858 	error = bus_dmamap_create(sc->sc_dmat,
859 	    sizeof(struct vge_control_data), 1,
860 	    sizeof(struct vge_control_data), 0, BUS_DMA_NOWAIT,
861 	    &sc->sc_cddmamap);
862 	if (error) {
863 		aprint_error("%s: could not create control data dmamap\n",
864 		    sc->sc_dev.dv_xname);
865 		goto fail_3;
866 	}
867 
868 	/* Load the map for the control data. */
869 	error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
870 	    sc->sc_control_data, sizeof(struct vge_control_data), NULL,
871 	    BUS_DMA_NOWAIT);
872 	if (error) {
873 		aprint_error("%s: could not load control data dma memory\n",
874 		    sc->sc_dev.dv_xname);
875 		goto fail_4;
876 	}
877 
878 	/* Create DMA maps for TX buffers */
879 
880 	for (i = 0; i < VGE_NTXDESC; i++) {
881 		error = bus_dmamap_create(sc->sc_dmat, VGE_TX_MAXLEN,
882 		    VGE_TX_FRAGS, VGE_TX_MAXLEN, 0, BUS_DMA_NOWAIT,
883 		    &sc->sc_txsoft[i].txs_dmamap);
884 		if (error) {
885 			aprint_error("%s: can't create DMA map for TX descs\n",
886 			    sc->sc_dev.dv_xname);
887 			goto fail_5;
888 		}
889 	}
890 
891 	/* Create DMA maps for RX buffers */
892 
893 	for (i = 0; i < VGE_NRXDESC; i++) {
894 		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
895 		    1, MCLBYTES, 0, BUS_DMA_NOWAIT,
896 		    &sc->sc_rxsoft[i].rxs_dmamap);
897 		if (error) {
898 			aprint_error("%s: can't create DMA map for RX descs\n",
899 			    sc->sc_dev.dv_xname);
900 			goto fail_6;
901 		}
902 		sc->sc_rxsoft[i].rxs_mbuf = NULL;
903 	}
904 
905 	return 0;
906 
907  fail_6:
908 	for (i = 0; i < VGE_NRXDESC; i++) {
909 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
910 			bus_dmamap_destroy(sc->sc_dmat,
911 			    sc->sc_rxsoft[i].rxs_dmamap);
912 	}
913  fail_5:
914 	for (i = 0; i < VGE_NTXDESC; i++) {
915 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
916 			bus_dmamap_destroy(sc->sc_dmat,
917 			    sc->sc_txsoft[i].txs_dmamap);
918 	}
919 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
920  fail_4:
921 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
922  fail_3:
923 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
924 	    sizeof(struct vge_control_data));
925  fail_2:
926 	bus_dmamem_free(sc->sc_dmat, &seg, nseg);
927  fail_1:
928 	return ENOMEM;
929 }
930 
931 /*
932  * Attach the interface. Allocate softc structures, do ifmedia
933  * setup and ethernet/BPF attach.
934  */
935 static void
936 vge_attach(struct device *parent, struct device *self, void *aux)
937 {
938 	uint8_t	*eaddr;
939 	struct vge_softc *sc = (void *)self;
940 	struct ifnet *ifp;
941 	struct pci_attach_args *pa = aux;
942 	pci_chipset_tag_t pc = pa->pa_pc;
943 	const char *intrstr;
944 	pci_intr_handle_t ih;
945 	uint16_t val;
946 
947 	aprint_normal(": VIA VT612X Gigabit Ethernet (rev. %#x)\n",
948 	    PCI_REVISION(pa->pa_class));
949 
950 	/* Make sure bus-mastering is enabled */
951 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
952 	    pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG) |
953 	    PCI_COMMAND_MASTER_ENABLE);
954 
955 	/*
956 	 * Map control/status registers.
957 	 */
958 	if (pci_mapreg_map(pa, VGE_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0,
959 	    &sc->sc_bst, &sc->sc_bsh, NULL, NULL) != 0) {
960 		aprint_error("%s: couldn't map memory\n", sc->sc_dev.dv_xname);
961 		return;
962 	}
963 
964 	/*
965 	 * Map and establish our interrupt.
966 	 */
967 	if (pci_intr_map(pa, &ih)) {
968 		aprint_error("%s: unable to map interrupt\n",
969 		    sc->sc_dev.dv_xname);
970 		return;
971 	}
972 	intrstr = pci_intr_string(pc, ih);
973 	sc->sc_intrhand = pci_intr_establish(pc, ih, IPL_NET, vge_intr, sc);
974 	if (sc->sc_intrhand == NULL) {
975 		aprint_error("%s: unable to establish interrupt",
976 		    sc->sc_dev.dv_xname);
977 		if (intrstr != NULL)
978 			aprint_error(" at %s", intrstr);
979 		aprint_error("\n");
980 		return;
981 	}
982 	aprint_normal("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);
983 
984 	/* Reset the adapter. */
985 	vge_reset(sc);
986 
987 	/*
988 	 * Get station address from the EEPROM.
989 	 */
990 	eaddr = sc->sc_eaddr;
991 	val = vge_read_eeprom(sc, VGE_EE_EADDR + 0);
992 	eaddr[0] = val & 0xff;
993 	eaddr[1] = val >> 8;
994 	val = vge_read_eeprom(sc, VGE_EE_EADDR + 1);
995 	eaddr[2] = val & 0xff;
996 	eaddr[3] = val >> 8;
997 	val = vge_read_eeprom(sc, VGE_EE_EADDR + 2);
998 	eaddr[4] = val & 0xff;
999 	eaddr[5] = val >> 8;
1000 
1001 	aprint_normal("%s: Ethernet address: %s\n", sc->sc_dev.dv_xname,
1002 	    ether_sprintf(eaddr));
1003 
1004 	/*
1005 	 * Use the 32-bit DMA tag. The hardware supports 48-bit physical
1006 	 * addresses, but we don't use that capability for now.
1007 	 */
1008 	sc->sc_dmat = pa->pa_dmat;
1009 
1010 	if (vge_allocmem(sc) != 0)
1011 		return;
1012 
1013 	ifp = &sc->sc_ethercom.ec_if;
1014 	ifp->if_softc = sc;
1015 	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
1016 	ifp->if_mtu = ETHERMTU;
1017 	ifp->if_baudrate = IF_Gbps(1);
1018 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1019 	ifp->if_ioctl = vge_ioctl;
1020 	ifp->if_start = vge_start;
1021 
1022 	/*
1023 	 * We can support 802.1Q VLAN-sized frames and jumbo
1024 	 * Ethernet frames.
1025 	 */
1026 	sc->sc_ethercom.ec_capabilities |=
1027 	    ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU |
1028 	    ETHERCAP_VLAN_HWTAGGING;
1029 
1030 	/*
1031 	 * We can do IPv4/TCPv4/UDPv4 checksums in hardware.
1032 	 */
1033 	ifp->if_capabilities |=
1034 	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
1035 	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
1036 	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
1037 
1038 #ifdef DEVICE_POLLING
1039 #ifdef IFCAP_POLLING
1040 	ifp->if_capabilities |= IFCAP_POLLING;
1041 #endif
1042 #endif
1043 	ifp->if_watchdog = vge_watchdog;
1044 	ifp->if_init = vge_init;
1045 	IFQ_SET_MAXLEN(&ifp->if_snd, max(VGE_IFQ_MAXLEN, IFQ_MAXLEN));
1046 
1047 	/*
1048 	 * Initialize our media structures and probe the MII.
1049 	 */
1050 	sc->sc_mii.mii_ifp = ifp;
1051 	sc->sc_mii.mii_readreg = vge_miibus_readreg;
1052 	sc->sc_mii.mii_writereg = vge_miibus_writereg;
1053 	sc->sc_mii.mii_statchg = vge_miibus_statchg;
1054 
1055 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
1056 	ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
1057 	    ether_mediastatus);
1058 	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
1059 	    MII_OFFSET_ANY, MIIF_DOPAUSE);
1060 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
1061 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
1062 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
1063 	} else
1064 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
1065 
1066 	/*
1067 	 * Attach the interface.
1068 	 */
1069 	if_attach(ifp);
1070 	ether_ifattach(ifp, eaddr);
1071 
1072 	callout_init(&sc->sc_timeout, 0);
1073 	callout_setfunc(&sc->sc_timeout, vge_tick, sc);
1074 
1075 	/*
1076 	 * Make sure the interface is shutdown during reboot.
1077 	 */
1078 	if (shutdownhook_establish(vge_shutdown, sc) == NULL) {
1079 		aprint_error("%s: WARNING: unable to establish shutdown hook\n",
1080 		    sc->sc_dev.dv_xname);
1081 	}
1082 }
1083 
1084 static int
1085 vge_newbuf(struct vge_softc *sc, int idx, struct mbuf *m)
1086 {
1087 	struct mbuf *m_new;
1088 	struct vge_rxdesc *rxd;
1089 	struct vge_rxsoft *rxs;
1090 	bus_dmamap_t map;
1091 	int i;
1092 #ifdef DIAGNOSTIC
1093 	uint32_t rd_sts;
1094 #endif
1095 
1096 	m_new = NULL;
1097 	if (m == NULL) {
1098 		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
1099 		if (m_new == NULL)
1100 			return ENOBUFS;
1101 
1102 		MCLGET(m_new, M_DONTWAIT);
1103 		if ((m_new->m_flags & M_EXT) == 0) {
1104 			m_freem(m_new);
1105 			return ENOBUFS;
1106 		}
1107 
1108 		m = m_new;
1109 	} else
1110 		m->m_data = m->m_ext.ext_buf;
1111 
1112 
1113 	/*
1114 	 * This is part of an evil trick to deal with non-x86 platforms.
1115 	 * The VIA chip requires RX buffers to be aligned on 32-bit
1116 	 * boundaries, but that will hose non-x86 machines. To get around
1117 	 * this, we leave some empty space at the start of each buffer
1118 	 * and for non-x86 hosts, we copy the buffer back two bytes
1119 	 * to achieve word alignment. This is slightly more efficient
1120 	 * than allocating a new buffer, copying the contents, and
1121 	 * discarding the old buffer.
1122 	 */
1123 	m->m_len = m->m_pkthdr.len = VGE_RX_BUFSIZE;
1124 #ifndef __NO_STRICT_ALIGNMENT
1125 	m->m_data += VGE_RX_PAD;
1126 #endif
1127 	rxs = &sc->sc_rxsoft[idx];
1128 	map = rxs->rxs_dmamap;
1129 
1130 	if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0)
1131 		goto out;
1132 
1133 	rxd = &sc->sc_rxdescs[idx];
1134 
1135 #ifdef DIAGNOSTIC
1136 	/* If this descriptor is still owned by the chip, bail. */
1137 	VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1138 	rd_sts = le32toh(rxd->rd_sts);
1139 	VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
1140 	if (rd_sts & VGE_RDSTS_OWN) {
1141 		panic("%s: tried to map busy RX descriptor",
1142 		    sc->sc_dev.dv_xname);
1143 	}
1144 #endif
1145 
1146 	rxs->rxs_mbuf = m;
1147 	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1148 	    BUS_DMASYNC_PREREAD);
1149 
1150 	rxd->rd_buflen =
1151 	    htole16(VGE_BUFLEN(map->dm_segs[0].ds_len) | VGE_RXDESC_I);
1152 	vge_set_rxaddr(rxd, map->dm_segs[0].ds_addr);
1153 	rxd->rd_sts = 0;
1154 	rxd->rd_ctl = 0;
1155 	VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1156 
1157 	/*
1158 	 * Note: the manual fails to document the fact that for
1159 	 * proper operation, the driver needs to replenish the RX
1160 	 * DMA ring 4 descriptors at a time (rather than one at a
1161 	 * time, like most chips). We can allocate the new buffers
1162 	 * but we should not set the OWN bits until we're ready
1163 	 * to hand back 4 of them in one shot.
1164 	 */
1165 
1166 #define VGE_RXCHUNK 4
1167 	sc->sc_rx_consumed++;
1168 	if (sc->sc_rx_consumed == VGE_RXCHUNK) {
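		/*
		 * Hand the chunk back in reverse order so that the
		 * first descriptor of the chunk has its OWN bit set
		 * last; the chip never sees a partially owned group.
		 */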
1169 		for (i = idx; i != idx - VGE_RXCHUNK; i--) {
1170 			KASSERT(i >= 0);
1171 			sc->sc_rxdescs[i].rd_sts |= htole32(VGE_RDSTS_OWN);
1172 			VGE_RXDESCSYNC(sc, i,
1173 			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1174 		}
1175 		sc->sc_rx_consumed = 0;
1176 	}
1177 
1178 	return 0;
1179  out:
1180 	if (m_new != NULL)
1181 		m_freem(m_new);
1182 	return ENOMEM;
1183 }
1184 
1185 #ifndef __NO_STRICT_ALIGNMENT
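/*
 * The chip DMA'd the frame to a 32-bit aligned buffer, which leaves
 * the IP header misaligned on strict-alignment hosts.  Shift the
 * whole frame back ETHER_ALIGN (2) bytes to realign it.
 */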
1186 static inline void
1187 vge_fixup_rx(struct mbuf *m)
1188 {
1189 	int i;
1190 	uint16_t *src, *dst;
1191 
1192 	src = mtod(m, uint16_t *);
1193 	dst = src - 1;
1194 
1195 	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
1196 		*dst++ = *src++;
1197 
1198 	m->m_data -= ETHER_ALIGN;
1199 }
1200 #endif
1201 
1202 /*
1203  * RX handler. We support the reception of jumbo frames that have
1204  * been fragmented across multiple 2K mbuf cluster buffers.
1205  */
1206 static void
1207 vge_rxeof(struct vge_softc *sc)
1208 {
1209 	struct mbuf *m;
1210 	struct ifnet *ifp;
1211 	int idx, total_len, lim;
1212 	struct vge_rxdesc *cur_rxd;
1213 	struct vge_rxsoft *rxs;
1214 	uint32_t rxstat, rxctl;
1215 
1216 	ifp = &sc->sc_ethercom.ec_if;
1217 	lim = 0;
1218 
1219 	/* Invalidate the descriptor memory */
1220 
1221 	for (idx = sc->sc_rx_prodidx;; idx = VGE_NEXT_RXDESC(idx)) {
1222 		cur_rxd = &sc->sc_rxdescs[idx];
1223 
1224 		VGE_RXDESCSYNC(sc, idx,
1225 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1226 		rxstat = le32toh(cur_rxd->rd_sts);
1227 		if ((rxstat & VGE_RDSTS_OWN) != 0) {
1228 			VGE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
1229 			break;
1230 		}
1231 
1232 		rxctl = le32toh(cur_rxd->rd_ctl);
1233 		rxs = &sc->sc_rxsoft[idx];
1234 		m = rxs->rxs_mbuf;
1235 		total_len = (rxstat & VGE_RDSTS_BUFSIZ) >> 16;
1236 
1237 		/* Invalidate the RX mbuf and unload its map */
1238 
1239 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap,
1240 		    0, rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1241 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
1242 
1243 		/*
1244 		 * If the 'start of frame' bit is set, this indicates
1245 		 * either the first fragment in a multi-fragment receive,
1246 		 * or an intermediate fragment. Either way, we want to
1247 		 * accumulate the buffers.
1248 		 */
1249 		if (rxstat & VGE_RXPKT_SOF) {
1250 			m->m_len = VGE_RX_BUFSIZE;
1251 			if (sc->sc_rx_mhead == NULL)
1252 				sc->sc_rx_mhead = sc->sc_rx_mtail = m;
1253 			else {
1254 				m->m_flags &= ~M_PKTHDR;
1255 				sc->sc_rx_mtail->m_next = m;
1256 				sc->sc_rx_mtail = m;
1257 			}
1258 			vge_newbuf(sc, idx, NULL);
1259 			continue;
1260 		}
1261 
1262 		/*
1263 		 * Bad/error frames will have the RXOK bit cleared.
1264 		 * However, there's one error case we want to allow:
1265 		 * if a VLAN tagged frame arrives and the chip can't
1266 		 * match it against the CAM filter, it considers this
1267 		 * a 'VLAN CAM filter miss' and clears the 'RXOK' bit.
1268 		 * We don't want to drop the frame though: our VLAN
1269 		 * filtering is done in software.
1270 		 */
1271 		if ((rxstat & VGE_RDSTS_RXOK) == 0 &&
1272 		    (rxstat & VGE_RDSTS_VIDM) == 0 &&
1273 		    (rxstat & VGE_RDSTS_CSUMERR) == 0) {
1274 			ifp->if_ierrors++;
1275 			/*
1276 			 * If this is part of a multi-fragment packet,
1277 			 * discard all the pieces.
1278 			 */
1279 			if (sc->sc_rx_mhead != NULL) {
1280 				m_freem(sc->sc_rx_mhead);
1281 				sc->sc_rx_mhead = sc->sc_rx_mtail = NULL;
1282 			}
1283 			vge_newbuf(sc, idx, m);
1284 			continue;
1285 		}
1286 
1287 		/*
1288 		 * If allocating a replacement mbuf fails,
1289 		 * reload the current one.
1290 		 */
1291 
1292 		if (vge_newbuf(sc, idx, NULL)) {
1293 			ifp->if_ierrors++;
1294 			if (sc->sc_rx_mhead != NULL) {
1295 				m_freem(sc->sc_rx_mhead);
1296 				sc->sc_rx_mhead = sc->sc_rx_mtail = NULL;
1297 			}
1298 			vge_newbuf(sc, idx, m);
1299 			continue;
1300 		}
1301 
1302 		if (sc->sc_rx_mhead != NULL) {
1303 			m->m_len = total_len % VGE_RX_BUFSIZE;
1304 			/*
1305 			 * Special case: if this buffer holds 4 bytes or
1306 			 * less, the mbuf can be discarded: the last 4
1307 			 * bytes are the CRC, which we don't
1308 			 * care about anyway.
1309 			 */
1310 			if (m->m_len <= ETHER_CRC_LEN) {
1311 				sc->sc_rx_mtail->m_len -=
1312 				    (ETHER_CRC_LEN - m->m_len);
1313 				m_freem(m);
1314 			} else {
1315 				m->m_len -= ETHER_CRC_LEN;
1316 				m->m_flags &= ~M_PKTHDR;
1317 				sc->sc_rx_mtail->m_next = m;
1318 			}
1319 			m = sc->sc_rx_mhead;
1320 			sc->sc_rx_mhead = sc->sc_rx_mtail = NULL;
1321 			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
1322 		} else
1323 			m->m_pkthdr.len = m->m_len = total_len - ETHER_CRC_LEN;
1324 
1325 #ifndef __NO_STRICT_ALIGNMENT
1326 		vge_fixup_rx(m);
1327 #endif
1328 		ifp->if_ipackets++;
1329 		m->m_pkthdr.rcvif = ifp;
1330 
1331 		/* Do RX checksumming if enabled */
1332 		if (ifp->if_csum_flags_rx & M_CSUM_IPv4) {
1333 
1334 			/* Check IP header checksum */
1335 			if (rxctl & VGE_RDCTL_IPPKT)
1336 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
1337 			if ((rxctl & VGE_RDCTL_IPCSUMOK) == 0)
1338 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
1339 		}
1340 
1341 		if (ifp->if_csum_flags_rx & M_CSUM_TCPv4) {
1342 			/* Check TCP checksum */
1343 			if (rxctl & VGE_RDCTL_TCPPKT)
1344 				m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
1345 
1346 			if ((rxctl & VGE_RDCTL_PROTOCSUMOK) == 0)
1347 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
1348 		}
1349 
1350 		if (ifp->if_csum_flags_rx & M_CSUM_UDPv4) {
1351 			/* Check UDP checksum */
1352 			if (rxctl & VGE_RDCTL_UDPPKT)
1353 				m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
1354 
1355 			if ((rxctl & VGE_RDCTL_PROTOCSUMOK) == 0)
1356 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
1357 		}
1358 
1359 		if (rxstat & VGE_RDSTS_VTAG) {
1360 			/*
1361 			 * We use bswap16() here because:
1362 			 * On LE machines, tag is stored in BE as stream data.
1363 			 * On BE machines, tag is stored in BE as stream data
1364 			 *  but it was already swapped by le32toh() above.
1365 			 */
1366 			VLAN_INPUT_TAG(ifp, m,
1367 			    bswap16(rxctl & VGE_RDCTL_VLANID), continue);
1368 		}
1369 
1370 #if NBPFILTER > 0
1371 		/*
1372 		 * Handle BPF listeners.
1373 		 */
1374 		if (ifp->if_bpf)
1375 			bpf_mtap(ifp->if_bpf, m);
1376 #endif
1377 
1378 		(*ifp->if_input)(ifp, m);
1379 
1380 		lim++;
1381 		if (lim == VGE_NRXDESC)
1382 			break;
1383 	}
1384 
1385 	sc->sc_rx_prodidx = idx;
1386 	CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, lim);
1387 }
1388 
1389 static void
1390 vge_txeof(struct vge_softc *sc)
1391 {
1392 	struct ifnet *ifp;
1393 	struct vge_txsoft *txs;
1394 	uint32_t txstat;
1395 	int idx;
1396 
1397 	ifp = &sc->sc_ethercom.ec_if;
1398 
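	/*
	 * Walk the ring from the last descriptor we reaped, reclaiming
	 * mbufs and DMA maps until we reach a descriptor the chip
	 * still owns.
	 */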
1399 	for (idx = sc->sc_tx_considx;
1400 	    sc->sc_tx_free < VGE_NTXDESC;
1401 	    idx = VGE_NEXT_TXDESC(idx), sc->sc_tx_free++) {
1402 		VGE_TXDESCSYNC(sc, idx,
1403 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1404 		txstat = le32toh(sc->sc_txdescs[idx].td_sts);
1405 		VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
1406 		if (txstat & VGE_TDSTS_OWN) {
1407 			break;
1408 		}
1409 
1410 		txs = &sc->sc_txsoft[idx];
1411 		m_freem(txs->txs_mbuf);
1412 		txs->txs_mbuf = NULL;
1413 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap, 0,
1414 		    txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1415 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1416 		if (txstat & (VGE_TDSTS_EXCESSCOLL|VGE_TDSTS_COLL))
1417 			ifp->if_collisions++;
1418 		if (txstat & VGE_TDSTS_TXERR)
1419 			ifp->if_oerrors++;
1420 		else
1421 			ifp->if_opackets++;
1422 	}
1423 
1424 	sc->sc_tx_considx = idx;
1425 
1426 	if (sc->sc_tx_free > 0) {
1427 		ifp->if_flags &= ~IFF_OACTIVE;
1428 	}
1429 
1430 	/*
1431 	 * If not all descriptors have been reaped yet,
1432 	 * reload the timer so that we will eventually get another
1433 	 * interrupt that will cause us to re-enter this routine.
1434 	 * This is done in case the transmitter has gone idle.
1435 	 */
1436 	if (sc->sc_tx_free < VGE_NTXDESC)
1437 		CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);
1438 	else
1439 		ifp->if_timer = 0;
1440 }
1441 
1442 static void
1443 vge_tick(void *xsc)
1444 {
1445 	struct vge_softc *sc;
1446 	struct ifnet *ifp;
1447 	struct mii_data *mii;
1448 	int s;
1449 
1450 	sc = xsc;
1451 	ifp = &sc->sc_ethercom.ec_if;
1452 	mii = &sc->sc_mii;
1453 
1454 	s = splnet();
1455 
1456 	callout_schedule(&sc->sc_timeout, hz);
1457 
1458 	mii_tick(mii);
1459 	if (sc->sc_link) {
1460 		if ((mii->mii_media_status & IFM_ACTIVE) == 0)
1461 			sc->sc_link = 0;
1462 	} else {
1463 		if (mii->mii_media_status & IFM_ACTIVE &&
1464 		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
1465 			sc->sc_link = 1;
1466 			if (!IFQ_IS_EMPTY(&ifp->if_snd))
1467 				vge_start(ifp);
1468 		}
1469 	}
1470 
1471 	splx(s);
1472 }
1473 
1474 static int
1475 vge_intr(void *arg)
1476 {
1477 	struct vge_softc *sc;
1478 	struct ifnet *ifp;
1479 	uint32_t status;
1480 	int claim;
1481 
1482 	sc = arg;
1483 	claim = 0;
1484 	if (sc->sc_suspended) {
1485 		return claim;
1486 	}
1487 
1488 	ifp = &sc->sc_ethercom.ec_if;
1489 
1490 	if ((ifp->if_flags & IFF_UP) == 0) {
1491 		return claim;
1492 	}
1493 
1494 	/* Disable interrupts */
1495 	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
1496 
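	/*
	 * Service and acknowledge interrupt sources until the ISR
	 * reads back with no interesting bits set.
	 */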
1497 	for (;;) {
1498 
1499 		status = CSR_READ_4(sc, VGE_ISR);
1500 		/* If the card has gone away, the read returns 0xffffffff. */
1501 		if (status == 0xFFFFFFFF)
1502 			break;
1503 
1504 		if (status) {
1505 			claim = 1;
1506 			CSR_WRITE_4(sc, VGE_ISR, status);
1507 		}
1508 
1509 		if ((status & VGE_INTRS) == 0)
1510 			break;
1511 
1512 		if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO))
1513 			vge_rxeof(sc);
1514 
1515 		if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) {
1516 			vge_rxeof(sc);
1517 			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
1518 			CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
1519 		}
1520 
1521 		if (status & (VGE_ISR_TXOK0|VGE_ISR_TIMER0))
1522 			vge_txeof(sc);
1523 
1524 		if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL))
1525 			vge_init(ifp);
1526 
1527 		if (status & VGE_ISR_LINKSTS)
1528 			vge_tick(sc);
1529 	}
1530 
1531 	/* Re-enable interrupts */
1532 	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
1533 
1534 	if (claim && !IFQ_IS_EMPTY(&ifp->if_snd))
1535 		vge_start(ifp);
1536 
1537 	return claim;
1538 }
1539 
1540 static int
1541 vge_encap(struct vge_softc *sc, struct mbuf *m_head, int idx)
1542 {
1543 	struct vge_txsoft *txs;
1544 	struct vge_txdesc *txd;
1545 	struct vge_txfrag *f;
1546 	struct mbuf *m_new;
1547 	bus_dmamap_t map;
1548 	int m_csumflags, seg, error, flags;
1549 	struct m_tag *mtag;
1550 	size_t sz;
1551 	uint32_t td_sts, td_ctl;
1552 
1553 	KASSERT(sc->sc_tx_free > 0);
1554 
1555 	txd = &sc->sc_txdescs[idx];
1556 
1557 #ifdef DIAGNOSTIC
1558 	/* If this descriptor is still owned by the chip, bail. */
1559 	VGE_TXDESCSYNC(sc, idx,
1560 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1561 	td_sts = le32toh(txd->td_sts);
1562 	VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD);
1563 	if (td_sts & VGE_TDSTS_OWN) {
1564 		return ENOBUFS;
1565 	}
1566 #endif
1567 
1568 	/*
1569 	 * Preserve m_pkthdr.csum_flags here since m_head might be
1570 	 * updated by m_defrag()
1571 	 */
1572 	m_csumflags = m_head->m_pkthdr.csum_flags;
1573 
1574 	txs = &sc->sc_txsoft[idx];
1575 	map = txs->txs_dmamap;
1576 	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m_head, BUS_DMA_NOWAIT);
1577 
1578 	/* If too many segments to map, coalesce */
1579 	if (error == EFBIG ||
1580 	    (m_head->m_pkthdr.len < ETHER_PAD_LEN &&
1581 	     map->dm_nsegs == VGE_TX_FRAGS)) {
1582 		m_new = m_defrag(m_head, M_DONTWAIT);
1583 		if (m_new == NULL)
1584 			return EFBIG;
1585 
1586 		error = bus_dmamap_load_mbuf(sc->sc_dmat, map,
1587 		    m_new, BUS_DMA_NOWAIT);
1588 		if (error) {
1589 			m_freem(m_new);
1590 			return error;
1591 		}
1592 
1593 		m_head = m_new;
1594 	} else if (error)
1595 		return error;
1596 
1597 	txs->txs_mbuf = m_head;
1598 
1599 	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1600 	    BUS_DMASYNC_PREWRITE);
1601 
1602 	for (seg = 0, f = &txd->td_frag[0]; seg < map->dm_nsegs; seg++, f++) {
1603 		f->tf_buflen = htole16(VGE_BUFLEN(map->dm_segs[seg].ds_len));
1604 		vge_set_txaddr(f, map->dm_segs[seg].ds_addr);
1605 	}
1606 
1607 	/* Argh. This chip does not autopad short frames */
1608 	sz = m_head->m_pkthdr.len;
1609 	if (sz < ETHER_PAD_LEN) {
1610 		f->tf_buflen = htole16(VGE_BUFLEN(ETHER_PAD_LEN - sz));
1611 		vge_set_txaddr(f, VGE_CDPADADDR(sc));
1612 		sz = ETHER_PAD_LEN;
1613 		seg++;
1614 	}
1615 	VGE_TXFRAGSYNC(sc, idx, seg, BUS_DMASYNC_PREWRITE);
1616 
1617 	/*
1618 	 * When telling the chip how many segments there are, we
1619 	 * must use nsegs + 1 instead of just nsegs. Darned if I
1620 	 * know why.
1621 	 */
1622 	seg++;
1623 
1624 	flags = 0;
1625 	if (m_csumflags & M_CSUM_IPv4)
1626 		flags |= VGE_TDCTL_IPCSUM;
1627 	if (m_csumflags & M_CSUM_TCPv4)
1628 		flags |= VGE_TDCTL_TCPCSUM;
1629 	if (m_csumflags & M_CSUM_UDPv4)
1630 		flags |= VGE_TDCTL_UDPCSUM;
1631 	td_sts = sz << 16;
1632 	td_ctl = flags | (seg << 28) | VGE_TD_LS_NORM;
1633 
1634 	if (sz > ETHERMTU + ETHER_HDR_LEN)
1635 		td_ctl |= VGE_TDCTL_JUMBO;
1636 
1637 	/*
1638 	 * Set up hardware VLAN tagging.
1639 	 */
1640 	mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m_head);
1641 	if (mtag != NULL) {
1642 		/*
1643 		 * No need for htons() here since the vge(4) chip expects
1644 		 * tags in little-endian order and we already use
1645 		 * htole32() here.
1646 		 */
1647 		td_ctl |= VLAN_TAG_VALUE(mtag) | VGE_TDCTL_VTAG;
1648 	}
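	/*
	 * Post the descriptor in two steps: write it fully initialized
	 * with the OWN bit clear, sync, then set OWN in a second write,
	 * so the chip can never see a half-written descriptor.
	 */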
1649 	txd->td_ctl = htole32(td_ctl);
1650 	txd->td_sts = htole32(td_sts);
1651 	VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1652 
1653 	txd->td_sts = htole32(VGE_TDSTS_OWN | td_sts);
1654 	VGE_TXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1655 
1656 	sc->sc_tx_free--;
1657 
1658 	return 0;
1659 }
1660 
1661 /*
1662  * Main transmit routine.
1663  */
1664 
1665 static void
1666 vge_start(struct ifnet *ifp)
1667 {
1668 	struct vge_softc *sc;
1669 	struct vge_txsoft *txs;
1670 	struct mbuf *m_head;
1671 	int idx, pidx, ofree, error;
1672 
1673 	sc = ifp->if_softc;
1674 
1675 	if (!sc->sc_link ||
1676 	    (ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING) {
1677 		return;
1678 	}
1679 
1680 	m_head = NULL;
1681 	idx = sc->sc_tx_prodidx;
1682 	pidx = VGE_PREV_TXDESC(idx);
1683 	ofree = sc->sc_tx_free;
1684 
1685 	/*
1686 	 * Loop through the send queue, setting up transmit descriptors
1687 	 * until we drain the queue, or use up all available transmit
1688 	 * descriptors.
1689 	 */
1690 	for (;;) {
1691 		/* Grab a packet off the queue. */
1692 		IFQ_POLL(&ifp->if_snd, m_head);
1693 		if (m_head == NULL)
1694 			break;
1695 
1696 		if (sc->sc_tx_free == 0) {
1697 			/*
1698 			 * All slots used, stop for now.
1699 			 */
1700 			ifp->if_flags |= IFF_OACTIVE;
1701 			break;
1702 		}
1703 
1704 		txs = &sc->sc_txsoft[idx];
1705 		KASSERT(txs->txs_mbuf == NULL);
1706 
1707 		if ((error = vge_encap(sc, m_head, idx))) {
1708 			if (error == EFBIG) {
1709 				aprint_error("%s: Tx packet consumes too many "
1710 				    "DMA segments, dropping...\n",
1711 				    sc->sc_dev.dv_xname);
1712 				IFQ_DEQUEUE(&ifp->if_snd, m_head);
1713 				m_freem(m_head);
1714 				continue;
1715 			}
1716 
1717 			/*
1718 			 * Short on resources, just stop for now.
1719 			 */
1720 			if (error == ENOBUFS)
1721 				ifp->if_flags |= IFF_OACTIVE;
1722 			break;
1723 		}
1724 
1725 		IFQ_DEQUEUE(&ifp->if_snd, m_head);
1726 
1727 		/*
1728 		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
1729 		 */
1730 
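		/*
		 * Set the Q bit in the previous descriptor's first
		 * fragment length field to tell the chip that another
		 * descriptor follows it in the queue.
		 */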
1731 		sc->sc_txdescs[pidx].td_frag[0].tf_buflen |=
1732 		    htole16(VGE_TXDESC_Q);
1733 		VGE_TXFRAGSYNC(sc, pidx, 1,
1734 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1735 
1736 		if (txs->txs_mbuf != m_head) {
1737 			m_freem(m_head);
1738 			m_head = txs->txs_mbuf;
1739 		}
1740 
1741 		pidx = idx;
1742 		idx = VGE_NEXT_TXDESC(idx);
1743 
1744 		/*
1745 		 * If there's a BPF listener, bounce a copy of this frame
1746 		 * to him.
1747 		 */
1748 #if NBPFILTER > 0
1749 		if (ifp->if_bpf)
1750 			bpf_mtap(ifp->if_bpf, m_head);
1751 #endif
1752 	}
1753 
1754 	if (sc->sc_tx_free < ofree) {
1755 		/* TX packet queued */
1756 
1757 		sc->sc_tx_prodidx = idx;
1758 
1759 		/* Issue a transmit command. */
1760 		CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0);
1761 
1762 		/*
1763 		 * Use the countdown timer for interrupt moderation.
1764 		 * 'TX done' interrupts are disabled. Instead, we reset the
1765 		 * countdown timer, which will begin counting until it hits
1766 		 * the value in the SSTIMER register, and then trigger an
1767 		 * interrupt. Each time we set the TIMER0_ENABLE bit, the
1768 		 * timer count is reloaded. Only when the transmitter
1769 		 * is idle will the timer hit 0 and an interrupt fire.
1770 		 */
1771 		CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);
1772 
1773 		/*
1774 		 * Set a timeout in case the chip goes out to lunch.
1775 		 */
1776 		ifp->if_timer = 5;
1777 	}
1778 }
1779 
1780 static int
1781 vge_init(struct ifnet *ifp)
1782 {
1783 	struct vge_softc *sc;
1784 	int i, rc = 0;
1785 
1786 	sc = ifp->if_softc;
1787 
1788 	/*
1789 	 * Cancel pending I/O and free all RX/TX buffers.
1790 	 */
1791 	vge_stop(sc);
1792 	vge_reset(sc);
1793 
1794 	/* Initialize the RX descriptors and mbufs. */
1795 	memset(sc->sc_rxdescs, 0, sizeof(sc->sc_rxdescs));
1796 	sc->sc_rx_consumed = 0;
1797 	for (i = 0; i < VGE_NRXDESC; i++) {
1798 		if (vge_newbuf(sc, i, NULL) == ENOBUFS) {
1799 			aprint_error("%s: unable to allocate or map "
1800 			    "rx buffer\n", sc->sc_dev.dv_xname);
1801 			return 1; /* XXX */
1802 		}
1803 	}
1804 	sc->sc_rx_prodidx = 0;
1805 	sc->sc_rx_mhead = sc->sc_rx_mtail = NULL;
1806 
1807 	/* Initialize the TX descriptors and mbufs. */
1808 	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
1809 	bus_dmamap_sync(sc->sc_dmat, sc->sc_cddmamap,
1810 	    VGE_CDTXOFF(0), sizeof(sc->sc_txdescs),
1811 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1812 	for (i = 0; i < VGE_NTXDESC; i++)
1813 		sc->sc_txsoft[i].txs_mbuf = NULL;
1814 
1815 	sc->sc_tx_prodidx = 0;
1816 	sc->sc_tx_considx = 0;
1817 	sc->sc_tx_free = VGE_NTXDESC;
1818 
1819 	/* Set our station address */
1820 	for (i = 0; i < ETHER_ADDR_LEN; i++)
1821 		CSR_WRITE_1(sc, VGE_PAR0 + i, sc->sc_eaddr[i]);
1822 
1823 	/*
1824 	 * Set receive FIFO threshold. Also allow transmission and
1825 	 * reception of VLAN tagged frames.
1826 	 */
1827 	CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR|VGE_RXCFG_VTAGOPT);
1828 	CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES|VGE_VTAG_OPT2);
1829 
1830 	/* Set DMA burst length */
1831 	CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN);
1832 	CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128);
1833 
1834 	CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK);
1835 
1836 	/* Set collision backoff algorithm */
1837 	CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM|
1838 	    VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT);
1839 	CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET);
1840 
1841 	/* Disable LPSEL field in priority resolution */
1842 	CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS);
1843 
1844 	/*
1845 	 * Load the addresses of the DMA queues into the chip.
1846 	 * Note that we only use one transmit queue.
1847 	 */
1848 
1849 	CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0, VGE_ADDR_LO(VGE_CDTXADDR(sc, 0)));
1850 	CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_NTXDESC - 1);
1851 
1852 	CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, VGE_ADDR_LO(VGE_CDRXADDR(sc, 0)));
1853 	CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_NRXDESC - 1);
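	/* Tell the chip how many RX descriptors are initially available. */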
1854 	CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_NRXDESC);
1855 
1856 	/* Enable and wake up the RX descriptor queue */
1857 	CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
1858 	CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);
1859 
1860 	/* Enable the TX descriptor queue */
1861 	CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0);
1862 
1863 	/* Set up the receive filter -- allow large frames for VLANs. */
1864 	CSR_WRITE_1(sc, VGE_RXCTL, VGE_RXCTL_RX_UCAST|VGE_RXCTL_RX_GIANT);
1865 
1866 	/* If we want promiscuous mode, set the allframes bit. */
1867 	if (ifp->if_flags & IFF_PROMISC) {
1868 		CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC);
1869 	}
1870 
1871 	/* Set capture broadcast bit to capture broadcast frames. */
1872 	if (ifp->if_flags & IFF_BROADCAST) {
1873 		CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_BCAST);
1874 	}
1875 
1876 	/* Set multicast bit to capture multicast frames. */
1877 	if (ifp->if_flags & IFF_MULTICAST) {
1878 		CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_MCAST);
1879 	}
1880 
1881 	/* Init the cam filter. */
1882 	vge_cam_clear(sc);
1883 
1884 	/* Init the multicast filter. */
1885 	vge_setmulti(sc);
1886 
1887 	/* Enable flow control */
1888 
1889 	CSR_WRITE_1(sc, VGE_CRS2, 0x8B);	/* XXX undocumented magic value */
1890 
1891 	/*
	 * Jumbo frame reception needs no extra setup here: large
	 * frames are already accepted via VGE_RXCTL_RX_GIANT above.
	 */
1892 
1893 	/* Start the MAC. */
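	/*
	 * The CRS* command registers are write-to-set and the CRC*
	 * registers write-to-clear: clear the stop bit, disable
	 * automatic descriptor polling, then enable TX/RX and start.
	 */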
1894 	CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP);
1895 	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL);
1896 	CSR_WRITE_1(sc, VGE_CRS0,
1897 	    VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START);
1898 
1899 	/*
1900 	 * Configure the one-shot timer for microsecond
1901 	 * resolution and load it for 400 usecs.
1902 	 */
1903 	CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_TIMER0_RES);
1904 	CSR_WRITE_2(sc, VGE_SSTIMER, 400);
1905 
1906 	/*
1907 	 * Configure interrupt moderation for receive. Enable
1908 	 * the holdoff counter and load it, and set the RX
1909 	 * suppression count to the number of descriptors we
1910 	 * want to allow before triggering an interrupt.
1911 	 * The holdoff timer is in units of 20 usecs.
1912 	 */
1913 
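	/*
	 * The holdoff timer and RX suppression threshold registers sit
	 * behind the VGE_CAMCTL page-select bits; this block is
	 * currently compiled out.
	 */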
1914 #ifdef notyet
1915 	CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_TXINTSUP_DISABLE);
1916 	/* Select the interrupt holdoff timer page. */
1917 	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
1918 	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF);
1919 	CSR_WRITE_1(sc, VGE_INTHOLDOFF, 10); /* ~200 usecs */
1920 
1921 	/* Enable use of the holdoff timer. */
1922 	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF);
1923 	CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_SC_RELOAD);
1924 
1925 	/* Select the RX suppression threshold page. */
1926 	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
1927 	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR);
1928 	CSR_WRITE_1(sc, VGE_RXSUPPTHR, 64); /* interrupt after 64 packets */
1929 
1930 	/* Restore the page select bits. */
1931 	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
1932 	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
1933 #endif
1934 
1935 #ifdef DEVICE_POLLING
1936 	/*
1937 	 * Disable interrupts if we are polling.
1938 	 */
1939 	if (ifp->if_flags & IFF_POLLING) {
1940 		CSR_WRITE_4(sc, VGE_IMR, 0);
1941 		CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
1942 	} else	/* otherwise ... */
1943 #endif /* DEVICE_POLLING */
1944 	{
1945 		/*
1946 		 * Enable interrupts.
1947 		 */
1948 		CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
1949 		CSR_WRITE_4(sc, VGE_ISR, 0);
1950 		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
1951 	}
1952 
1953 	if ((rc = ether_mediachange(ifp)) != 0)
1954 		goto out;
1955 
1956 	ifp->if_flags |= IFF_RUNNING;
1957 	ifp->if_flags &= ~IFF_OACTIVE;
1958 
1959 	sc->sc_if_flags = 0;
1960 	sc->sc_link = 0;
1961 
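	/* Re-arm the one-second tick callout. */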
1962 	callout_schedule(&sc->sc_timeout, hz);
1963 
1964 out:
1965 	return rc;
1966 }
1967 
1968 static void
1969 vge_miibus_statchg(struct device *self)
1970 {
1971 	struct vge_softc *sc;
1972 	struct mii_data *mii;
1973 	struct ifmedia_entry *ife;
1974 
1975 	sc = (void *)self;
1976 	mii = &sc->sc_mii;
1977 	ife = mii->mii_media.ifm_cur;
1978 	/*
1979 	 * If the user manually selects a media mode, we need to turn
1980 	 * on the forced MAC mode bit in the DIAGCTL register. If the
1981 	 * user happens to choose a full duplex mode, we also need to
1982 	 * set the 'force full duplex' bit. This applies only to
1983 	 * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC
1984 	 * mode is disabled, and in 1000baseT mode, full duplex is
1985 	 * always implied, so we turn on the forced mode bit but leave
1986 	 * the FDX bit cleared.
1987 	 */
1988 
1989 	switch (IFM_SUBTYPE(ife->ifm_media)) {
1990 	case IFM_AUTO:
1991 		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
1992 		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
1993 		break;
1994 	case IFM_1000_T:
1995 		CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
1996 		CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
1997 		break;
1998 	case IFM_100_TX:
1999 	case IFM_10_T:
2000 		CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE);
2001 		if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) {
2002 			CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
2003 		} else {
2004 			CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE);
2005 		}
2006 		break;
2007 	default:
2008 		aprint_error("%s: unknown media type: %x\n",
2009 		    sc->sc_dev.dv_xname,
2010 		    IFM_SUBTYPE(ife->ifm_media));
2011 		break;
2012 	}
2013 }
2014 
2015 static int
2016 vge_ioctl(struct ifnet *ifp, u_long command, void *data)
2017 {
2018 	struct vge_softc *sc;
2019 	struct ifreq *ifr;
2020 	int s, error;
2021 
2022 	sc = ifp->if_softc;
2023 	ifr = (struct ifreq *)data;
2024 	error = 0;
2025 
2026 	s = splnet();
2027 
2028 	switch (command) {
2029 	case SIOCSIFMTU:
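		/*
		 * An MTU change needs no chip reprogramming (large
		 * frames are always accepted via VGE_RXCTL_RX_GIANT),
		 * so ENETRESET from ifioctl_common() is mapped to
		 * success.
		 */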
2030 		if (ifr->ifr_mtu > VGE_JUMBO_MTU)
2031 			error = EINVAL;
2032 		else if ((error = ifioctl_common(ifp, command, data)) == ENETRESET)
2033 			error = 0;
2034 		break;
2035 	case SIOCSIFFLAGS:
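		/*
		 * If the interface is running and only the promiscuous
		 * flag changed, toggle the RX_PROMISC bit and rebuild
		 * the multicast filter rather than doing a full reinit.
		 */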
2036 		if (ifp->if_flags & IFF_UP) {
2037 			if (ifp->if_flags & IFF_RUNNING &&
2038 			    ifp->if_flags & IFF_PROMISC &&
2039 			    (sc->sc_if_flags & IFF_PROMISC) == 0) {
2040 				CSR_SETBIT_1(sc, VGE_RXCTL,
2041 				    VGE_RXCTL_RX_PROMISC);
2042 				vge_setmulti(sc);
2043 			} else if (ifp->if_flags & IFF_RUNNING &&
2044 			    (ifp->if_flags & IFF_PROMISC) == 0 &&
2045 			    sc->sc_if_flags & IFF_PROMISC) {
2046 				CSR_CLRBIT_1(sc, VGE_RXCTL,
2047 				    VGE_RXCTL_RX_PROMISC);
2048 				vge_setmulti(sc);
2049 			} else
2050 				vge_init(ifp);
2051 		} else {
2052 			if (ifp->if_flags & IFF_RUNNING)
2053 				vge_stop(sc);
2054 		}
2055 		sc->sc_if_flags = ifp->if_flags;
2056 		break;
2057 	default:
2058 		if ((error = ether_ioctl(ifp, command, data)) != ENETRESET)
2059 			break;
2060 
2061 		error = 0;
2062 
2063 		if (command != SIOCADDMULTI && command != SIOCDELMULTI)
2064 			;
2065 		else if (ifp->if_flags & IFF_RUNNING) {
2066 			/*
2067 			 * Multicast list has changed; set the hardware filter
2068 			 * accordingly.
2069 			 */
2070 			vge_setmulti(sc);
2071 		}
2072 		break;
2073 	}
2074 
2075 	splx(s);
2076 	return error;
2077 }
2078 
2079 static void
2080 vge_watchdog(struct ifnet *ifp)
2081 {
2082 	struct vge_softc *sc;
2083 	int s;
2084 
2085 	sc = ifp->if_softc;
2086 	s = splnet();
2087 	aprint_error("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
2088 	ifp->if_oerrors++;
2089 
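	/*
	 * Reclaim whatever the chip has completed, then reset and
	 * reinitialize the interface from scratch.
	 */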
2090 	vge_txeof(sc);
2091 	vge_rxeof(sc);
2092 
2093 	vge_init(ifp);
2094 
2095 	splx(s);
2096 }
2097 
2098 /*
2099  * Stop the adapter and free any mbufs allocated to the
2100  * RX and TX lists.
2101  */
2102 static void
2103 vge_stop(struct vge_softc *sc)
2104 {
2105 	struct ifnet *ifp;
2106 	struct vge_txsoft *txs;
2107 	struct vge_rxsoft *rxs;
2108 	int i, s;
2109 
2110 	ifp = &sc->sc_ethercom.ec_if;
2111 
2112 	s = splnet();
2113 	ifp->if_timer = 0;
2114 
2115 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2116 #ifdef DEVICE_POLLING
2117 	ether_poll_deregister(ifp);
2118 #endif /* DEVICE_POLLING */
2119 
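	/*
	 * Mask the global interrupt enable, stop the MAC, ack all
	 * pending interrupts and halt the TX/RX queues.
	 */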
2120 	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
2121 	CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP);
2122 	CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
2123 	CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF);
2124 	CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF);
2125 	CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0);
2126 
2127 	if (sc->sc_rx_mhead != NULL) {
2128 		m_freem(sc->sc_rx_mhead);
2129 		sc->sc_rx_mhead = sc->sc_rx_mtail = NULL;
2130 	}
2131 
2132 	/* Free the TX list buffers. */
2133 
2134 	for (i = 0; i < VGE_NTXDESC; i++) {
2135 		txs = &sc->sc_txsoft[i];
2136 		if (txs->txs_mbuf != NULL) {
2137 			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2138 			m_freem(txs->txs_mbuf);
2139 			txs->txs_mbuf = NULL;
2140 		}
2141 	}
2142 
2143 	/* Free the RX list buffers. */
2144 
2145 	for (i = 0; i < VGE_NRXDESC; i++) {
2146 		rxs = &sc->sc_rxsoft[i];
2147 		if (rxs->rxs_mbuf != NULL) {
2148 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2149 			m_freem(rxs->rxs_mbuf);
2150 			rxs->rxs_mbuf = NULL;
2151 		}
2152 	}
2153 
2154 	splx(s);
2155 }
2156 
2157 #if VGE_POWER_MANAGEMENT
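/*
 * Note: this suspend/resume code is inherited from the FreeBSD
 * driver and still uses FreeBSD PCI accessors (device_get_softc(),
 * pci_read_config(), pci_write_config()); it is compiled only when
 * VGE_POWER_MANAGEMENT is defined.
 */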
2158 /*
2159  * Device suspend routine.  Stop the interface and save some PCI
2160  * settings in case the BIOS doesn't restore them properly on
2161  * resume.
2162  */
2163 static int
2164 vge_suspend(struct device *dev)
2165 {
2166 	struct vge_softc *sc;
2167 	int i;
2168 
2169 	sc = device_get_softc(dev);
2170 
2171 	vge_stop(sc);
2172 
2173 	for (i = 0; i < 5; i++)
2174 		sc->sc_saved_maps[i] =
2175 		    pci_read_config(dev, PCIR_MAPS + i * 4, 4);
2176 	sc->sc_saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4);
2177 	sc->sc_saved_intline = pci_read_config(dev, PCIR_INTLINE, 1);
2178 	sc->sc_saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
2179 	sc->sc_saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
2180 
2181 	sc->suspended = 1;
2182 
2183 	return 0;
2184 }
2185 
2186 /*
2187  * Device resume routine.  Restore some PCI settings in case the BIOS
2188  * doesn't, re-enable busmastering, and restart the interface if
2189  * appropriate.
2190  */
2191 static int
2192 vge_resume(struct device *dev)
2193 {
2194 	struct vge_softc *sc;
2195 	struct ifnet *ifp;
2196 	int i;
2197 
2198 	sc = (void *)dev;
2199 	ifp = &sc->sc_ethercom.ec_if;
2200 
2201 	/* Restore the PCI BARs saved at suspend time; XXX better way? */
2202 	for (i = 0; i < 5; i++)
2203 		pci_write_config(dev, PCIR_MAPS + i * 4,
2204 		    sc->sc_saved_maps[i], 4);
2205 	pci_write_config(dev, PCIR_BIOS, sc->sc_saved_biosaddr, 4);
2206 	pci_write_config(dev, PCIR_INTLINE, sc->sc_saved_intline, 1);
2207 	pci_write_config(dev, PCIR_CACHELNSZ, sc->sc_saved_cachelnsz, 1);
2208 	pci_write_config(dev, PCIR_LATTIMER, sc->sc_saved_lattimer, 1);
2209 
2210 	/* reenable busmastering */
2211 	pci_enable_busmaster(dev);
2212 	pci_enable_io(dev, SYS_RES_MEMORY);
2213 
2214 	/* reinitialize interface if necessary */
2215 	if (ifp->if_flags & IFF_UP)
2216 		vge_init(ifp);
2217 
2218 	sc->suspended = 0;
2219 
2220 	return 0;
2221 }
2222 #endif
2223 
2224 /*
2225  * Stop all chip I/O so that the kernel's probe routines don't
2226  * get confused by errant DMAs when rebooting.
2227  */
2228 static void
2229 vge_shutdown(void *arg)
2230 {
2231 	struct vge_softc *sc;
2232 
2233 	sc = arg;
2234 	vge_stop(sc);
2235 }
2236