xref: /openbsd-src/sys/dev/pci/if_lge.c (revision 0b7734b3d77bb9b21afec6f4621cae6c805dbd45)
1 /*	$OpenBSD: if_lge.c,v 1.72 2016/04/13 10:34:32 mpi Exp $	*/
2 /*
3  * Copyright (c) 2001 Wind River Systems
4  * Copyright (c) 1997, 1998, 1999, 2000, 2001
5  *	Bill Paul <william.paul@windriver.com>.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by Bill Paul.
18  * 4. Neither the name of the author nor the names of any co-contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  * $FreeBSD: src/sys/dev/lge/if_lge.c,v 1.6 2001/06/20 19:47:55 bmilekic Exp $
35  */
36 
37 /*
38  * Level 1 LXT1001 gigabit ethernet driver for FreeBSD. Public
39  * documentation not available, but ask me nicely.
40  *
41  * Written by Bill Paul <william.paul@windriver.com>
42  * Wind River Systems
43  */
44 
45 /*
46  * The Level 1 chip is used on some D-Link, SMC and Addtron NICs.
47  * It's a 64-bit PCI part that supports TCP/IP checksum offload,
48  * VLAN tagging/insertion, GMII and TBI (1000baseX) ports. There
49  * are three supported methods for data transfer between host and
50  * NIC: programmed I/O, traditional scatter/gather DMA and Packet
51  * Propulsion Technology (tm) DMA. The latter mechanism is a form
52  * of double buffer DMA where the packet data is copied to a
53  * pre-allocated DMA buffer who's physical address has been loaded
54  * into a table at device initialization time. The rationale is that
55  * the virtual to physical address translation needed for normal
56  * scatter/gather DMA is more expensive than the data copy needed
57  * for double buffering. This may be true in Windows NT and the like,
58  * but it isn't true for us, at least on the x86 arch. This driver
59  * uses the scatter/gather I/O method for both TX and RX.
60  *
61  * The LXT1001 only supports TCP/IP checksum offload on receive.
62  * Also, the VLAN tagging is done using a 16-entry table which allows
63  * the chip to perform hardware filtering based on VLAN tags. Sadly,
64  * our vlan support doesn't currently play well with this kind of
65  * hardware support.
66  *
67  * Special thanks to:
68  * - Jeff James at Intel, for arranging to have the LXT1001 manual
69  *   released (at long last)
70  * - Beny Chen at D-Link, for actually sending it to me
71  * - Brad Short and Keith Alexis at SMC, for sending me sample
72  *   SMC9462SX and SMC9462TX adapters for testing
73  * - Paul Saab at Y!, for not killing me (though it remains to be seen
74  *   if in fact he did me much of a favor)
75  */
76 
77 #include "bpfilter.h"
78 
79 #include <sys/param.h>
80 #include <sys/systm.h>
81 #include <sys/sockio.h>
82 #include <sys/mbuf.h>
83 #include <sys/malloc.h>
84 #include <sys/kernel.h>
85 #include <sys/device.h>
86 #include <sys/socket.h>
87 
88 #include <net/if.h>
89 #include <net/if_media.h>
90 
91 #include <netinet/in.h>
92 #include <netinet/if_ether.h>
93 
94 #if NBPFILTER > 0
95 #include <net/bpf.h>
96 #endif
97 
98 #include <uvm/uvm_extern.h>              /* for vtophys */
99 #define	VTOPHYS(v)	vtophys((vaddr_t)(v))
100 
101 #include <dev/pci/pcireg.h>
102 #include <dev/pci/pcivar.h>
103 #include <dev/pci/pcidevs.h>
104 
105 #include <dev/mii/mii.h>
106 #include <dev/mii/miivar.h>
107 
108 #define LGE_USEIOSPACE
109 
110 #include <dev/pci/if_lgereg.h>
111 
112 int lge_probe(struct device *, void *, void *);
113 void lge_attach(struct device *, struct device *, void *);
114 
115 struct cfattach lge_ca = {
116 	sizeof(struct lge_softc), lge_probe, lge_attach
117 };
118 
119 struct cfdriver lge_cd = {
120 	NULL, "lge", DV_IFNET
121 };
122 
123 int lge_newbuf(struct lge_softc *, struct lge_rx_desc *,
124 			     struct mbuf *);
125 int lge_encap(struct lge_softc *, struct mbuf *, u_int32_t *);
126 void lge_rxeof(struct lge_softc *, int);
127 void lge_txeof(struct lge_softc *);
128 int lge_intr(void *);
129 void lge_tick(void *);
130 void lge_start(struct ifnet *);
131 int lge_ioctl(struct ifnet *, u_long, caddr_t);
132 void lge_init(void *);
133 void lge_stop(struct lge_softc *);
134 void lge_watchdog(struct ifnet *);
135 int lge_ifmedia_upd(struct ifnet *);
136 void lge_ifmedia_sts(struct ifnet *, struct ifmediareq *);
137 
138 void lge_eeprom_getword(struct lge_softc *, int, u_int16_t *);
139 void lge_read_eeprom(struct lge_softc *, caddr_t, int, int, int);
140 
141 int lge_miibus_readreg(struct device *, int, int);
142 void lge_miibus_writereg(struct device *, int, int, int);
143 void lge_miibus_statchg(struct device *);
144 
145 void lge_setmulti(struct lge_softc *);
146 void lge_reset(struct lge_softc *);
147 int lge_list_rx_init(struct lge_softc *);
148 int lge_list_tx_init(struct lge_softc *);
149 
150 #ifdef LGE_DEBUG
151 #define DPRINTF(x)	if (lgedebug) printf x
152 #define DPRINTFN(n,x)	if (lgedebug >= (n)) printf x
153 int	lgedebug = 0;
154 #else
155 #define DPRINTF(x)
156 #define DPRINTFN(n,x)
157 #endif
158 
159 const struct pci_matchid lge_devices[] = {
160 	{ PCI_VENDOR_LEVEL1, PCI_PRODUCT_LEVEL1_LXT1001 }
161 };
162 
163 #define LGE_SETBIT(sc, reg, x)				\
164 	CSR_WRITE_4(sc, reg,				\
165 		CSR_READ_4(sc, reg) | (x))
166 
167 #define LGE_CLRBIT(sc, reg, x)				\
168 	CSR_WRITE_4(sc, reg,				\
169 		CSR_READ_4(sc, reg) & ~(x))
170 
171 #define SIO_SET(x)					\
172 	CSR_WRITE_4(sc, LGE_MEAR, CSR_READ_4(sc, LGE_MEAR) | x)
173 
174 #define SIO_CLR(x)					\
175 	CSR_WRITE_4(sc, LGE_MEAR, CSR_READ_4(sc, LGE_MEAR) & ~x)
176 
177 /*
178  * Read a word of data stored in the EEPROM at address 'addr.'
179  */
180 void
181 lge_eeprom_getword(struct lge_softc *sc, int addr, u_int16_t *dest)
182 {
183 	int			i;
184 	u_int32_t		val;
185 
186 	CSR_WRITE_4(sc, LGE_EECTL, LGE_EECTL_CMD_READ|
187 	    LGE_EECTL_SINGLEACCESS|((addr >> 1) << 8));
188 
189 	for (i = 0; i < LGE_TIMEOUT; i++)
190 		if (!(CSR_READ_4(sc, LGE_EECTL) & LGE_EECTL_CMD_READ))
191 			break;
192 
193 	if (i == LGE_TIMEOUT) {
194 		printf("%s: EEPROM read timed out\n", sc->sc_dv.dv_xname);
195 		return;
196 	}
197 
198 	val = CSR_READ_4(sc, LGE_EEDATA);
199 
200 	if (addr & 1)
201 		*dest = (val >> 16) & 0xFFFF;
202 	else
203 		*dest = val & 0xFFFF;
204 }
205 
206 /*
207  * Read a sequence of words from the EEPROM.
208  */
209 void
210 lge_read_eeprom(struct lge_softc *sc, caddr_t dest, int off,
211     int cnt, int swap)
212 {
213 	int			i;
214 	u_int16_t		word = 0, *ptr;
215 
216 	for (i = 0; i < cnt; i++) {
217 		lge_eeprom_getword(sc, off + i, &word);
218 		ptr = (u_int16_t *)(dest + (i * 2));
219 		if (swap)
220 			*ptr = ntohs(word);
221 		else
222 			*ptr = word;
223 	}
224 }
225 
/*
 * miibus callback: read a PHY register over the GMII management
 * interface. Returns the 16-bit register value, or 0 on timeout
 * (or for the hidden internal PHY, see below).
 */
int
lge_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct lge_softc	*sc = (struct lge_softc *)dev;
	int			i;

	/*
	 * If we have a non-PCS PHY, pretend that the internal
	 * autoneg stuff at PHY address 0 isn't there so that
	 * the miibus code will find only the GMII PHY.
	 */
	if (sc->lge_pcs == 0 && phy == 0)
		return (0);

	/* Issue the read command: PHY address, register, READ opcode. */
	CSR_WRITE_4(sc, LGE_GMIICTL, (phy << 8) | reg | LGE_GMIICMD_READ);

	/* Poll until the controller drops its busy flag. */
	for (i = 0; i < LGE_TIMEOUT; i++)
		if (!(CSR_READ_4(sc, LGE_GMIICTL) & LGE_GMIICTL_CMDBUSY))
			break;

	if (i == LGE_TIMEOUT) {
		printf("%s: PHY read timed out\n", sc->sc_dv.dv_xname);
		return (0);
	}

	/* The result is delivered in the upper 16 bits of GMIICTL. */
	return (CSR_READ_4(sc, LGE_GMIICTL) >> 16);
}
253 
254 void
255 lge_miibus_writereg(struct device *dev, int phy, int reg, int data)
256 {
257 	struct lge_softc	*sc = (struct lge_softc *)dev;
258 	int			i;
259 
260 	CSR_WRITE_4(sc, LGE_GMIICTL,
261 	    (data << 16) | (phy << 8) | reg | LGE_GMIICMD_WRITE);
262 
263 	for (i = 0; i < LGE_TIMEOUT; i++)
264 		if (!(CSR_READ_4(sc, LGE_GMIICTL) & LGE_GMIICTL_CMDBUSY))
265 			break;
266 
267 	if (i == LGE_TIMEOUT) {
268 		printf("%s: PHY write timed out\n", sc->sc_dv.dv_xname);
269 	}
270 }
271 
/*
 * miibus callback: the negotiated media changed. Mirror the new
 * speed and duplex into the MAC's GMII mode register.
 */
void
lge_miibus_statchg(struct device *dev)
{
	struct lge_softc	*sc = (struct lge_softc *)dev;
	struct mii_data		*mii = &sc->lge_mii;

	/* Clear the speed field, then set the bits matching the new media. */
	LGE_CLRBIT(sc, LGE_GMIIMODE, LGE_GMIIMODE_SPEED);
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
	case IFM_1000_SX:
		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_1000);
		break;
	case IFM_100_TX:
		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_100);
		break;
	case IFM_10_T:
		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_10);
		break;
	default:
		/*
		 * Choose something, even if it's wrong. Clearing
		 * all the bits will hose autoneg on the internal
		 * PHY.
		 */
		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_1000);
		break;
	}

	/* Track the negotiated duplex as well. */
	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_GMIIMODE_FDX);
	} else {
		LGE_CLRBIT(sc, LGE_GMIIMODE, LGE_GMIIMODE_FDX);
	}
}
306 
/*
 * Program the 64-bit multicast hash filter (MAR0/MAR1) from the
 * interface's multicast list. ALLMULTI/PROMISC opens the filter
 * completely by setting every hash bit.
 */
void
lge_setmulti(struct lge_softc *sc)
{
	struct arpcom		*ac = &sc->arpcom;
	struct ifnet		*ifp = &ac->ac_if;
	struct ether_multi      *enm;
	struct ether_multistep  step;
	u_int32_t		h = 0, hashes[2] = { 0, 0 };

	/* Make sure multicast hash table is enabled. */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_MCAST);

	/* Address ranges can't be hashed; fall back to accepting all. */
	if (ac->ac_multirangecnt > 0)
		ifp->if_flags |= IFF_ALLMULTI;

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		CSR_WRITE_4(sc, LGE_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, LGE_MAR1, 0xFFFFFFFF);
		return;
	}

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, LGE_MAR0, 0);
	CSR_WRITE_4(sc, LGE_MAR1, 0);

	/* now program new ones */
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		/* Hash = top 6 bits of the big-endian CRC32 of the address. */
		h = (ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) >> 26) &
		    0x0000003F;
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		ETHER_NEXT_MULTI(step, enm);
	}

	CSR_WRITE_4(sc, LGE_MAR0, hashes[0]);
	CSR_WRITE_4(sc, LGE_MAR1, hashes[1]);
}
347 
348 void
349 lge_reset(struct lge_softc *sc)
350 {
351 	int			i;
352 
353 	LGE_SETBIT(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL0|LGE_MODE1_SOFTRST);
354 
355 	for (i = 0; i < LGE_TIMEOUT; i++) {
356 		if (!(CSR_READ_4(sc, LGE_MODE1) & LGE_MODE1_SOFTRST))
357 			break;
358 	}
359 
360 	if (i == LGE_TIMEOUT)
361 		printf("%s: reset never completed\n", sc->sc_dv.dv_xname);
362 
363 	/* Wait a little while for the chip to get its brains in order. */
364 	DELAY(1000);
365 }
366 
367 /*
368  * Probe for a Level 1 chip. Check the PCI vendor and device
369  * IDs against our list and return a device name if we find a match.
370  */
371 int
372 lge_probe(struct device *parent, void *match, void *aux)
373 {
374 	return (pci_matchbyid((struct pci_attach_args *)aux, lge_devices,
375 	    nitems(lge_devices)));
376 }
377 
378 /*
379  * Attach the interface. Allocate softc structures, do ifmedia
380  * setup and ethernet/BPF attach.
381  */
void
lge_attach(struct device *parent, struct device *self, void *aux)
{
	struct lge_softc	*sc = (struct lge_softc *)self;
	struct pci_attach_args	*pa = aux;
	pci_chipset_tag_t	pc = pa->pa_pc;
	pci_intr_handle_t	ih;
	const char		*intrstr = NULL;
	bus_size_t		size;
	bus_dma_segment_t	seg;
	bus_dmamap_t		dmamap;
	int			rseg;
	u_char			eaddr[ETHER_ADDR_LEN];
#ifndef LGE_USEIOSPACE
	pcireg_t		memtype;
#endif
	struct ifnet		*ifp;
	caddr_t			kva;

	/* Make sure the device is powered up before touching registers. */
	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);

	/*
	 * Map control/status registers.
	 */
	DPRINTFN(5, ("Map control/status regs\n"));

	DPRINTFN(5, ("pci_mapreg_map\n"));
#ifdef LGE_USEIOSPACE
	if (pci_mapreg_map(pa, LGE_PCI_LOIO, PCI_MAPREG_TYPE_IO, 0,
	    &sc->lge_btag, &sc->lge_bhandle, NULL, &size, 0)) {
		printf(": can't map i/o space\n");
		return;
	}
#else
	memtype = pci_mapreg_type(pc, pa->pa_tag, LGE_PCI_LOMEM);
	if (pci_mapreg_map(pa, LGE_PCI_LOMEM, memtype, 0, &sc->lge_btag,
	    &sc->lge_bhandle, NULL, &size, 0)) {
		printf(": can't map mem space\n");
		return;
	}
#endif

	/* Hook up the interrupt handler. */
	DPRINTFN(5, ("pci_intr_map\n"));
	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		goto fail_1;
	}

	DPRINTFN(5, ("pci_intr_string\n"));
	intrstr = pci_intr_string(pc, ih);
	DPRINTFN(5, ("pci_intr_establish\n"));
	sc->lge_intrhand = pci_intr_establish(pc, ih, IPL_NET, lge_intr, sc,
					      sc->sc_dv.dv_xname);
	if (sc->lge_intrhand == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail_1;
	}
	printf(": %s", intrstr);

	/* Reset the adapter. */
	DPRINTFN(5, ("lge_reset\n"));
	lge_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 * Each call fetches one 16-bit word (two address bytes).
	 */
	DPRINTFN(5, ("lge_read_eeprom\n"));
	lge_read_eeprom(sc, (caddr_t)&eaddr[0], LGE_EE_NODEADDR_0, 1, 0);
	lge_read_eeprom(sc, (caddr_t)&eaddr[2], LGE_EE_NODEADDR_1, 1, 0);
	lge_read_eeprom(sc, (caddr_t)&eaddr[4], LGE_EE_NODEADDR_2, 1, 0);

	/*
	 * A Level 1 chip was detected. Inform the world.
	 */
	printf(", address %s\n", ether_sprintf(eaddr));

	bcopy(eaddr, &sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);

	/*
	 * Allocate, map and load one contiguous DMA-able chunk that
	 * holds both descriptor rings (struct lge_list_data).
	 */
	sc->sc_dmatag = pa->pa_dmat;
	DPRINTFN(5, ("bus_dmamem_alloc\n"));
	if (bus_dmamem_alloc(sc->sc_dmatag, sizeof(struct lge_list_data),
	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT | BUS_DMA_ZERO)) {
		printf("%s: can't alloc rx buffers\n", sc->sc_dv.dv_xname);
		goto fail_2;
	}
	DPRINTFN(5, ("bus_dmamem_map\n"));
	if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg,
			   sizeof(struct lge_list_data), &kva,
			   BUS_DMA_NOWAIT)) {
		printf("%s: can't map dma buffers (%zd bytes)\n",
		       sc->sc_dv.dv_xname, sizeof(struct lge_list_data));
		goto fail_3;
	}
	DPRINTFN(5, ("bus_dmamem_create\n"));
	if (bus_dmamap_create(sc->sc_dmatag, sizeof(struct lge_list_data), 1,
			      sizeof(struct lge_list_data), 0,
			      BUS_DMA_NOWAIT, &dmamap)) {
		printf("%s: can't create dma map\n", sc->sc_dv.dv_xname);
		goto fail_4;
	}
	DPRINTFN(5, ("bus_dmamem_load\n"));
	if (bus_dmamap_load(sc->sc_dmatag, dmamap, kva,
			    sizeof(struct lge_list_data), NULL,
			    BUS_DMA_NOWAIT)) {
		goto fail_5;
	}

	DPRINTFN(5, ("bzero\n"));
	sc->lge_ldata = (struct lge_list_data *)kva;

	/* Set up the network interface. */
	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = lge_ioctl;
	ifp->if_start = lge_start;
	ifp->if_watchdog = lge_watchdog;
	ifp->if_hardmtu = LGE_JUMBO_MTU;
	IFQ_SET_MAXLEN(&ifp->if_snd, LGE_TX_LIST_CNT - 1);
	DPRINTFN(5, ("bcopy\n"));
	bcopy(sc->sc_dv.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	/* Detect whether the internal PCS (TBI) is in use. */
	if (CSR_READ_4(sc, LGE_GMIIMODE) & LGE_GMIIMODE_PCSENH)
		sc->lge_pcs = 1;
	else
		sc->lge_pcs = 0;

	/*
	 * Do MII setup.
	 */
	DPRINTFN(5, ("mii setup\n"));
	sc->lge_mii.mii_ifp = ifp;
	sc->lge_mii.mii_readreg = lge_miibus_readreg;
	sc->lge_mii.mii_writereg = lge_miibus_writereg;
	sc->lge_mii.mii_statchg = lge_miibus_statchg;
	ifmedia_init(&sc->lge_mii.mii_media, 0, lge_ifmedia_upd,
		     lge_ifmedia_sts);
	mii_attach(&sc->sc_dv, &sc->lge_mii, 0xffffffff, MII_PHY_ANY,
		   MII_OFFSET_ANY, 0);

	if (LIST_FIRST(&sc->lge_mii.mii_phys) == NULL) {
		/* No PHY: fall back to a single manual media entry. */
		printf("%s: no PHY found!\n", sc->sc_dv.dv_xname);
		ifmedia_add(&sc->lge_mii.mii_media, IFM_ETHER|IFM_MANUAL,
			    0, NULL);
		ifmedia_set(&sc->lge_mii.mii_media, IFM_ETHER|IFM_MANUAL);
	} else {
		DPRINTFN(5, ("ifmedia_set\n"));
		ifmedia_set(&sc->lge_mii.mii_media, IFM_ETHER|IFM_AUTO);
	}

	/*
	 * Call MI attach routine.
	 */
	DPRINTFN(5, ("if_attach\n"));
	if_attach(ifp);
	DPRINTFN(5, ("ether_ifattach\n"));
	ether_ifattach(ifp);
	DPRINTFN(5, ("timeout_set\n"));
	timeout_set(&sc->lge_timeout, lge_tick, sc);
	timeout_add_sec(&sc->lge_timeout, 1);
	return;

	/* Unwind in reverse order of acquisition. */
fail_5:
	bus_dmamap_destroy(sc->sc_dmatag, dmamap);

fail_4:
	bus_dmamem_unmap(sc->sc_dmatag, kva,
	    sizeof(struct lge_list_data));

fail_3:
	bus_dmamem_free(sc->sc_dmatag, &seg, rseg);

fail_2:
	pci_intr_disestablish(pc, sc->lge_intrhand);

fail_1:
	bus_space_unmap(sc->lge_btag, sc->lge_bhandle, size);
}
564 
565 /*
566  * Initialize the transmit descriptors.
567  */
568 int
569 lge_list_tx_init(struct lge_softc *sc)
570 {
571 	struct lge_list_data	*ld;
572 	struct lge_ring_data	*cd;
573 	int			i;
574 
575 	cd = &sc->lge_cdata;
576 	ld = sc->lge_ldata;
577 	for (i = 0; i < LGE_TX_LIST_CNT; i++) {
578 		ld->lge_tx_list[i].lge_mbuf = NULL;
579 		ld->lge_tx_list[i].lge_ctl = 0;
580 	}
581 
582 	cd->lge_tx_prod = cd->lge_tx_cons = 0;
583 
584 	return (0);
585 }
586 
587 
588 /*
589  * Initialize the RX descriptors and allocate mbufs for them. Note that
590  * we arrange the descriptors in a closed ring, so that the last descriptor
591  * points back to the first.
592  */
int
lge_list_rx_init(struct lge_softc *sc)
{
	struct lge_list_data	*ld;
	struct lge_ring_data	*cd;
	int			i;

	ld = sc->lge_ldata;
	cd = &sc->lge_cdata;

	/* Rewind the ring: producer and consumer at slot zero. */
	cd->lge_rx_prod = cd->lge_rx_cons = 0;

	/* Only 32-bit descriptor addresses are used; high DWORD is 0. */
	CSR_WRITE_4(sc, LGE_RXDESC_ADDR_HI, 0);

	for (i = 0; i < LGE_RX_LIST_CNT; i++) {
		/* Stop early once the RX command FIFO has no free slots. */
		if (CSR_READ_1(sc, LGE_RXCMDFREE_8BIT) == 0)
			break;
		/* Allocate a fresh cluster and queue the descriptor. */
		if (lge_newbuf(sc, &ld->lge_rx_list[i], NULL) == ENOBUFS)
			return (ENOBUFS);
	}

	/* Clear possible 'rx command queue empty' interrupt. */
	CSR_READ_4(sc, LGE_ISR);

	return (0);
}
619 
620 /*
621  * Initialize a RX descriptor and attach a MBUF cluster.
622  */
int
lge_newbuf(struct lge_softc *sc, struct lge_rx_desc *c, struct mbuf *m)
{
	struct mbuf		*m_new = NULL;

	if (m == NULL) {
		/* Allocate a fresh jumbo-sized cluster. */
		m_new = MCLGETI(NULL, LGE_JLEN, NULL, M_DONTWAIT);
		if (m_new == NULL)
			return (ENOBUFS);
	} else {
		/*
		 * We're re-using a previously allocated mbuf;
		 * be sure to re-init pointers and lengths to
		 * default values.
		 */
		m_new = m;
		m_new->m_data = m_new->m_ext.ext_buf;
	}
	m_new->m_len = m_new->m_pkthdr.len = LGE_JLEN;

	/*
	 * Adjust alignment so packet payload begins on a
	 * longword boundary. Mandatory for Alpha, useful on
	 * x86 too.
	*/
	m_adj(m_new, ETHER_ALIGN);

	/* Fill in the descriptor: single fragment covering the cluster. */
	c->lge_mbuf = m_new;
	c->lge_fragptr_hi = 0;
	c->lge_fragptr_lo = VTOPHYS(mtod(m_new, caddr_t));
	c->lge_fraglen = m_new->m_len;
	c->lge_ctl = m_new->m_len | LGE_RXCTL_WANTINTR | LGE_FRAGCNT(1);
	c->lge_sts = 0;

	/*
	 * Put this buffer in the RX command FIFO. To do this,
	 * we just write the physical address of the descriptor
	 * into the RX descriptor address registers. Note that
	 * there are two registers, one high DWORD and one low
	 * DWORD, which lets us specify a 64-bit address if
	 * desired. We only use a 32-bit address for now.
	 * Writing to the low DWORD register is what actually
	 * causes the command to be issued, so we do that
	 * last.
	 */
	CSR_WRITE_4(sc, LGE_RXDESC_ADDR_LO, VTOPHYS(c));
	LGE_INC(sc->lge_cdata.lge_rx_prod, LGE_RX_LIST_CNT);

	return (0);
}
673 
674 /*
675  * A frame has been uploaded: pass the resulting mbuf chain up to
676  * the higher level protocols.
677  */
void
lge_rxeof(struct lge_softc *sc, int cnt)
{
	struct mbuf_list	ml = MBUF_LIST_INITIALIZER();
        struct mbuf		*m;
        struct ifnet		*ifp;
	struct lge_rx_desc	*cur_rx;
	int			c, i, total_len = 0;
	u_int32_t		rxsts, rxctl;

	ifp = &sc->arpcom.ac_if;

	/* Find out how many frames were processed. */
	c = cnt;
	i = sc->lge_cdata.lge_rx_cons;

	/* Suck them in. */
	while(c) {
		struct mbuf		*m0 = NULL;

		/* Snapshot the descriptor, then detach its mbuf. */
		cur_rx = &sc->lge_ldata->lge_rx_list[i];
		rxctl = cur_rx->lge_ctl;
		rxsts = cur_rx->lge_sts;
		m = cur_rx->lge_mbuf;
		cur_rx->lge_mbuf = NULL;
		total_len = LGE_RXBYTES(cur_rx);
		LGE_INC(i, LGE_RX_LIST_CNT);
		c--;

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
	 	 * comes up in the ring.
		 */
		if (rxctl & LGE_RXCTL_ERRMASK) {
			ifp->if_ierrors++;
			lge_newbuf(sc, &LGE_RXTAIL(sc), m);
			continue;
		}

		if (lge_newbuf(sc, &LGE_RXTAIL(sc), NULL) == ENOBUFS) {
			/*
			 * No cluster available: copy the frame out so the
			 * old cluster can be recycled into the ring.
			 */
			m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN);
			lge_newbuf(sc, &LGE_RXTAIL(sc), m);
			if (m0 == NULL) {
				ifp->if_ierrors++;
				continue;
			}
			m = m0;
		} else {
			m->m_pkthdr.len = m->m_len = total_len;
		}

		/* Do IP checksum checking. */
		if (rxsts & LGE_RXSTS_ISIP) {
			if (!(rxsts & LGE_RXSTS_IPCSUMERR))
				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
		}
		if (rxsts & LGE_RXSTS_ISTCP) {
			if (!(rxsts & LGE_RXSTS_TCPCSUMERR))
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
		}
		if (rxsts & LGE_RXSTS_ISUDP) {
			if (!(rxsts & LGE_RXSTS_UDPCSUMERR))
				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
		}

		/* Collect the frame; delivered in one batch below. */
		ml_enqueue(&ml, m);
	}

	if_input(ifp, &ml);

	sc->lge_cdata.lge_rx_cons = i;
}
752 
753 /*
754  * A frame was downloaded to the chip. It's safe for us to clean up
755  * the list buffers.
756  */
757 
758 void
759 lge_txeof(struct lge_softc *sc)
760 {
761 	struct lge_tx_desc	*cur_tx = NULL;
762 	struct ifnet		*ifp;
763 	u_int32_t		idx, txdone;
764 
765 	ifp = &sc->arpcom.ac_if;
766 
767 	/* Clear the timeout timer. */
768 	ifp->if_timer = 0;
769 
770 	/*
771 	 * Go through our tx list and free mbufs for those
772 	 * frames that have been transmitted.
773 	 */
774 	idx = sc->lge_cdata.lge_tx_cons;
775 	txdone = CSR_READ_1(sc, LGE_TXDMADONE_8BIT);
776 
777 	while (idx != sc->lge_cdata.lge_tx_prod && txdone) {
778 		cur_tx = &sc->lge_ldata->lge_tx_list[idx];
779 
780 		ifp->if_opackets++;
781 		if (cur_tx->lge_mbuf != NULL) {
782 			m_freem(cur_tx->lge_mbuf);
783 			cur_tx->lge_mbuf = NULL;
784 		}
785 		cur_tx->lge_ctl = 0;
786 
787 		txdone--;
788 		LGE_INC(idx, LGE_TX_LIST_CNT);
789 		ifp->if_timer = 0;
790 	}
791 
792 	sc->lge_cdata.lge_tx_cons = idx;
793 
794 	if (cur_tx != NULL)
795 		ifq_clr_oactive(&ifp->if_snd);
796 }
797 
/*
 * Once-a-second timer: harvest collision statistics and, while the
 * link is down, poll the PHY until it comes up.
 */
void
lge_tick(void *xsc)
{
	struct lge_softc	*sc = xsc;
	struct mii_data		*mii = &sc->lge_mii;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	int			s;

	s = splnet();

	/* Read the two collision counters via the indexed stats window. */
	CSR_WRITE_4(sc, LGE_STATSIDX, LGE_STATS_SINGLE_COLL_PKTS);
	ifp->if_collisions += CSR_READ_4(sc, LGE_STATSVAL);
	CSR_WRITE_4(sc, LGE_STATSIDX, LGE_STATS_MULTI_COLL_PKTS);
	ifp->if_collisions += CSR_READ_4(sc, LGE_STATSVAL);

	if (!sc->lge_link) {
		mii_tick(mii);
		if (mii->mii_media_status & IFM_ACTIVE &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
			/* Link came up: kick out anything that queued up. */
			sc->lge_link++;
			if (!IFQ_IS_EMPTY(&ifp->if_snd))
				lge_start(ifp);
		}
	}

	/* Re-arm for the next second. */
	timeout_add_sec(&sc->lge_timeout, 1);

	splx(s);
}
827 
/*
 * Interrupt handler. Returns nonzero iff the interrupt was ours.
 */
int
lge_intr(void *arg)
{
	struct lge_softc	*sc;
	struct ifnet		*ifp;
	u_int32_t		status;
	int			claimed = 0;

	sc = arg;
	ifp = &sc->arpcom.ac_if;

	/* Suppress unwanted interrupts */
	if (!(ifp->if_flags & IFF_UP)) {
		lge_stop(sc);
		return (0);
	}

	for (;;) {
		/*
		 * Reading the ISR register clears all interrupts, and
		 * clears the 'interrupts enabled' bit in the IMR
		 * register.
		 */
		status = CSR_READ_4(sc, LGE_ISR);

		if ((status & LGE_INTRS) == 0)
			break;

		claimed = 1;

		if ((status & (LGE_ISR_TXCMDFIFO_EMPTY|LGE_ISR_TXDMA_DONE)))
			lge_txeof(sc);

		if (status & LGE_ISR_RXDMA_DONE)
			lge_rxeof(sc, LGE_RX_DMACNT(status));

		/* RX command FIFO ran dry: reinitialize the whole chip. */
		if (status & LGE_ISR_RXCMDFIFO_EMPTY)
			lge_init(sc);

		if (status & LGE_ISR_PHY_INTR) {
			/* Link change: force lge_tick() to re-poll now. */
			sc->lge_link = 0;
			timeout_del(&sc->lge_timeout);
			lge_tick(sc);
		}
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_SETRST_CTL0|LGE_IMR_INTR_ENB);

	/* TX descriptors may have freed up; restart output. */
	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		lge_start(ifp);

	return (claimed);
}
882 
883 /*
884  * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
885  * pointers to the fragment pointers.
886  */
int
lge_encap(struct lge_softc *sc, struct mbuf *m_head, u_int32_t *txidx)
{
	struct lge_frag		*f = NULL;
	struct lge_tx_desc	*cur_tx;
	struct mbuf		*m;
	int			frag = 0, tot_len = 0;

	/*
 	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
 	 * of fragments or hit the end of the mbuf chain.
	 */
	m = m_head;
	cur_tx = &sc->lge_ldata->lge_tx_list[*txidx];
	frag = 0;

	for (m = m_head; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			/* One descriptor fragment per non-empty mbuf. */
			tot_len += m->m_len;
			f = &cur_tx->lge_frags[frag];
			f->lge_fraglen = m->m_len;
			f->lge_fragptr_lo = VTOPHYS(mtod(m, vaddr_t));
			f->lge_fragptr_hi = 0;
			frag++;
		}
	}

	/*
	 * NOTE(review): the loop above only terminates when m == NULL,
	 * so this check can never fire; there is also no bound check of
	 * 'frag' against the lge_frags array size. Presumably any mbuf
	 * chain handed to us fits — verify against the fragment count
	 * in if_lgereg.h.
	 */
	if (m != NULL)
		return (ENOBUFS);

	cur_tx->lge_mbuf = m_head;
	cur_tx->lge_ctl = LGE_TXCTL_WANTINTR|LGE_FRAGCNT(frag)|tot_len;
	LGE_INC((*txidx), LGE_TX_LIST_CNT);

	/* Queue for transmit */
	CSR_WRITE_4(sc, LGE_TXDESC_ADDR_LO, VTOPHYS(cur_tx));

	return (0);
}
927 
928 /*
929  * Main transmit routine. To avoid having to do mbuf copies, we put pointers
930  * to the mbuf data regions directly in the transmit lists. We also save a
931  * copy of the pointers since the transmit list fragment pointers are
932  * physical addresses.
933  */
934 
void
lge_start(struct ifnet *ifp)
{
	struct lge_softc	*sc;
	struct mbuf		*m_head = NULL;
	u_int32_t		idx;
	int			pkts = 0;

	sc = ifp->if_softc;

	/* Nothing to do until lge_tick()/lge_ifmedia_upd() sees a link. */
	if (!sc->lge_link)
		return;

	idx = sc->lge_cdata.lge_tx_prod;

	if (ifq_is_oactive(&ifp->if_snd))
		return;

	/* Fill TX descriptors until the ring or the command FIFO is full. */
	while(sc->lge_ldata->lge_tx_list[idx].lge_mbuf == NULL) {
		if (CSR_READ_1(sc, LGE_TXCMDFREE_8BIT) == 0)
			break;

		m_head = ifq_deq_begin(&ifp->if_snd);
		if (m_head == NULL)
			break;

		if (lge_encap(sc, m_head, &idx)) {
			/* Ring full: put the packet back and stall output. */
			ifq_deq_rollback(&ifp->if_snd, m_head);
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		/* now we are committed to transmit the packet */
		ifq_deq_commit(&ifp->if_snd, m_head);
		pkts++;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
	}
	if (pkts == 0)
		return;

	sc->lge_cdata.lge_tx_prod = idx;

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}
990 
991 void
992 lge_init(void *xsc)
993 {
994 	struct lge_softc	*sc = xsc;
995 	struct ifnet		*ifp = &sc->arpcom.ac_if;
996 	int			s;
997 
998 	s = splnet();
999 
1000 	/*
1001 	 * Cancel pending I/O and free all RX/TX buffers.
1002 	 */
1003 	lge_stop(sc);
1004 	lge_reset(sc);
1005 
1006 	/* Set MAC address */
1007 	CSR_WRITE_4(sc, LGE_PAR0, *(u_int32_t *)(&sc->arpcom.ac_enaddr[0]));
1008 	CSR_WRITE_4(sc, LGE_PAR1, *(u_int32_t *)(&sc->arpcom.ac_enaddr[4]));
1009 
1010 	/* Init circular RX list. */
1011 	if (lge_list_rx_init(sc) == ENOBUFS) {
1012 		printf("%s: initialization failed: no "
1013 		       "memory for rx buffers\n", sc->sc_dv.dv_xname);
1014 		lge_stop(sc);
1015 		splx(s);
1016 		return;
1017 	}
1018 
1019 	/*
1020 	 * Init tx descriptors.
1021 	 */
1022 	lge_list_tx_init(sc);
1023 
1024 	/* Set initial value for MODE1 register. */
1025 	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_UCAST|
1026 	    LGE_MODE1_TX_CRC|LGE_MODE1_TXPAD|
1027 	    LGE_MODE1_RX_FLOWCTL|LGE_MODE1_SETRST_CTL0|
1028 	    LGE_MODE1_SETRST_CTL1|LGE_MODE1_SETRST_CTL2);
1029 
1030 	 /* If we want promiscuous mode, set the allframes bit. */
1031 	if (ifp->if_flags & IFF_PROMISC) {
1032 		CSR_WRITE_4(sc, LGE_MODE1,
1033 		    LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_PROMISC);
1034 	} else {
1035 		CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_PROMISC);
1036 	}
1037 
1038 	/*
1039 	 * Set the capture broadcast bit to capture broadcast frames.
1040 	 */
1041 	if (ifp->if_flags & IFF_BROADCAST) {
1042 		CSR_WRITE_4(sc, LGE_MODE1,
1043 		    LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_BCAST);
1044 	} else {
1045 		CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_BCAST);
1046 	}
1047 
1048 	/* Packet padding workaround? */
1049 	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RMVPAD);
1050 
1051 	/* No error frames */
1052 	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_ERRPKTS);
1053 
1054 	/* Receive large frames */
1055 	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_GIANTS);
1056 
1057 	/* Workaround: disable RX/TX flow control */
1058 	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_TX_FLOWCTL);
1059 	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_FLOWCTL);
1060 
1061 	/* Make sure to strip CRC from received frames */
1062 	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_CRC);
1063 
1064 	/* Turn off magic packet mode */
1065 	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_MPACK_ENB);
1066 
1067 	/* Turn off all VLAN stuff */
1068 	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_VLAN_RX|LGE_MODE1_VLAN_TX|
1069 	    LGE_MODE1_VLAN_STRIP|LGE_MODE1_VLAN_INSERT);
1070 
1071 	/* Workarond: FIFO overflow */
1072 	CSR_WRITE_2(sc, LGE_RXFIFO_HIWAT, 0x3FFF);
1073 	CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_SETRST_CTL1|LGE_IMR_RXFIFO_WAT);
1074 
1075 	/*
1076 	 * Load the multicast filter.
1077 	 */
1078 	lge_setmulti(sc);
1079 
1080 	/*
1081 	 * Enable hardware checksum validation for all received IPv4
1082 	 * packets, do not reject packets with bad checksums.
1083 	 */
1084 	CSR_WRITE_4(sc, LGE_MODE2, LGE_MODE2_RX_IPCSUM|
1085 	    LGE_MODE2_RX_TCPCSUM|LGE_MODE2_RX_UDPCSUM|
1086 	    LGE_MODE2_RX_ERRCSUM);
1087 
1088 	/*
1089 	 * Enable the delivery of PHY interrupts based on
	 * link/speed/duplex status changes.
1091 	 */
1092 	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL0|LGE_MODE1_GMIIPOLL);
1093 
1094 	/* Enable receiver and transmitter. */
1095 	CSR_WRITE_4(sc, LGE_RXDESC_ADDR_HI, 0);
1096 	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_ENB);
1097 
1098 	CSR_WRITE_4(sc, LGE_TXDESC_ADDR_HI, 0);
1099 	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_TX_ENB);
1100 
1101 	/*
1102 	 * Enable interrupts.
1103 	 */
1104 	CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_SETRST_CTL0|
1105 	    LGE_IMR_SETRST_CTL1|LGE_IMR_INTR_ENB|LGE_INTRS);
1106 
1107 	lge_ifmedia_upd(ifp);
1108 
1109 	ifp->if_flags |= IFF_RUNNING;
1110 	ifq_clr_oactive(&ifp->if_snd);
1111 
1112 	splx(s);
1113 
1114 	timeout_add_sec(&sc->lge_timeout, 1);
1115 }
1116 
1117 /*
1118  * Set media options.
1119  */
1120 int
1121 lge_ifmedia_upd(struct ifnet *ifp)
1122 {
1123 	struct lge_softc	*sc = ifp->if_softc;
1124 	struct mii_data		*mii = &sc->lge_mii;
1125 
1126 	sc->lge_link = 0;
1127 	if (mii->mii_instance) {
1128 		struct mii_softc *miisc;
1129 		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
1130 			mii_phy_reset(miisc);
1131 	}
1132 	mii_mediachg(mii);
1133 
1134 	return (0);
1135 }
1136 
1137 /*
1138  * Report current media status.
1139  */
1140 void
1141 lge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1142 {
1143 	struct lge_softc	*sc = ifp->if_softc;
1144 	struct mii_data		*mii = &sc->lge_mii;
1145 
1146 	mii_pollstat(mii);
1147 	ifmr->ifm_active = mii->mii_media_active;
1148 	ifmr->ifm_status = mii->mii_media_status;
1149 }
1150 
/*
 * Handle interface ioctl requests: address assignment, up/down and
 * promiscuous/allmulti flag changes, media selection, and multicast
 * filter updates.  Runs at splnet().
 */
int
lge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct lge_softc	*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	struct mii_data		*mii;
	int			s, error = 0;

	s = splnet();

	switch(command) {
	case SIOCSIFADDR:
		/* Assigning an address implies bringing the interface up. */
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			lge_init(sc);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the promiscuous or allmulti state changed
			 * while the chip is already running, reprogram the
			 * RX filter rather than doing a full reinit.
			 * Writing MODE1 with SETRST_CTL1 sets the companion
			 * bits; writing without it clears them.
			 */
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->lge_if_flags & IFF_PROMISC)) {
				/* Promiscuous mode was just enabled. */
				CSR_WRITE_4(sc, LGE_MODE1,
				    LGE_MODE1_SETRST_CTL1|
				    LGE_MODE1_RX_PROMISC);
				lge_setmulti(sc);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->lge_if_flags & IFF_PROMISC) {
				/* Promiscuous mode was just disabled. */
				CSR_WRITE_4(sc, LGE_MODE1,
				    LGE_MODE1_RX_PROMISC);
				lge_setmulti(sc);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    (ifp->if_flags ^ sc->lge_if_flags) & IFF_ALLMULTI) {
				lge_setmulti(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					lge_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				lge_stop(sc);
		}
		/* Remember the flags so the next call can detect deltas. */
		sc->lge_if_flags = ifp->if_flags;
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = &sc->lge_mii;
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;

	default:
		error = ether_ioctl(ifp, &sc->arpcom, command, data);
	}

	/*
	 * ENETRESET from ether_ioctl() means the multicast list changed;
	 * refresh the hardware filter if the chip is running.
	 */
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			lge_setmulti(sc);
		error = 0;
	}

	splx(s);
	return (error);
}
1216 
1217 void
1218 lge_watchdog(struct ifnet *ifp)
1219 {
1220 	struct lge_softc	*sc;
1221 
1222 	sc = ifp->if_softc;
1223 
1224 	ifp->if_oerrors++;
1225 	printf("%s: watchdog timeout\n", sc->sc_dv.dv_xname);
1226 
1227 	lge_stop(sc);
1228 	lge_reset(sc);
1229 	lge_init(sc);
1230 
1231 	if (!IFQ_IS_EMPTY(&ifp->if_snd))
1232 		lge_start(ifp);
1233 }
1234 
1235 /*
1236  * Stop the adapter and free any mbufs allocated to the
1237  * RX and TX lists.
1238  */
1239 void
1240 lge_stop(struct lge_softc *sc)
1241 {
1242 	int			i;
1243 	struct ifnet		*ifp;
1244 
1245 	ifp = &sc->arpcom.ac_if;
1246 	ifp->if_timer = 0;
1247 	timeout_del(&sc->lge_timeout);
1248 
1249 	ifp->if_flags &= ~IFF_RUNNING;
1250 	ifq_clr_oactive(&ifp->if_snd);
1251 
1252 	CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_INTR_ENB);
1253 
1254 	/* Disable receiver and transmitter. */
1255 	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_ENB|LGE_MODE1_TX_ENB);
1256 	sc->lge_link = 0;
1257 
1258 	/*
1259 	 * Free data in the RX lists.
1260 	 */
1261 	for (i = 0; i < LGE_RX_LIST_CNT; i++) {
1262 		if (sc->lge_ldata->lge_rx_list[i].lge_mbuf != NULL) {
1263 			m_freem(sc->lge_ldata->lge_rx_list[i].lge_mbuf);
1264 			sc->lge_ldata->lge_rx_list[i].lge_mbuf = NULL;
1265 		}
1266 	}
1267 	bzero(&sc->lge_ldata->lge_rx_list, sizeof(sc->lge_ldata->lge_rx_list));
1268 
1269 	/*
1270 	 * Free the TX list buffers.
1271 	 */
1272 	for (i = 0; i < LGE_TX_LIST_CNT; i++) {
1273 		if (sc->lge_ldata->lge_tx_list[i].lge_mbuf != NULL) {
1274 			m_freem(sc->lge_ldata->lge_tx_list[i].lge_mbuf);
1275 			sc->lge_ldata->lge_tx_list[i].lge_mbuf = NULL;
1276 		}
1277 	}
1278 
1279 	bzero(&sc->lge_ldata->lge_tx_list, sizeof(sc->lge_ldata->lge_tx_list));
1280 }
1281