1 /*	$OpenBSD: fxp.c,v 1.24 2001/08/10 15:02:05 jason Exp $	*/
2 /*	$NetBSD: if_fxp.c,v 1.2 1997/06/05 02:01:55 thorpej Exp $	*/
3 
4 /*
5  * Copyright (c) 1995, David Greenman
6  * All rights reserved.
7  *
8  * Modifications to support NetBSD:
9  * Copyright (c) 1997 Jason R. Thorpe.  All rights reserved.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice unmodified, this list of conditions, and the following
16  *    disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
25  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31  * SUCH DAMAGE.
32  *
33  *	Id: if_fxp.c,v 1.55 1998/08/04 08:53:12 dg Exp
34  */
35 
36 /*
37  * Intel EtherExpress Pro/100B PCI Fast Ethernet driver
38  */
39 
40 #include "bpfilter.h"
41 #include "vlan.h"
42 
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/mbuf.h>
46 #include <sys/malloc.h>
47 #include <sys/kernel.h>
48 #include <sys/socket.h>
49 #include <sys/syslog.h>
50 #include <sys/timeout.h>
51 
52 #include <net/if.h>
53 #include <net/if_dl.h>
54 #include <net/if_media.h>
55 #include <net/if_types.h>
56 
57 #ifdef INET
58 #include <netinet/in.h>
59 #include <netinet/in_systm.h>
60 #include <netinet/in_var.h>
61 #include <netinet/ip.h>
62 #endif
63 
64 #ifdef IPX
65 #include <netipx/ipx.h>
66 #include <netipx/ipx_if.h>
67 #endif
68 
69 #ifdef NS
70 #include <netns/ns.h>
71 #include <netns/ns_if.h>
72 #endif
73 
74 #if NBPFILTER > 0
75 #include <net/bpf.h>
76 #include <net/bpfdesc.h>
77 #endif
78 
79 #include <sys/ioctl.h>
80 #include <sys/errno.h>
81 #include <sys/device.h>
82 
83 #include <netinet/if_ether.h>
84 
85 #include <vm/vm.h>
86 
87 #include <machine/cpu.h>
88 #include <machine/bus.h>
89 #include <machine/intr.h>
90 
91 #include <dev/mii/miivar.h>
92 
93 #include <dev/ic/fxpreg.h>
94 #include <dev/ic/fxpvar.h>
95 
96 #ifdef __alpha__		/* XXX */
97 /* XXX XXX NEED REAL DMA MAPPING SUPPORT XXX XXX */
98 #undef vtophys
99 #define	vtophys(va)	alpha_XXX_dmamap((vm_offset_t)(va))
100 #endif /* __alpha__ */
101 
102 /*
103  * NOTE!  On the Alpha, we have an alignment constraint.  The
104  * card DMAs the packet immediately following the RFA.  However,
105  * the first thing in the packet is a 14-byte Ethernet header.
106  * This means that the packet is misaligned.  To compensate,
107  * we actually offset the RFA 2 bytes into the cluster.  This
108  * aligns the packet after the Ethernet header at a 32-bit
109  * boundary.  HOWEVER!  This means that the RFA is misaligned!
110  */
111 #define	RFA_ALIGNMENT_FUDGE	2
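
/*
 * Worked example (assuming sizeof(struct fxp_rfa) is 16 bytes): the
 * cluster starts 32-bit aligned, the RFA is placed at offset 2, and the
 * received frame therefore starts at offset 18.  Its 14-byte Ethernet
 * header ends at offset 32, so the payload behind it is 32-bit aligned,
 * while the RFA's own 32-bit fields land on 16-bit boundaries (which is
 * why fxp_lwcopy() below copies them a halfword at a time).
 */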
112 
113 /*
114  * Inline function to copy a 16-bit aligned 32-bit quantity.
115  */
116 static __inline void fxp_lwcopy __P((volatile u_int32_t *,
117 	volatile u_int32_t *));
118 static __inline void
119 fxp_lwcopy(src, dst)
120 	volatile u_int32_t *src, *dst;
121 {
122 	volatile u_int16_t *a = (u_int16_t *)src;
123 	volatile u_int16_t *b = (u_int16_t *)dst;
124 
125 	b[0] = a[0];
126 	b[1] = a[1];
127 }
128 
129 /*
130  * Template for default configuration parameters.
131  * See struct fxp_cb_config for the bit definitions.
132  */
133 static u_char fxp_cb_config_template[] = {
134 	0x0, 0x0,		/* cb_status */
135 	0x80, 0x2,		/* cb_command */
136 	0xff, 0xff, 0xff, 0xff,	/* link_addr */
137 	0x16,	/*  0 */
138 	0x8,	/*  1 */
139 	0x0,	/*  2 */
140 	0x0,	/*  3 */
141 	0x0,	/*  4 */
142 	0x80,	/*  5 */
143 	0xb2,	/*  6 */
144 	0x3,	/*  7 */
145 	0x1,	/*  8 */
146 	0x0,	/*  9 */
147 	0x26,	/* 10 */
148 	0x0,	/* 11 */
149 	0x60,	/* 12 */
150 	0x0,	/* 13 */
151 	0xf2,	/* 14 */
152 	0x48,	/* 15 */
153 	0x0,	/* 16 */
154 	0x40,	/* 17 */
155 	0xf3,	/* 18 */
156 	0x0,	/* 19 */
157 	0x3f,	/* 20 */
158 	0x5	/* 21 */
159 };
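
/*
 * fxp_init() copies this template over the configure command and then
 * overrides the named bit-fields in struct fxp_cb_config; the template
 * supplies the chip's must-be-zero/must-be-one filler around them.
 */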
160 
161 int fxp_mediachange		__P((struct ifnet *));
162 void fxp_mediastatus		__P((struct ifnet *, struct ifmediareq *));
163 void fxp_scb_wait	__P((struct fxp_softc *));
164 void fxp_start			__P((struct ifnet *));
165 int fxp_ioctl			__P((struct ifnet *, u_long, caddr_t));
166 void fxp_init			__P((void *));
167 void fxp_stop			__P((struct fxp_softc *, int));
168 void fxp_watchdog		__P((struct ifnet *));
169 int fxp_add_rfabuf		__P((struct fxp_softc *, struct mbuf *));
170 int fxp_mdi_read		__P((struct device *, int, int));
171 void fxp_mdi_write		__P((struct device *, int, int, int));
172 void fxp_autosize_eeprom	__P((struct fxp_softc *));
173 void fxp_statchg		__P((struct device *));
174 void fxp_read_eeprom		__P((struct fxp_softc *, u_int16_t *,
175 				    int, int));
176 void fxp_stats_update		__P((void *));
177 void fxp_mc_setup		__P((struct fxp_softc *, int));
178 void fxp_scb_cmd		__P((struct fxp_softc *, u_int8_t));
179 
180 /*
181  * Set initial transmit threshold at 64 (512 bytes). This is
182  * increased by 64 (512 bytes) at a time, to a maximum of 192
183  * (1536 bytes), if an underrun occurs.
184  */
185 static int tx_threshold = 64;
186 
187 /*
188  * Number of completed TX commands at which point an interrupt
189  * will be generated to garbage collect the attached buffers.
190  * Must be at least one less than FXP_NTXCB, and should be
191  * enough less so that the transmitter doesn't become idle
192  * during the buffer rundown (which would reduce performance).
193  */
194 #define FXP_CXINT_THRESH 120
195 
196 /*
197  * TxCB list index mask. This is used to do list wrap-around.
198  */
199 #define FXP_TXCB_MASK	(FXP_NTXCB - 1)
200 
201 /*
202  * Number of receive frame area buffers. These are large, so choose
203  * wisely.
204  */
205 #define FXP_NRFABUFS	64
206 
207 /*
208  * Maximum number of seconds that the receiver can be idle before we
209  * assume it's dead and attempt to reset it by reprogramming the
210  * multicast filter. This is part of a work-around for a bug in the
211  * NIC. See fxp_stats_update().
212  */
213 #define FXP_MAX_RX_IDLE	15
214 
215 /*
216  * Wait for the previous command to be accepted (but not necessarily
217  * completed).
218  */
219 void
220 fxp_scb_wait(sc)
221 	struct fxp_softc *sc;
222 {
223 	int i = 10000;
224 
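	/*
	 * The SCB command byte reads zero once the previous command has
	 * been accepted, so poll it for at most ~20ms (10000 * 2us).
	 */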
225 	while (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) && --i)
226 		DELAY(2);
227 	if (i == 0)
228 		printf("%s: warning: SCB timed out\n", sc->sc_dev.dv_xname);
229 }
230 
231 /*************************************************************
232  * Operating system-specific autoconfiguration glue
233  *************************************************************/
234 
235 void	fxp_shutdown __P((void *));
236 void	fxp_power __P((int, void *));
237 
238 struct cfdriver fxp_cd = {
239 	NULL, "fxp", DV_IFNET
240 };
241 
242 /*
243  * Device shutdown routine. Called at system shutdown after sync. The
244  * main purpose of this routine is to shut off receiver DMA so that
245  * kernel memory doesn't get clobbered during warmboot.
246  */
247 void
248 fxp_shutdown(sc)
249 	void *sc;
250 {
251 	fxp_stop((struct fxp_softc *) sc, 0);
252 }
253 
254 /*
255  * Power handler routine. Called when the system is transitioning
256  * into/out of power save modes.  As with fxp_shutdown, the main
257  * purpose of this routine is to shut off receiver DMA so it doesn't
258  * clobber kernel memory at the wrong time.
259  */
260 void
261 fxp_power(why, arg)
262 	int why;
263 	void *arg;
264 {
265 	struct fxp_softc *sc = arg;
266 	struct ifnet *ifp;
267 	int s;
268 
269 	s = splimp();
270 	if (why != PWR_RESUME)
271 		fxp_stop(sc, 0);
272 	else {
273 		ifp = &sc->arpcom.ac_if;
274 		if (ifp->if_flags & IFF_UP)
275 			fxp_init(sc);
276 	}
277 	splx(s);
278 }
279 
280 /*************************************************************
281  * End of operating system-specific autoconfiguration glue
282  *************************************************************/
283 
284 /*
285  * Do generic parts of attach.
286  */
287 int
288 fxp_attach_common(sc, enaddr, intrstr)
289 	struct fxp_softc *sc;
290 	u_int8_t *enaddr;
291 	const char *intrstr;
292 {
293 	struct ifnet *ifp;
294 	u_int16_t data;
295 	int i, err;
296 
297 	/*
298 	 * Reset to a stable state.
299 	 */
300 	CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SOFTWARE_RESET);
301 	DELAY(10);
302 
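	/*
	 * Allocate, map and load one DMA-able control area (struct
	 * fxp_ctrl) that holds the TxCB ring, the configure/IAS/multicast
	 * setup commands and the statistics dump buffer.
	 */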
303 	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct fxp_ctrl),
304 	    PAGE_SIZE, 0, &sc->sc_cb_seg, 1, &sc->sc_cb_nseg, BUS_DMA_NOWAIT))
305 		goto fail;
306 	if (bus_dmamem_map(sc->sc_dmat, &sc->sc_cb_seg, sc->sc_cb_nseg,
307 	    sizeof(struct fxp_ctrl), (caddr_t *)&sc->sc_ctrl,
308 	    BUS_DMA_NOWAIT)) {
309 		bus_dmamem_free(sc->sc_dmat, &sc->sc_cb_seg, sc->sc_cb_nseg);
310 		goto fail;
311 	}
312 	if (bus_dmamap_create(sc->sc_dmat, sizeof(struct fxp_ctrl),
313 	    1, sizeof(struct fxp_ctrl), 0, BUS_DMA_NOWAIT,
314 	    &sc->tx_cb_map)) {
315 		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_ctrl,
316 		    sizeof(struct fxp_ctrl));
317 		bus_dmamem_free(sc->sc_dmat, &sc->sc_cb_seg, sc->sc_cb_nseg);
318 		goto fail;
319 	}
320 	if (bus_dmamap_load(sc->sc_dmat, sc->tx_cb_map, (caddr_t)sc->sc_ctrl,
321 	    sizeof(struct fxp_ctrl), NULL, BUS_DMA_NOWAIT)) {
322 		bus_dmamap_destroy(sc->sc_dmat, sc->tx_cb_map);
323 		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_ctrl,
324 		    sizeof(struct fxp_ctrl));
325 		bus_dmamem_free(sc->sc_dmat, &sc->sc_cb_seg, sc->sc_cb_nseg);
		sc->tx_cb_map = NULL;
		goto fail;
326 	}
327 
328 	for (i = 0; i < FXP_NTXCB; i++) {
329 		if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
330 		    FXP_NTXSEG, MCLBYTES, 0, 0, &sc->txs[i].tx_map)) != 0) {
331 			printf("%s: unable to create tx dma map %d, error %d\n",
332 			    sc->sc_dev.dv_xname, i, err);
333 			goto fail;
334 		}
335 		sc->txs[i].tx_mbuf = NULL;
336 		sc->txs[i].tx_cb = sc->sc_ctrl->tx_cb + i;
337 		sc->txs[i].tx_next = &sc->txs[(i + 1) & FXP_TXCB_MASK];
338 	}
339 	bzero(sc->sc_ctrl, sizeof(struct fxp_ctrl));
340 
341 	/*
342 	 * Pre-allocate our receive buffers.
343 	 */
344 	for (i = 0; i < FXP_NRFABUFS; i++) {
345 		if (fxp_add_rfabuf(sc, NULL) != 0) {
346 			goto fail;
347 		}
348 	}
349 
350 	/*
351 	 * Find out how large of an SEEPROM we have.
352 	 */
353 	fxp_autosize_eeprom(sc);
354 
355 	/*
356 	 * Get info about the primary PHY
357 	 */
358 	fxp_read_eeprom(sc, (u_int16_t *)&data, 6, 1);
359 	sc->phy_primary_addr = data & 0xff;
360 	sc->phy_primary_device = (data >> 8) & 0x3f;
361 	sc->phy_10Mbps_only = data >> 15;
362 
363 	/*
364 	 * Read MAC address.
365 	 */
366 	fxp_read_eeprom(sc, (u_int16_t *)enaddr, 0, 3);
367 
368 	ifp = &sc->arpcom.ac_if;
369 	bcopy(enaddr, sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
370 	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
371 	ifp->if_softc = sc;
372 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
373 	ifp->if_ioctl = fxp_ioctl;
374 	ifp->if_start = fxp_start;
375 	ifp->if_watchdog = fxp_watchdog;
376 	IFQ_SET_READY(&ifp->if_snd);
377 
378 #if NVLAN > 0
379 	/*
380 	 * Only 82558 and newer cards have a bit to ignore oversized frames.
381 	 */
382 	if (sc->not_82557)
383 		ifp->if_capabilities |= IFCAP_VLAN_MTU;
384 #endif
385 
386 	printf(": %s, address %s\n", intrstr,
387 	    ether_sprintf(sc->arpcom.ac_enaddr));
388 
389 	/*
390 	 * Initialize our media structures and probe the MII.
391 	 */
392 	sc->sc_mii.mii_ifp = ifp;
393 	sc->sc_mii.mii_readreg = fxp_mdi_read;
394 	sc->sc_mii.mii_writereg = fxp_mdi_write;
395 	sc->sc_mii.mii_statchg = fxp_statchg;
396 	ifmedia_init(&sc->sc_mii.mii_media, 0, fxp_mediachange,
397 	    fxp_mediastatus);
398 	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
399 	    MII_OFFSET_ANY, MIIF_NOISOLATE);
400 	/* If no phy found, just use auto mode */
401 	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
402 		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL,
403 		    0, NULL);
404 		printf("%s: no phy found, using manual mode\n",
405 		    sc->sc_dev.dv_xname);
406 	}
407 
408 	if (ifmedia_match(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL, 0))
409 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
410 	else if (ifmedia_match(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO, 0))
411 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
412 	else
413 		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_10_T);
414 
415 	/*
416 	 * Attach the interface.
417 	 */
418 	if_attach(ifp);
419 	/*
420 	 * Let the system queue as many packets as we have available
421 	 * TX descriptors.
422 	 */
423 	IFQ_SET_MAXLEN(&ifp->if_snd, FXP_NTXCB - 1);
424 	ether_ifattach(ifp);
425 
426 	/*
427 	 * Add shutdown hook so that DMA is disabled prior to reboot. Not
428  * doing so could allow DMA to corrupt kernel memory during the
429 	 * reboot before the driver initializes.
430 	 */
431 	sc->sc_sdhook = shutdownhook_establish(fxp_shutdown, sc);
432 
433 	/*
434 	 * Add suspend hook, for similar reasons.
435 	 */
436 	sc->sc_powerhook = powerhook_establish(fxp_power, sc);
437 
438 	/*
439 	 * Initialize timeout for statistics update.
440 	 */
441 	timeout_set(&sc->stats_update_to, fxp_stats_update, sc);
442 
443 	return (0);
444 
445  fail:
446 	printf("%s: failed to allocate memory\n", sc->sc_dev.dv_xname);
447 	if (sc->tx_cb_map != NULL) {
448 		bus_dmamap_unload(sc->sc_dmat, sc->tx_cb_map);
449 		bus_dmamap_destroy(sc->sc_dmat, sc->tx_cb_map);
450 		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_ctrl,
451 		    sizeof(struct fxp_ctrl));
452 		bus_dmamem_free(sc->sc_dmat, &sc->sc_cb_seg, sc->sc_cb_nseg);
453 	}
454 	/* frees entire chain */
455 	if (sc->rfa_headm)
456 		m_freem(sc->rfa_headm);
457 
458 	return (ENOMEM);
459 }
460 
461 int
462 fxp_detach(sc)
463 	struct fxp_softc *sc;
464 {
465 	struct ifnet *ifp = &sc->arpcom.ac_if;
466 
467 	/* Unhook our tick handler. */
468 	timeout_del(&sc->stats_update_to);
469 
470 	/* Detach any PHYs we might have. */
471 	if (LIST_FIRST(&sc->sc_mii.mii_phys) != NULL)
472 		mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
473 
474 	/* Delete any remaining media. */
475 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
476 
477 	ether_ifdetach(ifp);
478 	if_detach(ifp);
479 
480 	shutdownhook_disestablish(sc->sc_sdhook);
481 	powerhook_disestablish(sc->sc_powerhook);
482 
483 	return (0);
484 }
485 
486 /*
487  * From NetBSD:
488  *
489  * Figure out EEPROM size.
490  *
491  * 559's can have either 64-word or 256-word EEPROMs, the 558
492  * datasheet only talks about 64-word EEPROMs, and the 557 datasheet
493  * talks about the existence of 16 to 256 word EEPROMs.
494  *
495  * The only known sizes are 64 and 256, where the 256 version is used
496  * by CardBus cards to store CIS information.
497  *
498  * The address is shifted in msb-to-lsb, and after the last
499  * address-bit the EEPROM is supposed to output a `dummy zero' bit,
500  * after which follows the actual data. We try to detect this zero by
501  * probing the data-out bit in the EEPROM control register just after
502  * having shifted in a bit. If the bit is zero, we assume we've
503  * shifted enough address bits. The data-out should be tri-stated
504  * before this, which should translate to a logical one.
505  *
506  * Other ways to do this would be to try to read a register with known
507  * contents with a varying number of address bits, but no such
508  * register seem to be available. The high bits of register 10 are 01
509  * register seems to be available. The high bits of register 10 are 01
510  *
511  * The Linux driver computes a checksum on the EEPROM data, but the
512  * value of this checksum is not very well documented.
513  */
514 void
515 fxp_autosize_eeprom(sc)
516 	struct fxp_softc *sc;
517 {
518 	u_int16_t reg;
519 	int x;
520 
521 	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
522 	/*
523 	 * Shift in read opcode.
524 	 */
525 	for (x = 3; x > 0; x--) {
526 		if (FXP_EEPROM_OPC_READ & (1 << (x - 1))) {
527 			reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
528 		} else {
529 			reg = FXP_EEPROM_EECS;
530 		}
531 		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
532 		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
533 		    reg | FXP_EEPROM_EESK);
534 		DELAY(1);
535 		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
536 		DELAY(1);
537 	}
538 	/*
539 	 * Shift in address.
540 	 * Wait for the dummy zero following a correct address shift.
541 	 */
542 	for (x = 1; x <= 8; x++) {
543 		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
544 		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
545 			FXP_EEPROM_EECS | FXP_EEPROM_EESK);
546 		DELAY(1);
547 		if ((CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO) == 0)
548 			break;
549 		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
550 		DELAY(1);
551 	}
552 	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
553 	DELAY(1);
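	/*
	 * x is now the number of address bits the part wants (6 for a
	 * 64-word EEPROM, 8 for a 256-word one); fxp_read_eeprom() uses
	 * this as its address shift count.
	 */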
554 	sc->eeprom_size = x;
555 }
556 /*
557  * Read from the serial EEPROM. Basically, you manually shift in
558  * the read opcode (one bit at a time) and then shift in the address,
559  * and then you shift out the data (all of this one bit at a time).
560  * The word size is 16 bits, so you have to provide the address for
561  * every 16 bits of data.
562  */
563 void
564 fxp_read_eeprom(sc, data, offset, words)
565 	struct fxp_softc *sc;
566 	u_short *data;
567 	int offset;
568 	int words;
569 {
570 	u_int16_t reg;
571 	int i, x;
572 
573 	for (i = 0; i < words; i++) {
574 		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
575 		/*
576 		 * Shift in read opcode.
577 		 */
578 		for (x = 3; x > 0; x--) {
579 			if (FXP_EEPROM_OPC_READ & (1 << (x - 1))) {
580 				reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
581 			} else {
582 				reg = FXP_EEPROM_EECS;
583 			}
584 			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
585 			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
586 			    reg | FXP_EEPROM_EESK);
587 			DELAY(1);
588 			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
589 			DELAY(1);
590 		}
591 		/*
592 		 * Shift in address.
593 		 */
594 		for (x = sc->eeprom_size; x > 0; x--) {
595 			if ((i + offset) & (1 << (x - 1))) {
596 				reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
597 			} else {
598 				reg = FXP_EEPROM_EECS;
599 			}
600 			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
601 			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
602 			    reg | FXP_EEPROM_EESK);
603 			DELAY(1);
604 			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
605 			DELAY(1);
606 		}
607 		reg = FXP_EEPROM_EECS;
608 		data[i] = 0;
609 		/*
610 		 * Shift out data.
611 		 */
612 		for (x = 16; x > 0; x--) {
613 			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
614 			    reg | FXP_EEPROM_EESK);
615 			DELAY(1);
616 			if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) &
617 			    FXP_EEPROM_EEDO)
618 				data[i] |= (1 << (x - 1));
619 			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
620 			DELAY(1);
621 		}
622 		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
623 		DELAY(1);
624 	}
625 }
626 
627 /*
628  * Start packet transmission on the interface.
629  */
630 void
631 fxp_start(ifp)
632 	struct ifnet *ifp;
633 {
634 	struct fxp_softc *sc = ifp->if_softc;
635 	struct fxp_txsw *txs = sc->sc_cbt_prod;
636 	struct fxp_cb_tx *txc;
637 	struct mbuf *m0, *m = NULL;
638 	int cnt = sc->sc_cbt_cnt, seg;
639 
640 	if ((ifp->if_flags & (IFF_OACTIVE | IFF_RUNNING)) != IFF_RUNNING)
641 		return;
642 
643 	while (1) {
644 		if (cnt >= (FXP_NTXCB - 1)) {
645 			ifp->if_flags |= IFF_OACTIVE;
646 			break;
647 		}
648 
649 		txs = txs->tx_next;
650 
651 		IFQ_POLL(&ifp->if_snd, m0);
652 		if (m0 == NULL)
653 			break;
654 
655 		if (bus_dmamap_load_mbuf(sc->sc_dmat, txs->tx_map,
656 		    m0, BUS_DMA_NOWAIT) != 0) {
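			/*
			 * The chain doesn't fit in the map (too many
			 * segments); copy it into a single mbuf or
			 * cluster and retry the load below.
			 */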
657 			MGETHDR(m, M_DONTWAIT, MT_DATA);
658 			if (m == NULL)
659 				break;
660 			if (m0->m_pkthdr.len > MHLEN) {
661 				MCLGET(m, M_DONTWAIT);
662 				if (!(m->m_flags & M_EXT)) {
663 					m_freem(m);
664 					break;
665 				}
666 			}
667 			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
668 			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
669 			if (bus_dmamap_load_mbuf(sc->sc_dmat, txs->tx_map,
670 			    m, BUS_DMA_NOWAIT) != 0)
671 				break;
672 		}
673 
674 		IFQ_DEQUEUE(&ifp->if_snd, m0);
675 		if (m != NULL) {
676 			m_freem(m0);
677 			m0 = m;
678 			m = NULL;
679 		}
680 
681 		txs->tx_mbuf = m0;
682 
683 #if NBPFILTER > 0
684 		if (ifp->if_bpf)
685 			bpf_mtap(ifp->if_bpf, m0);
686 #endif
687 
688 		bus_dmamap_sync(sc->sc_dmat, txs->tx_map, BUS_DMASYNC_PREREAD);
689 
690 		txc = txs->tx_cb;
691 		txc->tbd_number = txs->tx_map->dm_nsegs;
692 		txc->cb_status = 0;
693 		txc->cb_command = FXP_CB_COMMAND_XMIT | FXP_CB_COMMAND_SF;
694 		txc->tx_threshold = tx_threshold;
695 		for (seg = 0; seg < txs->tx_map->dm_nsegs; seg++) {
696 			txc->tbd[seg].tb_addr =
697 			    txs->tx_map->dm_segs[seg].ds_addr;
698 			txc->tbd[seg].tb_size =
699 			    txs->tx_map->dm_segs[seg].ds_len;
700 		}
701 
702 		++cnt;
703 		sc->sc_cbt_prod = txs;
704 	}
705 
706 	if (cnt != sc->sc_cbt_cnt) {
707 		/* We enqueued at least one. */
708 		ifp->if_timer = 5;
709 
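		/*
		 * Ask for an interrupt and a suspend on the last TxCB we
		 * queued, clear the suspend bit on the TxCB that used to
		 * be last, and let the command unit resume into the new
		 * frames.
		 */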
710 		txs = sc->sc_cbt_prod;
711 		txs->tx_cb->cb_command |= FXP_CB_COMMAND_I | FXP_CB_COMMAND_S;
712 		sc->sc_cbt_prev->tx_cb->cb_command &= ~FXP_CB_COMMAND_S;
713 		sc->sc_cbt_prev = txs;
714 
715 		bus_dmamap_sync(sc->sc_dmat, sc->tx_cb_map,
716 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
717 
718 		fxp_scb_wait(sc);
719 		fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_RESUME);
720 
721 		sc->sc_cbt_cnt = cnt;
722 	}
723 }
724 
725 /*
726  * Process interface interrupts.
727  */
728 int
729 fxp_intr(arg)
730 	void *arg;
731 {
732 	struct fxp_softc *sc = arg;
733 	struct ifnet *ifp = &sc->arpcom.ac_if;
734 	u_int8_t statack;
735 	int claimed = 0, rnr;
736 
737 	/*
738 	 * If the interface isn't running, don't try to
739 	 * service the interrupt.. just ack it and bail.
740 	 */
741 	if ((ifp->if_flags & IFF_RUNNING) == 0) {
742 		statack = CSR_READ_1(sc, FXP_CSR_SCB_STATACK);
743 		if (statack) {
744 			claimed = 1;
745 			CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, statack);
746 		}
747 		return claimed;
748 	}
749 
750 	while ((statack = CSR_READ_1(sc, FXP_CSR_SCB_STATACK)) != 0) {
751 		claimed = 1;
752 		rnr = 0;
753 
754 		/*
755 		 * First ACK all the interrupts in this pass.
756 		 */
757 		CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, statack);
758 
759 		/*
760 		 * Free any finished transmit mbuf chains.
761 		 */
762 		if (statack & (FXP_SCB_STATACK_CXTNO|FXP_SCB_STATACK_CNA)) {
763 			int txcnt = sc->sc_cbt_cnt;
764 			struct fxp_txsw *txs = sc->sc_cbt_cons;
765 
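			/*
			 * Walk forward from the consumer TxCB, reclaiming
			 * the mbuf and dma map of every descriptor the chip
			 * has marked complete (FXP_CB_STATUS_C).
			 */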
766 			bus_dmamap_sync(sc->sc_dmat, sc->tx_cb_map,
767 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
768 
769 			while ((txcnt > 0) &&
770 			    (txs->tx_cb->cb_status & FXP_CB_STATUS_C)) {
771 				if (txs->tx_mbuf != NULL) {
772 					bus_dmamap_sync(sc->sc_dmat,
773 					    txs->tx_map, BUS_DMASYNC_POSTREAD);
774 					bus_dmamap_unload(sc->sc_dmat,
775 					    txs->tx_map);
776 					m_freem(txs->tx_mbuf);
777 					txs->tx_mbuf = NULL;
778 				}
779 				--txcnt;
780 				txs = txs->tx_next;
781 			}
782 			sc->sc_cbt_cons = txs;
783 			sc->sc_cbt_cnt = txcnt;
784 			ifp->if_timer = 0;
785 			ifp->if_flags &= ~IFF_OACTIVE;
786 
787 			if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
788 				/*
789 				 * Try to start more packets transmitting.
790 				 */
791 				fxp_start(ifp);
792 			}
793 		}
794 		/*
795 		 * Process receiver interrupts. If a no-resource (RNR)
796 		 * condition exists, get whatever packets we can and
797 		 * re-start the receiver.
798 		 */
799 		if (statack & (FXP_SCB_STATACK_FR | FXP_SCB_STATACK_RNR)) {
800 			struct mbuf *m;
801 			u_int8_t *rfap;
802 rcvloop:
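			/*
			 * The RFA sits at the front of the cluster (offset
			 * by RFA_ALIGNMENT_FUDGE); look at its status word
			 * to see whether the chip has completed this frame.
			 */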
803 			m = sc->rfa_headm;
804 			rfap = m->m_ext.ext_buf + RFA_ALIGNMENT_FUDGE;
805 
806 			if (*(u_int16_t *)(rfap +
807 			    offsetof(struct fxp_rfa, rfa_status)) &
808 			    FXP_RFA_STATUS_C) {
809 				if (*(u_int16_t *)(rfap +
810 				    offsetof(struct fxp_rfa, rfa_status)) &
811 				    FXP_RFA_STATUS_RNR)
812 					rnr = 1;
813 
814 				/*
815 				 * Remove first packet from the chain.
816 				 */
817 				sc->rfa_headm = m->m_next;
818 				m->m_next = NULL;
819 
820 				/*
821 				 * Add a new buffer to the receive chain.
822 				 * If this fails, the old buffer is recycled
823 				 * instead.
824 				 */
825 				if (fxp_add_rfabuf(sc, m) == 0) {
826 					u_int16_t total_len;
827 
828 					total_len = *(u_int16_t *)(rfap +
829 					    offsetof(struct fxp_rfa,
830 					    actual_size)) &
831 					    (MCLBYTES - 1);
832 					if (total_len <
833 					    sizeof(struct ether_header)) {
834 						m_freem(m);
835 						goto rcvloop;
836 					}
837 					m->m_pkthdr.rcvif = ifp;
838 					m->m_pkthdr.len = m->m_len =
839 					    total_len;
840 #if NBPFILTER > 0
841 					if (ifp->if_bpf)
842 						bpf_mtap(ifp->if_bpf, m);
843 #endif /* NBPFILTER > 0 */
844 					ether_input_mbuf(ifp, m);
845 				}
846 				goto rcvloop;
847 			}
848 			if (rnr) {
849 				fxp_scb_wait(sc);
850 				CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
851 				    vtophys((vaddr_t)sc->rfa_headm->m_ext.ext_buf) + RFA_ALIGNMENT_FUDGE);
852 				fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_START);
853 			}
854 		}
855 	}
856 	return (claimed);
857 }
858 
859 /*
860  * Update packet in/out/collision statistics. The i82557 doesn't
861  * allow you to access these counters without doing a fairly
862  * expensive DMA to get _all_ of the statistics it maintains, so
863  * we do this operation here only once per second. The statistics
864  * counters in the kernel are updated from the previous dump-stats
865  * DMA and then a new dump-stats DMA is started. The on-chip
866  * counters are zeroed when the DMA completes. If we can't start
867  * the DMA immediately, we don't wait - we just prepare to read
868  * them again next time.
869  */
870 void
871 fxp_stats_update(arg)
872 	void *arg;
873 {
874 	struct fxp_softc *sc = arg;
875 	struct ifnet *ifp = &sc->arpcom.ac_if;
876 	struct fxp_stats *sp = &sc->sc_ctrl->stats;
877 	int s;
878 
879 	ifp->if_opackets += sp->tx_good;
880 	ifp->if_collisions += sp->tx_total_collisions;
881 	if (sp->rx_good) {
882 		ifp->if_ipackets += sp->rx_good;
883 		sc->rx_idle_secs = 0;
884 	} else {
885 		sc->rx_idle_secs++;
886 	}
887 	ifp->if_ierrors +=
888 	    sp->rx_crc_errors +
889 	    sp->rx_alignment_errors +
890 	    sp->rx_rnr_errors +
891 	    sp->rx_overrun_errors;
892 	/*
893 	 * If any transmit underruns occurred, bump up the transmit
894 	 * threshold by another 512 bytes (64 * 8).
895 	 */
896 	if (sp->tx_underruns) {
897 		ifp->if_oerrors += sp->tx_underruns;
898 		if (tx_threshold < 192)
899 			tx_threshold += 64;
900 	}
901 	s = splimp();
902 	/*
903 	 * If we haven't received any packets in FXP_MAX_RX_IDLE seconds,
904 	 * then assume the receiver has locked up and attempt to clear
905 	 * the condition by reprogramming the multicast filter. This is
906 	 * a work-around for a bug in the 82557 where the receiver locks
907 	 * up if it gets certain types of garbage in the synchronization
908 	 * bits prior to the packet header. This bug is supposed to only
909 	 * occur in 10Mbps mode, but has been seen to occur in 100Mbps
910 	 * mode as well (perhaps due to a 10/100 speed transition).
911 	 */
912 	if (sc->rx_idle_secs > FXP_MAX_RX_IDLE) {
913 		sc->rx_idle_secs = 0;
914 		fxp_init(sc);
915 		return;
916 	}
917 	/*
918 	 * If there is no pending command, start another stats
919 	 * dump. Otherwise punt for now.
920 	 */
921 	if (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) == 0) {
922 		/*
923 		 * Start another stats dump.
924 		 */
925 		fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMPRESET);
926 	} else {
927 		/*
928 		 * A previous command is still waiting to be accepted.
929 		 * Just zero our copy of the stats and wait for the
930 		 * next timer event to update them.
931 		 */
932 		sp->tx_good = 0;
933 		sp->tx_underruns = 0;
934 		sp->tx_total_collisions = 0;
935 
936 		sp->rx_good = 0;
937 		sp->rx_crc_errors = 0;
938 		sp->rx_alignment_errors = 0;
939 		sp->rx_rnr_errors = 0;
940 		sp->rx_overrun_errors = 0;
941 	}
942 
943 	/* Tick the MII clock. */
944 	mii_tick(&sc->sc_mii);
945 
946 	splx(s);
947 	/*
948 	 * Schedule another timeout one second from now.
949 	 */
950 	timeout_add(&sc->stats_update_to, hz);
951 }
952 
953 /*
954  * Stop the interface. Cancels the statistics updater and resets
955  * the interface.
956  */
957 void
958 fxp_stop(sc, drain)
959 	struct fxp_softc *sc;
960 	int drain;
961 {
962 	struct ifnet *ifp = &sc->arpcom.ac_if;
963 	int i;
964 
965 	/*
966 	 * Turn down interface (done early to avoid bad interactions
967 	 * between panics, shutdown hooks, and the watchdog timer)
968 	 */
969 	ifp->if_timer = 0;
970 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
971 
972 	/*
973 	 * Cancel stats updater.
974 	 */
975 	timeout_del(&sc->stats_update_to);
976 	mii_down(&sc->sc_mii);
977 
978 	/*
979 	 * Issue a selective reset
980 	 */
981 	CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
982 	DELAY(10);
983 
984 	/*
985 	 * Release any xmit buffers.
986 	 */
987 	for (i = 0; i < FXP_NTXCB; i++) {
988 		if (sc->txs[i].tx_mbuf != NULL) {
989 			bus_dmamap_unload(sc->sc_dmat, sc->txs[i].tx_map);
990 			m_freem(sc->txs[i].tx_mbuf);
991 			sc->txs[i].tx_mbuf = NULL;
992 		}
993 	}
994 	sc->sc_cbt_cnt = 0;
995 
996 	if (drain) {
997 		/*
998 		 * Free all the receive buffers then reallocate/reinitialize
999 		 */
1000 		if (sc->rfa_headm != NULL)
1001 			m_freem(sc->rfa_headm);
1002 		sc->rfa_headm = NULL;
1003 		sc->rfa_tailm = NULL;
1004 		for (i = 0; i < FXP_NRFABUFS; i++) {
1005 			if (fxp_add_rfabuf(sc, NULL) != 0) {
1006 				/*
1007 				 * This "can't happen" - we're at splimp()
1008 				 * and we just freed all the buffers we need
1009 				 * above.
1010 				 */
1011 				panic("fxp_stop: no buffers!");
1012 			}
1013 		}
1014 	}
1015 }
1016 
1017 /*
1018  * Watchdog/transmit timeout handler. Called when a
1019  * transmission is started on the interface, but no interrupt is
1020  * received before the timeout. This usually indicates that the
1021  * card has wedged for some reason.
1022  */
1023 void
1024 fxp_watchdog(ifp)
1025 	struct ifnet *ifp;
1026 {
1027 	struct fxp_softc *sc = ifp->if_softc;
1028 
1029 	log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
1030 	ifp->if_oerrors++;
1031 
1032 	fxp_init(sc);
1033 }
1034 
1035 /*
1036  * Submit a command to the i82557.
1037  */
1038 void
1039 fxp_scb_cmd(sc, cmd)
1040 	struct fxp_softc *sc;
1041 	u_int8_t cmd;
1042 {
1043 	if (cmd == FXP_SCB_COMMAND_CU_RESUME &&
1044 	    (sc->sc_flags & FXPF_FIX_RESUME_BUG) != 0) {
1045 		CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_CB_COMMAND_NOP);
1046 		fxp_scb_wait(sc);
1047 	}
1048 	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, cmd);
1049 }
1050 
1051 void
1052 fxp_init(xsc)
1053 	void *xsc;
1054 {
1055 	struct fxp_softc *sc = xsc;
1056 	struct ifnet *ifp = &sc->arpcom.ac_if;
1057 	struct fxp_cb_config *cbp;
1058 	struct fxp_cb_ias *cb_ias;
1059 	struct fxp_cb_tx *txp;
1060 	int i, prm, allm, s;
1061 
1062 	s = splimp();
1063 
1064 	/*
1065 	 * Cancel any pending I/O
1066 	 */
1067 	fxp_stop(sc, 0);
1068 
1069 	/*
1070 	 * Initialize base of CBL and RFA memory. Loading with zero
1071 	 * sets it up for regular linear addressing.
1072 	 */
1073 	fxp_scb_wait(sc);
1074 	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 0);
1075 	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_BASE);
1076 
1077 	fxp_scb_wait(sc);
1078 	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 0);
1079 	fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_BASE);
1080 
1081 	/* First pass: set IFF_ALLMULTI as needed so the config below sees it */
1082 	fxp_mc_setup(sc, 0);
1083 
1084 	/*
1085 	 * Initialize base of dump-stats buffer.
1086 	 */
1087 	fxp_scb_wait(sc);
1088 	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
1089 	    sc->tx_cb_map->dm_segs->ds_addr +
1090 	    offsetof(struct fxp_ctrl, stats));
1091 	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMP_ADR);
1092 
1093 	cbp = &sc->sc_ctrl->u.cfg;
1094 	/*
1095 	 * This bcopy is kind of disgusting, but there are a bunch of must be
1096 	 * zero and must be one bits in this structure and this is the easiest
1097 	 * way to initialize them all to proper values.
1098 	 */
1099 	bcopy(fxp_cb_config_template, (void *)&cbp->cb_status,
1100 		sizeof(fxp_cb_config_template));
1101 
1102 	prm = (ifp->if_flags & IFF_PROMISC) ? 1 : 0;
1103 	allm = (ifp->if_flags & IFF_ALLMULTI) ? 1 : 0;
1104 
1105 	cbp->cb_status =	0;
1106 	cbp->cb_command =	FXP_CB_COMMAND_CONFIG | FXP_CB_COMMAND_EL;
1107 	cbp->link_addr =	0xffffffff;	/* (no) next command */
1108 	cbp->byte_count =	22;		/* (22) bytes to config */
1109 	cbp->rx_fifo_limit =	8;	/* rx fifo threshold (32 bytes) */
1110 	cbp->tx_fifo_limit =	0;	/* tx fifo threshold (0 bytes) */
1111 	cbp->adaptive_ifs =	0;	/* (no) adaptive interframe spacing */
1112 	cbp->rx_dma_bytecount =	0;	/* (no) rx DMA max */
1113 	cbp->tx_dma_bytecount =	0;	/* (no) tx DMA max */
1114 	cbp->dma_bce =		0;	/* (disable) dma max counters */
1115 	cbp->late_scb =		0;	/* (don't) defer SCB update */
1116 	cbp->tno_int =		0;	/* (disable) tx not okay interrupt */
1117 	cbp->ci_int =		1;	/* interrupt on CU idle */
1118 	cbp->save_bf =		prm;	/* save bad frames */
1119 	cbp->disc_short_rx =	!prm;	/* discard short packets */
1120 	cbp->underrun_retry =	1;	/* retry mode (1) on DMA underrun */
1121 	cbp->mediatype =	!sc->phy_10Mbps_only; /* interface mode */
1122 	cbp->nsai =		1;	/* (don't) disable source addr insert */
1123 	cbp->preamble_length =	2;	/* (7 byte) preamble */
1124 	cbp->loopback =		0;	/* (don't) loopback */
1125 	cbp->linear_priority =	0;	/* (normal CSMA/CD operation) */
1126 	cbp->linear_pri_mode =	0;	/* (wait after xmit only) */
1127 	cbp->interfrm_spacing =	6;	/* (96 bits of) interframe spacing */
1128 	cbp->promiscuous =	prm;	/* promiscuous mode */
1129 	cbp->bcast_disable =	0;	/* (don't) disable broadcasts */
1130 	cbp->crscdt =		0;	/* (CRS only) */
1131 	cbp->stripping =	!prm;	/* truncate rx packet to byte count */
1132 	cbp->padding =		1;	/* (do) pad short tx packets */
1133 	cbp->rcv_crc_xfer =	0;	/* (don't) xfer CRC to host */
1134 	cbp->long_rx =		sc->not_82557; /* (enable) long packets */
1135 	cbp->force_fdx =	0;	/* (don't) force full duplex */
1136 	cbp->fdx_pin_en =	1;	/* (enable) FDX# pin */
1137 	cbp->multi_ia =		0;	/* (don't) accept multiple IAs */
1138 	cbp->mc_all =		allm;
1139 
1140 	/*
1141 	 * Start the config command/DMA.
1142 	 */
1143 	fxp_scb_wait(sc);
1144 	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->tx_cb_map->dm_segs->ds_addr +
1145 	    offsetof(struct fxp_ctrl, u.cfg));
1146 	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
1147 	/* ...and wait for it to complete. */
1148 	bus_dmamap_sync(sc->sc_dmat, sc->tx_cb_map,
1149 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1150 	do {
1151 		DELAY(1);
1152 		bus_dmamap_sync(sc->sc_dmat, sc->tx_cb_map,
1153 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1154 	} while ((cbp->cb_status & FXP_CB_STATUS_C) == 0);
1155 
1156 	/*
1157 	 * Now initialize the station address.
1158 	 */
1159 	cb_ias = &sc->sc_ctrl->u.ias;
1160 	cb_ias->cb_status = 0;
1161 	cb_ias->cb_command = FXP_CB_COMMAND_IAS | FXP_CB_COMMAND_EL;
1162 	cb_ias->link_addr = 0xffffffff;
1163 	bcopy(sc->arpcom.ac_enaddr, (void *)cb_ias->macaddr,
1164 	    sizeof(sc->arpcom.ac_enaddr));
1165 
1166 	/*
1167 	 * Start the IAS (Individual Address Setup) command/DMA.
1168 	 */
1169 	fxp_scb_wait(sc);
1170 	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->tx_cb_map->dm_segs->ds_addr +
1171 	    offsetof(struct fxp_ctrl, u.ias));
1172 	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
1173 	/* ...and wait for it to complete. */
1174 	bus_dmamap_sync(sc->sc_dmat, sc->tx_cb_map,
1175 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1176 	do {
1177 		DELAY(1);
1178 		bus_dmamap_sync(sc->sc_dmat, sc->tx_cb_map,
1179 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1180 	} while (!(cb_ias->cb_status & FXP_CB_STATUS_C));
1181 
1182 	/* Again, this time really upload the multicast addresses */
1183 	fxp_mc_setup(sc, 1);
1184 
1185 	/*
1186 	 * Initialize transmit control block (TxCB) list.
1187 	 */
1188 	bzero(sc->sc_ctrl->tx_cb, sizeof(struct fxp_cb_tx) * FXP_NTXCB);
1189 	txp = sc->sc_ctrl->tx_cb;
1190 	for (i = 0; i < FXP_NTXCB; i++) {
1191 		txp[i].cb_command = FXP_CB_COMMAND_NOP;
1192 		txp[i].link_addr = sc->tx_cb_map->dm_segs->ds_addr +
1193 		    offsetof(struct fxp_ctrl, tx_cb[(i + 1) & FXP_TXCB_MASK]);
1194 		txp[i].tbd_array_addr = sc->tx_cb_map->dm_segs->ds_addr +
1195 		    offsetof(struct fxp_ctrl, tx_cb[i].tbd[0]);
1196 	}
1197 	/*
1198 	 * Set the suspend flag on the first TxCB and start the control
1199 	 * unit. It will execute the NOP and then suspend.
1200 	 */
1201 	sc->sc_cbt_prev = sc->sc_cbt_prod = sc->sc_cbt_cons = sc->txs;
1202 	sc->sc_cbt_cnt = 1;
1203 	sc->sc_ctrl->tx_cb[0].cb_command = FXP_CB_COMMAND_NOP |
1204 	    FXP_CB_COMMAND_S | FXP_CB_COMMAND_I;
1205 	bus_dmamap_sync(sc->sc_dmat, sc->tx_cb_map,
1206 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1207 
1208 	fxp_scb_wait(sc);
1209 	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->tx_cb_map->dm_segs->ds_addr +
1210 	    offsetof(struct fxp_ctrl, tx_cb[0]));
1211 	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
1212 
1213 	/*
1214 	 * Initialize receiver buffer area - RFA.
1215 	 */
1216 	fxp_scb_wait(sc);
1217 	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
1218 	    vtophys((vaddr_t)sc->rfa_headm->m_ext.ext_buf) + RFA_ALIGNMENT_FUDGE);
1219 	fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_START);
1220 
1221 	/*
1222 	 * Set current media.
1223 	 */
1224 	mii_mediachg(&sc->sc_mii);
1225 
1226 	ifp->if_flags |= IFF_RUNNING;
1227 	ifp->if_flags &= ~IFF_OACTIVE;
1228 	splx(s);
1229 
1230 	/*
1231 	 * Start stats updater.
1232 	 */
1233 	timeout_add(&sc->stats_update_to, hz);
1234 }
1235 
1236 /*
1237  * Change media according to request.
1238  */
1239 int
1240 fxp_mediachange(ifp)
1241 	struct ifnet *ifp;
1242 {
1243 	struct fxp_softc *sc = ifp->if_softc;
1244 
1245 	mii_mediachg(&sc->sc_mii);
1246 	return (0);
1247 }
1248 
1249 /*
1250  * Notify the world which media we're using.
1251  */
1252 void
1253 fxp_mediastatus(ifp, ifmr)
1254 	struct ifnet *ifp;
1255 	struct ifmediareq *ifmr;
1256 {
1257 	struct fxp_softc *sc = ifp->if_softc;
1258 
1259 	mii_pollstat(&sc->sc_mii);
1260 	ifmr->ifm_status = sc->sc_mii.mii_media_status;
1261 	ifmr->ifm_active = sc->sc_mii.mii_media_active;
1262 }
1263 
1264 /*
1265  * Add a buffer to the end of the RFA buffer list.
1266  * Return 0 if successful, 1 for failure. A failure results in
1267  * adding the 'oldm' (if non-NULL) on to the end of the list -
1268  * tossing out its old contents and recycling it.
1269  * The RFA struct is stuck at the beginning of the mbuf cluster and the
1270  * data pointer is fixed up to point just past it.
1271  */
1272 int
1273 fxp_add_rfabuf(sc, oldm)
1274 	struct fxp_softc *sc;
1275 	struct mbuf *oldm;
1276 {
1277 	u_int32_t v;
1278 	struct mbuf *m;
1279 	u_int8_t *rfap;
1280 
1281 	MGETHDR(m, M_DONTWAIT, MT_DATA);
1282 	if (m != NULL) {
1283 		MCLGET(m, M_DONTWAIT);
1284 		if ((m->m_flags & M_EXT) == 0) {
1285 			m_freem(m);
1286 			if (oldm == NULL)
1287 				return 1;
1288 			m = oldm;
1289 			m->m_data = m->m_ext.ext_buf;
1290 		}
1291 	} else {
1292 		if (oldm == NULL)
1293 			return 1;
1294 		m = oldm;
1295 		m->m_data = m->m_ext.ext_buf;
1296 	}
1297 
1298 	/*
1299 	 * Move the data pointer up so that the incoming data packet
1300 	 * will be 32-bit aligned.
1301 	 */
1302 	m->m_data += RFA_ALIGNMENT_FUDGE;
1303 
1304 	/*
1305 	 * Get a pointer to where the RFA will live (just past the
1306 	 * alignment fudge) and move the data start past the RFA.
1307 	 */
1308 	rfap = m->m_data;
1309 	m->m_data += sizeof(struct fxp_rfa);
1310 	*(u_int16_t *)(rfap + offsetof(struct fxp_rfa, size)) =
1311 	    MCLBYTES - sizeof(struct fxp_rfa) - RFA_ALIGNMENT_FUDGE;
1312 
1313 	/*
1314 	 * Initialize the rest of the RFA.  Note that since the RFA
1315 	 * is misaligned, we cannot store values directly.  Instead,
1316 	 * we use an optimized, inline copy.
1317 	 */
1318 	*(u_int16_t *)(rfap + offsetof(struct fxp_rfa, rfa_status)) = 0;
1319 	*(u_int16_t *)(rfap + offsetof(struct fxp_rfa, rfa_control)) =
1320 	    FXP_RFA_CONTROL_EL;
1321 	*(u_int16_t *)(rfap + offsetof(struct fxp_rfa, actual_size)) = 0;
1322 
1323 	v = -1;
1324 	fxp_lwcopy(&v,
1325 	    (u_int32_t *)(rfap + offsetof(struct fxp_rfa, link_addr)));
1326 	fxp_lwcopy(&v,
1327 	    (u_int32_t *)(rfap + offsetof(struct fxp_rfa, rbd_addr)));
1328 
1329 	/*
1330 	 * If there are other buffers already on the list, attach this
1331 	 * one to the end by fixing up the tail to point to this one.
1332 	 */
1333 	if (sc->rfa_headm != NULL) {
1334 		sc->rfa_tailm->m_next = m;
1335 		v = vtophys((vaddr_t)rfap);
1336 		rfap = sc->rfa_tailm->m_ext.ext_buf + RFA_ALIGNMENT_FUDGE;
1337 		fxp_lwcopy(&v,
1338 		    (u_int32_t *)(rfap + offsetof(struct fxp_rfa, link_addr)));
1339 		*(u_int16_t *)(rfap + offsetof(struct fxp_rfa, rfa_control)) &=
1340 		    ~FXP_RFA_CONTROL_EL;
1341 	} else {
1342 		sc->rfa_headm = m;
1343 	}
1344 	sc->rfa_tailm = m;
1345 
1346 	return (m == oldm);
1347 }
1348 
1349 int
1350 fxp_mdi_read(self, phy, reg)
1351 	struct device *self;
1352 	int phy;
1353 	int reg;
1354 {
1355 	struct fxp_softc *sc = (struct fxp_softc *)self;
1356 	int count = 10000;
1357 	int value;
1358 
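	/*
	 * MDI control word: opcode at bit 26, PHY address at bit 21,
	 * register number at bit 16, data in the low 16 bits.  Bit 28
	 * (0x10000000) is polled below as the completion flag.
	 */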
1359 	CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
1360 	    (FXP_MDI_READ << 26) | (reg << 16) | (phy << 21));
1361 
1362 	while (((value = CSR_READ_4(sc, FXP_CSR_MDICONTROL)) & 0x10000000) == 0
1363 	    && count--)
1364 		DELAY(10);
1365 
1366 	if (count <= 0)
1367 		printf("%s: fxp_mdi_read: timed out\n", sc->sc_dev.dv_xname);
1368 
1369 	return (value & 0xffff);
1370 }
1371 
1372 void
1373 fxp_statchg(self)
1374 	struct device *self;
1375 {
1376 	struct fxp_softc *sc = (struct fxp_softc *)self;
1377 
1378 	/*
1379 	 * Determine whether or not we have to work around the
1380 	 * Resume Bug.
1381 	 */
1382 	if (sc->sc_flags & FXPF_HAS_RESUME_BUG) {
1383 		if (IFM_TYPE(sc->sc_mii.mii_media_active) == IFM_10_T)
1384 			sc->sc_flags |= FXPF_FIX_RESUME_BUG;
1385 		else
1386 			sc->sc_flags &= ~FXPF_FIX_RESUME_BUG;
1387 	}
1388 }
1389 
1390 void
1391 fxp_mdi_write(self, phy, reg, value)
1392 	struct device *self;
1393 	int phy;
1394 	int reg;
1395 	int value;
1396 {
1397 	struct fxp_softc *sc = (struct fxp_softc *)self;
1398 	int count = 10000;
1399 
1400 	CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
1401 	    (FXP_MDI_WRITE << 26) | (reg << 16) | (phy << 21) |
1402 	    (value & 0xffff));
1403 
1404 	while((CSR_READ_4(sc, FXP_CSR_MDICONTROL) & 0x10000000) == 0 &&
1405 	    count--)
1406 		DELAY(10);
1407 
1408 	if (count <= 0)
1409 		printf("%s: fxp_mdi_write: timed out\n", sc->sc_dev.dv_xname);
1410 }
1411 
1412 int
1413 fxp_ioctl(ifp, command, data)
1414 	struct ifnet *ifp;
1415 	u_long command;
1416 	caddr_t data;
1417 {
1418 	struct fxp_softc *sc = ifp->if_softc;
1419 	struct ifreq *ifr = (struct ifreq *)data;
1420 	struct ifaddr *ifa = (struct ifaddr *)data;
1421 	int s, error = 0;
1422 
1423 	s = splimp();
1424 
1425 	if ((error = ether_ioctl(ifp, &sc->arpcom, command, data)) > 0) {
1426 		splx(s);
1427 		return (error);
1428 	}
1429 
1430 	switch (command) {
1431 	case SIOCSIFADDR:
1432 		ifp->if_flags |= IFF_UP;
1433 
1434 		switch (ifa->ifa_addr->sa_family) {
1435 #ifdef INET
1436 		case AF_INET:
1437 			fxp_init(sc);
1438 			arp_ifinit(&sc->arpcom, ifa);
1439 			break;
1440 #endif
1441 #ifdef NS
1442 		case AF_NS:
1443 		    {
1444 			 register struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;
1445 
1446 			 if (ns_nullhost(*ina))
1447 				ina->x_host = *(union ns_host *)
1448 				    LLADDR(ifp->if_sadl);
1449 			 else
1450 				bcopy(ina->x_host.c_host, LLADDR(ifp->if_sadl),
1451 				    ifp->if_addrlen);
1452 			 /* Set new address. */
1453 			 fxp_init(sc);
1454 			 break;
1455 		    }
1456 #endif
1457 		default:
1458 			fxp_init(sc);
1459 			break;
1460 		}
1461 		break;
1462 
1463 	case SIOCSIFMTU:
1464 		if (ifr->ifr_mtu > ETHERMTU || ifr->ifr_mtu < ETHERMIN) {
1465 			error = EINVAL;
1466 		} else if (ifp->if_mtu != ifr->ifr_mtu) {
1467 			ifp->if_mtu = ifr->ifr_mtu;
1468 		}
1469 		break;
1470 
1471 	case SIOCSIFFLAGS:
1472 		/*
1473 		 * If interface is marked up and not running, then start it.
1474 		 * If it is marked down and running, stop it.
1475 		 * XXX If it's up then re-initialize it. This is so flags
1476 		 * such as IFF_PROMISC are handled.
1477 		 */
1478 		if (ifp->if_flags & IFF_UP)
1479 			fxp_init(sc);
1480 		else if (ifp->if_flags & IFF_RUNNING)
1481 			fxp_stop(sc, 1);
1482 		break;
1483 
1484 	case SIOCADDMULTI:
1485 	case SIOCDELMULTI:
1486 		error = (command == SIOCADDMULTI) ?
1487 		    ether_addmulti(ifr, &sc->arpcom) :
1488 		    ether_delmulti(ifr, &sc->arpcom);
1489 		if (error == ENETRESET) {
1490 			/*
1491 			 * Multicast list has changed; set the hardware
1492 			 * filter accordingly.
1493 			 */
1494 			fxp_init(sc);
1495 			error = 0;
1496 		}
1497 		break;
1498 
1499 	case SIOCSIFMEDIA:
1500 	case SIOCGIFMEDIA:
1501 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
1502 		break;
1503 
1504 	default:
1505 		error = EINVAL;
1506 	}
1507 	(void) splx(s);
1508 	return (error);
1509 }
1510 
1511 /*
1512  * Program the multicast filter.
1513  *
1514  * We have an artificial restriction that the multicast setup command
1515  * must be the first command in the chain, so we take steps to ensure
1516  * this. Requiring this allows us to keep up the performance of
1517  * the pre-initialized command ring (esp. link pointers) by not actually
1518  * inserting the mcsetup command in the ring - i.e. its link pointer
1519  * points to the TxCB ring, but the mcsetup descriptor itself is not part
1520  * of it. We then can do 'CU_START' on the mcsetup descriptor and have it
1521  * lead into the regular TxCB ring when it completes.
1522  *
1523  * This function must be called at splimp.
1524  */
1525 void
1526 fxp_mc_setup(sc, doit)
1527 	struct fxp_softc *sc;
1528 	int doit;
1529 {
1530 	struct fxp_cb_mcs *mcsp = &sc->sc_ctrl->u.mcs;
1531 	struct ifnet *ifp = &sc->arpcom.ac_if;
1532 	struct ether_multistep step;
1533 	struct ether_multi *enm;
1534 	int nmcasts;
1535 
1536 	/*
1537 	 * Initialize multicast setup descriptor.
1538 	 */
1539 	mcsp->cb_status = 0;
1540 	mcsp->cb_command = FXP_CB_COMMAND_MCAS | FXP_CB_COMMAND_EL;
1541 	mcsp->link_addr = -1;
1542 
1543 	nmcasts = 0;
1544 	if (!(ifp->if_flags & IFF_ALLMULTI)) {
1545 		ETHER_FIRST_MULTI(step, &sc->arpcom, enm);
1546 		while (enm != NULL) {
1547 			if (nmcasts >= MAXMCADDR) {
1548 				ifp->if_flags |= IFF_ALLMULTI;
1549 				nmcasts = 0;
1550 				break;
1551 			}
1552 
1553 			/* Punt on ranges. */
1554 			if (bcmp(enm->enm_addrlo, enm->enm_addrhi,
1555 			    sizeof(enm->enm_addrlo)) != 0) {
1556 				ifp->if_flags |= IFF_ALLMULTI;
1557 				nmcasts = 0;
1558 				break;
1559 			}
1560 			bcopy(enm->enm_addrlo,
1561 			    (void *)&mcsp->mc_addr[nmcasts][0], ETHER_ADDR_LEN);
1562 			nmcasts++;
1563 			ETHER_NEXT_MULTI(step, enm);
1564 		}
1565 	}
1566 	if (doit == 0)
1567 		return;
1568 	mcsp->mc_cnt = nmcasts * ETHER_ADDR_LEN;
1569 
1570 	/*
1571 	 * Wait until command unit is not active. This should never
1572 	 * be the case when nothing is queued, but make sure anyway.
1573 	 */
1574 	while ((CSR_READ_1(sc, FXP_CSR_SCB_RUSCUS) >> 6) != FXP_SCB_CUS_IDLE);
1575 
1576 	/*
1577 	 * Start the multicast setup command.
1578 	 */
1579 	fxp_scb_wait(sc);
1580 	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->tx_cb_map->dm_segs->ds_addr +
1581 	    offsetof(struct fxp_ctrl, u.mcs));
1582 	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
1583 
1584 	bus_dmamap_sync(sc->sc_dmat, sc->tx_cb_map,
1585 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1586 
1587 	do {
1588 		DELAY(1);
1589 		bus_dmamap_sync(sc->sc_dmat, sc->tx_cb_map,
1590 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1591 	} while (!(mcsp->cb_status & FXP_CB_STATUS_C));
1592 }
1593