/*	$NetBSD: elinkxl.c,v 1.124 2018/06/26 06:48:00 msaitoh Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Frank van der Linden.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: elinkxl.c,v 1.124 2018/06/26 06:48:00 msaitoh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/syslog.h>
#include <sys/select.h>
#include <sys/device.h>
#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#include <net/if_media.h>
#include <net/bpf.h>

#include <sys/cpu.h>
#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/miivar.h>
#include <dev/mii/mii.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/ic/elink3reg.h>
/* #include <dev/ic/elink3var.h> */
#include <dev/ic/elinkxlreg.h>
#include <dev/ic/elinkxlvar.h>

#ifdef DEBUG
int exdebug = 0;
#endif
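
/*
 * The 3c90x register file is windowed: GO_WINDOW(n) (from elink3reg.h)
 * selects which bank the window-relative I/O offsets below decode to.
 * Window 1 is the normal operating window; the code switches windows as
 * needed and returns to window 1 when done.
 */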

/* ifmedia callbacks */
int ex_media_chg(struct ifnet *ifp);
void ex_media_stat(struct ifnet *ifp, struct ifmediareq *req);

static int ex_ifflags_cb(struct ethercom *);

void ex_probe_media(struct ex_softc *);
void ex_set_filter(struct ex_softc *);
void ex_set_media(struct ex_softc *);
void ex_set_xcvr(struct ex_softc *, uint16_t);
struct mbuf *ex_get(struct ex_softc *, int);
uint16_t ex_read_eeprom(struct ex_softc *, int);
int ex_init(struct ifnet *);
void ex_read(struct ex_softc *);
void ex_reset(struct ex_softc *);
void ex_set_mc(struct ex_softc *);
void ex_getstats(struct ex_softc *);
void ex_printstats(struct ex_softc *);
void ex_tick(void *);

static int ex_eeprom_busy(struct ex_softc *);
static int ex_add_rxbuf(struct ex_softc *, struct ex_rxdesc *);
static void ex_init_txdescs(struct ex_softc *);

static void ex_setup_tx(struct ex_softc *);
static bool ex_shutdown(device_t, int);
static void ex_start(struct ifnet *);
static void ex_txstat(struct ex_softc *);

int ex_mii_readreg(device_t, int, int);
void ex_mii_writereg(device_t, int, int, int);
void ex_mii_statchg(struct ifnet *);

void ex_probemedia(struct ex_softc *);

/*
 * Structure to map media-present bits in boards to ifmedia codes and
 * printable media names.  Used for table-driven ifmedia initialization.
 */
struct ex_media {
	int	exm_mpbit;		/* media present bit */
	const char *exm_name;		/* name of medium */
	int	exm_ifmedia;		/* ifmedia word for medium */
	int	exm_epmedia;		/* ELINKMEDIA_* constant */
};

/*
 * Media table for 3c90x chips.  Note that chips with MII have no
 * `native' media.
 */
static const struct ex_media ex_native_media[] = {
	{ ELINK_PCI_10BASE_T,	"10baseT",	IFM_ETHER|IFM_10_T,
	  ELINKMEDIA_10BASE_T },
	{ ELINK_PCI_10BASE_T,	"10baseT-FDX",	IFM_ETHER|IFM_10_T|IFM_FDX,
	  ELINKMEDIA_10BASE_T },
	{ ELINK_PCI_AUI,	"10base5",	IFM_ETHER|IFM_10_5,
	  ELINKMEDIA_AUI },
	{ ELINK_PCI_BNC,	"10base2",	IFM_ETHER|IFM_10_2,
	  ELINKMEDIA_10BASE_2 },
	{ ELINK_PCI_100BASE_TX,	"100baseTX",	IFM_ETHER|IFM_100_TX,
	  ELINKMEDIA_100BASE_TX },
	{ ELINK_PCI_100BASE_TX,	"100baseTX-FDX",IFM_ETHER|IFM_100_TX|IFM_FDX,
	  ELINKMEDIA_100BASE_TX },
	{ ELINK_PCI_100BASE_FX,	"100baseFX",	IFM_ETHER|IFM_100_FX,
	  ELINKMEDIA_100BASE_FX },
	{ ELINK_PCI_100BASE_MII,"manual",	IFM_ETHER|IFM_MANUAL,
	  ELINKMEDIA_MII },
	{ ELINK_PCI_100BASE_T4,	"100baseT4",	IFM_ETHER|IFM_100_T4,
	  ELINKMEDIA_100BASE_T4 },
	{ 0,			NULL,		0,
	  0 },
};

/*
 * MII bit-bang glue.
 */
uint32_t ex_mii_bitbang_read(device_t);
void ex_mii_bitbang_write(device_t, uint32_t);

const struct mii_bitbang_ops ex_mii_bitbang_ops = {
	ex_mii_bitbang_read,
	ex_mii_bitbang_write,
	{
		ELINK_PHY_DATA,		/* MII_BIT_MDO */
		ELINK_PHY_DATA,		/* MII_BIT_MDI */
		ELINK_PHY_CLK,		/* MII_BIT_MDC */
		ELINK_PHY_DIR,		/* MII_BIT_DIR_HOST_PHY */
		0,			/* MII_BIT_DIR_PHY_HOST */
	}
};
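
/*
 * Note that MDO and MDI map to the same ELINK_PHY_DATA bit: the MII
 * management data pin is bidirectional, with ELINK_PHY_DIR selecting
 * whether the host or the PHY drives it.
 */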

/*
 * Back-end attach and configure.
 */
void
ex_config(struct ex_softc *sc)
{
	struct ifnet *ifp;
	uint16_t val;
	uint8_t macaddr[ETHER_ADDR_LEN] = {0};
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int i, error, attach_stage;

	pmf_self_suspensor_init(sc->sc_dev, &sc->sc_suspensor, &sc->sc_qual);

	callout_init(&sc->ex_mii_callout, 0);

	ex_reset(sc);
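
	/*
	 * The station address is stored in three 16-bit EEPROM words,
	 * high byte first within each word.
	 */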
	val = ex_read_eeprom(sc, EEPROM_OEM_ADDR0);
	macaddr[0] = val >> 8;
	macaddr[1] = val & 0xff;
	val = ex_read_eeprom(sc, EEPROM_OEM_ADDR1);
	macaddr[2] = val >> 8;
	macaddr[3] = val & 0xff;
	val = ex_read_eeprom(sc, EEPROM_OEM_ADDR2);
	macaddr[4] = val >> 8;
	macaddr[5] = val & 0xff;

	aprint_normal_dev(sc->sc_dev, "MAC address %s\n",
	    ether_sprintf(macaddr));

	if (sc->ex_conf & (EX_CONF_INV_LED_POLARITY|EX_CONF_PHY_POWER)) {
		GO_WINDOW(2);
		val = bus_space_read_2(iot, ioh, ELINK_W2_RESET_OPTIONS);
		if (sc->ex_conf & EX_CONF_INV_LED_POLARITY)
			val |= ELINK_RESET_OPT_LEDPOLAR;
		if (sc->ex_conf & EX_CONF_PHY_POWER)
			val |= ELINK_RESET_OPT_PHYPOWER;
		bus_space_write_2(iot, ioh, ELINK_W2_RESET_OPTIONS, val);
	}
	if (sc->ex_conf & EX_CONF_NO_XCVR_PWR) {
		GO_WINDOW(0);
		bus_space_write_2(iot, ioh, ELINK_W0_MFG_ID,
		    EX_XCVR_PWR_MAGICBITS);
	}

	attach_stage = 0;

	/*
	 * Allocate the upload descriptors, and create and load the DMA
	 * map for them.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    EX_NUPD * sizeof (struct ex_upd), PAGE_SIZE, 0, &sc->sc_useg, 1,
	    &sc->sc_urseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't allocate upload descriptors, error = %d\n", error);
		goto fail;
	}

	attach_stage = 1;

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_useg, sc->sc_urseg,
	    EX_NUPD * sizeof (struct ex_upd), (void **)&sc->sc_upd,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't map upload descriptors, error = %d\n", error);
		goto fail;
	}

	attach_stage = 2;

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    EX_NUPD * sizeof (struct ex_upd), 1,
	    EX_NUPD * sizeof (struct ex_upd), 0, BUS_DMA_NOWAIT,
	    &sc->sc_upd_dmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't create upload desc. DMA map, error = %d\n", error);
		goto fail;
	}

	attach_stage = 3;

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_upd_dmamap,
	    sc->sc_upd, EX_NUPD * sizeof (struct ex_upd), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't load upload desc. DMA map, error = %d\n", error);
		goto fail;
	}

	attach_stage = 4;

	/*
	 * Allocate the download descriptors, and create and load the DMA
	 * map for them.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, PAGE_SIZE, 0, &sc->sc_dseg, 1,
	    &sc->sc_drseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't allocate download descriptors, error = %d\n", error);
		goto fail;
	}

	attach_stage = 5;

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dseg, sc->sc_drseg,
	    DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, (void **)&sc->sc_dpd,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't map download descriptors, error = %d\n", error);
		goto fail;
	}
	memset(sc->sc_dpd, 0, DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN);

	attach_stage = 6;

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, 1,
	    DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, 0, BUS_DMA_NOWAIT,
	    &sc->sc_dpd_dmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't create download desc. DMA map, error = %d\n", error);
		goto fail;
	}

	attach_stage = 7;

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dpd_dmamap,
	    sc->sc_dpd, DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "can't load download desc. DMA map, error = %d\n", error);
		goto fail;
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
	    DPDMEMPAD_OFF, EX_IP4CSUMTX_PADLEN, BUS_DMASYNC_PREWRITE);

	attach_stage = 8;

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < EX_NDPD; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    EX_NTFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_tx_dmamaps[i])) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "can't create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail;
		}
	}

	attach_stage = 9;

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < EX_NUPD; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    EX_NRFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rx_dmamaps[i])) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "can't create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail;
		}
	}

	attach_stage = 10;

	/*
	 * Create ring of upload descriptors, only once. The DMA engine
	 * will loop over this when receiving packets, stalling if it
	 * hits an UPD with a finished receive.
	 */
	for (i = 0; i < EX_NUPD; i++) {
		sc->sc_rxdescs[i].rx_dmamap = sc->sc_rx_dmamaps[i];
		sc->sc_rxdescs[i].rx_upd = &sc->sc_upd[i];
		sc->sc_upd[i].upd_frags[0].fr_len =
		    htole32((MCLBYTES - 2) | EX_FR_LAST);
		if (ex_add_rxbuf(sc, &sc->sc_rxdescs[i]) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "can't allocate or map rx buffers\n");
			goto fail;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap, 0,
	    EX_NUPD * sizeof (struct ex_upd),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	ex_init_txdescs(sc);

	attach_stage = 11;

	GO_WINDOW(3);
	val = bus_space_read_2(iot, ioh, ELINK_W3_RESET_OPTIONS);
	if (val & ELINK_MEDIACAP_MII)
		sc->ex_conf |= EX_CONF_MII;

	ifp = &sc->sc_ethercom.ec_if;

	/*
	 * Initialize our media structures and MII info.  We'll
	 * probe the MII if we discover that we have one.
	 */
	sc->ex_mii.mii_ifp = ifp;
	sc->ex_mii.mii_readreg = ex_mii_readreg;
	sc->ex_mii.mii_writereg = ex_mii_writereg;
	sc->ex_mii.mii_statchg = ex_mii_statchg;
	ifmedia_init(&sc->ex_mii.mii_media, IFM_IMASK, ex_media_chg,
	    ex_media_stat);

	if (sc->ex_conf & EX_CONF_MII) {
		/*
		 * Find PHY, extract media information from it.
		 * First, select the right transceiver.
		 */
		ex_set_xcvr(sc, val);

		mii_attach(sc->sc_dev, &sc->ex_mii, 0xffffffff,
		    MII_PHY_ANY, MII_OFFSET_ANY, 0);
		if (LIST_FIRST(&sc->ex_mii.mii_phys) == NULL) {
			ifmedia_add(&sc->ex_mii.mii_media, IFM_ETHER|IFM_NONE,
			    0, NULL);
			ifmedia_set(&sc->ex_mii.mii_media, IFM_ETHER|IFM_NONE);
		} else {
			ifmedia_set(&sc->ex_mii.mii_media, IFM_ETHER|IFM_AUTO);
		}
	} else
		ex_probemedia(sc);

	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_start = ex_start;
	ifp->if_ioctl = ex_ioctl;
	ifp->if_watchdog = ex_watchdog;
	ifp->if_init = ex_init;
	ifp->if_stop = ex_stop;
	ifp->if_flags =
	    IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
	sc->sc_if_flags = ifp->if_flags;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * The 3c90xB has hardware IPv4/TCPv4/UDPv4 checksum support.
	 */
	if (sc->ex_conf & EX_CONF_90XB)
		sc->sc_ethercom.ec_if.if_capabilities |=
		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;

	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, macaddr);
	ether_set_ifflags_cb(&sc->sc_ethercom, ex_ifflags_cb);

	GO_WINDOW(1);

	sc->tx_start_thresh = 20;
	sc->tx_succ_ok = 0;

	/* TODO: set queues to 0 */

	rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev),
			  RND_TYPE_NET, RND_FLAG_DEFAULT);

	if (pmf_device_register1(sc->sc_dev, NULL, NULL, ex_shutdown))
		pmf_class_network_register(sc->sc_dev, &sc->sc_ethercom.ec_if);
	else
		aprint_error_dev(sc->sc_dev,
		    "couldn't establish power handler\n");

	/* The attach is successful. */
	sc->ex_flags |= EX_FLAGS_ATTACHED;
	return;

 fail:
	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
	switch (attach_stage) {
	case 11:
	    {
		struct ex_rxdesc *rxd;

		for (i = 0; i < EX_NUPD; i++) {
			rxd = &sc->sc_rxdescs[i];
			if (rxd->rx_mbhead != NULL) {
				bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
				m_freem(rxd->rx_mbhead);
			}
		}
	    }
		/* FALLTHROUGH */

	case 10:
		for (i = 0; i < EX_NUPD; i++)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_dmamaps[i]);
		/* FALLTHROUGH */

	case 9:
		for (i = 0; i < EX_NDPD; i++)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_dmamaps[i]);
		/* FALLTHROUGH */
	case 8:
		bus_dmamap_unload(sc->sc_dmat, sc->sc_dpd_dmamap);
		/* FALLTHROUGH */

	case 7:
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_dpd_dmamap);
		/* FALLTHROUGH */

	case 6:
		bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_dpd,
		    EX_NDPD * sizeof (struct ex_dpd));
		/* FALLTHROUGH */

	case 5:
		bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_drseg);
		break;

	case 4:
		bus_dmamap_unload(sc->sc_dmat, sc->sc_upd_dmamap);
		/* FALLTHROUGH */

	case 3:
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_upd_dmamap);
		/* FALLTHROUGH */

	case 2:
		bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_upd,
		    EX_NUPD * sizeof (struct ex_upd));
		/* FALLTHROUGH */

	case 1:
		bus_dmamem_free(sc->sc_dmat, &sc->sc_useg, sc->sc_urseg);
		break;
	}
}

/*
 * Find the media present on non-MII chips.
 */
void
ex_probemedia(struct ex_softc *sc)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct ifmedia *ifm = &sc->ex_mii.mii_media;
	const struct ex_media *exm;
	uint16_t config1, reset_options, default_media;
	int defmedia = 0;
	const char *sep = "", *defmedianame = NULL;

	GO_WINDOW(3);
	config1 = bus_space_read_2(iot, ioh, ELINK_W3_INTERNAL_CONFIG + 2);
	reset_options = bus_space_read_1(iot, ioh, ELINK_W3_RESET_OPTIONS);
	GO_WINDOW(0);

	default_media = (config1 & CONFIG_MEDIAMASK) >> CONFIG_MEDIAMASK_SHIFT;

	/* Sanity check that there are any media! */
	if ((reset_options & ELINK_PCI_MEDIAMASK) == 0) {
		aprint_error_dev(sc->sc_dev, "no media present!\n");
		ifmedia_add(ifm, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(ifm, IFM_ETHER|IFM_NONE);
		return;
	}

	aprint_normal_dev(sc->sc_dev, "");

#define	PRINT(str)	aprint_normal("%s%s", sep, str); sep = ", "

	for (exm = ex_native_media; exm->exm_name != NULL; exm++) {
		if (reset_options & exm->exm_mpbit) {
			/*
			 * Default media is a little complicated.  We
			 * support full-duplex which uses the same
			 * reset options bit.
			 *
			 * XXX Check EEPROM for default to FDX?
			 */
			if (exm->exm_epmedia == default_media) {
				if ((exm->exm_ifmedia & IFM_FDX) == 0) {
					defmedia = exm->exm_ifmedia;
					defmedianame = exm->exm_name;
				}
			} else if (defmedia == 0) {
				defmedia = exm->exm_ifmedia;
				defmedianame = exm->exm_name;
			}
			ifmedia_add(ifm, exm->exm_ifmedia, exm->exm_epmedia,
			    NULL);
			PRINT(exm->exm_name);
		}
	}

#undef PRINT

#ifdef DIAGNOSTIC
	if (defmedia == 0)
		panic("ex_probemedia: impossible");
#endif

	aprint_normal(", default %s\n", defmedianame);
	ifmedia_set(ifm, defmedia);
}

/*
 * Setup transmitter parameters.
 */
static void
ex_setup_tx(struct ex_softc *sc)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;

	/*
	 * Disable reclaim threshold for 90xB, set free threshold to
	 * 6 * 256 = 1536 for 90x.
	 */
	if (sc->ex_conf & EX_CONF_90XB)
		bus_space_write_2(iot, ioh, ELINK_COMMAND,
		    ELINK_TXRECLTHRESH | 255);
	else
		bus_space_write_1(iot, ioh, ELINK_TXFREETHRESH, 6);

	/* Setup early transmission start threshold. */
	bus_space_write_2(iot, ioh, ELINK_COMMAND,
	    ELINK_TXSTARTTHRESH | sc->tx_start_thresh);
}

/*
 * Bring device up.
 */
int
ex_init(struct ifnet *ifp)
{
	struct ex_softc *sc = ifp->if_softc;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int i;
	uint16_t val;
	int error = 0;

	if ((error = ex_enable(sc)) != 0)
		goto out;

	ex_waitcmd(sc);
	ex_stop(ifp, 0);

	GO_WINDOW(2);

	/* Turn on PHY power. */
	if (sc->ex_conf & (EX_CONF_PHY_POWER | EX_CONF_INV_LED_POLARITY)) {
		val = bus_space_read_2(iot, ioh, ELINK_W2_RESET_OPTIONS);
		if (sc->ex_conf & EX_CONF_PHY_POWER)
			val |= ELINK_RESET_OPT_PHYPOWER; /* turn on PHY power */
		if (sc->ex_conf & EX_CONF_INV_LED_POLARITY)
			val |= ELINK_RESET_OPT_LEDPOLAR; /* invert LED polarity */
		bus_space_write_2(iot, ioh, ELINK_W2_RESET_OPTIONS, val);
	}

	/*
	 * Set the station address and clear the station mask. The latter
	 * is needed for 90x cards; 0 is already the default on 90xB cards.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		bus_space_write_1(iot, ioh, ELINK_W2_ADDR_0 + i,
		    CLLADDR(ifp->if_sadl)[i]);
		bus_space_write_1(iot, ioh, ELINK_W2_RECVMASK_0 + i, 0);
	}

	GO_WINDOW(3);

	bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_RESET);
	ex_waitcmd(sc);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_RESET);
	ex_waitcmd(sc);

	/* Load Tx parameters. */
	ex_setup_tx(sc);

	bus_space_write_2(iot, ioh, ELINK_COMMAND,
	    SET_RX_EARLY_THRESH | ELINK_THRESH_DISABLE);

	bus_space_write_4(iot, ioh, ELINK_DMACTRL,
	    bus_space_read_4(iot, ioh, ELINK_DMACTRL) | ELINK_DMAC_UPRXEAREN);

	bus_space_write_2(iot, ioh, ELINK_COMMAND,
	    SET_RD_0_MASK | XL_WATCHED_INTERRUPTS);
	bus_space_write_2(iot, ioh, ELINK_COMMAND,
	    SET_INTR_MASK | XL_WATCHED_INTERRUPTS);

	bus_space_write_2(iot, ioh, ELINK_COMMAND, ACK_INTR | 0xff);
	if (sc->intr_ack)
		(*sc->intr_ack)(sc);
	ex_set_media(sc);
	ex_set_mc(sc);

	bus_space_write_2(iot, ioh, ELINK_COMMAND, STATS_ENABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_ENABLE);
	bus_space_write_4(iot, ioh, ELINK_UPLISTPTR, sc->sc_upddma);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_ENABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, ELINK_UPUNSTALL);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ex_start(ifp);
	sc->sc_if_flags = ifp->if_flags;

	GO_WINDOW(1);

	callout_reset(&sc->ex_mii_callout, hz, ex_tick, sc);

 out:
	if (error) {
		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
		ifp->if_timer = 0;
		aprint_error_dev(sc->sc_dev, "interface not running\n");
	}
	return (error);
}

#define	MCHASHSIZE		256
#define	ex_mchash(addr)		(ether_crc32_be((addr), ETHER_ADDR_LEN) & \
				    (MCHASHSIZE - 1))
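
/*
 * The hash is the low 8 bits of the big-endian CRC-32 of the station
 * address, selecting one of the 256 hardware multicast filter bits.
 */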

/*
 * Set multicast receive filter. Also take care of promiscuous mode
 * here (XXX).
 */
void
ex_set_mc(struct ex_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ethercom *ec = &sc->sc_ethercom;
	struct ether_multi *enm;
	struct ether_multistep estep;
	int i;
	uint16_t mask = FIL_INDIVIDUAL | FIL_BRDCST;

	if (ifp->if_flags & IFF_PROMISC) {
		mask |= FIL_PROMISC;
		goto allmulti;
	}

	ETHER_FIRST_MULTI(estep, ec, enm);
	if (enm == NULL)
		goto nomulti;

	if ((sc->ex_conf & EX_CONF_90XB) == 0)
		/* No multicast hash filtering. */
		goto allmulti;

	for (i = 0; i < MCHASHSIZE; i++)
		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
		    ELINK_COMMAND, ELINK_CLEARHASHFILBIT | i);

	do {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0)
			goto allmulti;

		i = ex_mchash(enm->enm_addrlo);
		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
		    ELINK_COMMAND, ELINK_SETHASHFILBIT | i);
		ETHER_NEXT_MULTI(estep, enm);
	} while (enm != NULL);
	mask |= FIL_MULTIHASH;

nomulti:
	ifp->if_flags &= ~IFF_ALLMULTI;
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_COMMAND,
	    SET_RX_FILTER | mask);
	return;

allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	mask |= FIL_MULTICAST;
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_COMMAND,
	    SET_RX_FILTER | mask);
}

/*
 * The Tx Complete interrupts occur only on errors,
 * and this is the error handler.
 */
static void
ex_txstat(struct ex_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int i, err = 0;

	/*
	 * We need to read+write TX_STATUS until we get a 0 status
	 * in order to turn off the interrupt flag.
	 * ELINK_TXSTATUS is the upper byte of the 16-bit word shared
	 * with ELINK_TIMER, hence the 2-byte accesses below.
	 */
	for (;;) {
		i = bus_space_read_2(iot, ioh, ELINK_TIMER);
		if ((i & TXS_COMPLETE) == 0)
			break;
		bus_space_write_2(iot, ioh, ELINK_TIMER, 0x0);
		err |= i;
	}
	err &= ~TXS_TIMER;

	if ((err & (TXS_UNDERRUN | TXS_JABBER | TXS_RECLAIM))
	    || err == 0 /* should not happen, just in case */) {
		/*
		 * Make sure the transmission is stopped.
		 */
		bus_space_write_2(iot, ioh, ELINK_COMMAND, ELINK_DNSTALL);
		for (i = 1000; i > 0; i--)
			if ((bus_space_read_4(iot, ioh, ELINK_DMACTRL) &
			    ELINK_DMAC_DNINPROG) == 0)
				break;

		/*
		 * Reset the transmitter.
		 */
		bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_RESET);

		/* Resetting takes a while and we will do more than wait. */

		ifp->if_flags &= ~IFF_OACTIVE;
		++sc->sc_ethercom.ec_if.if_oerrors;
		aprint_error_dev(sc->sc_dev, "%s%s%s",
		    (err & TXS_UNDERRUN) ? " transmit underrun" : "",
		    (err & TXS_JABBER) ? " jabber" : "",
		    (err & TXS_RECLAIM) ? " reclaim" : "");
		if (err == 0)
			aprint_error(" unknown Tx error");
		printf(" (%x)", err);
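		/*
		 * On underrun, adaptively raise the early-transmit start
		 * threshold by 20 bytes (capped at one full frame) and
		 * require another 256 clean transmissions before trusting
		 * it again; see tx_succ_ok in ex_intr().
		 */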
		if (err & TXS_UNDERRUN) {
			aprint_error(" @%d", sc->tx_start_thresh);
			if (sc->tx_succ_ok < 256 &&
			    (i = min(ETHER_MAX_LEN, sc->tx_start_thresh + 20))
			    > sc->tx_start_thresh) {
				aprint_error(", new threshold is %d", i);
				sc->tx_start_thresh = i;
			}
			sc->tx_succ_ok = 0;
		}
		aprint_error("\n");
		if (err & TXS_MAX_COLLISION)
			++sc->sc_ethercom.ec_if.if_collisions;

		/* Wait for TX_RESET to finish. */
		ex_waitcmd(sc);

		/* Reload Tx parameters. */
		ex_setup_tx(sc);
	} else {
		if (err & TXS_MAX_COLLISION)
			++sc->sc_ethercom.ec_if.if_collisions;
		sc->sc_ethercom.ec_if.if_flags &= ~IFF_OACTIVE;
	}

	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_ENABLE);

	/* Retransmit current packet if any. */
	if (sc->tx_head) {
		ifp->if_flags |= IFF_OACTIVE;
		bus_space_write_2(iot, ioh, ELINK_COMMAND,
		    ELINK_DNUNSTALL);
		bus_space_write_4(iot, ioh, ELINK_DNLISTPTR,
		    DPD_DMADDR(sc, sc->tx_head));

		/* Retrigger watchdog if stopped. */
		if (ifp->if_timer == 0)
			ifp->if_timer = 1;
	}
}

int
ex_media_chg(struct ifnet *ifp)
{

	if (ifp->if_flags & IFF_UP)
		ex_init(ifp);
	return 0;
}

void
ex_set_xcvr(struct ex_softc *sc, const uint16_t media)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	uint32_t icfg;

	/*
	 * We're already in Window 3
	 */
	icfg = bus_space_read_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG);
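	/*
	 * CONFIG_XCVR_SEL is defined against the 16-bit view of the
	 * internal-config register; the field sits in the upper half of
	 * this 32-bit read, hence the shifts by 16 below.
	 */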
	icfg &= ~(CONFIG_XCVR_SEL << 16);
	if (media & (ELINK_MEDIACAP_MII | ELINK_MEDIACAP_100BASET4))
		icfg |= ELINKMEDIA_MII << (CONFIG_XCVR_SEL_SHIFT + 16);
	if (media & ELINK_MEDIACAP_100BASETX)
		icfg |= ELINKMEDIA_AUTO << (CONFIG_XCVR_SEL_SHIFT + 16);
	if (media & ELINK_MEDIACAP_100BASEFX)
		icfg |= ELINKMEDIA_100BASE_FX
			<< (CONFIG_XCVR_SEL_SHIFT + 16);
	bus_space_write_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG, icfg);
}

void
ex_set_media(struct ex_softc *sc)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	uint32_t configreg;

	if (((sc->ex_conf & EX_CONF_MII) &&
	    (sc->ex_mii.mii_media_active & IFM_FDX))
	    || (!(sc->ex_conf & EX_CONF_MII) &&
	    (sc->ex_mii.mii_media.ifm_media & IFM_FDX))) {
		bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL,
		    MAC_CONTROL_FDX);
	} else {
		bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL, 0);
	}

	/*
	 * If the device has MII, select it, and then tell the
	 * PHY which media to use.
	 */
	if (sc->ex_conf & EX_CONF_MII) {
		uint16_t val;

		GO_WINDOW(3);
		val = bus_space_read_2(iot, ioh, ELINK_W3_RESET_OPTIONS);
		ex_set_xcvr(sc, val);
		mii_mediachg(&sc->ex_mii);
		return;
	}

	GO_WINDOW(4);
	bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE, 0);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, STOP_TRANSCEIVER);
	delay(800);

	/*
	 * Now turn on the selected media/transceiver.
	 */
	switch (IFM_SUBTYPE(sc->ex_mii.mii_media.ifm_cur->ifm_media)) {
	case IFM_10_T:
		bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
		    JABBER_GUARD_ENABLE|LINKBEAT_ENABLE);
		break;

	case IFM_10_2:
		bus_space_write_2(iot, ioh, ELINK_COMMAND, START_TRANSCEIVER);
		DELAY(800);
		break;

	case IFM_100_TX:
	case IFM_100_FX:
		bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
		    LINKBEAT_ENABLE);
		DELAY(800);
		break;

	case IFM_10_5:
		bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
		    SQE_ENABLE);
		DELAY(800);
		break;

	case IFM_MANUAL:
		break;

	case IFM_NONE:
		return;

	default:
		panic("ex_set_media: impossible");
	}

	GO_WINDOW(3);
	configreg = bus_space_read_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG);

	configreg &= ~(CONFIG_MEDIAMASK << 16);
	configreg |= (sc->ex_mii.mii_media.ifm_cur->ifm_data <<
	    (CONFIG_MEDIAMASK_SHIFT + 16));

	bus_space_write_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG, configreg);
}

/*
 * Get currently-selected media from card.
 * (if_media callback, may be called before interface is brought up).
 */
void
ex_media_stat(struct ifnet *ifp, struct ifmediareq *req)
{
	struct ex_softc *sc = ifp->if_softc;
	uint16_t help;

	if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) == (IFF_UP|IFF_RUNNING)) {
		if (sc->ex_conf & EX_CONF_MII) {
			mii_pollstat(&sc->ex_mii);
			req->ifm_status = sc->ex_mii.mii_media_status;
			req->ifm_active = sc->ex_mii.mii_media_active;
		} else {
			GO_WINDOW(4);
			req->ifm_status = IFM_AVALID;
			req->ifm_active =
			    sc->ex_mii.mii_media.ifm_cur->ifm_media;
			help = bus_space_read_2(sc->sc_iot, sc->sc_ioh,
						ELINK_W4_MEDIA_TYPE);
			if (help & LINKBEAT_DETECT)
				req->ifm_status |= IFM_ACTIVE;
			GO_WINDOW(1);
		}
	}
}

/*
 * Start outputting on the interface.
 */
static void
ex_start(struct ifnet *ifp)
{
	struct ex_softc *sc = ifp->if_softc;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	volatile struct ex_fraghdr *fr = NULL;
	volatile struct ex_dpd *dpd = NULL, *prevdpd = NULL;
	struct ex_txdesc *txp;
	struct mbuf *mb_head;
	bus_dmamap_t dmamap;
	int m_csumflags, offset, seglen, totlen, segment, error;
	uint32_t csum_flags;

	if (sc->tx_head || sc->tx_free == NULL)
		return;

	txp = NULL;

	/*
	 * We're finished if there is nothing more to add to the list or if
	 * we're all filled up with buffers to transmit.
	 */
	while (sc->tx_free != NULL) {
		/*
		 * Grab a packet to transmit.
		 */
		IFQ_DEQUEUE(&ifp->if_snd, mb_head);
		if (mb_head == NULL)
			break;

		/*
		 * mb_head might be updated later,
		 * so preserve csum_flags here.
		 */
		m_csumflags = mb_head->m_pkthdr.csum_flags;

		/*
		 * Get pointer to next available tx desc.
		 */
		txp = sc->tx_free;
		dmamap = txp->tx_dmamap;

		/*
		 * Go through each of the mbufs in the chain and initialize
		 * the transmit buffer descriptors with the physical address
		 * and size of the mbuf.
		 */
 reload:
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
		    mb_head, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		switch (error) {
		case 0:
			/* Success. */
			break;

		case EFBIG:
		    {
			struct mbuf *mn;

			/*
			 * We ran out of segments.  We have to recopy this
			 * mbuf chain first.  Bail out if we can't get the
			 * new buffers.
			 */
			aprint_error_dev(sc->sc_dev, "too many segments, ");

			MGETHDR(mn, M_DONTWAIT, MT_DATA);
			if (mn == NULL) {
				m_freem(mb_head);
				aprint_error("aborting\n");
				goto out;
			}
			if (mb_head->m_pkthdr.len > MHLEN) {
				MCLGET(mn, M_DONTWAIT);
				if ((mn->m_flags & M_EXT) == 0) {
					m_freem(mn);
					m_freem(mb_head);
					aprint_error("aborting\n");
					goto out;
				}
			}
			m_copydata(mb_head, 0, mb_head->m_pkthdr.len,
			    mtod(mn, void *));
			mn->m_pkthdr.len = mn->m_len = mb_head->m_pkthdr.len;
			m_freem(mb_head);
			mb_head = mn;
			aprint_error("retrying\n");
			goto reload;
		    }

		default:
			/*
			 * Some other problem; report it.
			 */
			aprint_error_dev(sc->sc_dev,
			    "can't load mbuf chain, error = %d\n", error);
			m_freem(mb_head);
			goto out;
		}

		/*
		 * remove our tx desc from freelist.
		 */
		sc->tx_free = txp->tx_next;
		txp->tx_next = NULL;

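		/*
		 * Build the DPD fragment list straight from the DMA map
		 * segments; the final fragment is tagged EX_FR_LAST so
		 * the chip knows where the packet ends.
		 */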
		fr = &txp->tx_dpd->dpd_frags[0];
		totlen = 0;
		for (segment = 0; segment < dmamap->dm_nsegs; segment++, fr++) {
			fr->fr_addr = htole32(dmamap->dm_segs[segment].ds_addr);
			seglen = dmamap->dm_segs[segment].ds_len;
			fr->fr_len = htole32(seglen);
			totlen += seglen;
		}
		if (__predict_false(totlen <= EX_IP4CSUMTX_PADLEN &&
		    (m_csumflags & M_CSUM_IPv4) != 0)) {
			/*
			 * Pad short packets to avoid ip4csum-tx bug.
			 *
			 * XXX Should we still consider if such short
			 *     (36 bytes or less) packets might already
			 *     occupy EX_NTFRAG (== 32) fragments here?
			 */
			KASSERT(segment < EX_NTFRAGS);
			fr->fr_addr = htole32(DPDMEMPAD_DMADDR(sc));
			seglen = EX_IP4CSUMTX_PADLEN + 1 - totlen;
			fr->fr_len = htole32(EX_FR_LAST | seglen);
			totlen += seglen;
		} else {
			fr--;
			fr->fr_len |= htole32(EX_FR_LAST);
		}
		txp->tx_mbhead = mb_head;

		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		dpd = txp->tx_dpd;
		dpd->dpd_nextptr = 0;
		dpd->dpd_fsh = htole32(totlen);

		/* Byte-swap constants so compiler can optimize. */

		if (sc->ex_conf & EX_CONF_90XB) {
			csum_flags = 0;

			if (m_csumflags & M_CSUM_IPv4)
				csum_flags |= htole32(EX_DPD_IPCKSUM);

			if (m_csumflags & M_CSUM_TCPv4)
				csum_flags |= htole32(EX_DPD_TCPCKSUM);
			else if (m_csumflags & M_CSUM_UDPv4)
				csum_flags |= htole32(EX_DPD_UDPCKSUM);

			dpd->dpd_fsh |= csum_flags;
		} else {
			KDASSERT((mb_head->m_pkthdr.csum_flags &
			    (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) == 0);
		}

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
		    ((const char *)(intptr_t)dpd - (const char *)sc->sc_dpd),
		    sizeof (struct ex_dpd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * No need to stall the download engine, we know it's
		 * not busy right now.
		 *
		 * Fix up pointers in both the "soft" tx and the physical
		 * tx list.
		 */
		if (sc->tx_head != NULL) {
			prevdpd = sc->tx_tail->tx_dpd;
			offset = ((const char *)(intptr_t)prevdpd -
			    (const char *)sc->sc_dpd);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
			    offset, sizeof (struct ex_dpd),
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			prevdpd->dpd_nextptr = htole32(DPD_DMADDR(sc, txp));
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
			    offset, sizeof (struct ex_dpd),
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			sc->tx_tail->tx_next = txp;
			sc->tx_tail = txp;
		} else {
			sc->tx_tail = sc->tx_head = txp;
		}

		/*
		 * Pass packet to bpf if there is a listener.
		 */
		bpf_mtap(ifp, mb_head, BPF_D_OUT);
	}
 out:
	if (sc->tx_head) {
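		/*
		 * EX_DPD_DNIND on the last DPD asks the chip for a
		 * Dn Complete interrupt once the chain has been
		 * downloaded; ex_intr() reclaims the mbufs at that point.
		 */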
		sc->tx_tail->tx_dpd->dpd_fsh |= htole32(EX_DPD_DNIND);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
		    ((char *)sc->tx_tail->tx_dpd - (char *)sc->sc_dpd),
		    sizeof (struct ex_dpd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		ifp->if_flags |= IFF_OACTIVE;
		bus_space_write_2(iot, ioh, ELINK_COMMAND, ELINK_DNUNSTALL);
		bus_space_write_4(iot, ioh, ELINK_DNLISTPTR,
		    DPD_DMADDR(sc, sc->tx_head));

		/* trigger watchdog */
		ifp->if_timer = 5;
	}
}

int
ex_intr(void *arg)
{
	struct ex_softc *sc = arg;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	uint16_t stat;
	int ret = 0;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if ((ifp->if_flags & IFF_RUNNING) == 0 ||
	    !device_is_active(sc->sc_dev))
		return (0);

	for (;;) {
		stat = bus_space_read_2(iot, ioh, ELINK_STATUS);

		if ((stat & XL_WATCHED_INTERRUPTS) == 0) {
			if ((stat & INTR_LATCH) == 0) {
#if 0
				aprint_error_dev(sc->sc_dev,
				       "intr latch cleared\n");
#endif
				break;
			}
		}

		ret = 1;

		/*
		 * Acknowledge interrupts.
		 */
		bus_space_write_2(iot, ioh, ELINK_COMMAND, ACK_INTR |
		    (stat & (XL_WATCHED_INTERRUPTS | INTR_LATCH)));
		if (sc->intr_ack)
			(*sc->intr_ack)(sc);

		if (stat & HOST_ERROR) {
			aprint_error_dev(sc->sc_dev,
			    "adapter failure (%x)\n", stat);
			ex_reset(sc);
			ex_init(ifp);
			return 1;
		}
		if (stat & UPD_STATS) {
			ex_getstats(sc);
		}
		if (stat & TX_COMPLETE) {
			ex_txstat(sc);
#if 0
			if (stat & DN_COMPLETE)
				aprint_error_dev(sc->sc_dev,
				    "Ignoring Dn interrupt (%x)\n", stat);
#endif
			/*
			 * In some rare cases, both Tx Complete and
			 * Dn Complete bits are set.  However, the packet
			 * has been reloaded in ex_txstat() and we should
			 * not handle the Dn Complete event here.
			 * Hence the "else" below.
			 */
		} else if (stat & DN_COMPLETE) {
			struct ex_txdesc *txp, *ptxp = NULL;
			bus_dmamap_t txmap;

			/* reset watchdog timer, was set in ex_start() */
			ifp->if_timer = 0;

			for (txp = sc->tx_head; txp != NULL;
			    txp = txp->tx_next) {
				bus_dmamap_sync(sc->sc_dmat,
				    sc->sc_dpd_dmamap,
				    (char *)txp->tx_dpd - (char *)sc->sc_dpd,
				    sizeof (struct ex_dpd),
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
				if (txp->tx_mbhead != NULL) {
					txmap = txp->tx_dmamap;
					bus_dmamap_sync(sc->sc_dmat, txmap,
					    0, txmap->dm_mapsize,
					    BUS_DMASYNC_POSTWRITE);
					bus_dmamap_unload(sc->sc_dmat, txmap);
					m_freem(txp->tx_mbhead);
					txp->tx_mbhead = NULL;
				}
				ptxp = txp;
			}

			/*
			 * Move finished tx buffers back to the tx free list.
			 */
			if (sc->tx_free) {
				sc->tx_ftail->tx_next = sc->tx_head;
				sc->tx_ftail = ptxp;
			} else
				sc->tx_ftail = sc->tx_free = sc->tx_head;

			sc->tx_head = sc->tx_tail = NULL;
			ifp->if_flags &= ~IFF_OACTIVE;

			if (sc->tx_succ_ok < 256)
				sc->tx_succ_ok++;
		}

		if (stat & UP_COMPLETE) {
			struct ex_rxdesc *rxd;
			struct mbuf *m;
			struct ex_upd *upd;
			bus_dmamap_t rxmap;
			uint32_t pktstat;

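			/*
			 * Drain completed UPDs: sync each descriptor, test
			 * EX_UPD_COMPLETE, then either hand the packet up
			 * or recycle the buffer on error.
			 */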
 rcvloop:
			rxd = sc->rx_head;
			rxmap = rxd->rx_dmamap;
			m = rxd->rx_mbhead;
			upd = rxd->rx_upd;

			bus_dmamap_sync(sc->sc_dmat, rxmap, 0,
			    rxmap->dm_mapsize,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap,
			    ((char *)upd - (char *)sc->sc_upd),
			    sizeof (struct ex_upd),
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			pktstat = le32toh(upd->upd_pktstatus);

			if (pktstat & EX_UPD_COMPLETE) {
				/*
				 * Remove first packet from the chain.
				 */
				sc->rx_head = rxd->rx_next;
				rxd->rx_next = NULL;

				/*
				 * Add a new buffer to the receive chain.
				 * If this fails, the old buffer is recycled
				 * instead.
				 */
				if (ex_add_rxbuf(sc, rxd) == 0) {
					uint16_t total_len;

					if (pktstat &
					    ((sc->sc_ethercom.ec_capenable &
					    ETHERCAP_VLAN_MTU) ?
					    EX_UPD_ERR_VLAN : EX_UPD_ERR)) {
						ifp->if_ierrors++;
						m_freem(m);
						goto rcvloop;
					}

					total_len = pktstat & EX_UPD_PKTLENMASK;
					if (total_len <
					    sizeof(struct ether_header)) {
						m_freem(m);
						goto rcvloop;
					}
					m_set_rcvif(m, ifp);
					m->m_pkthdr.len = m->m_len = total_len;
		/*
		 * Set the incoming checksum information for the packet.
		 */
		if ((sc->ex_conf & EX_CONF_90XB) != 0 &&
		    (pktstat & EX_UPD_IPCHECKED) != 0) {
			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if (pktstat & EX_UPD_IPCKSUMERR)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
			if (pktstat & EX_UPD_TCPCHECKED) {
				m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
				if (pktstat & EX_UPD_TCPCKSUMERR)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_TCP_UDP_BAD;
			} else if (pktstat & EX_UPD_UDPCHECKED) {
				m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
				if (pktstat & EX_UPD_UDPCKSUMERR)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_TCP_UDP_BAD;
			}
		}
					if_percpuq_enqueue(ifp->if_percpuq, m);
				}
				goto rcvloop;
			}
			/*
			 * Just in case we filled up all UPDs and the DMA engine
			 * stalled. We could be more subtle about this.
			 */
			if (bus_space_read_4(iot, ioh, ELINK_UPLISTPTR) == 0) {
				aprint_error_dev(sc->sc_dev,
				       "uplistptr was 0\n");
				ex_init(ifp);
			} else if (bus_space_read_4(iot, ioh, ELINK_UPPKTSTATUS)
				   & 0x2000) {
				aprint_error_dev(sc->sc_dev,
				       "receive stalled\n");
				bus_space_write_2(iot, ioh, ELINK_COMMAND,
						  ELINK_UPUNSTALL);
			}
		}

		if (stat)
			rnd_add_uint32(&sc->rnd_source, stat);
	}

	/* no more interrupts */
	if (ret)
		if_schedule_deferred_start(ifp);
	return ret;
}

static int
ex_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct ex_softc *sc = ifp->if_softc;
	int change = ifp->if_flags ^ sc->sc_if_flags;

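	/*
	 * ENETRESET tells the caller a full re-init is required; a bare
	 * promiscuous-mode toggle is applied directly instead.
	 */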
	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
		return ENETRESET;
	else if ((change & IFF_PROMISC) != 0)
		ex_set_mc(sc);
	return 0;
}

int
ex_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct ex_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->ex_mii.mii_media, cmd);
		break;
	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;

		error = 0;

		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			ex_set_mc(sc);
		}
		break;
	}

	sc->sc_if_flags = ifp->if_flags;
	splx(s);
	return (error);
}

void
ex_getstats(struct ex_softc *sc)
{
	bus_space_handle_t ioh = sc->sc_ioh;
	bus_space_tag_t iot = sc->sc_iot;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint8_t upperok;

	GO_WINDOW(6);
	upperok = bus_space_read_1(iot, ioh, UPPER_FRAMES_OK);
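	/*
	 * UPPER_FRAMES_OK extends the 8-bit frame counters: its low two
	 * bits are bits 9:8 of the RX count, and bits 5:4 are bits 9:8
	 * of the TX count, hence the masks and shifts below.
	 */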
	ifp->if_ipackets += bus_space_read_1(iot, ioh, RX_FRAMES_OK);
	ifp->if_ipackets += (upperok & 0x03) << 8;
	ifp->if_opackets += bus_space_read_1(iot, ioh, TX_FRAMES_OK);
	ifp->if_opackets += (upperok & 0x30) << 4;
	ifp->if_ierrors += bus_space_read_1(iot, ioh, RX_OVERRUNS);
	ifp->if_collisions += bus_space_read_1(iot, ioh, TX_COLLISIONS);
	/*
	 * There seems to be no way to get the exact number of collisions;
	 * this is at least the number that occurred.
	 */
	ifp->if_collisions += 2 * bus_space_read_1(iot, ioh,
	    TX_AFTER_X_COLLISIONS);
	/*
	 * Interface byte counts are counted by ether_input() and
	 * ether_output(), so don't accumulate them here.  Just
	 * read the NIC counters so they don't generate overflow interrupts.
	 * Upper byte counters are latched from reading the totals, so
	 * they don't need to be read if we don't need their values.
	 */
	(void)bus_space_read_2(iot, ioh, RX_TOTAL_OK);
	(void)bus_space_read_2(iot, ioh, TX_TOTAL_OK);

	/*
	 * Clear the following to avoid stats overflow interrupts
	 */
	(void)bus_space_read_1(iot, ioh, TX_DEFERRALS);
	(void)bus_space_read_1(iot, ioh, TX_AFTER_1_COLLISION);
	(void)bus_space_read_1(iot, ioh, TX_NO_SQE);
	(void)bus_space_read_1(iot, ioh, TX_CD_LOST);
	GO_WINDOW(4);
	(void)bus_space_read_1(iot, ioh, ELINK_W4_BADSSD);
	GO_WINDOW(1);
}

void
ex_printstats(struct ex_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	ex_getstats(sc);
	printf("in %llu out %llu ierror %llu oerror %llu ibytes %llu obytes "
	    "%llu\n", (unsigned long long)ifp->if_ipackets,
	    (unsigned long long)ifp->if_opackets,
	    (unsigned long long)ifp->if_ierrors,
	    (unsigned long long)ifp->if_oerrors,
	    (unsigned long long)ifp->if_ibytes,
	    (unsigned long long)ifp->if_obytes);
}

void
ex_tick(void *arg)
{
	struct ex_softc *sc = arg;
	int s;

	if (!device_is_active(sc->sc_dev))
		return;

	s = splnet();

	if (sc->ex_conf & EX_CONF_MII)
		mii_tick(&sc->ex_mii);

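	/*
	 * Reading the statistics involves switching register windows;
	 * skip the update while a chip command is still in progress.
	 */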
	if (!(bus_space_read_2((sc)->sc_iot, (sc)->sc_ioh, ELINK_STATUS)
	    & COMMAND_IN_PROGRESS))
		ex_getstats(sc);

	splx(s);

	callout_reset(&sc->ex_mii_callout, hz, ex_tick, sc);
}

void
ex_reset(struct ex_softc *sc)
{
	uint16_t val = GLOBAL_RESET;

	if (sc->ex_conf & EX_CONF_RESETHACK)
		val |= 0x10;
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_COMMAND, val);
	/*
	 * XXX apparently the command in progress bit can't be trusted
	 * during a reset, so we just always wait this long. Fortunately
	 * we normally only reset the chip during autoconfig.
	 */
	delay(100000);
	ex_waitcmd(sc);
}

void
ex_watchdog(struct ifnet *ifp)
{
	struct ex_softc *sc = ifp->if_softc;

	log(LOG_ERR, "%s: device timeout\n", device_xname(sc->sc_dev));
	++sc->sc_ethercom.ec_if.if_oerrors;

	ex_reset(sc);
	ex_init(ifp);
}

void
ex_stop(struct ifnet *ifp, int disable)
{
	struct ex_softc *sc = ifp->if_softc;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct ex_txdesc *tx;
	struct ex_rxdesc *rx;
	int i;

	bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_DISABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_DISABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, STOP_TRANSCEIVER);

	for (tx = sc->tx_head; tx != NULL; tx = tx->tx_next) {
		if (tx->tx_mbhead == NULL)
			continue;
		m_freem(tx->tx_mbhead);
		tx->tx_mbhead = NULL;
		bus_dmamap_unload(sc->sc_dmat, tx->tx_dmamap);
		tx->tx_dpd->dpd_fsh = tx->tx_dpd->dpd_nextptr = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
		    ((char *)tx->tx_dpd - (char *)sc->sc_dpd),
		    sizeof (struct ex_dpd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	sc->tx_tail = sc->tx_head = NULL;
	ex_init_txdescs(sc);

	sc->rx_tail = sc->rx_head = 0;
	for (i = 0; i < EX_NUPD; i++) {
		rx = &sc->sc_rxdescs[i];
		if (rx->rx_mbhead != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rx->rx_dmamap);
			m_freem(rx->rx_mbhead);
			rx->rx_mbhead = NULL;
		}
		ex_add_rxbuf(sc, rx);
	}

	bus_space_write_2(iot, ioh, ELINK_COMMAND, ACK_INTR | INTR_LATCH);

	callout_stop(&sc->ex_mii_callout);
	if (sc->ex_conf & EX_CONF_MII)
		mii_down(&sc->ex_mii);

	if (disable)
		ex_disable(sc);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	sc->sc_if_flags = ifp->if_flags;
	ifp->if_timer = 0;
}

static void
ex_init_txdescs(struct ex_softc *sc)
{
	int i;

	for (i = 0; i < EX_NDPD; i++) {
		sc->sc_txdescs[i].tx_dmamap = sc->sc_tx_dmamaps[i];
		sc->sc_txdescs[i].tx_dpd = &sc->sc_dpd[i];
		if (i < EX_NDPD - 1)
			sc->sc_txdescs[i].tx_next = &sc->sc_txdescs[i + 1];
		else
			sc->sc_txdescs[i].tx_next = NULL;
	}
	sc->tx_free = &sc->sc_txdescs[0];
	sc->tx_ftail = &sc->sc_txdescs[EX_NDPD-1];
}

int
ex_activate(device_t self, enum devact act)
{
	struct ex_softc *sc = device_private(self);

	switch (act) {
	case DVACT_DEACTIVATE:
		if_deactivate(&sc->sc_ethercom.ec_if);
		return 0;
	default:
		return EOPNOTSUPP;
	}
}

int
ex_detach(struct ex_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ex_rxdesc *rxd;
	int i, s;

	/* Succeed now if there's no work to do. */
	if ((sc->ex_flags & EX_FLAGS_ATTACHED) == 0)
		return (0);

	s = splnet();
	/* Stop the interface. Callouts are stopped in it. */
	ex_stop(ifp, 1);
	splx(s);

	/* Destroy our callout. */
	callout_destroy(&sc->ex_mii_callout);

	if (sc->ex_conf & EX_CONF_MII) {
		/* Detach all PHYs */
		mii_detach(&sc->ex_mii, MII_PHY_ANY, MII_OFFSET_ANY);
	}

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->ex_mii.mii_media, IFM_INST_ANY);

	rnd_detach_source(&sc->rnd_source);
	ether_ifdetach(ifp);
	if_detach(ifp);

	for (i = 0; i < EX_NUPD; i++) {
		rxd = &sc->sc_rxdescs[i];
		if (rxd->rx_mbhead != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
			m_freem(rxd->rx_mbhead);
			rxd->rx_mbhead = NULL;
		}
	}
	for (i = 0; i < EX_NUPD; i++)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_dmamaps[i]);
	for (i = 0; i < EX_NDPD; i++)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_dmamaps[i]);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dpd_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dpd_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_dpd,
	    EX_NDPD * sizeof (struct ex_dpd));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_drseg);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_upd_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_upd_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_upd,
	    EX_NUPD * sizeof (struct ex_upd));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_useg, sc->sc_urseg);

	pmf_device_deregister(sc->sc_dev);

	return (0);
}

/*
 * Before reboots, reset card completely.
 */
static bool
ex_shutdown(device_t self, int flags)
{
	struct ex_softc *sc = device_private(self);

	ex_stop(&sc->sc_ethercom.ec_if, 1);
	/*
	 * Make sure the interface is powered up when we reboot,
	 * otherwise firmware on some systems gets really confused.
	 */
	(void) ex_enable(sc);
	return true;
}

/*
 * Read EEPROM data.
 * XXX what to do if EEPROM doesn't unbusy?
 */
uint16_t
ex_read_eeprom(struct ex_softc *sc, int offset)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	uint16_t data = 0, cmd = READ_EEPROM;
	int off;

	off = sc->ex_conf & EX_CONF_EEPROM_OFF ? 0x30 : 0;
	cmd = sc->ex_conf & EX_CONF_EEPROM_8BIT ? READ_EEPROM8 : READ_EEPROM;

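	/*
	 * Some board variants address their contents at a 0x30-word
	 * offset and want the 8-bit read command; the EX_CONF_EEPROM_*
	 * flags are set accordingly by the bus front-end.
	 */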
	GO_WINDOW(0);
	if (ex_eeprom_busy(sc))
		goto out;
	bus_space_write_2(iot, ioh, ELINK_W0_EEPROM_COMMAND,
	    cmd | (off + (offset & 0x3f)));
	if (ex_eeprom_busy(sc))
		goto out;
	data = bus_space_read_2(iot, ioh, ELINK_W0_EEPROM_DATA);
out:
	return data;
}

static int
ex_eeprom_busy(struct ex_softc *sc)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int i = 100;

	while (i--) {
		if (!(bus_space_read_2(iot, ioh, ELINK_W0_EEPROM_COMMAND) &
		    EEPROM_BUSY))
			return 0;
		delay(100);
	}
	aprint_error_dev(sc->sc_dev, "eeprom stays busy.\n");
	return (1);
}

/*
 * Create a new rx buffer and add it to the 'soft' rx list.
 */
static int
ex_add_rxbuf(struct ex_softc *sc, struct ex_rxdesc *rxd)
{
	struct mbuf *m, *oldm;
	bus_dmamap_t rxmap;
	int error, rval = 0;

	oldm = rxd->rx_mbhead;
	rxmap = rxd->rx_dmamap;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m != NULL) {
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			if (oldm == NULL)
				return 1;
			m = oldm;
			MRESETDATA(m);
			rval = 1;
		}
	} else {
		if (oldm == NULL)
			return 1;
		m = oldm;
		MRESETDATA(m);
		rval = 1;
	}

	/*
	 * Setup the DMA map for this receive buffer.
	 */
	if (m != oldm) {
		if (oldm != NULL)
			bus_dmamap_unload(sc->sc_dmat, rxmap);
		error = bus_dmamap_load(sc->sc_dmat, rxmap,
		    m->m_ext.ext_buf, MCLBYTES, NULL,
		    BUS_DMA_READ|BUS_DMA_NOWAIT);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "can't load rx buffer, error = %d\n", error);
			panic("ex_add_rxbuf");	/* XXX */
		}
	}

	/*
	 * Offset the buffer by 2 bytes so the payload after the 14-byte
	 * Ethernet header is 32-bit aligned.
	 */
	m->m_data += 2;

	rxd->rx_mbhead = m;
	rxd->rx_upd->upd_pktstatus = htole32(MCLBYTES - 2);
	rxd->rx_upd->upd_frags[0].fr_addr =
	    htole32(rxmap->dm_segs[0].ds_addr + 2);
	rxd->rx_upd->upd_nextptr = 0;

	/*
	 * Attach it to the end of the list.
	 */
	if (sc->rx_head != NULL) {
		sc->rx_tail->rx_next = rxd;
		sc->rx_tail->rx_upd->upd_nextptr = htole32(sc->sc_upddma +
		    ((char *)rxd->rx_upd - (char *)sc->sc_upd));
		bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap,
		    (char *)sc->rx_tail->rx_upd - (char *)sc->sc_upd,
		    sizeof (struct ex_upd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	} else {
		sc->rx_head = rxd;
	}
	sc->rx_tail = rxd;

	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap,
	    ((char *)rxd->rx_upd - (char *)sc->sc_upd),
	    sizeof (struct ex_upd), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	return (rval);
}

uint32_t
ex_mii_bitbang_read(device_t self)
{
	struct ex_softc *sc = device_private(self);

	/* We're already in Window 4. */
	return (bus_space_read_2(sc->sc_iot, sc->sc_ioh, ELINK_W4_PHYSMGMT));
}

void
ex_mii_bitbang_write(device_t self, uint32_t val)
{
	struct ex_softc *sc = device_private(self);

	/* We're already in Window 4. */
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_W4_PHYSMGMT, val);
}

int
ex_mii_readreg(device_t v, int phy, int reg)
{
	struct ex_softc *sc = device_private(v);
	int val;

	if ((sc->ex_conf & EX_CONF_INTPHY) && phy != ELINK_INTPHY_ID)
		return 0;

	GO_WINDOW(4);

	val = mii_bitbang_readreg(v, &ex_mii_bitbang_ops, phy, reg);

	GO_WINDOW(1);

	return (val);
}

void
ex_mii_writereg(device_t v, int phy, int reg, int data)
{
	struct ex_softc *sc = device_private(v);

	GO_WINDOW(4);

	mii_bitbang_writereg(v, &ex_mii_bitbang_ops, phy, reg, data);

	GO_WINDOW(1);
}

void
ex_mii_statchg(struct ifnet *ifp)
{
	struct ex_softc *sc = ifp->if_softc;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int mctl;

	GO_WINDOW(3);
	mctl = bus_space_read_2(iot, ioh, ELINK_W3_MAC_CONTROL);
	if (sc->ex_mii.mii_media_active & IFM_FDX)
		mctl |= MAC_CONTROL_FDX;
	else
		mctl &= ~MAC_CONTROL_FDX;
	bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL, mctl);
	GO_WINDOW(1);   /* back to operating window */
}

int
ex_enable(struct ex_softc *sc)
{
	if (sc->enabled == 0 && sc->enable != NULL) {
		if ((*sc->enable)(sc) != 0) {
			aprint_error_dev(sc->sc_dev, "device enable failed\n");
			return (EIO);
		}
		sc->enabled = 1;
	}
	return (0);
}

void
ex_disable(struct ex_softc *sc)
{
	if (sc->enabled == 1 && sc->disable != NULL) {
		(*sc->disable)(sc);
		sc->enabled = 0;
	}
}