/* xref: /openbsd-src/sys/dev/pci/if_sk.c (revision a28daedfc357b214be5c701aa8ba8adb29a7f1c2) */
1 /*	$OpenBSD: if_sk.c,v 1.151 2009/03/30 19:09:43 kettenis Exp $	*/
2 
3 /*
4  * Copyright (c) 1997, 1998, 1999, 2000
5  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by Bill Paul.
18  * 4. Neither the name of the author nor the names of any co-contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  * $FreeBSD: /c/ncvs/src/sys/pci/if_sk.c,v 1.20 2000/04/22 02:16:37 wpaul Exp $
35  */
36 
37 /*
38  * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
39  *
40  * Permission to use, copy, modify, and distribute this software for any
41  * purpose with or without fee is hereby granted, provided that the above
42  * copyright notice and this permission notice appear in all copies.
43  *
44  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
45  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
46  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
47  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
48  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
49  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
50  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
51  */
52 
53 /*
54  * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports
55  * the SK-984x series adapters, both single port and dual port.
56  * References:
57  * 	The XaQti XMAC II datasheet,
58  * http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
59  *	The SysKonnect GEnesis manual, http://www.syskonnect.com
60  *
61  * Note: XaQti has been acquired by Vitesse, and Vitesse does not have the
62  * XMAC II datasheet online. I have put my copy at people.freebsd.org as a
63  * convenience to others until Vitesse corrects this problem:
64  *
65  * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
66  *
67  * Written by Bill Paul <wpaul@ee.columbia.edu>
68  * Department of Electrical Engineering
69  * Columbia University, New York City
70  */
71 
72 /*
73  * The SysKonnect gigabit ethernet adapters consist of two main
74  * components: the SysKonnect GEnesis controller chip and the XaQti Corp.
75  * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC
76  * components and a PHY while the GEnesis controller provides a PCI
77  * interface with DMA support. Each card may have between 512K and
78  * 2MB of SRAM on board depending on the configuration.
79  *
80  * The SysKonnect GEnesis controller can have either one or two XMAC
81  * chips connected to it, allowing single or dual port NIC configurations.
82  * SysKonnect has the distinction of being the only vendor on the market
83  * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs,
84  * dual DMA queues, packet/MAC/transmit arbiters and direct access to the
85  * XMAC registers. This driver takes advantage of these features to allow
86  * both XMACs to operate as independent interfaces.
87  */
88 
89 #include "bpfilter.h"
90 
91 #include <sys/param.h>
92 #include <sys/systm.h>
93 #include <sys/sockio.h>
94 #include <sys/mbuf.h>
95 #include <sys/malloc.h>
96 #include <sys/kernel.h>
97 #include <sys/socket.h>
98 #include <sys/timeout.h>
99 #include <sys/device.h>
100 #include <sys/queue.h>
101 
102 #include <net/if.h>
103 #include <net/if_dl.h>
104 #include <net/if_types.h>
105 
106 #ifdef INET
107 #include <netinet/in.h>
108 #include <netinet/in_systm.h>
109 #include <netinet/in_var.h>
110 #include <netinet/ip.h>
111 #include <netinet/udp.h>
112 #include <netinet/tcp.h>
113 #include <netinet/if_ether.h>
114 #endif
115 
116 #include <net/if_media.h>
117 #include <net/if_vlan_var.h>
118 
119 #if NBPFILTER > 0
120 #include <net/bpf.h>
121 #endif
122 
123 #include <dev/mii/mii.h>
124 #include <dev/mii/miivar.h>
125 #include <dev/mii/brgphyreg.h>
126 
127 #include <dev/pci/pcireg.h>
128 #include <dev/pci/pcivar.h>
129 #include <dev/pci/pcidevs.h>
130 
131 #include <dev/pci/if_skreg.h>
132 #include <dev/pci/if_skvar.h>
133 
134 int skc_probe(struct device *, void *, void *);
135 void skc_attach(struct device *, struct device *self, void *aux);
136 void skc_shutdown(void *);
137 int sk_probe(struct device *, void *, void *);
138 void sk_attach(struct device *, struct device *self, void *aux);
139 int skcprint(void *, const char *);
140 int sk_intr(void *);
141 void sk_intr_bcom(struct sk_if_softc *);
142 void sk_intr_xmac(struct sk_if_softc *);
143 void sk_intr_yukon(struct sk_if_softc *);
144 static __inline int sk_rxvalid(struct sk_softc *, u_int32_t, u_int32_t);
145 void sk_rxeof(struct sk_if_softc *);
146 void sk_txeof(struct sk_if_softc *);
147 int sk_encap(struct sk_if_softc *, struct mbuf *, u_int32_t *);
148 void sk_start(struct ifnet *);
149 int sk_ioctl(struct ifnet *, u_long, caddr_t);
150 void sk_init(void *);
151 void sk_init_xmac(struct sk_if_softc *);
152 void sk_init_yukon(struct sk_if_softc *);
153 void sk_stop(struct sk_if_softc *);
154 void sk_watchdog(struct ifnet *);
155 int sk_ifmedia_upd(struct ifnet *);
156 void sk_ifmedia_sts(struct ifnet *, struct ifmediareq *);
157 void sk_reset(struct sk_softc *);
158 int sk_newbuf(struct sk_if_softc *);
159 int sk_init_rx_ring(struct sk_if_softc *);
160 int sk_init_tx_ring(struct sk_if_softc *);
161 void sk_fill_rx_ring(struct sk_if_softc *);
162 
163 int sk_xmac_miibus_readreg(struct device *, int, int);
164 void sk_xmac_miibus_writereg(struct device *, int, int, int);
165 void sk_xmac_miibus_statchg(struct device *);
166 
167 int sk_marv_miibus_readreg(struct device *, int, int);
168 void sk_marv_miibus_writereg(struct device *, int, int, int);
169 void sk_marv_miibus_statchg(struct device *);
170 
171 u_int32_t sk_xmac_hash(caddr_t);
172 u_int32_t sk_yukon_hash(caddr_t);
173 void sk_setfilt(struct sk_if_softc *, caddr_t, int);
174 void sk_setmulti(struct sk_if_softc *);
175 void sk_setpromisc(struct sk_if_softc *);
176 void sk_tick(void *);
177 void sk_yukon_tick(void *);
178 void sk_rxcsum(struct ifnet *, struct mbuf *, const u_int16_t, const u_int16_t);
179 
180 #ifdef SK_DEBUG
181 #define DPRINTF(x)	if (skdebug) printf x
182 #define DPRINTFN(n,x)	if (skdebug >= (n)) printf x
183 int	skdebug = 0;
184 
185 void sk_dump_txdesc(struct sk_tx_desc *, int);
186 void sk_dump_mbuf(struct mbuf *);
187 void sk_dump_bytes(const char *, int);
188 #else
189 #define DPRINTF(x)
190 #define DPRINTFN(n,x)
191 #endif
192 
/*
 * PCI vendor/product pairs matched by this driver.  The Linksys
 * EG1032 is not listed here because it shares IDs with an unrelated
 * board; skc_probe() matches it separately by subsystem ID.
 */
const struct pci_matchid skc_devices[] = {
	{ PCI_VENDOR_3COM,		PCI_PRODUCT_3COM_3C940 },
	{ PCI_VENDOR_3COM,		PCI_PRODUCT_3COM_3C940B },
	{ PCI_VENDOR_CNET,		PCI_PRODUCT_CNET_GIGACARD },
	{ PCI_VENDOR_DLINK,		PCI_PRODUCT_DLINK_DGE530T_A1 },
	{ PCI_VENDOR_DLINK,		PCI_PRODUCT_DLINK_DGE530T_B1 },
	{ PCI_VENDOR_LINKSYS,		PCI_PRODUCT_LINKSYS_EG1064 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_BELKIN },
	{ PCI_VENDOR_SCHNEIDERKOCH,	PCI_PRODUCT_SCHNEIDERKOCH_SK98XX },
	{ PCI_VENDOR_SCHNEIDERKOCH,	PCI_PRODUCT_SCHNEIDERKOCH_SK98XX2 },
	{ PCI_VENDOR_SCHNEIDERKOCH,	PCI_PRODUCT_SCHNEIDERKOCH_SK9821 },
	{ PCI_VENDOR_SCHNEIDERKOCH,	PCI_PRODUCT_SCHNEIDERKOCH_SK9843 }
};
208 
209 #define SK_LINKSYS_EG1032_SUBID 0x00151737
210 
211 static inline u_int32_t
212 sk_win_read_4(struct sk_softc *sc, u_int32_t reg)
213 {
214 	return CSR_READ_4(sc, reg);
215 }
216 
217 static inline u_int16_t
218 sk_win_read_2(struct sk_softc *sc, u_int32_t reg)
219 {
220 	return CSR_READ_2(sc, reg);
221 }
222 
223 static inline u_int8_t
224 sk_win_read_1(struct sk_softc *sc, u_int32_t reg)
225 {
226 	return CSR_READ_1(sc, reg);
227 }
228 
229 static inline void
230 sk_win_write_4(struct sk_softc *sc, u_int32_t reg, u_int32_t x)
231 {
232 	CSR_WRITE_4(sc, reg, x);
233 }
234 
235 static inline void
236 sk_win_write_2(struct sk_softc *sc, u_int32_t reg, u_int16_t x)
237 {
238 	CSR_WRITE_2(sc, reg, x);
239 }
240 
241 static inline void
242 sk_win_write_1(struct sk_softc *sc, u_int32_t reg, u_int8_t x)
243 {
244 	CSR_WRITE_1(sc, reg, x);
245 }
246 
/*
 * Read a PHY register through the XMAC's MII interface.
 * Returns the register value, or 0 on timeout or when a non-zero PHY
 * address is used with the XMAC's internal PHY.
 */
int
sk_xmac_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
	int i;

	DPRINTFN(9, ("sk_xmac_miibus_readreg\n"));

	/* The internal XMAC PHY only answers at address 0. */
	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC && phy != 0)
		return (0);

	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
	/* Dummy read; the returned value is discarded. */
	SK_XM_READ_2(sc_if, XM_PHY_DATA);
	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
		/* External PHY: poll until the data-ready bit is set. */
		for (i = 0; i < SK_TIMEOUT; i++) {
			DELAY(1);
			if (SK_XM_READ_2(sc_if, XM_MMUCMD) &
			    XM_MMUCMD_PHYDATARDY)
				break;
		}

		if (i == SK_TIMEOUT) {
			printf("%s: phy failed to come ready\n",
			    sc_if->sk_dev.dv_xname);
			return (0);
		}
	}
	DELAY(1);
	return (SK_XM_READ_2(sc_if, XM_PHY_DATA));
}
277 
/*
 * Write a PHY register through the XMAC's MII interface.  Waits for
 * the MII engine to go idle both before and after writing the data
 * register; timeouts are logged but not reported to the caller.
 */
void
sk_xmac_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
	int i;

	DPRINTFN(9, ("sk_xmac_miibus_writereg\n"));

	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
	/* Wait for any previous cycle to finish. */
	for (i = 0; i < SK_TIMEOUT; i++) {
		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
			break;
	}

	if (i == SK_TIMEOUT) {
		printf("%s: phy failed to come ready\n",
		    sc_if->sk_dev.dv_xname);
		return;
	}

	/* Writing the data register starts the MII write cycle. */
	SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val);
	/*
	 * NOTE(review): unlike the first loop, this poll inserts a 1us
	 * DELAY per iteration -- presumably intentional since a write
	 * cycle was just started; confirm against the XMAC datasheet.
	 */
	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
			break;
	}

	if (i == SK_TIMEOUT)
		printf("%s: phy write timed out\n", sc_if->sk_dev.dv_xname);
}
308 
309 void
310 sk_xmac_miibus_statchg(struct device *dev)
311 {
312 	struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
313 	struct mii_data *mii = &sc_if->sk_mii;
314 
315 	DPRINTFN(9, ("sk_xmac_miibus_statchg\n"));
316 
317 	/*
318 	 * If this is a GMII PHY, manually set the XMAC's
319 	 * duplex mode accordingly.
320 	 */
321 	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
322 		if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
323 			SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
324 		else
325 			SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
326 	}
327 }
328 
/*
 * Read a PHY register on Yukon (Marvell) parts via the SMI interface.
 * Only PHY address 0 with a Marvell copper/fiber PHY type is serviced;
 * any other request, or a timeout, returns 0.
 */
int
sk_marv_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
	u_int16_t val;
	int i;

	if (phy != 0 ||
	    (sc_if->sk_phytype != SK_PHYTYPE_MARV_COPPER &&
	     sc_if->sk_phytype != SK_PHYTYPE_MARV_FIBER)) {
		DPRINTFN(9, ("sk_marv_miibus_readreg (skip) phy=%d, reg=%#x\n",
			     phy, reg));
		return (0);
	}

	/* Start the SMI read cycle. */
        SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
		      YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ);

	/* Poll until the chip flags the read data as valid. */
	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		val = SK_YU_READ_2(sc_if, YUKON_SMICR);
		if (val & YU_SMICR_READ_VALID)
			break;
	}

	if (i == SK_TIMEOUT) {
		printf("%s: phy failed to come ready\n",
		       sc_if->sk_dev.dv_xname);
		return (0);
	}

 	DPRINTFN(9, ("sk_marv_miibus_readreg: i=%d, timeout=%d\n", i,
		     SK_TIMEOUT));

	/* Fetch the data latched by the completed cycle. */
        val = SK_YU_READ_2(sc_if, YUKON_SMIDR);

	DPRINTFN(9, ("sk_marv_miibus_readreg phy=%d, reg=%#x, val=%#x\n",
		     phy, reg, val));

	return (val);
}
370 
/*
 * Write a PHY register on Yukon (Marvell) parts via the SMI interface.
 * The data register is loaded first, then the control write starts the
 * cycle.  A timeout is logged but not reported to the caller.
 */
void
sk_marv_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
	int i;

	DPRINTFN(9, ("sk_marv_miibus_writereg phy=%d reg=%#x val=%#x\n",
		     phy, reg, val));

	SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val);
	SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
		      YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE);

	/* Poll until the SMI engine goes idle again. */
	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		if (!(SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY))
			break;
	}

	if (i == SK_TIMEOUT)
		printf("%s: phy write timed out\n", sc_if->sk_dev.dv_xname);
}
393 
/*
 * MII status-change callback for Yukon (Marvell) parts.  Nothing to
 * reprogram here; the body only emits a debug trace of the GPCR
 * (and is a no-op unless built with SK_DEBUG).
 */
void
sk_marv_miibus_statchg(struct device *dev)
{
	DPRINTFN(9, ("sk_marv_miibus_statchg: gpcr=%x\n",
		     SK_YU_READ_2(((struct sk_if_softc *)dev), YUKON_GPCR)));
}
400 
401 u_int32_t
402 sk_xmac_hash(caddr_t addr)
403 {
404 	u_int32_t crc;
405 
406 	crc = ether_crc32_le(addr, ETHER_ADDR_LEN);
407 	return (~crc & ((1 << SK_HASH_BITS) - 1));
408 }
409 
410 u_int32_t
411 sk_yukon_hash(caddr_t addr)
412 {
413 	u_int32_t crc;
414 
415 	crc = ether_crc32_be(addr, ETHER_ADDR_LEN);
416 	return (crc & ((1 << SK_HASH_BITS) - 1));
417 }
418 
/*
 * Load a 6-byte station/multicast address into XMAC perfect-filter
 * slot `slot', written as three little-endian 16-bit words.
 * NOTE(review): the u_int16_t loads assume `addr' is at least 2-byte
 * aligned; addresses from the ethernet stack normally are, but verify
 * on strict-alignment platforms.
 */
void
sk_setfilt(struct sk_if_softc *sc_if, caddr_t addr, int slot)
{
	int base = XM_RXFILT_ENTRY(slot);

	SK_XM_WRITE_2(sc_if, base, letoh16(*(u_int16_t *)(&addr[0])));
	SK_XM_WRITE_2(sc_if, base + 2, letoh16(*(u_int16_t *)(&addr[2])));
	SK_XM_WRITE_2(sc_if, base + 4, letoh16(*(u_int16_t *)(&addr[4])));
}
428 
429 void
430 sk_setmulti(struct sk_if_softc *sc_if)
431 {
432 	struct sk_softc *sc = sc_if->sk_softc;
433 	struct ifnet *ifp= &sc_if->arpcom.ac_if;
434 	u_int32_t hashes[2] = { 0, 0 };
435 	int h, i;
436 	struct arpcom *ac = &sc_if->arpcom;
437 	struct ether_multi *enm;
438 	struct ether_multistep step;
439 	u_int8_t dummy[] = { 0, 0, 0, 0, 0 ,0 };
440 
441 	/* First, zot all the existing filters. */
442 	switch(sc->sk_type) {
443 	case SK_GENESIS:
444 		for (i = 1; i < XM_RXFILT_MAX; i++)
445 			sk_setfilt(sc_if, (caddr_t)&dummy, i);
446 
447 		SK_XM_WRITE_4(sc_if, XM_MAR0, 0);
448 		SK_XM_WRITE_4(sc_if, XM_MAR2, 0);
449 		break;
450 	case SK_YUKON:
451 	case SK_YUKON_LITE:
452 	case SK_YUKON_LP:
453 		SK_YU_WRITE_2(sc_if, YUKON_MCAH1, 0);
454 		SK_YU_WRITE_2(sc_if, YUKON_MCAH2, 0);
455 		SK_YU_WRITE_2(sc_if, YUKON_MCAH3, 0);
456 		SK_YU_WRITE_2(sc_if, YUKON_MCAH4, 0);
457 		break;
458 	}
459 
460 	/* Now program new ones. */
461 allmulti:
462 	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
463 		hashes[0] = 0xFFFFFFFF;
464 		hashes[1] = 0xFFFFFFFF;
465 	} else {
466 		i = 1;
467 		/* First find the tail of the list. */
468 		ETHER_FIRST_MULTI(step, ac, enm);
469 		while (enm != NULL) {
470 			if (bcmp(enm->enm_addrlo, enm->enm_addrhi,
471 				 ETHER_ADDR_LEN)) {
472 				ifp->if_flags |= IFF_ALLMULTI;
473 				goto allmulti;
474 			}
475 			/*
476 			 * Program the first XM_RXFILT_MAX multicast groups
477 			 * into the perfect filter. For all others,
478 			 * use the hash table.
479 			 */
480 			if (SK_IS_GENESIS(sc) && i < XM_RXFILT_MAX) {
481 				sk_setfilt(sc_if, enm->enm_addrlo, i);
482 				i++;
483 			}
484 			else {
485 				switch(sc->sk_type) {
486 				case SK_GENESIS:
487 					h = sk_xmac_hash(enm->enm_addrlo);
488 					break;
489 
490 				case SK_YUKON:
491 				case SK_YUKON_LITE:
492 				case SK_YUKON_LP:
493 					h = sk_yukon_hash(enm->enm_addrlo);
494 					break;
495 				}
496 				if (h < 32)
497 					hashes[0] |= (1 << h);
498 				else
499 					hashes[1] |= (1 << (h - 32));
500 			}
501 
502 			ETHER_NEXT_MULTI(step, enm);
503 		}
504 	}
505 
506 	switch(sc->sk_type) {
507 	case SK_GENESIS:
508 		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_HASH|
509 			       XM_MODE_RX_USE_PERFECT);
510 		SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]);
511 		SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]);
512 		break;
513 	case SK_YUKON:
514 	case SK_YUKON_LITE:
515 	case SK_YUKON_LP:
516 		SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff);
517 		SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff);
518 		SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff);
519 		SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff);
520 		break;
521 	}
522 }
523 
524 void
525 sk_setpromisc(struct sk_if_softc *sc_if)
526 {
527 	struct sk_softc	*sc = sc_if->sk_softc;
528 	struct ifnet *ifp= &sc_if->arpcom.ac_if;
529 
530 	switch(sc->sk_type) {
531 	case SK_GENESIS:
532 		if (ifp->if_flags & IFF_PROMISC)
533 			SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
534 		else
535 			SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
536 		break;
537 	case SK_YUKON:
538 	case SK_YUKON_LITE:
539 	case SK_YUKON_LP:
540 		if (ifp->if_flags & IFF_PROMISC) {
541 			SK_YU_CLRBIT_2(sc_if, YUKON_RCR,
542 			    YU_RCR_UFLEN | YU_RCR_MUFLEN);
543 		} else {
544 			SK_YU_SETBIT_2(sc_if, YUKON_RCR,
545 			    YU_RCR_UFLEN | YU_RCR_MUFLEN);
546 		}
547 		break;
548 	}
549 }
550 
551 int
552 sk_init_rx_ring(struct sk_if_softc *sc_if)
553 {
554 	struct sk_chain_data	*cd = &sc_if->sk_cdata;
555 	struct sk_ring_data	*rd = sc_if->sk_rdata;
556 	int			i, nexti;
557 
558 	bzero((char *)rd->sk_rx_ring,
559 	    sizeof(struct sk_rx_desc) * SK_RX_RING_CNT);
560 
561 	for (i = 0; i < SK_RX_RING_CNT; i++) {
562 		cd->sk_rx_chain[i].sk_desc = &rd->sk_rx_ring[i];
563 		if (i == (SK_RX_RING_CNT - 1))
564 			nexti = 0;
565 		else
566 			nexti = i + 1;
567 		cd->sk_rx_chain[i].sk_next = &cd->sk_rx_chain[nexti];
568 		rd->sk_rx_ring[i].sk_next = htole32(SK_RX_RING_ADDR(sc_if, nexti));
569 		rd->sk_rx_ring[i].sk_csum1_start = htole16(ETHER_HDR_LEN);
570 		rd->sk_rx_ring[i].sk_csum2_start = htole16(ETHER_HDR_LEN +
571 		    sizeof(struct ip));
572 	}
573 
574 	sc_if->sk_cdata.sk_rx_prod = 0;
575 	sc_if->sk_cdata.sk_rx_cons = 0;
576 	sc_if->sk_cdata.sk_rx_cnt = 0;
577 
578 	sk_fill_rx_ring(sc_if);
579 	return (0);
580 }
581 
582 void
583 sk_fill_rx_ring(struct sk_if_softc *sc_if)
584 {
585 	while (sc_if->sk_cdata.sk_rx_cnt < SK_RX_RING_CNT) {
586 		if (sk_newbuf(sc_if) == ENOBUFS)
587 			break;
588 	}
589 }
590 
591 int
592 sk_init_tx_ring(struct sk_if_softc *sc_if)
593 {
594 	struct sk_softc		*sc = sc_if->sk_softc;
595 	struct sk_chain_data	*cd = &sc_if->sk_cdata;
596 	struct sk_ring_data	*rd = sc_if->sk_rdata;
597 	bus_dmamap_t		dmamap;
598 	struct sk_txmap_entry	*entry;
599 	int			i, nexti;
600 
601 	bzero((char *)sc_if->sk_rdata->sk_tx_ring,
602 	    sizeof(struct sk_tx_desc) * SK_TX_RING_CNT);
603 
604 	SIMPLEQ_INIT(&sc_if->sk_txmap_head);
605 	for (i = 0; i < SK_TX_RING_CNT; i++) {
606 		cd->sk_tx_chain[i].sk_desc = &rd->sk_tx_ring[i];
607 		if (i == (SK_TX_RING_CNT - 1))
608 			nexti = 0;
609 		else
610 			nexti = i + 1;
611 		cd->sk_tx_chain[i].sk_next = &cd->sk_tx_chain[nexti];
612 		rd->sk_tx_ring[i].sk_next = htole32(SK_TX_RING_ADDR(sc_if, nexti));
613 
614 		if (bus_dmamap_create(sc->sc_dmatag, SK_JLEN, SK_NTXSEG,
615 		   SK_JLEN, 0, BUS_DMA_NOWAIT, &dmamap))
616 			return (ENOBUFS);
617 
618 		entry = malloc(sizeof(*entry), M_DEVBUF, M_NOWAIT);
619 		if (!entry) {
620 			bus_dmamap_destroy(sc->sc_dmatag, dmamap);
621 			return (ENOBUFS);
622 		}
623 		entry->dmamap = dmamap;
624 		SIMPLEQ_INSERT_HEAD(&sc_if->sk_txmap_head, entry, link);
625 	}
626 
627 	sc_if->sk_cdata.sk_tx_prod = 0;
628 	sc_if->sk_cdata.sk_tx_cons = 0;
629 	sc_if->sk_cdata.sk_tx_cnt = 0;
630 
631 	SK_CDTXSYNC(sc_if, 0, SK_TX_RING_CNT,
632 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
633 
634 	return (0);
635 }
636 
/*
 * Attach one fresh receive buffer at the current RX producer index.
 * Allocates an SK_JLEN cluster, DMA-loads it (possibly across several
 * descriptors), and fills the descriptors for the chip.  Returns
 * ENOBUFS if an mbuf, cluster, DMA load, or enough free ring slots
 * cannot be obtained; 0 on success.
 */
int
sk_newbuf(struct sk_if_softc *sc_if)
{
	struct sk_chain		*c;
	struct sk_rx_desc	*r;
	struct mbuf		*m;
	bus_dmamap_t		dmamap;
	u_int32_t		sk_ctl;
	int			i, error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	/* MCLGETI sets M_EXT on the mbuf only if a cluster was attached. */
	MCLGETI(m, M_DONTWAIT, &sc_if->arpcom.ac_if, SK_JLEN);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	m->m_len = m->m_pkthdr.len = SK_JLEN;
	/* Trim the front by ETHER_ALIGN to align the payload. */
	m_adj(m, ETHER_ALIGN);

	dmamap = sc_if->sk_cdata.sk_rx_map[sc_if->sk_cdata.sk_rx_prod];

	error = bus_dmamap_load_mbuf(sc_if->sk_softc->sc_dmatag, dmamap, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		return (ENOBUFS);
	}

	/* Need one free descriptor per DMA segment. */
	if (dmamap->dm_nsegs > (SK_RX_RING_CNT - sc_if->sk_cdata.sk_rx_cnt)) {
		bus_dmamap_unload(sc_if->sk_softc->sc_dmatag, dmamap);
		m_freem(m);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc_if->sk_softc->sc_dmatag, dmamap, 0,
	    dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	/* The mbuf pointer lives on the first chain entry only. */
	c = &sc_if->sk_cdata.sk_rx_chain[sc_if->sk_cdata.sk_rx_prod];
	r = c->sk_desc;
	c->sk_mbuf = m;

	sk_ctl = SK_RXSTAT;
	for (i = 0; i < dmamap->dm_nsegs; i++) {
		r->sk_data_lo = htole32(dmamap->dm_segs[i].ds_addr);
		r->sk_ctl = htole32(dmamap->dm_segs[i].ds_len | sk_ctl);
		/* Only the first fragment carries the FIRSTFRAG bit. */
		sk_ctl &= ~SK_RXCTL_FIRSTFRAG;

		SK_INC(sc_if->sk_cdata.sk_rx_prod, SK_RX_RING_CNT);
		sc_if->sk_cdata.sk_rx_cnt++;

		c = &sc_if->sk_cdata.sk_rx_chain[sc_if->sk_cdata.sk_rx_prod];
		r = c->sk_desc;
		c->sk_mbuf = NULL;
	}

	/*
	 * NOTE(review): the sync uses `i' (== dm_nsegs after the loop)
	 * as the descriptor index rather than the first descriptor
	 * filled -- verify this is the intended SK_CDRXSYNC argument.
	 */
	SK_CDRXSYNC(sc_if, i, BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	return (0);
}
699 
700 /*
701  * Set media options.
702  */
703 int
704 sk_ifmedia_upd(struct ifnet *ifp)
705 {
706 	struct sk_if_softc *sc_if = ifp->if_softc;
707 
708 	mii_mediachg(&sc_if->sk_mii);
709 	return (0);
710 }
711 
712 /*
713  * Report current media status.
714  */
715 void
716 sk_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
717 {
718 	struct sk_if_softc *sc_if = ifp->if_softc;
719 
720 	mii_pollstat(&sc_if->sk_mii);
721 	ifmr->ifm_active = sc_if->sk_mii.mii_media_active;
722 	ifmr->ifm_status = sc_if->sk_mii.mii_media_status;
723 }
724 
/*
 * Handle interface ioctls.  Runs under splnet().  Media ioctls are
 * handed to the MII layer and anything unrecognized falls through to
 * ether_ioctl(); an ENETRESET result is translated into a multicast
 * filter reload when the interface is running.
 */
int
sk_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct sk_if_softc *sc_if = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *) data;
	struct ifreq *ifr = (struct ifreq *) data;
	struct mii_data *mii;
	int s, error = 0;

	s = splnet();

	switch(command) {
	case SIOCSIFADDR:
		/* Assigning an address implicitly brings the interface up. */
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			sk_init(sc_if);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc_if->arpcom, ifa);
#endif /* INET */
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only IFF_PROMISC changed while running,
			 * reprogram the filters instead of doing a
			 * full reinit.
			 */
			if (ifp->if_flags & IFF_RUNNING &&
			    (ifp->if_flags ^ sc_if->sk_if_flags)
			     & IFF_PROMISC) {
				sk_setpromisc(sc_if);
				sk_setmulti(sc_if);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					sk_init(sc_if);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				sk_stop(sc_if);
		}
		/* Remember the flags so the next delta can be computed. */
		sc_if->sk_if_flags = ifp->if_flags;
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = &sc_if->sk_mii;
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;

	default:
		error = ether_ioctl(ifp, &sc_if->arpcom, command, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			sk_setmulti(sc_if);
		error = 0;
	}

	splx(s);
	return (error);
}
784 
785 /*
786  * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
787  * IDs against our list and return a device name if we find a match.
788  */
789 int
790 skc_probe(struct device *parent, void *match, void *aux)
791 {
792 	struct pci_attach_args *pa = aux;
793 	pci_chipset_tag_t pc = pa->pa_pc;
794 	pcireg_t subid;
795 
796 	subid = pci_conf_read(pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
797 
798 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_LINKSYS &&
799 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_LINKSYS_EG1032 &&
800 	    subid == SK_LINKSYS_EG1032_SUBID)
801 		return (1);
802 
803 	return (pci_matchbyid((struct pci_attach_args *)aux, skc_devices,
804 	    sizeof(skc_devices)/sizeof(skc_devices[0])));
805 }
806 
807 /*
808  * Force the GEnesis into reset, then bring it out of reset.
809  */
/*
 * Force the controller into reset and bring it back out, then restore
 * the packet arbiter (Genesis only), the RAM interface, and the
 * interrupt moderation setup.
 */
void
sk_reset(struct sk_softc *sc)
{
	u_int32_t imtimer_ticks;

	DPRINTFN(2, ("sk_reset\n"));

	CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_RESET);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_RESET);
	if (SK_IS_YUKON(sc))
		CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET);

	/* Hold reset briefly, then release in the reverse order. */
	DELAY(1000);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_UNRESET);
	DELAY(2);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_UNRESET);
	if (SK_IS_YUKON(sc))
		CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR);

	DPRINTFN(2, ("sk_reset: sk_csr=%x\n", CSR_READ_2(sc, SK_CSR)));
	DPRINTFN(2, ("sk_reset: sk_link_ctrl=%x\n",
		     CSR_READ_2(sc, SK_LINK_CTRL)));

	if (SK_IS_GENESIS(sc)) {
		/* Configure packet arbiter */
		sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET);
		sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT);
		sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT);
		sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT);
		sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT);
	}

	/* Enable RAM interface */
	sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);

	/*
	 * Configure interrupt moderation. The moderation timer
	 * defers interrupts specified in the interrupt moderation
	 * timer mask based on the timeout specified in the interrupt
	 * moderation timer init register. Each bit in the timer
	 * register represents one tick, so to specify a timeout in
	 * microseconds, we have to multiply by the correct number of
	 * ticks-per-microsecond.
	 */
	switch (sc->sk_type) {
	case SK_GENESIS:
		imtimer_ticks = SK_IMTIMER_TICKS_GENESIS;
		break;
	default:
		imtimer_ticks = SK_IMTIMER_TICKS_YUKON;
	}
	/*
	 * NOTE(review): imtimer_ticks looks unused here, but the
	 * SK_IM_USECS() macro presumably expands to reference it --
	 * confirm in if_skreg.h before removing the local.
	 */
	sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(100));
	sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
	    SK_ISR_RX1_EOF|SK_ISR_RX2_EOF);
	sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);
}
866 
867 int
868 sk_probe(struct device *parent, void *match, void *aux)
869 {
870 	struct skc_attach_args *sa = aux;
871 
872 	if (sa->skc_port != SK_PORT_A && sa->skc_port != SK_PORT_B)
873 		return (0);
874 
875 	switch (sa->skc_type) {
876 	case SK_GENESIS:
877 	case SK_YUKON:
878 	case SK_YUKON_LITE:
879 	case SK_YUKON_LP:
880 		return (1);
881 	}
882 
883 	return (0);
884 }
885 
886 /*
887  * Each XMAC chip is attached as a separate logical IP interface.
888  * Single port cards will have only one logical interface of course.
889  */
890 void
891 sk_attach(struct device *parent, struct device *self, void *aux)
892 {
893 	struct sk_if_softc *sc_if = (struct sk_if_softc *) self;
894 	struct sk_softc *sc = (struct sk_softc *)parent;
895 	struct skc_attach_args *sa = aux;
896 	struct ifnet *ifp;
897 	caddr_t kva;
898 	bus_dma_segment_t seg;
899 	int i, rseg;
900 	int error;
901 
902 	sc_if->sk_port = sa->skc_port;
903 	sc_if->sk_softc = sc;
904 	sc->sk_if[sa->skc_port] = sc_if;
905 
906 	if (sa->skc_port == SK_PORT_A)
907 		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0;
908 	if (sa->skc_port == SK_PORT_B)
909 		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1;
910 
911 	DPRINTFN(2, ("begin sk_attach: port=%d\n", sc_if->sk_port));
912 
913 	/*
914 	 * Get station address for this interface. Note that
915 	 * dual port cards actually come with three station
916 	 * addresses: one for each port, plus an extra. The
917 	 * extra one is used by the SysKonnect driver software
918 	 * as a 'virtual' station address for when both ports
919 	 * are operating in failover mode. Currently we don't
920 	 * use this extra address.
921 	 */
922 	for (i = 0; i < ETHER_ADDR_LEN; i++)
923 		sc_if->arpcom.ac_enaddr[i] =
924 		    sk_win_read_1(sc, SK_MAC0_0 + (sa->skc_port * 8) + i);
925 
926 	printf(": address %s\n",
927 	    ether_sprintf(sc_if->arpcom.ac_enaddr));
928 
929 	/*
930 	 * Set up RAM buffer addresses. The NIC will have a certain
931 	 * amount of SRAM on it, somewhere between 512K and 2MB. We
932 	 * need to divide this up a) between the transmitter and
933  	 * receiver and b) between the two XMACs, if this is a
934 	 * dual port NIC. Our algorithm is to divide up the memory
935 	 * evenly so that everyone gets a fair share.
936 	 */
937 	if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) {
938 		u_int32_t		chunk, val;
939 
940 		chunk = sc->sk_ramsize / 2;
941 		val = sc->sk_rboff / sizeof(u_int64_t);
942 		sc_if->sk_rx_ramstart = val;
943 		val += (chunk / sizeof(u_int64_t));
944 		sc_if->sk_rx_ramend = val - 1;
945 		sc_if->sk_tx_ramstart = val;
946 		val += (chunk / sizeof(u_int64_t));
947 		sc_if->sk_tx_ramend = val - 1;
948 	} else {
949 		u_int32_t		chunk, val;
950 
951 		chunk = sc->sk_ramsize / 4;
952 		val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) /
953 		    sizeof(u_int64_t);
954 		sc_if->sk_rx_ramstart = val;
955 		val += (chunk / sizeof(u_int64_t));
956 		sc_if->sk_rx_ramend = val - 1;
957 		sc_if->sk_tx_ramstart = val;
958 		val += (chunk / sizeof(u_int64_t));
959 		sc_if->sk_tx_ramend = val - 1;
960 	}
961 
962 	DPRINTFN(2, ("sk_attach: rx_ramstart=%#x rx_ramend=%#x\n"
963 		     "           tx_ramstart=%#x tx_ramend=%#x\n",
964 		     sc_if->sk_rx_ramstart, sc_if->sk_rx_ramend,
965 		     sc_if->sk_tx_ramstart, sc_if->sk_tx_ramend));
966 
967 	/* Read and save PHY type */
968 	sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF;
969 
970 	/* Set PHY address */
971 	if (SK_IS_GENESIS(sc)) {
972 		switch (sc_if->sk_phytype) {
973 			case SK_PHYTYPE_XMAC:
974 				sc_if->sk_phyaddr = SK_PHYADDR_XMAC;
975 				break;
976 			case SK_PHYTYPE_BCOM:
977 				sc_if->sk_phyaddr = SK_PHYADDR_BCOM;
978 				break;
979 			default:
980 				printf("%s: unsupported PHY type: %d\n",
981 				    sc->sk_dev.dv_xname, sc_if->sk_phytype);
982 				return;
983 		}
984 	}
985 
986 	if (SK_IS_YUKON(sc)) {
987 		if ((sc_if->sk_phytype < SK_PHYTYPE_MARV_COPPER &&
988 		    sc->sk_pmd != 'L' && sc->sk_pmd != 'S')) {
989 			/* not initialized, punt */
990 			sc_if->sk_phytype = SK_PHYTYPE_MARV_COPPER;
991 
992 			sc->sk_coppertype = 1;
993 		}
994 
995 		sc_if->sk_phyaddr = SK_PHYADDR_MARV;
996 
997 		if (!(sc->sk_coppertype))
998 			sc_if->sk_phytype = SK_PHYTYPE_MARV_FIBER;
999 	}
1000 
1001 	/* Allocate the descriptor queues. */
1002 	if (bus_dmamem_alloc(sc->sc_dmatag, sizeof(struct sk_ring_data),
1003 	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
1004 		printf(": can't alloc rx buffers\n");
1005 		goto fail;
1006 	}
1007 	if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg,
1008 	    sizeof(struct sk_ring_data), &kva, BUS_DMA_NOWAIT)) {
1009 		printf(": can't map dma buffers (%lu bytes)\n",
1010 		       (ulong)sizeof(struct sk_ring_data));
1011 		goto fail_1;
1012 	}
1013 	if (bus_dmamap_create(sc->sc_dmatag, sizeof(struct sk_ring_data), 1,
1014 	    sizeof(struct sk_ring_data), 0, BUS_DMA_NOWAIT,
1015             &sc_if->sk_ring_map)) {
1016 		printf(": can't create dma map\n");
1017 		goto fail_2;
1018 	}
1019 	if (bus_dmamap_load(sc->sc_dmatag, sc_if->sk_ring_map, kva,
1020 	    sizeof(struct sk_ring_data), NULL, BUS_DMA_NOWAIT)) {
1021 		printf(": can't load dma map\n");
1022 		goto fail_3;
1023 	}
1024         sc_if->sk_rdata = (struct sk_ring_data *)kva;
1025 	bzero(sc_if->sk_rdata, sizeof(struct sk_ring_data));
1026 
1027 	for (i = 0; i < SK_RX_RING_CNT; i++) {
1028 		if ((error = bus_dmamap_create(sc->sc_dmatag, SK_JLEN, 4,
1029 		    SK_JLEN, 0, 0, &sc_if->sk_cdata.sk_rx_map[i])) != 0) {
1030 			printf("\n%s: unable to create rx DMA map %d, "
1031 			    "error = %d\n", sc->sk_dev.dv_xname, i, error);
1032 			goto fail_4;
1033 		}
1034 	}
1035 
1036 	ifp = &sc_if->arpcom.ac_if;
1037 	ifp->if_softc = sc_if;
1038 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1039 	ifp->if_ioctl = sk_ioctl;
1040 	ifp->if_start = sk_start;
1041 	ifp->if_watchdog = sk_watchdog;
1042 	ifp->if_baudrate = 1000000000;
1043 	ifp->if_hardmtu = SK_JUMBO_MTU;
1044 	IFQ_SET_MAXLEN(&ifp->if_snd, SK_TX_RING_CNT - 1);
1045 	IFQ_SET_READY(&ifp->if_snd);
1046 	bcopy(sc_if->sk_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
1047 
1048 	ifp->if_capabilities = IFCAP_VLAN_MTU;
1049 
1050 	/*
1051 	 * Do miibus setup.
1052 	 */
1053 	switch (sc->sk_type) {
1054 	case SK_GENESIS:
1055 		sk_init_xmac(sc_if);
1056 		break;
1057 	case SK_YUKON:
1058 	case SK_YUKON_LITE:
1059 	case SK_YUKON_LP:
1060 		sk_init_yukon(sc_if);
1061 		break;
1062 	default:
1063 		printf(": unknown device type %d\n", sc->sk_type);
1064 		/* dealloc jumbo on error */
1065 		goto fail_3;
1066 	}
1067 
1068  	DPRINTFN(2, ("sk_attach: 1\n"));
1069 
1070 	sc_if->sk_mii.mii_ifp = ifp;
1071 	if (SK_IS_GENESIS(sc)) {
1072 		sc_if->sk_mii.mii_readreg = sk_xmac_miibus_readreg;
1073 		sc_if->sk_mii.mii_writereg = sk_xmac_miibus_writereg;
1074 		sc_if->sk_mii.mii_statchg = sk_xmac_miibus_statchg;
1075 	} else {
1076 		sc_if->sk_mii.mii_readreg = sk_marv_miibus_readreg;
1077 		sc_if->sk_mii.mii_writereg = sk_marv_miibus_writereg;
1078 		sc_if->sk_mii.mii_statchg = sk_marv_miibus_statchg;
1079 	}
1080 
1081 	ifmedia_init(&sc_if->sk_mii.mii_media, 0,
1082 	    sk_ifmedia_upd, sk_ifmedia_sts);
1083 	if (SK_IS_GENESIS(sc)) {
1084 		mii_attach(self, &sc_if->sk_mii, 0xffffffff, MII_PHY_ANY,
1085 		    MII_OFFSET_ANY, 0);
1086 	} else {
1087 		mii_attach(self, &sc_if->sk_mii, 0xffffffff, MII_PHY_ANY,
1088 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
1089 	}
1090 	if (LIST_FIRST(&sc_if->sk_mii.mii_phys) == NULL) {
1091 		printf("%s: no PHY found!\n", sc_if->sk_dev.dv_xname);
1092 		ifmedia_add(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_MANUAL,
1093 			    0, NULL);
1094 		ifmedia_set(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_MANUAL);
1095 	} else
1096 		ifmedia_set(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_AUTO);
1097 
1098 	if (SK_IS_GENESIS(sc)) {
1099 		timeout_set(&sc_if->sk_tick_ch, sk_tick, sc_if);
1100 		timeout_add_sec(&sc_if->sk_tick_ch, 1);
1101 	} else
1102 		timeout_set(&sc_if->sk_tick_ch, sk_yukon_tick, sc_if);
1103 
1104 	/*
1105 	 * Call MI attach routines.
1106 	 */
1107 	if_attach(ifp);
1108 	ether_ifattach(ifp);
1109 
1110 	shutdownhook_establish(skc_shutdown, sc);
1111 
1112 	DPRINTFN(2, ("sk_attach: end\n"));
1113 	return;
1114 
1115 fail_4:
1116 	for (i = 0; i < SK_RX_RING_CNT; i++) {
1117 		if (sc_if->sk_cdata.sk_rx_map[i] != NULL)
1118 			bus_dmamap_destroy(sc->sc_dmatag,
1119 			    sc_if->sk_cdata.sk_rx_map[i]);
1120 	}
1121 
1122 fail_3:
1123 	bus_dmamap_destroy(sc->sc_dmatag, sc_if->sk_ring_map);
1124 fail_2:
1125 	bus_dmamem_unmap(sc->sc_dmatag, kva, sizeof(struct sk_ring_data));
1126 fail_1:
1127 	bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
1128 fail:
1129 	sc->sk_if[sa->skc_port] = NULL;
1130 }
1131 
1132 int
1133 skcprint(void *aux, const char *pnp)
1134 {
1135 	struct skc_attach_args *sa = aux;
1136 
1137 	if (pnp)
1138 		printf("sk port %c at %s",
1139 		    (sa->skc_port == SK_PORT_A) ? 'A' : 'B', pnp);
1140 	else
1141 		printf(" port %c", (sa->skc_port == SK_PORT_A) ? 'A' : 'B');
1142 	return (UNCONF);
1143 }
1144 
1145 /*
1146  * Attach the interface. Allocate softc structures, do ifmedia
1147  * setup and ethernet/BPF attach.
1148  */
/*
 * Attach the skc(4) controller: restore D0 power state if needed, map
 * the control/status registers, establish the interrupt, size the
 * on-chip RAM buffer, identify the chip flavor/revision, and attach one
 * sk(4) child per MAC port found.
 */
void
skc_attach(struct device *parent, struct device *self, void *aux)
{
	struct sk_softc *sc = (struct sk_softc *)self;
	struct pci_attach_args *pa = aux;
	struct skc_attach_args skca;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcireg_t command, memtype;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	bus_size_t size;
	u_int8_t skrs;
	char *revstr = NULL;

	DPRINTFN(2, ("begin skc_attach\n"));

	/*
	 * Handle power management nonsense.
	 */
	/* Low byte of SK_PCI_CAPID: 0x01 indicates power management
	 * capability. */
	command = pci_conf_read(pc, pa->pa_tag, SK_PCI_CAPID) & 0x000000FF;

	if (command == 0x01) {
		command = pci_conf_read(pc, pa->pa_tag, SK_PCI_PWRMGMTCTRL);
		if (command & SK_PSTATE_MASK) {
			u_int32_t		iobase, membase, irq;

			/* Save important PCI config data. */
			iobase = pci_conf_read(pc, pa->pa_tag, SK_PCI_LOIO);
			membase = pci_conf_read(pc, pa->pa_tag, SK_PCI_LOMEM);
			irq = pci_conf_read(pc, pa->pa_tag, SK_PCI_INTLINE);

			/* Reset the power state. */
			printf("%s chip is in D%d power mode "
			    "-- setting to D0\n", sc->sk_dev.dv_xname,
			    command & SK_PSTATE_MASK);
			command &= 0xFFFFFFFC;
			pci_conf_write(pc, pa->pa_tag,
			    SK_PCI_PWRMGMTCTRL, command);

			/* Restore PCI config data. */
			pci_conf_write(pc, pa->pa_tag, SK_PCI_LOIO, iobase);
			pci_conf_write(pc, pa->pa_tag, SK_PCI_LOMEM, membase);
			pci_conf_write(pc, pa->pa_tag, SK_PCI_INTLINE, irq);
		}
	}

	/*
	 * Map control/status registers.
	 */
	memtype = pci_mapreg_type(pc, pa->pa_tag, SK_PCI_LOMEM);
	if (pci_mapreg_map(pa, SK_PCI_LOMEM, memtype, 0, &sc->sk_btag,
	    &sc->sk_bhandle, NULL, &size, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	sc->sc_dmatag = pa->pa_dmat;

	/* Chip type and revision drive nearly every branch below. */
	sc->sk_type = sk_win_read_1(sc, SK_CHIPVER);
	sc->sk_rev = (sk_win_read_1(sc, SK_CONFIG) >> 4);

	/* bail out here if chip is not recognized */
	if (! SK_IS_GENESIS(sc) && ! SK_IS_YUKON(sc)) {
		printf(": unknown chip type: %d\n", sc->sk_type);
		goto fail_1;
	}
	DPRINTFN(2, ("skc_attach: allocate interrupt\n"));

	/* Allocate interrupt */
	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		goto fail_1;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sk_intrhand = pci_intr_establish(pc, ih, IPL_NET, sk_intr, sc,
	    self->dv_xname);
	if (sc->sk_intrhand == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail_1;
	}

	/* Reset the adapter. */
	sk_reset(sc);

	skrs = sk_win_read_1(sc, SK_EPROM0);
	if (SK_IS_GENESIS(sc)) {
		/* Read and save RAM size and RAMbuffer offset */
		switch(skrs) {
		case SK_RAMSIZE_512K_64:
			sc->sk_ramsize = 0x80000;
			sc->sk_rboff = SK_RBOFF_0;
			break;
		case SK_RAMSIZE_1024K_64:
			sc->sk_ramsize = 0x100000;
			sc->sk_rboff = SK_RBOFF_80000;
			break;
		case SK_RAMSIZE_1024K_128:
			sc->sk_ramsize = 0x100000;
			sc->sk_rboff = SK_RBOFF_0;
			break;
		case SK_RAMSIZE_2048K_128:
			sc->sk_ramsize = 0x200000;
			sc->sk_rboff = SK_RBOFF_0;
			break;
		default:
			printf(": unknown ram size: %d\n", skrs);
			goto fail_2;
			break;
		}
	} else {
		/* Yukon: EPROM0 encodes the RAM size in 4KB units; zero
		 * means the 128KB default. */
		if (skrs == 0x00)
			sc->sk_ramsize = 0x20000;
		else
			sc->sk_ramsize = skrs * (1<<12);
		sc->sk_rboff = SK_RBOFF_0;
	}

	DPRINTFN(2, ("skc_attach: ramsize=%d (%dk), rboff=%d\n",
		     sc->sk_ramsize, sc->sk_ramsize / 1024,
		     sc->sk_rboff));

	/* Read and save physical media type */
	sc->sk_pmd = sk_win_read_1(sc, SK_PMDTYPE);

	/* 'T' and '1' PMD codes indicate copper media. */
	if (sc->sk_pmd == 'T' || sc->sk_pmd == '1')
		sc->sk_coppertype = 1;
	else
		sc->sk_coppertype = 0;

	switch (sc->sk_type) {
	case SK_GENESIS:
		sc->sk_name = "GEnesis";
		break;
	case SK_YUKON:
		sc->sk_name = "Yukon";
		break;
	case SK_YUKON_LITE:
		sc->sk_name = "Yukon Lite";
		break;
	case SK_YUKON_LP:
		sc->sk_name = "Yukon LP";
		break;
	default:
		sc->sk_name = "Yukon (Unknown)";
	}

	/* Yukon Lite Rev A0 needs special test, from sk98lin driver */
	if (sc->sk_type == SK_YUKON || sc->sk_type == SK_YUKON_LP) {
		u_int32_t flashaddr;
		u_int8_t testbyte;

		flashaddr = sk_win_read_4(sc, SK_EP_ADDR);

		/* test Flash-Address Register */
		sk_win_write_1(sc, SK_EP_ADDR+3, 0xff);
		testbyte = sk_win_read_1(sc, SK_EP_ADDR+3);

		if (testbyte != 0) {
			/* This is a Yukon Lite Rev A0 */
			sc->sk_type = SK_YUKON_LITE;
			sc->sk_rev = SK_YUKON_LITE_REV_A0;
			/* restore Flash-Address Register */
			sk_win_write_4(sc, SK_EP_ADDR, flashaddr);
		}
	}

	/* Pretty-print the Yukon Lite revision, when known. */
	if (sc->sk_type == SK_YUKON_LITE) {
		switch (sc->sk_rev) {
		case SK_YUKON_LITE_REV_A0:
			revstr = "A0";
			break;
		case SK_YUKON_LITE_REV_A1:
			revstr = "A1";
			break;
		case SK_YUKON_LITE_REV_A3:
			revstr = "A3";
			break;
		default:
			;
		}
	}

	/* Announce the product name. */
	printf(", %s", sc->sk_name);
	if (revstr != NULL)
		printf(" rev. %s", revstr);
	printf(" (0x%x): %s\n", sc->sk_rev, intrstr);

	sc->sk_macs = 1;

	/* A cleared SINGLEMAC config bit means a second port exists. */
	if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC))
		sc->sk_macs++;

	/* Attach an sk(4) child for port A (and B, if present). */
	skca.skc_port = SK_PORT_A;
	skca.skc_type = sc->sk_type;
	skca.skc_rev = sc->sk_rev;
	(void)config_found(&sc->sk_dev, &skca, skcprint);

	if (sc->sk_macs > 1) {
		skca.skc_port = SK_PORT_B;
		skca.skc_type = sc->sk_type;
		skca.skc_rev = sc->sk_rev;
		(void)config_found(&sc->sk_dev, &skca, skcprint);
	}

	/* Turn on the 'driver is loaded' LED. */
	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);

	return;

fail_2:
	pci_intr_disestablish(pc, sc->sk_intrhand);
fail_1:
	bus_space_unmap(sc->sk_btag, sc->sk_bhandle, size);
}
1368 
/*
 * Encapsulate an mbuf chain into the TX descriptor ring starting at
 * *txidx.  On success the chain is owned by the ring (freed later in
 * sk_txeof()) and *txidx is advanced past the descriptors consumed.
 * Returns ENOBUFS when no DMA map or insufficient ring space is
 * available; the mbuf is then left untouched for the caller to requeue.
 */
int
sk_encap(struct sk_if_softc *sc_if, struct mbuf *m_head, u_int32_t *txidx)
{
	struct sk_softc		*sc = sc_if->sk_softc;
	struct sk_tx_desc	*f = NULL;
	u_int32_t		frag, cur, sk_ctl;
	int			i;
	struct sk_txmap_entry	*entry;
	bus_dmamap_t		txmap;

	DPRINTFN(2, ("sk_encap\n"));

	/* Grab a spare DMA map from the free list (dequeued below only
	 * once we are committed). */
	entry = SIMPLEQ_FIRST(&sc_if->sk_txmap_head);
	if (entry == NULL) {
		DPRINTFN(2, ("sk_encap: no txmap available\n"));
		return (ENOBUFS);
	}
	txmap = entry->dmamap;

	cur = frag = *txidx;

#ifdef SK_DEBUG
	if (skdebug >= 2)
		sk_dump_mbuf(m_head);
#endif

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	if (bus_dmamap_load_mbuf(sc->sc_dmatag, txmap, m_head,
	    BUS_DMA_NOWAIT)) {
		DPRINTFN(2, ("sk_encap: dmamap failed\n"));
		return (ENOBUFS);
	}

	/* Keep two descriptors in reserve so the ring never fills
	 * completely. */
	if (txmap->dm_nsegs > (SK_TX_RING_CNT - sc_if->sk_cdata.sk_tx_cnt - 2)) {
		DPRINTFN(2, ("sk_encap: too few descriptors free\n"));
		bus_dmamap_unload(sc->sc_dmatag, txmap);
		return (ENOBUFS);
	}

	DPRINTFN(2, ("sk_encap: dm_nsegs=%d\n", txmap->dm_nsegs));

	/* Sync the DMA map. */
	bus_dmamap_sync(sc->sc_dmatag, txmap, 0, txmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* One descriptor per DMA segment.  OWN is withheld from the first
	 * descriptor until the whole chain is built (set further below),
	 * so the chip never sees a half-constructed chain. */
	for (i = 0; i < txmap->dm_nsegs; i++) {
		f = &sc_if->sk_rdata->sk_tx_ring[frag];
		f->sk_data_lo = htole32(txmap->dm_segs[i].ds_addr);
		sk_ctl = txmap->dm_segs[i].ds_len | SK_OPCODE_DEFAULT;
		if (i == 0)
			sk_ctl |= SK_TXCTL_FIRSTFRAG;
		else
			sk_ctl |= SK_TXCTL_OWN;
		f->sk_ctl = htole32(sk_ctl);
		cur = frag;
		SK_INC(frag, SK_TX_RING_CNT);
	}

	/* Record the mbuf and DMA map on the last descriptor so
	 * sk_txeof() can release them when the chain completes. */
	sc_if->sk_cdata.sk_tx_chain[cur].sk_mbuf = m_head;
	SIMPLEQ_REMOVE_HEAD(&sc_if->sk_txmap_head, link);

	sc_if->sk_cdata.sk_tx_map[cur] = entry;
	sc_if->sk_rdata->sk_tx_ring[cur].sk_ctl |=
		htole32(SK_TXCTL_LASTFRAG|SK_TXCTL_EOF_INTR);

	/* Sync descriptors before handing to chip */
	SK_CDTXSYNC(sc_if, *txidx, txmap->dm_nsegs,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* Hand ownership of the first descriptor to the chip last. */
	sc_if->sk_rdata->sk_tx_ring[*txidx].sk_ctl |=
		htole32(SK_TXCTL_OWN);

	/* Sync first descriptor to hand it off */
	SK_CDTXSYNC(sc_if, *txidx, 1, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	sc_if->sk_cdata.sk_tx_cnt += txmap->dm_nsegs;

#ifdef SK_DEBUG
	if (skdebug >= 2) {
		struct sk_tx_desc *desc;
		u_int32_t idx;
		for (idx = *txidx; idx != frag; SK_INC(idx, SK_TX_RING_CNT)) {
			desc = &sc_if->sk_rdata->sk_tx_ring[idx];
			sk_dump_txdesc(desc, idx);
		}
	}
#endif

	*txidx = frag;

	DPRINTFN(2, ("sk_encap: completed successfully\n"));

	return (0);
}
1467 
/*
 * Transmit start routine.  Drains the interface send queue into the TX
 * ring via sk_encap(), then kicks the chip's transmit BMU once at the
 * end.  Sets IFF_OACTIVE and stops when the ring cannot take more.
 */
void
sk_start(struct ifnet *ifp)
{
	struct sk_if_softc	*sc_if = ifp->if_softc;
	struct sk_softc		*sc = sc_if->sk_softc;
	struct mbuf		*m_head = NULL;
	u_int32_t		idx = sc_if->sk_cdata.sk_tx_prod;
	int			pkts = 0;

	DPRINTFN(2, ("sk_start\n"));

	/* Loop while the producer slot is free of a pending frame. */
	while (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf == NULL) {
		/* Peek first; only dequeue after encapsulation succeeds. */
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (sk_encap(sc_if, m_head, &idx)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* now we are committed to transmit the packet */
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		pkts++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
	}
	if (pkts == 0)
		return;

	/* Transmit */
	if (idx != sc_if->sk_cdata.sk_tx_prod) {
		sc_if->sk_cdata.sk_tx_prod = idx;
		CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = 5;
	}
}
1519 
1520 
/*
 * Watchdog handler, invoked when a transmit timeout expires.  Reclaims
 * completed descriptors first; if frames are still outstanding the
 * hardware is assumed stuck and the interface is reinitialized.
 */
void
sk_watchdog(struct ifnet *ifp)
{
	struct sk_if_softc *sc_if = ifp->if_softc;

	/*
	 * Reclaim first as there is a possibility of losing Tx completion
	 * interrupts.
	 */
	sk_txeof(sc_if);
	if (sc_if->sk_cdata.sk_tx_cnt != 0) {
		printf("%s: watchdog timeout\n", sc_if->sk_dev.dv_xname);

		ifp->if_oerrors++;

		sk_init(sc_if);
	}
}
1539 
/*
 * Shutdown hook: quiesce the controller before reboot so the chip stops
 * DMA activity and the load LED is extinguished.
 */
void
skc_shutdown(void *v)
{
	struct sk_softc		*sc = v;

	DPRINTFN(2, ("sk_shutdown\n"));

	/* Turn off the 'driver is loaded' LED. */
	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);

	/*
	 * Reset the GEnesis controller. Doing this should also
	 * assert the resets on the attached XMAC(s).
	 */
	sk_reset(sc);
}
1556 
1557 static __inline int
1558 sk_rxvalid(struct sk_softc *sc, u_int32_t stat, u_int32_t len)
1559 {
1560 	if (sc->sk_type == SK_GENESIS) {
1561 		if ((stat & XM_RXSTAT_ERRFRAME) == XM_RXSTAT_ERRFRAME ||
1562 		    XM_RXSTAT_BYTES(stat) != len)
1563 			return (0);
1564 	} else {
1565 		if ((stat & (YU_RXSTAT_CRCERR | YU_RXSTAT_LONGERR |
1566 		    YU_RXSTAT_MIIERR | YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC |
1567 		    YU_RXSTAT_JABBER)) != 0 ||
1568 		    (stat & YU_RXSTAT_RXOK) != YU_RXSTAT_RXOK ||
1569 		    YU_RXSTAT_BYTES(stat) != len)
1570 			return (0);
1571 	}
1572 
1573 	return (1);
1574 }
1575 
/*
 * Receive-completion handler.  Walks the RX ring from the consumer
 * index, hands completed frames up the stack, and finally refills the
 * ring via sk_fill_rx_ring().
 */
void
sk_rxeof(struct sk_if_softc *sc_if)
{
	struct sk_softc		*sc = sc_if->sk_softc;
	struct ifnet		*ifp = &sc_if->arpcom.ac_if;
	struct mbuf		*m;
	struct sk_chain		*cur_rx;
	struct sk_rx_desc	*cur_desc;
	int			i, cur, total_len = 0;
	u_int32_t		rxstat, sk_ctl;
	bus_dmamap_t		dmamap;
	u_int16_t		csum1, csum2;

	DPRINTFN(2, ("sk_rxeof\n"));

	for (;;) {
		cur = sc_if->sk_cdata.sk_rx_cons;

		/* Sync the descriptor */
		SK_CDRXSYNC(sc_if, cur,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		/* An empty chain slot means we have caught up with the
		 * refill point. */
		cur_rx = &sc_if->sk_cdata.sk_rx_chain[cur];
		if (cur_rx->sk_mbuf == NULL)
			break;

		/* Descriptor still owned by the chip: nothing more to
		 * reap. */
		sk_ctl = letoh32(sc_if->sk_rdata->sk_rx_ring[cur].sk_ctl);
		if ((sk_ctl & SK_RXCTL_OWN) != 0)
			break;

		cur_desc = &sc_if->sk_rdata->sk_rx_ring[cur];
		dmamap = sc_if->sk_cdata.sk_rx_map[cur];
		/* Advance the consumer over every segment this buffer's
		 * DMA map occupied. */
		for (i = 0; i < dmamap->dm_nsegs; i++) {
			SK_INC(sc_if->sk_cdata.sk_rx_cons, SK_RX_RING_CNT);
			sc_if->sk_cdata.sk_rx_cnt--;
		}

		bus_dmamap_sync(sc_if->sk_softc->sc_dmatag, dmamap, 0,
		    dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc_if->sk_softc->sc_dmatag, dmamap);

		rxstat = letoh32(cur_desc->sk_xmac_rxstat);
		m = cur_rx->sk_mbuf;
		cur_rx->sk_mbuf = NULL;
		total_len = SK_RXBYTES(letoh32(cur_desc->sk_ctl));

		/* Hardware checksums, consumed by sk_rxcsum() below. */
		csum1 = letoh16(sc_if->sk_rdata->sk_rx_ring[cur].sk_csum1);
		csum2 = letoh16(sc_if->sk_rdata->sk_rx_ring[cur].sk_csum2);

		/* Drop frames without valid status, frames split across
		 * descriptors, frames out of size bounds, and frames
		 * failing the MAC's status checks. */
		if ((sk_ctl & (SK_RXCTL_STATUS_VALID | SK_RXCTL_FIRSTFRAG |
		    SK_RXCTL_LASTFRAG)) != (SK_RXCTL_STATUS_VALID |
		    SK_RXCTL_FIRSTFRAG | SK_RXCTL_LASTFRAG) ||
		    total_len < SK_MIN_FRAMELEN ||
		    total_len > SK_JUMBO_FRAMELEN ||
		    sk_rxvalid(sc, rxstat, total_len) == 0) {
			ifp->if_ierrors++;
			m_freem(m);
			continue;
		}

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = total_len;

		ifp->if_ipackets++;

		sk_rxcsum(ifp, m, csum1, csum2);

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif

		/* pass it on. */
		ether_input_mbuf(ifp, m);
	}

	sk_fill_rx_ring(sc_if);
}
1654 
1655 void
1656 sk_rxcsum(struct ifnet *ifp, struct mbuf *m, const u_int16_t csum1, const u_int16_t csum2)
1657 {
1658 	struct ether_header *eh;
1659 	struct ip *ip;
1660 	u_int8_t *pp;
1661 	int hlen, len, plen;
1662 	u_int16_t iph_csum, ipo_csum, ipd_csum, csum;
1663 
1664 	pp = mtod(m, u_int8_t *);
1665 	plen = m->m_pkthdr.len;
1666 	if (plen < sizeof(*eh))
1667 		return;
1668 	eh = (struct ether_header *)pp;
1669 	iph_csum = in_cksum_addword(csum1, (~csum2 & 0xffff));
1670 
1671 	if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
1672 		u_int16_t *xp = (u_int16_t *)pp;
1673 
1674 		xp = (u_int16_t *)pp;
1675 		if (xp[1] != htons(ETHERTYPE_IP))
1676 			return;
1677 		iph_csum = in_cksum_addword(iph_csum, (~xp[0] & 0xffff));
1678 		iph_csum = in_cksum_addword(iph_csum, (~xp[1] & 0xffff));
1679 		xp = (u_int16_t *)(pp + sizeof(struct ip));
1680 		iph_csum = in_cksum_addword(iph_csum, xp[0]);
1681 		iph_csum = in_cksum_addword(iph_csum, xp[1]);
1682 		pp += EVL_ENCAPLEN;
1683 	} else if (eh->ether_type != htons(ETHERTYPE_IP))
1684 		return;
1685 
1686 	pp += sizeof(*eh);
1687 	plen -= sizeof(*eh);
1688 
1689 	ip = (struct ip *)pp;
1690 
1691 	if (ip->ip_v != IPVERSION)
1692 		return;
1693 
1694 	hlen = ip->ip_hl << 2;
1695 	if (hlen < sizeof(struct ip))
1696 		return;
1697 	if (hlen > ntohs(ip->ip_len))
1698 		return;
1699 
1700 	/* Don't deal with truncated or padded packets. */
1701 	if (plen != ntohs(ip->ip_len))
1702 		return;
1703 
1704 	len = hlen - sizeof(struct ip);
1705 	if (len > 0) {
1706 		u_int16_t *p;
1707 
1708 		p = (u_int16_t *)(ip + 1);
1709 		ipo_csum = 0;
1710 		for (ipo_csum = 0; len > 0; len -= sizeof(*p), p++)
1711 			ipo_csum = in_cksum_addword(ipo_csum, *p);
1712 		iph_csum = in_cksum_addword(iph_csum, ipo_csum);
1713 		ipd_csum = in_cksum_addword(csum2, (~ipo_csum & 0xffff));
1714 	} else
1715 		ipd_csum = csum2;
1716 
1717 	if (iph_csum != 0xffff)
1718 		return;
1719 	m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
1720 
1721 	if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
1722 		return;                 /* ip frag, we're done for now */
1723 
1724 	pp += hlen;
1725 
1726 	/* Only know checksum protocol for udp/tcp */
1727 	if (ip->ip_p == IPPROTO_UDP) {
1728 		struct udphdr *uh = (struct udphdr *)pp;
1729 
1730 		if (uh->uh_sum == 0)    /* udp with no checksum */
1731 			return;
1732 	} else if (ip->ip_p != IPPROTO_TCP)
1733 		return;
1734 
1735 	csum = in_cksum_phdr(ip->ip_src.s_addr, ip->ip_dst.s_addr,
1736 	    htonl(ntohs(ip->ip_len) - hlen + ip->ip_p) + ipd_csum);
1737 	if (csum == 0xffff) {
1738 		m->m_pkthdr.csum_flags |= (ip->ip_p == IPPROTO_TCP) ?
1739 		    M_TCP_CSUM_IN_OK : M_UDP_CSUM_IN_OK;
1740 	}
1741 }
1742 
/*
 * Transmit-completion handler.  Walks the TX ring from the consumer
 * index, frees mbufs and DMA maps for descriptors the chip has given
 * back, and clears IFF_OACTIVE once ring space frees up.
 */
void
sk_txeof(struct sk_if_softc *sc_if)
{
	struct sk_softc		*sc = sc_if->sk_softc;
	struct sk_tx_desc	*cur_tx;
	struct ifnet		*ifp = &sc_if->arpcom.ac_if;
	u_int32_t		idx, sk_ctl;
	struct sk_txmap_entry	*entry;

	DPRINTFN(2, ("sk_txeof\n"));

	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	idx = sc_if->sk_cdata.sk_tx_cons;
	while (idx != sc_if->sk_cdata.sk_tx_prod) {
		SK_CDTXSYNC(sc_if, idx, 1,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		cur_tx = &sc_if->sk_rdata->sk_tx_ring[idx];
		sk_ctl = letoh32(cur_tx->sk_ctl);
#ifdef SK_DEBUG
		if (skdebug >= 2)
			sk_dump_txdesc(cur_tx, idx);
#endif
		/* Still owned by the chip: stop reclaiming here. */
		if (sk_ctl & SK_TXCTL_OWN) {
			SK_CDTXSYNC(sc_if, idx, 1, BUS_DMASYNC_PREREAD);
			break;
		}
		/* Count one packet per completed chain, not per segment. */
		if (sk_ctl & SK_TXCTL_LASTFRAG)
			ifp->if_opackets++;
		if (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf != NULL) {
			entry = sc_if->sk_cdata.sk_tx_map[idx];

			m_freem(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf);
			sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf = NULL;

			bus_dmamap_sync(sc->sc_dmatag, entry->dmamap, 0,
			    entry->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);

			bus_dmamap_unload(sc->sc_dmatag, entry->dmamap);
			/* Return the DMA map to the free list. */
			SIMPLEQ_INSERT_TAIL(&sc_if->sk_txmap_head, entry,
					  link);
			sc_if->sk_cdata.sk_tx_map[idx] = NULL;
		}
		sc_if->sk_cdata.sk_tx_cnt--;
		SK_INC(idx, SK_TX_RING_CNT);
	}
	/* Keep the watchdog armed while frames remain outstanding. */
	ifp->if_timer = sc_if->sk_cdata.sk_tx_cnt > 0 ? 5 : 0;

	if (sc_if->sk_cdata.sk_tx_cnt < SK_TX_RING_CNT - 2)
		ifp->if_flags &= ~IFF_OACTIVE;

	sc_if->sk_cdata.sk_tx_cons = idx;
}
1799 
/*
 * GEnesis link-state poller, scheduled once a second.  For Broadcom
 * PHYs the work is delegated to sk_intr_bcom(); otherwise the XMAC's
 * GPIO link-sync pin is sampled as described in the comment below.
 */
void
sk_tick(void *xsc_if)
{
	struct sk_if_softc *sc_if = xsc_if;
	struct mii_data *mii = &sc_if->sk_mii;
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	int i;

	DPRINTFN(2, ("sk_tick\n"));

	if (!(ifp->if_flags & IFF_UP))
		return;

	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		sk_intr_bcom(sc_if);
		return;
	}

	/*
	 * According to SysKonnect, the correct way to verify that
	 * the link has come back up is to poll bit 0 of the GPIO
	 * register three times. This pin has the signal from the
	 * link sync pin connected to it; if we read the same link
	 * state 3 times in a row, we know the link is up.
	 */
	for (i = 0; i < 3; i++) {
		if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET)
			break;
	}

	/* Did not get three consistent reads: poll again in a second. */
	if (i != 3) {
		timeout_add_sec(&sc_if->sk_tick_ch, 1);
		return;
	}

	/* Turn the GP0 interrupt back on. */
	SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
	SK_XM_READ_2(sc_if, XM_ISR);
	mii_tick(mii);
	timeout_del(&sc_if->sk_tick_ch);
}
1841 
1842 void
1843 sk_yukon_tick(void *xsc_if)
1844 {
1845 	struct sk_if_softc *sc_if = xsc_if;
1846 	struct mii_data *mii = &sc_if->sk_mii;
1847 	int s;
1848 
1849 	s = splnet();
1850 	mii_tick(mii);
1851 	splx(s);
1852 	timeout_add_sec(&sc_if->sk_tick_ch, 1);
1853 }
1854 
/*
 * Service an interrupt from a Broadcom PHY (GEnesis 1000baseT boards).
 * MAC RX/TX are disabled around the PHY access; the link LED and cached
 * link state are updated from the PHY's interrupt and auxiliary status
 * registers.
 */
void
sk_intr_bcom(struct sk_if_softc *sc_if)
{
	struct mii_data *mii = &sc_if->sk_mii;
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	int status;

	DPRINTFN(2, ("sk_intr_bcom\n"));

	/* Pause MAC RX/TX while we poke the PHY. */
	SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);

	/*
	 * Read the PHY interrupt register to make sure
	 * we clear any pending interrupts.
	 */
	status = sk_xmac_miibus_readreg((struct device *)sc_if,
	    SK_PHYADDR_BCOM, BRGPHY_MII_ISR);

	/* Interface not running: reinitialize the MAC and bail. */
	if (!(ifp->if_flags & IFF_RUNNING)) {
		sk_init_xmac(sc_if);
		return;
	}

	if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) {
		int lstat;
		lstat = sk_xmac_miibus_readreg((struct device *)sc_if,
		    SK_PHYADDR_BCOM, BRGPHY_MII_AUXSTS);

		if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) {
			/* Link lost: renegotiate media. */
			mii_mediachg(mii);
			/* Turn off the link LED. */
			SK_IF_WRITE_1(sc_if, 0,
			    SK_LINKLED1_CTL, SK_LINKLED_OFF);
			sc_if->sk_link = 0;
		} else if (status & BRGPHY_ISR_LNK_CHG) {
			sk_xmac_miibus_writereg((struct device *)sc_if,
			    SK_PHYADDR_BCOM, BRGPHY_MII_IMR, 0xFF00);
			mii_tick(mii);
			sc_if->sk_link = 1;
			/* Turn on the link LED. */
			SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
			    SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF|
			    SK_LINKLED_BLINK_OFF);
		} else {
			mii_tick(mii);
			timeout_add_sec(&sc_if->sk_tick_ch, 1);
		}
	}

	/* Re-enable MAC RX/TX. */
	SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
}
1906 
/*
 * Service an XMAC (GEnesis) MAC interrupt.  GP0 and autonegotiation
 * events schedule the sk_tick() link poller; FIFO underrun/overrun
 * conditions are handled by flushing the corresponding FIFO.
 */
void
sk_intr_xmac(struct sk_if_softc	*sc_if)
{
	u_int16_t status = SK_XM_READ_2(sc_if, XM_ISR);

	DPRINTFN(2, ("sk_intr_xmac\n"));

	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) {
		if (status & XM_ISR_GP0_SET) {
			/* Mask GP0 and let sk_tick() poll the link pin. */
			SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
			timeout_add_sec(&sc_if->sk_tick_ch, 1);
		}

		if (status & XM_ISR_AUTONEG_DONE) {
			timeout_add_sec(&sc_if->sk_tick_ch, 1);
		}
	}

	if (status & XM_IMR_TX_UNDERRUN)
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO);

	if (status & XM_IMR_RX_OVERRUN)
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO);
}
1931 
1932 void
1933 sk_intr_yukon(struct sk_if_softc *sc_if)
1934 {
1935 	u_int8_t status;
1936 
1937 	status = SK_IF_READ_1(sc_if, 0, SK_GMAC_ISR);
1938 	/* RX overrun */
1939 	if ((status & SK_GMAC_INT_RX_OVER) != 0) {
1940 		SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST,
1941 		    SK_RFCTL_RX_FIFO_OVER);
1942 	}
1943 	/* TX underrun */
1944 	if ((status & SK_GMAC_INT_TX_UNDER) != 0) {
1945 		SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST,
1946 		    SK_TFCTL_TX_FIFO_UNDER);
1947 	}
1948 
1949 	DPRINTFN(2, ("sk_intr_yukon status=%#x\n", status));
1950 }
1951 
/*
 * Main interrupt handler, shared by both MAC ports.  Loops while any
 * unmasked bit remains in the interrupt source register, dispatching RX,
 * TX and MAC events to the per-port handlers, then restarts transmission
 * on any interface with queued packets.  Returns 1 when the interrupt
 * was ours, 0 otherwise.
 */
int
sk_intr(void *xsc)
{
	struct sk_softc		*sc = xsc;
	struct sk_if_softc	*sc_if0 = sc->sk_if[SK_PORT_A];
	struct sk_if_softc	*sc_if1 = sc->sk_if[SK_PORT_B];
	struct ifnet		*ifp0 = NULL, *ifp1 = NULL;
	u_int32_t		status;
	int			claimed = 0;

	/* Nothing pending, or an all-ones read: not our interrupt. */
	status = CSR_READ_4(sc, SK_ISSR);
	if (status == 0 || status == 0xffffffff)
		return (0);

	if (sc_if0 != NULL)
		ifp0 = &sc_if0->arpcom.ac_if;
	if (sc_if1 != NULL)
		ifp1 = &sc_if1->arpcom.ac_if;

	/* Re-read and re-mask ISSR each pass until no enabled source
	 * remains asserted. */
	for (; (status &= sc->sk_intrmask) != 0;) {
		claimed = 1;

		/* Handle receive interrupts first. */
		if (sc_if0 && (status & SK_ISR_RX1_EOF)) {
			sk_rxeof(sc_if0);
			CSR_WRITE_4(sc, SK_BMU_RX_CSR0,
			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
		}
		if (sc_if1 && (status & SK_ISR_RX2_EOF)) {
			sk_rxeof(sc_if1);
			CSR_WRITE_4(sc, SK_BMU_RX_CSR1,
			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
		}

		/* Then transmit interrupts. */
		if (sc_if0 && (status & SK_ISR_TX1_S_EOF)) {
			sk_txeof(sc_if0);
			CSR_WRITE_4(sc, SK_BMU_TXS_CSR0,
			    SK_TXBMU_CLR_IRQ_EOF);
		}
		if (sc_if1 && (status & SK_ISR_TX2_S_EOF)) {
			sk_txeof(sc_if1);
			CSR_WRITE_4(sc, SK_BMU_TXS_CSR1,
			    SK_TXBMU_CLR_IRQ_EOF);
		}

		/* Then MAC interrupts. */
		if (sc_if0 && (status & SK_ISR_MAC1) &&
		    (ifp0->if_flags & IFF_RUNNING)) {
			if (SK_IS_GENESIS(sc))
				sk_intr_xmac(sc_if0);
			else
				sk_intr_yukon(sc_if0);
		}

		if (sc_if1 && (status & SK_ISR_MAC2) &&
		    (ifp1->if_flags & IFF_RUNNING)) {
			if (SK_IS_GENESIS(sc))
				sk_intr_xmac(sc_if1);
			else
				sk_intr_yukon(sc_if1);

		}

		/* External register event: Broadcom PHY service. */
		if (status & SK_ISR_EXTERNAL_REG) {
			if (sc_if0 != NULL &&
			    sc_if0->sk_phytype == SK_PHYTYPE_BCOM)
				sk_intr_bcom(sc_if0);

			if (sc_if1 != NULL &&
			    sc_if1->sk_phytype == SK_PHYTYPE_BCOM)
				sk_intr_bcom(sc_if1);
		}
		status = CSR_READ_4(sc, SK_ISSR);
	}

	/* Restore the interrupt mask before leaving. */
	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	/* Kick transmit on both ports if work is queued. */
	if (ifp0 != NULL && !IFQ_IS_EMPTY(&ifp0->if_snd))
		sk_start(ifp0);
	if (ifp1 != NULL && !IFQ_IS_EMPTY(&ifp1->if_snd))
		sk_start(ifp1);

	return (claimed);
}
2037 
/*
 * Bring up the XMAC-II MAC used on GEnesis boards: take the MAC out
 * of reset, perform extra setup for boards with an external Broadcom
 * PHY, program the station address and frame-filtering modes, raise
 * the TX FIFO threshold, and configure the MAC arbiter according to
 * the detected XMAC revision.  Called from sk_init() when
 * sc->sk_type == SK_GENESIS.
 */
void
sk_init_xmac(struct sk_if_softc	*sc_if)
{
	struct sk_softc		*sc = sc_if->sk_softc;
	struct ifnet		*ifp = &sc_if->arpcom.ac_if;
	/* PHY register/value pairs for the early-BCM5400 workaround below. */
	struct sk_bcom_hack     bhack[] = {
	{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 },
	{ 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 },
	{ 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
	{ 0, 0 } };

	DPRINTFN(2, ("sk_init_xmac\n"));

	/* Unreset the XMAC. */
	SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET);
	DELAY(1000);

	/* Reset the XMAC's internal state. */
	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);

	/* Save the XMAC II revision; used to select arbiter timings below. */
	sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID));

	/*
	 * Perform additional initialization for external PHYs,
	 * namely for the 1000baseTX cards that use the XMAC's
	 * GMII mode.
	 */
	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		int			i = 0;
		u_int32_t		val;

		/* Take PHY out of reset; the GPIO line differs per port. */
		val = sk_win_read_4(sc, SK_GPIO);
		if (sc_if->sk_port == SK_PORT_A)
			val |= SK_GPIO_DIR0|SK_GPIO_DAT0;
		else
			val |= SK_GPIO_DIR2|SK_GPIO_DAT2;
		sk_win_write_4(sc, SK_GPIO, val);

		/* Enable GMII mode on the XMAC. */
		SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE);

		/* Reset the PHY, then program its interrupt mask register. */
		sk_xmac_miibus_writereg((struct device *)sc_if,
		    SK_PHYADDR_BCOM, BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET);
		DELAY(10000);
		sk_xmac_miibus_writereg((struct device *)sc_if,
		    SK_PHYADDR_BCOM, BRGPHY_MII_IMR, 0xFFF0);

		/*
		 * Early versions of the BCM5400 apparently have
		 * a bug that requires them to have their reserved
		 * registers initialized to some magic values. I don't
		 * know what the numbers do, I'm just the messenger.
		 */
		if (sk_xmac_miibus_readreg((struct device *)sc_if,
		    SK_PHYADDR_BCOM, 0x03) == 0x6041) {
			while(bhack[i].reg) {
				sk_xmac_miibus_writereg((struct device *)sc_if,
				    SK_PHYADDR_BCOM, bhack[i].reg,
				    bhack[i].val);
				i++;
			}
		}
	}

	/* Set station address; PAR registers take 16-bit LE words. */
	SK_XM_WRITE_2(sc_if, XM_PAR0,
	    letoh16(*(u_int16_t *)(&sc_if->arpcom.ac_enaddr[0])));
	SK_XM_WRITE_2(sc_if, XM_PAR1,
	    letoh16(*(u_int16_t *)(&sc_if->arpcom.ac_enaddr[2])));
	SK_XM_WRITE_2(sc_if, XM_PAR2,
	    letoh16(*(u_int16_t *)(&sc_if->arpcom.ac_enaddr[4])));
	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION);

	/* Honor IFF_BROADCAST by (not) filtering broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST)
		SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
	else
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);

	/* We don't need the FCS appended to the packet. */
	SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS);

	/* We want short frames padded to 60 bytes. */
	SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD);

	/*
	 * Enable the reception of all error frames. This is
	 * a necessary evil due to the design of the XMAC. The
	 * XMAC's receive FIFO is only 8K in size, however jumbo
	 * frames can be up to 9000 bytes in length. When bad
	 * frame filtering is enabled, the XMAC's RX FIFO operates
	 * in 'store and forward' mode. For this to work, the
	 * entire frame has to fit into the FIFO, but that means
	 * that jumbo frames larger than 8192 bytes will be
	 * truncated. Disabling all bad frame filtering causes
	 * the RX FIFO to operate in streaming mode, in which
	 * case the XMAC will start transferring frames out of the
	 * RX FIFO as soon as the FIFO threshold is reached.
	 */
	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES|
	    XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS|
	    XM_MODE_RX_INRANGELEN);

	SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);

	/*
	 * Bump up the transmit threshold. This helps hold off transmit
	 * underruns when we're blasting traffic from both ports at once.
	 */
	SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH);

	/* Set promiscuous mode */
	sk_setpromisc(sc_if);

	/* Set multicast filter */
	sk_setmulti(sc_if);

	/* Clear pending interrupts (read ISR), then enable the wanted set.
	 * With an external PHY all XMAC interrupts are masked (0xFFFF). */
	SK_XM_READ_2(sc_if, XM_ISR);
	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
		SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS);
	else
		SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);

	/* Configure MAC arbiter timings per XMAC revision. */
	switch(sc_if->sk_xmac_rev) {
	case XM_XMAC_REV_B2:
		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
		break;
	case XM_XMAC_REV_C1:
		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
		break;
	default:
		break;
	}
	sk_win_write_2(sc, SK_MACARB_CTL,
	    SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF);

	/* Assume link is up until told otherwise. */
	sc_if->sk_link = 1;
}
2195 
/*
 * Bring up a Yukon/Yukon-Lite/Yukon-LP GMAC: reset the GMAC and GPHY
 * (with a COMA-mode workaround for Yukon-Lite rev >= A3), select
 * copper or fiber PHY mode, clear the MIB counters, program the
 * station address, and configure the RX/TX MAC FIFOs.  Called from
 * sk_init() for Yukon-class chips.
 */
void sk_init_yukon(struct sk_if_softc *sc_if)
{
	u_int32_t		phy, v;
	u_int16_t		reg;
	struct sk_softc		*sc;
	int			i;

	sc = sc_if->sk_softc;

	DPRINTFN(2, ("sk_init_yukon: start: sk_csr=%#x\n",
		     CSR_READ_4(sc_if->sk_softc, SK_CSR)));

	if (sc->sk_type == SK_YUKON_LITE &&
	    sc->sk_rev >= SK_YUKON_LITE_REV_A3) {
		/*
		 * Workaround code for COMA mode, set PHY reset.
		 * Otherwise it will not correctly take chip out of
		 * powerdown (coma)
		 */
		v = sk_win_read_4(sc, SK_GPIO);
		v |= SK_GPIO_DIR9 | SK_GPIO_DAT9;
		sk_win_write_4(sc, SK_GPIO, v);
	}

	DPRINTFN(6, ("sk_init_yukon: 1\n"));

	/* GMAC and GPHY Reset */
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET);
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
	DELAY(1000);

	DPRINTFN(6, ("sk_init_yukon: 2\n"));

	if (sc->sk_type == SK_YUKON_LITE &&
	    sc->sk_rev >= SK_YUKON_LITE_REV_A3) {
		/*
		 * Workaround code for COMA mode, clear PHY reset
		 * (drive DAT9 low while keeping DIR9 as output).
		 */
		v = sk_win_read_4(sc, SK_GPIO);
		v |= SK_GPIO_DIR9;
		v &= ~SK_GPIO_DAT9;
		sk_win_write_4(sc, SK_GPIO, v);
	}

	/* Assemble the GPHY control word: autoneg all, pause enabled. */
	phy = SK_GPHY_INT_POL_HI | SK_GPHY_DIS_FC | SK_GPHY_DIS_SLEEP |
		SK_GPHY_ENA_XC | SK_GPHY_ANEG_ALL | SK_GPHY_ENA_PAUSE;

	/* Copper vs. fiber media was detected at attach time. */
	if (sc->sk_coppertype)
		phy |= SK_GPHY_COPPER;
	else
		phy |= SK_GPHY_FIBER;

	DPRINTFN(3, ("sk_init_yukon: phy=%#x\n", phy));

	/* Pulse the GPHY reset with the chosen configuration. */
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_SET);
	DELAY(1000);
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_CLEAR);
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF |
		      SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR);

	DPRINTFN(3, ("sk_init_yukon: gmac_ctrl=%#x\n",
		     SK_IF_READ_4(sc_if, 0, SK_GMAC_CTRL)));

	DPRINTFN(6, ("sk_init_yukon: 3\n"));

	/* unused read of the interrupt source register */
	DPRINTFN(6, ("sk_init_yukon: 4\n"));
	SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);

	DPRINTFN(6, ("sk_init_yukon: 4a\n"));
	reg = SK_YU_READ_2(sc_if, YUKON_PAR);
	DPRINTFN(6, ("sk_init_yukon: YUKON_PAR=%#x\n", reg));

	/* MIB Counter Clear Mode set */
        reg |= YU_PAR_MIB_CLR;
	DPRINTFN(6, ("sk_init_yukon: YUKON_PAR=%#x\n", reg));
	DPRINTFN(6, ("sk_init_yukon: 4b\n"));
	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);

	/* MIB Counter Clear Mode clear */
	DPRINTFN(6, ("sk_init_yukon: 5\n"));
        reg &= ~YU_PAR_MIB_CLR;
	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);

	/* receive control reg */
	DPRINTFN(6, ("sk_init_yukon: 7\n"));
	SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR);

	/* transmit parameter register */
	DPRINTFN(6, ("sk_init_yukon: 8\n"));
	SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) |
		      YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) );

	/* serial mode register */
	DPRINTFN(6, ("sk_init_yukon: 9\n"));
	SK_YU_WRITE_2(sc_if, YUKON_SMR, YU_SMR_DATA_BLIND(0x1c) |
		      YU_SMR_MFL_VLAN | YU_SMR_MFL_JUMBO |
		      YU_SMR_IPG_DATA(0x1e));

	DPRINTFN(6, ("sk_init_yukon: 10\n"));
	/* Setup Yukon's address: three 16-bit words, low byte first. */
	for (i = 0; i < 3; i++) {
		/* Write Source Address 1 (unicast filter) */
		SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4,
			      sc_if->arpcom.ac_enaddr[i * 2] |
			      sc_if->arpcom.ac_enaddr[i * 2 + 1] << 8);
	}

	/* Copy this port's MAC address from window space into SA2. */
	for (i = 0; i < 3; i++) {
		reg = sk_win_read_2(sc_if->sk_softc,
				    SK_MAC1_0 + i * 2 + sc_if->sk_port * 8);
		SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4, reg);
	}

	/* Set promiscuous mode */
	sk_setpromisc(sc_if);

	/* Set multicast filter */
	DPRINTFN(6, ("sk_init_yukon: 11\n"));
	sk_setmulti(sc_if);

	/* enable interrupt mask for counter overflows */
	DPRINTFN(6, ("sk_init_yukon: 12\n"));
	SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0);
	SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0);
	SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0);

	/* Configure RX MAC FIFO Flush Mask */
	v = YU_RXSTAT_FOFL | YU_RXSTAT_CRCERR | YU_RXSTAT_MIIERR |
	    YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC | YU_RXSTAT_RUNT |
	    YU_RXSTAT_JABBER;
	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_MASK, v);

	/* Disable RX MAC FIFO Flush for YUKON-Lite Rev. A0 only */
	if (sc->sk_type == SK_YUKON_LITE && sc->sk_rev == SK_YUKON_LITE_REV_A0)
		v = SK_TFCTL_OPERATION_ON;
	else
		v = SK_TFCTL_OPERATION_ON | SK_RFCTL_FIFO_FLUSH_ON;
	/* Configure RX MAC FIFO */
	SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR);
	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_CTRL_TEST, v);

	/* Increase flush threshold to 64 bytes */
	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_THRESHOLD,
	    SK_RFCTL_FIFO_THRESHOLD + 1);

	/* Configure TX MAC FIFO */
	SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR);
	SK_IF_WRITE_2(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON);

	DPRINTFN(6, ("sk_init_yukon: end\n"));
}
2348 
/*
 * Note that to properly initialize any part of the GEnesis chip,
 * you first have to take it out of reset mode.
 */

/*
 * Bring the interface up: cancel pending I/O, program the LEDs and
 * descriptor poll timer, initialize the MAC (XMAC or Yukon GMAC),
 * transmit arbiter, RAM buffers and BMUs, set up the RX/TX rings,
 * enable this port's interrupts, and start the receiver/transmitter.
 * `xsc_if' is really a struct sk_if_softc pointer.  Runs at splnet.
 */
void
sk_init(void *xsc_if)
{
	struct sk_if_softc	*sc_if = xsc_if;
	struct sk_softc		*sc = sc_if->sk_softc;
	struct ifnet		*ifp = &sc_if->arpcom.ac_if;
	struct mii_data		*mii = &sc_if->sk_mii;
	int			s;

	DPRINTFN(2, ("sk_init\n"));

	s = splnet();

	/* Cancel pending I/O and free all RX/TX buffers. */
	sk_stop(sc_if);

	if (SK_IS_GENESIS(sc)) {
		/* Configure LINK_SYNC LED */
		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON);
		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
			      SK_LINKLED_LINKSYNC_ON);

		/* Configure RX LED */
		SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL,
			      SK_RXLEDCTL_COUNTER_START);

		/* Configure TX LED */
		SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL,
			      SK_TXLEDCTL_COUNTER_START);
	}

	/*
	 * Configure descriptor poll timer
	 *
	 * SK-NET GENESIS data sheet says that possibility of losing Start
	 * transmit command due to CPU/cache related interim storage problems
	 * under certain conditions. The document recommends a polling
	 * mechanism to send a Start transmit command to initiate transfer
	 * of ready descriptors regulary. To cope with this issue sk(4) now
	 * enables descriptor poll timer to initiate descriptor processing
	 * periodically as defined by SK_DPT_TIMER_MAX. However sk(4) still
	 * issue SK_TXBMU_TX_START to Tx BMU to get fast execution of Tx
	 * command instead of waiting for next descriptor polling time.
	 * The same rule may apply to Rx side too but it seems that is not
	 * needed at the moment.
	 * Since sk(4) uses descriptor polling as a last resort there is no
	 * need to set smaller polling time than maximum allowable one.
	 */
	SK_IF_WRITE_4(sc_if, 0, SK_DPT_INIT, SK_DPT_TIMER_MAX);

	/* Configure I2C registers */

	/* Configure XMAC(s) */
	switch (sc->sk_type) {
	case SK_GENESIS:
		sk_init_xmac(sc_if);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		sk_init_yukon(sc_if);
		break;
	}
	mii_mediachg(mii);

	if (SK_IS_GENESIS(sc)) {
		/* Configure MAC FIFOs */
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET);
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END);
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON);

		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET);
		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END);
		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON);
	}

	/* Configure transmit arbiter(s) */
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL,
	    SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON);

	/* Configure RAMbuffers: RX ring buffer region for this port. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);

	/* TX ring buffer region, store-and-forward mode. */
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON);

	/* Configure BMUs: point them at the descriptor rings. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
	    SK_RX_RING_ADDR(sc_if, 0));
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI, 0);

	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO,
            SK_TX_RING_ADDR(sc_if, 0));
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI, 0);

	/* Init descriptors; on failure tear everything back down. */
	if (sk_init_rx_ring(sc_if) == ENOBUFS) {
		printf("%s: initialization failed: no "
		    "memory for rx buffers\n", sc_if->sk_dev.dv_xname);
		sk_stop(sc_if);
		splx(s);
		return;
	}

	if (sk_init_tx_ring(sc_if) == ENOBUFS) {
		printf("%s: initialization failed: no "
		    "memory for tx buffers\n", sc_if->sk_dev.dv_xname);
		sk_stop(sc_if);
		splx(s);
		return;
	}

	/* Configure interrupt handling: clear pending, unmask this port. */
	CSR_READ_4(sc, SK_ISSR);
	if (sc_if->sk_port == SK_PORT_A)
		sc->sk_intrmask |= SK_INTRS1;
	else
		sc->sk_intrmask |= SK_INTRS2;

	sc->sk_intrmask |= SK_ISR_EXTERNAL_REG;

	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	/* Start BMUs. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START);

	if (SK_IS_GENESIS(sc)) {
		/* Enable XMACs TX and RX state machines */
		SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE);
		SK_XM_SETBIT_2(sc_if, XM_MMUCMD,
			       XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
	}

	if (SK_IS_YUKON(sc)) {
		/* Enable the GMAC transmitter and receiver. */
		u_int16_t reg = SK_YU_READ_2(sc_if, YUKON_GPCR);
		reg |= YU_GPCR_TXEN | YU_GPCR_RXEN;
		SK_YU_WRITE_2(sc_if, YUKON_GPCR, reg);
	}

	/* Activate descriptor polling timer */
	SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_START);
	/* start transfer of Tx descriptors */
	CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Yukon chips need the periodic tick timer (see sk_yukon_tick). */
	if (SK_IS_YUKON(sc))
		timeout_add_sec(&sc_if->sk_tick_ch, 1);

	splx(s);
}
2517 
/*
 * Shut the interface down: stop the descriptor poll timer and the
 * TX/RX BMUs (busy-waiting up to SK_TIMEOUT iterations for each to
 * acknowledge the stop), put an external Broadcom PHY back into
 * reset, turn the MAC/FIFO/RAM-buffer blocks off, mask this port's
 * interrupts, and free all mbufs and DMA maps still held by the
 * RX/TX rings.  Caller is expected to hold splnet (see sk_init()).
 */
void
sk_stop(struct sk_if_softc *sc_if)
{
	struct sk_softc		*sc = sc_if->sk_softc;
	struct ifnet		*ifp = &sc_if->arpcom.ac_if;
	struct sk_txmap_entry	*dma;
	int			i;
	u_int32_t		val;

	DPRINTFN(2, ("sk_stop\n"));

	timeout_del(&sc_if->sk_tick_ch);

	ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE);

	/* stop Tx descriptor polling timer */
	SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_STOP);
	/* stop transfer of Tx descriptors; the STOP bit clears on ack */
	CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_STOP);
	for (i = 0; i < SK_TIMEOUT; i++) {
		val = CSR_READ_4(sc, sc_if->sk_tx_bmu);
		if (!(val & SK_TXBMU_TX_STOP))
			break;
		DELAY(1);
	}
	if (i == SK_TIMEOUT)
		printf("%s: cannot stop transfer of Tx descriptors\n",
		      sc_if->sk_dev.dv_xname);
	/* stop transfer of Rx descriptors (same clear-on-ack protocol) */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_STOP);
	for (i = 0; i < SK_TIMEOUT; i++) {
		val = SK_IF_READ_4(sc_if, 0, SK_RXQ1_BMU_CSR);
		if (!(val & SK_RXBMU_RX_STOP))
			break;
		DELAY(1);
	}
	if (i == SK_TIMEOUT)
		printf("%s: cannot stop transfer of Rx descriptors\n",
		      sc_if->sk_dev.dv_xname);

	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		u_int32_t		val;	/* shadows outer `val' */

		/* Put PHY back into reset (DATx low, DIRx as output). */
		val = sk_win_read_4(sc, SK_GPIO);
		if (sc_if->sk_port == SK_PORT_A) {
			val |= SK_GPIO_DIR0;
			val &= ~SK_GPIO_DAT0;
		} else {
			val |= SK_GPIO_DIR2;
			val &= ~SK_GPIO_DAT2;
		}
		sk_win_write_4(sc, SK_GPIO, val);
	}

	/*
	 * Turn off various components of this interface.
	 * NOTE(review): the XM_* accesses here and below are issued even on
	 * Yukon-class chips, which have no XMAC — presumably harmless, but
	 * worth confirming against the hardware documentation.
	 */
	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
	switch (sc->sk_type) {
	case SK_GENESIS:
		SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL,
			      SK_TXMACCTL_XMAC_RESET);
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		SK_IF_WRITE_1(sc_if,0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET);
		SK_IF_WRITE_1(sc_if,0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET);
		break;
	}
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
	SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);

	/* Disable interrupts for this port only. */
	if (sc_if->sk_port == SK_PORT_A)
		sc->sk_intrmask &= ~SK_INTRS1;
	else
		sc->sk_intrmask &= ~SK_INTRS2;
	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	/* Clear (read) and mask all XMAC interrupts. */
	SK_XM_READ_2(sc_if, XM_ISR);
	SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);

	/* Free RX and TX mbufs still in the queues. */
	for (i = 0; i < SK_RX_RING_CNT; i++) {
		if (sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf != NULL) {
			m_freem(sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf);
			sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf = NULL;
		}
	}

	/* TX mbufs also return their DMA maps to the free list. */
	for (i = 0; i < SK_TX_RING_CNT; i++) {
		if (sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf != NULL) {
			m_freem(sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf);
			sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf = NULL;
			SIMPLEQ_INSERT_HEAD(&sc_if->sk_txmap_head,
			    sc_if->sk_cdata.sk_tx_map[i], link);
			sc_if->sk_cdata.sk_tx_map[i] = 0;
		}
	}

	/* Destroy every DMA map on the free list. */
	while ((dma = SIMPLEQ_FIRST(&sc_if->sk_txmap_head))) {
		SIMPLEQ_REMOVE_HEAD(&sc_if->sk_txmap_head, link);
		bus_dmamap_destroy(sc->sc_dmatag, dma->dmamap);
		free(dma, M_DEVBUF);
	}
}
2632 
/* autoconf(9) attach glue for the skc controller device. */
struct cfattach skc_ca = {
	sizeof(struct sk_softc), skc_probe, skc_attach,
};
2636 
2637 struct cfdriver skc_cd = {
2638 	0, "skc", DV_DULL
2639 };
2640 
/* autoconf(9) attach glue for the per-port sk network device. */
struct cfattach sk_ca = {
	sizeof(struct sk_if_softc), sk_probe, sk_attach,
};
2644 
2645 struct cfdriver sk_cd = {
2646 	0, "sk", DV_IFNET
2647 };
2648 
2649 #ifdef SK_DEBUG
2650 void
2651 sk_dump_txdesc(struct sk_tx_desc *desc, int idx)
2652 {
2653 #define DESC_PRINT(X)					\
2654 	if (X)					\
2655 		printf("txdesc[%d]." #X "=%#x\n",	\
2656 		       idx, X);
2657 
2658 	DESC_PRINT(letoh32(desc->sk_ctl));
2659 	DESC_PRINT(letoh32(desc->sk_next));
2660 	DESC_PRINT(letoh32(desc->sk_data_lo));
2661 	DESC_PRINT(letoh32(desc->sk_data_hi));
2662 	DESC_PRINT(letoh32(desc->sk_xmac_txstat));
2663 	DESC_PRINT(letoh16(desc->sk_rsvd0));
2664 	DESC_PRINT(letoh16(desc->sk_csum_startval));
2665 	DESC_PRINT(letoh16(desc->sk_csum_startpos));
2666 	DESC_PRINT(letoh16(desc->sk_csum_writepos));
2667 	DESC_PRINT(letoh16(desc->sk_rsvd1));
2668 #undef PRINT
2669 }
2670 
/*
 * Hex/ASCII dump of `len' bytes at `data', 16 bytes per line
 * (SK_DEBUG only).  Each line shows the offset, the bytes in hex
 * with an extra gap after the eighth column, and a printable-ASCII
 * rendering (non-printable bytes shown as spaces).
 */
void
sk_dump_bytes(const char *data, int len)
{
	int chunk, off, k;

	for (off = 0; off < len; off += 16) {
		printf("%08x  ", off);

		/* Number of bytes on this line (final line may be short). */
		chunk = len - off;
		if (chunk > 16)
			chunk = 16;

		/* Hex columns, with a gap after column 8. */
		for (k = 0; k < chunk; k++) {
			printf("%02x ", data[off + k] & 0xff);
			if (k == 7)
				printf(" ");
		}

		/* Pad a short line so the ASCII column lines up. */
		for (; k < 16; k++)
			printf("   ");
		printf("  ");

		/* ASCII rendering. */
		for (k = 0; k < chunk; k++) {
			int ch = data[off + k] & 0xff;
			printf("%c", ' ' <= ch && ch <= '~' ? ch : ' ');
		}

		printf("\n");

		if (chunk < 16)
			break;
	}
}
2702 
2703 void
2704 sk_dump_mbuf(struct mbuf *m)
2705 {
2706 	int count = m->m_pkthdr.len;
2707 
2708 	printf("m=%#lx, m->m_pkthdr.len=%#d\n", m, m->m_pkthdr.len);
2709 
2710 	while (count > 0 && m) {
2711 		printf("m=%#lx, m->m_data=%#lx, m->m_len=%d\n",
2712 		       m, m->m_data, m->m_len);
2713 		sk_dump_bytes(mtod(m, char *), m->m_len);
2714 
2715 		count -= m->m_len;
2716 		m = m->m_next;
2717 	}
2718 }
2719 #endif
2720