xref: /openbsd-src/sys/dev/pci/if_sk.c (revision 850e275390052b330d93020bf619a739a3c277ac)
1 /*	$OpenBSD: if_sk.c,v 1.147 2008/09/10 14:01:22 blambert Exp $	*/
2 
3 /*
4  * Copyright (c) 1997, 1998, 1999, 2000
5  *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by Bill Paul.
18  * 4. Neither the name of the author nor the names of any co-contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  * $FreeBSD: /c/ncvs/src/sys/pci/if_sk.c,v 1.20 2000/04/22 02:16:37 wpaul Exp $
35  */
36 
37 /*
38  * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
39  *
40  * Permission to use, copy, modify, and distribute this software for any
41  * purpose with or without fee is hereby granted, provided that the above
42  * copyright notice and this permission notice appear in all copies.
43  *
44  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
45  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
46  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
47  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
48  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
49  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
50  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
51  */
52 
53 /*
54  * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports
55  * the SK-984x series adapters, both single port and dual port.
56  * References:
57  * 	The XaQti XMAC II datasheet,
58  * http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
59  *	The SysKonnect GEnesis manual, http://www.syskonnect.com
60  *
61  * Note: XaQti has been acquired by Vitesse, and Vitesse does not have the
62  * XMAC II datasheet online. I have put my copy at people.freebsd.org as a
63  * convenience to others until Vitesse corrects this problem:
64  *
65  * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
66  *
67  * Written by Bill Paul <wpaul@ee.columbia.edu>
68  * Department of Electrical Engineering
69  * Columbia University, New York City
70  */
71 
72 /*
73  * The SysKonnect gigabit ethernet adapters consist of two main
74  * components: the SysKonnect GEnesis controller chip and the XaQti Corp.
75  * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC
76  * components and a PHY while the GEnesis controller provides a PCI
77  * interface with DMA support. Each card may have between 512K and
78  * 2MB of SRAM on board depending on the configuration.
79  *
80  * The SysKonnect GEnesis controller can have either one or two XMAC
81  * chips connected to it, allowing single or dual port NIC configurations.
82  * SysKonnect has the distinction of being the only vendor on the market
83  * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs,
84  * dual DMA queues, packet/MAC/transmit arbiters and direct access to the
85  * XMAC registers. This driver takes advantage of these features to allow
86  * both XMACs to operate as independent interfaces.
87  */
88 
89 #include "bpfilter.h"
90 
91 #include <sys/param.h>
92 #include <sys/systm.h>
93 #include <sys/sockio.h>
94 #include <sys/mbuf.h>
95 #include <sys/malloc.h>
96 #include <sys/kernel.h>
97 #include <sys/socket.h>
98 #include <sys/timeout.h>
99 #include <sys/device.h>
100 #include <sys/queue.h>
101 
102 #include <net/if.h>
103 #include <net/if_dl.h>
104 #include <net/if_types.h>
105 
106 #ifdef INET
107 #include <netinet/in.h>
108 #include <netinet/in_systm.h>
109 #include <netinet/in_var.h>
110 #include <netinet/ip.h>
111 #include <netinet/udp.h>
112 #include <netinet/tcp.h>
113 #include <netinet/if_ether.h>
114 #endif
115 
116 #include <net/if_media.h>
117 #include <net/if_vlan_var.h>
118 
119 #if NBPFILTER > 0
120 #include <net/bpf.h>
121 #endif
122 
123 #include <dev/mii/mii.h>
124 #include <dev/mii/miivar.h>
125 #include <dev/mii/brgphyreg.h>
126 
127 #include <dev/pci/pcireg.h>
128 #include <dev/pci/pcivar.h>
129 #include <dev/pci/pcidevs.h>
130 
131 #include <dev/pci/if_skreg.h>
132 #include <dev/pci/if_skvar.h>
133 
134 int skc_probe(struct device *, void *, void *);
135 void skc_attach(struct device *, struct device *self, void *aux);
136 void skc_shutdown(void *);
137 int sk_probe(struct device *, void *, void *);
138 void sk_attach(struct device *, struct device *self, void *aux);
139 int skcprint(void *, const char *);
140 int sk_intr(void *);
141 void sk_intr_bcom(struct sk_if_softc *);
142 void sk_intr_xmac(struct sk_if_softc *);
143 void sk_intr_yukon(struct sk_if_softc *);
144 static __inline int sk_rxvalid(struct sk_softc *, u_int32_t, u_int32_t);
145 void sk_rxeof(struct sk_if_softc *);
146 void sk_txeof(struct sk_if_softc *);
147 int sk_encap(struct sk_if_softc *, struct mbuf *, u_int32_t *);
148 void sk_start(struct ifnet *);
149 int sk_ioctl(struct ifnet *, u_long, caddr_t);
150 void sk_init(void *);
151 void sk_init_xmac(struct sk_if_softc *);
152 void sk_init_yukon(struct sk_if_softc *);
153 void sk_stop(struct sk_if_softc *);
154 void sk_watchdog(struct ifnet *);
155 int sk_ifmedia_upd(struct ifnet *);
156 void sk_ifmedia_sts(struct ifnet *, struct ifmediareq *);
157 void sk_reset(struct sk_softc *);
158 int sk_newbuf(struct sk_if_softc *, int, struct mbuf *, bus_dmamap_t);
159 int sk_alloc_jumbo_mem(struct sk_if_softc *);
160 void *sk_jalloc(struct sk_if_softc *);
161 void sk_jfree(caddr_t, u_int, void *);
162 int sk_init_rx_ring(struct sk_if_softc *);
163 int sk_init_tx_ring(struct sk_if_softc *);
164 
165 int sk_xmac_miibus_readreg(struct device *, int, int);
166 void sk_xmac_miibus_writereg(struct device *, int, int, int);
167 void sk_xmac_miibus_statchg(struct device *);
168 
169 int sk_marv_miibus_readreg(struct device *, int, int);
170 void sk_marv_miibus_writereg(struct device *, int, int, int);
171 void sk_marv_miibus_statchg(struct device *);
172 
173 u_int32_t sk_xmac_hash(caddr_t);
174 u_int32_t sk_yukon_hash(caddr_t);
175 void sk_setfilt(struct sk_if_softc *, caddr_t, int);
176 void sk_setmulti(struct sk_if_softc *);
177 void sk_setpromisc(struct sk_if_softc *);
178 void sk_tick(void *);
179 void sk_yukon_tick(void *);
180 void sk_rxcsum(struct ifnet *, struct mbuf *, const u_int16_t, const u_int16_t);
181 
182 #ifdef SK_DEBUG
183 #define DPRINTF(x)	if (skdebug) printf x
184 #define DPRINTFN(n,x)	if (skdebug >= (n)) printf x
185 int	skdebug = 0;
186 
187 void sk_dump_txdesc(struct sk_tx_desc *, int);
188 void sk_dump_mbuf(struct mbuf *);
189 void sk_dump_bytes(const char *, int);
190 #else
191 #define DPRINTF(x)
192 #define DPRINTFN(n,x)
193 #endif
194 
195 /* supported device vendors */
196 const struct pci_matchid skc_devices[] = {
197 	{ PCI_VENDOR_3COM,		PCI_PRODUCT_3COM_3C940 },
198 	{ PCI_VENDOR_3COM,		PCI_PRODUCT_3COM_3C940B },
199 	{ PCI_VENDOR_CNET,		PCI_PRODUCT_CNET_GIGACARD },
200 	{ PCI_VENDOR_DLINK,		PCI_PRODUCT_DLINK_DGE530T_A1 },
201 	{ PCI_VENDOR_DLINK,		PCI_PRODUCT_DLINK_DGE530T_B1 },
202 	{ PCI_VENDOR_LINKSYS,		PCI_PRODUCT_LINKSYS_EG1064 },
203 	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON },
204 	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_BELKIN },
205 	{ PCI_VENDOR_SCHNEIDERKOCH,	PCI_PRODUCT_SCHNEIDERKOCH_SK98XX },
206 	{ PCI_VENDOR_SCHNEIDERKOCH,	PCI_PRODUCT_SCHNEIDERKOCH_SK98XX2 },
207 	{ PCI_VENDOR_SCHNEIDERKOCH,	PCI_PRODUCT_SCHNEIDERKOCH_SK9821 },
208 	{ PCI_VENDOR_SCHNEIDERKOCH,	PCI_PRODUCT_SCHNEIDERKOCH_SK9843 }
209 };
210 
211 #define SK_LINKSYS_EG1032_SUBID 0x00151737
212 
/*
 * sk_win_* register accessors.  The names follow the traditional
 * "register window" convention; as implemented here they are plain
 * pass-throughs to the CSR_* macros on the mapped register space.
 */
static inline u_int32_t
sk_win_read_4(struct sk_softc *sc, u_int32_t reg)
{
	return CSR_READ_4(sc, reg);
}

static inline u_int16_t
sk_win_read_2(struct sk_softc *sc, u_int32_t reg)
{
	return CSR_READ_2(sc, reg);
}

static inline u_int8_t
sk_win_read_1(struct sk_softc *sc, u_int32_t reg)
{
	return CSR_READ_1(sc, reg);
}

static inline void
sk_win_write_4(struct sk_softc *sc, u_int32_t reg, u_int32_t x)
{
	CSR_WRITE_4(sc, reg, x);
}

static inline void
sk_win_write_2(struct sk_softc *sc, u_int32_t reg, u_int16_t x)
{
	CSR_WRITE_2(sc, reg, x);
}

static inline void
sk_win_write_1(struct sk_softc *sc, u_int32_t reg, u_int8_t x)
{
	CSR_WRITE_1(sc, reg, x);
}
248 
/*
 * Read a PHY register through the XMAC's MII interface.  Returns the
 * register contents, or 0 on timeout or unsupported PHY address
 * (callers cannot distinguish this from a legitimately zero register).
 */
int
sk_xmac_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
	int i;

	DPRINTFN(9, ("sk_xmac_miibus_readreg\n"));

	/* The internal XMAC PHY is only serviced at PHY address 0. */
	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC && phy != 0)
		return (0);

	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
	/* NOTE(review): this dummy read appears to start the MII cycle. */
	SK_XM_READ_2(sc_if, XM_PHY_DATA);
	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
		/* External (GMII) PHY: poll until the data is flagged ready. */
		for (i = 0; i < SK_TIMEOUT; i++) {
			DELAY(1);
			if (SK_XM_READ_2(sc_if, XM_MMUCMD) &
			    XM_MMUCMD_PHYDATARDY)
				break;
		}

		if (i == SK_TIMEOUT) {
			printf("%s: phy failed to come ready\n",
			    sc_if->sk_dev.dv_xname);
			return (0);
		}
	}
	DELAY(1);
	return (SK_XM_READ_2(sc_if, XM_PHY_DATA));
}
279 
/*
 * Write a PHY register through the XMAC's MII interface.  Waits for the
 * interface to go idle before issuing the write and again afterwards;
 * on timeout the failure is logged and the write is silently dropped.
 */
void
sk_xmac_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
	int i;

	DPRINTFN(9, ("sk_xmac_miibus_writereg\n"));

	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
	/* Wait for any in-flight MII transaction to complete. */
	for (i = 0; i < SK_TIMEOUT; i++) {
		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
			break;
	}

	if (i == SK_TIMEOUT) {
		printf("%s: phy failed to come ready\n",
		    sc_if->sk_dev.dv_xname);
		return;
	}

	/* Issue the data, then wait for the interface to go idle again. */
	SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val);
	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
			break;
	}

	if (i == SK_TIMEOUT)
		printf("%s: phy write timed out\n", sc_if->sk_dev.dv_xname);
}
310 
311 void
312 sk_xmac_miibus_statchg(struct device *dev)
313 {
314 	struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
315 	struct mii_data *mii = &sc_if->sk_mii;
316 
317 	DPRINTFN(9, ("sk_xmac_miibus_statchg\n"));
318 
319 	/*
320 	 * If this is a GMII PHY, manually set the XMAC's
321 	 * duplex mode accordingly.
322 	 */
323 	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
324 		if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
325 			SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
326 		else
327 			SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
328 	}
329 }
330 
/*
 * Read a PHY register through the Yukon's SMI interface.  Returns the
 * register contents, or 0 on timeout or when the PHY address/type is
 * not a supported Marvell PHY.
 */
int
sk_marv_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
	u_int16_t val;
	int i;

	/* Only PHY address 0 on Marvell copper/fiber PHYs is serviced. */
	if (phy != 0 ||
	    (sc_if->sk_phytype != SK_PHYTYPE_MARV_COPPER &&
	     sc_if->sk_phytype != SK_PHYTYPE_MARV_FIBER)) {
		DPRINTFN(9, ("sk_marv_miibus_readreg (skip) phy=%d, reg=%#x\n",
			     phy, reg));
		return (0);
	}

	/* Start the read cycle... */
        SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
		      YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ);

	/* ...and poll until the controller marks the read data valid. */
	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		val = SK_YU_READ_2(sc_if, YUKON_SMICR);
		if (val & YU_SMICR_READ_VALID)
			break;
	}

	if (i == SK_TIMEOUT) {
		printf("%s: phy failed to come ready\n",
		       sc_if->sk_dev.dv_xname);
		return (0);
	}

 	DPRINTFN(9, ("sk_marv_miibus_readreg: i=%d, timeout=%d\n", i,
		     SK_TIMEOUT));

        val = SK_YU_READ_2(sc_if, YUKON_SMIDR);

	DPRINTFN(9, ("sk_marv_miibus_readreg phy=%d, reg=%#x, val=%#x\n",
		     phy, reg, val));

	return (val);
}
372 
/*
 * Write a PHY register through the Yukon's SMI interface: load the
 * data register, issue the write command, then poll until the
 * controller goes idle.  Timeouts are logged and the write is lost.
 */
void
sk_marv_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
	int i;

	DPRINTFN(9, ("sk_marv_miibus_writereg phy=%d reg=%#x val=%#x\n",
		     phy, reg, val));

	SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val);
	SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
		      YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE);

	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		if (!(SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY))
			break;
	}

	if (i == SK_TIMEOUT)
		printf("%s: phy write timed out\n", sc_if->sk_dev.dv_xname);
}
395 
/*
 * MII status-change callback for Marvell PHYs.  Unlike
 * sk_xmac_miibus_statchg(), no MAC state is updated here; when
 * debugging, the GPCR contents are dumped.
 */
void
sk_marv_miibus_statchg(struct device *dev)
{
	DPRINTFN(9, ("sk_marv_miibus_statchg: gpcr=%x\n",
		     SK_YU_READ_2(((struct sk_if_softc *)dev), YUKON_GPCR)));
}
402 
403 u_int32_t
404 sk_xmac_hash(caddr_t addr)
405 {
406 	u_int32_t crc;
407 
408 	crc = ether_crc32_le(addr, ETHER_ADDR_LEN);
409 	return (~crc & ((1 << SK_HASH_BITS) - 1));
410 }
411 
412 u_int32_t
413 sk_yukon_hash(caddr_t addr)
414 {
415 	u_int32_t crc;
416 
417 	crc = ether_crc32_be(addr, ETHER_ADDR_LEN);
418 	return (crc & ((1 << SK_HASH_BITS) - 1));
419 }
420 
421 void
422 sk_setfilt(struct sk_if_softc *sc_if, caddr_t addr, int slot)
423 {
424 	int base = XM_RXFILT_ENTRY(slot);
425 
426 	SK_XM_WRITE_2(sc_if, base, letoh16(*(u_int16_t *)(&addr[0])));
427 	SK_XM_WRITE_2(sc_if, base + 2, letoh16(*(u_int16_t *)(&addr[2])));
428 	SK_XM_WRITE_2(sc_if, base + 4, letoh16(*(u_int16_t *)(&addr[4])));
429 }
430 
431 void
432 sk_setmulti(struct sk_if_softc *sc_if)
433 {
434 	struct sk_softc *sc = sc_if->sk_softc;
435 	struct ifnet *ifp= &sc_if->arpcom.ac_if;
436 	u_int32_t hashes[2] = { 0, 0 };
437 	int h, i;
438 	struct arpcom *ac = &sc_if->arpcom;
439 	struct ether_multi *enm;
440 	struct ether_multistep step;
441 	u_int8_t dummy[] = { 0, 0, 0, 0, 0 ,0 };
442 
443 	/* First, zot all the existing filters. */
444 	switch(sc->sk_type) {
445 	case SK_GENESIS:
446 		for (i = 1; i < XM_RXFILT_MAX; i++)
447 			sk_setfilt(sc_if, (caddr_t)&dummy, i);
448 
449 		SK_XM_WRITE_4(sc_if, XM_MAR0, 0);
450 		SK_XM_WRITE_4(sc_if, XM_MAR2, 0);
451 		break;
452 	case SK_YUKON:
453 	case SK_YUKON_LITE:
454 	case SK_YUKON_LP:
455 		SK_YU_WRITE_2(sc_if, YUKON_MCAH1, 0);
456 		SK_YU_WRITE_2(sc_if, YUKON_MCAH2, 0);
457 		SK_YU_WRITE_2(sc_if, YUKON_MCAH3, 0);
458 		SK_YU_WRITE_2(sc_if, YUKON_MCAH4, 0);
459 		break;
460 	}
461 
462 	/* Now program new ones. */
463 allmulti:
464 	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
465 		hashes[0] = 0xFFFFFFFF;
466 		hashes[1] = 0xFFFFFFFF;
467 	} else {
468 		i = 1;
469 		/* First find the tail of the list. */
470 		ETHER_FIRST_MULTI(step, ac, enm);
471 		while (enm != NULL) {
472 			if (bcmp(enm->enm_addrlo, enm->enm_addrhi,
473 				 ETHER_ADDR_LEN)) {
474 				ifp->if_flags |= IFF_ALLMULTI;
475 				goto allmulti;
476 			}
477 			/*
478 			 * Program the first XM_RXFILT_MAX multicast groups
479 			 * into the perfect filter. For all others,
480 			 * use the hash table.
481 			 */
482 			if (SK_IS_GENESIS(sc) && i < XM_RXFILT_MAX) {
483 				sk_setfilt(sc_if, enm->enm_addrlo, i);
484 				i++;
485 			}
486 			else {
487 				switch(sc->sk_type) {
488 				case SK_GENESIS:
489 					h = sk_xmac_hash(enm->enm_addrlo);
490 					break;
491 
492 				case SK_YUKON:
493 				case SK_YUKON_LITE:
494 				case SK_YUKON_LP:
495 					h = sk_yukon_hash(enm->enm_addrlo);
496 					break;
497 				}
498 				if (h < 32)
499 					hashes[0] |= (1 << h);
500 				else
501 					hashes[1] |= (1 << (h - 32));
502 			}
503 
504 			ETHER_NEXT_MULTI(step, enm);
505 		}
506 	}
507 
508 	switch(sc->sk_type) {
509 	case SK_GENESIS:
510 		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_HASH|
511 			       XM_MODE_RX_USE_PERFECT);
512 		SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]);
513 		SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]);
514 		break;
515 	case SK_YUKON:
516 	case SK_YUKON_LITE:
517 	case SK_YUKON_LP:
518 		SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff);
519 		SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff);
520 		SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff);
521 		SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff);
522 		break;
523 	}
524 }
525 
526 void
527 sk_setpromisc(struct sk_if_softc *sc_if)
528 {
529 	struct sk_softc	*sc = sc_if->sk_softc;
530 	struct ifnet *ifp= &sc_if->arpcom.ac_if;
531 
532 	switch(sc->sk_type) {
533 	case SK_GENESIS:
534 		if (ifp->if_flags & IFF_PROMISC)
535 			SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
536 		else
537 			SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
538 		break;
539 	case SK_YUKON:
540 	case SK_YUKON_LITE:
541 	case SK_YUKON_LP:
542 		if (ifp->if_flags & IFF_PROMISC) {
543 			SK_YU_CLRBIT_2(sc_if, YUKON_RCR,
544 			    YU_RCR_UFLEN | YU_RCR_MUFLEN);
545 		} else {
546 			SK_YU_SETBIT_2(sc_if, YUKON_RCR,
547 			    YU_RCR_UFLEN | YU_RCR_MUFLEN);
548 		}
549 		break;
550 	}
551 }
552 
553 int
554 sk_init_rx_ring(struct sk_if_softc *sc_if)
555 {
556 	struct sk_chain_data	*cd = &sc_if->sk_cdata;
557 	struct sk_ring_data	*rd = sc_if->sk_rdata;
558 	int			i, nexti;
559 
560 	bzero((char *)rd->sk_rx_ring,
561 	    sizeof(struct sk_rx_desc) * SK_RX_RING_CNT);
562 
563 	for (i = 0; i < SK_RX_RING_CNT; i++) {
564 		cd->sk_rx_chain[i].sk_desc = &rd->sk_rx_ring[i];
565 		if (i == (SK_RX_RING_CNT - 1))
566 			nexti = 0;
567 		else
568 			nexti = i + 1;
569 		cd->sk_rx_chain[i].sk_next = &cd->sk_rx_chain[nexti];
570 		rd->sk_rx_ring[i].sk_next = htole32(SK_RX_RING_ADDR(sc_if, nexti));
571 		rd->sk_rx_ring[i].sk_csum1_start = htole16(ETHER_HDR_LEN);
572 		rd->sk_rx_ring[i].sk_csum2_start = htole16(ETHER_HDR_LEN +
573 		    sizeof(struct ip));
574 	}
575 
576 	for (i = 0; i < SK_RX_RING_CNT; i++) {
577 		if (sk_newbuf(sc_if, i, NULL,
578 		    sc_if->sk_cdata.sk_rx_jumbo_map) == ENOBUFS) {
579 			printf("%s: failed alloc of %dth mbuf\n",
580 			    sc_if->sk_dev.dv_xname, i);
581 			return (ENOBUFS);
582 		}
583 	}
584 
585 	sc_if->sk_cdata.sk_rx_prod = 0;
586 	sc_if->sk_cdata.sk_rx_cons = 0;
587 
588 	return (0);
589 }
590 
/*
 * Initialize the transmit ring: link the descriptors into a circular
 * list and pre-allocate one DMA map per ring slot, kept on the
 * sk_txmap_head free list.  Returns 0 or ENOBUFS.
 */
int
sk_init_tx_ring(struct sk_if_softc *sc_if)
{
	struct sk_softc		*sc = sc_if->sk_softc;
	struct sk_chain_data	*cd = &sc_if->sk_cdata;
	struct sk_ring_data	*rd = sc_if->sk_rdata;
	bus_dmamap_t		dmamap;
	struct sk_txmap_entry	*entry;
	int			i, nexti;

	bzero((char *)sc_if->sk_rdata->sk_tx_ring,
	    sizeof(struct sk_tx_desc) * SK_TX_RING_CNT);

	SIMPLEQ_INIT(&sc_if->sk_txmap_head);
	for (i = 0; i < SK_TX_RING_CNT; i++) {
		cd->sk_tx_chain[i].sk_desc = &rd->sk_tx_ring[i];
		/* Wrap the last descriptor back to the first. */
		if (i == (SK_TX_RING_CNT - 1))
			nexti = 0;
		else
			nexti = i + 1;
		cd->sk_tx_chain[i].sk_next = &cd->sk_tx_chain[nexti];
		rd->sk_tx_ring[i].sk_next = htole32(SK_TX_RING_ADDR(sc_if, nexti));

		/* NOTE(review): maps/entries created in earlier iterations
		 * are not released on these failure paths; presumably the
		 * caller tears the interface down -- verify. */
		if (bus_dmamap_create(sc->sc_dmatag, SK_JLEN, SK_NTXSEG,
		   SK_JLEN, 0, BUS_DMA_NOWAIT, &dmamap))
			return (ENOBUFS);

		entry = malloc(sizeof(*entry), M_DEVBUF, M_NOWAIT);
		if (!entry) {
			bus_dmamap_destroy(sc->sc_dmatag, dmamap);
			return (ENOBUFS);
		}
		entry->dmamap = dmamap;
		SIMPLEQ_INSERT_HEAD(&sc_if->sk_txmap_head, entry, link);
	}

	sc_if->sk_cdata.sk_tx_prod = 0;
	sc_if->sk_cdata.sk_tx_cons = 0;
	sc_if->sk_cdata.sk_tx_cnt = 0;

	/* Flush the initialized descriptors out for the chip to see. */
	SK_CDTXSYNC(sc_if, 0, SK_TX_RING_CNT,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	return (0);
}
636 
/*
 * Attach an mbuf backed by a jumbo-pool buffer to RX ring slot `i'.
 * If `m' is NULL a fresh mbuf header and jumbo buffer are allocated;
 * otherwise the given mbuf is recycled in place.  Returns 0 or ENOBUFS.
 */
int
sk_newbuf(struct sk_if_softc *sc_if, int i, struct mbuf *m,
	  bus_dmamap_t dmamap)
{
	struct mbuf		*m_new = NULL;
	struct sk_chain		*c;
	struct sk_rx_desc	*r;

	if (m == NULL) {
		caddr_t buf = NULL;

		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return (ENOBUFS);

		/* Allocate the jumbo buffer */
		buf = sk_jalloc(sc_if);
		if (buf == NULL) {
			m_freem(m_new);
			DPRINTFN(1, ("%s jumbo allocation failed -- packet "
			    "dropped!\n", sc_if->arpcom.ac_if.if_xname));
			return (ENOBUFS);
		}

		/* Attach the buffer to the mbuf; sk_jfree releases it. */
		m_new->m_len = m_new->m_pkthdr.len = SK_JLEN;
		MEXTADD(m_new, buf, SK_JLEN, 0, sk_jfree, sc_if);
	} else {
		/*
	 	 * We're re-using a previously allocated mbuf;
		 * be sure to re-init pointers and lengths to
		 * default values.
		 */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = SK_JLEN;
		m_new->m_data = m_new->m_ext.ext_buf;
	}
	/* Shift the payload so the IP header lands aligned. */
	m_adj(m_new, ETHER_ALIGN);

	c = &sc_if->sk_cdata.sk_rx_chain[i];
	r = c->sk_desc;
	c->sk_mbuf = m_new;
	/* DMA address = map base + buffer's offset within the jumbo pool. */
	r->sk_data_lo = htole32(dmamap->dm_segs[0].ds_addr +
	    (((vaddr_t)m_new->m_data
             - (vaddr_t)sc_if->sk_cdata.sk_jumbo_buf)));
	r->sk_ctl = htole32(SK_JLEN | SK_RXSTAT);

	SK_CDRXSYNC(sc_if, i, BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	return (0);
}
688 
689 /*
690  * Memory management for jumbo frames.
691  */
692 
693 int
694 sk_alloc_jumbo_mem(struct sk_if_softc *sc_if)
695 {
696 	struct sk_softc		*sc = sc_if->sk_softc;
697 	caddr_t			ptr, kva;
698 	bus_dma_segment_t	seg;
699 	int		i, rseg, state, error;
700 	struct sk_jpool_entry   *entry;
701 
702 	state = error = 0;
703 
704 	/* Grab a big chunk o' storage. */
705 	if (bus_dmamem_alloc(sc->sc_dmatag, SK_JMEM, PAGE_SIZE, 0,
706 			     &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
707 		printf(": can't alloc rx buffers");
708 		return (ENOBUFS);
709 	}
710 
711 	state = 1;
712 	if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg, SK_JMEM, &kva,
713 			   BUS_DMA_NOWAIT)) {
714 		printf(": can't map dma buffers (%d bytes)", SK_JMEM);
715 		error = ENOBUFS;
716 		goto out;
717 	}
718 
719 	state = 2;
720 	if (bus_dmamap_create(sc->sc_dmatag, SK_JMEM, 1, SK_JMEM, 0,
721 	    BUS_DMA_NOWAIT, &sc_if->sk_cdata.sk_rx_jumbo_map)) {
722 		printf(": can't create dma map");
723 		error = ENOBUFS;
724 		goto out;
725 	}
726 
727 	state = 3;
728 	if (bus_dmamap_load(sc->sc_dmatag, sc_if->sk_cdata.sk_rx_jumbo_map,
729 			    kva, SK_JMEM, NULL, BUS_DMA_NOWAIT)) {
730 		printf(": can't load dma map");
731 		error = ENOBUFS;
732 		goto out;
733 	}
734 
735 	state = 4;
736 	sc_if->sk_cdata.sk_jumbo_buf = (caddr_t)kva;
737 	DPRINTFN(1,("sk_jumbo_buf = 0x%08X\n", sc_if->sk_cdata.sk_jumbo_buf));
738 
739 	LIST_INIT(&sc_if->sk_jfree_listhead);
740 	LIST_INIT(&sc_if->sk_jinuse_listhead);
741 
742 	/*
743 	 * Now divide it up into 9K pieces and save the addresses
744 	 * in an array.
745 	 */
746 	ptr = sc_if->sk_cdata.sk_jumbo_buf;
747 	for (i = 0; i < SK_JSLOTS; i++) {
748 		sc_if->sk_cdata.sk_jslots[i] = ptr;
749 		ptr += SK_JLEN;
750 		entry = malloc(sizeof(struct sk_jpool_entry),
751 		    M_DEVBUF, M_NOWAIT);
752 		if (entry == NULL) {
753 			sc_if->sk_cdata.sk_jumbo_buf = NULL;
754 			printf(": no memory for jumbo buffer queue!");
755 			error = ENOBUFS;
756 			goto out;
757 		}
758 		entry->slot = i;
759 		LIST_INSERT_HEAD(&sc_if->sk_jfree_listhead,
760 				 entry, jpool_entries);
761 	}
762 out:
763 	if (error != 0) {
764 		switch (state) {
765 		case 4:
766 			bus_dmamap_unload(sc->sc_dmatag,
767 			    sc_if->sk_cdata.sk_rx_jumbo_map);
768 		case 3:
769 			bus_dmamap_destroy(sc->sc_dmatag,
770 			    sc_if->sk_cdata.sk_rx_jumbo_map);
771 		case 2:
772 			bus_dmamem_unmap(sc->sc_dmatag, kva, SK_JMEM);
773 		case 1:
774 			bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
775 			break;
776 		default:
777 			break;
778 		}
779 	}
780 
781 	return (error);
782 }
783 
784 /*
785  * Allocate a jumbo buffer.
786  */
787 void *
788 sk_jalloc(struct sk_if_softc *sc_if)
789 {
790 	struct sk_jpool_entry   *entry;
791 
792 	entry = LIST_FIRST(&sc_if->sk_jfree_listhead);
793 
794 	if (entry == NULL)
795 		return (NULL);
796 
797 	LIST_REMOVE(entry, jpool_entries);
798 	LIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead, entry, jpool_entries);
799 	return (sc_if->sk_cdata.sk_jslots[entry->slot]);
800 }
801 
/*
 * Release a jumbo buffer.  Used as the external-storage free callback
 * installed by MEXTADD() in sk_newbuf().  The jpool entries are
 * interchangeable tokens: any entry is taken off the in-use list,
 * retargeted at the freed slot, and put back on the free list.
 */
void
sk_jfree(caddr_t buf, u_int size, void	*arg)
{
	struct sk_jpool_entry *entry;
	struct sk_if_softc *sc;
	int i;

	/* Extract the softc struct pointer. */
	sc = (struct sk_if_softc *)arg;

	if (sc == NULL)
		panic("sk_jfree: can't find softc pointer!");

	/* calculate the slot this buffer belongs to */
	i = ((vaddr_t)buf
	     - (vaddr_t)sc->sk_cdata.sk_jumbo_buf) / SK_JLEN;

	if ((i < 0) || (i >= SK_JSLOTS))
		panic("sk_jfree: asked to free buffer that we don't manage!");

	entry = LIST_FIRST(&sc->sk_jinuse_listhead);
	if (entry == NULL)
		panic("sk_jfree: buffer not in use!");
	entry->slot = i;
	LIST_REMOVE(entry, jpool_entries);
	LIST_INSERT_HEAD(&sc->sk_jfree_listhead, entry, jpool_entries);
}
832 
/*
 * Set media options.  The actual work is done by the MII layer,
 * which reprograms the PHY; this always reports success.
 */
int
sk_ifmedia_upd(struct ifnet *ifp)
{
	struct sk_if_softc *sc_if = ifp->if_softc;

	mii_mediachg(&sc_if->sk_mii);
	return (0);
}
844 
845 /*
846  * Report current media status.
847  */
848 void
849 sk_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
850 {
851 	struct sk_if_softc *sc_if = ifp->if_softc;
852 
853 	mii_pollstat(&sc_if->sk_mii);
854 	ifmr->ifm_active = sc_if->sk_mii.mii_media_active;
855 	ifmr->ifm_status = sc_if->sk_mii.mii_media_status;
856 }
857 
/*
 * Handle interface ioctls.  Everything runs at splnet.  Generic
 * ethernet ioctls are offered to ether_ioctl() first; a positive
 * return from it finishes the request immediately.
 */
int
sk_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct sk_if_softc *sc_if = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct ifaddr *ifa = (struct ifaddr *) data;
	struct mii_data *mii;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, &sc_if->arpcom, command, data)) > 0) {
		splx(s);
		return (error);
	}

	switch(command) {
	case SIOCSIFADDR:
		/* Assigning an address implicitly brings the interface up. */
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			sk_init(sc_if);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc_if->arpcom, ifa);
#endif /* INET */
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ifp->if_hardmtu)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu)
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC bit changed while running,
			 * reprogram the filters instead of a full reinit.
			 */
			if (ifp->if_flags & IFF_RUNNING &&
			    (ifp->if_flags ^ sc_if->sk_if_flags)
			     & IFF_PROMISC) {
				sk_setpromisc(sc_if);
				sk_setmulti(sc_if);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					sk_init(sc_if);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				sk_stop(sc_if);
		}
		sc_if->sk_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (command == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc_if->arpcom) :
		    ether_delmulti(ifr, &sc_if->arpcom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware
			 * filter accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				sk_setmulti(sc_if);
			error = 0;
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = &sc_if->sk_mii;
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	default:
		error = ENOTTY;
		break;
	}

	splx(s);

	return (error);
}
937 
938 /*
939  * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
940  * IDs against our list and return a device name if we find a match.
941  */
942 int
943 skc_probe(struct device *parent, void *match, void *aux)
944 {
945 	struct pci_attach_args *pa = aux;
946 	pci_chipset_tag_t pc = pa->pa_pc;
947 	pcireg_t subid;
948 
949 	subid = pci_conf_read(pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
950 
951 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_LINKSYS &&
952 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_LINKSYS_EG1032 &&
953 	    subid == SK_LINKSYS_EG1032_SUBID)
954 		return (1);
955 
956 	return (pci_matchbyid((struct pci_attach_args *)aux, skc_devices,
957 	    sizeof(skc_devices)/sizeof(skc_devices[0])));
958 }
959 
/*
 * Force the GEnesis into reset, then bring it out of reset.
 */
void
sk_reset(struct sk_softc *sc)
{
	u_int32_t imtimer_ticks;

	DPRINTFN(2, ("sk_reset\n"));

	/* Assert software and master reset; Yukon also resets the link. */
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_RESET);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_RESET);
	if (SK_IS_YUKON(sc))
		CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET);

	DELAY(1000);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_UNRESET);
	DELAY(2);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_UNRESET);
	if (SK_IS_YUKON(sc))
		CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR);

	DPRINTFN(2, ("sk_reset: sk_csr=%x\n", CSR_READ_2(sc, SK_CSR)));
	DPRINTFN(2, ("sk_reset: sk_link_ctrl=%x\n",
		     CSR_READ_2(sc, SK_LINK_CTRL)));

	if (SK_IS_GENESIS(sc)) {
		/* Configure packet arbiter */
		sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET);
		sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT);
		sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT);
		sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT);
		sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT);
	}

	/* Enable RAM interface */
	sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);

	/*
	 * Configure interrupt moderation. The moderation timer
	 * defers interrupts specified in the interrupt moderation
	 * timer mask based on the timeout specified in the interrupt
	 * moderation timer init register. Each bit in the timer
	 * register represents one tick, so to specify a timeout in
	 * microseconds, we have to multiply by the correct number of
	 * ticks-per-microsecond.
	 */
	switch (sc->sk_type) {
	case SK_GENESIS:
		imtimer_ticks = SK_IMTIMER_TICKS_GENESIS;
		break;
	default:
		imtimer_ticks = SK_IMTIMER_TICKS_YUKON;
	}
	/*
	 * NOTE(review): imtimer_ticks looks unused here, but the
	 * SK_IM_USECS() macro presumably expands to a use of it --
	 * confirm in if_skreg.h before cleaning it up.
	 */
	sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(100));
	sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
	    SK_ISR_RX1_EOF|SK_ISR_RX2_EOF);
	sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);
}
1019 
1020 int
1021 sk_probe(struct device *parent, void *match, void *aux)
1022 {
1023 	struct skc_attach_args *sa = aux;
1024 
1025 	if (sa->skc_port != SK_PORT_A && sa->skc_port != SK_PORT_B)
1026 		return (0);
1027 
1028 	switch (sa->skc_type) {
1029 	case SK_GENESIS:
1030 	case SK_YUKON:
1031 	case SK_YUKON_LITE:
1032 	case SK_YUKON_LP:
1033 		return (1);
1034 	}
1035 
1036 	return (0);
1037 }
1038 
1039 /*
1040  * Each XMAC chip is attached as a separate logical IP interface.
1041  * Single port cards will have only one logical interface of course.
1042  */
void
sk_attach(struct device *parent, struct device *self, void *aux)
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *) self;
	struct sk_softc *sc = (struct sk_softc *)parent;
	struct skc_attach_args *sa = aux;
	struct ifnet *ifp;
	caddr_t kva;
	bus_dma_segment_t seg;
	int i, rseg;

	/* Cross-link the per-port softc with the controller softc. */
	sc_if->sk_port = sa->skc_port;
	sc_if->sk_softc = sc;
	sc->sk_if[sa->skc_port] = sc_if;

	/* Each port has its own transmit BMU; remember the matching CSR. */
	if (sa->skc_port == SK_PORT_A)
		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0;
	if (sa->skc_port == SK_PORT_B)
		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1;

	DPRINTFN(2, ("begin sk_attach: port=%d\n", sc_if->sk_port));

	/*
	 * Get station address for this interface. Note that
	 * dual port cards actually come with three station
	 * addresses: one for each port, plus an extra. The
	 * extra one is used by the SysKonnect driver software
	 * as a 'virtual' station address for when both ports
	 * are operating in failover mode. Currently we don't
	 * use this extra address.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		sc_if->arpcom.ac_enaddr[i] =
		    sk_win_read_1(sc, SK_MAC0_0 + (sa->skc_port * 8) + i);

	printf(": address %s\n",
	    ether_sprintf(sc_if->arpcom.ac_enaddr));

	/*
	 * Set up RAM buffer addresses. The NIC will have a certain
	 * amount of SRAM on it, somewhere between 512K and 2MB. We
	 * need to divide this up a) between the transmitter and
 	 * receiver and b) between the two XMACs, if this is a
	 * dual port NIC. Our algorithm is to divide up the memory
	 * evenly so that everyone gets a fair share.
	 */
	if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) {
		u_int32_t		chunk, val;

		/* Single MAC: split all of RAM between RX and TX. */
		chunk = sc->sk_ramsize / 2;
		/* RAM boundaries are programmed in 8-byte units. */
		val = sc->sk_rboff / sizeof(u_int64_t);
		sc_if->sk_rx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_rx_ramend = val - 1;
		sc_if->sk_tx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_tx_ramend = val - 1;
	} else {
		u_int32_t		chunk, val;

		/* Dual MAC: each port gets half, again split RX/TX. */
		chunk = sc->sk_ramsize / 4;
		val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) /
		    sizeof(u_int64_t);
		sc_if->sk_rx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_rx_ramend = val - 1;
		sc_if->sk_tx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_tx_ramend = val - 1;
	}

	DPRINTFN(2, ("sk_attach: rx_ramstart=%#x rx_ramend=%#x\n"
		     "           tx_ramstart=%#x tx_ramend=%#x\n",
		     sc_if->sk_rx_ramstart, sc_if->sk_rx_ramend,
		     sc_if->sk_tx_ramstart, sc_if->sk_tx_ramend));

	/* Read and save PHY type */
	sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF;

	/* Set PHY address */
	if (SK_IS_GENESIS(sc)) {
		switch (sc_if->sk_phytype) {
			case SK_PHYTYPE_XMAC:
				sc_if->sk_phyaddr = SK_PHYADDR_XMAC;
				break;
			case SK_PHYTYPE_BCOM:
				sc_if->sk_phyaddr = SK_PHYADDR_BCOM;
				break;
			default:
				printf("%s: unsupported PHY type: %d\n",
				    sc->sk_dev.dv_xname, sc_if->sk_phytype);
				return;
		}
	}

	if (SK_IS_YUKON(sc)) {
		/*
		 * NOTE(review): a phytype below MARV_COPPER with a PMD
		 * code other than 'L'/'S' is treated as an uninitialized
		 * EEPROM and forced to copper -- confirm the 'L'/'S'
		 * fiber semantics against the chip documentation.
		 */
		if ((sc_if->sk_phytype < SK_PHYTYPE_MARV_COPPER &&
		    sc->sk_pmd != 'L' && sc->sk_pmd != 'S')) {
			/* not initialized, punt */
			sc_if->sk_phytype = SK_PHYTYPE_MARV_COPPER;

			sc->sk_coppertype = 1;
		}

		sc_if->sk_phyaddr = SK_PHYADDR_MARV;

		if (!(sc->sk_coppertype))
			sc_if->sk_phytype = SK_PHYTYPE_MARV_FIBER;
	}

	/* Allocate the descriptor queues. */
	/* On failure, the fail_* labels below unwind in reverse order. */
	if (bus_dmamem_alloc(sc->sc_dmatag, sizeof(struct sk_ring_data),
	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		printf(": can't alloc rx buffers\n");
		goto fail;
	}
	if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg,
	    sizeof(struct sk_ring_data), &kva, BUS_DMA_NOWAIT)) {
		printf(": can't map dma buffers (%lu bytes)\n",
		       (ulong)sizeof(struct sk_ring_data));
		goto fail_1;
	}
	if (bus_dmamap_create(sc->sc_dmatag, sizeof(struct sk_ring_data), 1,
	    sizeof(struct sk_ring_data), 0, BUS_DMA_NOWAIT,
            &sc_if->sk_ring_map)) {
		printf(": can't create dma map\n");
		goto fail_2;
	}
	if (bus_dmamap_load(sc->sc_dmatag, sc_if->sk_ring_map, kva,
	    sizeof(struct sk_ring_data), NULL, BUS_DMA_NOWAIT)) {
		printf(": can't load dma map\n");
		goto fail_3;
	}
        sc_if->sk_rdata = (struct sk_ring_data *)kva;
	bzero(sc_if->sk_rdata, sizeof(struct sk_ring_data));

	/* Try to allocate memory for jumbo buffers. */
	if (sk_alloc_jumbo_mem(sc_if)) {
		printf(": jumbo buffer allocation failed\n");
		goto fail_3;
	}

	/* Initialize the generic ifnet fields and driver entry points. */
	ifp = &sc_if->arpcom.ac_if;
	ifp->if_softc = sc_if;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = sk_ioctl;
	ifp->if_start = sk_start;
	ifp->if_watchdog = sk_watchdog;
	ifp->if_baudrate = 1000000000;
	ifp->if_hardmtu = SK_JUMBO_MTU;
	IFQ_SET_MAXLEN(&ifp->if_snd, SK_TX_RING_CNT - 1);
	IFQ_SET_READY(&ifp->if_snd);
	bcopy(sc_if->sk_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	/*
	 * Do miibus setup.
	 */
	switch (sc->sk_type) {
	case SK_GENESIS:
		sk_init_xmac(sc_if);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		sk_init_yukon(sc_if);
		break;
	default:
		printf(": unknown device type %d\n", sc->sk_type);
		/* dealloc jumbo on error */
		goto fail_3;
	}

 	DPRINTFN(2, ("sk_attach: 1\n"));

	/* Hook up the MII access routines matching the MAC type. */
	sc_if->sk_mii.mii_ifp = ifp;
	if (SK_IS_GENESIS(sc)) {
		sc_if->sk_mii.mii_readreg = sk_xmac_miibus_readreg;
		sc_if->sk_mii.mii_writereg = sk_xmac_miibus_writereg;
		sc_if->sk_mii.mii_statchg = sk_xmac_miibus_statchg;
	} else {
		sc_if->sk_mii.mii_readreg = sk_marv_miibus_readreg;
		sc_if->sk_mii.mii_writereg = sk_marv_miibus_writereg;
		sc_if->sk_mii.mii_statchg = sk_marv_miibus_statchg;
	}

	ifmedia_init(&sc_if->sk_mii.mii_media, 0,
	    sk_ifmedia_upd, sk_ifmedia_sts);
	if (SK_IS_GENESIS(sc)) {
		mii_attach(self, &sc_if->sk_mii, 0xffffffff, MII_PHY_ANY,
		    MII_OFFSET_ANY, 0);
	} else {
		mii_attach(self, &sc_if->sk_mii, 0xffffffff, MII_PHY_ANY,
		    MII_OFFSET_ANY, MIIF_DOPAUSE);
	}
	if (LIST_FIRST(&sc_if->sk_mii.mii_phys) == NULL) {
		/* No PHY attached: fall back to manual media selection. */
		printf("%s: no PHY found!\n", sc_if->sk_dev.dv_xname);
		ifmedia_add(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_MANUAL,
			    0, NULL);
		ifmedia_set(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_MANUAL);
	} else
		ifmedia_set(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_AUTO);

	/* GEnesis polls link state via sk_tick; Yukon only runs mii_tick. */
	if (SK_IS_GENESIS(sc)) {
		timeout_set(&sc_if->sk_tick_ch, sk_tick, sc_if);
		timeout_add_sec(&sc_if->sk_tick_ch, 1);
	} else
		timeout_set(&sc_if->sk_tick_ch, sk_yukon_tick, sc_if);

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);

	shutdownhook_establish(skc_shutdown, sc);

	DPRINTFN(2, ("sk_attach: end\n"));
	return;

	/* Error unwind: release DMA resources in reverse allocation order. */
fail_3:
	bus_dmamap_destroy(sc->sc_dmatag, sc_if->sk_ring_map);
fail_2:
	bus_dmamem_unmap(sc->sc_dmatag, kva, sizeof(struct sk_ring_data));
fail_1:
	bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
fail:
	sc->sk_if[sa->skc_port] = NULL;
}
1273 
1274 int
1275 skcprint(void *aux, const char *pnp)
1276 {
1277 	struct skc_attach_args *sa = aux;
1278 
1279 	if (pnp)
1280 		printf("sk port %c at %s",
1281 		    (sa->skc_port == SK_PORT_A) ? 'A' : 'B', pnp);
1282 	else
1283 		printf(" port %c", (sa->skc_port == SK_PORT_A) ? 'A' : 'B');
1284 	return (UNCONF);
1285 }
1286 
1287 /*
1288  * Attach the interface. Allocate softc structures, do ifmedia
1289  * setup and ethernet/BPF attach.
1290  */
void
skc_attach(struct device *parent, struct device *self, void *aux)
{
	struct sk_softc *sc = (struct sk_softc *)self;
	struct pci_attach_args *pa = aux;
	struct skc_attach_args skca;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcireg_t command, memtype;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	bus_size_t size;
	u_int8_t skrs;
	char *revstr = NULL;

	DPRINTFN(2, ("begin skc_attach\n"));

	/*
	 * Handle power management nonsense.
	 */
	/* Capability ID 0x01 is PCI power management. */
	command = pci_conf_read(pc, pa->pa_tag, SK_PCI_CAPID) & 0x000000FF;

	if (command == 0x01) {
		command = pci_conf_read(pc, pa->pa_tag, SK_PCI_PWRMGMTCTRL);
		if (command & SK_PSTATE_MASK) {
			u_int32_t		iobase, membase, irq;

			/* Save important PCI config data. */
			iobase = pci_conf_read(pc, pa->pa_tag, SK_PCI_LOIO);
			membase = pci_conf_read(pc, pa->pa_tag, SK_PCI_LOMEM);
			irq = pci_conf_read(pc, pa->pa_tag, SK_PCI_INTLINE);

			/* Reset the power state. */
			printf("%s chip is in D%d power mode "
			    "-- setting to D0\n", sc->sk_dev.dv_xname,
			    command & SK_PSTATE_MASK);
			command &= 0xFFFFFFFC;
			pci_conf_write(pc, pa->pa_tag,
			    SK_PCI_PWRMGMTCTRL, command);

			/* Restore PCI config data. */
			pci_conf_write(pc, pa->pa_tag, SK_PCI_LOIO, iobase);
			pci_conf_write(pc, pa->pa_tag, SK_PCI_LOMEM, membase);
			pci_conf_write(pc, pa->pa_tag, SK_PCI_INTLINE, irq);
		}
	}

	/*
	 * Map control/status registers.
	 */
	memtype = pci_mapreg_type(pc, pa->pa_tag, SK_PCI_LOMEM);
	if (pci_mapreg_map(pa, SK_PCI_LOMEM, memtype, 0, &sc->sk_btag,
	    &sc->sk_bhandle, NULL, &size, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	sc->sc_dmatag = pa->pa_dmat;

	/* Identify the chip generation and revision. */
	sc->sk_type = sk_win_read_1(sc, SK_CHIPVER);
	sc->sk_rev = (sk_win_read_1(sc, SK_CONFIG) >> 4);

	/* bail out here if chip is not recognized */
	if (! SK_IS_GENESIS(sc) && ! SK_IS_YUKON(sc)) {
		printf(": unknown chip type: %d\n", sc->sk_type);
		goto fail_1;
	}
	DPRINTFN(2, ("skc_attach: allocate interrupt\n"));

	/* Allocate interrupt */
	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		goto fail_1;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sk_intrhand = pci_intr_establish(pc, ih, IPL_NET, sk_intr, sc,
	    self->dv_xname);
	if (sc->sk_intrhand == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail_1;
	}

	/* Reset the adapter. */
	sk_reset(sc);

	skrs = sk_win_read_1(sc, SK_EPROM0);
	if (SK_IS_GENESIS(sc)) {
		/* Read and save RAM size and RAMbuffer offset */
		switch(skrs) {
		case SK_RAMSIZE_512K_64:
			sc->sk_ramsize = 0x80000;
			sc->sk_rboff = SK_RBOFF_0;
			break;
		case SK_RAMSIZE_1024K_64:
			sc->sk_ramsize = 0x100000;
			sc->sk_rboff = SK_RBOFF_80000;
			break;
		case SK_RAMSIZE_1024K_128:
			sc->sk_ramsize = 0x100000;
			sc->sk_rboff = SK_RBOFF_0;
			break;
		case SK_RAMSIZE_2048K_128:
			sc->sk_ramsize = 0x200000;
			sc->sk_rboff = SK_RBOFF_0;
			break;
		default:
			printf(": unknown ram size: %d\n", skrs);
			goto fail_2;
			break;
		}
	} else {
		/* Yukon: 0 means 128K, otherwise skrs counts 4K blocks. */
		if (skrs == 0x00)
			sc->sk_ramsize = 0x20000;
		else
			sc->sk_ramsize = skrs * (1<<12);
		sc->sk_rboff = SK_RBOFF_0;
	}

	DPRINTFN(2, ("skc_attach: ramsize=%d (%dk), rboff=%d\n",
		     sc->sk_ramsize, sc->sk_ramsize / 1024,
		     sc->sk_rboff));

	/* Read and save physical media type */
	sc->sk_pmd = sk_win_read_1(sc, SK_PMDTYPE);

	/* PMD codes 'T' and '1' indicate copper media. */
	if (sc->sk_pmd == 'T' || sc->sk_pmd == '1')
		sc->sk_coppertype = 1;
	else
		sc->sk_coppertype = 0;

	switch (sc->sk_type) {
	case SK_GENESIS:
		sc->sk_name = "GEnesis";
		break;
	case SK_YUKON:
		sc->sk_name = "Yukon";
		break;
	case SK_YUKON_LITE:
		sc->sk_name = "Yukon Lite";
		break;
	case SK_YUKON_LP:
		sc->sk_name = "Yukon LP";
		break;
	default:
		sc->sk_name = "Yukon (Unknown)";
	}

	/* Yukon Lite Rev A0 needs special test, from sk98lin driver */
	if (sc->sk_type == SK_YUKON || sc->sk_type == SK_YUKON_LP) {
		u_int32_t flashaddr;
		u_int8_t testbyte;

		flashaddr = sk_win_read_4(sc, SK_EP_ADDR);

		/* test Flash-Address Register */
		sk_win_write_1(sc, SK_EP_ADDR+3, 0xff);
		testbyte = sk_win_read_1(sc, SK_EP_ADDR+3);

		if (testbyte != 0) {
			/* This is a Yukon Lite Rev A0 */
			sc->sk_type = SK_YUKON_LITE;
			sc->sk_rev = SK_YUKON_LITE_REV_A0;
			/* restore Flash-Address Register */
			sk_win_write_4(sc, SK_EP_ADDR, flashaddr);
		}
	}

	/* Map the Yukon Lite revision to a printable string. */
	if (sc->sk_type == SK_YUKON_LITE) {
		switch (sc->sk_rev) {
		case SK_YUKON_LITE_REV_A0:
			revstr = "A0";
			break;
		case SK_YUKON_LITE_REV_A1:
			revstr = "A1";
			break;
		case SK_YUKON_LITE_REV_A3:
			revstr = "A3";
			break;
		default:
			;
		}
	}

	/* Announce the product name. */
	printf(", %s", sc->sk_name);
	if (revstr != NULL)
		printf(" rev. %s", revstr);
	printf(" (0x%x): %s\n", sc->sk_rev, intrstr);

	/* One MAC, or two if the single-MAC config bit is clear. */
	sc->sk_macs = 1;

	if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC))
		sc->sk_macs++;

	/* Attach one sk(4) child interface per MAC (port). */
	skca.skc_port = SK_PORT_A;
	skca.skc_type = sc->sk_type;
	skca.skc_rev = sc->sk_rev;
	(void)config_found(&sc->sk_dev, &skca, skcprint);

	if (sc->sk_macs > 1) {
		skca.skc_port = SK_PORT_B;
		skca.skc_type = sc->sk_type;
		skca.skc_rev = sc->sk_rev;
		(void)config_found(&sc->sk_dev, &skca, skcprint);
	}

	/* Turn on the 'driver is loaded' LED. */
	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);

	return;

fail_2:
	pci_intr_disestablish(pc, sc->sk_intrhand);
fail_1:
	bus_space_unmap(sc->sk_btag, sc->sk_bhandle, size);
}
1510 
/*
 * Map an outgoing mbuf chain into TX descriptors starting at *txidx.
 * Returns 0 and advances *txidx past the consumed slots, or ENOBUFS
 * if no DMA map or not enough ring space is available.
 */
int
sk_encap(struct sk_if_softc *sc_if, struct mbuf *m_head, u_int32_t *txidx)
{
	struct sk_softc		*sc = sc_if->sk_softc;
	struct sk_tx_desc	*f = NULL;
	u_int32_t		frag, cur, sk_ctl;
	int			i;
	struct sk_txmap_entry	*entry;
	bus_dmamap_t		txmap;

	DPRINTFN(2, ("sk_encap\n"));

	/* Grab a spare DMA map for this packet. */
	entry = SIMPLEQ_FIRST(&sc_if->sk_txmap_head);
	if (entry == NULL) {
		DPRINTFN(2, ("sk_encap: no txmap available\n"));
		return (ENOBUFS);
	}
	txmap = entry->dmamap;

	cur = frag = *txidx;

#ifdef SK_DEBUG
	if (skdebug >= 2)
		sk_dump_mbuf(m_head);
#endif

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	if (bus_dmamap_load_mbuf(sc->sc_dmatag, txmap, m_head,
	    BUS_DMA_NOWAIT)) {
		DPRINTFN(2, ("sk_encap: dmamap failed\n"));
		return (ENOBUFS);
	}

	/* Keep two descriptors in reserve so the ring never fills up. */
	if (txmap->dm_nsegs > (SK_TX_RING_CNT - sc_if->sk_cdata.sk_tx_cnt - 2)) {
		DPRINTFN(2, ("sk_encap: too few descriptors free\n"));
		bus_dmamap_unload(sc->sc_dmatag, txmap);
		return (ENOBUFS);
	}

	DPRINTFN(2, ("sk_encap: dm_nsegs=%d\n", txmap->dm_nsegs));

	/* Sync the DMA map. */
	bus_dmamap_sync(sc->sc_dmatag, txmap, 0, txmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Build one descriptor per DMA segment. The first descriptor
	 * is tagged FIRSTFRAG but deliberately not given the OWN bit
	 * yet; all subsequent descriptors get OWN immediately.
	 */
	for (i = 0; i < txmap->dm_nsegs; i++) {
		f = &sc_if->sk_rdata->sk_tx_ring[frag];
		f->sk_data_lo = htole32(txmap->dm_segs[i].ds_addr);
		sk_ctl = txmap->dm_segs[i].ds_len | SK_OPCODE_DEFAULT;
		if (i == 0)
			sk_ctl |= SK_TXCTL_FIRSTFRAG;
		else
			sk_ctl |= SK_TXCTL_OWN;
		f->sk_ctl = htole32(sk_ctl);
		cur = frag;
		SK_INC(frag, SK_TX_RING_CNT);
	}

	/* The mbuf chain and DMA map are recorded on the last descriptor. */
	sc_if->sk_cdata.sk_tx_chain[cur].sk_mbuf = m_head;
	SIMPLEQ_REMOVE_HEAD(&sc_if->sk_txmap_head, link);

	sc_if->sk_cdata.sk_tx_map[cur] = entry;
	sc_if->sk_rdata->sk_tx_ring[cur].sk_ctl |=
		htole32(SK_TXCTL_LASTFRAG|SK_TXCTL_EOF_INTR);

	/* Sync descriptors before handing to chip */
	SK_CDTXSYNC(sc_if, *txidx, txmap->dm_nsegs,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/*
	 * Only now hand the first descriptor to the chip, so it can
	 * never start fetching a partially built chain.
	 */
	sc_if->sk_rdata->sk_tx_ring[*txidx].sk_ctl |=
		htole32(SK_TXCTL_OWN);

	/* Sync first descriptor to hand it off */
	SK_CDTXSYNC(sc_if, *txidx, 1, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	sc_if->sk_cdata.sk_tx_cnt += txmap->dm_nsegs;

#ifdef SK_DEBUG
	if (skdebug >= 2) {
		struct sk_tx_desc *desc;
		u_int32_t idx;
		for (idx = *txidx; idx != frag; SK_INC(idx, SK_TX_RING_CNT)) {
			desc = &sc_if->sk_rdata->sk_tx_ring[idx];
			sk_dump_txdesc(desc, idx);
		}
	}
#endif

	/* Advance the caller's producer index past the consumed slots. */
	*txidx = frag;

	DPRINTFN(2, ("sk_encap: completed successfully\n"));

	return (0);
}
1609 
void
sk_start(struct ifnet *ifp)
{
	struct sk_if_softc	*sc_if = ifp->if_softc;
	struct sk_softc		*sc = sc_if->sk_softc;
	struct mbuf		*m_head = NULL;
	u_int32_t		idx = sc_if->sk_cdata.sk_tx_prod;
	int			pkts = 0;

	DPRINTFN(2, ("sk_start\n"));

	/* Queue packets while the descriptor at 'idx' is free. */
	while (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf == NULL) {
		/* Peek only: the mbuf must stay queued if encap fails. */
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (sk_encap(sc_if, m_head, &idx)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* now we are committed to transmit the packet */
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		pkts++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
	}
	if (pkts == 0)
		return;

	/* Transmit */
	if (idx != sc_if->sk_cdata.sk_tx_prod) {
		sc_if->sk_cdata.sk_tx_prod = idx;
		/* Kick the transmit BMU to start fetching descriptors. */
		CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = 5;
	}
}
1661 
1662 
1663 void
1664 sk_watchdog(struct ifnet *ifp)
1665 {
1666 	struct sk_if_softc *sc_if = ifp->if_softc;
1667 
1668 	/*
1669 	 * Reclaim first as there is a possibility of losing Tx completion
1670 	 * interrupts.
1671 	 */
1672 	sk_txeof(sc_if);
1673 	if (sc_if->sk_cdata.sk_tx_cnt != 0) {
1674 		printf("%s: watchdog timeout\n", sc_if->sk_dev.dv_xname);
1675 
1676 		ifp->if_oerrors++;
1677 
1678 		sk_init(sc_if);
1679 	}
1680 }
1681 
1682 void
1683 skc_shutdown(void *v)
1684 {
1685 	struct sk_softc		*sc = v;
1686 
1687 	DPRINTFN(2, ("sk_shutdown\n"));
1688 
1689 	/* Turn off the 'driver is loaded' LED. */
1690 	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);
1691 
1692 	/*
1693 	 * Reset the GEnesis controller. Doing this should also
1694 	 * assert the resets on the attached XMAC(s).
1695 	 */
1696 	sk_reset(sc);
1697 }
1698 
1699 static __inline int
1700 sk_rxvalid(struct sk_softc *sc, u_int32_t stat, u_int32_t len)
1701 {
1702 	if (sc->sk_type == SK_GENESIS) {
1703 		if ((stat & XM_RXSTAT_ERRFRAME) == XM_RXSTAT_ERRFRAME ||
1704 		    XM_RXSTAT_BYTES(stat) != len)
1705 			return (0);
1706 	} else {
1707 		if ((stat & (YU_RXSTAT_CRCERR | YU_RXSTAT_LONGERR |
1708 		    YU_RXSTAT_MIIERR | YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC |
1709 		    YU_RXSTAT_JABBER)) != 0 ||
1710 		    (stat & YU_RXSTAT_RXOK) != YU_RXSTAT_RXOK ||
1711 		    YU_RXSTAT_BYTES(stat) != len)
1712 			return (0);
1713 	}
1714 
1715 	return (1);
1716 }
1717 
void
sk_rxeof(struct sk_if_softc *sc_if)
{
	struct sk_softc		*sc = sc_if->sk_softc;
	struct ifnet		*ifp = &sc_if->arpcom.ac_if;
	struct mbuf		*m;
	struct sk_chain		*cur_rx;
	struct sk_rx_desc	*cur_desc;
	int			i, cur, total_len = 0;
	u_int32_t		rxstat, sk_ctl;
	bus_dmamap_t		dmamap;
	u_int16_t		csum1, csum2;

	DPRINTFN(2, ("sk_rxeof\n"));

	i = sc_if->sk_cdata.sk_rx_prod;

	/* Walk the RX ring until we hit a descriptor the chip still owns. */
	for (;;) {
		cur = i;

		/* Sync the descriptor */
		SK_CDRXSYNC(sc_if, cur,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		sk_ctl = letoh32(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl);
		if ((sk_ctl & SK_RXCTL_OWN) != 0) {
			/* Invalidate the descriptor -- it's not ready yet */
			SK_CDRXSYNC(sc_if, cur, BUS_DMASYNC_PREREAD);
			sc_if->sk_cdata.sk_rx_prod = i;
			break;
		}

		cur_rx = &sc_if->sk_cdata.sk_rx_chain[cur];
		cur_desc = &sc_if->sk_rdata->sk_rx_ring[cur];
		dmamap = sc_if->sk_cdata.sk_rx_jumbo_map;

		bus_dmamap_sync(sc_if->sk_softc->sc_dmatag, dmamap, 0,
		    dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		rxstat = letoh32(cur_desc->sk_xmac_rxstat);
		m = cur_rx->sk_mbuf;
		cur_rx->sk_mbuf = NULL;
		total_len = SK_RXBYTES(letoh32(cur_desc->sk_ctl));

		/* Grab the hardware checksums before advancing the index. */
		csum1 = letoh16(sc_if->sk_rdata->sk_rx_ring[i].sk_csum1);
		csum2 = letoh16(sc_if->sk_rdata->sk_rx_ring[i].sk_csum2);

		SK_INC(i, SK_RX_RING_CNT);

		/*
		 * Drop frames that are fragmented across descriptors,
		 * mis-sized, or flagged bad by the MAC; the buffer is
		 * recycled straight back into the ring.
		 */
		if ((sk_ctl & (SK_RXCTL_STATUS_VALID | SK_RXCTL_FIRSTFRAG |
		    SK_RXCTL_LASTFRAG)) != (SK_RXCTL_STATUS_VALID |
		    SK_RXCTL_FIRSTFRAG | SK_RXCTL_LASTFRAG) ||
		    total_len < SK_MIN_FRAMELEN ||
		    total_len > SK_JUMBO_FRAMELEN ||
		    sk_rxvalid(sc, rxstat, total_len) == 0) {
			ifp->if_ierrors++;
			sk_newbuf(sc_if, cur, m, dmamap);
			continue;
		}

		/*
		 * Try to allocate a new jumbo buffer. If that
		 * fails, copy the packet to mbufs and put the
		 * jumbo buffer back in the ring so it can be
		 * re-used. If allocating mbufs fails, then we
		 * have to drop the packet.
		 */
		if (sk_newbuf(sc_if, cur, NULL, dmamap) == ENOBUFS) {
			struct mbuf		*m0;
			m0 = m_devget(mtod(m, char *) - ETHER_ALIGN,
			    total_len + ETHER_ALIGN, 0, ifp, NULL);
			sk_newbuf(sc_if, cur, m, dmamap);
			if (m0 == NULL) {
				ifp->if_ierrors++;
				continue;
			}
			/* m_devget copied ETHER_ALIGN extra bytes; trim. */
			m_adj(m0, ETHER_ALIGN);
			m = m0;
		} else {
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = total_len;
		}

		ifp->if_ipackets++;

		/* Validate IP/TCP/UDP checksums using the hardware sums. */
		sk_rxcsum(ifp, m, csum1, csum2);

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif

		/* pass it on. */
		ether_input_mbuf(ifp, m);
	}
}
1814 
1815 void
1816 sk_rxcsum(struct ifnet *ifp, struct mbuf *m, const u_int16_t csum1, const u_int16_t csum2)
1817 {
1818 	struct ether_header *eh;
1819 	struct ip *ip;
1820 	u_int8_t *pp;
1821 	int hlen, len, plen;
1822 	u_int16_t iph_csum, ipo_csum, ipd_csum, csum;
1823 
1824 	pp = mtod(m, u_int8_t *);
1825 	plen = m->m_pkthdr.len;
1826 	if (plen < sizeof(*eh))
1827 		return;
1828 	eh = (struct ether_header *)pp;
1829 	iph_csum = in_cksum_addword(csum1, (~csum2 & 0xffff));
1830 
1831 	if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
1832 		u_int16_t *xp = (u_int16_t *)pp;
1833 
1834 		xp = (u_int16_t *)pp;
1835 		if (xp[1] != htons(ETHERTYPE_IP))
1836 			return;
1837 		iph_csum = in_cksum_addword(iph_csum, (~xp[0] & 0xffff));
1838 		iph_csum = in_cksum_addword(iph_csum, (~xp[1] & 0xffff));
1839 		xp = (u_int16_t *)(pp + sizeof(struct ip));
1840 		iph_csum = in_cksum_addword(iph_csum, xp[0]);
1841 		iph_csum = in_cksum_addword(iph_csum, xp[1]);
1842 		pp += EVL_ENCAPLEN;
1843 	} else if (eh->ether_type != htons(ETHERTYPE_IP))
1844 		return;
1845 
1846 	pp += sizeof(*eh);
1847 	plen -= sizeof(*eh);
1848 
1849 	ip = (struct ip *)pp;
1850 
1851 	if (ip->ip_v != IPVERSION)
1852 		return;
1853 
1854 	hlen = ip->ip_hl << 2;
1855 	if (hlen < sizeof(struct ip))
1856 		return;
1857 	if (hlen > ntohs(ip->ip_len))
1858 		return;
1859 
1860 	/* Don't deal with truncated or padded packets. */
1861 	if (plen != ntohs(ip->ip_len))
1862 		return;
1863 
1864 	len = hlen - sizeof(struct ip);
1865 	if (len > 0) {
1866 		u_int16_t *p;
1867 
1868 		p = (u_int16_t *)(ip + 1);
1869 		ipo_csum = 0;
1870 		for (ipo_csum = 0; len > 0; len -= sizeof(*p), p++)
1871 			ipo_csum = in_cksum_addword(ipo_csum, *p);
1872 		iph_csum = in_cksum_addword(iph_csum, ipo_csum);
1873 		ipd_csum = in_cksum_addword(csum2, (~ipo_csum & 0xffff));
1874 	} else
1875 		ipd_csum = csum2;
1876 
1877 	if (iph_csum != 0xffff)
1878 		return;
1879 	m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
1880 
1881 	if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
1882 		return;                 /* ip frag, we're done for now */
1883 
1884 	pp += hlen;
1885 
1886 	/* Only know checksum protocol for udp/tcp */
1887 	if (ip->ip_p == IPPROTO_UDP) {
1888 		struct udphdr *uh = (struct udphdr *)pp;
1889 
1890 		if (uh->uh_sum == 0)    /* udp with no checksum */
1891 			return;
1892 	} else if (ip->ip_p != IPPROTO_TCP)
1893 		return;
1894 
1895 	csum = in_cksum_phdr(ip->ip_src.s_addr, ip->ip_dst.s_addr,
1896 	    htonl(ntohs(ip->ip_len) - hlen + ip->ip_p) + ipd_csum);
1897 	if (csum == 0xffff) {
1898 		m->m_pkthdr.csum_flags |= (ip->ip_p == IPPROTO_TCP) ?
1899 		    M_TCP_CSUM_IN_OK : M_UDP_CSUM_IN_OK;
1900 	}
1901 }
1902 
void
sk_txeof(struct sk_if_softc *sc_if)
{
	struct sk_softc		*sc = sc_if->sk_softc;
	struct sk_tx_desc	*cur_tx;
	struct ifnet		*ifp = &sc_if->arpcom.ac_if;
	u_int32_t		idx, sk_ctl;
	struct sk_txmap_entry	*entry;

	DPRINTFN(2, ("sk_txeof\n"));

	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	idx = sc_if->sk_cdata.sk_tx_cons;
	while (idx != sc_if->sk_cdata.sk_tx_prod) {
		SK_CDTXSYNC(sc_if, idx, 1,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		cur_tx = &sc_if->sk_rdata->sk_tx_ring[idx];
		sk_ctl = letoh32(cur_tx->sk_ctl);
#ifdef SK_DEBUG
		if (skdebug >= 2)
			sk_dump_txdesc(cur_tx, idx);
#endif
		/* Chip still owns this descriptor: stop reclaiming. */
		if (sk_ctl & SK_TXCTL_OWN) {
			SK_CDTXSYNC(sc_if, idx, 1, BUS_DMASYNC_PREREAD);
			break;
		}
		if (sk_ctl & SK_TXCTL_LASTFRAG)
			ifp->if_opackets++;
		/* sk_encap stored the mbuf/map on the packet's last slot. */
		if (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf != NULL) {
			entry = sc_if->sk_cdata.sk_tx_map[idx];

			m_freem(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf);
			sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf = NULL;

			bus_dmamap_sync(sc->sc_dmatag, entry->dmamap, 0,
			    entry->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);

			bus_dmamap_unload(sc->sc_dmatag, entry->dmamap);
			/* Return the DMA map to the free list for reuse. */
			SIMPLEQ_INSERT_TAIL(&sc_if->sk_txmap_head, entry,
					  link);
			sc_if->sk_cdata.sk_tx_map[idx] = NULL;
		}
		sc_if->sk_cdata.sk_tx_cnt--;
		SK_INC(idx, SK_TX_RING_CNT);
	}
	/* Keep the watchdog armed while packets remain in flight. */
	ifp->if_timer = sc_if->sk_cdata.sk_tx_cnt > 0 ? 5 : 0;

	/* Allow sk_start to queue again once there is room in the ring. */
	if (sc_if->sk_cdata.sk_tx_cnt < SK_TX_RING_CNT - 2)
		ifp->if_flags &= ~IFF_OACTIVE;

	sc_if->sk_cdata.sk_tx_cons = idx;
}
1959 
void
sk_tick(void *xsc_if)
{
	struct sk_if_softc *sc_if = xsc_if;
	struct mii_data *mii = &sc_if->sk_mii;
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	int i;

	DPRINTFN(2, ("sk_tick\n"));

	/* Nothing to do while the interface is down. */
	if (!(ifp->if_flags & IFF_UP))
		return;

	/* Broadcom PHYs are serviced via their interrupt handler instead. */
	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		sk_intr_bcom(sc_if);
		return;
	}

	/*
	 * According to SysKonnect, the correct way to verify that
	 * the link has come back up is to poll bit 0 of the GPIO
	 * register three times. This pin has the signal from the
	 * link sync pin connected to it; if we read the same link
	 * state 3 times in a row, we know the link is up.
	 */
	for (i = 0; i < 3; i++) {
		if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET)
			break;
	}

	/* Link not stable yet: poll again in a second. */
	if (i != 3) {
		timeout_add_sec(&sc_if->sk_tick_ch, 1);
		return;
	}

	/* Turn the GP0 interrupt back on. */
	SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
	SK_XM_READ_2(sc_if, XM_ISR);
	mii_tick(mii);
	/* Link is up: stop polling until an interrupt re-arms us. */
	timeout_del(&sc_if->sk_tick_ch);
}
2001 
2002 void
2003 sk_yukon_tick(void *xsc_if)
2004 {
2005 	struct sk_if_softc *sc_if = xsc_if;
2006 	struct mii_data *mii = &sc_if->sk_mii;
2007 	int s;
2008 
2009 	s = splnet();
2010 	mii_tick(mii);
2011 	splx(s);
2012 	timeout_add_sec(&sc_if->sk_tick_ch, 1);
2013 }
2014 
void
sk_intr_bcom(struct sk_if_softc *sc_if)
{
	struct mii_data *mii = &sc_if->sk_mii;
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	int status;

	DPRINTFN(2, ("sk_intr_bcom\n"));

	/* Pause the MAC while poking at the PHY. */
	SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);

	/*
	 * Read the PHY interrupt register to make sure
	 * we clear any pending interrupts.
	 */
	status = sk_xmac_miibus_readreg((struct device *)sc_if,
	    SK_PHYADDR_BCOM, BRGPHY_MII_ISR);

	/* NOTE(review): relies on sk_init_xmac to re-enable RX/TX here. */
	if (!(ifp->if_flags & IFF_RUNNING)) {
		sk_init_xmac(sc_if);
		return;
	}

	if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) {
		int lstat;
		lstat = sk_xmac_miibus_readreg((struct device *)sc_if,
		    SK_PHYADDR_BCOM, BRGPHY_MII_AUXSTS);

		if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) {
			/* Link lost: renegotiate. */
			mii_mediachg(mii);
			/* Turn off the link LED. */
			SK_IF_WRITE_1(sc_if, 0,
			    SK_LINKLED1_CTL, SK_LINKLED_OFF);
			sc_if->sk_link = 0;
		} else if (status & BRGPHY_ISR_LNK_CHG) {
			/* Link came up: unmask PHY interrupts. */
			sk_xmac_miibus_writereg((struct device *)sc_if,
			    SK_PHYADDR_BCOM, BRGPHY_MII_IMR, 0xFF00);
			mii_tick(mii);
			sc_if->sk_link = 1;
			/* Turn on the link LED. */
			SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
			    SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF|
			    SK_LINKLED_BLINK_OFF);
		} else {
			/* Negotiation still in progress: keep polling. */
			mii_tick(mii);
			timeout_add_sec(&sc_if->sk_tick_ch, 1);
		}
	}

	/* Resume the MAC. */
	SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
}
2066 
/*
 * Service an XMAC (GENESIS) MAC interrupt.  Reads the pending sources
 * from XM_ISR; link-related events on the XMAC's internal PHY schedule
 * the one-second link tick, and TX/RX FIFO under/overruns are handled
 * by flushing the corresponding FIFO.
 */
void
sk_intr_xmac(struct sk_if_softc	*sc_if)
{
	u_int16_t status = SK_XM_READ_2(sc_if, XM_ISR);

	DPRINTFN(2, ("sk_intr_xmac\n"));

	/* Link events are handled here only for the internal XMAC PHY;
	 * external PHYs (e.g. BCOM) are serviced by sk_intr_bcom(). */
	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) {
		if (status & XM_ISR_GP0_SET) {
			/* Mask further GP0 events until the tick runs. */
			SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
			timeout_add_sec(&sc_if->sk_tick_ch, 1);
		}

		if (status & XM_ISR_AUTONEG_DONE) {
			timeout_add_sec(&sc_if->sk_tick_ch, 1);
		}
	}

	/* NOTE(review): the ISR value is tested against XM_IMR_* masks
	 * below; presumably the IMR and ISR bit positions match --
	 * confirm against the XMAC register definitions. */
	if (status & XM_IMR_TX_UNDERRUN)
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO);

	if (status & XM_IMR_RX_OVERRUN)
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO);
}
2091 
/*
 * Service a GMAC (Yukon) MAC interrupt: acknowledge RX FIFO overruns
 * and TX FIFO underruns by writing the corresponding clear bits to the
 * MAC FIFO control/test registers.
 */
void
sk_intr_yukon(struct sk_if_softc *sc_if)
{
	u_int8_t status;

	status = SK_IF_READ_1(sc_if, 0, SK_GMAC_ISR);
	/* RX overrun: clear the condition in the RX MAC FIFO. */
	if ((status & SK_GMAC_INT_RX_OVER) != 0) {
		SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST,
		    SK_RFCTL_RX_FIFO_OVER);
	}
	/* TX underrun: clear the condition in the TX MAC FIFO. */
	if ((status & SK_GMAC_INT_TX_UNDER) != 0) {
		SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST,
		    SK_TFCTL_TX_FIFO_UNDER);
	}

	DPRINTFN(2, ("sk_intr_yukon status=%#x\n", status));
}
2111 
/*
 * Interrupt handler shared by both ports (A and B) of the controller.
 * Claims the interrupt if any unmasked source bit is set, services
 * RX/TX end-of-frame and MAC interrupts for each configured port, then
 * re-arms the interrupt mask and restarts output on any port with
 * queued packets.  Returns nonzero when the interrupt was ours
 * (shared-interrupt protocol).
 */
int
sk_intr(void *xsc)
{
	struct sk_softc		*sc = xsc;
	struct sk_if_softc	*sc_if0 = sc->sk_if[SK_PORT_A];
	struct sk_if_softc	*sc_if1 = sc->sk_if[SK_PORT_B];
	struct ifnet		*ifp0 = NULL, *ifp1 = NULL;
	u_int32_t		status;
	int			claimed = 0;

	/* No bits set: not our interrupt.  All ones: presumably the
	 * device is gone (failed PCI reads return 0xffffffff) -- also
	 * not ours. */
	status = CSR_READ_4(sc, SK_ISSR);
	if (status == 0 || status == 0xffffffff)
		return (0);

	if (sc_if0 != NULL)
		ifp0 = &sc_if0->arpcom.ac_if;
	if (sc_if1 != NULL)
		ifp1 = &sc_if1->arpcom.ac_if;

	/* Loop until no unmasked interrupt sources remain pending. */
	for (; (status &= sc->sk_intrmask) != 0;) {
		claimed = 1;

		/* Handle receive interrupts first. */
		if (sc_if0 && (status & SK_ISR_RX1_EOF)) {
			sk_rxeof(sc_if0);
			CSR_WRITE_4(sc, SK_BMU_RX_CSR0,
			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
		}
		if (sc_if1 && (status & SK_ISR_RX2_EOF)) {
			sk_rxeof(sc_if1);
			CSR_WRITE_4(sc, SK_BMU_RX_CSR1,
			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
		}

		/* Then transmit interrupts. */
		if (sc_if0 && (status & SK_ISR_TX1_S_EOF)) {
			sk_txeof(sc_if0);
			CSR_WRITE_4(sc, SK_BMU_TXS_CSR0,
			    SK_TXBMU_CLR_IRQ_EOF);
		}
		if (sc_if1 && (status & SK_ISR_TX2_S_EOF)) {
			sk_txeof(sc_if1);
			CSR_WRITE_4(sc, SK_BMU_TXS_CSR1,
			    SK_TXBMU_CLR_IRQ_EOF);
		}

		/* Then MAC interrupts (only while the interface is up). */
		if (sc_if0 && (status & SK_ISR_MAC1) &&
		    (ifp0->if_flags & IFF_RUNNING)) {
			if (SK_IS_GENESIS(sc))
				sk_intr_xmac(sc_if0);
			else
				sk_intr_yukon(sc_if0);
		}

		if (sc_if1 && (status & SK_ISR_MAC2) &&
		    (ifp1->if_flags & IFF_RUNNING)) {
			if (SK_IS_GENESIS(sc))
				sk_intr_xmac(sc_if1);
			else
				sk_intr_yukon(sc_if1);

		}

		/* External register interrupt: used by ports that have a
		 * Broadcom PHY hanging off the XMAC. */
		if (status & SK_ISR_EXTERNAL_REG) {
			if (sc_if0 != NULL &&
			    sc_if0->sk_phytype == SK_PHYTYPE_BCOM)
				sk_intr_bcom(sc_if0);

			if (sc_if1 != NULL &&
			    sc_if1->sk_phytype == SK_PHYTYPE_BCOM)
				sk_intr_bcom(sc_if1);
		}
		/* Re-sample the source register for the next pass. */
		status = CSR_READ_4(sc, SK_ISSR);
	}

	/* Re-arm the interrupt mask. */
	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	/* Restart output on either port if packets are waiting. */
	if (ifp0 != NULL && !IFQ_IS_EMPTY(&ifp0->if_snd))
		sk_start(ifp0);
	if (ifp1 != NULL && !IFQ_IS_EMPTY(&ifp1->if_snd))
		sk_start(ifp1);

	return (claimed);
}
2197 
/*
 * Bring the XMAC (GENESIS) MAC out of reset and program it for
 * operation: external-PHY setup (BCOM), station address, broadcast and
 * error-frame acceptance, transmit threshold, promiscuous/multicast
 * filters, interrupt mask, and MAC-arbiter timing appropriate for the
 * detected XMAC revision.
 */
void
sk_init_xmac(struct sk_if_softc	*sc_if)
{
	struct sk_softc		*sc = sc_if->sk_softc;
	struct ifnet		*ifp = &sc_if->arpcom.ac_if;
	/* Magic register/value pairs for buggy early BCM5400 PHYs;
	 * see the comment at the table's point of use below. */
	struct sk_bcom_hack     bhack[] = {
	{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 },
	{ 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 },
	{ 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
	{ 0, 0 } };

	DPRINTFN(2, ("sk_init_xmac\n"));

	/* Unreset the XMAC. */
	SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET);
	DELAY(1000);

	/* Reset the XMAC's internal state. */
	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);

	/* Save the XMAC II revision */
	sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID));

	/*
	 * Perform additional initialization for external PHYs,
	 * namely for the 1000baseTX cards that use the XMAC's
	 * GMII mode.
	 */
	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		int			i = 0;
		u_int32_t		val;

		/* Take PHY out of reset. */
		val = sk_win_read_4(sc, SK_GPIO);
		if (sc_if->sk_port == SK_PORT_A)
			val |= SK_GPIO_DIR0|SK_GPIO_DAT0;
		else
			val |= SK_GPIO_DIR2|SK_GPIO_DAT2;
		sk_win_write_4(sc, SK_GPIO, val);

		/* Enable GMII mode on the XMAC. */
		SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE);

		/* Reset the PHY, then program its interrupt mask. */
		sk_xmac_miibus_writereg((struct device *)sc_if,
		    SK_PHYADDR_BCOM, BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET);
		DELAY(10000);
		sk_xmac_miibus_writereg((struct device *)sc_if,
		    SK_PHYADDR_BCOM, BRGPHY_MII_IMR, 0xFFF0);

		/*
		 * Early versions of the BCM5400 apparently have
		 * a bug that requires them to have their reserved
		 * registers initialized to some magic values. I don't
		 * know what the numbers do, I'm just the messenger.
		 */
		if (sk_xmac_miibus_readreg((struct device *)sc_if,
		    SK_PHYADDR_BCOM, 0x03) == 0x6041) {
			while(bhack[i].reg) {
				sk_xmac_miibus_writereg((struct device *)sc_if,
				    SK_PHYADDR_BCOM, bhack[i].reg,
				    bhack[i].val);
				i++;
			}
		}
	}

	/* Set station address: loaded as three 16-bit words. */
	SK_XM_WRITE_2(sc_if, XM_PAR0,
	    letoh16(*(u_int16_t *)(&sc_if->arpcom.ac_enaddr[0])));
	SK_XM_WRITE_2(sc_if, XM_PAR1,
	    letoh16(*(u_int16_t *)(&sc_if->arpcom.ac_enaddr[2])));
	SK_XM_WRITE_2(sc_if, XM_PAR2,
	    letoh16(*(u_int16_t *)(&sc_if->arpcom.ac_enaddr[4])));
	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION);

	if (ifp->if_flags & IFF_BROADCAST)
		SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
	else
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);

	/* We don't need the FCS appended to the packet. */
	SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS);

	/* We want short frames padded to 60 bytes. */
	SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD);

	/*
	 * Enable the reception of all error frames. This is
	 * a necessary evil due to the design of the XMAC. The
	 * XMAC's receive FIFO is only 8K in size, however jumbo
	 * frames can be up to 9000 bytes in length. When bad
	 * frame filtering is enabled, the XMAC's RX FIFO operates
	 * in 'store and forward' mode. For this to work, the
	 * entire frame has to fit into the FIFO, but that means
	 * that jumbo frames larger than 8192 bytes will be
	 * truncated. Disabling all bad frame filtering causes
	 * the RX FIFO to operate in streaming mode, in which
	 * case the XMAC will start transferring frames out of the
	 * RX FIFO as soon as the FIFO threshold is reached.
	 */
	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES|
	    XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS|
	    XM_MODE_RX_INRANGELEN);

	SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);

	/*
	 * Bump up the transmit threshold. This helps hold off transmit
	 * underruns when we're blasting traffic from both ports at once.
	 */
	SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH);

	/* Set promiscuous mode */
	sk_setpromisc(sc_if);

	/* Set multicast filter */
	sk_setmulti(sc_if);

	/* Clear pending interrupts (read clears), then enable: the
	 * internal PHY gets the normal mask, external PHYs are masked
	 * off entirely (all ones) since they interrupt via the
	 * external register source instead. */
	SK_XM_READ_2(sc_if, XM_ISR);
	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
		SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS);
	else
		SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);

	/* Configure MAC arbiter timing per XMAC revision. */
	switch(sc_if->sk_xmac_rev) {
	case XM_XMAC_REV_B2:
		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
		break;
	case XM_XMAC_REV_C1:
		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1);
		/* NOTE(review): rev C1 also uses the B2 recovery value;
		 * presumably intentional -- confirm against the datasheet. */
		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
		break;
	default:
		break;
	}
	sk_win_write_2(sc, SK_MACARB_CTL,
	    SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF);

	/* Start with link assumed up; sk_intr_bcom() clears this on
	 * link loss. */
	sc_if->sk_link = 1;
}
2355 
/*
 * Reset and initialize the GMAC/GPHY pair on a Yukon-class chip:
 * applies the COMA-mode workaround for Yukon Lite rev A3+, resets and
 * configures the PHY (copper vs. fiber), clears the MIB counters, sets
 * receive/transmit/serial-mode parameters, loads the station address,
 * programs the filters, and configures the RX/TX MAC FIFOs.
 */
void sk_init_yukon(struct sk_if_softc *sc_if)
{
	u_int32_t		phy, v;
	u_int16_t		reg;
	struct sk_softc		*sc;
	int			i;

	sc = sc_if->sk_softc;

	DPRINTFN(2, ("sk_init_yukon: start: sk_csr=%#x\n",
		     CSR_READ_4(sc_if->sk_softc, SK_CSR)));

	if (sc->sk_type == SK_YUKON_LITE &&
	    sc->sk_rev >= SK_YUKON_LITE_REV_A3) {
		/*
		 * Workaround code for COMA mode, set PHY reset.
		 * Otherwise it will not correctly take chip out of
		 * powerdown (coma)
		 */
		v = sk_win_read_4(sc, SK_GPIO);
		v |= SK_GPIO_DIR9 | SK_GPIO_DAT9;
		sk_win_write_4(sc, SK_GPIO, v);
	}

	DPRINTFN(6, ("sk_init_yukon: 1\n"));

	/* GMAC and GPHY Reset */
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET);
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
	DELAY(1000);

	DPRINTFN(6, ("sk_init_yukon: 2\n"));

	if (sc->sk_type == SK_YUKON_LITE &&
	    sc->sk_rev >= SK_YUKON_LITE_REV_A3) {
		/*
		 * Workaround code for COMA mode, clear PHY reset
		 */
		v = sk_win_read_4(sc, SK_GPIO);
		v |= SK_GPIO_DIR9;
		v &= ~SK_GPIO_DAT9;
		sk_win_write_4(sc, SK_GPIO, v);
	}

	/* Base GPHY configuration; media type selected below. */
	phy = SK_GPHY_INT_POL_HI | SK_GPHY_DIS_FC | SK_GPHY_DIS_SLEEP |
		SK_GPHY_ENA_XC | SK_GPHY_ANEG_ALL | SK_GPHY_ENA_PAUSE;

	if (sc->sk_coppertype)
		phy |= SK_GPHY_COPPER;
	else
		phy |= SK_GPHY_FIBER;

	DPRINTFN(3, ("sk_init_yukon: phy=%#x\n", phy));

	/* Pulse the PHY through reset with the chosen configuration. */
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_SET);
	DELAY(1000);
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_CLEAR);
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF |
		      SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR);

	DPRINTFN(3, ("sk_init_yukon: gmac_ctrl=%#x\n",
		     SK_IF_READ_4(sc_if, 0, SK_GMAC_CTRL)));

	DPRINTFN(6, ("sk_init_yukon: 3\n"));

	/* unused read of the interrupt source register */
	DPRINTFN(6, ("sk_init_yukon: 4\n"));
	SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);

	DPRINTFN(6, ("sk_init_yukon: 4a\n"));
	reg = SK_YU_READ_2(sc_if, YUKON_PAR);
	DPRINTFN(6, ("sk_init_yukon: YUKON_PAR=%#x\n", reg));

	/* MIB Counter Clear Mode set */
        reg |= YU_PAR_MIB_CLR;
	DPRINTFN(6, ("sk_init_yukon: YUKON_PAR=%#x\n", reg));
	DPRINTFN(6, ("sk_init_yukon: 4b\n"));
	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);

	/* MIB Counter Clear Mode clear */
	DPRINTFN(6, ("sk_init_yukon: 5\n"));
        reg &= ~YU_PAR_MIB_CLR;
	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);

	/* receive control reg */
	DPRINTFN(6, ("sk_init_yukon: 7\n"));
	SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR);

	/* transmit parameter register */
	DPRINTFN(6, ("sk_init_yukon: 8\n"));
	SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) |
		      YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) );

	/* serial mode register */
	DPRINTFN(6, ("sk_init_yukon: 9\n"));
	SK_YU_WRITE_2(sc_if, YUKON_SMR, YU_SMR_DATA_BLIND(0x1c) |
		      YU_SMR_MFL_VLAN | YU_SMR_MFL_JUMBO |
		      YU_SMR_IPG_DATA(0x1e));

	DPRINTFN(6, ("sk_init_yukon: 10\n"));
	/* Setup Yukon's address */
	for (i = 0; i < 3; i++) {
		/* Write Source Address 1 (unicast filter) */
		SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4,
			      sc_if->arpcom.ac_enaddr[i * 2] |
			      sc_if->arpcom.ac_enaddr[i * 2 + 1] << 8);
	}

	/* Write Source Address 2 from the chip's per-port EEPROM copy. */
	for (i = 0; i < 3; i++) {
		reg = sk_win_read_2(sc_if->sk_softc,
				    SK_MAC1_0 + i * 2 + sc_if->sk_port * 8);
		SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4, reg);
	}

	/* Set promiscuous mode */
	sk_setpromisc(sc_if);

	/* Set multicast filter */
	DPRINTFN(6, ("sk_init_yukon: 11\n"));
	sk_setmulti(sc_if);

	/* enable interrupt mask for counter overflows */
	DPRINTFN(6, ("sk_init_yukon: 12\n"));
	SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0);
	SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0);
	SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0);

	/* Configure RX MAC FIFO Flush Mask */
	v = YU_RXSTAT_FOFL | YU_RXSTAT_CRCERR | YU_RXSTAT_MIIERR |
	    YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC | YU_RXSTAT_RUNT |
	    YU_RXSTAT_JABBER;
	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_MASK, v);

	/* Disable RX MAC FIFO Flush for YUKON-Lite Rev. A0 only */
	/* NOTE(review): SK_TFCTL_OPERATION_ON (a TX FIFO constant) is
	 * used to program the RX FIFO here; presumably the TFCTL and
	 * RFCTL OPERATION_ON bits share the same value -- confirm
	 * against the register definitions. */
	if (sc->sk_type == SK_YUKON_LITE && sc->sk_rev == SK_YUKON_LITE_REV_A0)
		v = SK_TFCTL_OPERATION_ON;
	else
		v = SK_TFCTL_OPERATION_ON | SK_RFCTL_FIFO_FLUSH_ON;
	/* Configure RX MAC FIFO */
	SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR);
	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_CTRL_TEST, v);

	/* Increase flush threshold to 64 bytes */
	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_THRESHOLD,
	    SK_RFCTL_FIFO_THRESHOLD + 1);

	/* Configure TX MAC FIFO */
	SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR);
	SK_IF_WRITE_2(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON);

	DPRINTFN(6, ("sk_init_yukon: end\n"));
}
2508 
2509 /*
2510  * Note that to properly initialize any part of the GEnesis chip,
2511  * you first have to take it out of reset mode.
2512  */
/*
 * Interface init entry point (ioctl/ifconfig path).  Stops any pending
 * I/O, brings up the MAC for the chip family (XMAC for GENESIS, GMAC
 * for Yukon), configures the descriptor poll timer, RAM buffers and
 * BMUs, allocates the RX/TX rings, enables interrupts for this port,
 * and marks the interface running.  Runs at splnet().
 */
void
sk_init(void *xsc_if)
{
	struct sk_if_softc	*sc_if = xsc_if;
	struct sk_softc		*sc = sc_if->sk_softc;
	struct ifnet		*ifp = &sc_if->arpcom.ac_if;
	struct mii_data		*mii = &sc_if->sk_mii;
	int			s;

	DPRINTFN(2, ("sk_init\n"));

	s = splnet();

	/* Cancel pending I/O and free all RX/TX buffers. */
	sk_stop(sc_if);

	if (SK_IS_GENESIS(sc)) {
		/* Configure LINK_SYNC LED */
		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON);
		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
			      SK_LINKLED_LINKSYNC_ON);

		/* Configure RX LED */
		SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL,
			      SK_RXLEDCTL_COUNTER_START);

		/* Configure TX LED */
		SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL,
			      SK_TXLEDCTL_COUNTER_START);
	}

	/*
	 * Configure descriptor poll timer
	 *
	 * The SK-NET GENESIS data sheet says there is a possibility of
	 * losing the Start transmit command due to CPU/cache related
	 * interim storage problems under certain conditions. The
	 * document recommends a polling mechanism to send a Start
	 * transmit command to initiate transfer of ready descriptors
	 * regularly. To cope with this issue sk(4) now enables the
	 * descriptor poll timer to initiate descriptor processing
	 * periodically as defined by SK_DPT_TIMER_MAX. However sk(4)
	 * still issues SK_TXBMU_TX_START to the Tx BMU to get fast
	 * execution of the Tx command instead of waiting for the next
	 * descriptor polling time. The same rule may apply to the Rx
	 * side too but it seems that is not needed at the moment.
	 * Since sk(4) uses descriptor polling as a last resort there is
	 * no need to set a smaller polling time than the maximum
	 * allowable one.
	 */
	SK_IF_WRITE_4(sc_if, 0, SK_DPT_INIT, SK_DPT_TIMER_MAX);

	/* Configure I2C registers (placeholder: nothing done here) */

	/* Configure XMAC(s) */
	switch (sc->sk_type) {
	case SK_GENESIS:
		sk_init_xmac(sc_if);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		sk_init_yukon(sc_if);
		break;
	}
	mii_mediachg(mii);

	if (SK_IS_GENESIS(sc)) {
		/* Configure MAC FIFOs */
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET);
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END);
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON);

		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET);
		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END);
		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON);
	}

	/* Configure transmit arbiter(s) */
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL,
	    SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON);

	/* Configure RAMbuffers: unreset, set start/end/read/write
	 * pointers from the per-port RAM window computed at attach. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);

	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON);

	/* Configure BMUs */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
	    SK_RX_RING_ADDR(sc_if, 0));
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI, 0);

	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO,
            SK_TX_RING_ADDR(sc_if, 0));
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI, 0);

	/* Init descriptors; on failure tear everything back down. */
	if (sk_init_rx_ring(sc_if) == ENOBUFS) {
		printf("%s: initialization failed: no "
		    "memory for rx buffers\n", sc_if->sk_dev.dv_xname);
		sk_stop(sc_if);
		splx(s);
		return;
	}

	if (sk_init_tx_ring(sc_if) == ENOBUFS) {
		printf("%s: initialization failed: no "
		    "memory for tx buffers\n", sc_if->sk_dev.dv_xname);
		sk_stop(sc_if);
		splx(s);
		return;
	}

	/* Configure interrupt handling: clear pending sources (read),
	 * then enable this port's interrupts in the shared mask. */
	CSR_READ_4(sc, SK_ISSR);
	if (sc_if->sk_port == SK_PORT_A)
		sc->sk_intrmask |= SK_INTRS1;
	else
		sc->sk_intrmask |= SK_INTRS2;

	sc->sk_intrmask |= SK_ISR_EXTERNAL_REG;

	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	/* Start BMUs. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START);

	if (SK_IS_GENESIS(sc)) {
		/* Enable XMACs TX and RX state machines */
		SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE);
		SK_XM_SETBIT_2(sc_if, XM_MMUCMD,
			       XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
	}

	if (SK_IS_YUKON(sc)) {
		/* Enable GMAC TX and RX. */
		u_int16_t reg = SK_YU_READ_2(sc_if, YUKON_GPCR);
		reg |= YU_GPCR_TXEN | YU_GPCR_RXEN;
		SK_YU_WRITE_2(sc_if, YUKON_GPCR, reg);
	}

	/* Activate descriptor polling timer */
	SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_START);
	/* start transfer of Tx descriptors */
	CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Yukon link state is polled via the tick timer. */
	if (SK_IS_YUKON(sc))
		timeout_add_sec(&sc_if->sk_tick_ch, 1);

	splx(s);
}
2677 
/*
 * Stop the interface: cancel the tick timer, halt the Tx/Rx BMUs
 * (with bounded busy-wait), put the external PHY back into reset,
 * reset the MAC and FIFOs, disable this port's interrupts, and free
 * all mbufs and DMA maps still attached to the rings.
 */
void
sk_stop(struct sk_if_softc *sc_if)
{
	struct sk_softc		*sc = sc_if->sk_softc;
	struct ifnet		*ifp = &sc_if->arpcom.ac_if;
	struct sk_txmap_entry	*dma;
	int			i;
	u_int32_t		val;

	DPRINTFN(2, ("sk_stop\n"));

	timeout_del(&sc_if->sk_tick_ch);

	ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE);

	/* stop Tx descriptor polling timer */
	SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_STOP);
	/* stop transfer of Tx descriptors; poll until the stop bit
	 * clears or SK_TIMEOUT iterations elapse */
	CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_STOP);
	for (i = 0; i < SK_TIMEOUT; i++) {
		val = CSR_READ_4(sc, sc_if->sk_tx_bmu);
		if (!(val & SK_TXBMU_TX_STOP))
			break;
		DELAY(1);
	}
	if (i == SK_TIMEOUT)
		printf("%s: cannot stop transfer of Tx descriptors\n",
		      sc_if->sk_dev.dv_xname);
	/* stop transfer of Rx descriptors, same polling scheme */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_STOP);
	for (i = 0; i < SK_TIMEOUT; i++) {
		val = SK_IF_READ_4(sc_if, 0, SK_RXQ1_BMU_CSR);
		if (!(val & SK_RXBMU_RX_STOP))
			break;
		DELAY(1);
	}
	if (i == SK_TIMEOUT)
		printf("%s: cannot stop transfer of Rx descriptors\n",
		      sc_if->sk_dev.dv_xname);

	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		u_int32_t		val;

		/* Put PHY back into reset. */
		val = sk_win_read_4(sc, SK_GPIO);
		if (sc_if->sk_port == SK_PORT_A) {
			val |= SK_GPIO_DIR0;
			val &= ~SK_GPIO_DAT0;
		} else {
			val |= SK_GPIO_DIR2;
			val &= ~SK_GPIO_DAT2;
		}
		sk_win_write_4(sc, SK_GPIO, val);
	}

	/* Turn off various components of this interface. */
	/* NOTE(review): the XM_* (XMAC) accesses here and below are
	 * performed even on Yukon chips; presumably harmless there --
	 * confirm against the chip documentation. */
	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
	switch (sc->sk_type) {
	case SK_GENESIS:
		SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL,
			      SK_TXMACCTL_XMAC_RESET);
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		SK_IF_WRITE_1(sc_if,0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET);
		SK_IF_WRITE_1(sc_if,0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET);
		break;
	}
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
	SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);

	/* Disable interrupts */
	if (sc_if->sk_port == SK_PORT_A)
		sc->sk_intrmask &= ~SK_INTRS1;
	else
		sc->sk_intrmask &= ~SK_INTRS2;
	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	/* Clear pending XMAC interrupts (read) and mask them all. */
	SK_XM_READ_2(sc_if, XM_ISR);
	SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);

	/* Free RX and TX mbufs still in the queues. */
	for (i = 0; i < SK_RX_RING_CNT; i++) {
		if (sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf != NULL) {
			m_freem(sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf);
			sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf = NULL;
		}
	}

	/* TX slots also return their DMA maps to the free list. */
	for (i = 0; i < SK_TX_RING_CNT; i++) {
		if (sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf != NULL) {
			m_freem(sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf);
			sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf = NULL;
			SIMPLEQ_INSERT_HEAD(&sc_if->sk_txmap_head,
			    sc_if->sk_cdata.sk_tx_map[i], link);
			sc_if->sk_cdata.sk_tx_map[i] = 0;
		}
	}

	/* Destroy all DMA maps on the free list. */
	while ((dma = SIMPLEQ_FIRST(&sc_if->sk_txmap_head))) {
		SIMPLEQ_REMOVE_HEAD(&sc_if->sk_txmap_head, link);
		bus_dmamap_destroy(sc->sc_dmatag, dma->dmamap);
		free(dma, M_DEVBUF);
	}
}
2792 
/* Autoconf glue: attachment for the "skc" PCI controller device. */
struct cfattach skc_ca = {
	sizeof(struct sk_softc), skc_probe, skc_attach,
};

struct cfdriver skc_cd = {
	0, "skc", DV_DULL
};

/* Autoconf glue: attachment for the per-port "sk" network interface. */
struct cfattach sk_ca = {
	sizeof(struct sk_if_softc), sk_probe, sk_attach,
};

struct cfdriver sk_cd = {
	0, "sk", DV_IFNET
};
2808 
2809 #ifdef SK_DEBUG
2810 void
2811 sk_dump_txdesc(struct sk_tx_desc *desc, int idx)
2812 {
2813 #define DESC_PRINT(X)					\
2814 	if (X)					\
2815 		printf("txdesc[%d]." #X "=%#x\n",	\
2816 		       idx, X);
2817 
2818 	DESC_PRINT(letoh32(desc->sk_ctl));
2819 	DESC_PRINT(letoh32(desc->sk_next));
2820 	DESC_PRINT(letoh32(desc->sk_data_lo));
2821 	DESC_PRINT(letoh32(desc->sk_data_hi));
2822 	DESC_PRINT(letoh32(desc->sk_xmac_txstat));
2823 	DESC_PRINT(letoh16(desc->sk_rsvd0));
2824 	DESC_PRINT(letoh16(desc->sk_csum_startval));
2825 	DESC_PRINT(letoh16(desc->sk_csum_startpos));
2826 	DESC_PRINT(letoh16(desc->sk_csum_writepos));
2827 	DESC_PRINT(letoh16(desc->sk_rsvd1));
2828 #undef PRINT
2829 }
2830 
/*
 * Debug helper: hex/ASCII dump of a buffer, 16 bytes per row.
 * Each row shows the offset, the bytes in hex (with an extra gap after
 * the eighth byte), and the printable-ASCII rendering of the bytes.
 */
void
sk_dump_bytes(const char *data, int len)
{
	int off, col, nbytes;

	for (off = 0; off < len; off += 16) {
		printf("%08x  ", off);

		nbytes = len - off;
		if (nbytes > 16)
			nbytes = 16;

		/* Hex column: one "%02x " per byte, gap after byte 7. */
		for (col = 0; col < nbytes; col++) {
			printf("%02x ", data[off + col] & 0xff);
			if (col == 7)
				printf(" ");
		}

		/* Pad a short final row out to the full column width. */
		for (; col < 16; col++)
			printf("   ");
		printf("  ");

		/* ASCII column: printable characters, blanks otherwise. */
		for (col = 0; col < nbytes; col++) {
			int ch = data[off + col] & 0xff;
			printf("%c", (' ' <= ch && ch <= '~') ? ch : ' ');
		}

		printf("\n");

		if (nbytes < 16)
			break;
	}
}
2862 
2863 void
2864 sk_dump_mbuf(struct mbuf *m)
2865 {
2866 	int count = m->m_pkthdr.len;
2867 
2868 	printf("m=%#lx, m->m_pkthdr.len=%#d\n", m, m->m_pkthdr.len);
2869 
2870 	while (count > 0 && m) {
2871 		printf("m=%#lx, m->m_data=%#lx, m->m_len=%d\n",
2872 		       m, m->m_data, m->m_len);
2873 		sk_dump_bytes(mtod(m, char *), m->m_len);
2874 
2875 		count -= m->m_len;
2876 		m = m->m_next;
2877 	}
2878 }
2879 #endif
2880