/*	$OpenBSD: if_sk.c,v 1.156 2009/10/17 21:40:43 martynas Exp $	*/

/*
 * Copyright (c) 1997, 1998, 1999, 2000
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: /c/ncvs/src/sys/pci/if_sk.c,v 1.20 2000/04/22 02:16:37 wpaul Exp $
 */

/*
 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports
 * the SK-984x series adapters, both single port and dual port.
 * References:
 *	The XaQti XMAC II datasheet,
 * http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
 *	The SysKonnect GEnesis manual, http://www.syskonnect.com
 *
 * Note: XaQti has been acquired by Vitesse, and Vitesse does not have the
 * XMAC II datasheet online.  I have put my copy at people.freebsd.org as a
 * convenience to others until Vitesse corrects this problem:
 *
 * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
 *
 * Written by Bill Paul <wpaul@ee.columbia.edu>
 * Department of Electrical Engineering
 * Columbia University, New York City
 */

/*
 * The SysKonnect gigabit ethernet adapters consist of two main
 * components: the SysKonnect GEnesis controller chip and the XaQti Corp.
 * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC
 * components and a PHY while the GEnesis controller provides a PCI
 * interface with DMA support. Each card may have between 512K and
 * 2MB of SRAM on board depending on the configuration.
 *
 * The SysKonnect GEnesis controller can have either one or two XMAC
 * chips connected to it, allowing single or dual port NIC configurations.
 * SysKonnect has the distinction of being the only vendor on the market
 * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs,
 * dual DMA queues, packet/MAC/transmit arbiters and direct access to the
 * XMAC registers. This driver takes advantage of these features to allow
 * both XMACs to operate as independent interfaces.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/timeout.h>
#include <sys/device.h>
#include <sys/queue.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_types.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>
#include <netinet/if_ether.h>
#endif

#include <net/if_media.h>
#include <net/if_vlan_var.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/brgphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_skreg.h>
#include <dev/pci/if_skvar.h>

int skc_probe(struct device *, void *, void *);
void skc_attach(struct device *, struct device *self, void *aux);
int skc_detach(struct device *, int);
void skc_shutdown(void *);
int sk_probe(struct device *, void *, void *);
void sk_attach(struct device *, struct device *self, void *aux);
int sk_detach(struct device *, int);
int skcprint(void *, const char *);
int sk_intr(void *);
void sk_intr_bcom(struct sk_if_softc *);
void sk_intr_xmac(struct sk_if_softc *);
void sk_intr_yukon(struct sk_if_softc *);
static __inline int sk_rxvalid(struct sk_softc *, u_int32_t, u_int32_t);
void sk_rxeof(struct sk_if_softc *);
void sk_txeof(struct sk_if_softc *);
int sk_encap(struct sk_if_softc *, struct mbuf *, u_int32_t *);
void sk_start(struct ifnet *);
int sk_ioctl(struct ifnet *, u_long, caddr_t);
void sk_init(void *);
void sk_init_xmac(struct sk_if_softc *);
void sk_init_yukon(struct sk_if_softc *);
void sk_stop(struct sk_if_softc *, int softonly);
void sk_watchdog(struct ifnet *);
int sk_ifmedia_upd(struct ifnet *);
void sk_ifmedia_sts(struct ifnet *, struct ifmediareq *);
void sk_reset(struct sk_softc *);
int sk_newbuf(struct sk_if_softc *, int, struct mbuf *, bus_dmamap_t);
int sk_alloc_jumbo_mem(struct sk_if_softc *);
void *sk_jalloc(struct sk_if_softc *);
void sk_jfree(caddr_t, u_int, void *);
int sk_init_rx_ring(struct sk_if_softc *);
int sk_init_tx_ring(struct sk_if_softc *);

int sk_xmac_miibus_readreg(struct device *, int, int);
void sk_xmac_miibus_writereg(struct device *, int, int, int);
void sk_xmac_miibus_statchg(struct device *);

int sk_marv_miibus_readreg(struct device *, int, int);
void sk_marv_miibus_writereg(struct device *, int, int, int);
void sk_marv_miibus_statchg(struct device *);

u_int32_t sk_xmac_hash(caddr_t);
u_int32_t sk_yukon_hash(caddr_t);
void sk_setfilt(struct sk_if_softc *, caddr_t, int);
void sk_setmulti(struct sk_if_softc *);
void sk_setpromisc(struct sk_if_softc *);
void sk_tick(void *);
void sk_yukon_tick(void *);
void sk_rxcsum(struct ifnet *, struct mbuf *, const u_int16_t, const u_int16_t);

#ifdef SK_DEBUG
#define DPRINTF(x)	if (skdebug) printf x
#define DPRINTFN(n,x)	if (skdebug >= (n)) printf x
int	skdebug = 0;

void sk_dump_txdesc(struct sk_tx_desc *, int);
void sk_dump_mbuf(struct mbuf *);
void sk_dump_bytes(const char *, int);
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

/* supported device vendors */
const struct pci_matchid skc_devices[] = {
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C940 },
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C940B },
	{ PCI_VENDOR_CNET, PCI_PRODUCT_CNET_GIGACARD },
	{ PCI_VENDOR_DLINK, PCI_PRODUCT_DLINK_DGE530T_A1 },
	{ PCI_VENDOR_DLINK, PCI_PRODUCT_DLINK_DGE530T_B1 },
	{ PCI_VENDOR_LINKSYS, PCI_PRODUCT_LINKSYS_EG1064 },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_BELKIN },
	{ PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK98XX },
	{ PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK98XX2 },
	{ PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK9821 },
	{ PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK9843 }
};

#define SK_LINKSYS_EG1032_SUBID 0x00151737

static inline u_int32_t
sk_win_read_4(struct sk_softc *sc, u_int32_t reg)
{
	return CSR_READ_4(sc, reg);
}

static inline u_int16_t
sk_win_read_2(struct sk_softc *sc, u_int32_t reg)
{
	return CSR_READ_2(sc, reg);
}

static inline u_int8_t
sk_win_read_1(struct sk_softc *sc, u_int32_t reg)
{
	return CSR_READ_1(sc, reg);
}

static inline void
sk_win_write_4(struct sk_softc *sc, u_int32_t reg, u_int32_t x)
{
	CSR_WRITE_4(sc, reg, x);
}

static inline void
sk_win_write_2(struct sk_softc *sc, u_int32_t reg, u_int16_t x)
{
	CSR_WRITE_2(sc, reg, x);
}

static inline void
sk_win_write_1(struct sk_softc *sc, u_int32_t reg, u_int8_t x)
{
	CSR_WRITE_1(sc, reg, x);
}

int
sk_xmac_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
	int i;

	DPRINTFN(9, ("sk_xmac_miibus_readreg\n"));

	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC && phy != 0)
		return (0);

	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
	SK_XM_READ_2(sc_if, XM_PHY_DATA);
	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
		for (i = 0; i < SK_TIMEOUT; i++) {
			DELAY(1);
			if (SK_XM_READ_2(sc_if, XM_MMUCMD) &
			    XM_MMUCMD_PHYDATARDY)
				break;
		}

		if (i == SK_TIMEOUT) {
			printf("%s: phy failed to come ready\n",
			    sc_if->sk_dev.dv_xname);
			return (0);
		}
	}
	DELAY(1);
	return (SK_XM_READ_2(sc_if, XM_PHY_DATA));
}

void
sk_xmac_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
	int i;

	DPRINTFN(9, ("sk_xmac_miibus_writereg\n"));

	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
	for (i = 0; i < SK_TIMEOUT; i++) {
		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
			break;
	}

	if (i == SK_TIMEOUT) {
		printf("%s: phy failed to come ready\n",
		    sc_if->sk_dev.dv_xname);
		return;
	}

	SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val);
	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
			break;
	}

	if (i == SK_TIMEOUT)
		printf("%s: phy write timed out\n", sc_if->sk_dev.dv_xname);
}

void
sk_xmac_miibus_statchg(struct device *dev)
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
	struct mii_data *mii = &sc_if->sk_mii;

	DPRINTFN(9, ("sk_xmac_miibus_statchg\n"));

	/*
	 * If this is a GMII PHY, manually set the XMAC's
	 * duplex mode accordingly.
	 */
	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
		if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
			SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
		else
			SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
	}
}

int
sk_marv_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
	u_int16_t val;
	int i;

	if (phy != 0 ||
	    (sc_if->sk_phytype != SK_PHYTYPE_MARV_COPPER &&
	    sc_if->sk_phytype != SK_PHYTYPE_MARV_FIBER)) {
		DPRINTFN(9, ("sk_marv_miibus_readreg (skip) phy=%d, reg=%#x\n",
		    phy, reg));
		return (0);
	}

	SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
	    YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ);

	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		val = SK_YU_READ_2(sc_if, YUKON_SMICR);
		if (val & YU_SMICR_READ_VALID)
			break;
	}

	if (i == SK_TIMEOUT) {
		printf("%s: phy failed to come ready\n",
		    sc_if->sk_dev.dv_xname);
		return (0);
	}

	DPRINTFN(9, ("sk_marv_miibus_readreg: i=%d, timeout=%d\n", i,
	    SK_TIMEOUT));

	val = SK_YU_READ_2(sc_if, YUKON_SMIDR);

	DPRINTFN(9, ("sk_marv_miibus_readreg phy=%d, reg=%#x, val=%#x\n",
	    phy, reg, val));

	return (val);
}

void
sk_marv_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
	int i;

	DPRINTFN(9, ("sk_marv_miibus_writereg phy=%d reg=%#x val=%#x\n",
	    phy, reg, val));

	SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val);
	SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
	    YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE);

	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		if (!(SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY))
			break;
	}

	if (i == SK_TIMEOUT)
		printf("%s: phy write timed out\n", sc_if->sk_dev.dv_xname);
}

void
sk_marv_miibus_statchg(struct device *dev)
{
	DPRINTFN(9, ("sk_marv_miibus_statchg: gpcr=%x\n",
	    SK_YU_READ_2(((struct sk_if_softc *)dev), YUKON_GPCR)));
}
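/*
 * Editor's note: the XMAC and Marvell MII routines above share one
 * shape: kick off a PHY transaction, then busy-wait with a bounded
 * DELAY() loop until the MAC signals completion.  A minimal sketch of
 * that pattern (sk_example_poll() is invented here for illustration
 * and is not part of the driver):
 */
#if 0
static int
sk_example_poll(struct sk_if_softc *sc_if, u_int16_t donebit)
{
	int i;

	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		/* Done as soon as the MAC raises the completion bit. */
		if (SK_XM_READ_2(sc_if, XM_MMUCMD) & donebit)
			return (0);
	}
	return (ETIMEDOUT);	/* gave up after SK_TIMEOUT microseconds */
}
#endif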
u_int32_t
sk_xmac_hash(caddr_t addr)
{
	u_int32_t crc;

	crc = ether_crc32_le(addr, ETHER_ADDR_LEN);
	return (~crc & ((1 << SK_HASH_BITS) - 1));
}

u_int32_t
sk_yukon_hash(caddr_t addr)
{
	u_int32_t crc;

	crc = ether_crc32_be(addr, ETHER_ADDR_LEN);
	return (crc & ((1 << SK_HASH_BITS) - 1));
}

void
sk_setfilt(struct sk_if_softc *sc_if, caddr_t addr, int slot)
{
	int base = XM_RXFILT_ENTRY(slot);

	SK_XM_WRITE_2(sc_if, base, letoh16(*(u_int16_t *)(&addr[0])));
	SK_XM_WRITE_2(sc_if, base + 2, letoh16(*(u_int16_t *)(&addr[2])));
	SK_XM_WRITE_2(sc_if, base + 4, letoh16(*(u_int16_t *)(&addr[4])));
}

void
sk_setmulti(struct sk_if_softc *sc_if)
{
	struct sk_softc *sc = sc_if->sk_softc;
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	u_int32_t hashes[2] = { 0, 0 };
	int h, i;
	struct arpcom *ac = &sc_if->arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int8_t dummy[] = { 0, 0, 0, 0, 0, 0 };

	/* First, zot all the existing filters. */
	switch(sc->sk_type) {
	case SK_GENESIS:
		for (i = 1; i < XM_RXFILT_MAX; i++)
			sk_setfilt(sc_if, (caddr_t)&dummy, i);

		SK_XM_WRITE_4(sc_if, XM_MAR0, 0);
		SK_XM_WRITE_4(sc_if, XM_MAR2, 0);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		SK_YU_WRITE_2(sc_if, YUKON_MCAH1, 0);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH2, 0);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH3, 0);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH4, 0);
		break;
	}

	/* Now program new ones. */
allmulti:
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		hashes[0] = 0xFFFFFFFF;
		hashes[1] = 0xFFFFFFFF;
	} else {
		i = 1;
		/* Walk the list of multicast groups. */
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			if (bcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN)) {
				ifp->if_flags |= IFF_ALLMULTI;
				goto allmulti;
			}
			/*
			 * Program the first XM_RXFILT_MAX multicast groups
			 * into the perfect filter. For all others,
			 * use the hash table.
			 */
			if (SK_IS_GENESIS(sc) && i < XM_RXFILT_MAX) {
				sk_setfilt(sc_if, enm->enm_addrlo, i);
				i++;
			} else {
				switch(sc->sk_type) {
				case SK_GENESIS:
					h = sk_xmac_hash(enm->enm_addrlo);
					break;

				case SK_YUKON:
				case SK_YUKON_LITE:
				case SK_YUKON_LP:
					h = sk_yukon_hash(enm->enm_addrlo);
					break;
				}
				if (h < 32)
					hashes[0] |= (1 << h);
				else
					hashes[1] |= (1 << (h - 32));
			}

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	switch(sc->sk_type) {
	case SK_GENESIS:
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_HASH|
		    XM_MODE_RX_USE_PERFECT);
		SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]);
		SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff);
		break;
	}
}
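/*
 * Editor's note: a sketch of how sk_setmulti() files one group address
 * into the hash filter, assuming SK_HASH_BITS is 6 (64 buckets,
 * matching the two 32-bit hash words above).  sk_example_hashbit() is
 * invented for illustration and is not part of the driver.
 */
#if 0
static void
sk_example_hashbit(struct sk_softc *sc, caddr_t addr, u_int32_t hashes[2])
{
	int h;

	/* GEnesis hashes with CRC32-LE, Yukon with CRC32-BE. */
	h = SK_IS_GENESIS(sc) ? sk_xmac_hash(addr) : sk_yukon_hash(addr);
	if (h < 32)
		hashes[0] |= (1 << h);		/* buckets 0..31 */
	else
		hashes[1] |= (1 << (h - 32));	/* buckets 32..63 */
}
#endif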
void
sk_setpromisc(struct sk_if_softc *sc_if)
{
	struct sk_softc *sc = sc_if->sk_softc;
	struct ifnet *ifp = &sc_if->arpcom.ac_if;

	switch(sc->sk_type) {
	case SK_GENESIS:
		if (ifp->if_flags & IFF_PROMISC)
			SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
		else
			SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		if (ifp->if_flags & IFF_PROMISC) {
			SK_YU_CLRBIT_2(sc_if, YUKON_RCR,
			    YU_RCR_UFLEN | YU_RCR_MUFLEN);
		} else {
			SK_YU_SETBIT_2(sc_if, YUKON_RCR,
			    YU_RCR_UFLEN | YU_RCR_MUFLEN);
		}
		break;
	}
}

int
sk_init_rx_ring(struct sk_if_softc *sc_if)
{
	struct sk_chain_data *cd = &sc_if->sk_cdata;
	struct sk_ring_data *rd = sc_if->sk_rdata;
	int i, nexti;

	bzero((char *)rd->sk_rx_ring,
	    sizeof(struct sk_rx_desc) * SK_RX_RING_CNT);

	for (i = 0; i < SK_RX_RING_CNT; i++) {
		cd->sk_rx_chain[i].sk_desc = &rd->sk_rx_ring[i];
		if (i == (SK_RX_RING_CNT - 1))
			nexti = 0;
		else
			nexti = i + 1;
		cd->sk_rx_chain[i].sk_next = &cd->sk_rx_chain[nexti];
		rd->sk_rx_ring[i].sk_next = htole32(SK_RX_RING_ADDR(sc_if, nexti));
		rd->sk_rx_ring[i].sk_csum1_start = htole16(ETHER_HDR_LEN);
		rd->sk_rx_ring[i].sk_csum2_start = htole16(ETHER_HDR_LEN +
		    sizeof(struct ip));
	}

	for (i = 0; i < SK_RX_RING_CNT; i++) {
		if (sk_newbuf(sc_if, i, NULL,
		    sc_if->sk_cdata.sk_rx_jumbo_map) == ENOBUFS) {
			printf("%s: failed alloc of %dth mbuf\n",
			    sc_if->sk_dev.dv_xname, i);
			return (ENOBUFS);
		}
	}

	sc_if->sk_cdata.sk_rx_prod = 0;
	sc_if->sk_cdata.sk_rx_cons = 0;

	return (0);
}

int
sk_init_tx_ring(struct sk_if_softc *sc_if)
{
	struct sk_softc *sc = sc_if->sk_softc;
	struct sk_chain_data *cd = &sc_if->sk_cdata;
	struct sk_ring_data *rd = sc_if->sk_rdata;
	bus_dmamap_t dmamap;
	struct sk_txmap_entry *entry;
	int i, nexti;

	bzero((char *)sc_if->sk_rdata->sk_tx_ring,
	    sizeof(struct sk_tx_desc) * SK_TX_RING_CNT);

	SIMPLEQ_INIT(&sc_if->sk_txmap_head);
	for (i = 0; i < SK_TX_RING_CNT; i++) {
		cd->sk_tx_chain[i].sk_desc = &rd->sk_tx_ring[i];
		if (i == (SK_TX_RING_CNT - 1))
			nexti = 0;
		else
			nexti = i + 1;
		cd->sk_tx_chain[i].sk_next = &cd->sk_tx_chain[nexti];
		rd->sk_tx_ring[i].sk_next = htole32(SK_TX_RING_ADDR(sc_if, nexti));
		if (bus_dmamap_create(sc->sc_dmatag, SK_JLEN, SK_NTXSEG,
		    SK_JLEN, 0, BUS_DMA_NOWAIT, &dmamap))
			return (ENOBUFS);

		entry = malloc(sizeof(*entry), M_DEVBUF, M_NOWAIT);
		if (!entry) {
			bus_dmamap_destroy(sc->sc_dmatag, dmamap);
			return (ENOBUFS);
		}
		entry->dmamap = dmamap;
		SIMPLEQ_INSERT_HEAD(&sc_if->sk_txmap_head, entry, link);
	}

	sc_if->sk_cdata.sk_tx_prod = 0;
	sc_if->sk_cdata.sk_tx_cons = 0;
	sc_if->sk_cdata.sk_tx_cnt = 0;

	SK_CDTXSYNC(sc_if, 0, SK_TX_RING_CNT,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	return (0);
}

int
sk_newbuf(struct sk_if_softc *sc_if, int i, struct mbuf *m,
    bus_dmamap_t dmamap)
{
	struct mbuf *m_new = NULL;
	struct sk_chain *c;
	struct sk_rx_desc *r;

	if (m == NULL) {
		caddr_t buf = NULL;

		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return (ENOBUFS);

		/* Allocate the jumbo buffer */
		buf = sk_jalloc(sc_if);
		if (buf == NULL) {
			m_freem(m_new);
			DPRINTFN(1, ("%s jumbo allocation failed -- packet "
			    "dropped!\n", sc_if->arpcom.ac_if.if_xname));
			return (ENOBUFS);
		}

		/* Attach the buffer to the mbuf */
		m_new->m_len = m_new->m_pkthdr.len = SK_JLEN;
		MEXTADD(m_new, buf, SK_JLEN, 0, sk_jfree, sc_if);
	} else {
		/*
		 * We're re-using a previously allocated mbuf;
		 * be sure to re-init pointers and lengths to
		 * default values.
		 */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = SK_JLEN;
		m_new->m_data = m_new->m_ext.ext_buf;
	}
	m_adj(m_new, ETHER_ALIGN);

	c = &sc_if->sk_cdata.sk_rx_chain[i];
	r = c->sk_desc;
	c->sk_mbuf = m_new;
	r->sk_data_lo = htole32(dmamap->dm_segs[0].ds_addr +
	    (((vaddr_t)m_new->m_data
	    - (vaddr_t)sc_if->sk_cdata.sk_jumbo_buf)));
	r->sk_ctl = htole32(SK_JLEN | SK_RXSTAT);

	SK_CDRXSYNC(sc_if, i, BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	return (0);
}

/*
 * Memory management for jumbo frames.
 */

int
sk_alloc_jumbo_mem(struct sk_if_softc *sc_if)
{
	struct sk_softc *sc = sc_if->sk_softc;
	caddr_t ptr, kva;
	bus_dma_segment_t seg;
	int i, rseg, state, error;
	struct sk_jpool_entry *entry;

	state = error = 0;

	/* Grab a big chunk o' storage. */
	if (bus_dmamem_alloc(sc->sc_dmatag, SK_JMEM, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		printf(": can't alloc rx buffers");
		return (ENOBUFS);
	}

	state = 1;
	if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg, SK_JMEM, &kva,
	    BUS_DMA_NOWAIT)) {
		printf(": can't map dma buffers (%d bytes)", SK_JMEM);
		error = ENOBUFS;
		goto out;
	}

	state = 2;
	if (bus_dmamap_create(sc->sc_dmatag, SK_JMEM, 1, SK_JMEM, 0,
	    BUS_DMA_NOWAIT, &sc_if->sk_cdata.sk_rx_jumbo_map)) {
		printf(": can't create dma map");
		error = ENOBUFS;
		goto out;
	}

	state = 3;
	if (bus_dmamap_load(sc->sc_dmatag, sc_if->sk_cdata.sk_rx_jumbo_map,
	    kva, SK_JMEM, NULL, BUS_DMA_NOWAIT)) {
		printf(": can't load dma map");
		error = ENOBUFS;
		goto out;
	}

	state = 4;
	sc_if->sk_cdata.sk_jumbo_buf = (caddr_t)kva;
	DPRINTFN(1, ("sk_jumbo_buf = 0x%08X\n", sc_if->sk_cdata.sk_jumbo_buf));

	LIST_INIT(&sc_if->sk_jfree_listhead);
	LIST_INIT(&sc_if->sk_jinuse_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array.
	 */
	ptr = sc_if->sk_cdata.sk_jumbo_buf;
	for (i = 0; i < SK_JSLOTS; i++) {
		sc_if->sk_cdata.sk_jslots[i] = ptr;
		ptr += SK_JLEN;
		entry = malloc(sizeof(struct sk_jpool_entry),
		    M_DEVBUF, M_NOWAIT);
		if (entry == NULL) {
			sc_if->sk_cdata.sk_jumbo_buf = NULL;
			printf(": no memory for jumbo buffer queue!");
			error = ENOBUFS;
			goto out;
		}
		entry->slot = i;
		LIST_INSERT_HEAD(&sc_if->sk_jfree_listhead,
		    entry, jpool_entries);
	}
out:
	if (error != 0) {
		switch (state) {
		case 4:
			bus_dmamap_unload(sc->sc_dmatag,
			    sc_if->sk_cdata.sk_rx_jumbo_map);
		case 3:
			bus_dmamap_destroy(sc->sc_dmatag,
			    sc_if->sk_cdata.sk_rx_jumbo_map);
		case 2:
			bus_dmamem_unmap(sc->sc_dmatag, kva, SK_JMEM);
		case 1:
			bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
			break;
		default:
			break;
		}
	}

	return (error);
}

/*
 * Allocate a jumbo buffer.
 */
void *
sk_jalloc(struct sk_if_softc *sc_if)
{
	struct sk_jpool_entry *entry;

	entry = LIST_FIRST(&sc_if->sk_jfree_listhead);

	if (entry == NULL)
		return (NULL);

	LIST_REMOVE(entry, jpool_entries);
	LIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead, entry, jpool_entries);
	return (sc_if->sk_cdata.sk_jslots[entry->slot]);
}

/*
 * Release a jumbo buffer.
 */
void
sk_jfree(caddr_t buf, u_int size, void *arg)
{
	struct sk_jpool_entry *entry;
	struct sk_if_softc *sc;
	int i;

	/* Extract the softc struct pointer. */
	sc = (struct sk_if_softc *)arg;

	if (sc == NULL)
		panic("sk_jfree: can't find softc pointer!");

	/* calculate the slot this buffer belongs to */
	i = ((vaddr_t)buf
	    - (vaddr_t)sc->sk_cdata.sk_jumbo_buf) / SK_JLEN;

	if ((i < 0) || (i >= SK_JSLOTS))
		panic("sk_jfree: asked to free buffer that we don't manage!");

	entry = LIST_FIRST(&sc->sk_jinuse_listhead);
	if (entry == NULL)
		panic("sk_jfree: buffer not in use!");
	entry->slot = i;
	LIST_REMOVE(entry, jpool_entries);
	LIST_INSERT_HEAD(&sc->sk_jfree_listhead, entry, jpool_entries);
}
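/*
 * Editor's note: the jumbo pool in miniature.  This sketch shows the
 * sk_jalloc()/MEXTADD() pairing that sk_newbuf() performs; sk_jfree()
 * later runs as the external-storage free callback when the mbuf is
 * released, returning the slot to sk_jfree_listhead.
 * sk_example_jumbo_mbuf() is invented for illustration.
 */
#if 0
static struct mbuf *
sk_example_jumbo_mbuf(struct sk_if_softc *sc_if)
{
	struct mbuf *m;
	caddr_t buf;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
	if ((buf = sk_jalloc(sc_if)) == NULL) {
		m_freem(m);
		return (NULL);
	}
	m->m_len = m->m_pkthdr.len = SK_JLEN;
	MEXTADD(m, buf, SK_JLEN, 0, sk_jfree, sc_if);
	return (m);
}
#endif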
/*
 * Set media options.
 */
int
sk_ifmedia_upd(struct ifnet *ifp)
{
	struct sk_if_softc *sc_if = ifp->if_softc;

	mii_mediachg(&sc_if->sk_mii);
	return (0);
}

/*
 * Report current media status.
 */
void
sk_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct sk_if_softc *sc_if = ifp->if_softc;

	mii_pollstat(&sc_if->sk_mii);
	ifmr->ifm_active = sc_if->sk_mii.mii_media_active;
	ifmr->ifm_status = sc_if->sk_mii.mii_media_status;
}

int
sk_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct sk_if_softc *sc_if = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int s, error = 0;

	s = splnet();

	switch(command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			sk_init(sc_if);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc_if->arpcom, ifa);
#endif /* INET */
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING &&
			    (ifp->if_flags ^ sc_if->sk_if_flags)
			    & IFF_PROMISC) {
				sk_setpromisc(sc_if);
				sk_setmulti(sc_if);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					sk_init(sc_if);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				sk_stop(sc_if, 0);
		}
		sc_if->sk_if_flags = ifp->if_flags;
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = &sc_if->sk_mii;
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;

	default:
		error = ether_ioctl(ifp, &sc_if->arpcom, command, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			sk_setmulti(sc_if);
		error = 0;
	}

	splx(s);
	return (error);
}

/*
 * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
int
skc_probe(struct device *parent, void *match, void *aux)
{
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcireg_t subid;

	subid = pci_conf_read(pc, pa->pa_tag, PCI_SUBSYS_ID_REG);

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_LINKSYS &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_LINKSYS_EG1032 &&
	    subid == SK_LINKSYS_EG1032_SUBID)
		return (1);

	return (pci_matchbyid((struct pci_attach_args *)aux, skc_devices,
	    sizeof(skc_devices)/sizeof(skc_devices[0])));
}

/*
 * Force the GEnesis into reset, then bring it out of reset.
 */
void
sk_reset(struct sk_softc *sc)
{
	u_int32_t imtimer_ticks;

	DPRINTFN(2, ("sk_reset\n"));

	CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_RESET);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_RESET);
	if (SK_IS_YUKON(sc))
		CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET);

	DELAY(1000);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_UNRESET);
	DELAY(2);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_UNRESET);
	if (SK_IS_YUKON(sc))
		CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR);

	DPRINTFN(2, ("sk_reset: sk_csr=%x\n", CSR_READ_2(sc, SK_CSR)));
	DPRINTFN(2, ("sk_reset: sk_link_ctrl=%x\n",
	    CSR_READ_2(sc, SK_LINK_CTRL)));

	if (SK_IS_GENESIS(sc)) {
		/* Configure packet arbiter */
		sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET);
		sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT);
		sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT);
		sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT);
		sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT);
	}

	/* Enable RAM interface */
	sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);

	/*
	 * Configure interrupt moderation. The moderation timer
	 * defers interrupts specified in the interrupt moderation
	 * timer mask based on the timeout specified in the interrupt
	 * moderation timer init register. Each bit in the timer
	 * register represents one tick, so to specify a timeout in
	 * microseconds, we have to multiply by the correct number of
	 * ticks-per-microsecond.
	 */
	switch (sc->sk_type) {
	case SK_GENESIS:
		imtimer_ticks = SK_IMTIMER_TICKS_GENESIS;
		break;
	default:
		imtimer_ticks = SK_IMTIMER_TICKS_YUKON;
	}
	sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(100));
	sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
	    SK_ISR_RX1_EOF|SK_ISR_RX2_EOF);
	sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);
}
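/*
 * Editor's note: a worked example of the tick arithmetic above,
 * assuming SK_IM_USECS(x) expands to (x) * imtimer_ticks, as the local
 * variable above suggests.  With a hypothetical tick rate of 53
 * ticks/us on GEnesis, SK_IM_USECS(100) loads the init register with
 * 100 * 53 = 5300 ticks, i.e. the interrupts masked in SK_IMMR are
 * batched and delivered at most once every 100 us.
 */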
int
sk_probe(struct device *parent, void *match, void *aux)
{
	struct skc_attach_args *sa = aux;

	if (sa->skc_port != SK_PORT_A && sa->skc_port != SK_PORT_B)
		return (0);

	switch (sa->skc_type) {
	case SK_GENESIS:
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		return (1);
	}

	return (0);
}

/*
 * Each XMAC chip is attached as a separate logical IP interface.
 * Single port cards will have only one logical interface of course.
 */
void
sk_attach(struct device *parent, struct device *self, void *aux)
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *)self;
	struct sk_softc *sc = (struct sk_softc *)parent;
	struct skc_attach_args *sa = aux;
	struct ifnet *ifp;
	caddr_t kva;
	int i;

	sc_if->sk_port = sa->skc_port;
	sc_if->sk_softc = sc;
	sc->sk_if[sa->skc_port] = sc_if;

	if (sa->skc_port == SK_PORT_A)
		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0;
	if (sa->skc_port == SK_PORT_B)
		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1;

	DPRINTFN(2, ("begin sk_attach: port=%d\n", sc_if->sk_port));

	/*
	 * Get station address for this interface. Note that
	 * dual port cards actually come with three station
	 * addresses: one for each port, plus an extra. The
	 * extra one is used by the SysKonnect driver software
	 * as a 'virtual' station address for when both ports
	 * are operating in failover mode. Currently we don't
	 * use this extra address.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		sc_if->arpcom.ac_enaddr[i] =
		    sk_win_read_1(sc, SK_MAC0_0 + (sa->skc_port * 8) + i);

	printf(": address %s\n",
	    ether_sprintf(sc_if->arpcom.ac_enaddr));

	/*
	 * Set up RAM buffer addresses. The NIC will have a certain
	 * amount of SRAM on it, somewhere between 512K and 2MB. We
	 * need to divide this up a) between the transmitter and
	 * receiver and b) between the two XMACs, if this is a
	 * dual port NIC. Our algorithm is to divide up the memory
	 * evenly so that everyone gets a fair share.
	 */
	if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) {
		u_int32_t chunk, val;

		chunk = sc->sk_ramsize / 2;
		val = sc->sk_rboff / sizeof(u_int64_t);
		sc_if->sk_rx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_rx_ramend = val - 1;
		sc_if->sk_tx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_tx_ramend = val - 1;
	} else {
		u_int32_t chunk, val;

		chunk = sc->sk_ramsize / 4;
		val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) /
		    sizeof(u_int64_t);
		sc_if->sk_rx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_rx_ramend = val - 1;
		sc_if->sk_tx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_tx_ramend = val - 1;
	}

	DPRINTFN(2, ("sk_attach: rx_ramstart=%#x rx_ramend=%#x\n"
	    "	   tx_ramstart=%#x tx_ramend=%#x\n",
	    sc_if->sk_rx_ramstart, sc_if->sk_rx_ramend,
	    sc_if->sk_tx_ramstart, sc_if->sk_tx_ramend));
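	/*
	 * Editor's note: the carve-up above, worked through for a
	 * hypothetical 1MB dual-MAC board with sk_rboff == 0.
	 * chunk = 0x100000 / 4 = 0x40000 bytes per queue, and all
	 * values are in 8-byte units (sizeof(u_int64_t)), so:
	 *
	 *	port A: rx 0x0000-0x7fff,   tx 0x8000-0xffff
	 *	port B: rx 0x10000-0x17fff, tx 0x18000-0x1ffff
	 */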
	/* Read and save PHY type */
	sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF;

	/* Set PHY address */
	if (SK_IS_GENESIS(sc)) {
		switch (sc_if->sk_phytype) {
		case SK_PHYTYPE_XMAC:
			sc_if->sk_phyaddr = SK_PHYADDR_XMAC;
			break;
		case SK_PHYTYPE_BCOM:
			sc_if->sk_phyaddr = SK_PHYADDR_BCOM;
			break;
		default:
			printf("%s: unsupported PHY type: %d\n",
			    sc->sk_dev.dv_xname, sc_if->sk_phytype);
			return;
		}
	}

	if (SK_IS_YUKON(sc)) {
		if ((sc_if->sk_phytype < SK_PHYTYPE_MARV_COPPER &&
		    sc->sk_pmd != 'L' && sc->sk_pmd != 'S')) {
			/* not initialized, punt */
			sc_if->sk_phytype = SK_PHYTYPE_MARV_COPPER;

			sc->sk_coppertype = 1;
		}

		sc_if->sk_phyaddr = SK_PHYADDR_MARV;

		if (!(sc->sk_coppertype))
			sc_if->sk_phytype = SK_PHYTYPE_MARV_FIBER;
	}

	/* Allocate the descriptor queues. */
	if (bus_dmamem_alloc(sc->sc_dmatag, sizeof(struct sk_ring_data),
	    PAGE_SIZE, 0, &sc_if->sk_ring_seg, 1, &sc_if->sk_ring_nseg,
	    BUS_DMA_NOWAIT)) {
		printf(": can't alloc rx buffers\n");
		goto fail;
	}
	if (bus_dmamem_map(sc->sc_dmatag, &sc_if->sk_ring_seg, sc_if->sk_ring_nseg,
	    sizeof(struct sk_ring_data), &kva, BUS_DMA_NOWAIT)) {
		printf(": can't map dma buffers (%lu bytes)\n",
		    (ulong)sizeof(struct sk_ring_data));
		goto fail_1;
	}
	if (bus_dmamap_create(sc->sc_dmatag, sizeof(struct sk_ring_data), 1,
	    sizeof(struct sk_ring_data), 0, BUS_DMA_NOWAIT,
	    &sc_if->sk_ring_map)) {
		printf(": can't create dma map\n");
		goto fail_2;
	}
	if (bus_dmamap_load(sc->sc_dmatag, sc_if->sk_ring_map, kva,
	    sizeof(struct sk_ring_data), NULL, BUS_DMA_NOWAIT)) {
		printf(": can't load dma map\n");
		goto fail_3;
	}
	sc_if->sk_rdata = (struct sk_ring_data *)kva;
	bzero(sc_if->sk_rdata, sizeof(struct sk_ring_data));

	/* Try to allocate memory for jumbo buffers. */
	if (sk_alloc_jumbo_mem(sc_if)) {
		printf(": jumbo buffer allocation failed\n");
		goto fail_3;
	}

	ifp = &sc_if->arpcom.ac_if;
	ifp->if_softc = sc_if;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = sk_ioctl;
	ifp->if_start = sk_start;
	ifp->if_watchdog = sk_watchdog;
	ifp->if_baudrate = 1000000000;
	ifp->if_hardmtu = SK_JUMBO_MTU;
	IFQ_SET_MAXLEN(&ifp->if_snd, SK_TX_RING_CNT - 1);
	IFQ_SET_READY(&ifp->if_snd);
	bcopy(sc_if->sk_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	/*
	 * Do miibus setup.
	 */
	switch (sc->sk_type) {
	case SK_GENESIS:
		sk_init_xmac(sc_if);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		sk_init_yukon(sc_if);
		break;
	default:
		printf(": unknown device type %d\n", sc->sk_type);
		/* dealloc jumbo on error */
		goto fail_3;
	}

	DPRINTFN(2, ("sk_attach: 1\n"));

	sc_if->sk_mii.mii_ifp = ifp;
	if (SK_IS_GENESIS(sc)) {
		sc_if->sk_mii.mii_readreg = sk_xmac_miibus_readreg;
		sc_if->sk_mii.mii_writereg = sk_xmac_miibus_writereg;
		sc_if->sk_mii.mii_statchg = sk_xmac_miibus_statchg;
	} else {
		sc_if->sk_mii.mii_readreg = sk_marv_miibus_readreg;
		sc_if->sk_mii.mii_writereg = sk_marv_miibus_writereg;
		sc_if->sk_mii.mii_statchg = sk_marv_miibus_statchg;
	}

	ifmedia_init(&sc_if->sk_mii.mii_media, 0,
	    sk_ifmedia_upd, sk_ifmedia_sts);
	if (SK_IS_GENESIS(sc)) {
		mii_attach(self, &sc_if->sk_mii, 0xffffffff, MII_PHY_ANY,
		    MII_OFFSET_ANY, 0);
	} else {
		mii_attach(self, &sc_if->sk_mii, 0xffffffff, MII_PHY_ANY,
		    MII_OFFSET_ANY, MIIF_DOPAUSE);
	}
	if (LIST_FIRST(&sc_if->sk_mii.mii_phys) == NULL) {
		printf("%s: no PHY found!\n", sc_if->sk_dev.dv_xname);
		ifmedia_add(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_MANUAL);
	} else
		ifmedia_set(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_AUTO);

	if (SK_IS_GENESIS(sc)) {
		timeout_set(&sc_if->sk_tick_ch, sk_tick, sc_if);
		timeout_add_sec(&sc_if->sk_tick_ch, 1);
	} else
		timeout_set(&sc_if->sk_tick_ch, sk_yukon_tick, sc_if);
	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);

	sc_if->sk_sdhook = shutdownhook_establish(skc_shutdown, sc);

	DPRINTFN(2, ("sk_attach: end\n"));
	return;

fail_3:
	bus_dmamap_destroy(sc->sc_dmatag, sc_if->sk_ring_map);
fail_2:
	bus_dmamem_unmap(sc->sc_dmatag, kva, sizeof(struct sk_ring_data));
fail_1:
	bus_dmamem_free(sc->sc_dmatag, &sc_if->sk_ring_seg, sc_if->sk_ring_nseg);
fail:
	sc->sk_if[sa->skc_port] = NULL;
}

int
sk_detach(struct device *self, int flags)
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *)self;
	struct sk_softc *sc = sc_if->sk_softc;
	struct ifnet *ifp = &sc_if->arpcom.ac_if;

	if (sc->sk_if[sc_if->sk_port] == NULL)
		return (0);

	sk_stop(sc_if, 1);

	/* Detach any PHYs we might have. */
	if (LIST_FIRST(&sc_if->sk_mii.mii_phys) != NULL)
		mii_detach(&sc_if->sk_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete any remaining media. */
	ifmedia_delete_instance(&sc_if->sk_mii.mii_media, IFM_INST_ANY);

	if (sc_if->sk_sdhook != NULL)
		shutdownhook_disestablish(sc_if->sk_sdhook);

	ether_ifdetach(ifp);
	if_detach(ifp);

	bus_dmamem_unmap(sc->sc_dmatag, (caddr_t)sc_if->sk_rdata,
	    sizeof(struct sk_ring_data));
	bus_dmamem_free(sc->sc_dmatag,
	    &sc_if->sk_ring_seg, sc_if->sk_ring_nseg);
	bus_dmamap_destroy(sc->sc_dmatag, sc_if->sk_ring_map);
	sc->sk_if[sc_if->sk_port] = NULL;

	return (0);
}

int
skcprint(void *aux, const char *pnp)
{
	struct skc_attach_args *sa = aux;

	if (pnp)
		printf("sk port %c at %s",
		    (sa->skc_port == SK_PORT_A) ? 'A' : 'B', pnp);
	else
		printf(" port %c", (sa->skc_port == SK_PORT_A) ? 'A' : 'B');
	return (UNCONF);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
void
skc_attach(struct device *parent, struct device *self, void *aux)
{
	struct sk_softc *sc = (struct sk_softc *)self;
	struct pci_attach_args *pa = aux;
	struct skc_attach_args skca;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcireg_t command, memtype;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	u_int8_t skrs;
	char *revstr = NULL;

	DPRINTFN(2, ("begin skc_attach\n"));

	/*
	 * Handle power management nonsense.
	 */
	command = pci_conf_read(pc, pa->pa_tag, SK_PCI_CAPID) & 0x000000FF;

	if (command == 0x01) {
		command = pci_conf_read(pc, pa->pa_tag, SK_PCI_PWRMGMTCTRL);
		if (command & SK_PSTATE_MASK) {
			u_int32_t iobase, membase, irq;

			/* Save important PCI config data. */
			iobase = pci_conf_read(pc, pa->pa_tag, SK_PCI_LOIO);
			membase = pci_conf_read(pc, pa->pa_tag, SK_PCI_LOMEM);
			irq = pci_conf_read(pc, pa->pa_tag, SK_PCI_INTLINE);

			/* Reset the power state. */
			printf("%s chip is in D%d power mode "
			    "-- setting to D0\n", sc->sk_dev.dv_xname,
			    command & SK_PSTATE_MASK);
			command &= 0xFFFFFFFC;
			pci_conf_write(pc, pa->pa_tag,
			    SK_PCI_PWRMGMTCTRL, command);

			/* Restore PCI config data. */
			pci_conf_write(pc, pa->pa_tag, SK_PCI_LOIO, iobase);
			pci_conf_write(pc, pa->pa_tag, SK_PCI_LOMEM, membase);
			pci_conf_write(pc, pa->pa_tag, SK_PCI_INTLINE, irq);
		}
	}
	/*
	 * Map control/status registers.
	 */
	memtype = pci_mapreg_type(pc, pa->pa_tag, SK_PCI_LOMEM);
	if (pci_mapreg_map(pa, SK_PCI_LOMEM, memtype, 0, &sc->sk_btag,
	    &sc->sk_bhandle, NULL, &sc->sk_bsize, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	sc->sc_dmatag = pa->pa_dmat;

	sc->sk_type = sk_win_read_1(sc, SK_CHIPVER);
	sc->sk_rev = (sk_win_read_1(sc, SK_CONFIG) >> 4);
	sc->sk_pc = pc;

	/* bail out here if chip is not recognized */
	if (! SK_IS_GENESIS(sc) && ! SK_IS_YUKON(sc)) {
		printf(": unknown chip type: %d\n", sc->sk_type);
		goto fail_1;
	}
	DPRINTFN(2, ("skc_attach: allocate interrupt\n"));

	/* Allocate interrupt */
	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		goto fail_1;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sk_intrhand = pci_intr_establish(pc, ih, IPL_NET, sk_intr, sc,
	    self->dv_xname);
	if (sc->sk_intrhand == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail_1;
	}

	/* Reset the adapter. */
	sk_reset(sc);

	skrs = sk_win_read_1(sc, SK_EPROM0);
	if (SK_IS_GENESIS(sc)) {
		/* Read and save RAM size and RAMbuffer offset */
		switch(skrs) {
		case SK_RAMSIZE_512K_64:
			sc->sk_ramsize = 0x80000;
			sc->sk_rboff = SK_RBOFF_0;
			break;
		case SK_RAMSIZE_1024K_64:
			sc->sk_ramsize = 0x100000;
			sc->sk_rboff = SK_RBOFF_80000;
			break;
		case SK_RAMSIZE_1024K_128:
			sc->sk_ramsize = 0x100000;
			sc->sk_rboff = SK_RBOFF_0;
			break;
		case SK_RAMSIZE_2048K_128:
			sc->sk_ramsize = 0x200000;
			sc->sk_rboff = SK_RBOFF_0;
			break;
		default:
			printf(": unknown ram size: %d\n", skrs);
			goto fail_2;
		}
	} else {
		if (skrs == 0x00)
			sc->sk_ramsize = 0x20000;
		else
			sc->sk_ramsize = skrs * (1<<12);
		sc->sk_rboff = SK_RBOFF_0;
	}

	DPRINTFN(2, ("skc_attach: ramsize=%d (%dk), rboff=%d\n",
	    sc->sk_ramsize, sc->sk_ramsize / 1024,
	    sc->sk_rboff));

	/* Read and save physical media type */
	sc->sk_pmd = sk_win_read_1(sc, SK_PMDTYPE);

	if (sc->sk_pmd == 'T' || sc->sk_pmd == '1')
		sc->sk_coppertype = 1;
	else
		sc->sk_coppertype = 0;

	switch (sc->sk_type) {
	case SK_GENESIS:
		sc->sk_name = "GEnesis";
		break;
	case SK_YUKON:
		sc->sk_name = "Yukon";
		break;
	case SK_YUKON_LITE:
		sc->sk_name = "Yukon Lite";
		break;
	case SK_YUKON_LP:
		sc->sk_name = "Yukon LP";
		break;
	default:
		sc->sk_name = "Yukon (Unknown)";
	}

	/* Yukon Lite Rev A0 needs special test, from sk98lin driver */
	if (sc->sk_type == SK_YUKON || sc->sk_type == SK_YUKON_LP) {
		u_int32_t flashaddr;
		u_int8_t testbyte;

		flashaddr = sk_win_read_4(sc, SK_EP_ADDR);

		/* test Flash-Address Register */
		sk_win_write_1(sc, SK_EP_ADDR+3, 0xff);
		testbyte = sk_win_read_1(sc, SK_EP_ADDR+3);

		if (testbyte != 0) {
			/* This is a Yukon Lite Rev A0 */
			sc->sk_type = SK_YUKON_LITE;
			sc->sk_rev = SK_YUKON_LITE_REV_A0;
			/* restore Flash-Address Register */
			sk_win_write_4(sc, SK_EP_ADDR, flashaddr);
		}
	}

	if (sc->sk_type == SK_YUKON_LITE) {
		switch (sc->sk_rev) {
		case SK_YUKON_LITE_REV_A0:
			revstr = "A0";
			break;
		case SK_YUKON_LITE_REV_A1:
			revstr = "A1";
			break;
		case SK_YUKON_LITE_REV_A3:
			revstr = "A3";
			break;
		default:
			;
		}
	}

	/* Announce the product name. */
	printf(", %s", sc->sk_name);
	if (revstr != NULL)
		printf(" rev. %s", revstr);
	printf(" (0x%x): %s\n", sc->sk_rev, intrstr);

	sc->sk_macs = 1;

	if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC))
		sc->sk_macs++;

	skca.skc_port = SK_PORT_A;
	skca.skc_type = sc->sk_type;
	skca.skc_rev = sc->sk_rev;
	(void)config_found(&sc->sk_dev, &skca, skcprint);

	if (sc->sk_macs > 1) {
		skca.skc_port = SK_PORT_B;
		skca.skc_type = sc->sk_type;
		skca.skc_rev = sc->sk_rev;
		(void)config_found(&sc->sk_dev, &skca, skcprint);
	}

	/* Turn on the 'driver is loaded' LED. */
	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);

	return;

fail_2:
	pci_intr_disestablish(pc, sc->sk_intrhand);
fail_1:
	bus_space_unmap(sc->sk_btag, sc->sk_bhandle, sc->sk_bsize);
}

int
skc_detach(struct device *self, int flags)
{
	struct sk_softc *sc = (struct sk_softc *)self;
	int rv;

	if (sc->sk_intrhand)
		pci_intr_disestablish(sc->sk_pc, sc->sk_intrhand);

	rv = config_detach_children(self, flags);
	if (rv != 0)
		return (rv);

	if (sc->sk_bsize > 0)
		bus_space_unmap(sc->sk_btag, sc->sk_bhandle, sc->sk_bsize);

	return (0);
}

int
sk_encap(struct sk_if_softc *sc_if, struct mbuf *m_head, u_int32_t *txidx)
{
	struct sk_softc *sc = sc_if->sk_softc;
	struct sk_tx_desc *f = NULL;
	u_int32_t frag, cur, sk_ctl;
	int i;
	struct sk_txmap_entry *entry;
	bus_dmamap_t txmap;

	DPRINTFN(2, ("sk_encap\n"));

	entry = SIMPLEQ_FIRST(&sc_if->sk_txmap_head);
	if (entry == NULL) {
		DPRINTFN(2, ("sk_encap: no txmap available\n"));
		return (ENOBUFS);
	}
	txmap = entry->dmamap;

	cur = frag = *txidx;

#ifdef SK_DEBUG
	if (skdebug >= 2)
		sk_dump_mbuf(m_head);
#endif

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	if (bus_dmamap_load_mbuf(sc->sc_dmatag, txmap, m_head,
	    BUS_DMA_NOWAIT)) {
		DPRINTFN(2, ("sk_encap: dmamap failed\n"));
		return (ENOBUFS);
	}

	if (txmap->dm_nsegs > (SK_TX_RING_CNT - sc_if->sk_cdata.sk_tx_cnt - 2)) {
		DPRINTFN(2, ("sk_encap: too few descriptors free\n"));
		bus_dmamap_unload(sc->sc_dmatag, txmap);
		return (ENOBUFS);
	}

	DPRINTFN(2, ("sk_encap: dm_nsegs=%d\n", txmap->dm_nsegs));

	/* Sync the DMA map. */
	bus_dmamap_sync(sc->sc_dmatag, txmap, 0, txmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	for (i = 0; i < txmap->dm_nsegs; i++) {
		f = &sc_if->sk_rdata->sk_tx_ring[frag];
		f->sk_data_lo = htole32(txmap->dm_segs[i].ds_addr);
		sk_ctl = txmap->dm_segs[i].ds_len | SK_OPCODE_DEFAULT;
		if (i == 0)
			sk_ctl |= SK_TXCTL_FIRSTFRAG;
		else
			sk_ctl |= SK_TXCTL_OWN;
		f->sk_ctl = htole32(sk_ctl);
		cur = frag;
		SK_INC(frag, SK_TX_RING_CNT);
	}

	sc_if->sk_cdata.sk_tx_chain[cur].sk_mbuf = m_head;
	SIMPLEQ_REMOVE_HEAD(&sc_if->sk_txmap_head, link);

	sc_if->sk_cdata.sk_tx_map[cur] = entry;
	sc_if->sk_rdata->sk_tx_ring[cur].sk_ctl |=
	    htole32(SK_TXCTL_LASTFRAG|SK_TXCTL_EOF_INTR);

	/* Sync descriptors before handing to chip */
	SK_CDTXSYNC(sc_if, *txidx, txmap->dm_nsegs,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	sc_if->sk_rdata->sk_tx_ring[*txidx].sk_ctl |=
	    htole32(SK_TXCTL_OWN);

	/* Sync first descriptor to hand it off */
	SK_CDTXSYNC(sc_if, *txidx, 1, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	sc_if->sk_cdata.sk_tx_cnt += txmap->dm_nsegs;

#ifdef SK_DEBUG
	if (skdebug >= 2) {
		struct sk_tx_desc *desc;
		u_int32_t idx;
		for (idx = *txidx; idx != frag; SK_INC(idx, SK_TX_RING_CNT)) {
			desc = &sc_if->sk_rdata->sk_tx_ring[idx];
			sk_dump_txdesc(desc, idx);
		}
	}
#endif

	*txidx = frag;

	DPRINTFN(2, ("sk_encap: completed successfully\n"));

	return (0);
}
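/*
 * Editor's note on the ordering above: every fragment except the first
 * is marked SK_TXCTL_OWN as it is written; the first descriptor's OWN
 * bit is set only after the whole chain has been built and synced, and
 * is then synced again by itself.  Since the chip only starts on a
 * descriptor it owns, this hand-off order keeps it from racing into a
 * half-built chain.
 */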
void
sk_start(struct ifnet *ifp)
{
	struct sk_if_softc *sc_if = ifp->if_softc;
	struct sk_softc *sc = sc_if->sk_softc;
	struct mbuf *m_head = NULL;
	u_int32_t idx = sc_if->sk_cdata.sk_tx_prod;
	int pkts = 0;

	DPRINTFN(2, ("sk_start\n"));

	while (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf == NULL) {
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (sk_encap(sc_if, m_head, &idx)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* now we are committed to transmit the packet */
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		pkts++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
	}
	if (pkts == 0)
		return;

	/* Transmit */
	if (idx != sc_if->sk_cdata.sk_tx_prod) {
		sc_if->sk_cdata.sk_tx_prod = idx;
		CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = 5;
	}
}

void
sk_watchdog(struct ifnet *ifp)
{
	struct sk_if_softc *sc_if = ifp->if_softc;

	/*
	 * Reclaim first as there is a possibility of losing Tx completion
	 * interrupts.
	 */
	sk_txeof(sc_if);
	if (sc_if->sk_cdata.sk_tx_cnt != 0) {
		printf("%s: watchdog timeout\n", sc_if->sk_dev.dv_xname);

		ifp->if_oerrors++;

		sk_init(sc_if);
	}
}

void
skc_shutdown(void *v)
{
	struct sk_softc *sc = v;

	DPRINTFN(2, ("sk_shutdown\n"));
	/* Turn off the 'driver is loaded' LED. */
	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);

	/*
	 * Reset the GEnesis controller. Doing this should also
	 * assert the resets on the attached XMAC(s).
	 */
	sk_reset(sc);
}

static __inline int
sk_rxvalid(struct sk_softc *sc, u_int32_t stat, u_int32_t len)
{
	if (sc->sk_type == SK_GENESIS) {
		if ((stat & XM_RXSTAT_ERRFRAME) == XM_RXSTAT_ERRFRAME ||
		    XM_RXSTAT_BYTES(stat) != len)
			return (0);
	} else {
		if ((stat & (YU_RXSTAT_CRCERR | YU_RXSTAT_LONGERR |
		    YU_RXSTAT_MIIERR | YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC |
		    YU_RXSTAT_JABBER)) != 0 ||
		    (stat & YU_RXSTAT_RXOK) != YU_RXSTAT_RXOK ||
		    YU_RXSTAT_BYTES(stat) != len)
			return (0);
	}

	return (1);
}

void
sk_rxeof(struct sk_if_softc *sc_if)
{
	struct sk_softc *sc = sc_if->sk_softc;
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	struct mbuf *m;
	struct sk_chain *cur_rx;
	struct sk_rx_desc *cur_desc;
	int i, cur, total_len = 0;
	u_int32_t rxstat, sk_ctl;
	bus_dmamap_t dmamap;
	u_int16_t csum1, csum2;

	DPRINTFN(2, ("sk_rxeof\n"));

	i = sc_if->sk_cdata.sk_rx_prod;

	for (;;) {
		cur = i;

		/* Sync the descriptor */
		SK_CDRXSYNC(sc_if, cur,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		sk_ctl = letoh32(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl);
		if ((sk_ctl & SK_RXCTL_OWN) != 0) {
			/* Invalidate the descriptor -- it's not ready yet */
			SK_CDRXSYNC(sc_if, cur, BUS_DMASYNC_PREREAD);
			sc_if->sk_cdata.sk_rx_prod = i;
			break;
		}

		cur_rx = &sc_if->sk_cdata.sk_rx_chain[cur];
		cur_desc = &sc_if->sk_rdata->sk_rx_ring[cur];
		dmamap = sc_if->sk_cdata.sk_rx_jumbo_map;

		bus_dmamap_sync(sc_if->sk_softc->sc_dmatag, dmamap, 0,
		    dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		rxstat = letoh32(cur_desc->sk_xmac_rxstat);
		m = cur_rx->sk_mbuf;
		cur_rx->sk_mbuf = NULL;
		total_len = SK_RXBYTES(letoh32(cur_desc->sk_ctl));

		csum1 = letoh16(sc_if->sk_rdata->sk_rx_ring[i].sk_csum1);
		csum2 = letoh16(sc_if->sk_rdata->sk_rx_ring[i].sk_csum2);

		SK_INC(i, SK_RX_RING_CNT);

		if ((sk_ctl & (SK_RXCTL_STATUS_VALID | SK_RXCTL_FIRSTFRAG |
		    SK_RXCTL_LASTFRAG)) != (SK_RXCTL_STATUS_VALID |
		    SK_RXCTL_FIRSTFRAG | SK_RXCTL_LASTFRAG) ||
		    total_len < SK_MIN_FRAMELEN ||
		    total_len > SK_JUMBO_FRAMELEN ||
		    sk_rxvalid(sc, rxstat, total_len) == 0) {
			ifp->if_ierrors++;
			sk_newbuf(sc_if, cur, m, dmamap);
			continue;
		}

		/*
		 * Try to allocate a new jumbo buffer. If that
		 * fails, copy the packet to mbufs and put the
		 * jumbo buffer back in the ring so it can be
		 * re-used. If allocating mbufs fails, then we
		 * have to drop the packet.
		 */
		if (sk_newbuf(sc_if, cur, NULL, dmamap) == ENOBUFS) {
			struct mbuf *m0;
			m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN,
			    ifp, NULL);
			sk_newbuf(sc_if, cur, m, dmamap);
			if (m0 == NULL) {
				ifp->if_ierrors++;
				continue;
			}
			m = m0;
		} else {
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = total_len;
		}

		ifp->if_ipackets++;

		sk_rxcsum(ifp, m, csum1, csum2);

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif
		/* pass it on. */
		ether_input_mbuf(ifp, m);
	}
}

void
sk_rxcsum(struct ifnet *ifp, struct mbuf *m, const u_int16_t csum1,
    const u_int16_t csum2)
{
	struct ether_header *eh;
	struct ip *ip;
	u_int8_t *pp;
	int hlen, len, plen;
	u_int16_t iph_csum, ipo_csum, ipd_csum, csum;

	pp = mtod(m, u_int8_t *);
	plen = m->m_pkthdr.len;
	if (plen < sizeof(*eh))
		return;
	eh = (struct ether_header *)pp;
	iph_csum = in_cksum_addword(csum1, (~csum2 & 0xffff));

	if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
		u_int16_t *xp = (u_int16_t *)pp;

		if (xp[1] != htons(ETHERTYPE_IP))
			return;
		iph_csum = in_cksum_addword(iph_csum, (~xp[0] & 0xffff));
		iph_csum = in_cksum_addword(iph_csum, (~xp[1] & 0xffff));
		xp = (u_int16_t *)(pp + sizeof(struct ip));
		iph_csum = in_cksum_addword(iph_csum, xp[0]);
		iph_csum = in_cksum_addword(iph_csum, xp[1]);
		pp += EVL_ENCAPLEN;
	} else if (eh->ether_type != htons(ETHERTYPE_IP))
		return;

	pp += sizeof(*eh);
	plen -= sizeof(*eh);

	ip = (struct ip *)pp;

	if (ip->ip_v != IPVERSION)
		return;

	hlen = ip->ip_hl << 2;
	if (hlen < sizeof(struct ip))
		return;
	if (hlen > ntohs(ip->ip_len))
		return;

	/* Don't deal with truncated or padded packets. */
	if (plen != ntohs(ip->ip_len))
		return;

	len = hlen - sizeof(struct ip);
	if (len > 0) {
		u_int16_t *p;

		p = (u_int16_t *)(ip + 1);
		for (ipo_csum = 0; len > 0; len -= sizeof(*p), p++)
			ipo_csum = in_cksum_addword(ipo_csum, *p);
		iph_csum = in_cksum_addword(iph_csum, ipo_csum);
		ipd_csum = in_cksum_addword(csum2, (~ipo_csum & 0xffff));
	} else
		ipd_csum = csum2;

	if (iph_csum != 0xffff)
		return;
	m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

	if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
		return;	/* ip frag, we're done for now */

	pp += hlen;

	/* Only know checksum protocol for udp/tcp */
	if (ip->ip_p == IPPROTO_UDP) {
		struct udphdr *uh = (struct udphdr *)pp;

		if (uh->uh_sum == 0)	/* udp with no checksum */
			return;
	} else if (ip->ip_p != IPPROTO_TCP)
		return;

	csum = in_cksum_phdr(ip->ip_src.s_addr, ip->ip_dst.s_addr,
	    htonl(ntohs(ip->ip_len) - hlen + ip->ip_p) + ipd_csum);
	if (csum == 0xffff) {
		m->m_pkthdr.csum_flags |= (ip->ip_p == IPPROTO_TCP) ?
		    M_TCP_CSUM_IN_OK : M_UDP_CSUM_IN_OK;
	}
}
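/*
 * Editor's note: sk_rxcsum() above folds the two hardware partial sums
 * with one's-complement arithmetic.  A sketch of the end-around-carry
 * addition it relies on, under the assumption that this is what
 * in_cksum_addword() performs (example_cksum_addword() is invented for
 * illustration and is not part of the driver):
 */
#if 0
static u_int16_t
example_cksum_addword(u_int16_t a, u_int16_t b)
{
	u_int32_t sum = (u_int32_t)a + b;

	/* Fold the carry out of bit 15 back into the low word. */
	return ((sum & 0xffff) + (sum >> 16));
}
#endif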
void
sk_txeof(struct sk_if_softc *sc_if)
{
	struct sk_softc *sc = sc_if->sk_softc;
	struct sk_tx_desc *cur_tx;
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	u_int32_t idx, sk_ctl;
	struct sk_txmap_entry *entry;

	DPRINTFN(2, ("sk_txeof\n"));

	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	idx = sc_if->sk_cdata.sk_tx_cons;
	while (idx != sc_if->sk_cdata.sk_tx_prod) {
		SK_CDTXSYNC(sc_if, idx, 1,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		cur_tx = &sc_if->sk_rdata->sk_tx_ring[idx];
		sk_ctl = letoh32(cur_tx->sk_ctl);
#ifdef SK_DEBUG
		if (skdebug >= 2)
			sk_dump_txdesc(cur_tx, idx);
#endif
		if (sk_ctl & SK_TXCTL_OWN) {
			/* Still owned by the chip -- done for now. */
			SK_CDTXSYNC(sc_if, idx, 1, BUS_DMASYNC_PREREAD);
			break;
		}
		if (sk_ctl & SK_TXCTL_LASTFRAG)
			ifp->if_opackets++;
		if (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf != NULL) {
			entry = sc_if->sk_cdata.sk_tx_map[idx];

			m_freem(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf);
			sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf = NULL;

			bus_dmamap_sync(sc->sc_dmatag, entry->dmamap, 0,
			    entry->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);

			bus_dmamap_unload(sc->sc_dmatag, entry->dmamap);
			SIMPLEQ_INSERT_TAIL(&sc_if->sk_txmap_head, entry,
			    link);
			sc_if->sk_cdata.sk_tx_map[idx] = NULL;
		}
		sc_if->sk_cdata.sk_tx_cnt--;
		SK_INC(idx, SK_TX_RING_CNT);
	}
	ifp->if_timer = sc_if->sk_cdata.sk_tx_cnt > 0 ? 5 : 0;

	if (sc_if->sk_cdata.sk_tx_cnt < SK_TX_RING_CNT - 2)
		ifp->if_flags &= ~IFF_OACTIVE;

	sc_if->sk_cdata.sk_tx_cons = idx;
}

void
sk_tick(void *xsc_if)
{
	struct sk_if_softc *sc_if = xsc_if;
	struct mii_data *mii = &sc_if->sk_mii;
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	int i;

	DPRINTFN(2, ("sk_tick\n"));

	if (!(ifp->if_flags & IFF_UP))
		return;

	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		sk_intr_bcom(sc_if);
		return;
	}

	/*
	 * According to SysKonnect, the correct way to verify that
	 * the link has come back up is to poll bit 0 of the GPIO
	 * register three times. This pin has the signal from the
	 * link sync pin connected to it; if we read the same link
	 * state 3 times in a row, we know the link is up.
	 */
	for (i = 0; i < 3; i++) {
		if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET)
			break;
	}

	if (i != 3) {
		timeout_add_sec(&sc_if->sk_tick_ch, 1);
		return;
	}

	/* Turn the GP0 interrupt back on. */
	SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET);
	SK_XM_READ_2(sc_if, XM_ISR);
	mii_tick(mii);
	timeout_del(&sc_if->sk_tick_ch);
}

void
sk_yukon_tick(void *xsc_if)
{
	struct sk_if_softc *sc_if = xsc_if;
	struct mii_data *mii = &sc_if->sk_mii;
	int s;

	s = splnet();
	mii_tick(mii);
	splx(s);
	timeout_add_sec(&sc_if->sk_tick_ch, 1);
}
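
/*
 * Service an interrupt from the Broadcom PHY.  The XMAC's receive and
 * transmit state machines are held off while the PHY is being serviced
 * and re-enabled on the way out.
 */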
2064 */ 2065 status = sk_xmac_miibus_readreg((struct device *)sc_if, 2066 SK_PHYADDR_BCOM, BRGPHY_MII_ISR); 2067 2068 if (!(ifp->if_flags & IFF_RUNNING)) { 2069 sk_init_xmac(sc_if); 2070 return; 2071 } 2072 2073 if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) { 2074 int lstat; 2075 lstat = sk_xmac_miibus_readreg((struct device *)sc_if, 2076 SK_PHYADDR_BCOM, BRGPHY_MII_AUXSTS); 2077 2078 if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) { 2079 mii_mediachg(mii); 2080 /* Turn off the link LED. */ 2081 SK_IF_WRITE_1(sc_if, 0, 2082 SK_LINKLED1_CTL, SK_LINKLED_OFF); 2083 sc_if->sk_link = 0; 2084 } else if (status & BRGPHY_ISR_LNK_CHG) { 2085 sk_xmac_miibus_writereg((struct device *)sc_if, 2086 SK_PHYADDR_BCOM, BRGPHY_MII_IMR, 0xFF00); 2087 mii_tick(mii); 2088 sc_if->sk_link = 1; 2089 /* Turn on the link LED. */ 2090 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, 2091 SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF| 2092 SK_LINKLED_BLINK_OFF); 2093 } else { 2094 mii_tick(mii); 2095 timeout_add_sec(&sc_if->sk_tick_ch, 1); 2096 } 2097 } 2098 2099 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB); 2100 } 2101 2102 void 2103 sk_intr_xmac(struct sk_if_softc *sc_if) 2104 { 2105 u_int16_t status = SK_XM_READ_2(sc_if, XM_ISR); 2106 2107 DPRINTFN(2, ("sk_intr_xmac\n")); 2108 2109 if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) { 2110 if (status & XM_ISR_GP0_SET) { 2111 SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET); 2112 timeout_add_sec(&sc_if->sk_tick_ch, 1); 2113 } 2114 2115 if (status & XM_ISR_AUTONEG_DONE) { 2116 timeout_add_sec(&sc_if->sk_tick_ch, 1); 2117 } 2118 } 2119 2120 if (status & XM_IMR_TX_UNDERRUN) 2121 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO); 2122 2123 if (status & XM_IMR_RX_OVERRUN) 2124 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO); 2125 } 2126 2127 void 2128 sk_intr_yukon(struct sk_if_softc *sc_if) 2129 { 2130 u_int8_t status; 2131 2132 status = SK_IF_READ_1(sc_if, 0, SK_GMAC_ISR); 2133 /* RX overrun */ 2134 if ((status & SK_GMAC_INT_RX_OVER) != 0) { 2135 SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, 2136 SK_RFCTL_RX_FIFO_OVER); 2137 } 2138 /* TX underrun */ 2139 if ((status & SK_GMAC_INT_TX_UNDER) != 0) { 2140 SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, 2141 SK_TFCTL_TX_FIFO_UNDER); 2142 } 2143 2144 DPRINTFN(2, ("sk_intr_yukon status=%#x\n", status)); 2145 } 2146 2147 int 2148 sk_intr(void *xsc) 2149 { 2150 struct sk_softc *sc = xsc; 2151 struct sk_if_softc *sc_if0 = sc->sk_if[SK_PORT_A]; 2152 struct sk_if_softc *sc_if1 = sc->sk_if[SK_PORT_B]; 2153 struct ifnet *ifp0 = NULL, *ifp1 = NULL; 2154 u_int32_t status; 2155 int claimed = 0; 2156 2157 status = CSR_READ_4(sc, SK_ISSR); 2158 if (status == 0 || status == 0xffffffff) 2159 return (0); 2160 2161 if (sc_if0 != NULL) 2162 ifp0 = &sc_if0->arpcom.ac_if; 2163 if (sc_if1 != NULL) 2164 ifp1 = &sc_if1->arpcom.ac_if; 2165 2166 for (; (status &= sc->sk_intrmask) != 0;) { 2167 claimed = 1; 2168 2169 /* Handle receive interrupts first. */ 2170 if (sc_if0 && (status & SK_ISR_RX1_EOF)) { 2171 sk_rxeof(sc_if0); 2172 CSR_WRITE_4(sc, SK_BMU_RX_CSR0, 2173 SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START); 2174 } 2175 if (sc_if1 && (status & SK_ISR_RX2_EOF)) { 2176 sk_rxeof(sc_if1); 2177 CSR_WRITE_4(sc, SK_BMU_RX_CSR1, 2178 SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START); 2179 } 2180 2181 /* Then transmit interrupts. 
int
sk_intr(void *xsc)
{
	struct sk_softc *sc = xsc;
	struct sk_if_softc *sc_if0 = sc->sk_if[SK_PORT_A];
	struct sk_if_softc *sc_if1 = sc->sk_if[SK_PORT_B];
	struct ifnet *ifp0 = NULL, *ifp1 = NULL;
	u_int32_t status;
	int claimed = 0;

	status = CSR_READ_4(sc, SK_ISSR);
	if (status == 0 || status == 0xffffffff)
		return (0);

	if (sc_if0 != NULL)
		ifp0 = &sc_if0->arpcom.ac_if;
	if (sc_if1 != NULL)
		ifp1 = &sc_if1->arpcom.ac_if;

	while ((status &= sc->sk_intrmask) != 0) {
		claimed = 1;

		/* Handle receive interrupts first. */
		if (sc_if0 && (status & SK_ISR_RX1_EOF)) {
			sk_rxeof(sc_if0);
			CSR_WRITE_4(sc, SK_BMU_RX_CSR0,
			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
		}
		if (sc_if1 && (status & SK_ISR_RX2_EOF)) {
			sk_rxeof(sc_if1);
			CSR_WRITE_4(sc, SK_BMU_RX_CSR1,
			    SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START);
		}

		/* Then transmit interrupts. */
		if (sc_if0 && (status & SK_ISR_TX1_S_EOF)) {
			sk_txeof(sc_if0);
			CSR_WRITE_4(sc, SK_BMU_TXS_CSR0,
			    SK_TXBMU_CLR_IRQ_EOF);
		}
		if (sc_if1 && (status & SK_ISR_TX2_S_EOF)) {
			sk_txeof(sc_if1);
			CSR_WRITE_4(sc, SK_BMU_TXS_CSR1,
			    SK_TXBMU_CLR_IRQ_EOF);
		}

		/* Then MAC interrupts. */
		if (sc_if0 && (status & SK_ISR_MAC1) &&
		    (ifp0->if_flags & IFF_RUNNING)) {
			if (SK_IS_GENESIS(sc))
				sk_intr_xmac(sc_if0);
			else
				sk_intr_yukon(sc_if0);
		}

		if (sc_if1 && (status & SK_ISR_MAC2) &&
		    (ifp1->if_flags & IFF_RUNNING)) {
			if (SK_IS_GENESIS(sc))
				sk_intr_xmac(sc_if1);
			else
				sk_intr_yukon(sc_if1);
		}

		if (status & SK_ISR_EXTERNAL_REG) {
			if (sc_if0 != NULL &&
			    sc_if0->sk_phytype == SK_PHYTYPE_BCOM)
				sk_intr_bcom(sc_if0);

			if (sc_if1 != NULL &&
			    sc_if1->sk_phytype == SK_PHYTYPE_BCOM)
				sk_intr_bcom(sc_if1);
		}
		status = CSR_READ_4(sc, SK_ISSR);
	}

	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	if (ifp0 != NULL && !IFQ_IS_EMPTY(&ifp0->if_snd))
		sk_start(ifp0);
	if (ifp1 != NULL && !IFQ_IS_EMPTY(&ifp1->if_snd))
		sk_start(ifp1);

	return (claimed);
}
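
/*
 * Bring an XMAC out of reset and program it: station address, frame
 * filtering, FIFO thresholds and the MAC arbiter.  This is the
 * GEnesis-only counterpart of sk_init_yukon() below.
 */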
2287 */ 2288 if (sk_xmac_miibus_readreg((struct device *)sc_if, 2289 SK_PHYADDR_BCOM, 0x03) == 0x6041) { 2290 while(bhack[i].reg) { 2291 sk_xmac_miibus_writereg((struct device *)sc_if, 2292 SK_PHYADDR_BCOM, bhack[i].reg, 2293 bhack[i].val); 2294 i++; 2295 } 2296 } 2297 } 2298 2299 /* Set station address */ 2300 SK_XM_WRITE_2(sc_if, XM_PAR0, 2301 letoh16(*(u_int16_t *)(&sc_if->arpcom.ac_enaddr[0]))); 2302 SK_XM_WRITE_2(sc_if, XM_PAR1, 2303 letoh16(*(u_int16_t *)(&sc_if->arpcom.ac_enaddr[2]))); 2304 SK_XM_WRITE_2(sc_if, XM_PAR2, 2305 letoh16(*(u_int16_t *)(&sc_if->arpcom.ac_enaddr[4]))); 2306 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION); 2307 2308 if (ifp->if_flags & IFF_BROADCAST) 2309 SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD); 2310 else 2311 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD); 2312 2313 /* We don't need the FCS appended to the packet. */ 2314 SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS); 2315 2316 /* We want short frames padded to 60 bytes. */ 2317 SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD); 2318 2319 /* 2320 * Enable the reception of all error frames. This is 2321 * a necessary evil due to the design of the XMAC. The 2322 * XMAC's receive FIFO is only 8K in size, however jumbo 2323 * frames can be up to 9000 bytes in length. When bad 2324 * frame filtering is enabled, the XMAC's RX FIFO operates 2325 * in 'store and forward' mode. For this to work, the 2326 * entire frame has to fit into the FIFO, but that means 2327 * that jumbo frames larger than 8192 bytes will be 2328 * truncated. Disabling all bad frame filtering causes 2329 * the RX FIFO to operate in streaming mode, in which 2330 * case the XMAC will start transfering frames out of the 2331 * RX FIFO as soon as the FIFO threshold is reached. 2332 */ 2333 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES| 2334 XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS| 2335 XM_MODE_RX_INRANGELEN); 2336 2337 SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK); 2338 2339 /* 2340 * Bump up the transmit threshold. This helps hold off transmit 2341 * underruns when we're blasting traffic from both ports at once. 
2342 */ 2343 SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH); 2344 2345 /* Set promiscuous mode */ 2346 sk_setpromisc(sc_if); 2347 2348 /* Set multicast filter */ 2349 sk_setmulti(sc_if); 2350 2351 /* Clear and enable interrupts */ 2352 SK_XM_READ_2(sc_if, XM_ISR); 2353 if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) 2354 SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS); 2355 else 2356 SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF); 2357 2358 /* Configure MAC arbiter */ 2359 switch(sc_if->sk_xmac_rev) { 2360 case XM_XMAC_REV_B2: 2361 sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2); 2362 sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2); 2363 sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2); 2364 sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2); 2365 sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2); 2366 sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2); 2367 sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2); 2368 sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2); 2369 sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2); 2370 break; 2371 case XM_XMAC_REV_C1: 2372 sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1); 2373 sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1); 2374 sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1); 2375 sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1); 2376 sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1); 2377 sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1); 2378 sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1); 2379 sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1); 2380 sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2); 2381 break; 2382 default: 2383 break; 2384 } 2385 sk_win_write_2(sc, SK_MACARB_CTL, 2386 SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF); 2387 2388 sc_if->sk_link = 1; 2389 } 2390 2391 void sk_init_yukon(struct sk_if_softc *sc_if) 2392 { 2393 u_int32_t phy, v; 2394 u_int16_t reg; 2395 struct sk_softc *sc; 2396 int i; 2397 2398 sc = sc_if->sk_softc; 2399 2400 DPRINTFN(2, ("sk_init_yukon: start: sk_csr=%#x\n", 2401 CSR_READ_4(sc_if->sk_softc, SK_CSR))); 2402 2403 if (sc->sk_type == SK_YUKON_LITE && 2404 sc->sk_rev >= SK_YUKON_LITE_REV_A3) { 2405 /* 2406 * Workaround code for COMA mode, set PHY reset. 
void
sk_init_yukon(struct sk_if_softc *sc_if)
{
	u_int32_t phy, v;
	u_int16_t reg;
	struct sk_softc *sc;
	int i;

	sc = sc_if->sk_softc;

	DPRINTFN(2, ("sk_init_yukon: start: sk_csr=%#x\n",
	    CSR_READ_4(sc_if->sk_softc, SK_CSR)));

	if (sc->sk_type == SK_YUKON_LITE &&
	    sc->sk_rev >= SK_YUKON_LITE_REV_A3) {
		/*
		 * Workaround code for COMA mode: set the PHY reset.
		 * Otherwise the chip will not come out of powerdown
		 * (COMA) correctly.
		 */
		v = sk_win_read_4(sc, SK_GPIO);
		v |= SK_GPIO_DIR9 | SK_GPIO_DAT9;
		sk_win_write_4(sc, SK_GPIO, v);
	}

	DPRINTFN(6, ("sk_init_yukon: 1\n"));

	/* GMAC and GPHY Reset */
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET);
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
	DELAY(1000);

	DPRINTFN(6, ("sk_init_yukon: 2\n"));

	if (sc->sk_type == SK_YUKON_LITE &&
	    sc->sk_rev >= SK_YUKON_LITE_REV_A3) {
		/* COMA mode workaround: clear the PHY reset again. */
		v = sk_win_read_4(sc, SK_GPIO);
		v |= SK_GPIO_DIR9;
		v &= ~SK_GPIO_DAT9;
		sk_win_write_4(sc, SK_GPIO, v);
	}

	phy = SK_GPHY_INT_POL_HI | SK_GPHY_DIS_FC | SK_GPHY_DIS_SLEEP |
	    SK_GPHY_ENA_XC | SK_GPHY_ANEG_ALL | SK_GPHY_ENA_PAUSE;

	if (sc->sk_coppertype)
		phy |= SK_GPHY_COPPER;
	else
		phy |= SK_GPHY_FIBER;

	DPRINTFN(3, ("sk_init_yukon: phy=%#x\n", phy));

	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_SET);
	DELAY(1000);
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_CLEAR);
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF |
	    SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR);

	DPRINTFN(3, ("sk_init_yukon: gmac_ctrl=%#x\n",
	    SK_IF_READ_4(sc_if, 0, SK_GMAC_CTRL)));

	DPRINTFN(6, ("sk_init_yukon: 3\n"));

	/* unused read of the interrupt source register */
	DPRINTFN(6, ("sk_init_yukon: 4\n"));
	SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);

	DPRINTFN(6, ("sk_init_yukon: 4a\n"));
	reg = SK_YU_READ_2(sc_if, YUKON_PAR);
	DPRINTFN(6, ("sk_init_yukon: YUKON_PAR=%#x\n", reg));

	/* MIB Counter Clear Mode set */
	reg |= YU_PAR_MIB_CLR;
	DPRINTFN(6, ("sk_init_yukon: YUKON_PAR=%#x\n", reg));
	DPRINTFN(6, ("sk_init_yukon: 4b\n"));
	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);

	/* MIB Counter Clear Mode clear */
	DPRINTFN(6, ("sk_init_yukon: 5\n"));
	reg &= ~YU_PAR_MIB_CLR;
	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);

	/* receive control reg */
	DPRINTFN(6, ("sk_init_yukon: 7\n"));
	SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR);

	/* transmit parameter register */
	DPRINTFN(6, ("sk_init_yukon: 8\n"));
	SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) |
	    YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a));

	/* serial mode register */
	DPRINTFN(6, ("sk_init_yukon: 9\n"));
	SK_YU_WRITE_2(sc_if, YUKON_SMR, YU_SMR_DATA_BLIND(0x1c) |
	    YU_SMR_MFL_VLAN | YU_SMR_MFL_JUMBO |
	    YU_SMR_IPG_DATA(0x1e));

	DPRINTFN(6, ("sk_init_yukon: 10\n"));
	/* Setup Yukon's address */
	for (i = 0; i < 3; i++) {
		/* Write Source Address 1 (unicast filter) */
		SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4,
		    sc_if->arpcom.ac_enaddr[i * 2] |
		    sc_if->arpcom.ac_enaddr[i * 2 + 1] << 8);
	}

	for (i = 0; i < 3; i++) {
		reg = sk_win_read_2(sc_if->sk_softc,
		    SK_MAC1_0 + i * 2 + sc_if->sk_port * 8);
		SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4, reg);
	}

	/* Set promiscuous mode */
	sk_setpromisc(sc_if);

	/* Set multicast filter */
	DPRINTFN(6, ("sk_init_yukon: 11\n"));
	sk_setmulti(sc_if);

	/* enable interrupt mask for counter overflows */
	DPRINTFN(6, ("sk_init_yukon: 12\n"));
	SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0);
	SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0);
	SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0);

	/* Configure RX MAC FIFO Flush Mask */
	v = YU_RXSTAT_FOFL | YU_RXSTAT_CRCERR | YU_RXSTAT_MIIERR |
	    YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC | YU_RXSTAT_RUNT |
	    YU_RXSTAT_JABBER;
	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_MASK, v);

	/* Disable RX MAC FIFO Flush for YUKON-Lite Rev. A0 only */
	if (sc->sk_type == SK_YUKON_LITE && sc->sk_rev == SK_YUKON_LITE_REV_A0)
		v = SK_TFCTL_OPERATION_ON;
	else
		v = SK_TFCTL_OPERATION_ON | SK_RFCTL_FIFO_FLUSH_ON;
	/* Configure RX MAC FIFO */
	SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR);
	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_CTRL_TEST, v);

	/* Increase flush threshold to 64 bytes */
	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_THRESHOLD,
	    SK_RFCTL_FIFO_THRESHOLD + 1);

	/* Configure TX MAC FIFO */
	SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR);
	SK_IF_WRITE_2(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON);

	DPRINTFN(6, ("sk_init_yukon: end\n"));
}
2595 */ 2596 SK_IF_WRITE_4(sc_if, 0, SK_DPT_INIT, SK_DPT_TIMER_MAX); 2597 2598 /* Configure I2C registers */ 2599 2600 /* Configure XMAC(s) */ 2601 switch (sc->sk_type) { 2602 case SK_GENESIS: 2603 sk_init_xmac(sc_if); 2604 break; 2605 case SK_YUKON: 2606 case SK_YUKON_LITE: 2607 case SK_YUKON_LP: 2608 sk_init_yukon(sc_if); 2609 break; 2610 } 2611 mii_mediachg(mii); 2612 2613 if (SK_IS_GENESIS(sc)) { 2614 /* Configure MAC FIFOs */ 2615 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET); 2616 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END); 2617 SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON); 2618 2619 SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET); 2620 SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END); 2621 SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON); 2622 } 2623 2624 /* Configure transmit arbiter(s) */ 2625 SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, 2626 SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON); 2627 2628 /* Configure RAMbuffers */ 2629 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET); 2630 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart); 2631 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart); 2632 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart); 2633 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend); 2634 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON); 2635 2636 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET); 2637 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON); 2638 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart); 2639 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart); 2640 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart); 2641 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend); 2642 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON); 2643 2644 /* Configure BMUs */ 2645 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE); 2646 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO, 2647 SK_RX_RING_ADDR(sc_if, 0)); 2648 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI, 0); 2649 2650 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE); 2651 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO, 2652 SK_TX_RING_ADDR(sc_if, 0)); 2653 SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI, 0); 2654 2655 /* Init descriptors */ 2656 if (sk_init_rx_ring(sc_if) == ENOBUFS) { 2657 printf("%s: initialization failed: no " 2658 "memory for rx buffers\n", sc_if->sk_dev.dv_xname); 2659 sk_stop(sc_if, 0); 2660 splx(s); 2661 return; 2662 } 2663 2664 if (sk_init_tx_ring(sc_if) == ENOBUFS) { 2665 printf("%s: initialization failed: no " 2666 "memory for tx buffers\n", sc_if->sk_dev.dv_xname); 2667 sk_stop(sc_if, 0); 2668 splx(s); 2669 return; 2670 } 2671 2672 /* Configure interrupt handling */ 2673 CSR_READ_4(sc, SK_ISSR); 2674 if (sc_if->sk_port == SK_PORT_A) 2675 sc->sk_intrmask |= SK_INTRS1; 2676 else 2677 sc->sk_intrmask |= SK_INTRS2; 2678 2679 sc->sk_intrmask |= SK_ISR_EXTERNAL_REG; 2680 2681 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask); 2682 2683 /* Start BMUs. 

	/* Start BMUs. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START);

	if (SK_IS_GENESIS(sc)) {
		/* Enable XMACs TX and RX state machines */
		SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE);
		SK_XM_SETBIT_2(sc_if, XM_MMUCMD,
		    XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);
	}

	if (SK_IS_YUKON(sc)) {
		u_int16_t reg = SK_YU_READ_2(sc_if, YUKON_GPCR);
		reg |= YU_GPCR_TXEN | YU_GPCR_RXEN;
		SK_YU_WRITE_2(sc_if, YUKON_GPCR, reg);
	}

	/* Activate descriptor polling timer */
	SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_START);
	/* start transfer of Tx descriptors */
	CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	if (SK_IS_YUKON(sc))
		timeout_add_sec(&sc_if->sk_tick_ch, 1);

	splx(s);
}
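
/*
 * Stop the adapter and free any mbufs still sitting on the RX and TX
 * rings.  Unless softonly is set, the transmit and receive BMUs are
 * halted and the MAC is put back into reset first.
 */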
void
sk_stop(struct sk_if_softc *sc_if, int softonly)
{
	struct sk_softc *sc = sc_if->sk_softc;
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	struct sk_txmap_entry *dma;
	int i;
	u_int32_t val;

	DPRINTFN(2, ("sk_stop\n"));

	timeout_del(&sc_if->sk_tick_ch);

	ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE);

	if (!softonly) {
		/* stop Tx descriptor polling timer */
		SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_STOP);
		/* stop transfer of Tx descriptors */
		CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_STOP);
		for (i = 0; i < SK_TIMEOUT; i++) {
			val = CSR_READ_4(sc, sc_if->sk_tx_bmu);
			if (!(val & SK_TXBMU_TX_STOP))
				break;
			DELAY(1);
		}
		if (i == SK_TIMEOUT)
			printf("%s: cannot stop transfer of Tx descriptors\n",
			    sc_if->sk_dev.dv_xname);
		/* stop transfer of Rx descriptors */
		SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_STOP);
		for (i = 0; i < SK_TIMEOUT; i++) {
			val = SK_IF_READ_4(sc_if, 0, SK_RXQ1_BMU_CSR);
			if (!(val & SK_RXBMU_RX_STOP))
				break;
			DELAY(1);
		}
		if (i == SK_TIMEOUT)
			printf("%s: cannot stop transfer of Rx descriptors\n",
			    sc_if->sk_dev.dv_xname);

		if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
			/* Put PHY back into reset. */
			val = sk_win_read_4(sc, SK_GPIO);
			if (sc_if->sk_port == SK_PORT_A) {
				val |= SK_GPIO_DIR0;
				val &= ~SK_GPIO_DAT0;
			} else {
				val |= SK_GPIO_DIR2;
				val &= ~SK_GPIO_DAT2;
			}
			sk_win_write_4(sc, SK_GPIO, val);
		}

		/* Turn off various components of this interface. */
		SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
		switch (sc->sk_type) {
		case SK_GENESIS:
			SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL,
			    SK_TXMACCTL_XMAC_RESET);
			SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET);
			break;
		case SK_YUKON:
		case SK_YUKON_LITE:
		case SK_YUKON_LP:
			SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST,
			    SK_RFCTL_RESET_SET);
			SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST,
			    SK_TFCTL_RESET_SET);
			break;
		}
		SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
		SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST,
		    SK_RBCTL_RESET|SK_RBCTL_OFF);
		SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE);
		SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST,
		    SK_RBCTL_RESET|SK_RBCTL_OFF);
		SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
		SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL,
		    SK_RXLEDCTL_COUNTER_STOP);
		SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL,
		    SK_RXLEDCTL_COUNTER_STOP);
		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
		    SK_LINKLED_LINKSYNC_OFF);

		/* Disable interrupts */
		if (sc_if->sk_port == SK_PORT_A)
			sc->sk_intrmask &= ~SK_INTRS1;
		else
			sc->sk_intrmask &= ~SK_INTRS2;
		CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

		SK_XM_READ_2(sc_if, XM_ISR);
		SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);
	}

	/* Free RX and TX mbufs still in the queues. */
	for (i = 0; i < SK_RX_RING_CNT; i++) {
		if (sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf != NULL) {
			m_freem(sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf);
			sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf = NULL;
		}
	}

	for (i = 0; i < SK_TX_RING_CNT; i++) {
		if (sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf != NULL) {
			m_freem(sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf);
			sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf = NULL;
			SIMPLEQ_INSERT_HEAD(&sc_if->sk_txmap_head,
			    sc_if->sk_cdata.sk_tx_map[i], link);
			sc_if->sk_cdata.sk_tx_map[i] = NULL;
		}
	}

	while ((dma = SIMPLEQ_FIRST(&sc_if->sk_txmap_head))) {
		SIMPLEQ_REMOVE_HEAD(&sc_if->sk_txmap_head, link);
		bus_dmamap_destroy(sc->sc_dmatag, dma->dmamap);
		free(dma, M_DEVBUF);
	}
}

struct cfattach skc_ca = {
	sizeof(struct sk_softc), skc_probe, skc_attach, skc_detach
};

struct cfdriver skc_cd = {
	NULL, "skc", DV_DULL
};

struct cfattach sk_ca = {
	sizeof(struct sk_if_softc), sk_probe, sk_attach, sk_detach
};

struct cfdriver sk_cd = {
	NULL, "sk", DV_IFNET
};

#ifdef SK_DEBUG
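/*
 * Debug helpers, compiled in only when SK_DEBUG is defined: dump a TX
 * descriptor, a raw byte buffer, or an entire mbuf chain to the console.
 */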
void
sk_dump_txdesc(struct sk_tx_desc *desc, int idx)
{
#define DESC_PRINT(X)					\
	if (X)						\
		printf("txdesc[%d]." #X "=%#x\n",	\
		    idx, X);

	DESC_PRINT(letoh32(desc->sk_ctl));
	DESC_PRINT(letoh32(desc->sk_next));
	DESC_PRINT(letoh32(desc->sk_data_lo));
	DESC_PRINT(letoh32(desc->sk_data_hi));
	DESC_PRINT(letoh32(desc->sk_xmac_txstat));
	DESC_PRINT(letoh16(desc->sk_rsvd0));
	DESC_PRINT(letoh16(desc->sk_csum_startval));
	DESC_PRINT(letoh16(desc->sk_csum_startpos));
	DESC_PRINT(letoh16(desc->sk_csum_writepos));
	DESC_PRINT(letoh16(desc->sk_rsvd1));
#undef DESC_PRINT
}

void
sk_dump_bytes(const char *data, int len)
{
	int c, i, j;

	for (i = 0; i < len; i += 16) {
		printf("%08x  ", i);
		c = len - i;
		if (c > 16)
			c = 16;

		for (j = 0; j < c; j++) {
			printf("%02x ", data[i + j] & 0xff);
			if ((j & 0xf) == 7 && j > 0)
				printf(" ");
		}

		for (; j < 16; j++)
			printf("   ");
		printf("  ");

		for (j = 0; j < c; j++) {
			int ch = data[i + j] & 0xff;
			printf("%c", ' ' <= ch && ch <= '~' ? ch : ' ');
		}

		printf("\n");

		if (c < 16)
			break;
	}
}

void
sk_dump_mbuf(struct mbuf *m)
{
	int count = m->m_pkthdr.len;

	printf("m=%p, m->m_pkthdr.len=%d\n", m, m->m_pkthdr.len);

	while (count > 0 && m) {
		printf("m=%p, m->m_data=%p, m->m_len=%d\n",
		    m, m->m_data, m->m_len);
		sk_dump_bytes(mtod(m, char *), m->m_len);

		count -= m->m_len;
		m = m->m_next;
	}
}
#endif