1 /* 2 * Copyright (c) 1997, 1998, 1999, 2000 3 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by Bill Paul. 16 * 4. Neither the name of the author nor the names of any co-contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGE. 31 * 32 * $OpenBSD: if_sk.c,v 1.33 2003/08/12 05:23:06 nate Exp $ 33 * $FreeBSD: src/sys/pci/if_sk.c,v 1.19.2.9 2003/03/05 18:42:34 njl Exp $ 34 * $DragonFly: src/sys/dev/netif/sk/if_sk.c,v 1.48 2006/10/25 20:55:59 dillon Exp $ 35 */ 36 37 /* 38 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu> 39 * 40 * Permission to use, copy, modify, and distribute this software for any 41 * purpose with or without fee is hereby granted, provided that the above 42 * copyright notice and this permission notice appear in all copies. 43 * 44 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 45 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 46 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 47 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 48 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 49 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 50 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 51 */ 52 53 /* 54 * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports 55 * the SK-984x series adapters, both single port and dual port. 56 * References: 57 * The XaQti XMAC II datasheet, 58 * http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf 59 * The SysKonnect GEnesis manual, http://www.syskonnect.com 60 * 61 * Note: XaQti has been aquired by Vitesse, and Vitesse does not have the 62 * XMAC II datasheet online. 
I have put my copy at people.freebsd.org as a 63 * convenience to others until Vitesse corrects this problem: 64 * 65 * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf 66 * 67 * Written by Bill Paul <wpaul@ee.columbia.edu> 68 * Department of Electrical Engineering 69 * Columbia University, New York City 70 */ 71 72 /* 73 * The SysKonnect gigabit ethernet adapters consist of two main 74 * components: the SysKonnect GEnesis controller chip and the XaQti Corp. 75 * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC 76 * components and a PHY while the GEnesis controller provides a PCI 77 * interface with DMA support. Each card may have between 512K and 78 * 2MB of SRAM on board depending on the configuration. 79 * 80 * The SysKonnect GEnesis controller can have either one or two XMAC 81 * chips connected to it, allowing single or dual port NIC configurations. 82 * SysKonnect has the distinction of being the only vendor on the market 83 * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs, 84 * dual DMA queues, packet/MAC/transmit arbiters and direct access to the 85 * XMAC registers. This driver takes advantage of these features to allow 86 * both XMACs to operate as independent interfaces. 87 */ 88 89 #include <sys/param.h> 90 #include <sys/systm.h> 91 #include <sys/sockio.h> 92 #include <sys/mbuf.h> 93 #include <sys/malloc.h> 94 #include <sys/kernel.h> 95 #include <sys/socket.h> 96 #include <sys/queue.h> 97 #include <sys/serialize.h> 98 #include <sys/bus.h> 99 #include <sys/rman.h> 100 #include <sys/thread2.h> 101 102 #include <net/if.h> 103 #include <net/ifq_var.h> 104 #include <net/if_arp.h> 105 #include <net/ethernet.h> 106 #include <net/if_dl.h> 107 #include <net/if_media.h> 108 109 #include <net/bpf.h> 110 111 #include <vm/vm.h> /* for vtophys */ 112 #include <vm/pmap.h> /* for vtophys */ 113 114 #include <dev/netif/mii_layer/mii.h> 115 #include <dev/netif/mii_layer/miivar.h> 116 #include <dev/netif/mii_layer/brgphyreg.h> 117 118 #include <bus/pci/pcireg.h> 119 #include <bus/pci/pcivar.h> 120 121 #if 0 122 #define SK_USEIOSPACE 123 #endif 124 125 #include "if_skreg.h" 126 #include "xmaciireg.h" 127 #include "yukonreg.h" 128 129 /* "controller miibus0" required. See GENERIC if you get errors here. 
*/ 130 #include "miibus_if.h" 131 132 static struct sk_type sk_devs[] = { 133 { VENDORID_SK, DEVICEID_SK_V1, 134 "SysKonnect Gigabit Ethernet (V1.0)" }, 135 { VENDORID_SK, DEVICEID_SK_V2, 136 "SysKonnect Gigabit Ethernet (V2.0)" }, 137 { VENDORID_MARVELL, DEVICEID_SK_V2, 138 "Marvell Gigabit Ethernet" }, 139 { VENDORID_3COM, DEVICEID_3COM_3C940, 140 "3Com 3C940 Gigabit Ethernet" }, 141 { VENDORID_LINKSYS, DEVICEID_LINKSYS_EG1032, 142 "Linksys EG1032 Gigabit Ethernet" }, 143 { VENDORID_DLINK, DEVICEID_DLINK_DGE530T, 144 "D-Link DGE-530T Gigabit Ethernet" }, 145 { 0, 0, NULL } 146 }; 147 148 static int skc_probe(device_t); 149 static int skc_attach(device_t); 150 static int skc_detach(device_t); 151 static void skc_shutdown(device_t); 152 static int sk_probe(device_t); 153 static int sk_attach(device_t); 154 static int sk_detach(device_t); 155 static void sk_tick(void *); 156 static void sk_intr(void *); 157 static void sk_intr_bcom(struct sk_if_softc *); 158 static void sk_intr_xmac(struct sk_if_softc *); 159 static void sk_intr_yukon(struct sk_if_softc *); 160 static void sk_rxeof(struct sk_if_softc *); 161 static void sk_txeof(struct sk_if_softc *); 162 static int sk_encap(struct sk_if_softc *, struct mbuf *, uint32_t *); 163 static void sk_start(struct ifnet *); 164 static int sk_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 165 static void sk_init(void *); 166 static void sk_init_xmac(struct sk_if_softc *); 167 static void sk_init_yukon(struct sk_if_softc *); 168 static void sk_stop(struct sk_if_softc *); 169 static void sk_watchdog(struct ifnet *); 170 static int sk_ifmedia_upd(struct ifnet *); 171 static void sk_ifmedia_sts(struct ifnet *, struct ifmediareq *); 172 static void sk_reset(struct sk_softc *); 173 static int sk_newbuf(struct sk_if_softc *, struct sk_chain *, 174 struct mbuf *); 175 static int sk_alloc_jumbo_mem(struct sk_if_softc *); 176 static struct sk_jslot 177 *sk_jalloc(struct sk_if_softc *); 178 static void sk_jfree(void *); 179 static void sk_jref(void *); 180 static int sk_init_rx_ring(struct sk_if_softc *); 181 static void sk_init_tx_ring(struct sk_if_softc *); 182 static uint32_t sk_win_read_4(struct sk_softc *, int); 183 static uint16_t sk_win_read_2(struct sk_softc *, int); 184 static uint8_t sk_win_read_1(struct sk_softc *, int); 185 static void sk_win_write_4(struct sk_softc *, int, uint32_t); 186 static void sk_win_write_2(struct sk_softc *, int, uint32_t); 187 static void sk_win_write_1(struct sk_softc *, int, uint32_t); 188 static uint8_t sk_vpd_readbyte(struct sk_softc *, int); 189 static void sk_vpd_read_res(struct sk_softc *, struct vpd_res *, int); 190 static void sk_vpd_read(struct sk_softc *); 191 192 static int sk_miibus_readreg(device_t, int, int); 193 static int sk_miibus_writereg(device_t, int, int, int); 194 static void sk_miibus_statchg(device_t); 195 196 static int sk_xmac_miibus_readreg(struct sk_if_softc *, int, int); 197 static int sk_xmac_miibus_writereg(struct sk_if_softc *, int, int, int); 198 static void sk_xmac_miibus_statchg(struct sk_if_softc *); 199 200 static int sk_marv_miibus_readreg(struct sk_if_softc *, int, int); 201 static int sk_marv_miibus_writereg(struct sk_if_softc *, int, int, int); 202 static void sk_marv_miibus_statchg(struct sk_if_softc *); 203 204 static void sk_setfilt(struct sk_if_softc *, caddr_t, int); 205 static void sk_setmulti(struct sk_if_softc *); 206 static void sk_setpromisc(struct sk_if_softc *); 207 208 #ifdef SK_USEIOSPACE 209 #define SK_RES SYS_RES_IOPORT 210 #define SK_RID SK_PCI_LOIO 211 
#else 212 #define SK_RES SYS_RES_MEMORY 213 #define SK_RID SK_PCI_LOMEM 214 #endif 215 216 /* 217 * Note that we have newbus methods for both the GEnesis controller 218 * itself and the XMAC(s). The XMACs are children of the GEnesis, and 219 * the miibus code is a child of the XMACs. We need to do it this way 220 * so that the miibus drivers can access the PHY registers on the 221 * right PHY. It's not quite what I had in mind, but it's the only 222 * design that achieves the desired effect. 223 */ 224 static device_method_t skc_methods[] = { 225 /* Device interface */ 226 DEVMETHOD(device_probe, skc_probe), 227 DEVMETHOD(device_attach, skc_attach), 228 DEVMETHOD(device_detach, skc_detach), 229 DEVMETHOD(device_shutdown, skc_shutdown), 230 231 /* bus interface */ 232 DEVMETHOD(bus_print_child, bus_generic_print_child), 233 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 234 235 { 0, 0 } 236 }; 237 238 static DEFINE_CLASS_0(skc, skc_driver, skc_methods, sizeof(struct sk_softc)); 239 static devclass_t skc_devclass; 240 241 static device_method_t sk_methods[] = { 242 /* Device interface */ 243 DEVMETHOD(device_probe, sk_probe), 244 DEVMETHOD(device_attach, sk_attach), 245 DEVMETHOD(device_detach, sk_detach), 246 DEVMETHOD(device_shutdown, bus_generic_shutdown), 247 248 /* bus interface */ 249 DEVMETHOD(bus_print_child, bus_generic_print_child), 250 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 251 252 /* MII interface */ 253 DEVMETHOD(miibus_readreg, sk_miibus_readreg), 254 DEVMETHOD(miibus_writereg, sk_miibus_writereg), 255 DEVMETHOD(miibus_statchg, sk_miibus_statchg), 256 257 { 0, 0 } 258 }; 259 260 static DEFINE_CLASS_0(sk, sk_driver, sk_methods, sizeof(struct sk_if_softc)); 261 static devclass_t sk_devclass; 262 static struct lwkt_serialize sk_serializer; 263 264 DECLARE_DUMMY_MODULE(if_sk); 265 DRIVER_MODULE(if_sk, pci, skc_driver, skc_devclass, 0, 0); 266 DRIVER_MODULE(if_sk, skc, sk_driver, sk_devclass, 0, 0); 267 DRIVER_MODULE(miibus, sk, miibus_driver, miibus_devclass, 0, 0); 268 269 #define SK_SETBIT(sc, reg, x) \ 270 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x) 271 272 #define SK_CLRBIT(sc, reg, x) \ 273 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x) 274 275 #define SK_WIN_SETBIT_4(sc, reg, x) \ 276 sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) | x) 277 278 #define SK_WIN_CLRBIT_4(sc, reg, x) \ 279 sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) & ~x) 280 281 #define SK_WIN_SETBIT_2(sc, reg, x) \ 282 sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) | x) 283 284 #define SK_WIN_CLRBIT_2(sc, reg, x) \ 285 sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) & ~x) 286 287 static uint32_t 288 sk_win_read_4(struct sk_softc *sc, int reg) 289 { 290 #ifdef SK_USEIOSPACE 291 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); 292 return(CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg))); 293 #else 294 return(CSR_READ_4(sc, reg)); 295 #endif 296 } 297 298 static uint16_t 299 sk_win_read_2(struct sk_softc *sc, int reg) 300 { 301 #ifdef SK_USEIOSPACE 302 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); 303 return(CSR_READ_2(sc, SK_WIN_BASE + SK_REG(reg))); 304 #else 305 return(CSR_READ_2(sc, reg)); 306 #endif 307 } 308 309 static uint8_t 310 sk_win_read_1(struct sk_softc *sc, int reg) 311 { 312 #ifdef SK_USEIOSPACE 313 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); 314 return(CSR_READ_1(sc, SK_WIN_BASE + SK_REG(reg))); 315 #else 316 return(CSR_READ_1(sc, reg)); 317 #endif 318 } 319 320 static void 321 sk_win_write_4(struct sk_softc *sc, int reg, uint32_t val) 322 { 323 #ifdef SK_USEIOSPACE 324 CSR_WRITE_4(sc, SK_RAP, 
SK_WIN(reg)); 325 CSR_WRITE_4(sc, SK_WIN_BASE + SK_REG(reg), val); 326 #else 327 CSR_WRITE_4(sc, reg, val); 328 #endif 329 } 330 331 static void 332 sk_win_write_2(struct sk_softc *sc, int reg, uint32_t val) 333 { 334 #ifdef SK_USEIOSPACE 335 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); 336 CSR_WRITE_2(sc, SK_WIN_BASE + SK_REG(reg), val); 337 #else 338 CSR_WRITE_2(sc, reg, val); 339 #endif 340 } 341 342 static void 343 sk_win_write_1(struct sk_softc *sc, int reg, uint32_t val) 344 { 345 #ifdef SK_USEIOSPACE 346 CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); 347 CSR_WRITE_1(sc, SK_WIN_BASE + SK_REG(reg), val); 348 #else 349 CSR_WRITE_1(sc, reg, val); 350 #endif 351 } 352 353 /* 354 * The VPD EEPROM contains Vital Product Data, as suggested in 355 * the PCI 2.1 specification. The VPD data is separared into areas 356 * denoted by resource IDs. The SysKonnect VPD contains an ID string 357 * resource (the name of the adapter), a read-only area resource 358 * containing various key/data fields and a read/write area which 359 * can be used to store asset management information or log messages. 360 * We read the ID string and read-only into buffers attached to 361 * the controller softc structure for later use. At the moment, 362 * we only use the ID string during sk_attach(). 363 */ 364 static uint8_t 365 sk_vpd_readbyte(struct sk_softc *sc, int addr) 366 { 367 int i; 368 369 sk_win_write_2(sc, SK_PCI_REG(SK_PCI_VPD_ADDR), addr); 370 for (i = 0; i < SK_TIMEOUT; i++) { 371 DELAY(1); 372 if (sk_win_read_2(sc, 373 SK_PCI_REG(SK_PCI_VPD_ADDR)) & SK_VPD_FLAG) 374 break; 375 } 376 377 if (i == SK_TIMEOUT) 378 return(0); 379 380 return(sk_win_read_1(sc, SK_PCI_REG(SK_PCI_VPD_DATA))); 381 } 382 383 static void 384 sk_vpd_read_res(struct sk_softc *sc, struct vpd_res *res, int addr) 385 { 386 int i; 387 uint8_t *ptr; 388 389 ptr = (uint8_t *)res; 390 for (i = 0; i < sizeof(struct vpd_res); i++) 391 ptr[i] = sk_vpd_readbyte(sc, i + addr); 392 } 393 394 static void 395 sk_vpd_read(struct sk_softc *sc) 396 { 397 struct vpd_res res; 398 int i, pos = 0; 399 400 if (sc->sk_vpd_prodname != NULL) 401 kfree(sc->sk_vpd_prodname, M_DEVBUF); 402 if (sc->sk_vpd_readonly != NULL) 403 kfree(sc->sk_vpd_readonly, M_DEVBUF); 404 sc->sk_vpd_prodname = NULL; 405 sc->sk_vpd_readonly = NULL; 406 407 sk_vpd_read_res(sc, &res, pos); 408 409 if (res.vr_id != VPD_RES_ID) { 410 printf("skc%d: bad VPD resource id: expected %x got %x\n", 411 sc->sk_unit, VPD_RES_ID, res.vr_id); 412 return; 413 } 414 415 pos += sizeof(res); 416 sc->sk_vpd_prodname = kmalloc(res.vr_len + 1, M_DEVBUF, M_INTWAIT); 417 for (i = 0; i < res.vr_len; i++) 418 sc->sk_vpd_prodname[i] = sk_vpd_readbyte(sc, i + pos); 419 sc->sk_vpd_prodname[i] = '\0'; 420 pos += i; 421 422 sk_vpd_read_res(sc, &res, pos); 423 424 if (res.vr_id != VPD_RES_READ) { 425 printf("skc%d: bad VPD resource id: expected %x got %x\n", 426 sc->sk_unit, VPD_RES_READ, res.vr_id); 427 return; 428 } 429 430 pos += sizeof(res); 431 sc->sk_vpd_readonly = kmalloc(res.vr_len, M_DEVBUF, M_INTWAIT); 432 for (i = 0; i < res.vr_len + 1; i++) 433 sc->sk_vpd_readonly[i] = sk_vpd_readbyte(sc, i + pos); 434 } 435 436 static int 437 sk_miibus_readreg(device_t dev, int phy, int reg) 438 { 439 struct sk_if_softc *sc_if = device_get_softc(dev); 440 441 switch(sc_if->sk_softc->sk_type) { 442 case SK_GENESIS: 443 return(sk_xmac_miibus_readreg(sc_if, phy, reg)); 444 case SK_YUKON: 445 return(sk_marv_miibus_readreg(sc_if, phy, reg)); 446 } 447 448 return(0); 449 } 450 451 static int 452 sk_miibus_writereg(device_t dev, int phy, int 
reg, int val) 453 { 454 struct sk_if_softc *sc_if = device_get_softc(dev); 455 456 switch(sc_if->sk_softc->sk_type) { 457 case SK_GENESIS: 458 return(sk_xmac_miibus_writereg(sc_if, phy, reg, val)); 459 case SK_YUKON: 460 return(sk_marv_miibus_writereg(sc_if, phy, reg, val)); 461 } 462 463 return(0); 464 } 465 466 static void 467 sk_miibus_statchg(device_t dev) 468 { 469 struct sk_if_softc *sc_if = device_get_softc(dev); 470 471 switch(sc_if->sk_softc->sk_type) { 472 case SK_GENESIS: 473 sk_xmac_miibus_statchg(sc_if); 474 break; 475 case SK_YUKON: 476 sk_marv_miibus_statchg(sc_if); 477 break; 478 } 479 } 480 481 static int 482 sk_xmac_miibus_readreg(struct sk_if_softc *sc_if, int phy, int reg) 483 { 484 int i; 485 486 if (sc_if->sk_phytype == SK_PHYTYPE_XMAC && phy != 0) 487 return(0); 488 489 SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8)); 490 SK_XM_READ_2(sc_if, XM_PHY_DATA); 491 if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) { 492 for (i = 0; i < SK_TIMEOUT; i++) { 493 DELAY(1); 494 if (SK_XM_READ_2(sc_if, XM_MMUCMD) & 495 XM_MMUCMD_PHYDATARDY) 496 break; 497 } 498 499 if (i == SK_TIMEOUT) { 500 printf("sk%d: phy failed to come ready\n", 501 sc_if->sk_unit); 502 return(0); 503 } 504 } 505 DELAY(1); 506 return(SK_XM_READ_2(sc_if, XM_PHY_DATA)); 507 } 508 509 static int 510 sk_xmac_miibus_writereg(struct sk_if_softc *sc_if, int phy, int reg, int val) 511 { 512 int i; 513 514 SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8)); 515 for (i = 0; i < SK_TIMEOUT; i++) { 516 if ((SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY) == 0) 517 break; 518 } 519 520 if (i == SK_TIMEOUT) { 521 printf("sk%d: phy failed to come ready\n", sc_if->sk_unit); 522 return(ETIMEDOUT); 523 } 524 525 SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val); 526 for (i = 0; i < SK_TIMEOUT; i++) { 527 DELAY(1); 528 if ((SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY) == 0) 529 break; 530 } 531 532 if (i == SK_TIMEOUT) 533 printf("sk%d: phy write timed out\n", sc_if->sk_unit); 534 535 return(0); 536 } 537 538 static void 539 sk_xmac_miibus_statchg(struct sk_if_softc *sc_if) 540 { 541 struct mii_data *mii; 542 543 mii = device_get_softc(sc_if->sk_miibus); 544 545 /* 546 * If this is a GMII PHY, manually set the XMAC's 547 * duplex mode accordingly. 
548 */ 549 if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) { 550 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) 551 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX); 552 else 553 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX); 554 } 555 } 556 557 static int 558 sk_marv_miibus_readreg(struct sk_if_softc *sc_if, int phy, int reg) 559 { 560 uint16_t val; 561 int i; 562 563 if (phy != 0 || 564 (sc_if->sk_phytype != SK_PHYTYPE_MARV_COPPER && 565 sc_if->sk_phytype != SK_PHYTYPE_MARV_FIBER)) { 566 return(0); 567 } 568 569 SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) | 570 YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ); 571 572 for (i = 0; i < SK_TIMEOUT; i++) { 573 DELAY(1); 574 val = SK_YU_READ_2(sc_if, YUKON_SMICR); 575 if (val & YU_SMICR_READ_VALID) 576 break; 577 } 578 579 if (i == SK_TIMEOUT) { 580 printf("sk%d: phy failed to come ready\n", 581 sc_if->sk_unit); 582 return(0); 583 } 584 585 val = SK_YU_READ_2(sc_if, YUKON_SMIDR); 586 587 return(val); 588 } 589 590 static int 591 sk_marv_miibus_writereg(struct sk_if_softc *sc_if, int phy, int reg, int val) 592 { 593 int i; 594 595 SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val); 596 SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) | 597 YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE); 598 599 for (i = 0; i < SK_TIMEOUT; i++) { 600 DELAY(1); 601 if (SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY) 602 break; 603 } 604 605 return(0); 606 } 607 608 static void 609 sk_marv_miibus_statchg(struct sk_if_softc *sc_if) 610 { 611 } 612 613 #define HASH_BITS 6 614 615 static void 616 sk_setfilt(struct sk_if_softc *sc_if, caddr_t addr, int slot) 617 { 618 int base; 619 620 base = XM_RXFILT_ENTRY(slot); 621 622 SK_XM_WRITE_2(sc_if, base, *(uint16_t *)(&addr[0])); 623 SK_XM_WRITE_2(sc_if, base + 2, *(uint16_t *)(&addr[2])); 624 SK_XM_WRITE_2(sc_if, base + 4, *(uint16_t *)(&addr[4])); 625 } 626 627 static void 628 sk_setmulti(struct sk_if_softc *sc_if) 629 { 630 struct sk_softc *sc = sc_if->sk_softc; 631 struct ifnet *ifp = &sc_if->arpcom.ac_if; 632 uint32_t hashes[2] = { 0, 0 }; 633 int h, i; 634 struct ifmultiaddr *ifma; 635 uint8_t dummy[] = { 0, 0, 0, 0, 0 ,0 }; 636 637 /* First, zot all the existing filters. */ 638 switch(sc->sk_type) { 639 case SK_GENESIS: 640 for (i = 1; i < XM_RXFILT_MAX; i++) 641 sk_setfilt(sc_if, (caddr_t)&dummy, i); 642 643 SK_XM_WRITE_4(sc_if, XM_MAR0, 0); 644 SK_XM_WRITE_4(sc_if, XM_MAR2, 0); 645 break; 646 case SK_YUKON: 647 SK_YU_WRITE_2(sc_if, YUKON_MCAH1, 0); 648 SK_YU_WRITE_2(sc_if, YUKON_MCAH2, 0); 649 SK_YU_WRITE_2(sc_if, YUKON_MCAH3, 0); 650 SK_YU_WRITE_2(sc_if, YUKON_MCAH4, 0); 651 break; 652 } 653 654 /* Now program new ones. */ 655 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 656 hashes[0] = 0xFFFFFFFF; 657 hashes[1] = 0xFFFFFFFF; 658 } else { 659 i = 1; 660 /* First find the tail of the list. */ 661 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 662 if (ifma->ifma_link.le_next == NULL) 663 break; 664 } 665 /* Now traverse the list backwards. */ 666 for (; ifma != NULL && ifma != (void *)&ifp->if_multiaddrs; 667 ifma = (struct ifmultiaddr *)ifma->ifma_link.le_prev) { 668 if (ifma->ifma_addr->sa_family != AF_LINK) 669 continue; 670 /* 671 * Program the first XM_RXFILT_MAX multicast groups 672 * into the perfect filter. For all others, 673 * use the hash table. 
674 */ 675 if (sc->sk_type == SK_GENESIS && i < XM_RXFILT_MAX) { 676 sk_setfilt(sc_if, 677 LLADDR((struct sockaddr_dl *)ifma->ifma_addr), i); 678 i++; 679 continue; 680 } 681 682 switch(sc->sk_type) { 683 case SK_GENESIS: 684 h = ~ether_crc32_le(LLADDR((struct sockaddr_dl *) 685 ifma->ifma_addr), ETHER_ADDR_LEN) & 686 ((1 << HASH_BITS) -1 ); 687 if (h < 32) 688 hashes[0] |= (1 << h); 689 else 690 hashes[1] |= (1 << (h - 32)); 691 break; 692 693 case SK_YUKON: 694 h = ether_crc32_be(LLADDR((struct sockaddr_dl *) 695 ifma->ifma_addr), ETHER_ADDR_LEN) & 696 ((1 << HASH_BITS) -1 ); 697 if (h < 32) 698 hashes[0] |= (1 << h); 699 else 700 hashes[1] |= (1 << (h - 32)); 701 break; 702 } 703 } 704 } 705 706 switch(sc->sk_type) { 707 case SK_GENESIS: 708 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_HASH| 709 XM_MODE_RX_USE_PERFECT); 710 SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]); 711 SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]); 712 break; 713 case SK_YUKON: 714 SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff); 715 SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff); 716 SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff); 717 SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff); 718 break; 719 } 720 } 721 722 static void 723 sk_setpromisc(struct sk_if_softc *sc_if) 724 { 725 struct sk_softc *sc = sc_if->sk_softc; 726 struct ifnet *ifp = &sc_if->arpcom.ac_if; 727 728 switch(sc->sk_type) { 729 case SK_GENESIS: 730 if (ifp->if_flags & IFF_PROMISC) { 731 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC); 732 } else { 733 SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC); 734 } 735 break; 736 case SK_YUKON: 737 if (ifp->if_flags & IFF_PROMISC) { 738 SK_YU_CLRBIT_2(sc_if, YUKON_RCR, 739 YU_RCR_UFLEN | YU_RCR_MUFLEN); 740 } else { 741 SK_YU_SETBIT_2(sc_if, YUKON_RCR, 742 YU_RCR_UFLEN | YU_RCR_MUFLEN); 743 } 744 break; 745 } 746 } 747 748 static int 749 sk_init_rx_ring(struct sk_if_softc *sc_if) 750 { 751 struct sk_chain_data *cd = &sc_if->sk_cdata; 752 struct sk_ring_data *rd = sc_if->sk_rdata; 753 int i; 754 755 bzero(rd->sk_rx_ring, sizeof(struct sk_rx_desc) * SK_RX_RING_CNT); 756 757 for (i = 0; i < SK_RX_RING_CNT; i++) { 758 cd->sk_rx_chain[i].sk_desc = &rd->sk_rx_ring[i]; 759 if (sk_newbuf(sc_if, &cd->sk_rx_chain[i], NULL) == ENOBUFS) 760 return(ENOBUFS); 761 if (i == (SK_RX_RING_CNT - 1)) { 762 cd->sk_rx_chain[i].sk_next = 763 &cd->sk_rx_chain[0]; 764 rd->sk_rx_ring[i].sk_next = 765 vtophys(&rd->sk_rx_ring[0]); 766 } else { 767 cd->sk_rx_chain[i].sk_next = 768 &cd->sk_rx_chain[i + 1]; 769 rd->sk_rx_ring[i].sk_next = 770 vtophys(&rd->sk_rx_ring[i + 1]); 771 } 772 } 773 774 sc_if->sk_cdata.sk_rx_prod = 0; 775 sc_if->sk_cdata.sk_rx_cons = 0; 776 777 return(0); 778 } 779 780 static void 781 sk_init_tx_ring(struct sk_if_softc *sc_if) 782 { 783 struct sk_chain_data *cd = &sc_if->sk_cdata; 784 struct sk_ring_data *rd = sc_if->sk_rdata; 785 int i, nexti; 786 787 bzero(sc_if->sk_rdata->sk_tx_ring, 788 sizeof(struct sk_tx_desc) * SK_TX_RING_CNT); 789 790 for (i = 0; i < SK_TX_RING_CNT; i++) { 791 nexti = (i == (SK_TX_RING_CNT - 1)) ? 
0 : i + 1; 792 cd->sk_tx_chain[i].sk_desc = &rd->sk_tx_ring[i]; 793 cd->sk_tx_chain[i].sk_next = &cd->sk_tx_chain[nexti]; 794 rd->sk_tx_ring[i].sk_next = vtophys(&rd->sk_tx_ring[nexti]); 795 } 796 797 sc_if->sk_cdata.sk_tx_prod = 0; 798 sc_if->sk_cdata.sk_tx_cons = 0; 799 sc_if->sk_cdata.sk_tx_cnt = 0; 800 } 801 802 static int 803 sk_newbuf(struct sk_if_softc *sc_if, struct sk_chain *c, struct mbuf *m) 804 { 805 struct mbuf *m_new = NULL; 806 struct sk_rx_desc *r; 807 struct sk_jslot *buf; 808 809 if (m == NULL) { 810 MGETHDR(m_new, MB_DONTWAIT, MT_DATA); 811 if (m_new == NULL) 812 return(ENOBUFS); 813 814 /* Allocate the jumbo buffer */ 815 buf = sk_jalloc(sc_if); 816 if (buf == NULL) { 817 m_freem(m_new); 818 #ifdef SK_VERBOSE 819 printf("sk%d: jumbo allocation failed " 820 "-- packet dropped!\n", sc_if->sk_unit); 821 #endif 822 return(ENOBUFS); 823 } 824 825 /* Attach the buffer to the mbuf */ 826 m_new->m_ext.ext_arg = buf; 827 m_new->m_ext.ext_buf = buf->sk_buf; 828 m_new->m_ext.ext_free = sk_jfree; 829 m_new->m_ext.ext_ref = sk_jref; 830 m_new->m_ext.ext_size = SK_JUMBO_FRAMELEN; 831 832 m_new->m_data = m_new->m_ext.ext_buf; 833 m_new->m_flags |= M_EXT; 834 m_new->m_len = m_new->m_pkthdr.len = m_new->m_ext.ext_size; 835 } else { 836 /* 837 * We're re-using a previously allocated mbuf; 838 * be sure to re-init pointers and lengths to 839 * default values. 840 */ 841 m_new = m; 842 m_new->m_len = m_new->m_pkthdr.len = SK_JLEN; 843 m_new->m_data = m_new->m_ext.ext_buf; 844 } 845 846 /* 847 * Adjust alignment so packet payload begins on a 848 * longword boundary. Mandatory for Alpha, useful on 849 * x86 too. 850 */ 851 m_adj(m_new, ETHER_ALIGN); 852 853 r = c->sk_desc; 854 c->sk_mbuf = m_new; 855 r->sk_data_lo = vtophys(mtod(m_new, caddr_t)); 856 r->sk_ctl = m_new->m_len | SK_RXSTAT; 857 858 return(0); 859 } 860 861 /* 862 * Allocate jumbo buffer storage. The SysKonnect adapters support 863 * "jumbograms" (9K frames), although SysKonnect doesn't currently 864 * use them in their drivers. In order for us to use them, we need 865 * large 9K receive buffers, however standard mbuf clusters are only 866 * 2048 bytes in size. Consequently, we need to allocate and manage 867 * our own jumbo buffer pool. Fortunately, this does not require an 868 * excessive amount of additional code. 869 */ 870 static int 871 sk_alloc_jumbo_mem(struct sk_if_softc *sc_if) 872 { 873 caddr_t ptr; 874 int i; 875 struct sk_jslot *entry; 876 877 /* Grab a big chunk o' storage. */ 878 sc_if->sk_cdata.sk_jumbo_buf = contigmalloc(SK_JMEM, M_DEVBUF, 879 M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); 880 881 if (sc_if->sk_cdata.sk_jumbo_buf == NULL) { 882 printf("sk%d: no memory for jumbo buffers!\n", sc_if->sk_unit); 883 return(ENOBUFS); 884 } 885 886 SLIST_INIT(&sc_if->sk_jfree_listhead); 887 888 /* 889 * Now divide it up into 9K pieces and save the addresses 890 * in an array. Note that we play an evil trick here by using 891 * the first few bytes in the buffer to hold the the address 892 * of the softc structure for this interface. This is because 893 * sk_jfree() needs it, but it is called by the mbuf management 894 * code which will not pass it to us explicitly. 
895 */ 896 ptr = sc_if->sk_cdata.sk_jumbo_buf; 897 for (i = 0; i < SK_JSLOTS; i++) { 898 entry = &sc_if->sk_cdata.sk_jslots[i]; 899 entry->sk_sc = sc_if; 900 entry->sk_buf = ptr; 901 entry->sk_inuse = 0; 902 entry->sk_slot = i; 903 SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, entry, jslot_link); 904 ptr += SK_JLEN; 905 } 906 907 return(0); 908 } 909 910 /* 911 * Allocate a jumbo buffer. 912 */ 913 static struct sk_jslot * 914 sk_jalloc(struct sk_if_softc *sc_if) 915 { 916 struct sk_jslot *entry; 917 918 lwkt_serialize_enter(&sc_if->sk_jslot_serializer); 919 entry = SLIST_FIRST(&sc_if->sk_jfree_listhead); 920 if (entry) { 921 SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jslot_link); 922 entry->sk_inuse = 1; 923 } else { 924 #ifdef SK_VERBOSE 925 printf("sk%d: no free jumbo buffers\n", sc_if->sk_unit); 926 #endif 927 } 928 lwkt_serialize_exit(&sc_if->sk_jslot_serializer); 929 return(entry); 930 } 931 932 /* 933 * Adjust usage count on a jumbo buffer. In general this doesn't 934 * get used much because our jumbo buffers don't get passed around 935 * a lot, but it's implemented for correctness. 936 */ 937 static void 938 sk_jref(void *arg) 939 { 940 struct sk_jslot *entry = (struct sk_jslot *)arg; 941 struct sk_if_softc *sc = entry->sk_sc; 942 943 if (sc == NULL) 944 panic("sk_jref: can't find softc pointer!"); 945 946 if (&sc->sk_cdata.sk_jslots[entry->sk_slot] != entry) 947 panic("sk_jref: asked to reference buffer " 948 "that we don't manage!"); 949 if (entry->sk_inuse == 0) 950 panic("sk_jref: buffer already free!"); 951 atomic_add_int(&entry->sk_inuse, 1); 952 } 953 954 /* 955 * Release a jumbo buffer. 956 */ 957 static void 958 sk_jfree(void *arg) 959 { 960 struct sk_jslot *entry = (struct sk_jslot *)arg; 961 struct sk_if_softc *sc = entry->sk_sc; 962 963 if (sc == NULL) 964 panic("sk_jref: can't find softc pointer!"); 965 966 if (&sc->sk_cdata.sk_jslots[entry->sk_slot] != entry) 967 panic("sk_jref: asked to reference buffer " 968 "that we don't manage!"); 969 if (entry->sk_inuse == 0) 970 panic("sk_jref: buffer already free!"); 971 lwkt_serialize_enter(&sc->sk_jslot_serializer); 972 atomic_subtract_int(&entry->sk_inuse, 1); 973 if (entry->sk_inuse == 0) 974 SLIST_INSERT_HEAD(&sc->sk_jfree_listhead, entry, jslot_link); 975 lwkt_serialize_exit(&sc->sk_jslot_serializer); 976 } 977 978 /* 979 * Set media options. 980 */ 981 static int 982 sk_ifmedia_upd(struct ifnet *ifp) 983 { 984 struct sk_if_softc *sc_if = ifp->if_softc; 985 struct mii_data *mii; 986 987 mii = device_get_softc(sc_if->sk_miibus); 988 sk_init(sc_if); 989 mii_mediachg(mii); 990 991 return(0); 992 } 993 994 /* 995 * Report current media status. 
996 */ 997 static void 998 sk_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 999 { 1000 struct sk_if_softc *sc_if; 1001 struct mii_data *mii; 1002 1003 sc_if = ifp->if_softc; 1004 mii = device_get_softc(sc_if->sk_miibus); 1005 1006 mii_pollstat(mii); 1007 ifmr->ifm_active = mii->mii_media_active; 1008 ifmr->ifm_status = mii->mii_media_status; 1009 } 1010 1011 static int 1012 sk_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr) 1013 { 1014 struct sk_if_softc *sc_if = ifp->if_softc; 1015 struct ifreq *ifr = (struct ifreq *)data; 1016 struct mii_data *mii; 1017 int error = 0; 1018 1019 crit_enter(); 1020 1021 switch(command) { 1022 case SIOCSIFMTU: 1023 if (ifr->ifr_mtu > SK_JUMBO_MTU) 1024 error = EINVAL; 1025 else { 1026 ifp->if_mtu = ifr->ifr_mtu; 1027 ifp->if_flags &= ~IFF_RUNNING; 1028 sk_init(sc_if); 1029 } 1030 break; 1031 case SIOCSIFFLAGS: 1032 if (ifp->if_flags & IFF_UP) { 1033 if (ifp->if_flags & IFF_RUNNING) { 1034 if ((ifp->if_flags ^ sc_if->sk_if_flags) 1035 & IFF_PROMISC) { 1036 sk_setpromisc(sc_if); 1037 sk_setmulti(sc_if); 1038 } 1039 } else 1040 sk_init(sc_if); 1041 } else { 1042 if (ifp->if_flags & IFF_RUNNING) 1043 sk_stop(sc_if); 1044 } 1045 sc_if->sk_if_flags = ifp->if_flags; 1046 error = 0; 1047 break; 1048 case SIOCADDMULTI: 1049 case SIOCDELMULTI: 1050 sk_setmulti(sc_if); 1051 error = 0; 1052 break; 1053 case SIOCGIFMEDIA: 1054 case SIOCSIFMEDIA: 1055 mii = device_get_softc(sc_if->sk_miibus); 1056 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 1057 break; 1058 default: 1059 error = ether_ioctl(ifp, command, data); 1060 break; 1061 } 1062 1063 crit_exit(); 1064 1065 return(error); 1066 } 1067 1068 /* 1069 * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device 1070 * IDs against our list and return a device name if we find a match. 1071 */ 1072 static int 1073 skc_probe(device_t dev) 1074 { 1075 struct sk_type *t; 1076 uint16_t vendor, product; 1077 1078 lwkt_serialize_init(&sk_serializer); 1079 vendor = pci_get_vendor(dev); 1080 product = pci_get_device(dev); 1081 1082 /* 1083 * Only attach to rev.2 of the Linksys EG1032 adapter. 1084 * Rev.3 is supported by re(4). 1085 */ 1086 if (vendor == VENDORID_LINKSYS && 1087 product == DEVICEID_LINKSYS_EG1032 && 1088 pci_get_subdevice(dev) != SUBDEVICEID_LINKSYS_EG1032_REV2) 1089 return (ENXIO); 1090 1091 for (t = sk_devs; t->sk_name != NULL; t++) { 1092 if (vendor == t->sk_vid && product == t->sk_did) { 1093 device_set_desc(dev, t->sk_name); 1094 return(0); 1095 } 1096 } 1097 1098 return(ENXIO); 1099 } 1100 1101 /* 1102 * Force the GEnesis into reset, then bring it out of reset. 
1103 */ 1104 static void 1105 sk_reset(struct sk_softc *sc) 1106 { 1107 CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_RESET); 1108 CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_RESET); 1109 if (sc->sk_type == SK_YUKON) 1110 CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET); 1111 1112 DELAY(1000); 1113 CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_UNRESET); 1114 DELAY(2); 1115 CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_UNRESET); 1116 if (sc->sk_type == SK_YUKON) 1117 CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR); 1118 1119 if (sc->sk_type == SK_GENESIS) { 1120 /* Configure packet arbiter */ 1121 sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET); 1122 sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT); 1123 sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT); 1124 sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT); 1125 sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT); 1126 } 1127 1128 /* Enable RAM interface */ 1129 sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET); 1130 1131 /* 1132 * Configure interrupt moderation. The moderation timer 1133 * defers interrupts specified in the interrupt moderation 1134 * timer mask based on the timeout specified in the interrupt 1135 * moderation timer init register. Each bit in the timer 1136 * register represents 18.825ns, so to specify a timeout in 1137 * microseconds, we have to multiply by 54. 1138 */ 1139 sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(200)); 1140 sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF| 1141 SK_ISR_RX1_EOF|SK_ISR_RX2_EOF); 1142 sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START); 1143 } 1144 1145 static int 1146 sk_probe(device_t dev) 1147 { 1148 struct sk_softc *sc = device_get_softc(device_get_parent(dev)); 1149 1150 /* 1151 * Not much to do here. We always know there will be 1152 * at least one XMAC present, and if there are two, 1153 * skc_attach() will create a second device instance 1154 * for us. 1155 */ 1156 switch (sc->sk_type) { 1157 case SK_GENESIS: 1158 device_set_desc(dev, "XaQti Corp. XMAC II"); 1159 break; 1160 case SK_YUKON: 1161 device_set_desc(dev, "Marvell Semiconductor, Inc. Yukon"); 1162 break; 1163 } 1164 1165 return(0); 1166 } 1167 1168 /* 1169 * Each XMAC chip is attached as a separate logical IP interface. 1170 * Single port cards will have only one logical interface of course. 1171 */ 1172 static int 1173 sk_attach(device_t dev) 1174 { 1175 struct sk_softc *sc = device_get_softc(device_get_parent(dev)); 1176 struct sk_if_softc *sc_if = device_get_softc(dev); 1177 struct ifnet *ifp; 1178 int i, port; 1179 1180 port = *(int *)device_get_ivars(dev); 1181 kfree(device_get_ivars(dev), M_DEVBUF); 1182 device_set_ivars(dev, NULL); 1183 sc_if->sk_dev = dev; 1184 callout_init(&sc_if->sk_tick_timer); 1185 lwkt_serialize_init(&sc_if->sk_jslot_serializer); 1186 1187 sc_if->sk_dev = dev; 1188 sc_if->sk_unit = device_get_unit(dev); 1189 sc_if->sk_port = port; 1190 sc_if->sk_softc = sc; 1191 sc->sk_if[port] = sc_if; 1192 if (port == SK_PORT_A) 1193 sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0; 1194 if (port == SK_PORT_B) 1195 sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1; 1196 1197 /* 1198 * Get station address for this interface. Note that 1199 * dual port cards actually come with three station 1200 * addresses: one for each port, plus an extra. The 1201 * extra one is used by the SysKonnect driver software 1202 * as a 'virtual' station address for when both ports 1203 * are operating in failover mode. Currently we don't 1204 * use this extra address. 
1205 */ 1206 for (i = 0; i < ETHER_ADDR_LEN; i++) 1207 sc_if->arpcom.ac_enaddr[i] = 1208 sk_win_read_1(sc, SK_MAC0_0 + (port * 8) + i); 1209 1210 /* 1211 * Set up RAM buffer addresses. The NIC will have a certain 1212 * amount of SRAM on it, somewhere between 512K and 2MB. We 1213 * need to divide this up a) between the transmitter and 1214 * receiver and b) between the two XMACs, if this is a 1215 * dual port NIC. Our algotithm is to divide up the memory 1216 * evenly so that everyone gets a fair share. 1217 */ 1218 if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) { 1219 uint32_t chunk, val; 1220 1221 chunk = sc->sk_ramsize / 2; 1222 val = sc->sk_rboff / sizeof(uint64_t); 1223 sc_if->sk_rx_ramstart = val; 1224 val += (chunk / sizeof(uint64_t)); 1225 sc_if->sk_rx_ramend = val - 1; 1226 sc_if->sk_tx_ramstart = val; 1227 val += (chunk / sizeof(uint64_t)); 1228 sc_if->sk_tx_ramend = val - 1; 1229 } else { 1230 uint32_t chunk, val; 1231 1232 chunk = sc->sk_ramsize / 4; 1233 val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) / 1234 sizeof(uint64_t); 1235 sc_if->sk_rx_ramstart = val; 1236 val += (chunk / sizeof(uint64_t)); 1237 sc_if->sk_rx_ramend = val - 1; 1238 sc_if->sk_tx_ramstart = val; 1239 val += (chunk / sizeof(uint64_t)); 1240 sc_if->sk_tx_ramend = val - 1; 1241 } 1242 1243 /* Read and save PHY type and set PHY address */ 1244 sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF; 1245 switch(sc_if->sk_phytype) { 1246 case SK_PHYTYPE_XMAC: 1247 sc_if->sk_phyaddr = SK_PHYADDR_XMAC; 1248 break; 1249 case SK_PHYTYPE_BCOM: 1250 sc_if->sk_phyaddr = SK_PHYADDR_BCOM; 1251 break; 1252 case SK_PHYTYPE_MARV_COPPER: 1253 sc_if->sk_phyaddr = SK_PHYADDR_MARV; 1254 break; 1255 default: 1256 printf("skc%d: unsupported PHY type: %d\n", 1257 sc->sk_unit, sc_if->sk_phytype); 1258 sc->sk_if[port] = NULL; 1259 return(ENODEV); 1260 } 1261 1262 /* Allocate the descriptor queues. */ 1263 sc_if->sk_rdata = contigmalloc(sizeof(struct sk_ring_data), M_DEVBUF, 1264 M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); 1265 1266 if (sc_if->sk_rdata == NULL) { 1267 printf("sk%d: no memory for list buffers!\n", sc_if->sk_unit); 1268 sc->sk_if[port] = NULL; 1269 return(ENOMEM); 1270 } 1271 1272 bzero(sc_if->sk_rdata, sizeof(struct sk_ring_data)); 1273 1274 /* Try to allocate memory for jumbo buffers. */ 1275 if (sk_alloc_jumbo_mem(sc_if)) { 1276 printf("sk%d: jumbo buffer allocation failed\n", 1277 sc_if->sk_unit); 1278 contigfree(sc_if->sk_rdata, 1279 sizeof(struct sk_ring_data), M_DEVBUF); 1280 sc->sk_if[port] = NULL; 1281 return(ENOMEM); 1282 } 1283 1284 ifp = &sc_if->arpcom.ac_if; 1285 ifp->if_softc = sc_if; 1286 if_initname(ifp, "sk", sc_if->sk_unit); 1287 ifp->if_mtu = ETHERMTU; 1288 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1289 ifp->if_ioctl = sk_ioctl; 1290 ifp->if_start = sk_start; 1291 ifp->if_watchdog = sk_watchdog; 1292 ifp->if_init = sk_init; 1293 ifp->if_baudrate = 1000000000; 1294 ifq_set_maxlen(&ifp->if_snd, SK_TX_RING_CNT - 1); 1295 ifq_set_ready(&ifp->if_snd); 1296 1297 /* 1298 * Do miibus setup. 
1299 */ 1300 switch (sc->sk_type) { 1301 case SK_GENESIS: 1302 sk_init_xmac(sc_if); 1303 break; 1304 case SK_YUKON: 1305 sk_init_yukon(sc_if); 1306 break; 1307 } 1308 1309 if (mii_phy_probe(dev, &sc_if->sk_miibus, 1310 sk_ifmedia_upd, sk_ifmedia_sts)) { 1311 printf("skc%d: no PHY found!\n", sc_if->sk_unit); 1312 contigfree(sc_if->sk_cdata.sk_jumbo_buf, SK_JMEM, 1313 M_DEVBUF); 1314 contigfree(sc_if->sk_rdata, 1315 sizeof(struct sk_ring_data), M_DEVBUF); 1316 sc->sk_if[port] = NULL; 1317 return(ENXIO); 1318 } 1319 1320 /* 1321 * Call MI attach routine. 1322 */ 1323 ether_ifattach(ifp, sc_if->arpcom.ac_enaddr, &sk_serializer); 1324 callout_init(&sc_if->sk_tick_timer); 1325 1326 return(0); 1327 } 1328 1329 /* 1330 * Attach the interface. Allocate softc structures, do ifmedia 1331 * setup and ethernet/BPF attach. 1332 */ 1333 static int 1334 skc_attach(device_t dev) 1335 { 1336 struct sk_softc *sc; 1337 int error = 0, *port, rid, unit; 1338 uint32_t command; 1339 uint8_t skrs; 1340 1341 crit_enter(); 1342 1343 sc = device_get_softc(dev); 1344 unit = device_get_unit(dev); 1345 switch (pci_get_device(dev)) { 1346 case DEVICEID_SK_V1: 1347 sc->sk_type = SK_GENESIS; 1348 break; 1349 case DEVICEID_SK_V2: 1350 case DEVICEID_3COM_3C940: 1351 case DEVICEID_LINKSYS_EG1032: 1352 case DEVICEID_DLINK_DGE530T: 1353 sc->sk_type = SK_YUKON; 1354 break; 1355 } 1356 1357 /* 1358 * Handle power management nonsense. 1359 */ 1360 command = pci_read_config(dev, SK_PCI_CAPID, 4) & 0x000000FF; 1361 if (command == 0x01) { 1362 command = pci_read_config(dev, SK_PCI_PWRMGMTCTRL, 4); 1363 if (command & SK_PSTATE_MASK) { 1364 uint32_t iobase, membase, irq; 1365 1366 /* Save important PCI config data. */ 1367 iobase = pci_read_config(dev, SK_PCI_LOIO, 4); 1368 membase = pci_read_config(dev, SK_PCI_LOMEM, 4); 1369 irq = pci_read_config(dev, SK_PCI_INTLINE, 4); 1370 1371 /* Reset the power state. */ 1372 printf("skc%d: chip is in D%d power mode " 1373 "-- setting to D0\n", unit, command & SK_PSTATE_MASK); 1374 command &= 0xFFFFFFFC; 1375 pci_write_config(dev, SK_PCI_PWRMGMTCTRL, command, 4); 1376 1377 /* Restore PCI config data. */ 1378 pci_write_config(dev, SK_PCI_LOIO, iobase, 4); 1379 pci_write_config(dev, SK_PCI_LOMEM, membase, 4); 1380 pci_write_config(dev, SK_PCI_INTLINE, irq, 4); 1381 } 1382 } 1383 1384 /* 1385 * Map control/status registers. 
1386 */ 1387 command = pci_read_config(dev, PCIR_COMMAND, 4); 1388 command |= (PCIM_CMD_PORTEN|PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN); 1389 pci_write_config(dev, PCIR_COMMAND, command, 4); 1390 command = pci_read_config(dev, PCIR_COMMAND, 4); 1391 1392 #ifdef SK_USEIOSPACE 1393 if ((command & PCIM_CMD_PORTEN) == 0) { 1394 printf("skc%d: failed to enable I/O ports!\n", unit); 1395 error = ENXIO; 1396 goto fail; 1397 } 1398 #else 1399 if ((command & PCIM_CMD_MEMEN) == 0) { 1400 printf("skc%d: failed to enable memory mapping!\n", unit); 1401 error = ENXIO; 1402 goto fail; 1403 } 1404 #endif 1405 1406 rid = SK_RID; 1407 sc->sk_res = bus_alloc_resource_any(dev, SK_RES, &rid, RF_ACTIVE); 1408 1409 if (sc->sk_res == NULL) { 1410 printf("sk%d: couldn't map ports/memory\n", unit); 1411 error = ENXIO; 1412 goto fail; 1413 } 1414 1415 sc->sk_btag = rman_get_bustag(sc->sk_res); 1416 sc->sk_bhandle = rman_get_bushandle(sc->sk_res); 1417 1418 /* Allocate interrupt */ 1419 rid = 0; 1420 sc->sk_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 1421 RF_SHAREABLE | RF_ACTIVE); 1422 1423 if (sc->sk_irq == NULL) { 1424 printf("skc%d: couldn't map interrupt\n", unit); 1425 bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res); 1426 error = ENXIO; 1427 goto fail; 1428 } 1429 1430 error = bus_setup_intr(dev, sc->sk_irq, INTR_NETSAFE, 1431 sk_intr, sc, 1432 &sc->sk_intrhand, &sk_serializer); 1433 1434 if (error) { 1435 printf("skc%d: couldn't set up irq\n", unit); 1436 bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res); 1437 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq); 1438 goto fail; 1439 } 1440 1441 /* Reset the adapter. */ 1442 sk_reset(sc); 1443 1444 sc->sk_unit = unit; 1445 1446 /* Read and save vital product data from EEPROM. */ 1447 sk_vpd_read(sc); 1448 1449 skrs = sk_win_read_1(sc, SK_EPROM0); 1450 if (sc->sk_type == SK_GENESIS) { 1451 /* Read and save RAM size and RAMbuffer offset */ 1452 switch(skrs) { 1453 case SK_RAMSIZE_512K_64: 1454 sc->sk_ramsize = 0x80000; 1455 sc->sk_rboff = SK_RBOFF_0; 1456 break; 1457 case SK_RAMSIZE_1024K_64: 1458 sc->sk_ramsize = 0x100000; 1459 sc->sk_rboff = SK_RBOFF_80000; 1460 break; 1461 case SK_RAMSIZE_1024K_128: 1462 sc->sk_ramsize = 0x100000; 1463 sc->sk_rboff = SK_RBOFF_0; 1464 break; 1465 case SK_RAMSIZE_2048K_128: 1466 sc->sk_ramsize = 0x200000; 1467 sc->sk_rboff = SK_RBOFF_0; 1468 break; 1469 default: 1470 printf("skc%d: unknown ram size: %d\n", 1471 sc->sk_unit, sk_win_read_1(sc, SK_EPROM0)); 1472 bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand); 1473 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq); 1474 bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res); 1475 error = ENXIO; 1476 goto fail; 1477 break; 1478 } 1479 } else { /* SK_YUKON */ 1480 if (skrs == 0x00) { 1481 sc->sk_ramsize = 0x20000; 1482 } else { 1483 sc->sk_ramsize = skrs * (1<<12); 1484 } 1485 sc->sk_rboff = SK_RBOFF_0; 1486 } 1487 1488 /* Read and save physical media type */ 1489 switch(sk_win_read_1(sc, SK_PMDTYPE)) { 1490 case SK_PMD_1000BASESX: 1491 sc->sk_pmd = IFM_1000_SX; 1492 break; 1493 case SK_PMD_1000BASELX: 1494 sc->sk_pmd = IFM_1000_LX; 1495 break; 1496 case SK_PMD_1000BASECX: 1497 sc->sk_pmd = IFM_1000_CX; 1498 break; 1499 case SK_PMD_1000BASETX: 1500 sc->sk_pmd = IFM_1000_T; 1501 break; 1502 default: 1503 printf("skc%d: unknown media type: 0x%x\n", 1504 sc->sk_unit, sk_win_read_1(sc, SK_PMDTYPE)); 1505 bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand); 1506 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq); 1507 bus_release_resource(dev, SK_RES, SK_RID, 
sc->sk_res); 1508 error = ENXIO; 1509 goto fail; 1510 } 1511 1512 /* Announce the product name. */ 1513 printf("skc%d: %s\n", sc->sk_unit, sc->sk_vpd_prodname); 1514 sc->sk_devs[SK_PORT_A] = device_add_child(dev, "sk", -1); 1515 port = kmalloc(sizeof(int), M_DEVBUF, M_WAITOK); 1516 *port = SK_PORT_A; 1517 device_set_ivars(sc->sk_devs[SK_PORT_A], port); 1518 1519 if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC)) { 1520 sc->sk_devs[SK_PORT_B] = device_add_child(dev, "sk", -1); 1521 port = kmalloc(sizeof(int), M_DEVBUF, M_WAITOK); 1522 *port = SK_PORT_B; 1523 device_set_ivars(sc->sk_devs[SK_PORT_B], port); 1524 } 1525 1526 /* Turn on the 'driver is loaded' LED. */ 1527 CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON); 1528 1529 bus_generic_attach(dev); 1530 1531 fail: 1532 crit_exit(); 1533 return(error); 1534 } 1535 1536 static int 1537 sk_detach(device_t dev) 1538 { 1539 struct sk_if_softc *sc_if = device_get_softc(dev); 1540 struct ifnet *ifp = &sc_if->arpcom.ac_if; 1541 1542 ether_ifdetach(ifp); 1543 bus_generic_detach(dev); 1544 if (sc_if->sk_miibus != NULL) 1545 device_delete_child(dev, sc_if->sk_miibus); 1546 contigfree(sc_if->sk_cdata.sk_jumbo_buf, SK_JMEM, M_DEVBUF); 1547 contigfree(sc_if->sk_rdata, sizeof(struct sk_ring_data), M_DEVBUF); 1548 1549 return(0); 1550 } 1551 1552 static int 1553 skc_detach(device_t dev) 1554 { 1555 struct sk_softc *sc; 1556 1557 sc = device_get_softc(dev); 1558 1559 lwkt_serialize_enter(&sk_serializer); 1560 1561 if (sc->sk_if[SK_PORT_A] != NULL) 1562 sk_stop(sc->sk_if[SK_PORT_A]); 1563 if (sc->sk_if[SK_PORT_B] != NULL) 1564 sk_stop(sc->sk_if[SK_PORT_B]); 1565 1566 bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand); 1567 1568 lwkt_serialize_exit(&sk_serializer); 1569 1570 /* 1571 * recursed from sk_detach ? don't need serializer 1572 */ 1573 bus_generic_detach(dev); 1574 if (sc->sk_devs[SK_PORT_A] != NULL) 1575 device_delete_child(dev, sc->sk_devs[SK_PORT_A]); 1576 if (sc->sk_devs[SK_PORT_B] != NULL) 1577 device_delete_child(dev, sc->sk_devs[SK_PORT_B]); 1578 1579 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq); 1580 bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res); 1581 1582 return(0); 1583 } 1584 1585 static int 1586 sk_encap(struct sk_if_softc *sc_if, struct mbuf *m_head, uint32_t *txidx) 1587 { 1588 struct sk_tx_desc *f = NULL; 1589 struct mbuf *m; 1590 uint32_t cnt = 0, cur, frag; 1591 1592 m = m_head; 1593 cur = frag = *txidx; 1594 1595 /* 1596 * Start packing the mbufs in this chain into 1597 * the fragment pointers. Stop when we run out 1598 * of fragments or hit the end of the mbuf chain. 
1599 */ 1600 for (m = m_head; m != NULL; m = m->m_next) { 1601 if (m->m_len != 0) { 1602 if ((SK_TX_RING_CNT - 1603 (sc_if->sk_cdata.sk_tx_cnt + cnt)) < 2) 1604 return(ENOBUFS); 1605 f = &sc_if->sk_rdata->sk_tx_ring[frag]; 1606 f->sk_data_lo = vtophys(mtod(m, vm_offset_t)); 1607 f->sk_ctl = m->m_len | SK_OPCODE_DEFAULT; 1608 if (cnt == 0) 1609 f->sk_ctl |= SK_TXCTL_FIRSTFRAG; 1610 else 1611 f->sk_ctl |= SK_TXCTL_OWN; 1612 cur = frag; 1613 SK_INC(frag, SK_TX_RING_CNT); 1614 cnt++; 1615 } 1616 } 1617 1618 if (m != NULL) 1619 return(ENOBUFS); 1620 1621 sc_if->sk_rdata->sk_tx_ring[cur].sk_ctl |= 1622 SK_TXCTL_LASTFRAG|SK_TXCTL_EOF_INTR; 1623 sc_if->sk_cdata.sk_tx_chain[cur].sk_mbuf = m_head; 1624 sc_if->sk_rdata->sk_tx_ring[*txidx].sk_ctl |= SK_TXCTL_OWN; 1625 sc_if->sk_cdata.sk_tx_cnt += cnt; 1626 1627 *txidx = frag; 1628 1629 return(0); 1630 } 1631 1632 static void 1633 sk_start(struct ifnet *ifp) 1634 { 1635 struct sk_if_softc *sc_if = ifp->if_softc; 1636 struct sk_softc *sc = sc_if->sk_softc; 1637 struct mbuf *m_head = NULL; 1638 uint32_t idx; 1639 int need_trans; 1640 1641 idx = sc_if->sk_cdata.sk_tx_prod; 1642 1643 need_trans = 0; 1644 while(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf == NULL) { 1645 m_head = ifq_poll(&ifp->if_snd); 1646 if (m_head == NULL) 1647 break; 1648 1649 /* 1650 * Pack the data into the transmit ring. If we 1651 * don't have room, set the OACTIVE flag and wait 1652 * for the NIC to drain the ring. 1653 */ 1654 if (sk_encap(sc_if, m_head, &idx)) { 1655 ifp->if_flags |= IFF_OACTIVE; 1656 break; 1657 } 1658 ifq_dequeue(&ifp->if_snd, m_head); 1659 need_trans = 1; 1660 1661 BPF_MTAP(ifp, m_head); 1662 } 1663 1664 if (!need_trans) 1665 return; 1666 1667 /* Transmit */ 1668 sc_if->sk_cdata.sk_tx_prod = idx; 1669 CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START); 1670 1671 /* Set a timeout in case the chip goes out to lunch. */ 1672 ifp->if_timer = 5; 1673 } 1674 1675 1676 static void 1677 sk_watchdog(struct ifnet *ifp) 1678 { 1679 struct sk_if_softc *sc_if; 1680 1681 sc_if = ifp->if_softc; 1682 1683 printf("sk%d: watchdog timeout\n", sc_if->sk_unit); 1684 ifp->if_flags &= ~IFF_RUNNING; 1685 sk_init(sc_if); 1686 1687 if (!ifq_is_empty(&ifp->if_snd)) 1688 ifp->if_start(ifp); 1689 } 1690 1691 static void 1692 skc_shutdown(device_t dev) 1693 { 1694 struct sk_softc *sc = device_get_softc(dev); 1695 1696 lwkt_serialize_enter(&sk_serializer); 1697 1698 /* Turn off the 'driver is loaded' LED. */ 1699 CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF); 1700 1701 /* 1702 * Reset the GEnesis controller. Doing this should also 1703 * assert the resets on the attached XMAC(s). 1704 */ 1705 sk_reset(sc); 1706 lwkt_serialize_exit(&sk_serializer); 1707 } 1708 1709 static void 1710 sk_rxeof(struct sk_if_softc *sc_if) 1711 { 1712 struct ifnet *ifp = &sc_if->arpcom.ac_if; 1713 struct mbuf *m; 1714 struct sk_chain *cur_rx; 1715 int i, total_len = 0; 1716 uint32_t rxstat; 1717 1718 i = sc_if->sk_cdata.sk_rx_prod; 1719 cur_rx = &sc_if->sk_cdata.sk_rx_chain[i]; 1720 1721 while(!(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl & SK_RXCTL_OWN)) { 1722 cur_rx = &sc_if->sk_cdata.sk_rx_chain[i]; 1723 rxstat = sc_if->sk_rdata->sk_rx_ring[i].sk_xmac_rxstat; 1724 m = cur_rx->sk_mbuf; 1725 cur_rx->sk_mbuf = NULL; 1726 total_len = SK_RXBYTES(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl); 1727 SK_INC(i, SK_RX_RING_CNT); 1728 1729 if (rxstat & XM_RXSTAT_ERRFRAME) { 1730 ifp->if_ierrors++; 1731 sk_newbuf(sc_if, cur_rx, m); 1732 continue; 1733 } 1734 1735 /* 1736 * Try to allocate a new jumbo buffer. 
If that 1737 * fails, copy the packet to mbufs and put the 1738 * jumbo buffer back in the ring so it can be 1739 * re-used. If allocating mbufs fails, then we 1740 * have to drop the packet. 1741 */ 1742 if (sk_newbuf(sc_if, cur_rx, NULL) == ENOBUFS) { 1743 struct mbuf *m0; 1744 m0 = m_devget(mtod(m, char *) - ETHER_ALIGN, 1745 total_len + ETHER_ALIGN, 0, ifp, NULL); 1746 sk_newbuf(sc_if, cur_rx, m); 1747 if (m0 == NULL) { 1748 printf("sk%d: no receive buffers " 1749 "available -- packet dropped!\n", 1750 sc_if->sk_unit); 1751 ifp->if_ierrors++; 1752 continue; 1753 } 1754 m_adj(m0, ETHER_ALIGN); 1755 m = m0; 1756 } else { 1757 m->m_pkthdr.rcvif = ifp; 1758 m->m_pkthdr.len = m->m_len = total_len; 1759 } 1760 1761 ifp->if_ipackets++; 1762 ifp->if_input(ifp, m); 1763 } 1764 1765 sc_if->sk_cdata.sk_rx_prod = i; 1766 } 1767 1768 static void 1769 sk_txeof(struct sk_if_softc *sc_if) 1770 { 1771 struct ifnet *ifp = &sc_if->arpcom.ac_if; 1772 struct sk_tx_desc *cur_tx = NULL; 1773 uint32_t idx; 1774 1775 /* 1776 * Go through our tx ring and free mbufs for those 1777 * frames that have been sent. 1778 */ 1779 idx = sc_if->sk_cdata.sk_tx_cons; 1780 while(idx != sc_if->sk_cdata.sk_tx_prod) { 1781 cur_tx = &sc_if->sk_rdata->sk_tx_ring[idx]; 1782 if (cur_tx->sk_ctl & SK_TXCTL_OWN) 1783 break; 1784 if (cur_tx->sk_ctl & SK_TXCTL_LASTFRAG) 1785 ifp->if_opackets++; 1786 if (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf != NULL) { 1787 m_freem(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf); 1788 sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf = NULL; 1789 } 1790 sc_if->sk_cdata.sk_tx_cnt--; 1791 SK_INC(idx, SK_TX_RING_CNT); 1792 ifp->if_timer = 0; 1793 } 1794 1795 sc_if->sk_cdata.sk_tx_cons = idx; 1796 1797 if (cur_tx != NULL) 1798 ifp->if_flags &= ~IFF_OACTIVE; 1799 } 1800 1801 static void 1802 sk_tick(void *xsc_if) 1803 { 1804 struct sk_if_softc *sc_if = xsc_if; 1805 struct ifnet *ifp = &sc_if->arpcom.ac_if; 1806 struct mii_data *mii = device_get_softc(sc_if->sk_miibus); 1807 int i; 1808 1809 lwkt_serialize_enter(&sk_serializer); 1810 1811 if ((ifp->if_flags & IFF_UP) == 0) { 1812 lwkt_serialize_exit(&sk_serializer); 1813 return; 1814 } 1815 1816 if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) { 1817 sk_intr_bcom(sc_if); 1818 lwkt_serialize_exit(&sk_serializer); 1819 return; 1820 } 1821 1822 /* 1823 * According to SysKonnect, the correct way to verify that 1824 * the link has come back up is to poll bit 0 of the GPIO 1825 * register three times. This pin has the signal from the 1826 * link_sync pin connected to it; if we read the same link 1827 * state 3 times in a row, we know the link is up. 1828 */ 1829 for (i = 0; i < 3; i++) { 1830 if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET) 1831 break; 1832 } 1833 1834 if (i != 3) { 1835 callout_reset(&sc_if->sk_tick_timer, hz, sk_tick, sc_if); 1836 lwkt_serialize_exit(&sk_serializer); 1837 return; 1838 } 1839 1840 /* Turn the GP0 interrupt back on. */ 1841 SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET); 1842 SK_XM_READ_2(sc_if, XM_ISR); 1843 mii_tick(mii); 1844 mii_pollstat(mii); 1845 callout_stop(&sc_if->sk_tick_timer); 1846 lwkt_serialize_exit(&sk_serializer); 1847 } 1848 1849 static void 1850 sk_intr_bcom(struct sk_if_softc *sc_if) 1851 { 1852 struct ifnet *ifp = &sc_if->arpcom.ac_if; 1853 struct mii_data *mii = device_get_softc(sc_if->sk_miibus); 1854 int status; 1855 1856 SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB); 1857 1858 /* 1859 * Read the PHY interrupt register to make sure 1860 * we clear any pending interrupts. 
1861 */ 1862 status = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, BRGPHY_MII_ISR); 1863 1864 if ((ifp->if_flags & IFF_RUNNING) == 0) { 1865 sk_init_xmac(sc_if); 1866 return; 1867 } 1868 1869 if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) { 1870 int lstat; 1871 lstat = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, 1872 BRGPHY_MII_AUXSTS); 1873 1874 if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) { 1875 mii_mediachg(mii); 1876 /* Turn off the link LED. */ 1877 SK_IF_WRITE_1(sc_if, 0, 1878 SK_LINKLED1_CTL, SK_LINKLED_OFF); 1879 sc_if->sk_link = 0; 1880 } else if (status & BRGPHY_ISR_LNK_CHG) { 1881 sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM, 1882 BRGPHY_MII_IMR, 0xFF00); 1883 mii_tick(mii); 1884 sc_if->sk_link = 1; 1885 /* Turn on the link LED. */ 1886 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, 1887 SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF| 1888 SK_LINKLED_BLINK_OFF); 1889 mii_pollstat(mii); 1890 } else { 1891 mii_tick(mii); 1892 callout_reset(&sc_if->sk_tick_timer, hz, 1893 sk_tick, sc_if); 1894 } 1895 } 1896 1897 SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB); 1898 } 1899 1900 static void 1901 sk_intr_xmac(struct sk_if_softc *sc_if) 1902 { 1903 uint16_t status; 1904 1905 status = SK_XM_READ_2(sc_if, XM_ISR); 1906 1907 /* 1908 * Link has gone down. Start MII tick timeout to 1909 * watch for link resync. 1910 */ 1911 if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) { 1912 if (status & XM_ISR_GP0_SET) { 1913 SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET); 1914 callout_reset(&sc_if->sk_tick_timer, hz, 1915 sk_tick, sc_if); 1916 } 1917 1918 if (status & XM_ISR_AUTONEG_DONE) { 1919 callout_reset(&sc_if->sk_tick_timer, hz, 1920 sk_tick, sc_if); 1921 } 1922 } 1923 1924 if (status & XM_IMR_TX_UNDERRUN) 1925 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO); 1926 1927 if (status & XM_IMR_RX_OVERRUN) 1928 SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO); 1929 1930 status = SK_XM_READ_2(sc_if, XM_ISR); 1931 } 1932 1933 static void 1934 sk_intr_yukon(struct sk_if_softc *sc_if) 1935 { 1936 int status; 1937 1938 status = SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR); 1939 } 1940 1941 static void 1942 sk_intr(void *xsc) 1943 { 1944 struct sk_softc *sc = xsc; 1945 struct sk_if_softc *sc_if0 = sc->sk_if[SK_PORT_A]; 1946 struct sk_if_softc *sc_if1 = sc->sk_if[SK_PORT_A]; 1947 struct ifnet *ifp0 = NULL, *ifp1 = NULL; 1948 uint32_t status; 1949 1950 if (sc_if0 != NULL) 1951 ifp0 = &sc_if0->arpcom.ac_if; 1952 if (sc_if1 != NULL) 1953 ifp1 = &sc_if1->arpcom.ac_if; 1954 1955 for (;;) { 1956 status = CSR_READ_4(sc, SK_ISSR); 1957 if ((status & sc->sk_intrmask) == 0) 1958 break; 1959 1960 /* Handle receive interrupts first. */ 1961 if (status & SK_ISR_RX1_EOF) { 1962 sk_rxeof(sc_if0); 1963 CSR_WRITE_4(sc, SK_BMU_RX_CSR0, 1964 SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START); 1965 } 1966 if (status & SK_ISR_RX2_EOF) { 1967 sk_rxeof(sc_if1); 1968 CSR_WRITE_4(sc, SK_BMU_RX_CSR1, 1969 SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START); 1970 } 1971 1972 /* Then transmit interrupts. */ 1973 if (status & SK_ISR_TX1_S_EOF) { 1974 sk_txeof(sc_if0); 1975 CSR_WRITE_4(sc, SK_BMU_TXS_CSR0, 1976 SK_TXBMU_CLR_IRQ_EOF); 1977 } 1978 if (status & SK_ISR_TX2_S_EOF) { 1979 sk_txeof(sc_if1); 1980 CSR_WRITE_4(sc, SK_BMU_TXS_CSR1, 1981 SK_TXBMU_CLR_IRQ_EOF); 1982 } 1983 1984 /* Then MAC interrupts. 
		if (status & SK_ISR_MAC1 && ifp0->if_flags & IFF_RUNNING) {
			if (sc->sk_type == SK_GENESIS)
				sk_intr_xmac(sc_if0);
			else
				sk_intr_yukon(sc_if0);
		}

		if (status & SK_ISR_MAC2 && ifp1->if_flags & IFF_RUNNING) {
			if (sc->sk_type == SK_GENESIS)
				sk_intr_xmac(sc_if1);
			else
				sk_intr_yukon(sc_if1);
		}

		if (status & SK_ISR_EXTERNAL_REG) {
			if (ifp0 != NULL &&
			    sc_if0->sk_phytype == SK_PHYTYPE_BCOM)
				sk_intr_bcom(sc_if0);
			if (ifp1 != NULL &&
			    sc_if1->sk_phytype == SK_PHYTYPE_BCOM)
				sk_intr_bcom(sc_if1);
		}
	}

	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	if (ifp0 != NULL && !ifq_is_empty(&ifp0->if_snd))
		sk_start(ifp0);
	if (ifp1 != NULL && !ifq_is_empty(&ifp1->if_snd))
		sk_start(ifp1);
}

static void
sk_init_xmac(struct sk_if_softc *sc_if)
{
	struct sk_softc *sc = sc_if->sk_softc;
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	struct sk_bcom_hack bhack[] = {
	{ 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 },
	{ 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 },
	{ 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
	{ 0, 0 } };

	/* Unreset the XMAC. */
	SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET);
	DELAY(1000);

	/* Reset the XMAC's internal state. */
	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);

	/* Save the XMAC II revision */
	sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID));

	/*
	 * Perform additional initialization for external PHYs,
	 * namely for the 1000baseTX cards that use the XMAC's
	 * GMII mode.
	 */
	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		int i = 0;
		uint32_t val;

		/* Take PHY out of reset. */
		val = sk_win_read_4(sc, SK_GPIO);
		if (sc_if->sk_port == SK_PORT_A)
			val |= SK_GPIO_DIR0|SK_GPIO_DAT0;
		else
			val |= SK_GPIO_DIR2|SK_GPIO_DAT2;
		sk_win_write_4(sc, SK_GPIO, val);

		/* Enable GMII mode on the XMAC. */
		SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE);

		sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
		    BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET);
		DELAY(10000);
		sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
		    BRGPHY_MII_IMR, 0xFFF0);

		/*
		 * Early versions of the BCM5400 apparently have
		 * a bug that requires them to have their reserved
		 * registers initialized to some magic values. I don't
		 * know what the numbers do, I'm just the messenger.
		 */
		if (sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, 0x03)
		    == 0x6041) {
			while (bhack[i].reg) {
				sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM,
				    bhack[i].reg, bhack[i].val);
				i++;
			}
		}
	}

	/* Set station address */
	SK_XM_WRITE_2(sc_if, XM_PAR0,
	    *(uint16_t *)(&sc_if->arpcom.ac_enaddr[0]));
	SK_XM_WRITE_2(sc_if, XM_PAR1,
	    *(uint16_t *)(&sc_if->arpcom.ac_enaddr[2]));
	SK_XM_WRITE_2(sc_if, XM_PAR2,
	    *(uint16_t *)(&sc_if->arpcom.ac_enaddr[4]));
	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION);

	if (ifp->if_flags & IFF_BROADCAST)
		SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
	else
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);

	/* We don't need the FCS appended to the packet. */
	SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS);

	/* We want short frames padded to 60 bytes. */
	SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD);

	/*
	 * Enable the reception of all error frames. This is
	 * a necessary evil due to the design of the XMAC. The
	 * XMAC's receive FIFO is only 8K in size, however jumbo
	 * frames can be up to 9000 bytes in length. When bad
	 * frame filtering is enabled, the XMAC's RX FIFO operates
	 * in 'store and forward' mode. For this to work, the
	 * entire frame has to fit into the FIFO, but that means
	 * that jumbo frames larger than 8192 bytes will be
	 * truncated. Disabling all bad frame filtering causes
	 * the RX FIFO to operate in streaming mode, in which
	 * case the XMAC will start transferring frames out of the
	 * RX FIFO as soon as the FIFO threshold is reached.
	 */
	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES|
	    XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS|
	    XM_MODE_RX_INRANGELEN);

	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
		SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
	else
		SK_XM_CLRBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);

	/*
	 * Bump up the transmit threshold. This helps hold off transmit
	 * underruns when we're blasting traffic from both ports at once.
	 */
	SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH);

	/* Set promiscuous mode */
	sk_setpromisc(sc_if);

	/* Set multicast filter */
	sk_setmulti(sc_if);

	/* Clear and enable interrupts */
	SK_XM_READ_2(sc_if, XM_ISR);
	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
		SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS);
	else
		SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);

	/* Configure MAC arbiter */
	switch (sc_if->sk_xmac_rev) {
	case XM_XMAC_REV_B2:
		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
		break;
	case XM_XMAC_REV_C1:
		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
		break;
	default:
		break;
	}
	sk_win_write_2(sc, SK_MACARB_CTL,
	    SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF);

	sc_if->sk_link = 1;
}

static void
sk_init_yukon(struct sk_if_softc *sc_if)
{
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	uint32_t phy;
	uint16_t reg;
	int i;

	/* GMAC and GPHY Reset */
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET);
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
	DELAY(1000);
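
	/*
	 * Toggle the GMAC reset (clear it, then assert it again) before
	 * the GPHY control word is programmed; the PHY and MAC are only
	 * released from reset further below, once 'phy' has been set up.
	 */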
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_CLEAR);
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
	DELAY(1000);

	phy = SK_GPHY_INT_POL_HI | SK_GPHY_DIS_FC | SK_GPHY_DIS_SLEEP |
	    SK_GPHY_ENA_XC | SK_GPHY_ANEG_ALL | SK_GPHY_ENA_PAUSE;

	switch (sc_if->sk_softc->sk_pmd) {
	case IFM_1000_SX:
	case IFM_1000_LX:
		phy |= SK_GPHY_FIBER;
		break;

	case IFM_1000_CX:
	case IFM_1000_T:
		phy |= SK_GPHY_COPPER;
		break;
	}

	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_SET);
	DELAY(1000);
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_CLEAR);
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF |
	    SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR);

	/* unused read of the interrupt source register */
	SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);

	reg = SK_YU_READ_2(sc_if, YUKON_PAR);

	/* MIB Counter Clear Mode set */
	reg |= YU_PAR_MIB_CLR;
	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);

	/* MIB Counter Clear Mode clear */
	reg &= ~YU_PAR_MIB_CLR;
	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);

	/* receive control reg */
	SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR);

	/* transmit parameter register */
	SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) |
	    YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a));

	/* serial mode register */
	reg = YU_SMR_DATA_BLIND(0x1c) | YU_SMR_MFL_VLAN | YU_SMR_IPG_DATA(0x1e);
	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
		reg |= YU_SMR_MFL_JUMBO;
	SK_YU_WRITE_2(sc_if, YUKON_SMR, reg);

	/* Setup Yukon's address */
	for (i = 0; i < 3; i++) {
		/* Write Source Address 1 (unicast filter) */
		SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4,
		    sc_if->arpcom.ac_enaddr[i * 2] |
		    sc_if->arpcom.ac_enaddr[i * 2 + 1] << 8);
	}

	for (i = 0; i < 3; i++) {
		reg = sk_win_read_2(sc_if->sk_softc,
		    SK_MAC1_0 + i * 2 + sc_if->sk_port * 8);
		SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4, reg);
	}

	/* Set promiscuous mode */
	sk_setpromisc(sc_if);

	/* Set multicast filter */
	sk_setmulti(sc_if);

	/* Mask off the counter-overflow interrupts. */
	SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0);
	SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0);
	SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0);

	/* Configure RX MAC FIFO */
	SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR);
	SK_IF_WRITE_4(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_OPERATION_ON);

	/* Configure TX MAC FIFO */
	SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR);
	SK_IF_WRITE_4(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON);
}

/*
 * Note that to properly initialize any part of the GEnesis chip,
 * you first have to take it out of reset mode.
 */
static void
sk_init(void *xsc)
{
	struct sk_if_softc *sc_if = xsc;
	struct sk_softc *sc = sc_if->sk_softc;
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	struct mii_data *mii = device_get_softc(sc_if->sk_miibus);
	uint16_t reg;

	crit_enter();

	if (ifp->if_flags & IFF_RUNNING) {
		crit_exit();
		return;
	}

	/* Cancel pending I/O and free all RX/TX buffers. */
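	/*
	 * sk_stop() masks this port's interrupts, takes the BMUs and RAM
	 * buffers offline and frees any mbufs still left on the RX/TX
	 * rings, so the chip can be reprogrammed from a clean state.
	 */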
	sk_stop(sc_if);

	if (sc->sk_type == SK_GENESIS) {
		/* Configure LINK_SYNC LED */
		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON);
		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL,
		    SK_LINKLED_LINKSYNC_ON);

		/* Configure RX LED */
		SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL,
		    SK_RXLEDCTL_COUNTER_START);

		/* Configure TX LED */
		SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL,
		    SK_TXLEDCTL_COUNTER_START);
	}

	/* Configure I2C registers */

	/* Configure XMAC(s) */
	switch (sc->sk_type) {
	case SK_GENESIS:
		sk_init_xmac(sc_if);
		break;
	case SK_YUKON:
		sk_init_yukon(sc_if);
		break;
	}
	mii_mediachg(mii);

	if (sc->sk_type == SK_GENESIS) {
		/* Configure MAC FIFOs */
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET);
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END);
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON);

		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET);
		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END);
		SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON);
	}

	/* Configure transmit arbiter(s) */
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL,
	    SK_TXARCTL_ON | SK_TXARCTL_FSYNC_ON);

	/* Configure RAMbuffers */
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);

	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON);

	/* Configure BMUs */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
	    vtophys(&sc_if->sk_rdata->sk_rx_ring[0]));
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI, 0);

	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO,
	    vtophys(&sc_if->sk_rdata->sk_tx_ring[0]));
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI, 0);

	/* Init descriptors */
	if (sk_init_rx_ring(sc_if) == ENOBUFS) {
		printf("sk%d: initialization failed: no "
		    "memory for rx buffers\n", sc_if->sk_unit);
		sk_stop(sc_if);
		crit_exit();
		return;
	}
	sk_init_tx_ring(sc_if);

	/* Configure interrupt handling */
	CSR_READ_4(sc, SK_ISSR);
	if (sc_if->sk_port == SK_PORT_A)
		sc->sk_intrmask |= SK_INTRS1;
	else
		sc->sk_intrmask |= SK_INTRS2;

	sc->sk_intrmask |= SK_ISR_EXTERNAL_REG;

	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	/* Start BMUs. */
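	/*
	 * Only the receive BMU is kicked here; the transmit BMU was
	 * already placed online when the BMUs were configured above.
	 */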
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START);

	switch (sc->sk_type) {
	case SK_GENESIS:
		/* Enable XMACs TX and RX state machines */
		SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE);
		SK_XM_SETBIT_2(sc_if, XM_MMUCMD,
		    XM_MMUCMD_TX_ENB | XM_MMUCMD_RX_ENB);
		break;
	case SK_YUKON:
		reg = SK_YU_READ_2(sc_if, YUKON_GPCR);
		reg |= YU_GPCR_TXEN | YU_GPCR_RXEN;
		reg &= ~(YU_GPCR_SPEED_EN | YU_GPCR_DPLX_EN);
		SK_YU_WRITE_2(sc_if, YUKON_GPCR, reg);
	}

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	crit_exit();
}

static void
sk_stop(struct sk_if_softc *sc_if)
{
	int i;
	struct sk_softc *sc = sc_if->sk_softc;
	struct ifnet *ifp = &sc_if->arpcom.ac_if;

	callout_stop(&sc_if->sk_tick_timer);

	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		uint32_t val;

		/* Put PHY back into reset. */
		val = sk_win_read_4(sc, SK_GPIO);
		if (sc_if->sk_port == SK_PORT_A) {
			val |= SK_GPIO_DIR0;
			val &= ~SK_GPIO_DAT0;
		} else {
			val |= SK_GPIO_DIR2;
			val &= ~SK_GPIO_DAT2;
		}
		sk_win_write_4(sc, SK_GPIO, val);
	}

	/* Turn off various components of this interface. */
	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
	switch (sc->sk_type) {
	case SK_GENESIS:
		SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_RESET);
		SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET);
		break;
	case SK_YUKON:
		SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET);
		SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET);
		break;
	}
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET | SK_RBCTL_OFF);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST,
	    SK_RBCTL_RESET | SK_RBCTL_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
	SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);

	/* Disable interrupts */
	if (sc_if->sk_port == SK_PORT_A)
		sc->sk_intrmask &= ~SK_INTRS1;
	else
		sc->sk_intrmask &= ~SK_INTRS2;
	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	SK_XM_READ_2(sc_if, XM_ISR);
	SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);

	/* Free RX and TX mbufs still in the queues. */
	for (i = 0; i < SK_RX_RING_CNT; i++) {
		if (sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf != NULL) {
			m_freem(sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf);
			sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf = NULL;
		}
	}

	for (i = 0; i < SK_TX_RING_CNT; i++) {
		if (sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf != NULL) {
			m_freem(sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf);
			sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf = NULL;
		}
	}

	ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE);
}