1 /* $OpenBSD: if_msk.c,v 1.94 2012/03/28 12:02:49 jsg Exp $ */ 2 3 /* 4 * Copyright (c) 1997, 1998, 1999, 2000 5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Bill Paul. 18 * 4. Neither the name of the author nor the names of any co-contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 
33 * 34 * $FreeBSD: /c/ncvs/src/sys/pci/if_sk.c,v 1.20 2000/04/22 02:16:37 wpaul Exp $ 35 */ 36 37 /* 38 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu> 39 * 40 * Permission to use, copy, modify, and distribute this software for any 41 * purpose with or without fee is hereby granted, provided that the above 42 * copyright notice and this permission notice appear in all copies. 43 * 44 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 45 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 46 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 47 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 48 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 49 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 50 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 51 */ 52 53 /* 54 * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports 55 * the SK-984x series adapters, both single port and dual port. 56 * References: 57 * The XaQti XMAC II datasheet, 58 * http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf 59 * The SysKonnect GEnesis manual, http://www.syskonnect.com 60 * 61 * Note: XaQti has been acquired by Vitesse, and Vitesse does not have the 62 * XMAC II datasheet online. I have put my copy at people.freebsd.org as a 63 * convenience to others until Vitesse corrects this problem: 64 * 65 * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf 66 * 67 * Written by Bill Paul <wpaul@ee.columbia.edu> 68 * Department of Electrical Engineering 69 * Columbia University, New York City 70 */ 71 72 /* 73 * The SysKonnect gigabit ethernet adapters consist of two main 74 * components: the SysKonnect GEnesis controller chip and the XaQti Corp. 75 * XMAC II gigabit ethernet MAC. 
The XMAC provides all of the MAC 76 * components and a PHY while the GEnesis controller provides a PCI 77 * interface with DMA support. Each card may have between 512K and 78 * 2MB of SRAM on board depending on the configuration. 79 * 80 * The SysKonnect GEnesis controller can have either one or two XMAC 81 * chips connected to it, allowing single or dual port NIC configurations. 82 * SysKonnect has the distinction of being the only vendor on the market 83 * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs, 84 * dual DMA queues, packet/MAC/transmit arbiters and direct access to the 85 * XMAC registers. This driver takes advantage of these features to allow 86 * both XMACs to operate as independent interfaces. 87 */ 88 89 #include "bpfilter.h" 90 91 #include <sys/param.h> 92 #include <sys/systm.h> 93 #include <sys/sockio.h> 94 #include <sys/mbuf.h> 95 #include <sys/malloc.h> 96 #include <sys/kernel.h> 97 #include <sys/socket.h> 98 #include <sys/timeout.h> 99 #include <sys/device.h> 100 #include <sys/queue.h> 101 102 #include <net/if.h> 103 #include <net/if_dl.h> 104 #include <net/if_types.h> 105 106 #ifdef INET 107 #include <netinet/in.h> 108 #include <netinet/in_systm.h> 109 #include <netinet/in_var.h> 110 #include <netinet/ip.h> 111 #include <netinet/udp.h> 112 #include <netinet/tcp.h> 113 #include <netinet/if_ether.h> 114 #endif 115 116 #include <net/if_media.h> 117 #include <net/if_vlan_var.h> 118 119 #if NBPFILTER > 0 120 #include <net/bpf.h> 121 #endif 122 123 #include <dev/mii/mii.h> 124 #include <dev/mii/miivar.h> 125 #include <dev/mii/brgphyreg.h> 126 127 #include <dev/pci/pcireg.h> 128 #include <dev/pci/pcivar.h> 129 #include <dev/pci/pcidevs.h> 130 131 #include <dev/pci/if_skreg.h> 132 #include <dev/pci/if_mskvar.h> 133 134 int mskc_probe(struct device *, void *, void *); 135 void mskc_attach(struct device *, struct device *self, void *aux); 136 int mskc_detach(struct device *, int); 137 int mskc_activate(struct device *, int); 138 
void mskc_reset(struct sk_softc *);
int msk_probe(struct device *, void *, void *);
void msk_attach(struct device *, struct device *self, void *aux);
int msk_detach(struct device *, int);
int msk_activate(struct device *, int);
void msk_reset(struct sk_if_softc *);
int mskcprint(void *, const char *);
int msk_intr(void *);
void msk_intr_yukon(struct sk_if_softc *);
static __inline int msk_rxvalid(struct sk_softc *, u_int32_t, u_int32_t);
void msk_rxeof(struct sk_if_softc *, u_int16_t, u_int32_t);
void msk_txeof(struct sk_if_softc *);
int msk_encap(struct sk_if_softc *, struct mbuf *, u_int32_t *);
void msk_start(struct ifnet *);
int msk_ioctl(struct ifnet *, u_long, caddr_t);
void msk_init(void *);
void msk_init_yukon(struct sk_if_softc *);
void msk_stop(struct sk_if_softc *, int);
void msk_watchdog(struct ifnet *);
int msk_ifmedia_upd(struct ifnet *);
void msk_ifmedia_sts(struct ifnet *, struct ifmediareq *);
int msk_newbuf(struct sk_if_softc *);
int msk_init_rx_ring(struct sk_if_softc *);
int msk_init_tx_ring(struct sk_if_softc *);
void msk_fill_rx_ring(struct sk_if_softc *);

int msk_miibus_readreg(struct device *, int, int);
void msk_miibus_writereg(struct device *, int, int, int);
void msk_miibus_statchg(struct device *);

void msk_setmulti(struct sk_if_softc *);
void msk_setpromisc(struct sk_if_softc *);
void msk_tick(void *);

#ifdef MSK_DEBUG
#define DPRINTF(x)	if (mskdebug) printf x
#define DPRINTFN(n,x)	if (mskdebug >= (n)) printf x
int	mskdebug = 0;

void msk_dump_txdesc(struct msk_tx_desc *, int);
void msk_dump_mbuf(struct mbuf *);
void msk_dump_bytes(const char *, int);
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

/* supported device vendors */
const struct pci_matchid mskc_devices[] = {
	{ PCI_VENDOR_DLINK,		PCI_PRODUCT_DLINK_DGE550SX },
	{ PCI_VENDOR_DLINK,		PCI_PRODUCT_DLINK_DGE550T_B1 },
	{ PCI_VENDOR_DLINK,		PCI_PRODUCT_DLINK_DGE560SX },
	{ PCI_VENDOR_DLINK,		PCI_PRODUCT_DLINK_DGE560T },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8021CU },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8021X },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8022CU },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8022X },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8035 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8036 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8038 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8039 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8040 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8040T },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8042 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8048 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8050 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8052 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8053 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8055 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8055_2 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8056 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8057 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8058 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8059 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8061CU },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8061X },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8062CU },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8062X },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8070 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8071 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8072 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8075 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_C032 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_C033 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_C034 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_C036 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_C042 },
	{ PCI_VENDOR_SCHNEIDERKOCH,	PCI_PRODUCT_SCHNEIDERKOCH_SK9Exx },
	{ PCI_VENDOR_SCHNEIDERKOCH,	PCI_PRODUCT_SCHNEIDERKOCH_SK9Sxx }
};

/*
 * Thin wrappers around the CSR_* accessor macros.  On the Yukon-2 the
 * whole register space is mapped flat, so these are direct reads/writes
 * (no window switching is performed here).
 */
static inline u_int32_t
sk_win_read_4(struct sk_softc *sc, u_int32_t reg)
{
	return CSR_READ_4(sc, reg);
}

static inline u_int16_t
sk_win_read_2(struct sk_softc *sc, u_int32_t reg)
{
	return CSR_READ_2(sc, reg);
}

static inline u_int8_t
sk_win_read_1(struct sk_softc *sc, u_int32_t reg)
{
	return CSR_READ_1(sc, reg);
}

static inline void
sk_win_write_4(struct sk_softc *sc, u_int32_t reg, u_int32_t x)
{
	CSR_WRITE_4(sc, reg, x);
}

static inline void
sk_win_write_2(struct sk_softc *sc, u_int32_t reg, u_int16_t x)
{
	CSR_WRITE_2(sc, reg, x);
}

static inline void
sk_win_write_1(struct sk_softc *sc, u_int32_t reg, u_int8_t x)
{
	CSR_WRITE_1(sc, reg, x);
}

/*
 * Read a PHY register through the Yukon SMI interface.  Busy-waits
 * (up to SK_TIMEOUT iterations of DELAY(1)) for the READ_VALID bit;
 * on timeout prints a diagnostic and returns 0.
 */
int
msk_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
	u_int16_t val;
	int i;

	SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
	    YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ);

	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		val = SK_YU_READ_2(sc_if, YUKON_SMICR);
		if (val & YU_SMICR_READ_VALID)
			break;
	}

	if (i == SK_TIMEOUT) {
		printf("%s: phy failed to come ready\n",
		    sc_if->sk_dev.dv_xname);
		return (0);
	}

	DPRINTFN(9, ("msk_miibus_readreg: i=%d, timeout=%d\n", i,
	    SK_TIMEOUT));

	val = SK_YU_READ_2(sc_if, YUKON_SMIDR);

	DPRINTFN(9, ("msk_miibus_readreg phy=%d, reg=%#x, val=%#x\n",
	    phy, reg, val));

	return (val);
}

void
msk_miibus_writereg(struct device *dev, int phy, int reg, int val) 301 { 302 struct sk_if_softc *sc_if = (struct sk_if_softc *)dev; 303 int i; 304 305 DPRINTFN(9, ("msk_miibus_writereg phy=%d reg=%#x val=%#x\n", 306 phy, reg, val)); 307 308 SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val); 309 SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) | 310 YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE); 311 312 for (i = 0; i < SK_TIMEOUT; i++) { 313 DELAY(1); 314 if (!(SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY)) 315 break; 316 } 317 318 if (i == SK_TIMEOUT) 319 printf("%s: phy write timed out\n", sc_if->sk_dev.dv_xname); 320 } 321 322 void 323 msk_miibus_statchg(struct device *dev) 324 { 325 struct sk_if_softc *sc_if = (struct sk_if_softc *)dev; 326 struct mii_data *mii = &sc_if->sk_mii; 327 struct ifmedia_entry *ife = mii->mii_media.ifm_cur; 328 int gpcr; 329 330 gpcr = SK_YU_READ_2(sc_if, YUKON_GPCR); 331 gpcr &= (YU_GPCR_TXEN | YU_GPCR_RXEN); 332 333 if (IFM_SUBTYPE(ife->ifm_media) != IFM_AUTO || 334 sc_if->sk_softc->sk_type == SK_YUKON_FE_P) { 335 /* Set speed. */ 336 gpcr |= YU_GPCR_SPEED_DIS; 337 switch (IFM_SUBTYPE(mii->mii_media_active)) { 338 case IFM_1000_SX: 339 case IFM_1000_LX: 340 case IFM_1000_CX: 341 case IFM_1000_T: 342 gpcr |= (YU_GPCR_GIG | YU_GPCR_SPEED); 343 break; 344 case IFM_100_TX: 345 gpcr |= YU_GPCR_SPEED; 346 break; 347 } 348 349 /* Set duplex. */ 350 gpcr |= YU_GPCR_DPLX_DIS; 351 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) 352 gpcr |= YU_GPCR_DUPLEX; 353 354 /* Disable flow control. 
*/ 355 gpcr |= YU_GPCR_FCTL_DIS; 356 gpcr |= (YU_GPCR_FCTL_TX_DIS | YU_GPCR_FCTL_RX_DIS); 357 } 358 359 SK_YU_WRITE_2(sc_if, YUKON_GPCR, gpcr); 360 361 DPRINTFN(9, ("msk_miibus_statchg: gpcr=%x\n", 362 SK_YU_READ_2(((struct sk_if_softc *)dev), YUKON_GPCR))); 363 } 364 365 void 366 msk_setmulti(struct sk_if_softc *sc_if) 367 { 368 struct ifnet *ifp= &sc_if->arpcom.ac_if; 369 u_int32_t hashes[2] = { 0, 0 }; 370 int h; 371 struct arpcom *ac = &sc_if->arpcom; 372 struct ether_multi *enm; 373 struct ether_multistep step; 374 375 /* First, zot all the existing filters. */ 376 SK_YU_WRITE_2(sc_if, YUKON_MCAH1, 0); 377 SK_YU_WRITE_2(sc_if, YUKON_MCAH2, 0); 378 SK_YU_WRITE_2(sc_if, YUKON_MCAH3, 0); 379 SK_YU_WRITE_2(sc_if, YUKON_MCAH4, 0); 380 381 382 /* Now program new ones. */ 383 allmulti: 384 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 385 hashes[0] = 0xFFFFFFFF; 386 hashes[1] = 0xFFFFFFFF; 387 } else { 388 /* First find the tail of the list. */ 389 ETHER_FIRST_MULTI(step, ac, enm); 390 while (enm != NULL) { 391 if (bcmp(enm->enm_addrlo, enm->enm_addrhi, 392 ETHER_ADDR_LEN)) { 393 ifp->if_flags |= IFF_ALLMULTI; 394 goto allmulti; 395 } 396 h = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) & 397 ((1 << SK_HASH_BITS) - 1); 398 if (h < 32) 399 hashes[0] |= (1 << h); 400 else 401 hashes[1] |= (1 << (h - 32)); 402 403 ETHER_NEXT_MULTI(step, enm); 404 } 405 } 406 407 SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff); 408 SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff); 409 SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff); 410 SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff); 411 } 412 413 void 414 msk_setpromisc(struct sk_if_softc *sc_if) 415 { 416 struct ifnet *ifp = &sc_if->arpcom.ac_if; 417 418 if (ifp->if_flags & IFF_PROMISC) 419 SK_YU_CLRBIT_2(sc_if, YUKON_RCR, 420 YU_RCR_UFLEN | YU_RCR_MUFLEN); 421 else 422 SK_YU_SETBIT_2(sc_if, YUKON_RCR, 423 YU_RCR_UFLEN | YU_RCR_MUFLEN); 424 } 425 426 int 427 
msk_init_rx_ring(struct sk_if_softc *sc_if)
{
	struct msk_chain_data *cd = &sc_if->sk_cdata;
	struct msk_ring_data *rd = sc_if->sk_rdata;
	int i, nexti;

	/* Zero the descriptors and link the software chain into a ring. */
	bzero(rd->sk_rx_ring, sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT);

	for (i = 0; i < MSK_RX_RING_CNT; i++) {
		cd->sk_rx_chain[i].sk_le = &rd->sk_rx_ring[i];
		if (i == (MSK_RX_RING_CNT - 1))
			nexti = 0;
		else
			nexti = i + 1;
		cd->sk_rx_chain[i].sk_next = &cd->sk_rx_chain[nexti];
	}

	sc_if->sk_cdata.sk_rx_prod = 0;
	sc_if->sk_cdata.sk_rx_cons = 0;
	sc_if->sk_cdata.sk_rx_cnt = 0;

	/* Populate the ring with receive buffers. */
	msk_fill_rx_ring(sc_if);
	return (0);
}

/*
 * Initialize the TX descriptor ring and pre-allocate one DMA map per
 * slot on the sk_txmap_head free list.  Returns 0 on success or
 * ENOBUFS if a map or list entry cannot be allocated.
 * NOTE(review): on a mid-loop failure the maps/entries already placed
 * on sk_txmap_head are left allocated — presumably reclaimed by the
 * caller's teardown path; confirm against msk_stop().
 */
int
msk_init_tx_ring(struct sk_if_softc *sc_if)
{
	struct sk_softc *sc = sc_if->sk_softc;
	struct msk_chain_data *cd = &sc_if->sk_cdata;
	struct msk_ring_data *rd = sc_if->sk_rdata;
	bus_dmamap_t dmamap;
	struct sk_txmap_entry *entry;
	int i, nexti;

	bzero(sc_if->sk_rdata->sk_tx_ring,
	    sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT);

	SIMPLEQ_INIT(&sc_if->sk_txmap_head);
	for (i = 0; i < MSK_TX_RING_CNT; i++) {
		cd->sk_tx_chain[i].sk_le = &rd->sk_tx_ring[i];
		if (i == (MSK_TX_RING_CNT - 1))
			nexti = 0;
		else
			nexti = i + 1;
		cd->sk_tx_chain[i].sk_next = &cd->sk_tx_chain[nexti];

		if (bus_dmamap_create(sc->sc_dmatag, SK_JLEN, SK_NTXSEG,
		    SK_JLEN, 0, BUS_DMA_NOWAIT, &dmamap))
			return (ENOBUFS);

		entry = malloc(sizeof(*entry), M_DEVBUF, M_NOWAIT);
		if (!entry) {
			bus_dmamap_destroy(sc->sc_dmatag, dmamap);
			return (ENOBUFS);
		}
		entry->dmamap = dmamap;
		SIMPLEQ_INSERT_HEAD(&sc_if->sk_txmap_head, entry, link);
	}

	sc_if->sk_cdata.sk_tx_prod = 0;
	sc_if->sk_cdata.sk_tx_cons = 0;
	sc_if->sk_cdata.sk_tx_cnt = 0;

	MSK_CDTXSYNC(sc_if, 0, MSK_TX_RING_CNT,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * Allocate a receive buffer mbuf and attach it to the ring at the
 * current producer index (body continues below).
 */
int
msk_newbuf(struct sk_if_softc *sc_if)
{
	struct sk_chain *c;
	struct msk_rx_desc *r;
	struct mbuf *m;
	bus_dmamap_t dmamap;
	int error;
	int i, head;

	m = MCLGETI(NULL, M_DONTWAIT, &sc_if->arpcom.ac_if, sc_if->sk_pktlen);
	if (!m)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = sc_if->sk_pktlen;
	/* Align the IP header on a natural boundary. */
	m_adj(m, ETHER_ALIGN);

	dmamap = sc_if->sk_cdata.sk_rx_map[sc_if->sk_cdata.sk_rx_prod];

	error = bus_dmamap_load_mbuf(sc_if->sk_softc->sc_dmatag, dmamap, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		return (ENOBUFS);
	}

	/* Not enough free descriptors for all DMA segments? Back out. */
	if (dmamap->dm_nsegs > (MSK_RX_RING_CNT - sc_if->sk_cdata.sk_rx_cnt)) {
		bus_dmamap_unload(sc_if->sk_softc->sc_dmatag, dmamap);
		m_freem(m);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc_if->sk_softc->sc_dmatag, dmamap, 0,
	    dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	/*
	 * Fill the first descriptor (the PACKET entry) but do not hand
	 * it to the hardware yet: its OWN bit is set last, below, after
	 * all BUFFER descriptors for the remaining segments are ready.
	 */
	c = &sc_if->sk_cdata.sk_rx_chain[sc_if->sk_cdata.sk_rx_prod];
	head = sc_if->sk_cdata.sk_rx_prod;
	r = c->sk_le;
	c->sk_mbuf = m;

	r->sk_addr = htole32(dmamap->dm_segs[0].ds_addr);
	r->sk_len = htole16(dmamap->dm_segs[0].ds_len);
	r->sk_ctl = 0;

	MSK_CDRXSYNC(sc_if, head, BUS_DMASYNC_PREWRITE);

	SK_INC(sc_if->sk_cdata.sk_rx_prod, MSK_RX_RING_CNT);
	sc_if->sk_cdata.sk_rx_cnt++;

	/* Chain BUFFER descriptors for the remaining DMA segments. */
	for (i = 1; i < dmamap->dm_nsegs; i++) {
		c = &sc_if->sk_cdata.sk_rx_chain[sc_if->sk_cdata.sk_rx_prod];
		r = c->sk_le;
		c->sk_mbuf = NULL;

		r->sk_addr = htole32(dmamap->dm_segs[i].ds_addr);
		r->sk_len = htole16(dmamap->dm_segs[i].ds_len);
		r->sk_ctl = 0;

		MSK_CDRXSYNC(sc_if, sc_if->sk_cdata.sk_rx_prod,
		    BUS_DMASYNC_PREWRITE);

		r->sk_opcode = SK_Y2_RXOPC_BUFFER | SK_Y2_RXOPC_OWN;

		MSK_CDRXSYNC(sc_if, sc_if->sk_cdata.sk_rx_prod,
		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

		SK_INC(sc_if->sk_cdata.sk_rx_prod, MSK_RX_RING_CNT);
		sc_if->sk_cdata.sk_rx_cnt++;
	}

	/* Finally give the head descriptor to the hardware. */
	c = &sc_if->sk_cdata.sk_rx_chain[head];
	r = c->sk_le;
	r->sk_opcode = SK_Y2_RXOPC_PACKET | SK_Y2_RXOPC_OWN;

	MSK_CDRXSYNC(sc_if, head, BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	return (0);
}

/*
 * Set media options.
 */
int
msk_ifmedia_upd(struct ifnet *ifp)
{
	struct sk_if_softc *sc_if = ifp->if_softc;

	mii_mediachg(&sc_if->sk_mii);
	return (0);
}

/*
 * Report current media status.
 */
void
msk_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct sk_if_softc *sc_if = ifp->if_softc;

	mii_pollstat(&sc_if->sk_mii);
	ifmr->ifm_active = sc_if->sk_mii.mii_media_active;
	ifmr->ifm_status = sc_if->sk_mii.mii_media_status;
}

/*
 * Interface ioctl handler.  Runs at splnet.  Address assignment brings
 * the interface up; flag changes toggle promiscuous mode or start/stop
 * the interface; media ioctls are delegated to ifmedia; everything
 * else goes to ether_ioctl().  ENETRESET from the lower layers is
 * translated into a multicast-filter reload.
 */
int
msk_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct sk_if_softc *sc_if = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *) data;
	struct ifreq *ifr = (struct ifreq *) data;
	struct mii_data *mii;
	int s, error = 0;

	s = splnet();

	switch(command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			msk_init(sc_if);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc_if->arpcom, ifa);
#endif /* INET */
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING &&
			    (sc_if->sk_if_flags ^ ifp->if_flags) &
			    IFF_PROMISC) {
				/* Only PROMISC changed: no full reinit. */
				msk_setpromisc(sc_if);
				msk_setmulti(sc_if);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					msk_init(sc_if);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				msk_stop(sc_if, 0);
		}
		sc_if->sk_if_flags = ifp->if_flags;
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = &sc_if->sk_mii;
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;

	default:
		error = ether_ioctl(ifp, &sc_if->arpcom, command, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			msk_setmulti(sc_if);
		error = 0;
	}
	splx(s);
	return (error);
}

/*
 * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
int
mskc_probe(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid((struct pci_attach_args *)aux, mskc_devices,
	    nitems(mskc_devices)));
}

/*
 * Force the GEnesis into reset, then bring it out of reset.
 */
void
mskc_reset(struct sk_softc *sc)
{
	u_int32_t imtimer_ticks, reg1;
	int reg;

	DPRINTFN(2, ("mskc_reset\n"));

	CSR_WRITE_1(sc, SK_CSR, SK_CSR_SW_RESET);
	CSR_WRITE_1(sc, SK_CSR, SK_CSR_MASTER_RESET);

	DELAY(1000);
	CSR_WRITE_1(sc, SK_CSR, SK_CSR_SW_UNRESET);
	DELAY(2);
	CSR_WRITE_1(sc, SK_CSR, SK_CSR_MASTER_UNRESET);

	sk_win_write_1(sc, SK_TESTCTL1, 2);

	if (sc->sk_type == SK_YUKON_EC_U || sc->sk_type == SK_YUKON_EX ||
	    sc->sk_type >= SK_YUKON_FE_P) {
		/* enable all clocks. */
		sk_win_write_4(sc, SK_Y2_PCI_REG(SK_PCI_OURREG3), 0);
		reg1 = sk_win_read_4(sc, SK_Y2_PCI_REG(SK_PCI_OURREG4));
		reg1 &= (SK_Y2_REG4_FORCE_ASPM_REQUEST|
		    SK_Y2_REG4_ASPM_GPHY_LINK_DOWN|
		    SK_Y2_REG4_ASPM_INT_FIFO_EMPTY|
		    SK_Y2_REG4_ASPM_CLKRUN_REQUEST);
		sk_win_write_4(sc, SK_Y2_PCI_REG(SK_PCI_OURREG4), reg1);

		reg1 = sk_win_read_4(sc, SK_Y2_PCI_REG(SK_PCI_OURREG5));
		reg1 &= SK_Y2_REG5_TIM_VMAIN_AV_MASK;
		sk_win_write_4(sc, SK_Y2_PCI_REG(SK_PCI_OURREG5), reg1);
		sk_win_write_4(sc, SK_Y2_PCI_REG(SK_PCI_CFGREG1), 0);

		/*
		 * Disable status race, workaround for Yukon EC Ultra &
		 * Yukon EX.
		 */
		reg1 = sk_win_read_4(sc, SK_GPIO);
		reg1 |= SK_Y2_GPIO_STAT_RACE_DIS;
		sk_win_write_4(sc, SK_GPIO, reg1);
		/* Read back to flush the posted write. */
		sk_win_read_4(sc, SK_GPIO);
	}

	reg1 = sk_win_read_4(sc, SK_Y2_PCI_REG(SK_PCI_OURREG1));
	if (sc->sk_type == SK_YUKON_XL && sc->sk_rev > SK_YUKON_XL_REV_A1)
		reg1 |= (SK_Y2_REG1_PHY1_COMA | SK_Y2_REG1_PHY2_COMA);
	else
		reg1 &= ~(SK_Y2_REG1_PHY1_COMA | SK_Y2_REG1_PHY2_COMA);
	sk_win_write_4(sc, SK_Y2_PCI_REG(SK_PCI_OURREG1), reg1);

	if (sc->sk_type == SK_YUKON_XL && sc->sk_rev > SK_YUKON_XL_REV_A1)
		sk_win_write_1(sc, SK_Y2_CLKGATE,
		    SK_Y2_CLKGATE_LINK1_GATE_DIS |
		    SK_Y2_CLKGATE_LINK2_GATE_DIS |
		    SK_Y2_CLKGATE_LINK1_CORE_DIS |
		    SK_Y2_CLKGATE_LINK2_CORE_DIS |
		    SK_Y2_CLKGATE_LINK1_PCI_DIS | SK_Y2_CLKGATE_LINK2_PCI_DIS);
	else
		sk_win_write_1(sc, SK_Y2_CLKGATE, 0);

	/* Reset both link controllers (port A and, at +SK_WIN_LEN, port B). */
	CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET);
	CSR_WRITE_2(sc, SK_LINK_CTRL + SK_WIN_LEN, SK_LINK_RESET_SET);
	DELAY(1000);
	CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR);
	CSR_WRITE_2(sc, SK_LINK_CTRL + SK_WIN_LEN, SK_LINK_RESET_CLEAR);

	if (sc->sk_type == SK_YUKON_EX || sc->sk_type == SK_YUKON_SUPR) {
		CSR_WRITE_2(sc, SK_GMAC_CTRL, SK_GMAC_BYP_MACSECRX |
		    SK_GMAC_BYP_MACSECTX | SK_GMAC_BYP_RETR_FIFO);
	}

	sk_win_write_1(sc, SK_TESTCTL1, 1);

	DPRINTFN(2, ("mskc_reset: sk_csr=%x\n", CSR_READ_1(sc, SK_CSR)));
	DPRINTFN(2, ("mskc_reset: sk_link_ctrl=%x\n",
	    CSR_READ_2(sc, SK_LINK_CTRL)));

	/* Disable ASF */
	CSR_WRITE_1(sc, SK_Y2_ASF_CSR, SK_Y2_ASF_RESET);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_ASF_OFF);

	/* Clear I2C IRQ noise */
	CSR_WRITE_4(sc, SK_I2CHWIRQ, 1);

	/* Disable hardware timer */
	CSR_WRITE_1(sc, SK_TIMERCTL, SK_IMCTL_STOP);
	CSR_WRITE_1(sc, SK_TIMERCTL, SK_IMCTL_IRQ_CLEAR);

	/* Disable descriptor polling */
	CSR_WRITE_4(sc, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_STOP);

	/* Disable time stamps */
	CSR_WRITE_1(sc, SK_TSTAMP_CTL, SK_TSTAMP_STOP);
	CSR_WRITE_1(sc, SK_TSTAMP_CTL, SK_TSTAMP_IRQ_CLEAR);

	/* Enable RAM interface */
	sk_win_write_1(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);
	for (reg = SK_TO0;reg <= SK_TO11; reg++)
		sk_win_write_1(sc, reg, 36);
	sk_win_write_1(sc, SK_RAMCTL + (SK_WIN_LEN / 2), SK_RAMCTL_UNRESET);
	for (reg = SK_TO0;reg <= SK_TO11; reg++)
		sk_win_write_1(sc, reg + (SK_WIN_LEN / 2), 36);

	/*
	 * Configure interrupt moderation. The moderation timer
	 * defers interrupts specified in the interrupt moderation
	 * timer mask based on the timeout specified in the interrupt
	 * moderation timer init register. Each bit in the timer
	 * register represents one tick, so to specify a timeout in
	 * microseconds, we have to multiply by the correct number of
	 * ticks-per-microsecond.
	 */
	switch (sc->sk_type) {
	case SK_YUKON_EC:
	case SK_YUKON_XL:
	case SK_YUKON_FE:
	case SK_YUKON_OPTIMA:
		imtimer_ticks = SK_IMTIMER_TICKS_YUKON_EC;
		break;
	default:
		imtimer_ticks = SK_IMTIMER_TICKS_YUKON;
	}

	/* Reset status ring. */
	bzero(sc->sk_status_ring,
	    MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc));
	sc->sk_status_idx = 0;

	sk_win_write_4(sc, SK_STAT_BMU_CSR, SK_STAT_BMU_RESET);
	sk_win_write_4(sc, SK_STAT_BMU_CSR, SK_STAT_BMU_UNRESET);

	sk_win_write_2(sc, SK_STAT_BMU_LIDX, MSK_STATUS_RING_CNT - 1);
	sk_win_write_4(sc, SK_STAT_BMU_ADDRLO,
	    sc->sk_status_map->dm_segs[0].ds_addr);
	sk_win_write_4(sc, SK_STAT_BMU_ADDRHI,
	    (u_int64_t)sc->sk_status_map->dm_segs[0].ds_addr >> 32);
	sk_win_write_2(sc, SK_STAT_BMU_TX_THRESH, 10);
	sk_win_write_1(sc, SK_STAT_BMU_FIFOWM, 16);
	sk_win_write_1(sc, SK_STAT_BMU_FIFOIWM, 16);

#if 0
	sk_win_write_4(sc, SK_Y2_LEV_ITIMERINIT, SK_IM_USECS(100));
	sk_win_write_4(sc, SK_Y2_TX_ITIMERINIT, SK_IM_USECS(1000));
	sk_win_write_4(sc, SK_Y2_ISR_ITIMERINIT, SK_IM_USECS(20));
#else
	sk_win_write_4(sc, SK_Y2_ISR_ITIMERINIT, SK_IM_USECS(4));
#endif

	sk_win_write_4(sc, SK_STAT_BMU_CSR, SK_STAT_BMU_ON);

	sk_win_write_1(sc, SK_Y2_LEV_ITIMERCTL, SK_IMCTL_START);
	sk_win_write_1(sc, SK_Y2_TX_ITIMERCTL, SK_IMCTL_START);
	sk_win_write_1(sc, SK_Y2_ISR_ITIMERCTL, SK_IMCTL_START);
}

/*
 * Match a port child: the port must be A or B and the controller must
 * be one of the supported Yukon-2 family members.
 */
int
msk_probe(struct device *parent, void *match, void *aux)
{
	struct skc_attach_args *sa = aux;

	if (sa->skc_port != SK_PORT_A && sa->skc_port != SK_PORT_B)
		return (0);

	switch (sa->skc_type) {
	case SK_YUKON_XL:
	case SK_YUKON_EC_U:
	case SK_YUKON_EX:
	case SK_YUKON_EC:
	case SK_YUKON_FE:
	case SK_YUKON_FE_P:
	case SK_YUKON_SUPR:
	case SK_YUKON_ULTRA2:
	case SK_YUKON_OPTIMA:
		return (1);
	}

	return (0);
}

void
msk_reset(struct sk_if_softc *sc_if)
{
	/* GMAC and GPHY Reset */
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET);
	DELAY(1000);
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_CLEAR);
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF |
	    SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR);
}

/*
 * Each XMAC chip is attached as a separate logical IP interface.
 * Single port cards will have only one logical interface of course.
 */
void
msk_attach(struct device *parent, struct device *self, void *aux)
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *)self;
	struct sk_softc *sc = (struct sk_softc *)parent;
	struct skc_attach_args *sa = aux;
	struct ifnet *ifp;
	caddr_t kva;
	int i;
	u_int32_t chunk;
	int mii_flags;
	int error;

	sc_if->sk_port = sa->skc_port;
	sc_if->sk_softc = sc;
	sc->sk_if[sa->skc_port] = sc_if;

	DPRINTFN(2, ("begin msk_attach: port=%d\n", sc_if->sk_port));

	/*
	 * Get station address for this interface. Note that
	 * dual port cards actually come with three station
	 * addresses: one for each port, plus an extra. The
	 * extra one is used by the SysKonnect driver software
	 * as a 'virtual' station address for when both ports
	 * are operating in failover mode. Currently we don't
	 * use this extra address.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		sc_if->arpcom.ac_enaddr[i] =
		    sk_win_read_1(sc, SK_MAC0_0 + (sa->skc_port * 8) + i);

	printf(": address %s\n",
	    ether_sprintf(sc_if->arpcom.ac_enaddr));

	/*
	 * Set up RAM buffer addresses. The Yukon2 has a small amount
	 * of SRAM on it, somewhere between 4K and 48K. We need to
	 * divide this up between the transmitter and receiver. We
	 * give the receiver 2/3 of the memory (rounded down), and the
	 * transmitter whatever remains.
	 */
	chunk = (2 * (sc->sk_ramsize / sizeof(u_int64_t)) / 3) & ~0xff;
	sc_if->sk_rx_ramstart = 0;
	sc_if->sk_rx_ramend = sc_if->sk_rx_ramstart + chunk - 1;
	chunk = (sc->sk_ramsize / sizeof(u_int64_t)) - chunk;
	sc_if->sk_tx_ramstart = sc_if->sk_rx_ramend + 1;
	sc_if->sk_tx_ramend = sc_if->sk_tx_ramstart + chunk - 1;

	DPRINTFN(2, ("msk_attach: rx_ramstart=%#x rx_ramend=%#x\n"
	    "           tx_ramstart=%#x tx_ramend=%#x\n",
	    sc_if->sk_rx_ramstart, sc_if->sk_rx_ramend,
	    sc_if->sk_tx_ramstart, sc_if->sk_tx_ramend));

	/* Allocate the descriptor queues. */
	if (bus_dmamem_alloc(sc->sc_dmatag, sizeof(struct msk_ring_data),
	    PAGE_SIZE, 0, &sc_if->sk_ring_seg, 1, &sc_if->sk_ring_nseg,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO)) {
		printf(": can't alloc rx buffers\n");
		goto fail;
	}
	if (bus_dmamem_map(sc->sc_dmatag, &sc_if->sk_ring_seg,
	    sc_if->sk_ring_nseg,
	    sizeof(struct msk_ring_data), &kva, BUS_DMA_NOWAIT)) {
		printf(": can't map dma buffers (%lu bytes)\n",
		    (ulong)sizeof(struct msk_ring_data));
		goto fail_1;
	}
	if (bus_dmamap_create(sc->sc_dmatag, sizeof(struct msk_ring_data), 1,
	    sizeof(struct msk_ring_data), 0, BUS_DMA_NOWAIT,
	    &sc_if->sk_ring_map)) {
		printf(": can't create dma map\n");
		goto fail_2;
	}
	if (bus_dmamap_load(sc->sc_dmatag, sc_if->sk_ring_map, kva,
	    sizeof(struct msk_ring_data), NULL, BUS_DMA_NOWAIT)) {
		printf(": can't load dma map\n");
		goto fail_3;
	}
	sc_if->sk_rdata = (struct msk_ring_data *)kva;

	/* FE/FE+ chips cannot do jumbo frames; use a single cluster. */
	if (sc->sk_type != SK_YUKON_FE &&
	    sc->sk_type != SK_YUKON_FE_P)
		sc_if->sk_pktlen = SK_JLEN;
	else
		sc_if->sk_pktlen = MCLBYTES;

	for (i = 0; i < MSK_RX_RING_CNT; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmatag,
		    sc_if->sk_pktlen, 4, sc_if->sk_pktlen,
		    0, 0, &sc_if->sk_cdata.sk_rx_map[i])) != 0) {
			printf("\n%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->sk_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	ifp = &sc_if->arpcom.ac_if;
	ifp->if_softc = sc_if;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = msk_ioctl;
	ifp->if_start = msk_start;
	ifp->if_watchdog = msk_watchdog;
	ifp->if_baudrate = 1000000000;
	if (sc->sk_type != SK_YUKON_FE &&
	    sc->sk_type != SK_YUKON_FE_P)
		ifp->if_hardmtu = SK_JUMBO_MTU;
	IFQ_SET_MAXLEN(&ifp->if_snd, MSK_TX_RING_CNT - 1);
	IFQ_SET_READY(&ifp->if_snd);
	bcopy(sc_if->sk_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	msk_reset(sc_if);

	/*
	 * Do miibus setup.
	 */
	msk_init_yukon(sc_if);

	DPRINTFN(2, ("msk_attach: 1\n"));

	sc_if->sk_mii.mii_ifp = ifp;
	sc_if->sk_mii.mii_readreg = msk_miibus_readreg;
	sc_if->sk_mii.mii_writereg = msk_miibus_writereg;
	sc_if->sk_mii.mii_statchg = msk_miibus_statchg;

	ifmedia_init(&sc_if->sk_mii.mii_media, 0,
	    msk_ifmedia_upd, msk_ifmedia_sts);
	mii_flags = MIIF_DOPAUSE;
	if (sc->sk_fibertype)
		mii_flags |= MIIF_HAVEFIBER;
	mii_attach(self, &sc_if->sk_mii, 0xffffffff, 0,
	    MII_OFFSET_ANY, mii_flags);
	if (LIST_FIRST(&sc_if->sk_mii.mii_phys) == NULL) {
		/* No PHY attached: fall back to manual media. */
		printf("%s: no PHY found!\n", sc_if->sk_dev.dv_xname);
		ifmedia_add(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_MANUAL);
	} else
		ifmedia_set(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_AUTO);

	timeout_set(&sc_if->sk_tick_ch, msk_tick, sc_if);

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);
	m_clsetwms(ifp, sc_if->sk_pktlen, 2, MSK_RX_RING_CNT);

	DPRINTFN(2, ("msk_attach: end\n"));
	return;

	/* Unwind resource allocation in reverse order. */
fail_4:
	for (i = 0; i < MSK_RX_RING_CNT; i++) {
		if (sc_if->sk_cdata.sk_rx_map[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmatag,
			    sc_if->sk_cdata.sk_rx_map[i]);
	}

fail_3:
	bus_dmamap_destroy(sc->sc_dmatag, sc_if->sk_ring_map);
fail_2:
	bus_dmamem_unmap(sc->sc_dmatag, kva, sizeof(struct msk_ring_data));
fail_1:
	bus_dmamem_free(sc->sc_dmatag, &sc_if->sk_ring_seg, sc_if->sk_ring_nseg);
fail:
	sc->sk_if[sa->skc_port] = NULL;
}

/*
 * Detach a port: stop the interface, detach PHYs and media, tear down
 * the MI interface layers and release the descriptor-ring DMA memory.
 */
int
msk_detach(struct device *self, int flags)
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *)self;
	struct sk_softc *sc = sc_if->sk_softc;
	struct ifnet *ifp= &sc_if->arpcom.ac_if;

	if (sc->sk_if[sc_if->sk_port] == NULL)
		return (0);

	msk_stop(sc_if, 1);

	/* Detach any PHYs we might have. */
	if (LIST_FIRST(&sc_if->sk_mii.mii_phys) != NULL)
		mii_detach(&sc_if->sk_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete any remaining media.
*/ 1058 ifmedia_delete_instance(&sc_if->sk_mii.mii_media, IFM_INST_ANY); 1059 1060 ether_ifdetach(ifp); 1061 if_detach(ifp); 1062 1063 bus_dmamem_unmap(sc->sc_dmatag, (caddr_t)sc_if->sk_rdata, 1064 sizeof(struct msk_ring_data)); 1065 bus_dmamem_free(sc->sc_dmatag, 1066 &sc_if->sk_ring_seg, sc_if->sk_ring_nseg); 1067 bus_dmamap_destroy(sc->sc_dmatag, sc_if->sk_ring_map); 1068 sc->sk_if[sc_if->sk_port] = NULL; 1069 1070 return (0); 1071 } 1072 1073 int 1074 msk_activate(struct device *self, int act) 1075 { 1076 struct sk_if_softc *sc_if = (void *)self; 1077 struct ifnet *ifp = &sc_if->arpcom.ac_if; 1078 int rv = 0; 1079 1080 switch (act) { 1081 case DVACT_QUIESCE: 1082 rv = config_activate_children(self, act); 1083 break; 1084 case DVACT_SUSPEND: 1085 rv = config_activate_children(self, act); 1086 break; 1087 case DVACT_RESUME: 1088 msk_reset(sc_if); 1089 rv = config_activate_children(self, act); 1090 if (ifp->if_flags & IFF_RUNNING) 1091 msk_init(sc_if); 1092 break; 1093 } 1094 return (rv); 1095 } 1096 1097 int 1098 mskcprint(void *aux, const char *pnp) 1099 { 1100 struct skc_attach_args *sa = aux; 1101 1102 if (pnp) 1103 printf("msk port %c at %s", 1104 (sa->skc_port == SK_PORT_A) ? 'A' : 'B', pnp); 1105 else 1106 printf(" port %c", (sa->skc_port == SK_PORT_A) ? 'A' : 'B'); 1107 return (UNCONF); 1108 } 1109 1110 /* 1111 * Attach the interface. Allocate softc structures, do ifmedia 1112 * setup and ethernet/BPF attach. 1113 */ 1114 void 1115 mskc_attach(struct device *parent, struct device *self, void *aux) 1116 { 1117 struct sk_softc *sc = (struct sk_softc *)self; 1118 struct pci_attach_args *pa = aux; 1119 struct skc_attach_args skca; 1120 pci_chipset_tag_t pc = pa->pa_pc; 1121 pcireg_t command, memtype; 1122 pci_intr_handle_t ih; 1123 const char *intrstr = NULL; 1124 u_int8_t hw, pmd; 1125 char *revstr = NULL; 1126 caddr_t kva; 1127 1128 DPRINTFN(2, ("begin mskc_attach\n")); 1129 1130 /* 1131 * Handle power management nonsense. 
1132 */ 1133 command = pci_conf_read(pc, pa->pa_tag, SK_PCI_CAPID) & 0x000000FF; 1134 1135 if (command == 0x01) { 1136 command = pci_conf_read(pc, pa->pa_tag, SK_PCI_PWRMGMTCTRL); 1137 if (command & SK_PSTATE_MASK) { 1138 u_int32_t iobase, membase, irq; 1139 1140 /* Save important PCI config data. */ 1141 iobase = pci_conf_read(pc, pa->pa_tag, SK_PCI_LOIO); 1142 membase = pci_conf_read(pc, pa->pa_tag, SK_PCI_LOMEM); 1143 irq = pci_conf_read(pc, pa->pa_tag, SK_PCI_INTLINE); 1144 1145 /* Reset the power state. */ 1146 printf("%s chip is in D%d power mode " 1147 "-- setting to D0\n", sc->sk_dev.dv_xname, 1148 command & SK_PSTATE_MASK); 1149 command &= 0xFFFFFFFC; 1150 pci_conf_write(pc, pa->pa_tag, 1151 SK_PCI_PWRMGMTCTRL, command); 1152 1153 /* Restore PCI config data. */ 1154 pci_conf_write(pc, pa->pa_tag, SK_PCI_LOIO, iobase); 1155 pci_conf_write(pc, pa->pa_tag, SK_PCI_LOMEM, membase); 1156 pci_conf_write(pc, pa->pa_tag, SK_PCI_INTLINE, irq); 1157 } 1158 } 1159 1160 /* 1161 * Map control/status registers. 
1162 */ 1163 memtype = pci_mapreg_type(pc, pa->pa_tag, SK_PCI_LOMEM); 1164 if (pci_mapreg_map(pa, SK_PCI_LOMEM, memtype, 0, &sc->sk_btag, 1165 &sc->sk_bhandle, NULL, &sc->sk_bsize, 0)) { 1166 printf(": can't map mem space\n"); 1167 return; 1168 } 1169 1170 sc->sc_dmatag = pa->pa_dmat; 1171 1172 sc->sk_type = sk_win_read_1(sc, SK_CHIPVER); 1173 sc->sk_rev = (sk_win_read_1(sc, SK_CONFIG) >> 4); 1174 1175 /* bail out here if chip is not recognized */ 1176 if (!(SK_IS_YUKON2(sc))) { 1177 printf(": unknown chip type: %d\n", sc->sk_type); 1178 goto fail_1; 1179 } 1180 DPRINTFN(2, ("mskc_attach: allocate interrupt\n")); 1181 1182 /* Allocate interrupt */ 1183 if (pci_intr_map(pa, &ih)) { 1184 printf(": couldn't map interrupt\n"); 1185 goto fail_1; 1186 } 1187 1188 intrstr = pci_intr_string(pc, ih); 1189 sc->sk_intrhand = pci_intr_establish(pc, ih, IPL_NET, msk_intr, sc, 1190 self->dv_xname); 1191 if (sc->sk_intrhand == NULL) { 1192 printf(": couldn't establish interrupt"); 1193 if (intrstr != NULL) 1194 printf(" at %s", intrstr); 1195 printf("\n"); 1196 goto fail_1; 1197 } 1198 sc->sk_pc = pc; 1199 1200 if (bus_dmamem_alloc(sc->sc_dmatag, 1201 MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc), 1202 MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc), 1203 0, &sc->sk_status_seg, 1, &sc->sk_status_nseg, 1204 BUS_DMA_NOWAIT | BUS_DMA_ZERO)) { 1205 printf(": can't alloc status buffers\n"); 1206 goto fail_2; 1207 } 1208 1209 if (bus_dmamem_map(sc->sc_dmatag, 1210 &sc->sk_status_seg, sc->sk_status_nseg, 1211 MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc), 1212 &kva, BUS_DMA_NOWAIT)) { 1213 printf(": can't map dma buffers (%lu bytes)\n", 1214 (ulong)(MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc))); 1215 goto fail_3; 1216 } 1217 if (bus_dmamap_create(sc->sc_dmatag, 1218 MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc), 1, 1219 MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc), 0, 1220 BUS_DMA_NOWAIT, &sc->sk_status_map)) { 1221 printf(": can't create dma 
map\n"); 1222 goto fail_4; 1223 } 1224 if (bus_dmamap_load(sc->sc_dmatag, sc->sk_status_map, kva, 1225 MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc), 1226 NULL, BUS_DMA_NOWAIT)) { 1227 printf(": can't load dma map\n"); 1228 goto fail_5; 1229 } 1230 sc->sk_status_ring = (struct msk_status_desc *)kva; 1231 1232 /* Reset the adapter. */ 1233 mskc_reset(sc); 1234 1235 sc->sk_ramsize = sk_win_read_1(sc, SK_EPROM0) * 4096; 1236 DPRINTFN(2, ("mskc_attach: ramsize=%dK\n", sc->sk_ramsize / 1024)); 1237 1238 pmd = sk_win_read_1(sc, SK_PMDTYPE); 1239 if (pmd == 'L' || pmd == 'S' || pmd == 'P') 1240 sc->sk_fibertype = 1; 1241 1242 switch (sc->sk_type) { 1243 case SK_YUKON_XL: 1244 sc->sk_name = "Yukon-2 XL"; 1245 break; 1246 case SK_YUKON_EC_U: 1247 sc->sk_name = "Yukon-2 EC Ultra"; 1248 break; 1249 case SK_YUKON_EX: 1250 sc->sk_name = "Yukon-2 Extreme"; 1251 break; 1252 case SK_YUKON_EC: 1253 sc->sk_name = "Yukon-2 EC"; 1254 break; 1255 case SK_YUKON_FE: 1256 sc->sk_name = "Yukon-2 FE"; 1257 break; 1258 case SK_YUKON_FE_P: 1259 sc->sk_name = "Yukon-2 FE+"; 1260 break; 1261 case SK_YUKON_SUPR: 1262 sc->sk_name = "Yukon-2 Supreme"; 1263 break; 1264 case SK_YUKON_ULTRA2: 1265 sc->sk_name = "Yukon-2 Ultra2"; 1266 break; 1267 case SK_YUKON_OPTIMA: 1268 sc->sk_name = "Yukon-2 Optima"; 1269 break; 1270 default: 1271 sc->sk_name = "Yukon (Unknown)"; 1272 } 1273 1274 if (sc->sk_type == SK_YUKON_XL) { 1275 switch (sc->sk_rev) { 1276 case SK_YUKON_XL_REV_A0: 1277 revstr = "A0"; 1278 break; 1279 case SK_YUKON_XL_REV_A1: 1280 revstr = "A1"; 1281 break; 1282 case SK_YUKON_XL_REV_A2: 1283 revstr = "A2"; 1284 break; 1285 case SK_YUKON_XL_REV_A3: 1286 revstr = "A3"; 1287 break; 1288 default: 1289 ; 1290 } 1291 } 1292 1293 if (sc->sk_type == SK_YUKON_EC) { 1294 switch (sc->sk_rev) { 1295 case SK_YUKON_EC_REV_A1: 1296 revstr = "A1"; 1297 break; 1298 case SK_YUKON_EC_REV_A2: 1299 revstr = "A2"; 1300 break; 1301 case SK_YUKON_EC_REV_A3: 1302 revstr = "A3"; 1303 break; 1304 default: 1305 ; 
1306 } 1307 } 1308 1309 if (sc->sk_type == SK_YUKON_EC_U) { 1310 switch (sc->sk_rev) { 1311 case SK_YUKON_EC_U_REV_A0: 1312 revstr = "A0"; 1313 break; 1314 case SK_YUKON_EC_U_REV_A1: 1315 revstr = "A1"; 1316 break; 1317 case SK_YUKON_EC_U_REV_B0: 1318 revstr = "B0"; 1319 break; 1320 default: 1321 ; 1322 } 1323 } 1324 1325 if (sc->sk_type == SK_YUKON_FE) { 1326 switch (sc->sk_rev) { 1327 case SK_YUKON_FE_REV_A1: 1328 revstr = "A1"; 1329 break; 1330 case SK_YUKON_FE_REV_A2: 1331 revstr = "A2"; 1332 break; 1333 default: 1334 ; 1335 } 1336 } 1337 1338 if (sc->sk_type == SK_YUKON_FE_P && sc->sk_rev == SK_YUKON_FE_P_REV_A0) 1339 revstr = "A0"; 1340 1341 if (sc->sk_type == SK_YUKON_EX) { 1342 switch (sc->sk_rev) { 1343 case SK_YUKON_EX_REV_A0: 1344 revstr = "A0"; 1345 break; 1346 case SK_YUKON_EX_REV_B0: 1347 revstr = "B0"; 1348 break; 1349 default: 1350 ; 1351 } 1352 } 1353 1354 if (sc->sk_type == SK_YUKON_SUPR && sc->sk_rev == SK_YUKON_SUPR_REV_A0) 1355 revstr = "A0"; 1356 1357 1358 /* Announce the product name. */ 1359 printf(", %s", sc->sk_name); 1360 if (revstr != NULL) 1361 printf(" rev. %s", revstr); 1362 printf(" (0x%x): %s\n", sc->sk_rev, intrstr); 1363 1364 sc->sk_macs = 1; 1365 1366 hw = sk_win_read_1(sc, SK_Y2_HWRES); 1367 if ((hw & SK_Y2_HWRES_LINK_MASK) == SK_Y2_HWRES_LINK_DUAL) { 1368 if ((sk_win_read_1(sc, SK_Y2_CLKGATE) & 1369 SK_Y2_CLKGATE_LINK2_INACTIVE) == 0) 1370 sc->sk_macs++; 1371 } 1372 1373 skca.skc_port = SK_PORT_A; 1374 skca.skc_type = sc->sk_type; 1375 skca.skc_rev = sc->sk_rev; 1376 (void)config_found(&sc->sk_dev, &skca, mskcprint); 1377 1378 if (sc->sk_macs > 1) { 1379 skca.skc_port = SK_PORT_B; 1380 skca.skc_type = sc->sk_type; 1381 skca.skc_rev = sc->sk_rev; 1382 (void)config_found(&sc->sk_dev, &skca, mskcprint); 1383 } 1384 1385 /* Turn on the 'driver is loaded' LED. 
*/ 1386 CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON); 1387 1388 return; 1389 1390 fail_4: 1391 bus_dmamem_unmap(sc->sc_dmatag, (caddr_t)sc->sk_status_ring, 1392 MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc)); 1393 fail_3: 1394 bus_dmamem_free(sc->sc_dmatag, 1395 &sc->sk_status_seg, sc->sk_status_nseg); 1396 sc->sk_status_nseg = 0; 1397 fail_5: 1398 bus_dmamap_destroy(sc->sc_dmatag, sc->sk_status_map); 1399 fail_2: 1400 pci_intr_disestablish(sc->sk_pc, sc->sk_intrhand); 1401 sc->sk_intrhand = NULL; 1402 fail_1: 1403 bus_space_unmap(sc->sk_btag, sc->sk_bhandle, sc->sk_bsize); 1404 sc->sk_bsize = 0; 1405 } 1406 1407 int 1408 mskc_detach(struct device *self, int flags) 1409 { 1410 struct sk_softc *sc = (struct sk_softc *)self; 1411 int rv; 1412 1413 if (sc->sk_intrhand) 1414 pci_intr_disestablish(sc->sk_pc, sc->sk_intrhand); 1415 1416 rv = config_detach_children(self, flags); 1417 if (rv != 0) 1418 return (rv); 1419 1420 if (sc->sk_status_nseg > 0) { 1421 bus_dmamap_destroy(sc->sc_dmatag, sc->sk_status_map); 1422 bus_dmamem_unmap(sc->sc_dmatag, (caddr_t)sc->sk_status_ring, 1423 MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc)); 1424 bus_dmamem_free(sc->sc_dmatag, 1425 &sc->sk_status_seg, sc->sk_status_nseg); 1426 } 1427 1428 if (sc->sk_bsize > 0) 1429 bus_space_unmap(sc->sk_btag, sc->sk_bhandle, sc->sk_bsize); 1430 1431 return(0); 1432 } 1433 1434 int 1435 mskc_activate(struct device *self, int act) 1436 { 1437 struct sk_softc *sc = (void *)self; 1438 int rv = 0; 1439 1440 switch (act) { 1441 case DVACT_QUIESCE: 1442 rv = config_activate_children(self, act); 1443 break; 1444 case DVACT_SUSPEND: 1445 rv = config_activate_children(self, act); 1446 break; 1447 case DVACT_RESUME: 1448 mskc_reset(sc); 1449 rv = config_activate_children(self, act); 1450 break; 1451 } 1452 return (rv); 1453 } 1454 1455 int 1456 msk_encap(struct sk_if_softc *sc_if, struct mbuf *m_head, u_int32_t *txidx) 1457 { 1458 struct sk_softc *sc = sc_if->sk_softc; 1459 struct msk_tx_desc *f = 
	    NULL;
	u_int32_t frag, cur;
	int i;
	struct sk_txmap_entry *entry;
	bus_dmamap_t txmap;

	DPRINTFN(2, ("msk_encap\n"));

	/* Grab a free TX DMA map; without one we cannot send. */
	entry = SIMPLEQ_FIRST(&sc_if->sk_txmap_head);
	if (entry == NULL) {
		DPRINTFN(2, ("msk_encap: no txmap available\n"));
		return (ENOBUFS);
	}
	txmap = entry->dmamap;

	cur = frag = *txidx;

#ifdef MSK_DEBUG
	if (mskdebug >= 2)
		msk_dump_mbuf(m_head);
#endif

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	if (bus_dmamap_load_mbuf(sc->sc_dmatag, txmap, m_head,
	    BUS_DMA_NOWAIT)) {
		DPRINTFN(2, ("msk_encap: dmamap failed\n"));
		return (ENOBUFS);
	}

	/* Leave two descriptors of slack so the ring never fills solid. */
	if (txmap->dm_nsegs > (MSK_TX_RING_CNT - sc_if->sk_cdata.sk_tx_cnt - 2)) {
		DPRINTFN(2, ("msk_encap: too few descriptors free\n"));
		bus_dmamap_unload(sc->sc_dmatag, txmap);
		return (ENOBUFS);
	}

	DPRINTFN(2, ("msk_encap: dm_nsegs=%d\n", txmap->dm_nsegs));

	/* Sync the DMA map. */
	bus_dmamap_sync(sc->sc_dmatag, txmap, 0, txmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Fill one descriptor per DMA segment.  The first descriptor is
	 * written without the OWN bit; it is handed to the chip last so
	 * the hardware never sees a partially built chain.
	 */
	for (i = 0; i < txmap->dm_nsegs; i++) {
		f = &sc_if->sk_rdata->sk_tx_ring[frag];
		f->sk_addr = htole32(txmap->dm_segs[i].ds_addr);
		f->sk_len = htole16(txmap->dm_segs[i].ds_len);
		f->sk_ctl = 0;
		if (i == 0)
			f->sk_opcode = SK_Y2_TXOPC_PACKET;
		else
			f->sk_opcode = SK_Y2_TXOPC_BUFFER | SK_Y2_TXOPC_OWN;
		cur = frag;
		SK_INC(frag, MSK_TX_RING_CNT);
	}

	/* Book-keeping lives on the last descriptor of the packet. */
	sc_if->sk_cdata.sk_tx_chain[cur].sk_mbuf = m_head;
	SIMPLEQ_REMOVE_HEAD(&sc_if->sk_txmap_head, link);

	sc_if->sk_cdata.sk_tx_map[cur] = entry;
	sc_if->sk_rdata->sk_tx_ring[cur].sk_ctl |= SK_Y2_TXCTL_LASTFRAG;

	/* Sync descriptors before handing to chip */
	MSK_CDTXSYNC(sc_if, *txidx, txmap->dm_nsegs,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	sc_if->sk_rdata->sk_tx_ring[*txidx].sk_opcode |= SK_Y2_TXOPC_OWN;

	/* Sync first descriptor to hand it off */
	MSK_CDTXSYNC(sc_if, *txidx, 1,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	sc_if->sk_cdata.sk_tx_cnt += txmap->dm_nsegs;

#ifdef MSK_DEBUG
	if (mskdebug >= 2) {
		struct msk_tx_desc *le;
		u_int32_t idx;
		for (idx = *txidx; idx != frag; SK_INC(idx, MSK_TX_RING_CNT)) {
			le = &sc_if->sk_rdata->sk_tx_ring[idx];
			msk_dump_txdesc(le, idx);
		}
	}
#endif

	*txidx = frag;

	DPRINTFN(2, ("msk_encap: completed successfully\n"));

	return (0);
}

/*
 * ifp->if_start handler: drain the interface send queue into the TX
 * ring and kick the chip's prefetch put index if anything was queued.
 */
void
msk_start(struct ifnet *ifp)
{
	struct sk_if_softc *sc_if = ifp->if_softc;
	struct mbuf *m_head = NULL;
	u_int32_t idx = sc_if->sk_cdata.sk_tx_prod;
	int pkts = 0;

	DPRINTFN(2, ("msk_start\n"));

	while (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf == NULL) {
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (msk_encap(sc_if, m_head, &idx)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* now we are committed to transmit the packet */
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		pkts++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
	}
	if (pkts == 0)
		return;

	/* Transmit */
	if (idx != sc_if->sk_cdata.sk_tx_prod) {
		sc_if->sk_cdata.sk_tx_prod = idx;
		SK_IF_WRITE_2(sc_if, 1, SK_TXQA1_Y2_PREF_PUTIDX, idx);

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = 5;
	}
}

/*
 * Watchdog: runs when the TX timeout set in msk_start() expires.
 * Reclaims completed transmissions first; only if work is still
 * pending is it treated as a genuine hang and the chip reset.
 */
void
msk_watchdog(struct ifnet *ifp)
{
	struct sk_if_softc *sc_if = ifp->if_softc;

	/*
	 * Reclaim first as there is a possibility of losing Tx completion
	 * interrupts.
	 */
	msk_txeof(sc_if);
	if (sc_if->sk_cdata.sk_tx_cnt != 0) {
		printf("%s: watchdog timeout\n", sc_if->sk_dev.dv_xname);

		ifp->if_oerrors++;

		/* XXX Resets both ports; we shouldn't do that.
 */
		mskc_reset(sc_if->sk_softc);
		msk_reset(sc_if);
		msk_init(sc_if);
	}
}

/*
 * Sanity-check a received frame: reject frames with any error status
 * bit set, without RXOK, or whose hardware byte count disagrees with
 * the reported length.  Returns 1 when the frame looks valid.
 */
static __inline int
msk_rxvalid(struct sk_softc *sc, u_int32_t stat, u_int32_t len)
{
	if ((stat & (YU_RXSTAT_CRCERR | YU_RXSTAT_LONGERR |
	    YU_RXSTAT_MIIERR | YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC |
	    YU_RXSTAT_JABBER)) != 0 ||
	    (stat & YU_RXSTAT_RXOK) != YU_RXSTAT_RXOK ||
	    YU_RXSTAT_BYTES(stat) != len)
		return (0);

	return (1);
}

/*
 * Handle one received frame: pop the current RX ring slot, unload its
 * DMA map and hand the mbuf to the ethernet layer, or count an input
 * error and drop it if the length/status checks fail.
 */
void
msk_rxeof(struct sk_if_softc *sc_if, u_int16_t len, u_int32_t rxstat)
{
	struct sk_softc *sc = sc_if->sk_softc;
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	struct mbuf *m;
	struct sk_chain *cur_rx;
	int i, cur, total_len = len;
	bus_dmamap_t dmamap;

	DPRINTFN(2, ("msk_rxeof\n"));

	cur = sc_if->sk_cdata.sk_rx_cons;

	/* Sync the descriptor */
	MSK_CDRXSYNC(sc_if, cur, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	cur_rx = &sc_if->sk_cdata.sk_rx_chain[cur];
	if (cur_rx->sk_mbuf == NULL)
		return;

	/* A buffer may occupy several descriptors; consume them all. */
	dmamap = sc_if->sk_cdata.sk_rx_map[cur];
	for (i = 0; i < dmamap->dm_nsegs; i++) {
		SK_INC(sc_if->sk_cdata.sk_rx_cons, MSK_RX_RING_CNT);
		sc_if->sk_cdata.sk_rx_cnt--;
	}

	bus_dmamap_sync(sc_if->sk_softc->sc_dmatag, dmamap, 0,
	    dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc_if->sk_softc->sc_dmatag, dmamap);

	m = cur_rx->sk_mbuf;
	cur_rx->sk_mbuf = NULL;

	if (total_len < SK_MIN_FRAMELEN ||
	    total_len > SK_JUMBO_FRAMELEN ||
	    msk_rxvalid(sc, rxstat, total_len) == 0) {
		ifp->if_ierrors++;
		m_freem(m);
		return;
	}

	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = m->m_len = total_len;

	ifp->if_ipackets++;

#if NBPFILTER > 0
	if (ifp->if_bpf)
		bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif

	/* pass it on. */
	ether_input_mbuf(ifp, m);
}

/*
 * Reclaim transmitted frames: walk the TX ring up to the chip's
 * consumer index, freeing mbufs and recycling their DMA maps.
 */
void
msk_txeof(struct sk_if_softc *sc_if)
{
	struct sk_softc *sc = sc_if->sk_softc;
	struct msk_tx_desc *cur_tx;
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	u_int32_t idx, reg, sk_ctl;
	struct sk_txmap_entry *entry;

	DPRINTFN(2, ("msk_txeof\n"));

	/* Each port has its own BMU transmit read-index register. */
	if (sc_if->sk_port == SK_PORT_A)
		reg = SK_STAT_BMU_TXA1_RIDX;
	else
		reg = SK_STAT_BMU_TXA2_RIDX;

	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	idx = sc_if->sk_cdata.sk_tx_cons;
	while (idx != sk_win_read_2(sc, reg)) {
		MSK_CDTXSYNC(sc_if, idx, 1,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		cur_tx = &sc_if->sk_rdata->sk_tx_ring[idx];
		sk_ctl = cur_tx->sk_ctl;
#ifdef MSK_DEBUG
		if (mskdebug >= 2)
			msk_dump_txdesc(cur_tx, idx);
#endif
		if (sk_ctl & SK_Y2_TXCTL_LASTFRAG)
			ifp->if_opackets++;
		/* The mbuf and map sit on the packet's last descriptor. */
		if (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf != NULL) {
			entry = sc_if->sk_cdata.sk_tx_map[idx];

			m_freem(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf);
			sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf = NULL;

			bus_dmamap_sync(sc->sc_dmatag, entry->dmamap, 0,
			    entry->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);

			bus_dmamap_unload(sc->sc_dmatag, entry->dmamap);
			SIMPLEQ_INSERT_TAIL(&sc_if->sk_txmap_head, entry,
			    link);
			sc_if->sk_cdata.sk_tx_map[idx] = NULL;
		}
		sc_if->sk_cdata.sk_tx_cnt--;
		SK_INC(idx, MSK_TX_RING_CNT);
	}
	/* Keep the watchdog armed while frames remain outstanding. */
	ifp->if_timer = sc_if->sk_cdata.sk_tx_cnt > 0 ? 5 : 0;

	if (sc_if->sk_cdata.sk_tx_cnt < MSK_TX_RING_CNT - 2)
		ifp->if_flags &= ~IFF_OACTIVE;

	sc_if->sk_cdata.sk_tx_cons = idx;
}

/* Top up the RX ring with fresh buffers until full or out of memory. */
void
msk_fill_rx_ring(struct sk_if_softc *sc_if)
{
	while (sc_if->sk_cdata.sk_rx_cnt < MSK_RX_RING_CNT) {
		if (msk_newbuf(sc_if) == ENOBUFS)
			break;
	}
}

/* One-second timer: drive the MII state machine and re-arm. */
void
msk_tick(void *xsc_if)
{
	struct sk_if_softc *sc_if = xsc_if;
	struct mii_data *mii = &sc_if->sk_mii;
	int s;

	s = splnet();
	mii_tick(mii);
	splx(s);
	timeout_add_sec(&sc_if->sk_tick_ch, 1);
}

/*
 * Per-port GMAC interrupt handling: acknowledge RX FIFO overrun and
 * TX FIFO underrun conditions in the MAC FIFO test/control registers.
 */
void
msk_intr_yukon(struct sk_if_softc *sc_if)
{
	u_int8_t status;

	status = SK_IF_READ_1(sc_if, 0, SK_GMAC_ISR);
	/* RX overrun */
	if ((status & SK_GMAC_INT_RX_OVER) != 0) {
		SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST,
		    SK_RFCTL_RX_FIFO_OVER);
	}
	/* TX underrun */
	if ((status & SK_GMAC_INT_TX_UNDER) != 0) {
		SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST,
		    SK_TFCTL_TX_FIFO_UNDER);
	}

	DPRINTFN(2, ("msk_intr_yukon status=%#x\n", status));
}

/*
 * Shared interrupt handler for both ports: drains the status ring,
 * dispatching RX/TX completion events to the owning port.
 */
int
msk_intr(void *xsc)
{
	struct sk_softc *sc = xsc;
	struct sk_if_softc *sc_if;
	struct sk_if_softc *sc_if0 = sc->sk_if[SK_PORT_A];
	struct sk_if_softc *sc_if1 = sc->sk_if[SK_PORT_B];
	struct ifnet *ifp0 = NULL, *ifp1 = NULL;
	int claimed = 0, rx[2] = {0, 0};
	u_int32_t status;
	struct msk_status_desc *cur_st;

	/* 0xffffffff means the device is gone; 0 means not ours. */
	status = CSR_READ_4(sc, SK_Y2_ISSR2);
	if (status == 0xffffffff)
		return (0);
	if (status == 0) {
		CSR_WRITE_4(sc, SK_Y2_ICR, 2);
		return (0);
	}

	status = CSR_READ_4(sc, SK_ISR);

	if (sc_if0 != NULL)
		ifp0 = &sc_if0->arpcom.ac_if;
	if (sc_if1 != NULL)
		ifp1 = &sc_if1->arpcom.ac_if;

	if (sc_if0 && (status & SK_Y2_IMR_MAC1) &&
	    (ifp0->if_flags & IFF_RUNNING)) {
		msk_intr_yukon(sc_if0);
	}

	if
	    (sc_if1 && (status & SK_Y2_IMR_MAC2) &&
	    (ifp1->if_flags & IFF_RUNNING)) {
		msk_intr_yukon(sc_if1);
	}

	/*
	 * Walk the status ring while the chip still owns entries,
	 * returning ownership of each entry as it is consumed.
	 */
	MSK_CDSTSYNC(sc, sc->sk_status_idx,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	cur_st = &sc->sk_status_ring[sc->sk_status_idx];

	while (cur_st->sk_opcode & SK_Y2_STOPC_OWN) {
		cur_st->sk_opcode &= ~SK_Y2_STOPC_OWN;
		switch (cur_st->sk_opcode) {
		case SK_Y2_STOPC_RXSTAT:
			/* Low bit of sk_link selects the port. */
			sc_if = sc->sk_if[cur_st->sk_link & 0x01];
			rx[cur_st->sk_link & 0x01] = 1;
			msk_rxeof(sc_if, letoh16(cur_st->sk_len),
			    letoh32(cur_st->sk_status));
			break;
		case SK_Y2_STOPC_TXSTAT:
			if (sc_if0)
				msk_txeof(sc_if0);
			if (sc_if1)
				msk_txeof(sc_if1);
			break;
		default:
			printf("opcode=0x%x\n", cur_st->sk_opcode);
			break;
		}
		SK_INC(sc->sk_status_idx, MSK_STATUS_RING_CNT);

		MSK_CDSTSYNC(sc, sc->sk_status_idx,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		cur_st = &sc->sk_status_ring[sc->sk_status_idx];
	}

	if (status & SK_Y2_IMR_BMU) {
		CSR_WRITE_4(sc, SK_STAT_BMU_CSR, SK_STAT_BMU_IRQ_CLEAR);
		claimed = 1;
	}

	CSR_WRITE_4(sc, SK_Y2_ICR, 2);

	/* Replenish RX buffers and tell the chip about the new put index. */
	if (rx[0]) {
		msk_fill_rx_ring(sc_if0);
		SK_IF_WRITE_2(sc_if0, 0, SK_RXQ1_Y2_PREF_PUTIDX,
		    sc_if0->sk_cdata.sk_rx_prod);
	}
	if (rx[1]) {
		msk_fill_rx_ring(sc_if1);
		SK_IF_WRITE_2(sc_if1, 0, SK_RXQ1_Y2_PREF_PUTIDX,
		    sc_if1->sk_cdata.sk_rx_prod);
	}

	/* TX reclaim may have freed ring space; restart output. */
	if (ifp0 != NULL && !IFQ_IS_EMPTY(&ifp0->if_snd))
		msk_start(ifp0);
	if (ifp1 != NULL && !IFQ_IS_EMPTY(&ifp1->if_snd))
		msk_start(ifp1);

	return (claimed);
}

/*
 * Program the Yukon GMAC for this port: clear the MIB counters, set
 * receive/transmit parameters, station addresses, filters, and bring
 * the MAC FIFOs into operation.
 */
void
msk_init_yukon(struct sk_if_softc *sc_if)
{
	u_int32_t v;
	u_int16_t reg;
	struct sk_softc *sc;
	int i;

	sc = sc_if->sk_softc;

	DPRINTFN(2, ("msk_init_yukon: start: sk_csr=%#x\n",
	    CSR_READ_4(sc_if->sk_softc, SK_CSR)));

	DPRINTFN(6, ("msk_init_yukon: 1\n"));

	DPRINTFN(3, ("msk_init_yukon: gmac_ctrl=%#x\n",
	    SK_IF_READ_4(sc_if, 0, SK_GMAC_CTRL)));

	DPRINTFN(6, ("msk_init_yukon: 3\n"));

	/* unused read of the interrupt source register */
	DPRINTFN(6, ("msk_init_yukon: 4\n"));
	SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);

	DPRINTFN(6, ("msk_init_yukon: 4a\n"));
	reg = SK_YU_READ_2(sc_if, YUKON_PAR);
	DPRINTFN(6, ("msk_init_yukon: YUKON_PAR=%#x\n", reg));

	/* MIB Counter Clear Mode set */
	reg |= YU_PAR_MIB_CLR;
	DPRINTFN(6, ("msk_init_yukon: YUKON_PAR=%#x\n", reg));
	DPRINTFN(6, ("msk_init_yukon: 4b\n"));
	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);

	/* MIB Counter Clear Mode clear */
	DPRINTFN(6, ("msk_init_yukon: 5\n"));
	reg &= ~YU_PAR_MIB_CLR;
	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);

	/* receive control reg */
	DPRINTFN(6, ("msk_init_yukon: 7\n"));
	SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR);

	/* transmit parameter register */
	DPRINTFN(6, ("msk_init_yukon: 8\n"));
	SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) |
	    YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) );

	/* serial mode register */
	DPRINTFN(6, ("msk_init_yukon: 9\n"));
	reg = YU_SMR_DATA_BLIND(0x1c) |
	    YU_SMR_MFL_VLAN |
	    YU_SMR_IPG_DATA(0x1e);

	/* FE and FE+ parts cannot accept jumbo frame lengths. */
	if (sc->sk_type != SK_YUKON_FE &&
	    sc->sk_type != SK_YUKON_FE_P)
		reg |= YU_SMR_MFL_JUMBO;

	SK_YU_WRITE_2(sc_if, YUKON_SMR, reg);

	DPRINTFN(6, ("msk_init_yukon: 10\n"));
	/* Setup Yukon's address */
	for (i = 0; i < 3; i++) {
		/* Write Source Address 1 (unicast filter) */
		SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4,
		    sc_if->arpcom.ac_enaddr[i * 2] |
		    sc_if->arpcom.ac_enaddr[i * 2 + 1] << 8);
	}

	/* Copy this port's station address into Source Address 2. */
	for (i = 0; i < 3; i++) {
		reg = sk_win_read_2(sc_if->sk_softc,
		    SK_MAC1_0 + i * 2 + sc_if->sk_port * 8);
		SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4, reg);
	}

	/* Set promiscuous mode */
	msk_setpromisc(sc_if);

	/* Set multicast filter */
	DPRINTFN(6, ("msk_init_yukon: 11\n"));
	msk_setmulti(sc_if);

	/* enable interrupt mask for counter overflows */
	DPRINTFN(6, ("msk_init_yukon: 12\n"));
	SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0);
	SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0);
	SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0);

	/* Configure RX MAC FIFO Flush Mask */
	v = YU_RXSTAT_FOFL | YU_RXSTAT_CRCERR | YU_RXSTAT_MIIERR |
	    YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC | YU_RXSTAT_RUNT |
	    YU_RXSTAT_JABBER;
	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_MASK, v);

	/* Configure RX MAC FIFO */
	SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR);
	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_OPERATION_ON |
	    SK_RFCTL_FIFO_FLUSH_ON);

	/* Increase flush threshold to 64 bytes */
	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_THRESHOLD,
	    SK_RFCTL_FIFO_THRESHOLD + 1);

	/* Configure TX MAC FIFO */
	SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR);
	SK_IF_WRITE_2(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON);

#if 1
	SK_YU_WRITE_2(sc_if, YUKON_GPCR, YU_GPCR_TXEN | YU_GPCR_RXEN);
#endif
	DPRINTFN(6, ("msk_init_yukon: end\n"));
}

/*
 * Note that to properly initialize any part of the GEnesis chip,
 * you first have to take it out of reset mode.
 */
void
msk_init(void *xsc_if)
{
	struct sk_if_softc *sc_if = xsc_if;
	struct sk_softc *sc = sc_if->sk_softc;
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	struct mii_data *mii = &sc_if->sk_mii;
	int s;

	DPRINTFN(2, ("msk_init\n"));

	s = splnet();

	/* Cancel pending I/O and free all RX/TX buffers.
 */
	msk_stop(sc_if, 0);

	/* Configure I2C registers */

	/* Configure XMAC(s) */
	msk_init_yukon(sc_if);
	mii_mediachg(mii);

	/* Configure transmit arbiter(s) */
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_ON);
#if 0
	    SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON);
#endif

	/* Configure RAMbuffers: point the RX and TX RAM buffer queues at
	 * the regions carved out at attach time and turn them on. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);

	SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_CTLTST, SK_RBCTL_STORENFWD_ON);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_START, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_WR_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_RD_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_END, sc_if->sk_tx_ramend);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_CTLTST, SK_RBCTL_ON);

	/* Configure BMUs */
	/* NOTE(review): these magic CSR/watermark values come from the
	 * reference driver; their individual bit meanings are not
	 * documented here. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, 0x00000016);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, 0x00000d28);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, 0x00000080);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_WATERMARK, 0x00000600);

	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_BMU_CSR, 0x00000016);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_BMU_CSR, 0x00000d28);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_BMU_CSR, 0x00000080);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_WATERMARK, 0x00000600);

	/* Make sure the sync transmit queue is disabled. */
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET);

	/* Init descriptors */
	if (msk_init_rx_ring(sc_if) == ENOBUFS) {
		printf("%s: initialization failed: no "
		    "memory for rx buffers\n", sc_if->sk_dev.dv_xname);
		msk_stop(sc_if, 0);
		splx(s);
		return;
	}

	if (msk_init_tx_ring(sc_if) == ENOBUFS) {
		printf("%s: initialization failed: no "
		    "memory for tx buffers\n", sc_if->sk_dev.dv_xname);
		msk_stop(sc_if, 0);
		splx(s);
		return;
	}

	/* Initialize prefetch engine. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_CSR, 0x00000001);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_CSR, 0x00000002);
	SK_IF_WRITE_2(sc_if, 0, SK_RXQ1_Y2_PREF_LIDX, MSK_RX_RING_CNT - 1);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_ADDRLO,
	    MSK_RX_RING_ADDR(sc_if, 0));
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_ADDRHI,
	    (u_int64_t)MSK_RX_RING_ADDR(sc_if, 0) >> 32);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_CSR, 0x00000008);
	SK_IF_READ_4(sc_if, 0, SK_RXQ1_Y2_PREF_CSR);

	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_CSR, 0x00000001);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_CSR, 0x00000002);
	SK_IF_WRITE_2(sc_if, 1, SK_TXQA1_Y2_PREF_LIDX, MSK_TX_RING_CNT - 1);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_ADDRLO,
	    MSK_TX_RING_ADDR(sc_if, 0));
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_ADDRHI,
	    (u_int64_t)MSK_TX_RING_ADDR(sc_if, 0) >> 32);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_CSR, 0x00000008);
	SK_IF_READ_4(sc_if, 1, SK_TXQA1_Y2_PREF_CSR);

	SK_IF_WRITE_2(sc_if, 0, SK_RXQ1_Y2_PREF_PUTIDX,
	    sc_if->sk_cdata.sk_rx_prod);

	/* Configure interrupt handling */
	if (sc_if->sk_port == SK_PORT_A)
		sc->sk_intrmask |= SK_Y2_INTRS1;
	else
		sc->sk_intrmask |= SK_Y2_INTRS2;
	sc->sk_intrmask |= SK_Y2_IMR_BMU;
	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	timeout_add_sec(&sc_if->sk_tick_ch, 1);

	splx(s);
}

/*
 * Stop the interface.  If `softonly' is clear the hardware (FIFOs,
 * BMUs, RAM buffers, arbiter, LEDs, prefetch units) is also shut down
 * and this port's interrupts are masked; the mbufs and DMA maps held
 * by the rings are released in either case.
 */
void
msk_stop(struct sk_if_softc *sc_if, int softonly)
{
	struct sk_softc *sc = sc_if->sk_softc;
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	struct sk_txmap_entry *dma;
	int i;

	DPRINTFN(2, ("msk_stop\n"));

	timeout_del(&sc_if->sk_tick_ch);

	ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE);

	/* Stop transfer of Tx descriptors */

	/* Stop transfer of Rx descriptors */

	if (!softonly) {
		/* Turn off various components of this interface. */
		SK_IF_WRITE_1(sc_if,0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET);
		SK_IF_WRITE_1(sc_if,0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET);
		SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
		SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
		SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_BMU_CSR, SK_TXBMU_OFFLINE);
		SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
		SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
		SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
		SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_TXLEDCTL_COUNTER_STOP);
		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);

		SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_CSR, 0x00000001);
		SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_CSR, 0x00000001);

		/* Disable interrupts */
		if (sc_if->sk_port == SK_PORT_A)
			sc->sk_intrmask &= ~SK_Y2_INTRS1;
		else
			sc->sk_intrmask &= ~SK_Y2_INTRS2;
		CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
	}

	/* Free RX and TX mbufs still in the queues.
*/ 2164 for (i = 0; i < MSK_RX_RING_CNT; i++) { 2165 if (sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf != NULL) { 2166 m_freem(sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf); 2167 sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf = NULL; 2168 } 2169 } 2170 2171 sc_if->sk_cdata.sk_rx_prod = 0; 2172 sc_if->sk_cdata.sk_rx_cons = 0; 2173 sc_if->sk_cdata.sk_rx_cnt = 0; 2174 2175 for (i = 0; i < MSK_TX_RING_CNT; i++) { 2176 if (sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf != NULL) { 2177 m_freem(sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf); 2178 sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf = NULL; 2179 SIMPLEQ_INSERT_HEAD(&sc_if->sk_txmap_head, 2180 sc_if->sk_cdata.sk_tx_map[i], link); 2181 sc_if->sk_cdata.sk_tx_map[i] = 0; 2182 } 2183 } 2184 2185 while ((dma = SIMPLEQ_FIRST(&sc_if->sk_txmap_head))) { 2186 SIMPLEQ_REMOVE_HEAD(&sc_if->sk_txmap_head, link); 2187 bus_dmamap_destroy(sc->sc_dmatag, dma->dmamap); 2188 free(dma, M_DEVBUF); 2189 } 2190 } 2191 2192 struct cfattach mskc_ca = { 2193 sizeof(struct sk_softc), mskc_probe, mskc_attach, mskc_detach, 2194 mskc_activate 2195 }; 2196 2197 struct cfdriver mskc_cd = { 2198 NULL, "mskc", DV_DULL 2199 }; 2200 2201 struct cfattach msk_ca = { 2202 sizeof(struct sk_if_softc), msk_probe, msk_attach, msk_detach, 2203 msk_activate 2204 }; 2205 2206 struct cfdriver msk_cd = { 2207 NULL, "msk", DV_IFNET 2208 }; 2209 2210 #ifdef MSK_DEBUG 2211 void 2212 msk_dump_txdesc(struct msk_tx_desc *le, int idx) 2213 { 2214 #define DESC_PRINT(X) \ 2215 if (X) \ 2216 printf("txdesc[%d]." 
#X "=%#x\n", \ 2217 idx, X); 2218 2219 DESC_PRINT(letoh32(le->sk_addr)); 2220 DESC_PRINT(letoh16(le->sk_len)); 2221 DESC_PRINT(le->sk_ctl); 2222 DESC_PRINT(le->sk_opcode); 2223 #undef DESC_PRINT 2224 } 2225 2226 void 2227 msk_dump_bytes(const char *data, int len) 2228 { 2229 int c, i, j; 2230 2231 for (i = 0; i < len; i += 16) { 2232 printf("%08x ", i); 2233 c = len - i; 2234 if (c > 16) c = 16; 2235 2236 for (j = 0; j < c; j++) { 2237 printf("%02x ", data[i + j] & 0xff); 2238 if ((j & 0xf) == 7 && j > 0) 2239 printf(" "); 2240 } 2241 2242 for (; j < 16; j++) 2243 printf(" "); 2244 printf(" "); 2245 2246 for (j = 0; j < c; j++) { 2247 int ch = data[i + j] & 0xff; 2248 printf("%c", ' ' <= ch && ch <= '~' ? ch : ' '); 2249 } 2250 2251 printf("\n"); 2252 2253 if (c < 16) 2254 break; 2255 } 2256 } 2257 2258 void 2259 msk_dump_mbuf(struct mbuf *m) 2260 { 2261 int count = m->m_pkthdr.len; 2262 2263 printf("m=%#lx, m->m_pkthdr.len=%#d\n", m, m->m_pkthdr.len); 2264 2265 while (count > 0 && m) { 2266 printf("m=%#lx, m->m_data=%#lx, m->m_len=%d\n", 2267 m, m->m_data, m->m_len); 2268 msk_dump_bytes(mtod(m, char *), m->m_len); 2269 2270 count -= m->m_len; 2271 m = m->m_next; 2272 } 2273 } 2274 #endif 2275