1 /* $OpenBSD: if_msk.c,v 1.65 2008/09/10 14:01:22 blambert Exp $ */ 2 3 /* 4 * Copyright (c) 1997, 1998, 1999, 2000 5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Bill Paul. 18 * 4. Neither the name of the author nor the names of any co-contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 
33 * 34 * $FreeBSD: /c/ncvs/src/sys/pci/if_sk.c,v 1.20 2000/04/22 02:16:37 wpaul Exp $ 35 */ 36 37 /* 38 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu> 39 * 40 * Permission to use, copy, modify, and distribute this software for any 41 * purpose with or without fee is hereby granted, provided that the above 42 * copyright notice and this permission notice appear in all copies. 43 * 44 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 45 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 46 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 47 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 48 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 49 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 50 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 51 */ 52 53 /* 54 * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports 55 * the SK-984x series adapters, both single port and dual port. 56 * References: 57 * The XaQti XMAC II datasheet, 58 * http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf 59 * The SysKonnect GEnesis manual, http://www.syskonnect.com 60 * 61 * Note: XaQti has been acquired by Vitesse, and Vitesse does not have the 62 * XMAC II datasheet online. I have put my copy at people.freebsd.org as a 63 * convenience to others until Vitesse corrects this problem: 64 * 65 * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf 66 * 67 * Written by Bill Paul <wpaul@ee.columbia.edu> 68 * Department of Electrical Engineering 69 * Columbia University, New York City 70 */ 71 72 /* 73 * The SysKonnect gigabit ethernet adapters consist of two main 74 * components: the SysKonnect GEnesis controller chip and the XaQti Corp. 75 * XMAC II gigabit ethernet MAC. 
The XMAC provides all of the MAC 76 * components and a PHY while the GEnesis controller provides a PCI 77 * interface with DMA support. Each card may have between 512K and 78 * 2MB of SRAM on board depending on the configuration. 79 * 80 * The SysKonnect GEnesis controller can have either one or two XMAC 81 * chips connected to it, allowing single or dual port NIC configurations. 82 * SysKonnect has the distinction of being the only vendor on the market 83 * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs, 84 * dual DMA queues, packet/MAC/transmit arbiters and direct access to the 85 * XMAC registers. This driver takes advantage of these features to allow 86 * both XMACs to operate as independent interfaces. 87 */ 88 89 #include "bpfilter.h" 90 91 #include <sys/param.h> 92 #include <sys/systm.h> 93 #include <sys/sockio.h> 94 #include <sys/mbuf.h> 95 #include <sys/malloc.h> 96 #include <sys/kernel.h> 97 #include <sys/socket.h> 98 #include <sys/timeout.h> 99 #include <sys/device.h> 100 #include <sys/queue.h> 101 102 #include <net/if.h> 103 #include <net/if_dl.h> 104 #include <net/if_types.h> 105 106 #ifdef INET 107 #include <netinet/in.h> 108 #include <netinet/in_systm.h> 109 #include <netinet/in_var.h> 110 #include <netinet/ip.h> 111 #include <netinet/udp.h> 112 #include <netinet/tcp.h> 113 #include <netinet/if_ether.h> 114 #endif 115 116 #include <net/if_media.h> 117 #include <net/if_vlan_var.h> 118 119 #if NBPFILTER > 0 120 #include <net/bpf.h> 121 #endif 122 123 #include <dev/mii/mii.h> 124 #include <dev/mii/miivar.h> 125 #include <dev/mii/brgphyreg.h> 126 127 #include <dev/pci/pcireg.h> 128 #include <dev/pci/pcivar.h> 129 #include <dev/pci/pcidevs.h> 130 131 #include <dev/pci/if_skreg.h> 132 #include <dev/pci/if_mskvar.h> 133 134 int mskc_probe(struct device *, void *, void *); 135 void mskc_attach(struct device *, struct device *self, void *aux); 136 int mskc_detach(struct device *, int); 137 void mskc_reset(struct sk_softc *); 138 void 
mskc_shutdown(void *);
int msk_probe(struct device *, void *, void *);
void msk_attach(struct device *, struct device *self, void *aux);
int msk_detach(struct device *, int);
void msk_reset(struct sk_if_softc *);
int mskcprint(void *, const char *);
int msk_intr(void *);
void msk_intr_yukon(struct sk_if_softc *);
static __inline int msk_rxvalid(struct sk_softc *, u_int32_t, u_int32_t);
void msk_rxeof(struct sk_if_softc *, u_int16_t, u_int32_t);
void msk_txeof(struct sk_if_softc *);
int msk_encap(struct sk_if_softc *, struct mbuf *, u_int32_t *);
void msk_start(struct ifnet *);
int msk_ioctl(struct ifnet *, u_long, caddr_t);
void msk_init(void *);
void msk_init_yukon(struct sk_if_softc *);
void msk_stop(struct sk_if_softc *);
void msk_watchdog(struct ifnet *);
int msk_ifmedia_upd(struct ifnet *);
void msk_ifmedia_sts(struct ifnet *, struct ifmediareq *);
int msk_newbuf(struct sk_if_softc *, int, struct mbuf *, bus_dmamap_t);
int msk_alloc_jumbo_mem(struct sk_if_softc *);
void *msk_jalloc(struct sk_if_softc *);
void msk_jfree(caddr_t, u_int, void *);
int msk_init_rx_ring(struct sk_if_softc *);
int msk_init_tx_ring(struct sk_if_softc *);

int msk_miibus_readreg(struct device *, int, int);
void msk_miibus_writereg(struct device *, int, int, int);
void msk_miibus_statchg(struct device *);

void msk_setmulti(struct sk_if_softc *);
void msk_setpromisc(struct sk_if_softc *);
void msk_tick(void *);

/* Debug printf helpers; compiled away entirely unless MSK_DEBUG is defined. */
#ifdef MSK_DEBUG
#define DPRINTF(x)	if (mskdebug) printf x
#define DPRINTFN(n,x)	if (mskdebug >= (n)) printf x
int	mskdebug = 0;

void msk_dump_txdesc(struct msk_tx_desc *, int);
void msk_dump_mbuf(struct mbuf *);
void msk_dump_bytes(const char *, int);
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

/* supported device vendors */
const struct pci_matchid mskc_devices[] = {
	{ PCI_VENDOR_DLINK, PCI_PRODUCT_DLINK_DGE550SX },
	{ PCI_VENDOR_DLINK, PCI_PRODUCT_DLINK_DGE550T_B1 },
	{ PCI_VENDOR_DLINK, PCI_PRODUCT_DLINK_DGE560SX },
	{ PCI_VENDOR_DLINK, PCI_PRODUCT_DLINK_DGE560T },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_C032 },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_C033 },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_C034 },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_C036 },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_C042 },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8021CU },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8021X },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8022CU },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8022X },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8035 },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8036 },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8038 },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8039 },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8040 },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8040T },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8042 },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8048 },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8050 },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8052 },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8053 },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8055 },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8055_2 },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8056 },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8058 },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8061CU },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8061X },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8062CU },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8062X },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8070 },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8071 },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8072 },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8075 },
	{ PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK9Sxx },
	{ PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK9Exx }
};

/* Thin wrappers around the CSR_* register accessor macros. */
static inline u_int32_t
sk_win_read_4(struct sk_softc *sc, u_int32_t reg)
{
	return CSR_READ_4(sc, reg);
}

static inline u_int16_t
sk_win_read_2(struct sk_softc *sc, u_int32_t reg)
{
	return CSR_READ_2(sc, reg);
}

static inline u_int8_t
sk_win_read_1(struct sk_softc *sc, u_int32_t reg)
{
	return CSR_READ_1(sc, reg);
}

static inline void
sk_win_write_4(struct sk_softc *sc, u_int32_t reg, u_int32_t x)
{
	CSR_WRITE_4(sc, reg, x);
}

static inline void
sk_win_write_2(struct sk_softc *sc, u_int32_t reg, u_int16_t x)
{
	CSR_WRITE_2(sc, reg, x);
}

static inline void
sk_win_write_1(struct sk_softc *sc, u_int32_t reg, u_int8_t x)
{
	CSR_WRITE_1(sc, reg, x);
}

/*
 * Read a PHY register through the Yukon SMI interface: issue the read
 * command, then poll (up to SK_TIMEOUT 1us ticks) for READ_VALID before
 * fetching the data register.  Returns 0 if the PHY never becomes ready.
 */
int
msk_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
	u_int16_t val;
	int i;

	SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
	    YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ);

	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		val = SK_YU_READ_2(sc_if, YUKON_SMICR);
		if (val & YU_SMICR_READ_VALID)
			break;
	}

	if (i == SK_TIMEOUT) {
		printf("%s: phy failed to come ready\n",
		    sc_if->sk_dev.dv_xname);
		return (0);
	}

	DPRINTFN(9, ("msk_miibus_readreg: i=%d, timeout=%d\n", i,
	    SK_TIMEOUT));

	val = SK_YU_READ_2(sc_if, YUKON_SMIDR);

	DPRINTFN(9, ("msk_miibus_readreg phy=%d, reg=%#x, val=%#x\n",
	    phy, reg, val));

	return (val);
}

/*
 * Write a PHY register through the Yukon SMI interface and wait (up to
 * SK_TIMEOUT 1us ticks) for the busy bit to clear.
 */
void
msk_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
	int i;

	DPRINTFN(9, ("msk_miibus_writereg phy=%d reg=%#x val=%#x\n",
	    phy, reg, val));

	SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val);
	SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
	    YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE);

	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		if (!(SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY))
			break;
	}

	if (i == SK_TIMEOUT)
		printf("%s: phy write timed out\n", sc_if->sk_dev.dv_xname);
}

/*
 * MII status-change callback: fold the current (or forced) speed,
 * duplex and flow-control settings into the GMAC general purpose
 * control register.  With media set to autoselect, everything except
 * the RX/TX enable bits is cleared so the MAC follows the PHY.
 */
void
msk_miibus_statchg(struct device *dev)
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
	struct mii_data *mii = &sc_if->sk_mii;
	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
	int gpcr;

	gpcr = SK_YU_READ_2(sc_if, YUKON_GPCR);
	/* Preserve only the RX/TX enable bits. */
	gpcr &= (YU_GPCR_TXEN | YU_GPCR_RXEN);

	if (IFM_SUBTYPE(ife->ifm_media) != IFM_AUTO) {
		/* Set speed. */
		gpcr |= YU_GPCR_SPEED_DIS;
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_1000_SX:
		case IFM_1000_LX:
		case IFM_1000_CX:
		case IFM_1000_T:
			gpcr |= (YU_GPCR_GIG | YU_GPCR_SPEED);
			break;
		case IFM_100_TX:
			gpcr |= YU_GPCR_SPEED;
			break;
		}

		/* Set duplex. */
		gpcr |= YU_GPCR_DPLX_DIS;
		if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
			gpcr |= YU_GPCR_DUPLEX;

		/* Disable flow control. */
		gpcr |= YU_GPCR_FCTL_DIS;
		gpcr |= (YU_GPCR_FCTL_TX_DIS | YU_GPCR_FCTL_RX_DIS);
	}

	SK_YU_WRITE_2(sc_if, YUKON_GPCR, gpcr);

	DPRINTFN(9, ("msk_miibus_statchg: gpcr=%x\n",
	    SK_YU_READ_2(((struct sk_if_softc *)dev), YUKON_GPCR)));
}

/*
 * Program the 64-bit multicast hash filter (four 16-bit MCAH
 * registers).  Under IFF_ALLMULTI or IFF_PROMISC every bit is set;
 * otherwise one bit per group address is set from a big-endian CRC32
 * hash.  A multicast address range entry forces ALLMULTI.
 */
void
msk_setmulti(struct sk_if_softc *sc_if)
{
	struct ifnet *ifp= &sc_if->arpcom.ac_if;
	u_int32_t hashes[2] = { 0, 0 };
	int h;
	struct arpcom *ac = &sc_if->arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;

	/* First, zot all the existing filters. */
	SK_YU_WRITE_2(sc_if, YUKON_MCAH1, 0);
	SK_YU_WRITE_2(sc_if, YUKON_MCAH2, 0);
	SK_YU_WRITE_2(sc_if, YUKON_MCAH3, 0);
	SK_YU_WRITE_2(sc_if, YUKON_MCAH4, 0);


	/* Now program new ones. */
allmulti:
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		hashes[0] = 0xFFFFFFFF;
		hashes[1] = 0xFFFFFFFF;
	} else {
		/* First find the tail of the list. */
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			/* A lo != hi entry is a range: fall back to ALLMULTI. */
			if (bcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN)) {
				ifp->if_flags |= IFF_ALLMULTI;
				goto allmulti;
			}
			h = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) &
			    ((1 << SK_HASH_BITS) - 1);
			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff);
	SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff);
	SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff);
	SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff);
}

/*
 * Mirror IFF_PROMISC into the receive control register: promiscuous
 * mode clears the unicast/multicast filter-enable bits, normal mode
 * sets them.
 */
void
msk_setpromisc(struct sk_if_softc *sc_if)
{
	struct ifnet *ifp = &sc_if->arpcom.ac_if;

	if (ifp->if_flags & IFF_PROMISC)
		SK_YU_CLRBIT_2(sc_if, YUKON_RCR,
		    YU_RCR_UFLEN | YU_RCR_MUFLEN);
	else
		SK_YU_SETBIT_2(sc_if, YUKON_RCR,
		    YU_RCR_UFLEN | YU_RCR_MUFLEN);
}

/*
 * Initialize the receive descriptor ring: zero the descriptors, link
 * the software chain into a circle, and attach a fresh jumbo mbuf to
 * every slot.  Returns ENOBUFS if buffer allocation fails.
 */
int
msk_init_rx_ring(struct sk_if_softc *sc_if)
{
	struct msk_chain_data *cd = &sc_if->sk_cdata;
	struct msk_ring_data *rd = sc_if->sk_rdata;
	int i, nexti;

	bzero((char *)rd->sk_rx_ring,
	    sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT);

	for (i = 0; i < MSK_RX_RING_CNT; i++) {
		cd->sk_rx_chain[i].sk_le = &rd->sk_rx_ring[i];
		if (i == (MSK_RX_RING_CNT - 1))
			nexti = 0;
		else
			nexti = i + 1;
		cd->sk_rx_chain[i].sk_next = &cd->sk_rx_chain[nexti];
	}

	for (i = 0; i < MSK_RX_RING_CNT; i++) {
		if (msk_newbuf(sc_if, i, NULL,
		    sc_if->sk_cdata.sk_rx_jumbo_map) == ENOBUFS) {
			printf("%s: failed alloc of %dth mbuf\n",
			    sc_if->sk_dev.dv_xname, i);
			return (ENOBUFS);
		}
	}

	sc_if->sk_cdata.sk_rx_prod = MSK_RX_RING_CNT - 1;
	sc_if->sk_cdata.sk_rx_cons = 0;

	return (0);
}

/*
 * Initialize the transmit descriptor ring: zero the descriptors, link
 * the software chain into a circle, and pre-create one DMA map per
 * descriptor, kept on the sk_txmap_head free list for msk_encap().
 * Returns ENOBUFS if map or list-entry allocation fails.
 */
int
msk_init_tx_ring(struct sk_if_softc *sc_if)
{
	struct sk_softc *sc = sc_if->sk_softc;
	struct msk_chain_data *cd = &sc_if->sk_cdata;
	struct msk_ring_data *rd = sc_if->sk_rdata;
	bus_dmamap_t dmamap;
	struct sk_txmap_entry *entry;
	int i, nexti;

	bzero((char *)sc_if->sk_rdata->sk_tx_ring,
	    sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT);

	SIMPLEQ_INIT(&sc_if->sk_txmap_head);
	for (i = 0; i < MSK_TX_RING_CNT; i++) {
		cd->sk_tx_chain[i].sk_le = &rd->sk_tx_ring[i];
		if (i == (MSK_TX_RING_CNT - 1))
			nexti = 0;
		else
			nexti = i + 1;
		cd->sk_tx_chain[i].sk_next = &cd->sk_tx_chain[nexti];

		if (bus_dmamap_create(sc->sc_dmatag, SK_JLEN, SK_NTXSEG,
		   SK_JLEN, 0, BUS_DMA_NOWAIT, &dmamap))
			return (ENOBUFS);

		entry = malloc(sizeof(*entry), M_DEVBUF, M_NOWAIT);
		if (!entry) {
			bus_dmamap_destroy(sc->sc_dmatag, dmamap);
			return (ENOBUFS);
		}
		entry->dmamap = dmamap;
		SIMPLEQ_INSERT_HEAD(&sc_if->sk_txmap_head, entry, link);
	}

	sc_if->sk_cdata.sk_tx_prod = 0;
	sc_if->sk_cdata.sk_tx_cons = 0;
	sc_if->sk_cdata.sk_tx_cnt = 0;

	MSK_CDTXSYNC(sc_if, 0, MSK_TX_RING_CNT,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * Stick a receive buffer into ring slot i and hand the descriptor back
 * to the hardware.  With m == NULL a new mbuf header is allocated and
 * a buffer from the jumbo pool attached; otherwise the caller's mbuf
 * is re-initialized and reused.  Returns ENOBUFS when no mbuf or jumbo
 * buffer is available, 0 on success.
 */
int
msk_newbuf(struct sk_if_softc *sc_if, int i, struct mbuf *m,
	   bus_dmamap_t dmamap)
{
	struct mbuf *m_new = NULL;
	struct sk_chain *c;
	struct msk_rx_desc *r;

	if (m == NULL) {
		caddr_t buf = NULL;

		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return (ENOBUFS);

		/* Allocate the jumbo buffer */
		buf = msk_jalloc(sc_if);
		if (buf == NULL) {
			m_freem(m_new);
			DPRINTFN(1, ("%s jumbo allocation failed -- packet "
			    "dropped!\n", sc_if->arpcom.ac_if.if_xname));
			return (ENOBUFS);
		}

		/* Attach the buffer to the mbuf */
		m_new->m_len = m_new->m_pkthdr.len = SK_JLEN;
		MEXTADD(m_new, buf, SK_JLEN, 0, msk_jfree, sc_if);
	} else {
		/*
		 * We're re-using a previously allocated mbuf;
		 * be sure to re-init pointers and lengths to
		 * default values.
		 */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = SK_JLEN;
		m_new->m_data = m_new->m_ext.ext_buf;
	}
	/* Shift data by ETHER_ALIGN so the payload lands aligned. */
	m_adj(m_new, ETHER_ALIGN);

	c = &sc_if->sk_cdata.sk_rx_chain[i];
	r = c->sk_le;
	c->sk_mbuf = m_new;
	/* DMA address = base of the jumbo area + offset of this buffer. */
	r->sk_addr = htole32(dmamap->dm_segs[0].ds_addr +
	    (((vaddr_t)m_new->m_data
	    - (vaddr_t)sc_if->sk_cdata.sk_jumbo_buf)));
	r->sk_len = htole16(SK_JLEN);
	r->sk_ctl = 0;
	/* Hand ownership of the descriptor back to the chip. */
	r->sk_opcode = SK_Y2_RXOPC_PACKET | SK_Y2_RXOPC_OWN;

	MSK_CDRXSYNC(sc_if, i, BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	return (0);
}

/*
 * Memory management for jumbo frames.
 */

/*
 * Allocate one contiguous DMA-able region (MSK_JMEM bytes) for all
 * jumbo buffers, carve it into MSK_JSLOTS slots of SK_JLEN bytes and
 * put every slot on the free list.  On failure the 'state' variable
 * drives a partial teardown of whatever steps succeeded.
 */
int
msk_alloc_jumbo_mem(struct sk_if_softc *sc_if)
{
	struct sk_softc *sc = sc_if->sk_softc;
	caddr_t ptr, kva;
	bus_dma_segment_t seg;
	int i, rseg, state, error;
	struct sk_jpool_entry *entry;

	state = error = 0;

	/* Grab a big chunk o' storage. */
	if (bus_dmamem_alloc(sc->sc_dmatag, MSK_JMEM, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		printf(": can't alloc rx buffers");
		return (ENOBUFS);
	}

	state = 1;
	if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg, MSK_JMEM, &kva,
	    BUS_DMA_NOWAIT)) {
		printf(": can't map dma buffers (%d bytes)", MSK_JMEM);
		error = ENOBUFS;
		goto out;
	}

	state = 2;
	if (bus_dmamap_create(sc->sc_dmatag, MSK_JMEM, 1, MSK_JMEM, 0,
	    BUS_DMA_NOWAIT, &sc_if->sk_cdata.sk_rx_jumbo_map)) {
		printf(": can't create dma map");
		error = ENOBUFS;
		goto out;
	}

	state = 3;
	if (bus_dmamap_load(sc->sc_dmatag, sc_if->sk_cdata.sk_rx_jumbo_map,
	    kva, MSK_JMEM, NULL, BUS_DMA_NOWAIT)) {
		printf(": can't load dma map");
		error = ENOBUFS;
		goto out;
	}

	state = 4;
	sc_if->sk_cdata.sk_jumbo_buf = (caddr_t)kva;
	DPRINTFN(1,("msk_jumbo_buf = 0x%08X\n", sc_if->sk_cdata.sk_jumbo_buf));

	LIST_INIT(&sc_if->sk_jfree_listhead);
	LIST_INIT(&sc_if->sk_jinuse_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array.
	 */
	ptr = sc_if->sk_cdata.sk_jumbo_buf;
	for (i = 0; i < MSK_JSLOTS; i++) {
		sc_if->sk_cdata.sk_jslots[i] = ptr;
		ptr += SK_JLEN;
		entry = malloc(sizeof(struct sk_jpool_entry),
		    M_DEVBUF, M_NOWAIT);
		if (entry == NULL) {
			sc_if->sk_cdata.sk_jumbo_buf = NULL;
			printf(": no memory for jumbo buffer queue!");
			error = ENOBUFS;
			goto out;
		}
		entry->slot = i;
		LIST_INSERT_HEAD(&sc_if->sk_jfree_listhead,
		    entry, jpool_entries);
	}
out:
	if (error != 0) {
		/* Unwind exactly the steps that succeeded (fallthrough). */
		switch (state) {
		case 4:
			bus_dmamap_unload(sc->sc_dmatag,
			    sc_if->sk_cdata.sk_rx_jumbo_map);
		case 3:
			bus_dmamap_destroy(sc->sc_dmatag,
			    sc_if->sk_cdata.sk_rx_jumbo_map);
		case 2:
			bus_dmamem_unmap(sc->sc_dmatag, kva, MSK_JMEM);
		case 1:
			bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
			break;
		default:
			break;
		}
	}

	return (error);
}

/*
 * Allocate a jumbo buffer: move the first free-list entry onto the
 * in-use list and return its slot's address, or NULL if exhausted.
 */
void *
msk_jalloc(struct sk_if_softc *sc_if)
{
	struct sk_jpool_entry *entry;

	entry = LIST_FIRST(&sc_if->sk_jfree_listhead);

	if (entry == NULL)
		return (NULL);

	LIST_REMOVE(entry, jpool_entries);
	LIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead, entry, jpool_entries);
	return (sc_if->sk_cdata.sk_jslots[entry->slot]);
}

/*
 * Release a jumbo buffer (mbuf external-storage free callback; 'arg'
 * is the sk_if_softc passed to MEXTADD in msk_newbuf()).
 */
void
msk_jfree(caddr_t buf, u_int size, void *arg)
{
	struct sk_jpool_entry *entry;
	struct sk_if_softc *sc;
	int i;

	/* Extract the softc struct pointer. */
	sc = (struct sk_if_softc *)arg;

	if (sc == NULL)
		panic("msk_jfree: can't find softc pointer!");

	/* calculate the slot this buffer belongs to */
	i = ((vaddr_t)buf
	    - (vaddr_t)sc->sk_cdata.sk_jumbo_buf) / SK_JLEN;

	if ((i < 0) || (i >= MSK_JSLOTS))
		panic("msk_jfree: asked to free buffer that we don't manage!");

	/* Recycle an in-use entry back onto the free list for slot i. */
	entry = LIST_FIRST(&sc->sk_jinuse_listhead);
	if (entry == NULL)
		panic("msk_jfree: buffer not in use!");
	entry->slot = i;
	LIST_REMOVE(entry, jpool_entries);
	LIST_INSERT_HEAD(&sc->sk_jfree_listhead, entry, jpool_entries);
}

/*
 * Set media options.
 */
int
msk_ifmedia_upd(struct ifnet *ifp)
{
	struct sk_if_softc *sc_if = ifp->if_softc;

	mii_mediachg(&sc_if->sk_mii);
	return (0);
}

/*
 * Report current media status.
 */
void
msk_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct sk_if_softc *sc_if = ifp->if_softc;

	mii_pollstat(&sc_if->sk_mii);
	ifmr->ifm_active = sc_if->sk_mii.mii_media_active;
	ifmr->ifm_status = sc_if->sk_mii.mii_media_status;
}

/*
 * Interface ioctl handler: address assignment, MTU, interface flags,
 * multicast list updates and media selection.  Runs at splnet.
 */
int
msk_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct sk_if_softc *sc_if = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct ifaddr *ifa = (struct ifaddr *) data;
	struct mii_data *mii;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, &sc_if->arpcom, command, data)) > 0) {
		splx(s);
		return (error);
	}

	switch(command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			msk_init(sc_if);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc_if->arpcom, ifa);
#endif /* INET */
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ifp->if_hardmtu)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu)
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the promiscuity bit changed, just
			 * reprogram the filters instead of a full init.
			 */
			if (ifp->if_flags & IFF_RUNNING &&
			    (sc_if->sk_if_flags ^ ifp->if_flags) &
			     IFF_PROMISC) {
				msk_setpromisc(sc_if);
				msk_setmulti(sc_if);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					msk_init(sc_if);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				msk_stop(sc_if);
		}
		sc_if->sk_if_flags = ifp->if_flags;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (command == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc_if->arpcom) :
		    ether_delmulti(ifr, &sc_if->arpcom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware
			 * filter accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				msk_setmulti(sc_if);
			error = 0;
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = &sc_if->sk_mii;
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	default:
		error = ENOTTY;
		break;
	}

	splx(s);

	return (error);
}

/*
 * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
int
mskc_probe(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid((struct pci_attach_args *)aux, mskc_devices,
	    sizeof(mskc_devices)/sizeof(mskc_devices[0])));
}

/*
 * Force the GEnesis into reset, then bring it out of reset.
 */
void
mskc_reset(struct sk_softc *sc)
{
	u_int32_t imtimer_ticks, reg1;
	int reg;

	DPRINTFN(2, ("mskc_reset\n"));

	/* Pulse software and master reset, then release both. */
	CSR_WRITE_1(sc, SK_CSR, SK_CSR_SW_RESET);
	CSR_WRITE_1(sc, SK_CSR, SK_CSR_MASTER_RESET);

	DELAY(1000);
	CSR_WRITE_1(sc, SK_CSR, SK_CSR_SW_UNRESET);
	DELAY(2);
	CSR_WRITE_1(sc, SK_CSR, SK_CSR_MASTER_UNRESET);

	sk_win_write_1(sc, SK_TESTCTL1, 2);

	/* PHY COMA mode only on Yukon XL later than rev A1. */
	reg1 = sk_win_read_4(sc, SK_Y2_PCI_REG(SK_PCI_OURREG1));
	if (sc->sk_type == SK_YUKON_XL && sc->sk_rev > SK_YUKON_XL_REV_A1)
		reg1 |= (SK_Y2_REG1_PHY1_COMA | SK_Y2_REG1_PHY2_COMA);
	else
		reg1 &= ~(SK_Y2_REG1_PHY1_COMA | SK_Y2_REG1_PHY2_COMA);
	sk_win_write_4(sc, SK_Y2_PCI_REG(SK_PCI_OURREG1), reg1);

	if (sc->sk_type == SK_YUKON_XL && sc->sk_rev > SK_YUKON_XL_REV_A1)
		sk_win_write_1(sc, SK_Y2_CLKGATE,
		    SK_Y2_CLKGATE_LINK1_GATE_DIS |
		    SK_Y2_CLKGATE_LINK2_GATE_DIS |
		    SK_Y2_CLKGATE_LINK1_CORE_DIS |
		    SK_Y2_CLKGATE_LINK2_CORE_DIS |
		    SK_Y2_CLKGATE_LINK1_PCI_DIS | SK_Y2_CLKGATE_LINK2_PCI_DIS);
	else
		sk_win_write_1(sc, SK_Y2_CLKGATE, 0);

	/*
	 * NOTE(review): the + SK_WIN_LEN offset presumably addresses the
	 * second link's register bank -- confirm against if_skreg.h.
	 */
	CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET);
	CSR_WRITE_2(sc, SK_LINK_CTRL + SK_WIN_LEN, SK_LINK_RESET_SET);
	DELAY(1000);
	CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR);
	CSR_WRITE_2(sc, SK_LINK_CTRL + SK_WIN_LEN, SK_LINK_RESET_CLEAR);

	sk_win_write_1(sc, SK_TESTCTL1, 1);

	DPRINTFN(2, ("mskc_reset: sk_csr=%x\n", CSR_READ_1(sc, SK_CSR)));
	DPRINTFN(2, ("mskc_reset: sk_link_ctrl=%x\n",
	    CSR_READ_2(sc, SK_LINK_CTRL)));

	/* Disable ASF */
	CSR_WRITE_1(sc, SK_Y2_ASF_CSR, SK_Y2_ASF_RESET);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_ASF_OFF);

	/* Clear I2C IRQ noise */
	CSR_WRITE_4(sc, SK_I2CHWIRQ, 1);

	/* Disable hardware timer */
	CSR_WRITE_1(sc, SK_TIMERCTL, SK_IMCTL_STOP);
	CSR_WRITE_1(sc, SK_TIMERCTL, SK_IMCTL_IRQ_CLEAR);

	/* Disable descriptor polling */
	CSR_WRITE_4(sc, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_STOP);

	/* Disable time stamps */
	CSR_WRITE_1(sc, SK_TSTAMP_CTL, SK_TSTAMP_STOP);
	CSR_WRITE_1(sc, SK_TSTAMP_CTL, SK_TSTAMP_IRQ_CLEAR);

	/* Enable RAM interface */
	sk_win_write_1(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);
	for (reg = SK_TO0;reg <= SK_TO11; reg++)
		sk_win_write_1(sc, reg, 36);
	sk_win_write_1(sc, SK_RAMCTL + (SK_WIN_LEN / 2), SK_RAMCTL_UNRESET);
	for (reg = SK_TO0;reg <= SK_TO11; reg++)
		sk_win_write_1(sc, reg + (SK_WIN_LEN / 2), 36);

	/*
	 * Configure interrupt moderation. The moderation timer
	 * defers interrupts specified in the interrupt moderation
	 * timer mask based on the timeout specified in the interrupt
	 * moderation timer init register. Each bit in the timer
	 * register represents one tick, so to specify a timeout in
	 * microseconds, we have to multiply by the correct number of
	 * ticks-per-microsecond.
	 */
	switch (sc->sk_type) {
	case SK_YUKON_EC:
	case SK_YUKON_XL:
	case SK_YUKON_FE:
		imtimer_ticks = SK_IMTIMER_TICKS_YUKON_EC;
		break;
	default:
		imtimer_ticks = SK_IMTIMER_TICKS_YUKON;
	}

	/* Reset status ring. */
	bzero((char *)sc->sk_status_ring,
	    MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc));
	sc->sk_status_idx = 0;

	sk_win_write_4(sc, SK_STAT_BMU_CSR, SK_STAT_BMU_RESET);
	sk_win_write_4(sc, SK_STAT_BMU_CSR, SK_STAT_BMU_UNRESET);

	/* Point the status BMU at the status ring's DMA address. */
	sk_win_write_2(sc, SK_STAT_BMU_LIDX, MSK_STATUS_RING_CNT - 1);
	sk_win_write_4(sc, SK_STAT_BMU_ADDRLO,
	    sc->sk_status_map->dm_segs[0].ds_addr);
	sk_win_write_4(sc, SK_STAT_BMU_ADDRHI,
	    (u_int64_t)sc->sk_status_map->dm_segs[0].ds_addr >> 32);
	sk_win_write_2(sc, SK_STAT_BMU_TX_THRESH, 10);
	sk_win_write_1(sc, SK_STAT_BMU_FIFOWM, 16);
	sk_win_write_1(sc, SK_STAT_BMU_FIFOIWM, 16);

#if 0
	sk_win_write_4(sc, SK_Y2_LEV_TIMERINIT, SK_IM_USECS(100));
	sk_win_write_4(sc, 0x0ec0, SK_IM_USECS(1000));

	sk_win_write_4(sc, 0x0ed0, SK_IM_USECS(20));
#else
	sk_win_write_4(sc, SK_Y2_ISR_ITIMERINIT, SK_IM_USECS(4));
#endif

	sk_win_write_4(sc, SK_STAT_BMU_CSR, SK_STAT_BMU_ON);

	sk_win_write_1(sc, SK_Y2_LEV_ITIMERCTL, SK_IMCTL_START);
	sk_win_write_1(sc, SK_Y2_TX_ITIMERCTL, SK_IMCTL_START);
	sk_win_write_1(sc, SK_Y2_ISR_ITIMERCTL, SK_IMCTL_START);
}

/*
 * Match a logical port (msk) instance: accept ports A/B on any of the
 * supported Yukon 2 chip variants.
 */
int
msk_probe(struct device *parent, void *match, void *aux)
{
	struct skc_attach_args *sa = aux;

	if (sa->skc_port != SK_PORT_A && sa->skc_port != SK_PORT_B)
		return (0);

	switch (sa->skc_type) {
	case SK_YUKON_XL:
	case SK_YUKON_EC_U:
	case SK_YUKON_EX:
	case SK_YUKON_EC:
	case SK_YUKON_FE:
	case SK_YUKON_FE_P:
	case SK_YUKON_SUPR:
		return (1);
	}

	return (0);
}

/* Pulse the per-port GMAC and GPHY reset lines, then release them. */
void
msk_reset(struct sk_if_softc *sc_if)
{
	/* GMAC and GPHY Reset */
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET);
	DELAY(1000);
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_CLEAR);
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF |
	    SK_GMAC_PAUSE_ON |
SK_GMAC_RESET_CLEAR); 977 } 978 979 /* 980 * Each XMAC chip is attached as a separate logical IP interface. 981 * Single port cards will have only one logical interface of course. 982 */ 983 void 984 msk_attach(struct device *parent, struct device *self, void *aux) 985 { 986 struct sk_if_softc *sc_if = (struct sk_if_softc *)self; 987 struct sk_softc *sc = (struct sk_softc *)parent; 988 struct skc_attach_args *sa = aux; 989 struct ifnet *ifp; 990 caddr_t kva; 991 bus_dma_segment_t seg; 992 int i, rseg; 993 u_int32_t chunk; 994 int mii_flags; 995 996 sc_if->sk_port = sa->skc_port; 997 sc_if->sk_softc = sc; 998 sc->sk_if[sa->skc_port] = sc_if; 999 1000 DPRINTFN(2, ("begin msk_attach: port=%d\n", sc_if->sk_port)); 1001 1002 /* 1003 * Get station address for this interface. Note that 1004 * dual port cards actually come with three station 1005 * addresses: one for each port, plus an extra. The 1006 * extra one is used by the SysKonnect driver software 1007 * as a 'virtual' station address for when both ports 1008 * are operating in failover mode. Currently we don't 1009 * use this extra address. 1010 */ 1011 for (i = 0; i < ETHER_ADDR_LEN; i++) 1012 sc_if->arpcom.ac_enaddr[i] = 1013 sk_win_read_1(sc, SK_MAC0_0 + (sa->skc_port * 8) + i); 1014 1015 printf(": address %s\n", 1016 ether_sprintf(sc_if->arpcom.ac_enaddr)); 1017 1018 /* 1019 * Set up RAM buffer addresses. The Yukon2 has a small amount 1020 * of SRAM on it, somewhere between 4K and 48K. We need to 1021 * divide this up between the transmitter and receiver. We 1022 * give the receiver 2/3 of the memory (rounded down), and the 1023 * transmitter whatever remains. 
1024 */ 1025 chunk = (2 * (sc->sk_ramsize / sizeof(u_int64_t)) / 3) & ~0xff; 1026 sc_if->sk_rx_ramstart = 0; 1027 sc_if->sk_rx_ramend = sc_if->sk_rx_ramstart + chunk - 1; 1028 chunk = (sc->sk_ramsize / sizeof(u_int64_t)) - chunk; 1029 sc_if->sk_tx_ramstart = sc_if->sk_rx_ramend + 1; 1030 sc_if->sk_tx_ramend = sc_if->sk_tx_ramstart + chunk - 1; 1031 1032 DPRINTFN(2, ("msk_attach: rx_ramstart=%#x rx_ramend=%#x\n" 1033 " tx_ramstart=%#x tx_ramend=%#x\n", 1034 sc_if->sk_rx_ramstart, sc_if->sk_rx_ramend, 1035 sc_if->sk_tx_ramstart, sc_if->sk_tx_ramend)); 1036 1037 /* Allocate the descriptor queues. */ 1038 if (bus_dmamem_alloc(sc->sc_dmatag, sizeof(struct msk_ring_data), 1039 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) { 1040 printf(": can't alloc rx buffers\n"); 1041 goto fail; 1042 } 1043 if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg, 1044 sizeof(struct msk_ring_data), &kva, BUS_DMA_NOWAIT)) { 1045 printf(": can't map dma buffers (%lu bytes)\n", 1046 (ulong)sizeof(struct msk_ring_data)); 1047 goto fail_1; 1048 } 1049 if (bus_dmamap_create(sc->sc_dmatag, sizeof(struct msk_ring_data), 1, 1050 sizeof(struct msk_ring_data), 0, BUS_DMA_NOWAIT, 1051 &sc_if->sk_ring_map)) { 1052 printf(": can't create dma map\n"); 1053 goto fail_2; 1054 } 1055 if (bus_dmamap_load(sc->sc_dmatag, sc_if->sk_ring_map, kva, 1056 sizeof(struct msk_ring_data), NULL, BUS_DMA_NOWAIT)) { 1057 printf(": can't load dma map\n"); 1058 goto fail_3; 1059 } 1060 sc_if->sk_rdata = (struct msk_ring_data *)kva; 1061 bzero(sc_if->sk_rdata, sizeof(struct msk_ring_data)); 1062 1063 /* Try to allocate memory for jumbo buffers. 
*/ 1064 if (msk_alloc_jumbo_mem(sc_if)) { 1065 printf(": jumbo buffer allocation failed\n"); 1066 goto fail_3; 1067 } 1068 1069 ifp = &sc_if->arpcom.ac_if; 1070 ifp->if_softc = sc_if; 1071 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1072 ifp->if_ioctl = msk_ioctl; 1073 ifp->if_start = msk_start; 1074 ifp->if_watchdog = msk_watchdog; 1075 ifp->if_baudrate = 1000000000; 1076 if (sc->sk_type != SK_YUKON_FE && 1077 sc->sk_type != SK_YUKON_FE_P) 1078 ifp->if_hardmtu = SK_JUMBO_MTU; 1079 IFQ_SET_MAXLEN(&ifp->if_snd, MSK_TX_RING_CNT - 1); 1080 IFQ_SET_READY(&ifp->if_snd); 1081 bcopy(sc_if->sk_dev.dv_xname, ifp->if_xname, IFNAMSIZ); 1082 1083 ifp->if_capabilities = IFCAP_VLAN_MTU; 1084 1085 msk_reset(sc_if); 1086 1087 /* 1088 * Do miibus setup. 1089 */ 1090 msk_init_yukon(sc_if); 1091 1092 DPRINTFN(2, ("msk_attach: 1\n")); 1093 1094 sc_if->sk_mii.mii_ifp = ifp; 1095 sc_if->sk_mii.mii_readreg = msk_miibus_readreg; 1096 sc_if->sk_mii.mii_writereg = msk_miibus_writereg; 1097 sc_if->sk_mii.mii_statchg = msk_miibus_statchg; 1098 1099 ifmedia_init(&sc_if->sk_mii.mii_media, 0, 1100 msk_ifmedia_upd, msk_ifmedia_sts); 1101 mii_flags = MIIF_DOPAUSE; 1102 if (sc->sk_fibertype) 1103 mii_flags |= MIIF_HAVEFIBER; 1104 mii_attach(self, &sc_if->sk_mii, 0xffffffff, 0, 1105 MII_OFFSET_ANY, mii_flags); 1106 if (LIST_FIRST(&sc_if->sk_mii.mii_phys) == NULL) { 1107 printf("%s: no PHY found!\n", sc_if->sk_dev.dv_xname); 1108 ifmedia_add(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_MANUAL, 1109 0, NULL); 1110 ifmedia_set(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_MANUAL); 1111 } else 1112 ifmedia_set(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_AUTO); 1113 1114 timeout_set(&sc_if->sk_tick_ch, msk_tick, sc_if); 1115 1116 /* 1117 * Call MI attach routines. 
1118 */ 1119 if_attach(ifp); 1120 ether_ifattach(ifp); 1121 1122 sc_if->sk_sdhook = shutdownhook_establish(mskc_shutdown, sc); 1123 1124 DPRINTFN(2, ("msk_attach: end\n")); 1125 return; 1126 1127 fail_3: 1128 bus_dmamap_destroy(sc->sc_dmatag, sc_if->sk_ring_map); 1129 fail_2: 1130 bus_dmamem_unmap(sc->sc_dmatag, kva, sizeof(struct msk_ring_data)); 1131 fail_1: 1132 bus_dmamem_free(sc->sc_dmatag, &seg, rseg); 1133 fail: 1134 sc->sk_if[sa->skc_port] = NULL; 1135 } 1136 1137 int 1138 msk_detach(struct device *self, int flags) 1139 { 1140 struct sk_if_softc *sc_if = (struct sk_if_softc *)self; 1141 struct sk_softc *sc = sc_if->sk_softc; 1142 struct ifnet *ifp= &sc_if->arpcom.ac_if; 1143 1144 if (sc->sk_if[sc_if->sk_port] == NULL) 1145 return (0); 1146 1147 timeout_del(&sc_if->sk_tick_ch); 1148 1149 /* Detach any PHYs we might have. */ 1150 if (LIST_FIRST(&sc_if->sk_mii.mii_phys) != NULL) 1151 mii_detach(&sc_if->sk_mii, MII_PHY_ANY, MII_OFFSET_ANY); 1152 1153 /* Delete any remaining media. */ 1154 ifmedia_delete_instance(&sc_if->sk_mii.mii_media, IFM_INST_ANY); 1155 1156 if (sc_if->sk_sdhook != NULL) 1157 shutdownhook_disestablish(sc_if->sk_sdhook); 1158 1159 ether_ifdetach(ifp); 1160 if_detach(ifp); 1161 1162 bus_dmamap_destroy(sc->sc_dmatag, sc_if->sk_ring_map); 1163 bus_dmamem_unmap(sc->sc_dmatag, (caddr_t)sc_if->sk_rdata, 1164 sizeof(struct msk_ring_data)); 1165 bus_dmamem_free(sc->sc_dmatag, 1166 &sc_if->sk_ring_seg, sc_if->sk_ring_nseg); 1167 sc->sk_if[sc_if->sk_port] = NULL; 1168 1169 return (0); 1170 } 1171 1172 int 1173 mskcprint(void *aux, const char *pnp) 1174 { 1175 struct skc_attach_args *sa = aux; 1176 1177 if (pnp) 1178 printf("sk port %c at %s", 1179 (sa->skc_port == SK_PORT_A) ? 'A' : 'B', pnp); 1180 else 1181 printf(" port %c", (sa->skc_port == SK_PORT_A) ? 'A' : 'B'); 1182 return (UNCONF); 1183 } 1184 1185 /* 1186 * Attach the interface. Allocate softc structures, do ifmedia 1187 * setup and ethernet/BPF attach. 
1188 */ 1189 void 1190 mskc_attach(struct device *parent, struct device *self, void *aux) 1191 { 1192 struct sk_softc *sc = (struct sk_softc *)self; 1193 struct pci_attach_args *pa = aux; 1194 struct skc_attach_args skca; 1195 pci_chipset_tag_t pc = pa->pa_pc; 1196 pcireg_t command, memtype; 1197 pci_intr_handle_t ih; 1198 const char *intrstr = NULL; 1199 u_int8_t hw, pmd; 1200 char *revstr = NULL; 1201 caddr_t kva; 1202 1203 DPRINTFN(2, ("begin mskc_attach\n")); 1204 1205 /* 1206 * Handle power management nonsense. 1207 */ 1208 command = pci_conf_read(pc, pa->pa_tag, SK_PCI_CAPID) & 0x000000FF; 1209 1210 if (command == 0x01) { 1211 command = pci_conf_read(pc, pa->pa_tag, SK_PCI_PWRMGMTCTRL); 1212 if (command & SK_PSTATE_MASK) { 1213 u_int32_t iobase, membase, irq; 1214 1215 /* Save important PCI config data. */ 1216 iobase = pci_conf_read(pc, pa->pa_tag, SK_PCI_LOIO); 1217 membase = pci_conf_read(pc, pa->pa_tag, SK_PCI_LOMEM); 1218 irq = pci_conf_read(pc, pa->pa_tag, SK_PCI_INTLINE); 1219 1220 /* Reset the power state. */ 1221 printf("%s chip is in D%d power mode " 1222 "-- setting to D0\n", sc->sk_dev.dv_xname, 1223 command & SK_PSTATE_MASK); 1224 command &= 0xFFFFFFFC; 1225 pci_conf_write(pc, pa->pa_tag, 1226 SK_PCI_PWRMGMTCTRL, command); 1227 1228 /* Restore PCI config data. */ 1229 pci_conf_write(pc, pa->pa_tag, SK_PCI_LOIO, iobase); 1230 pci_conf_write(pc, pa->pa_tag, SK_PCI_LOMEM, membase); 1231 pci_conf_write(pc, pa->pa_tag, SK_PCI_INTLINE, irq); 1232 } 1233 } 1234 1235 /* 1236 * Map control/status registers. 
1237 */ 1238 memtype = pci_mapreg_type(pc, pa->pa_tag, SK_PCI_LOMEM); 1239 if (pci_mapreg_map(pa, SK_PCI_LOMEM, memtype, 0, &sc->sk_btag, 1240 &sc->sk_bhandle, NULL, &sc->sk_bsize, 0)) { 1241 printf(": can't map mem space\n"); 1242 return; 1243 } 1244 1245 sc->sc_dmatag = pa->pa_dmat; 1246 1247 sc->sk_type = sk_win_read_1(sc, SK_CHIPVER); 1248 sc->sk_rev = (sk_win_read_1(sc, SK_CONFIG) >> 4); 1249 1250 /* bail out here if chip is not recognized */ 1251 if (!(SK_IS_YUKON2(sc))) { 1252 printf(": unknown chip type: %d\n", sc->sk_type); 1253 goto fail_1; 1254 } 1255 DPRINTFN(2, ("mskc_attach: allocate interrupt\n")); 1256 1257 /* Allocate interrupt */ 1258 if (pci_intr_map(pa, &ih)) { 1259 printf(": couldn't map interrupt\n"); 1260 goto fail_1; 1261 } 1262 1263 intrstr = pci_intr_string(pc, ih); 1264 sc->sk_intrhand = pci_intr_establish(pc, ih, IPL_NET, msk_intr, sc, 1265 self->dv_xname); 1266 if (sc->sk_intrhand == NULL) { 1267 printf(": couldn't establish interrupt"); 1268 if (intrstr != NULL) 1269 printf(" at %s", intrstr); 1270 printf("\n"); 1271 goto fail_1; 1272 } 1273 sc->sk_pc = pc; 1274 1275 if (bus_dmamem_alloc(sc->sc_dmatag, 1276 MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc), PAGE_SIZE, 1277 0, &sc->sk_status_seg, 1, &sc->sk_status_nseg, BUS_DMA_NOWAIT)) { 1278 printf(": can't alloc status buffers\n"); 1279 goto fail_2; 1280 } 1281 1282 if (bus_dmamem_map(sc->sc_dmatag, 1283 &sc->sk_status_seg, sc->sk_status_nseg, 1284 MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc), 1285 &kva, BUS_DMA_NOWAIT)) { 1286 printf(": can't map dma buffers (%lu bytes)\n", 1287 (ulong)(MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc))); 1288 goto fail_3; 1289 } 1290 if (bus_dmamap_create(sc->sc_dmatag, 1291 MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc), 1, 1292 MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc), 0, 1293 BUS_DMA_NOWAIT, &sc->sk_status_map)) { 1294 printf(": can't create dma map\n"); 1295 goto fail_4; 1296 } 1297 if 
(bus_dmamap_load(sc->sc_dmatag, sc->sk_status_map, kva, 1298 MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc), 1299 NULL, BUS_DMA_NOWAIT)) { 1300 printf(": can't load dma map\n"); 1301 goto fail_5; 1302 } 1303 sc->sk_status_ring = (struct msk_status_desc *)kva; 1304 bzero(sc->sk_status_ring, 1305 MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc)); 1306 1307 /* Reset the adapter. */ 1308 mskc_reset(sc); 1309 1310 sc->sk_ramsize = sk_win_read_1(sc, SK_EPROM0) * 4096; 1311 DPRINTFN(2, ("mskc_attach: ramsize=%dK\n", sc->sk_ramsize / 1024)); 1312 1313 pmd = sk_win_read_1(sc, SK_PMDTYPE); 1314 if (pmd == 'L' || pmd == 'S' || pmd == 'P') 1315 sc->sk_fibertype = 1; 1316 1317 switch (sc->sk_type) { 1318 case SK_YUKON_XL: 1319 sc->sk_name = "Yukon-2 XL"; 1320 break; 1321 case SK_YUKON_EC_U: 1322 sc->sk_name = "Yukon-2 EC Ultra"; 1323 break; 1324 case SK_YUKON_EX: 1325 sc->sk_name = "Yukon-2 Extreme"; 1326 break; 1327 case SK_YUKON_EC: 1328 sc->sk_name = "Yukon-2 EC"; 1329 break; 1330 case SK_YUKON_FE: 1331 sc->sk_name = "Yukon-2 FE"; 1332 break; 1333 case SK_YUKON_FE_P: 1334 sc->sk_name = "Yukon-2 FE+"; 1335 break; 1336 case SK_YUKON_SUPR: 1337 sc->sk_name = "Yukon-2 Supreme"; 1338 break; 1339 default: 1340 sc->sk_name = "Yukon (Unknown)"; 1341 } 1342 1343 if (sc->sk_type == SK_YUKON_XL) { 1344 switch (sc->sk_rev) { 1345 case SK_YUKON_XL_REV_A0: 1346 revstr = "A0"; 1347 break; 1348 case SK_YUKON_XL_REV_A1: 1349 revstr = "A1"; 1350 break; 1351 case SK_YUKON_XL_REV_A2: 1352 revstr = "A2"; 1353 break; 1354 case SK_YUKON_XL_REV_A3: 1355 revstr = "A3"; 1356 break; 1357 default: 1358 ; 1359 } 1360 } 1361 1362 if (sc->sk_type == SK_YUKON_EC) { 1363 switch (sc->sk_rev) { 1364 case SK_YUKON_EC_REV_A1: 1365 revstr = "A1"; 1366 break; 1367 case SK_YUKON_EC_REV_A2: 1368 revstr = "A2"; 1369 break; 1370 case SK_YUKON_EC_REV_A3: 1371 revstr = "A3"; 1372 break; 1373 default: 1374 ; 1375 } 1376 } 1377 1378 if (sc->sk_type == SK_YUKON_EC_U) { 1379 switch (sc->sk_rev) { 1380 case 
SK_YUKON_EC_U_REV_A0: 1381 revstr = "A0"; 1382 break; 1383 case SK_YUKON_EC_U_REV_A1: 1384 revstr = "A1"; 1385 break; 1386 default: 1387 ; 1388 } 1389 } 1390 1391 /* Announce the product name. */ 1392 printf(", %s", sc->sk_name); 1393 if (revstr != NULL) 1394 printf(" rev. %s", revstr); 1395 printf(" (0x%x): %s\n", sc->sk_rev, intrstr); 1396 1397 sc->sk_macs = 1; 1398 1399 hw = sk_win_read_1(sc, SK_Y2_HWRES); 1400 if ((hw & SK_Y2_HWRES_LINK_MASK) == SK_Y2_HWRES_LINK_DUAL) { 1401 if ((sk_win_read_1(sc, SK_Y2_CLKGATE) & 1402 SK_Y2_CLKGATE_LINK2_INACTIVE) == 0) 1403 sc->sk_macs++; 1404 } 1405 1406 skca.skc_port = SK_PORT_A; 1407 skca.skc_type = sc->sk_type; 1408 skca.skc_rev = sc->sk_rev; 1409 (void)config_found(&sc->sk_dev, &skca, mskcprint); 1410 1411 if (sc->sk_macs > 1) { 1412 skca.skc_port = SK_PORT_B; 1413 skca.skc_type = sc->sk_type; 1414 skca.skc_rev = sc->sk_rev; 1415 (void)config_found(&sc->sk_dev, &skca, mskcprint); 1416 } 1417 1418 /* Turn on the 'driver is loaded' LED. */ 1419 CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON); 1420 1421 return; 1422 1423 fail_5: 1424 bus_dmamap_destroy(sc->sc_dmatag, sc->sk_status_map); 1425 fail_4: 1426 bus_dmamem_unmap(sc->sc_dmatag, (caddr_t)sc->sk_status_ring, 1427 MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc)); 1428 fail_3: 1429 bus_dmamem_free(sc->sc_dmatag, 1430 &sc->sk_status_seg, sc->sk_status_nseg); 1431 sc->sk_status_nseg = 0; 1432 fail_2: 1433 pci_intr_disestablish(sc->sk_pc, sc->sk_intrhand); 1434 sc->sk_intrhand = NULL; 1435 fail_1: 1436 bus_space_unmap(sc->sk_btag, sc->sk_bhandle, sc->sk_bsize); 1437 sc->sk_bsize = 0; 1438 } 1439 1440 int 1441 mskc_detach(struct device *self, int flags) 1442 { 1443 struct sk_softc *sc = (struct sk_softc *)self; 1444 int rv; 1445 1446 rv = config_detach_children(self, flags); 1447 if (rv != 0) 1448 return (rv); 1449 1450 if (sc->sk_status_nseg > 0) { 1451 bus_dmamap_destroy(sc->sc_dmatag, sc->sk_status_map); 1452 bus_dmamem_unmap(sc->sc_dmatag, (caddr_t)sc->sk_status_ring, 
MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc));
		bus_dmamem_free(sc->sc_dmatag,
		    &sc->sk_status_seg, sc->sk_status_nseg);
	}

	if (sc->sk_intrhand)
		pci_intr_disestablish(sc->sk_pc, sc->sk_intrhand);

	if (sc->sk_bsize > 0)
		bus_space_unmap(sc->sk_btag, sc->sk_bhandle, sc->sk_bsize);

	return(0);
}

/*
 * Map an outgoing mbuf chain into TX descriptors starting at *txidx.
 * On success *txidx is advanced past the descriptors consumed; on
 * failure (no map, DMA load failure, ring too full) returns ENOBUFS
 * and leaves the ring untouched.
 */
int
msk_encap(struct sk_if_softc *sc_if, struct mbuf *m_head, u_int32_t *txidx)
{
	struct sk_softc *sc = sc_if->sk_softc;
	struct msk_tx_desc *f = NULL;
	u_int32_t frag, cur;
	int i;
	struct sk_txmap_entry *entry;
	bus_dmamap_t txmap;

	DPRINTFN(2, ("msk_encap\n"));

	/* Grab a spare DMA map from the free list (returned in txeof). */
	entry = SIMPLEQ_FIRST(&sc_if->sk_txmap_head);
	if (entry == NULL) {
		DPRINTFN(2, ("msk_encap: no txmap available\n"));
		return (ENOBUFS);
	}
	txmap = entry->dmamap;

	cur = frag = *txidx;

#ifdef MSK_DEBUG
	if (mskdebug >= 2)
		msk_dump_mbuf(m_head);
#endif

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	if (bus_dmamap_load_mbuf(sc->sc_dmatag, txmap, m_head,
	    BUS_DMA_NOWAIT)) {
		DPRINTFN(2, ("msk_encap: dmamap failed\n"));
		return (ENOBUFS);
	}

	/* Leave two descriptors of slack so the ring never fully fills. */
	if (txmap->dm_nsegs > (MSK_TX_RING_CNT - sc_if->sk_cdata.sk_tx_cnt - 2)) {
		DPRINTFN(2, ("msk_encap: too few descriptors free\n"));
		bus_dmamap_unload(sc->sc_dmatag, txmap);
		return (ENOBUFS);
	}

	DPRINTFN(2, ("msk_encap: dm_nsegs=%d\n", txmap->dm_nsegs));

	/* Sync the DMA map. */
	bus_dmamap_sync(sc->sc_dmatag, txmap, 0, txmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Fill one descriptor per DMA segment. Only the first gets the
	 * PACKET opcode; its OWN bit is set last (below) so the chip
	 * does not start on a half-built chain.
	 */
	for (i = 0; i < txmap->dm_nsegs; i++) {
		f = &sc_if->sk_rdata->sk_tx_ring[frag];
		f->sk_addr = htole32(txmap->dm_segs[i].ds_addr);
		f->sk_len = htole16(txmap->dm_segs[i].ds_len);
		f->sk_ctl = 0;
		if (i == 0)
			f->sk_opcode = SK_Y2_TXOPC_PACKET;
		else
			f->sk_opcode = SK_Y2_TXOPC_BUFFER | SK_Y2_TXOPC_OWN;
		cur = frag;
		SK_INC(frag, MSK_TX_RING_CNT);
	}

	/* The mbuf and map are tracked at the last descriptor (cur). */
	sc_if->sk_cdata.sk_tx_chain[cur].sk_mbuf = m_head;
	SIMPLEQ_REMOVE_HEAD(&sc_if->sk_txmap_head, link);

	sc_if->sk_cdata.sk_tx_map[cur] = entry;
	sc_if->sk_rdata->sk_tx_ring[cur].sk_ctl |= SK_Y2_TXCTL_LASTFRAG;

	/* Sync descriptors before handing to chip */
	MSK_CDTXSYNC(sc_if, *txidx, txmap->dm_nsegs,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	sc_if->sk_rdata->sk_tx_ring[*txidx].sk_opcode |= SK_Y2_TXOPC_OWN;

	/* Sync first descriptor to hand it off */
	MSK_CDTXSYNC(sc_if, *txidx, 1,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	sc_if->sk_cdata.sk_tx_cnt += txmap->dm_nsegs;

#ifdef MSK_DEBUG
	if (mskdebug >= 2) {
		struct msk_tx_desc *le;
		u_int32_t idx;
		for (idx = *txidx; idx != frag; SK_INC(idx, MSK_TX_RING_CNT)) {
			le = &sc_if->sk_rdata->sk_tx_ring[idx];
			msk_dump_txdesc(le, idx);
		}
	}
#endif

	*txidx = frag;

	DPRINTFN(2, ("msk_encap: completed successfully\n"));

	return (0);
}

/*
 * ifp->if_start handler: drain the send queue into the TX ring and
 * kick the chip's prefetch unit put index.
 */
void
msk_start(struct ifnet *ifp)
{
	struct sk_if_softc *sc_if = ifp->if_softc;
	struct mbuf *m_head = NULL;
	u_int32_t idx = sc_if->sk_cdata.sk_tx_prod;
	int pkts = 0;

	DPRINTFN(2, ("msk_start\n"));

	while (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf == NULL) {
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring.
If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (msk_encap(sc_if, m_head, &idx)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* now we are committed to transmit the packet */
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		pkts++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
	}
	if (pkts == 0)
		return;

	/* Transmit */
	if (idx != sc_if->sk_cdata.sk_tx_prod) {
		sc_if->sk_cdata.sk_tx_prod = idx;
		SK_IF_WRITE_2(sc_if, 1, SK_TXQA1_Y2_PREF_PUTIDX, idx);

		/* Set a timeout in case the chip goes out to lunch. */
		ifp->if_timer = 5;
	}
}

/*
 * Watchdog: if TX completions were lost, reclaim first; if work is
 * still outstanding, count an error and reinitialize the interface.
 */
void
msk_watchdog(struct ifnet *ifp)
{
	struct sk_if_softc *sc_if = ifp->if_softc;

	/*
	 * Reclaim first as there is a possibility of losing Tx completion
	 * interrupts.
	 */
	msk_txeof(sc_if);
	if (sc_if->sk_cdata.sk_tx_cnt != 0) {
		printf("%s: watchdog timeout\n", sc_if->sk_dev.dv_xname);

		ifp->if_oerrors++;

		/* XXX Resets both ports; we shouldn't do that. */
		mskc_reset(sc_if->sk_softc);
		msk_reset(sc_if);
		msk_init(sc_if);
	}
}

/*
 * Shutdown hook: quiesce the controller before reboot/halt.
 */
void
mskc_shutdown(void *v)
{
	struct sk_softc *sc = v;

	DPRINTFN(2, ("msk_shutdown\n"));

	/* Turn off the 'driver is loaded' LED. */
	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);

	/*
	 * Reset the GEnesis controller. Doing this should also
	 * assert the resets on the attached XMAC(s).
	 */
	mskc_reset(sc);
}

/*
 * Validate an RX status word: no error bits set, RXOK set, and the
 * chip's byte count agreeing with the status ring length.
 */
static __inline int
msk_rxvalid(struct sk_softc *sc, u_int32_t stat, u_int32_t len)
{
	if ((stat & (YU_RXSTAT_CRCERR | YU_RXSTAT_LONGERR |
	    YU_RXSTAT_MIIERR | YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC |
	    YU_RXSTAT_JABBER)) != 0 ||
	    (stat & YU_RXSTAT_RXOK) != YU_RXSTAT_RXOK ||
	    YU_RXSTAT_BYTES(stat) != len)
		return (0);

	return (1);
}

/*
 * Handle one received frame: advance the ring indices, validate the
 * frame, replenish the RX slot and pass the packet up the stack.
 */
void
msk_rxeof(struct sk_if_softc *sc_if, u_int16_t len, u_int32_t rxstat)
{
	struct sk_softc *sc = sc_if->sk_softc;
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	struct mbuf *m;
	struct sk_chain *cur_rx;
	int cur, total_len = len;
	bus_dmamap_t dmamap;

	DPRINTFN(2, ("msk_rxeof\n"));

	cur = sc_if->sk_cdata.sk_rx_cons;
	SK_INC(sc_if->sk_cdata.sk_rx_cons, MSK_RX_RING_CNT);
	SK_INC(sc_if->sk_cdata.sk_rx_prod, MSK_RX_RING_CNT);

	/* Sync the descriptor */
	MSK_CDRXSYNC(sc_if, cur, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	cur_rx = &sc_if->sk_cdata.sk_rx_chain[cur];
	dmamap = sc_if->sk_cdata.sk_rx_jumbo_map;

	bus_dmamap_sync(sc_if->sk_softc->sc_dmatag, dmamap, 0,
	    dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

	m = cur_rx->sk_mbuf;
	cur_rx->sk_mbuf = NULL;

	/* Bad frame: recycle the buffer back into the ring and bail. */
	if (total_len < SK_MIN_FRAMELEN ||
	    total_len > SK_JUMBO_FRAMELEN ||
	    msk_rxvalid(sc, rxstat, total_len) == 0) {
		ifp->if_ierrors++;
		msk_newbuf(sc_if, cur, m, dmamap);
		return;
	}

	/*
	 * Try to allocate a new jumbo buffer. If that fails, copy the
	 * packet to mbufs and put the jumbo buffer back in the ring
	 * so it can be re-used. If allocating mbufs fails, then we
	 * have to drop the packet.
1709 */ 1710 if (msk_newbuf(sc_if, cur, NULL, dmamap) == ENOBUFS) { 1711 struct mbuf *m0; 1712 m0 = m_devget(mtod(m, char *) - ETHER_ALIGN, 1713 total_len + ETHER_ALIGN, 0, ifp, NULL); 1714 msk_newbuf(sc_if, cur, m, dmamap); 1715 if (m0 == NULL) { 1716 ifp->if_ierrors++; 1717 return; 1718 } 1719 m_adj(m0, ETHER_ALIGN); 1720 m = m0; 1721 } else { 1722 m->m_pkthdr.rcvif = ifp; 1723 m->m_pkthdr.len = m->m_len = total_len; 1724 } 1725 1726 ifp->if_ipackets++; 1727 1728 #if NBPFILTER > 0 1729 if (ifp->if_bpf) 1730 bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN); 1731 #endif 1732 1733 /* pass it on. */ 1734 ether_input_mbuf(ifp, m); 1735 } 1736 1737 void 1738 msk_txeof(struct sk_if_softc *sc_if) 1739 { 1740 struct sk_softc *sc = sc_if->sk_softc; 1741 struct msk_tx_desc *cur_tx; 1742 struct ifnet *ifp = &sc_if->arpcom.ac_if; 1743 u_int32_t idx, reg, sk_ctl; 1744 struct sk_txmap_entry *entry; 1745 1746 DPRINTFN(2, ("msk_txeof\n")); 1747 1748 if (sc_if->sk_port == SK_PORT_A) 1749 reg = SK_STAT_BMU_TXA1_RIDX; 1750 else 1751 reg = SK_STAT_BMU_TXA2_RIDX; 1752 1753 /* 1754 * Go through our tx ring and free mbufs for those 1755 * frames that have been sent. 
1756 */ 1757 idx = sc_if->sk_cdata.sk_tx_cons; 1758 while (idx != sk_win_read_2(sc, reg)) { 1759 MSK_CDTXSYNC(sc_if, idx, 1, 1760 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1761 1762 cur_tx = &sc_if->sk_rdata->sk_tx_ring[idx]; 1763 sk_ctl = cur_tx->sk_ctl; 1764 #ifdef MSK_DEBUG 1765 if (mskdebug >= 2) 1766 msk_dump_txdesc(cur_tx, idx); 1767 #endif 1768 if (sk_ctl & SK_Y2_TXCTL_LASTFRAG) 1769 ifp->if_opackets++; 1770 if (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf != NULL) { 1771 entry = sc_if->sk_cdata.sk_tx_map[idx]; 1772 1773 m_freem(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf); 1774 sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf = NULL; 1775 1776 bus_dmamap_sync(sc->sc_dmatag, entry->dmamap, 0, 1777 entry->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1778 1779 bus_dmamap_unload(sc->sc_dmatag, entry->dmamap); 1780 SIMPLEQ_INSERT_TAIL(&sc_if->sk_txmap_head, entry, 1781 link); 1782 sc_if->sk_cdata.sk_tx_map[idx] = NULL; 1783 } 1784 sc_if->sk_cdata.sk_tx_cnt--; 1785 SK_INC(idx, MSK_TX_RING_CNT); 1786 } 1787 ifp->if_timer = sc_if->sk_cdata.sk_tx_cnt > 0 ? 
5 : 0; 1788 1789 if (sc_if->sk_cdata.sk_tx_cnt < MSK_TX_RING_CNT - 2) 1790 ifp->if_flags &= ~IFF_OACTIVE; 1791 1792 sc_if->sk_cdata.sk_tx_cons = idx; 1793 } 1794 1795 void 1796 msk_tick(void *xsc_if) 1797 { 1798 struct sk_if_softc *sc_if = xsc_if; 1799 struct mii_data *mii = &sc_if->sk_mii; 1800 int s; 1801 1802 s = splnet(); 1803 mii_tick(mii); 1804 splx(s); 1805 timeout_add_sec(&sc_if->sk_tick_ch, 1); 1806 } 1807 1808 void 1809 msk_intr_yukon(struct sk_if_softc *sc_if) 1810 { 1811 u_int8_t status; 1812 1813 status = SK_IF_READ_1(sc_if, 0, SK_GMAC_ISR); 1814 /* RX overrun */ 1815 if ((status & SK_GMAC_INT_RX_OVER) != 0) { 1816 SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, 1817 SK_RFCTL_RX_FIFO_OVER); 1818 } 1819 /* TX underrun */ 1820 if ((status & SK_GMAC_INT_TX_UNDER) != 0) { 1821 SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, 1822 SK_TFCTL_TX_FIFO_UNDER); 1823 } 1824 1825 DPRINTFN(2, ("msk_intr_yukon status=%#x\n", status)); 1826 } 1827 1828 int 1829 msk_intr(void *xsc) 1830 { 1831 struct sk_softc *sc = xsc; 1832 struct sk_if_softc *sc_if0 = sc->sk_if[SK_PORT_A]; 1833 struct sk_if_softc *sc_if1 = sc->sk_if[SK_PORT_B]; 1834 struct ifnet *ifp0 = NULL, *ifp1 = NULL; 1835 int claimed = 0; 1836 u_int32_t status; 1837 struct msk_status_desc *cur_st; 1838 1839 status = CSR_READ_4(sc, SK_Y2_ISSR2); 1840 if (status == 0) { 1841 CSR_WRITE_4(sc, SK_Y2_ICR, 2); 1842 return (0); 1843 } 1844 1845 status = CSR_READ_4(sc, SK_ISR); 1846 1847 if (sc_if0 != NULL) 1848 ifp0 = &sc_if0->arpcom.ac_if; 1849 if (sc_if1 != NULL) 1850 ifp1 = &sc_if1->arpcom.ac_if; 1851 1852 if (sc_if0 && (status & SK_Y2_IMR_MAC1) && 1853 (ifp0->if_flags & IFF_RUNNING)) { 1854 msk_intr_yukon(sc_if0); 1855 } 1856 1857 if (sc_if1 && (status & SK_Y2_IMR_MAC2) && 1858 (ifp1->if_flags & IFF_RUNNING)) { 1859 msk_intr_yukon(sc_if1); 1860 } 1861 1862 MSK_CDSTSYNC(sc, sc->sk_status_idx, 1863 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1864 cur_st = &sc->sk_status_ring[sc->sk_status_idx]; 1865 1866 while 
(cur_st->sk_opcode & SK_Y2_STOPC_OWN) {
		cur_st->sk_opcode &= ~SK_Y2_STOPC_OWN;
		switch (cur_st->sk_opcode) {
		case SK_Y2_STOPC_RXSTAT:
			/* sk_link selects which port the event is for. */
			msk_rxeof(sc->sk_if[cur_st->sk_link],
			    letoh16(cur_st->sk_len),
			    letoh32(cur_st->sk_status));
			SK_IF_WRITE_2(sc->sk_if[cur_st->sk_link], 0,
			    SK_RXQ1_Y2_PREF_PUTIDX,
			    sc->sk_if[cur_st->sk_link]->sk_cdata.sk_rx_prod);
			break;
		case SK_Y2_STOPC_TXSTAT:
			/* TX status covers both ports; reclaim each. */
			if (sc_if0)
				msk_txeof(sc_if0);
			if (sc_if1)
				msk_txeof(sc_if1);
			break;
		default:
			printf("opcode=0x%x\n", cur_st->sk_opcode);
			break;
		}
		SK_INC(sc->sk_status_idx, MSK_STATUS_RING_CNT);

		MSK_CDSTSYNC(sc, sc->sk_status_idx,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		cur_st = &sc->sk_status_ring[sc->sk_status_idx];
	}

	if (status & SK_Y2_IMR_BMU) {
		CSR_WRITE_4(sc, SK_STAT_BMU_CSR, SK_STAT_BMU_IRQ_CLEAR);
		claimed = 1;
	}

	CSR_WRITE_4(sc, SK_Y2_ICR, 2);

	/* Restart transmit if packets queued up while we were busy. */
	if (ifp0 != NULL && !IFQ_IS_EMPTY(&ifp0->if_snd))
		msk_start(ifp0);
	if (ifp1 != NULL && !IFQ_IS_EMPTY(&ifp1->if_snd))
		msk_start(ifp1);

	return (claimed);
}

/*
 * Program the Yukon MAC for this port: MIB counters, RX/TX
 * parameters, station address, filters, and the MAC FIFOs.
 */
void
msk_init_yukon(struct sk_if_softc *sc_if)
{
	u_int32_t v;
	u_int16_t reg;
	struct sk_softc *sc;
	int i;

	sc = sc_if->sk_softc;

	DPRINTFN(2, ("msk_init_yukon: start: sk_csr=%#x\n",
	    CSR_READ_4(sc_if->sk_softc, SK_CSR)));

	DPRINTFN(6, ("msk_init_yukon: 1\n"));

	DPRINTFN(3, ("msk_init_yukon: gmac_ctrl=%#x\n",
	    SK_IF_READ_4(sc_if, 0, SK_GMAC_CTRL)));

	DPRINTFN(6, ("msk_init_yukon: 3\n"));

	/* unused read of the interrupt source register */
	DPRINTFN(6, ("msk_init_yukon: 4\n"));
	SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);

	DPRINTFN(6, ("msk_init_yukon: 4a\n"));
	reg = SK_YU_READ_2(sc_if, YUKON_PAR);
	DPRINTFN(6, ("msk_init_yukon: YUKON_PAR=%#x\n", reg));

	/* MIB Counter Clear Mode set */
	reg |= YU_PAR_MIB_CLR;
	DPRINTFN(6, ("msk_init_yukon: YUKON_PAR=%#x\n", reg));
	DPRINTFN(6, ("msk_init_yukon: 4b\n"));
	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);

	/* MIB Counter Clear Mode clear */
	DPRINTFN(6, ("msk_init_yukon: 5\n"));
	reg &= ~YU_PAR_MIB_CLR;
	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);

	/* receive control reg */
	DPRINTFN(6, ("msk_init_yukon: 7\n"));
	SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR);

	/* transmit parameter register */
	DPRINTFN(6, ("msk_init_yukon: 8\n"));
	SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) |
	    YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) );

	/* serial mode register */
	DPRINTFN(6, ("msk_init_yukon: 9\n"));
	reg = YU_SMR_DATA_BLIND(0x1c) |
	    YU_SMR_MFL_VLAN |
	    YU_SMR_IPG_DATA(0x1e);

	/* FE/FE+ parts do not support jumbo frames (see msk_attach()). */
	if (sc->sk_type != SK_YUKON_FE &&
	    sc->sk_type != SK_YUKON_FE_P)
		reg |= YU_SMR_MFL_JUMBO;

	SK_YU_WRITE_2(sc_if, YUKON_SMR, reg);

	DPRINTFN(6, ("msk_init_yukon: 10\n"));
	/* Setup Yukon's address */
	for (i = 0; i < 3; i++) {
		/* Write Source Address 1 (unicast filter) */
		SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4,
		    sc_if->arpcom.ac_enaddr[i * 2] |
		    sc_if->arpcom.ac_enaddr[i * 2 + 1] << 8);
	}

	/* Copy the second station address from the window registers. */
	for (i = 0; i < 3; i++) {
		reg = sk_win_read_2(sc_if->sk_softc,
		    SK_MAC1_0 + i * 2 + sc_if->sk_port * 8);
		SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4, reg);
	}

	/* Set promiscuous mode */
	msk_setpromisc(sc_if);

	/* Set multicast filter */
	DPRINTFN(6, ("msk_init_yukon: 11\n"));
	msk_setmulti(sc_if);

	/* enable interrupt mask for counter overflows */
	DPRINTFN(6, ("msk_init_yukon: 12\n"));
	SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0);
	SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0);
	SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0);

	/* Configure RX MAC FIFO Flush Mask */
	v = YU_RXSTAT_FOFL | YU_RXSTAT_CRCERR | YU_RXSTAT_MIIERR |
	    YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC | YU_RXSTAT_RUNT |
	    YU_RXSTAT_JABBER;
	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_MASK, v);

	/* Configure RX MAC FIFO */
	SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR);
	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_OPERATION_ON |
	    SK_RFCTL_FIFO_FLUSH_ON);

	/* Increase flush threshould to 64 bytes */
	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_THRESHOLD,
	    SK_RFCTL_FIFO_THRESHOLD + 1);

	/* Configure TX MAC FIFO */
	SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR);
	SK_IF_WRITE_2(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON);

#if 1
	SK_YU_WRITE_2(sc_if, YUKON_GPCR, YU_GPCR_TXEN | YU_GPCR_RXEN);
#endif
	DPRINTFN(6, ("msk_init_yukon: end\n"));
}

/*
 * Note that to properly initialize any part of the GEnesis chip,
 * you first have to take it out of reset mode.
 */
void
msk_init(void *xsc_if)
{
	struct sk_if_softc *sc_if = xsc_if;
	struct sk_softc *sc = sc_if->sk_softc;
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	struct mii_data *mii = &sc_if->sk_mii;
	int s;

	DPRINTFN(2, ("msk_init\n"));

	s = splnet();

	/* Cancel pending I/O and free all RX/TX buffers.
*/
	msk_stop(sc_if);

	/* Configure I2C registers */

	/* Configure XMAC(s) */
	msk_init_yukon(sc_if);
	mii_mediachg(mii);

	/* Configure transmit arbiter(s) */
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_ON);
#if 0
	SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON);
#endif

	/*
	 * Configure RAMbuffers: carve the on-chip SRAM window computed
	 * in msk_attach() into the RX and TX RAM buffers.
	 */
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);

	SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_CTLTST, SK_RBCTL_STORENFWD_ON);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_START, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_WR_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_RD_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_END, sc_if->sk_tx_ramend);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_CTLTST, SK_RBCTL_ON);

	/*
	 * Configure BMUs.
	 * NOTE(review): the 0x16/0xd28/0x80/0x600 constants are raw
	 * CSR values carried over from the vendor code; confirm their
	 * meaning against the Yukon-2 documentation.
	 */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, 0x00000016);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, 0x00000d28);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, 0x00000080);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_WATERMARK, 0x00000600);

	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_BMU_CSR, 0x00000016);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_BMU_CSR, 0x00000d28);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_BMU_CSR, 0x00000080);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_WATERMARK, 0x00000600);

	/* Make sure the sync transmit queue is disabled. */
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET);

	/* Init descriptors */
	if (msk_init_rx_ring(sc_if) == ENOBUFS) {
		printf("%s: initialization failed: no "
		    "memory for rx buffers\n", sc_if->sk_dev.dv_xname);
		msk_stop(sc_if);
		splx(s);
		return;
	}

	if (msk_init_tx_ring(sc_if) == ENOBUFS) {
		printf("%s: initialization failed: no "
		    "memory for tx buffers\n", sc_if->sk_dev.dv_xname);
		msk_stop(sc_if);
		splx(s);
		return;
	}

	/* Initialize prefetch engine. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_CSR, 0x00000001);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_CSR, 0x00000002);
	SK_IF_WRITE_2(sc_if, 0, SK_RXQ1_Y2_PREF_LIDX, MSK_RX_RING_CNT - 1);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_ADDRLO,
	    MSK_RX_RING_ADDR(sc_if, 0));
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_ADDRHI,
	    (u_int64_t)MSK_RX_RING_ADDR(sc_if, 0) >> 32);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_CSR, 0x00000008);
	SK_IF_READ_4(sc_if, 0, SK_RXQ1_Y2_PREF_CSR);

	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_CSR, 0x00000001);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_CSR, 0x00000002);
	SK_IF_WRITE_2(sc_if, 1, SK_TXQA1_Y2_PREF_LIDX, MSK_TX_RING_CNT - 1);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_ADDRLO,
	    MSK_TX_RING_ADDR(sc_if, 0));
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_ADDRHI,
	    (u_int64_t)MSK_TX_RING_ADDR(sc_if, 0) >> 32);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_CSR, 0x00000008);
	SK_IF_READ_4(sc_if, 1, SK_TXQA1_Y2_PREF_CSR);

	SK_IF_WRITE_2(sc_if, 0, SK_RXQ1_Y2_PREF_PUTIDX,
	    sc_if->sk_cdata.sk_rx_prod);

	/* Configure interrupt handling */
	if (sc_if->sk_port == SK_PORT_A)
		sc->sk_intrmask |= SK_Y2_INTRS1;
	else
		sc->sk_intrmask |= SK_Y2_INTRS2;
	sc->sk_intrmask |= SK_Y2_IMR_BMU;
	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	timeout_add_sec(&sc_if->sk_tick_ch, 1);

	splx(s);
}

/*
 * Stop the interface: halt the MAC, FIFOs, BMUs and RAM buffers,
 * disable this port's interrupts and free all queued mbufs and
 * TX DMA maps.
 */
void
msk_stop(struct sk_if_softc *sc_if)
{
	struct sk_softc *sc = sc_if->sk_softc;
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	struct sk_txmap_entry *dma;
	int i;

	DPRINTFN(2, ("msk_stop\n"));

	timeout_del(&sc_if->sk_tick_ch);

	ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE);

	/* Stop transfer of Tx descriptors */

	/* Stop transfer of Rx descriptors */

	/* Turn off various components of this interface. */
	SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC);
	SK_IF_WRITE_1(sc_if,0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET);
	SK_IF_WRITE_1(sc_if,0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_BMU_CSR, SK_TXBMU_OFFLINE);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
	SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_TXLEDCTL_COUNTER_STOP);
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);

	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_CSR, 0x00000001);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_CSR, 0x00000001);

	/* Disable interrupts */
	if (sc_if->sk_port == SK_PORT_A)
		sc->sk_intrmask &= ~SK_Y2_INTRS1;
	else
		sc->sk_intrmask &= ~SK_Y2_INTRS2;
	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	SK_XM_READ_2(sc_if, XM_ISR);
	SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);

	/* Free RX and TX mbufs still in the queues. */
	for (i = 0; i < MSK_RX_RING_CNT; i++) {
		if (sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf != NULL) {
			m_freem(sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf);
			sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf = NULL;
		}
	}

	for (i = 0; i < MSK_TX_RING_CNT; i++) {
		if (sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf != NULL) {
			m_freem(sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf);
			sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf = NULL;
			/* Return the packet's DMA map to the free list. */
			SIMPLEQ_INSERT_HEAD(&sc_if->sk_txmap_head,
			    sc_if->sk_cdata.sk_tx_map[i], link);
			sc_if->sk_cdata.sk_tx_map[i] = 0;
		}
	}

	/* Destroy all TX DMA maps left on the free list. */
	while ((dma = SIMPLEQ_FIRST(&sc_if->sk_txmap_head))) {
		SIMPLEQ_REMOVE_HEAD(&sc_if->sk_txmap_head, link);
		bus_dmamap_destroy(sc->sc_dmatag, dma->dmamap);
		free(dma, M_DEVBUF);
	}
}

/* Autoconf glue for the controller (mskc) and the port (msk) devices. */
struct cfattach mskc_ca = {
	sizeof(struct sk_softc), mskc_probe, mskc_attach, mskc_detach
};

struct cfdriver mskc_cd = {
	0, "mskc", DV_DULL
};

struct cfattach msk_ca = {
	sizeof(struct sk_if_softc), msk_probe, msk_attach, msk_detach
};

struct cfdriver msk_cd = {
	0, "msk", DV_IFNET
};

#ifdef MSK_DEBUG
void
msk_dump_txdesc(struct msk_tx_desc *le, int idx)
{
#define DESC_PRINT(X)					\
	if (X)						\
		printf("txdesc[%d].
#X "=%#x\n", \ 2234 idx, X); 2235 2236 DESC_PRINT(letoh32(le->sk_addr)); 2237 DESC_PRINT(letoh16(le->sk_len)); 2238 DESC_PRINT(le->sk_ctl); 2239 DESC_PRINT(le->sk_opcode); 2240 #undef DESC_PRINT 2241 } 2242 2243 void 2244 msk_dump_bytes(const char *data, int len) 2245 { 2246 int c, i, j; 2247 2248 for (i = 0; i < len; i += 16) { 2249 printf("%08x ", i); 2250 c = len - i; 2251 if (c > 16) c = 16; 2252 2253 for (j = 0; j < c; j++) { 2254 printf("%02x ", data[i + j] & 0xff); 2255 if ((j & 0xf) == 7 && j > 0) 2256 printf(" "); 2257 } 2258 2259 for (; j < 16; j++) 2260 printf(" "); 2261 printf(" "); 2262 2263 for (j = 0; j < c; j++) { 2264 int ch = data[i + j] & 0xff; 2265 printf("%c", ' ' <= ch && ch <= '~' ? ch : ' '); 2266 } 2267 2268 printf("\n"); 2269 2270 if (c < 16) 2271 break; 2272 } 2273 } 2274 2275 void 2276 msk_dump_mbuf(struct mbuf *m) 2277 { 2278 int count = m->m_pkthdr.len; 2279 2280 printf("m=%#lx, m->m_pkthdr.len=%#d\n", m, m->m_pkthdr.len); 2281 2282 while (count > 0 && m) { 2283 printf("m=%#lx, m->m_data=%#lx, m->m_len=%d\n", 2284 m, m->m_data, m->m_len); 2285 msk_dump_bytes(mtod(m, char *), m->m_len); 2286 2287 count -= m->m_len; 2288 m = m->m_next; 2289 } 2290 } 2291 #endif 2292