1 /* $OpenBSD: if_msk.c,v 1.131 2018/01/06 03:11:04 dlg Exp $ */ 2 3 /* 4 * Copyright (c) 1997, 1998, 1999, 2000 5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Bill Paul. 18 * 4. Neither the name of the author nor the names of any co-contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 
33 * 34 * $FreeBSD: /c/ncvs/src/sys/pci/if_sk.c,v 1.20 2000/04/22 02:16:37 wpaul Exp $ 35 */ 36 37 /* 38 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu> 39 * 40 * Permission to use, copy, modify, and distribute this software for any 41 * purpose with or without fee is hereby granted, provided that the above 42 * copyright notice and this permission notice appear in all copies. 43 * 44 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 45 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 46 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 47 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 48 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 49 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 50 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 51 */ 52 53 /* 54 * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports 55 * the SK-984x series adapters, both single port and dual port. 56 * References: 57 * The XaQti XMAC II datasheet, 58 * http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf 59 * The SysKonnect GEnesis manual, http://www.syskonnect.com 60 * 61 * Note: XaQti has been acquired by Vitesse, and Vitesse does not have the 62 * XMAC II datasheet online. I have put my copy at people.freebsd.org as a 63 * convenience to others until Vitesse corrects this problem: 64 * 65 * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf 66 * 67 * Written by Bill Paul <wpaul@ee.columbia.edu> 68 * Department of Electrical Engineering 69 * Columbia University, New York City 70 */ 71 72 /* 73 * The SysKonnect gigabit ethernet adapters consist of two main 74 * components: the SysKonnect GEnesis controller chip and the XaQti Corp. 75 * XMAC II gigabit ethernet MAC. 
The XMAC provides all of the MAC 76 * components and a PHY while the GEnesis controller provides a PCI 77 * interface with DMA support. Each card may have between 512K and 78 * 2MB of SRAM on board depending on the configuration. 79 * 80 * The SysKonnect GEnesis controller can have either one or two XMAC 81 * chips connected to it, allowing single or dual port NIC configurations. 82 * SysKonnect has the distinction of being the only vendor on the market 83 * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs, 84 * dual DMA queues, packet/MAC/transmit arbiters and direct access to the 85 * XMAC registers. This driver takes advantage of these features to allow 86 * both XMACs to operate as independent interfaces. 87 */ 88 89 #include "bpfilter.h" 90 91 #include <sys/param.h> 92 #include <sys/systm.h> 93 #include <sys/sockio.h> 94 #include <sys/mbuf.h> 95 #include <sys/malloc.h> 96 #include <sys/kernel.h> 97 #include <sys/socket.h> 98 #include <sys/timeout.h> 99 #include <sys/device.h> 100 #include <sys/queue.h> 101 102 #include <net/if.h> 103 104 #include <netinet/in.h> 105 #include <netinet/if_ether.h> 106 107 #include <net/if_media.h> 108 109 #if NBPFILTER > 0 110 #include <net/bpf.h> 111 #endif 112 113 #include <dev/mii/mii.h> 114 #include <dev/mii/miivar.h> 115 116 #include <dev/pci/pcireg.h> 117 #include <dev/pci/pcivar.h> 118 #include <dev/pci/pcidevs.h> 119 120 #include <dev/pci/if_skreg.h> 121 #include <dev/pci/if_mskvar.h> 122 123 int mskc_probe(struct device *, void *, void *); 124 void mskc_attach(struct device *, struct device *self, void *aux); 125 int mskc_detach(struct device *, int); 126 int mskc_activate(struct device *, int); 127 void mskc_reset(struct sk_softc *); 128 int msk_probe(struct device *, void *, void *); 129 void msk_attach(struct device *, struct device *self, void *aux); 130 int msk_detach(struct device *, int); 131 int msk_activate(struct device *, int); 132 void msk_reset(struct sk_if_softc *); 133 int 
mskcprint(void *, const char *);
int msk_intr(void *);
void msk_intr_yukon(struct sk_if_softc *);
static inline int msk_rxvalid(struct sk_softc *, u_int32_t, u_int32_t);
void msk_rxeof(struct sk_if_softc *, uint16_t, uint32_t);
void msk_txeof(struct sk_if_softc *);
static unsigned int msk_encap(struct sk_if_softc *, struct mbuf *, uint32_t);
void msk_start(struct ifnet *);
int msk_ioctl(struct ifnet *, u_long, caddr_t);
void msk_init(void *);
void msk_init_yukon(struct sk_if_softc *);
void msk_stop(struct sk_if_softc *, int);
void msk_watchdog(struct ifnet *);
int msk_ifmedia_upd(struct ifnet *);
void msk_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int msk_newbuf(struct sk_if_softc *);
int msk_init_rx_ring(struct sk_if_softc *);
int msk_init_tx_ring(struct sk_if_softc *);
void msk_fill_rx_ring(struct sk_if_softc *);

/* MII bus glue handed to mii_attach() in msk_attach(). */
int msk_miibus_readreg(struct device *, int, int);
void msk_miibus_writereg(struct device *, int, int, int);
void msk_miibus_statchg(struct device *);

void msk_iff(struct sk_if_softc *);
void msk_tick(void *);
void msk_fill_rx_tick(void *);

/*
 * Debug printf helpers: DPRINTFN(n, x) fires only when the global
 * mskdebug level is at least n.  Both compile away without MSK_DEBUG.
 */
#ifdef MSK_DEBUG
#define DPRINTF(x)	if (mskdebug) printf x
#define DPRINTFN(n,x)	if (mskdebug >= (n)) printf x
int	mskdebug = 0;

void msk_dump_txdesc(struct msk_tx_desc *, int);
void msk_dump_mbuf(struct mbuf *);
void msk_dump_bytes(const char *, int);
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

/* supported device vendors */
const struct pci_matchid mskc_devices[] = {
	{ PCI_VENDOR_DLINK,		PCI_PRODUCT_DLINK_DGE550SX },
	{ PCI_VENDOR_DLINK,		PCI_PRODUCT_DLINK_DGE550T_B1 },
	{ PCI_VENDOR_DLINK,		PCI_PRODUCT_DLINK_DGE560SX },
	{ PCI_VENDOR_DLINK,		PCI_PRODUCT_DLINK_DGE560T },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8021CU },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8021X },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8022CU },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8022X },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8035 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8036 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8038 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8039 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8040 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8040T },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8042 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8048 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8050 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8052 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8053 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8055 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8055_2 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8056 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8057 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8058 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8059 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8061CU },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8061X },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8062CU },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8062X },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8070 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8071 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8072 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8075 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_8079 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_C032 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_C033 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_C034 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_C036 },
	{ PCI_VENDOR_MARVELL,		PCI_PRODUCT_MARVELL_YUKON_C042 },
	{ PCI_VENDOR_SCHNEIDERKOCH,
PCI_PRODUCT_SCHNEIDERKOCH_SK9Exx },
	{ PCI_VENDOR_SCHNEIDERKOCH,	PCI_PRODUCT_SCHNEIDERKOCH_SK9Sxx }
};

/*
 * Thin wrappers around the CSR_* register access macros.
 */
static inline u_int32_t
sk_win_read_4(struct sk_softc *sc, u_int32_t reg)
{
	return CSR_READ_4(sc, reg);
}

static inline u_int16_t
sk_win_read_2(struct sk_softc *sc, u_int32_t reg)
{
	return CSR_READ_2(sc, reg);
}

static inline u_int8_t
sk_win_read_1(struct sk_softc *sc, u_int32_t reg)
{
	return CSR_READ_1(sc, reg);
}

static inline void
sk_win_write_4(struct sk_softc *sc, u_int32_t reg, u_int32_t x)
{
	CSR_WRITE_4(sc, reg, x);
}

static inline void
sk_win_write_2(struct sk_softc *sc, u_int32_t reg, u_int16_t x)
{
	CSR_WRITE_2(sc, reg, x);
}

static inline void
sk_win_write_1(struct sk_softc *sc, u_int32_t reg, u_int8_t x)
{
	CSR_WRITE_1(sc, reg, x);
}

/*
 * Read a PHY register through the Yukon SMI interface.  Issues the
 * read, then busy-waits (up to SK_TIMEOUT iterations of DELAY(1)) for
 * the read-valid bit.  Returns the register value, or 0 if the PHY
 * never signalled completion.
 */
int
msk_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
	u_int16_t val;
	int i;

	SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
	    YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ);

	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		val = SK_YU_READ_2(sc_if, YUKON_SMICR);
		if (val & YU_SMICR_READ_VALID)
			break;
	}

	if (i == SK_TIMEOUT) {
		printf("%s: phy failed to come ready\n",
		    sc_if->sk_dev.dv_xname);
		return (0);
	}

	DPRINTFN(9, ("msk_miibus_readreg: i=%d, timeout=%d\n", i,
	    SK_TIMEOUT));

	val = SK_YU_READ_2(sc_if, YUKON_SMIDR);

	DPRINTFN(9, ("msk_miibus_readreg phy=%d, reg=%#x, val=%#x\n",
	    phy, reg, val));

	return (val);
}

/*
 * Write a PHY register through the Yukon SMI interface: load the data
 * register first, kick off the write, then poll until the busy bit
 * clears (bounded by SK_TIMEOUT).
 */
void
msk_miibus_writereg(struct device *dev, int phy, int reg, int val)
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
	int i;

	DPRINTFN(9, ("msk_miibus_writereg phy=%d reg=%#x val=%#x\n",
	    phy, reg, val));

	SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val);
	SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
	    YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE);

	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		if (!(SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY))
			break;
	}

	if (i == SK_TIMEOUT)
		printf("%s: phy write timed out\n", sc_if->sk_dev.dv_xname);
}

/*
 * MII status change callback: mirror the negotiated media settings
 * into the GMAC General Purpose Control Register (GPCR).  Only the
 * TX/RX enable bits are preserved from the current register value.
 * When the media is forced (not IFM_AUTO) -- or always on Yukon FE+,
 * speed, duplex and flow control are programmed by hand and their
 * auto-negotiated updates disabled.
 */
void
msk_miibus_statchg(struct device *dev)
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
	struct mii_data *mii = &sc_if->sk_mii;
	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
	int gpcr;

	gpcr = SK_YU_READ_2(sc_if, YUKON_GPCR);
	gpcr &= (YU_GPCR_TXEN | YU_GPCR_RXEN);

	if (IFM_SUBTYPE(ife->ifm_media) != IFM_AUTO ||
	    sc_if->sk_softc->sk_type == SK_YUKON_FE_P) {
		/* Set speed. */
		gpcr |= YU_GPCR_SPEED_DIS;
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_1000_SX:
		case IFM_1000_LX:
		case IFM_1000_CX:
		case IFM_1000_T:
			gpcr |= (YU_GPCR_GIG | YU_GPCR_SPEED);
			break;
		case IFM_100_TX:
			gpcr |= YU_GPCR_SPEED;
			break;
		}

		/* Set duplex. */
		gpcr |= YU_GPCR_DPLX_DIS;
		if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
			gpcr |= YU_GPCR_DUPLEX;

		/* Disable flow control. */
		gpcr |= YU_GPCR_FCTL_DIS;
		gpcr |= (YU_GPCR_FCTL_TX_DIS | YU_GPCR_FCTL_RX_DIS);
	}

	SK_YU_WRITE_2(sc_if, YUKON_GPCR, gpcr);

	DPRINTFN(9, ("msk_miibus_statchg: gpcr=%x\n",
	    SK_YU_READ_2(((struct sk_if_softc *)dev), YUKON_GPCR)));
}

/*
 * Program the GMAC receive filter (RCR plus the four 16-bit multicast
 * hash registers MCAH1..MCAH4) from the interface flags and the
 * current multicast list.
 */
void
msk_iff(struct sk_if_softc *sc_if)
{
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	struct arpcom *ac = &sc_if->arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t hashes[2];
	u_int16_t rcr;
	int h;

	rcr = SK_YU_READ_2(sc_if, YUKON_RCR);
	rcr &= ~(YU_RCR_MUFLEN | YU_RCR_UFLEN);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept frames destined to our station address.
	 */
	rcr |= YU_RCR_UFLEN;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		/* Promisc or address ranges: open the hash filter fully. */
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rcr &= ~YU_RCR_UFLEN;
		else
			rcr |= YU_RCR_MUFLEN;
		hashes[0] = hashes[1] = 0xFFFFFFFF;
	} else {
		rcr |= YU_RCR_MUFLEN;
		/* Program new filter. */
		bzero(hashes, sizeof(hashes));

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			/* Hash is the low SK_HASH_BITS of the CRC. */
			h = ether_crc32_be(enm->enm_addrlo,
			    ETHER_ADDR_LEN) & ((1 << SK_HASH_BITS) - 1);

			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff);
	SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff);
	SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff);
	SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff);
	SK_YU_WRITE_2(sc_if, YUKON_RCR, rcr);
}

/*
 * Reset the receive descriptor ring and refill it with mbufs.
 * Descriptor 0 seeds the high 32 address bits to zero (ADDR64
 * opcode), which is why sk_rx_prod starts at 1.  Always returns 0.
 */
int
msk_init_rx_ring(struct sk_if_softc *sc_if)
{
	struct msk_ring_data *rd = sc_if->sk_rdata;
	struct msk_rx_desc *r;

	memset(rd->sk_rx_ring, 0, sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT);

	r = &rd->sk_rx_ring[0];
	r->sk_addr = htole32(0);
	r->sk_opcode = SK_Y2_RXOPC_OWN | SK_Y2_RXOPC_ADDR64;

	sc_if->sk_cdata.sk_rx_prod = 1;
	sc_if->sk_cdata.sk_rx_cons = 0;
	sc_if->sk_cdata.sk_rx_hiaddr = 0;

	/*
	 * up to two ring entries per packet, so the effective ring size is
	 * halved
	 */
	if_rxr_init(&sc_if->sk_cdata.sk_rx_ring, 2, (MSK_RX_RING_CNT/2) - 1);

	msk_fill_rx_ring(sc_if);
	return (0);
}

/*
 * Reset the transmit descriptor ring: create a DMA map for every slot,
 * seed the high 32 address bits with an ADDR64 descriptor at slot 0
 * (so sk_tx_prod starts at 1), and sync the ring for the chip.
 * Returns 0 on success or ENOBUFS if a DMA map cannot be created.
 */
int
msk_init_tx_ring(struct sk_if_softc *sc_if)
{
	struct sk_softc *sc = sc_if->sk_softc;
	struct msk_ring_data *rd = sc_if->sk_rdata;
	struct msk_tx_desc *t;
	int i;

	memset(rd->sk_tx_ring, 0, sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT);

	for (i = 0; i < MSK_TX_RING_CNT; i++) {
		if
(bus_dmamap_create(sc->sc_dmatag, sc_if->sk_pktlen,
		    SK_NTXSEG, sc_if->sk_pktlen, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
		    &sc_if->sk_cdata.sk_tx_maps[i]))
			return (ENOBUFS);
	}

	t = &rd->sk_tx_ring[0];
	t->sk_addr = htole32(0);
	t->sk_opcode = SK_Y2_TXOPC_OWN | SK_Y2_TXOPC_ADDR64;

	sc_if->sk_cdata.sk_tx_prod = 1;
	sc_if->sk_cdata.sk_tx_cons = 0;
	sc_if->sk_cdata.sk_tx_hiaddr = 0;

	MSK_CDTXSYNC(sc_if, 0, MSK_TX_RING_CNT, BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * Allocate one receive mbuf cluster, DMA-load it, and post it to the
 * rx ring at sk_rx_prod.  Consumes one descriptor for the packet plus,
 * when the upper 32 bits of the DMA address differ from the last
 * posted value, an extra ADDR64 descriptor to update them.  The DMA
 * map of the slot actually used for the packet is swapped back to the
 * head slot so the map array stays in step with the ring.
 * Returns 1 on success, 0 if no mbuf was available or the load failed.
 */
static int
msk_newbuf(struct sk_if_softc *sc_if)
{
	struct msk_ring_data *rd = sc_if->sk_rdata;
	struct msk_rx_desc *r;
	struct mbuf *m;
	bus_dmamap_t map;
	uint64_t addr;
	uint32_t prod, head;
	uint32_t hiaddr;
	unsigned int pktlen = sc_if->sk_pktlen + ETHER_ALIGN;

	m = MCLGETI(NULL, M_DONTWAIT, NULL, pktlen);
	if (m == NULL)
		return (0);
	/* ETHER_ALIGN keeps the IP header 32-bit aligned after the
	 * 14-byte ethernet header. */
	m->m_len = m->m_pkthdr.len = pktlen;
	m_adj(m, ETHER_ALIGN);

	prod = sc_if->sk_cdata.sk_rx_prod;
	map = sc_if->sk_cdata.sk_rx_maps[prod];

	if (bus_dmamap_load_mbuf(sc_if->sk_softc->sc_dmatag, map, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (0);
	}

	bus_dmamap_sync(sc_if->sk_softc->sc_dmatag, map, 0,
	    map->dm_mapsize, BUS_DMASYNC_PREREAD);

	head = prod;

	/* high 32 bits of address */
	addr = map->dm_segs[0].ds_addr;
	hiaddr = addr >> 32;
	if (sc_if->sk_cdata.sk_rx_hiaddr != hiaddr) {
		/* Post an ADDR64 descriptor to switch the high bits. */
		r = &rd->sk_rx_ring[prod];
		htolem32(&r->sk_addr, hiaddr);
		r->sk_len = htole16(0);
		r->sk_ctl = 0;
		r->sk_opcode = SK_Y2_RXOPC_OWN | SK_Y2_RXOPC_ADDR64;

		sc_if->sk_cdata.sk_rx_hiaddr = hiaddr;

		//printf("%s: addr64 @%u (%08x)\n", __func__, prod, hiaddr);
		SK_INC(prod, MSK_RX_RING_CNT);
	}

	r = &rd->sk_rx_ring[prod];
	htolem32(&r->sk_addr, addr);
	htolem16(&r->sk_len, map->dm_segs[0].ds_len);
	r->sk_ctl = 0;
	r->sk_opcode = SK_Y2_RXOPC_OWN | SK_Y2_RXOPC_PACKET;
	//printf("%s: packet @%u\n", __func__, prod);

	/* Keep the map array aligned with the ring slots actually used. */
	sc_if->sk_cdata.sk_rx_maps[head] = sc_if->sk_cdata.sk_rx_maps[prod];
	sc_if->sk_cdata.sk_rx_maps[prod] = map;

	sc_if->sk_cdata.sk_rx_mbuf[prod] = m;

	SK_INC(prod, MSK_RX_RING_CNT);
	//printf("%s: prod %u\n", __func__, prod);
	sc_if->sk_cdata.sk_rx_prod = prod;

	return (1);
}

/*
 * Set media options.
 */
int
msk_ifmedia_upd(struct ifnet *ifp)
{
	struct sk_if_softc *sc_if = ifp->if_softc;

	mii_mediachg(&sc_if->sk_mii);
	return (0);
}

/*
 * Report current media status.
 */
void
msk_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct sk_if_softc *sc_if = ifp->if_softc;

	mii_pollstat(&sc_if->sk_mii);
	ifmr->ifm_active = sc_if->sk_mii.mii_media_active;
	ifmr->ifm_status = sc_if->sk_mii.mii_media_status;
}

/*
 * Interface ioctl handler.  Runs at splnet to serialize against the
 * interrupt path; unhandled commands fall through to ether_ioctl(),
 * and an ENETRESET result from any path reprograms the rx filter.
 */
int
msk_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct sk_if_softc *sc_if = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct mii_data *mii;
	int s, error = 0;

	s = splnet();

	switch(command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			msk_init(sc_if);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				msk_init(sc_if);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				msk_stop(sc_if, 0);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = &sc_if->sk_mii;
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;

	case SIOCGIFRXR:
		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
		    NULL, sc_if->sk_pktlen, &sc_if->sk_cdata.sk_rx_ring);
		break;

	default:
		error = ether_ioctl(ifp, &sc_if->arpcom, command, data);
	}

	if (error == ENETRESET)
{
		/* Flag change while running: just reload the rx filter. */
		if (ifp->if_flags & IFF_RUNNING)
			msk_iff(sc_if);
		error = 0;
	}

	splx(s);
	return (error);
}

/*
 * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
int
mskc_probe(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid((struct pci_attach_args *)aux, mskc_devices,
	    nitems(mskc_devices)));
}

/*
 * Force the GEnesis into reset, then bring it out of reset.
 * Also performs the whole-chip (both ports) reinitialization: clocks,
 * link resets, ASF/timer/polling shutdown, RAM interface timing, and
 * the status BMU ring setup with interrupt moderation.
 */
void
mskc_reset(struct sk_softc *sc)
{
	u_int32_t imtimer_ticks, reg1;
	int reg;

	DPRINTFN(2, ("mskc_reset\n"));

	CSR_WRITE_1(sc, SK_CSR, SK_CSR_SW_RESET);
	CSR_WRITE_1(sc, SK_CSR, SK_CSR_MASTER_RESET);

	DELAY(1000);
	CSR_WRITE_1(sc, SK_CSR, SK_CSR_SW_UNRESET);
	DELAY(2);
	CSR_WRITE_1(sc, SK_CSR, SK_CSR_MASTER_UNRESET);

	/* TESTCTL1=2 unlocks writes to the PCI config window below. */
	sk_win_write_1(sc, SK_TESTCTL1, 2);

	if (sc->sk_type == SK_YUKON_EC_U || sc->sk_type == SK_YUKON_EX ||
	    sc->sk_type >= SK_YUKON_FE_P) {
		/* enable all clocks. */
		sk_win_write_4(sc, SK_Y2_PCI_REG(SK_PCI_OURREG3), 0);
		reg1 = sk_win_read_4(sc, SK_Y2_PCI_REG(SK_PCI_OURREG4));
		reg1 &= (SK_Y2_REG4_FORCE_ASPM_REQUEST|
		    SK_Y2_REG4_ASPM_GPHY_LINK_DOWN|
		    SK_Y2_REG4_ASPM_INT_FIFO_EMPTY|
		    SK_Y2_REG4_ASPM_CLKRUN_REQUEST);
		sk_win_write_4(sc, SK_Y2_PCI_REG(SK_PCI_OURREG4), reg1);

		reg1 = sk_win_read_4(sc, SK_Y2_PCI_REG(SK_PCI_OURREG5));
		reg1 &= SK_Y2_REG5_TIM_VMAIN_AV_MASK;
		sk_win_write_4(sc, SK_Y2_PCI_REG(SK_PCI_OURREG5), reg1);
		sk_win_write_4(sc, SK_Y2_PCI_REG(SK_PCI_CFGREG1), 0);

		/*
		 * Disable status race, workaround for Yukon EC Ultra &
		 * Yukon EX.
		 */
		reg1 = sk_win_read_4(sc, SK_GPIO);
		reg1 |= SK_Y2_GPIO_STAT_RACE_DIS;
		sk_win_write_4(sc, SK_GPIO, reg1);
		/* Read back to flush the posted write. */
		sk_win_read_4(sc, SK_GPIO);
	}

	reg1 = sk_win_read_4(sc, SK_Y2_PCI_REG(SK_PCI_OURREG1));
	if (sc->sk_type == SK_YUKON_XL && sc->sk_rev > SK_YUKON_XL_REV_A1)
		reg1 |= (SK_Y2_REG1_PHY1_COMA | SK_Y2_REG1_PHY2_COMA);
	else
		reg1 &= ~(SK_Y2_REG1_PHY1_COMA | SK_Y2_REG1_PHY2_COMA);
	sk_win_write_4(sc, SK_Y2_PCI_REG(SK_PCI_OURREG1), reg1);

	if (sc->sk_type == SK_YUKON_XL && sc->sk_rev > SK_YUKON_XL_REV_A1)
		sk_win_write_1(sc, SK_Y2_CLKGATE,
		    SK_Y2_CLKGATE_LINK1_GATE_DIS |
		    SK_Y2_CLKGATE_LINK2_GATE_DIS |
		    SK_Y2_CLKGATE_LINK1_CORE_DIS |
		    SK_Y2_CLKGATE_LINK2_CORE_DIS |
		    SK_Y2_CLKGATE_LINK1_PCI_DIS | SK_Y2_CLKGATE_LINK2_PCI_DIS);
	else
		sk_win_write_1(sc, SK_Y2_CLKGATE, 0);

	/* Pulse a link reset on both ports (port B at +SK_WIN_LEN). */
	CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET);
	CSR_WRITE_2(sc, SK_LINK_CTRL + SK_WIN_LEN, SK_LINK_RESET_SET);
	DELAY(1000);
	CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR);
	CSR_WRITE_2(sc, SK_LINK_CTRL + SK_WIN_LEN, SK_LINK_RESET_CLEAR);

	if (sc->sk_type == SK_YUKON_EX || sc->sk_type == SK_YUKON_SUPR) {
		CSR_WRITE_2(sc, SK_GMAC_CTRL, SK_GMAC_BYP_MACSECRX |
		    SK_GMAC_BYP_MACSECTX | SK_GMAC_BYP_RETR_FIFO);
	}

	/* TESTCTL1=1 re-locks the PCI config window. */
	sk_win_write_1(sc, SK_TESTCTL1, 1);

	DPRINTFN(2, ("mskc_reset: sk_csr=%x\n", CSR_READ_1(sc, SK_CSR)));
	DPRINTFN(2, ("mskc_reset: sk_link_ctrl=%x\n",
	    CSR_READ_2(sc, SK_LINK_CTRL)));

	/* Disable ASF */
	CSR_WRITE_1(sc, SK_Y2_ASF_CSR, SK_Y2_ASF_RESET);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_ASF_OFF);

	/* Clear I2C IRQ noise */
	CSR_WRITE_4(sc, SK_I2CHWIRQ, 1);

	/* Disable hardware timer */
	CSR_WRITE_1(sc, SK_TIMERCTL, SK_IMCTL_STOP);
	CSR_WRITE_1(sc, SK_TIMERCTL, SK_IMCTL_IRQ_CLEAR);

	/* Disable descriptor polling */
	CSR_WRITE_4(sc, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_STOP);

	/* Disable time stamps */
	CSR_WRITE_1(sc, SK_TSTAMP_CTL, SK_TSTAMP_STOP);
	CSR_WRITE_1(sc, SK_TSTAMP_CTL, SK_TSTAMP_IRQ_CLEAR);

	/* Enable RAM interface */
	sk_win_write_1(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);
	for (reg = SK_TO0; reg <= SK_TO11; reg++)
		sk_win_write_1(sc, reg, 36);
	/* Same for the second port's RAM interface. */
	sk_win_write_1(sc, SK_RAMCTL + (SK_WIN_LEN / 2), SK_RAMCTL_UNRESET);
	for (reg = SK_TO0; reg <= SK_TO11; reg++)
		sk_win_write_1(sc, reg + (SK_WIN_LEN / 2), 36);

	/*
	 * Configure interrupt moderation. The moderation timer
	 * defers interrupts specified in the interrupt moderation
	 * timer mask based on the timeout specified in the interrupt
	 * moderation timer init register. Each bit in the timer
	 * register represents one tick, so to specify a timeout in
	 * microseconds, we have to multiply by the correct number of
	 * ticks-per-microsecond.
	 */
	switch (sc->sk_type) {
	case SK_YUKON_EC:
	case SK_YUKON_EC_U:
	case SK_YUKON_EX:
	case SK_YUKON_SUPR:
	case SK_YUKON_ULTRA2:
	case SK_YUKON_OPTIMA:
	case SK_YUKON_PRM:
	case SK_YUKON_OPTIMA2:
		imtimer_ticks = SK_IMTIMER_TICKS_YUKON_EC;
		break;
	case SK_YUKON_FE:
		imtimer_ticks = SK_IMTIMER_TICKS_YUKON_FE;
		break;
	case SK_YUKON_FE_P:
		imtimer_ticks = SK_IMTIMER_TICKS_YUKON_FE_P;
		break;
	case SK_YUKON_XL:
		imtimer_ticks = SK_IMTIMER_TICKS_YUKON_XL;
		break;
	default:
		imtimer_ticks = SK_IMTIMER_TICKS_YUKON;
		break;
	}

	/*
	 * NOTE(review): imtimer_ticks is not referenced directly below;
	 * presumably the SK_IM_USECS() macro expands to use it -- confirm
	 * against if_skreg.h.
	 */

	/* Reset status ring. */
	bzero(sc->sk_status_ring,
	    MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc));
	sc->sk_status_idx = 0;

	sk_win_write_4(sc, SK_STAT_BMU_CSR, SK_STAT_BMU_RESET);
	sk_win_write_4(sc, SK_STAT_BMU_CSR, SK_STAT_BMU_UNRESET);

	/* Point the status BMU at the DMA'd status ring. */
	sk_win_write_2(sc, SK_STAT_BMU_LIDX, MSK_STATUS_RING_CNT - 1);
	sk_win_write_4(sc, SK_STAT_BMU_ADDRLO,
	    sc->sk_status_map->dm_segs[0].ds_addr);
	sk_win_write_4(sc, SK_STAT_BMU_ADDRHI,
	    (u_int64_t)sc->sk_status_map->dm_segs[0].ds_addr >> 32);
	sk_win_write_2(sc, SK_STAT_BMU_TX_THRESH, 10);
	sk_win_write_1(sc, SK_STAT_BMU_FIFOWM, 16);
	sk_win_write_1(sc, SK_STAT_BMU_FIFOIWM, 16);

#if 0
	sk_win_write_4(sc, SK_Y2_LEV_ITIMERINIT, SK_IM_USECS(100));
	sk_win_write_4(sc, SK_Y2_TX_ITIMERINIT, SK_IM_USECS(1000));
	sk_win_write_4(sc, SK_Y2_ISR_ITIMERINIT, SK_IM_USECS(20));
#else
	sk_win_write_4(sc, SK_Y2_ISR_ITIMERINIT, SK_IM_USECS(4));
#endif

	sk_win_write_4(sc, SK_STAT_BMU_CSR, SK_STAT_BMU_ON);

	sk_win_write_1(sc, SK_Y2_LEV_ITIMERCTL, SK_IMCTL_START);
	sk_win_write_1(sc, SK_Y2_TX_ITIMERCTL, SK_IMCTL_START);
	sk_win_write_1(sc, SK_Y2_ISR_ITIMERCTL, SK_IMCTL_START);
}

/*
 * Match the per-port child device: accept only ports A/B on the
 * Yukon-2 chip generations this driver handles.
 */
int
msk_probe(struct device *parent, void *match, void *aux)
{
	struct skc_attach_args *sa = aux;

	if (sa->skc_port != SK_PORT_A && sa->skc_port != SK_PORT_B)
		return (0);

	switch (sa->skc_type) {
	case SK_YUKON_XL:
	case SK_YUKON_EC_U:
	case SK_YUKON_EX:
	case SK_YUKON_EC:
	case SK_YUKON_FE:
	case SK_YUKON_FE_P:
	case SK_YUKON_SUPR:
	case SK_YUKON_ULTRA2:
	case SK_YUKON_OPTIMA:
	case SK_YUKON_PRM:
	case SK_YUKON_OPTIMA2:
		return (1);
	}

	return (0);
}

/*
 * Per-port reset: pulse the GMAC and GPHY resets, then release them
 * with loopback off and pause enabled.
 */
void
msk_reset(struct sk_if_softc *sc_if)
{
	/* GMAC and GPHY Reset */
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET);
	SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET);
	DELAY(1000);
	SK_IF_WRITE_4(sc_if, 0,
SK_GPHY_CTRL, SK_GPHY_RESET_CLEAR);
	SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF |
	    SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR);
}

/*
 * Each XMAC chip is attached as a separate logical IP interface.
 * Single port cards will have only one logical interface of course.
 */
void
msk_attach(struct device *parent, struct device *self, void *aux)
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *)self;
	struct sk_softc *sc = (struct sk_softc *)parent;
	struct skc_attach_args *sa = aux;
	struct ifnet *ifp;
	caddr_t kva;
	int i;
	u_int32_t chunk;
	int mii_flags;
	int error;

	sc_if->sk_port = sa->skc_port;
	sc_if->sk_softc = sc;
	sc->sk_if[sa->skc_port] = sc_if;

	DPRINTFN(2, ("begin msk_attach: port=%d\n", sc_if->sk_port));

	/*
	 * Get station address for this interface. Note that
	 * dual port cards actually come with three station
	 * addresses: one for each port, plus an extra. The
	 * extra one is used by the SysKonnect driver software
	 * as a 'virtual' station address for when both ports
	 * are operating in failover mode. Currently we don't
	 * use this extra address.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		sc_if->arpcom.ac_enaddr[i] =
		    sk_win_read_1(sc, SK_MAC0_0 + (sa->skc_port * 8) + i);

	printf(": address %s\n",
	    ether_sprintf(sc_if->arpcom.ac_enaddr));

	/*
	 * Set up RAM buffer addresses. The Yukon2 has a small amount
	 * of SRAM on it, somewhere between 4K and 48K. We need to
	 * divide this up between the transmitter and receiver. We
	 * give the receiver 2/3 of the memory (rounded down), and the
	 * transmitter whatever remains.
	 */
	chunk = (2 * (sc->sk_ramsize / sizeof(u_int64_t)) / 3) & ~0xff;
	sc_if->sk_rx_ramstart = 0;
	sc_if->sk_rx_ramend = sc_if->sk_rx_ramstart + chunk - 1;
	chunk = (sc->sk_ramsize / sizeof(u_int64_t)) - chunk;
	sc_if->sk_tx_ramstart = sc_if->sk_rx_ramend + 1;
	sc_if->sk_tx_ramend = sc_if->sk_tx_ramstart + chunk - 1;

	DPRINTFN(2, ("msk_attach: rx_ramstart=%#x rx_ramend=%#x\n"
	    " tx_ramstart=%#x tx_ramend=%#x\n",
	    sc_if->sk_rx_ramstart, sc_if->sk_rx_ramend,
	    sc_if->sk_tx_ramstart, sc_if->sk_tx_ramend));

	/* Allocate the descriptor queues. */
	if (bus_dmamem_alloc(sc->sc_dmatag, sizeof(struct msk_ring_data),
	    PAGE_SIZE, 0, &sc_if->sk_ring_seg, 1, &sc_if->sk_ring_nseg,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO)) {
		printf(": can't alloc rx buffers\n");
		goto fail;
	}
	if (bus_dmamem_map(sc->sc_dmatag, &sc_if->sk_ring_seg,
	    sc_if->sk_ring_nseg,
	    sizeof(struct msk_ring_data), &kva, BUS_DMA_NOWAIT)) {
		printf(": can't map dma buffers (%lu bytes)\n",
		    (ulong)sizeof(struct msk_ring_data));
		goto fail_1;
	}
	if (bus_dmamap_create(sc->sc_dmatag, sizeof(struct msk_ring_data), 1,
	    sizeof(struct msk_ring_data), 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
	    &sc_if->sk_ring_map)) {
		printf(": can't create dma map\n");
		goto fail_2;
	}
	if (bus_dmamap_load(sc->sc_dmatag, sc_if->sk_ring_map, kva,
	    sizeof(struct msk_ring_data), NULL, BUS_DMA_NOWAIT)) {
		printf(": can't load dma map\n");
		goto fail_3;
	}
	sc_if->sk_rdata = (struct msk_ring_data *)kva;

	/* FE/FE+ parts do not get jumbo-sized rx buffers. */
	if (sc->sk_type != SK_YUKON_FE &&
	    sc->sk_type != SK_YUKON_FE_P)
		sc_if->sk_pktlen = SK_JLEN;
	else
		sc_if->sk_pktlen = MCLBYTES;

	for (i = 0; i < MSK_RX_RING_CNT; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmatag,
		    sc_if->sk_pktlen, 1, sc_if->sk_pktlen, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
		    &sc_if->sk_cdata.sk_rx_maps[i])) != 0) {
			printf("\n%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->sk_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	ifp = &sc_if->arpcom.ac_if;
	ifp->if_softc = sc_if;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = msk_ioctl;
	ifp->if_start = msk_start;
	ifp->if_watchdog = msk_watchdog;
	if (sc->sk_type != SK_YUKON_FE &&
	    sc->sk_type != SK_YUKON_FE_P)
		ifp->if_hardmtu = SK_JUMBO_MTU;
	IFQ_SET_MAXLEN(&ifp->if_snd, MSK_TX_RING_CNT - 1);
	bcopy(sc_if->sk_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	msk_reset(sc_if);

	/*
	 * Do miibus setup.
	 */
	msk_init_yukon(sc_if);

	DPRINTFN(2, ("msk_attach: 1\n"));

	sc_if->sk_mii.mii_ifp = ifp;
	sc_if->sk_mii.mii_readreg = msk_miibus_readreg;
	sc_if->sk_mii.mii_writereg = msk_miibus_writereg;
	sc_if->sk_mii.mii_statchg = msk_miibus_statchg;

	ifmedia_init(&sc_if->sk_mii.mii_media, 0,
	    msk_ifmedia_upd, msk_ifmedia_sts);
	mii_flags = MIIF_DOPAUSE;
	if (sc->sk_fibertype)
		mii_flags |= MIIF_HAVEFIBER;
	mii_attach(self, &sc_if->sk_mii, 0xffffffff, 0,
	    MII_OFFSET_ANY, mii_flags);
	if (LIST_FIRST(&sc_if->sk_mii.mii_phys) == NULL) {
		/* No PHY: pin the media to manual so ifconfig still works. */
		printf("%s: no PHY found!\n", sc_if->sk_dev.dv_xname);
		ifmedia_add(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_MANUAL);
	} else
		ifmedia_set(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_AUTO);

	timeout_set(&sc_if->sk_tick_ch, msk_tick, sc_if);
	timeout_set(&sc_if->sk_tick_rx, msk_fill_rx_tick, sc_if);

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);

	DPRINTFN(2, ("msk_attach: end\n"));
	return;

	/* Unwind in reverse order of acquisition. */
fail_4:
	for (i = 0; i < MSK_RX_RING_CNT; i++) {
		if (sc_if->sk_cdata.sk_rx_maps[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmatag,
			    sc_if->sk_cdata.sk_rx_maps[i]);
	}

fail_3:
	bus_dmamap_destroy(sc->sc_dmatag, sc_if->sk_ring_map);
fail_2:
	bus_dmamem_unmap(sc->sc_dmatag, kva, sizeof(struct msk_ring_data));
fail_1:
	bus_dmamem_free(sc->sc_dmatag, &sc_if->sk_ring_seg, sc_if->sk_ring_nseg);
fail:
	sc->sk_if[sa->skc_port] = NULL;
}

/*
 * Detach one port: stop the chip, tear down MII/media, detach the
 * interface and release the descriptor-ring DMA resources.
 */
int
msk_detach(struct device *self, int flags)
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *)self;
	struct sk_softc *sc = sc_if->sk_softc;
	struct ifnet *ifp= &sc_if->arpcom.ac_if;

	if (sc->sk_if[sc_if->sk_port] == NULL)
		return (0);

	msk_stop(sc_if, 1);

	/* Detach any PHYs we might have. */
	if (LIST_FIRST(&sc_if->sk_mii.mii_phys) != NULL)
		mii_detach(&sc_if->sk_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete any remaining media.
 */
	ifmedia_delete_instance(&sc_if->sk_mii.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);

	/* Release the descriptor ring DMA resources from msk_attach(). */
	bus_dmamem_unmap(sc->sc_dmatag, (caddr_t)sc_if->sk_rdata,
	    sizeof(struct msk_ring_data));
	bus_dmamem_free(sc->sc_dmatag,
	    &sc_if->sk_ring_seg, sc_if->sk_ring_nseg);
	bus_dmamap_destroy(sc->sc_dmatag, sc_if->sk_ring_map);
	sc->sk_if[sc_if->sk_port] = NULL;

	return (0);
}

/*
 * msk_activate: power-management hook for one port.  On resume, reset
 * the MAC and reinitialize it if the interface was running.
 */
int
msk_activate(struct device *self, int act)
{
	struct sk_if_softc *sc_if = (void *)self;
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	int rv = 0;

	switch (act) {
	case DVACT_RESUME:
		msk_reset(sc_if);
		if (ifp->if_flags & IFF_RUNNING)
			msk_init(sc_if);
		break;
	default:
		rv = config_activate_children(self, act);
		break;
	}
	return (rv);
}

/* Autoconf print function for msk ports attaching under mskc. */
int
mskcprint(void *aux, const char *pnp)
{
	struct skc_attach_args *sa = aux;

	if (pnp)
		printf("msk port %c at %s",
		    (sa->skc_port == SK_PORT_A) ? 'A' : 'B', pnp);
	else
		printf(" port %c", (sa->skc_port == SK_PORT_A) ? 'A' : 'B');
	return (UNCONF);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
void
mskc_attach(struct device *parent, struct device *self, void *aux)
{
	struct sk_softc *sc = (struct sk_softc *)self;
	struct pci_attach_args *pa = aux;
	struct skc_attach_args skca;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcireg_t memtype;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	u_int8_t hw, pmd;
	char *revstr = NULL;	/* stays NULL for unrecognized revisions */
	caddr_t kva;

	DPRINTFN(2, ("begin mskc_attach\n"));

	pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0);

	/*
	 * Map control/status registers.
	 */
	memtype = pci_mapreg_type(pc, pa->pa_tag, SK_PCI_LOMEM);
	if (pci_mapreg_map(pa, SK_PCI_LOMEM, memtype, 0, &sc->sk_btag,
	    &sc->sk_bhandle, NULL, &sc->sk_bsize, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	sc->sc_dmatag = pa->pa_dmat;

	sc->sk_type = sk_win_read_1(sc, SK_CHIPVER);
	sc->sk_rev = (sk_win_read_1(sc, SK_CONFIG) >> 4);

	/* bail out here if chip is not recognized */
	if (!(SK_IS_YUKON2(sc))) {
		printf(": unknown chip type: %d\n", sc->sk_type);
		goto fail_1;
	}
	DPRINTFN(2, ("mskc_attach: allocate interrupt\n"));

	/*
	 * Force legacy interrupts for 8036/8053 parts; presumably MSI is
	 * unreliable on these — TODO confirm against errata.
	 */
	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_MARVELL) {
		switch (PCI_PRODUCT(pa->pa_id)) {
		case PCI_PRODUCT_MARVELL_YUKON_8036:
		case PCI_PRODUCT_MARVELL_YUKON_8053:
			pa->pa_flags &= ~PCI_FLAGS_MSI_ENABLED;
		}
	}

	/* Allocate interrupt */
	if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
		printf(": couldn't map interrupt\n");
		goto fail_1;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sk_intrhand = pci_intr_establish(pc, ih, IPL_NET, msk_intr, sc,
	    self->dv_xname);
	if (sc->sk_intrhand == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail_1;
	}
	sc->sk_pc = pc;

	/* Allocate, map and load the shared status ring. */
	if (bus_dmamem_alloc(sc->sc_dmatag,
	    MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc),
	    MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc),
	    0, &sc->sk_status_seg, 1, &sc->sk_status_nseg,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO)) {
		printf(": can't alloc status buffers\n");
		goto fail_2;
	}

	if (bus_dmamem_map(sc->sc_dmatag,
	    &sc->sk_status_seg, sc->sk_status_nseg,
	    MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc),
	    &kva, BUS_DMA_NOWAIT)) {
		printf(": can't map dma buffers (%lu bytes)\n",
		    (ulong)(MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc)));
		goto fail_3;
	}
	if (bus_dmamap_create(sc->sc_dmatag,
	    MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc), 1,
	    MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc), 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
	    &sc->sk_status_map)) {
		printf(": can't create dma map\n");
		goto fail_4;
	}
	if (bus_dmamap_load(sc->sc_dmatag, sc->sk_status_map, kva,
	    MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc),
	    NULL, BUS_DMA_NOWAIT)) {
		printf(": can't load dma map\n");
		goto fail_5;
	}
	sc->sk_status_ring = (struct msk_status_desc *)kva;

	/* Reset the adapter. */
	mskc_reset(sc);

	/* RAM size is reported in 4K pages. */
	sc->sk_ramsize = sk_win_read_1(sc, SK_EPROM0) * 4096;
	DPRINTFN(2, ("mskc_attach: ramsize=%dK\n", sc->sk_ramsize / 1024));

	/* 'L', 'S' and 'P' PMD types indicate a fiber media. */
	pmd = sk_win_read_1(sc, SK_PMDTYPE);
	if (pmd == 'L' || pmd == 'S' || pmd == 'P')
		sc->sk_fibertype = 1;

	/* Map the chip type to a printable product name. */
	switch (sc->sk_type) {
	case SK_YUKON_XL:
		sc->sk_name = "Yukon-2 XL";
		break;
	case SK_YUKON_EC_U:
		sc->sk_name = "Yukon-2 EC Ultra";
		break;
	case SK_YUKON_EX:
		sc->sk_name = "Yukon-2 Extreme";
		break;
	case SK_YUKON_EC:
		sc->sk_name = "Yukon-2 EC";
		break;
	case SK_YUKON_FE:
		sc->sk_name = "Yukon-2 FE";
		break;
	case SK_YUKON_FE_P:
		sc->sk_name = "Yukon-2 FE+";
		break;
	case SK_YUKON_SUPR:
		sc->sk_name = "Yukon-2 Supreme";
		break;
	case SK_YUKON_ULTRA2:
		sc->sk_name = "Yukon-2 Ultra 2";
		break;
	case SK_YUKON_OPTIMA:
		sc->sk_name = "Yukon-2 Optima";
		break;
	case SK_YUKON_PRM:
		sc->sk_name = "Yukon-2 Optima Prime";
		break;
	case SK_YUKON_OPTIMA2:
		sc->sk_name = "Yukon-2 Optima 2";
		break;
	default:
		sc->sk_name = "Yukon (Unknown)";
	}

	/* Per-chip revision strings; unknown revisions leave revstr NULL. */
	if (sc->sk_type == SK_YUKON_XL) {
		switch (sc->sk_rev) {
		case SK_YUKON_XL_REV_A0:
			revstr = "A0";
			break;
		case SK_YUKON_XL_REV_A1:
			revstr = "A1";
			break;
		case SK_YUKON_XL_REV_A2:
			revstr = "A2";
			break;
		case SK_YUKON_XL_REV_A3:
			revstr = "A3";
			break;
		default:
			;
		}
	}

	if (sc->sk_type == SK_YUKON_EC) {
		switch (sc->sk_rev) {
		case SK_YUKON_EC_REV_A1:
			revstr = "A1";
			break;
		case SK_YUKON_EC_REV_A2:
			revstr = "A2";
			break;
		case SK_YUKON_EC_REV_A3:
			revstr = "A3";
			break;
		default:
			;
		}
	}

	if (sc->sk_type == SK_YUKON_EC_U) {
		switch (sc->sk_rev) {
		case SK_YUKON_EC_U_REV_A0:
			revstr = "A0";
			break;
		case SK_YUKON_EC_U_REV_A1:
			revstr = "A1";
			break;
		case SK_YUKON_EC_U_REV_B0:
			revstr = "B0";
			break;
		case SK_YUKON_EC_U_REV_B1:
			revstr = "B1";
			break;
		default:
			;
		}
	}

	if (sc->sk_type == SK_YUKON_FE) {
		switch (sc->sk_rev) {
		case SK_YUKON_FE_REV_A1:
			revstr = "A1";
			break;
		case SK_YUKON_FE_REV_A2:
			revstr = "A2";
			break;
		default:
			;
		}
	}

	if (sc->sk_type == SK_YUKON_FE_P && sc->sk_rev == SK_YUKON_FE_P_REV_A0)
		revstr = "A0";

	if (sc->sk_type == SK_YUKON_EX) {
		switch (sc->sk_rev) {
		case SK_YUKON_EX_REV_A0:
			revstr = "A0";
			break;
		case SK_YUKON_EX_REV_B0:
			revstr = "B0";
			break;
		default:
			;
		}
	}

	if (sc->sk_type == SK_YUKON_SUPR) {
		switch (sc->sk_rev) {
		case SK_YUKON_SUPR_REV_A0:
			revstr = "A0";
			break;
		case SK_YUKON_SUPR_REV_B0:
			revstr = "B0";
			break;
		case SK_YUKON_SUPR_REV_B1:
			revstr = "B1";
			break;
		default:
			;
		}
	}

	if (sc->sk_type == SK_YUKON_PRM) {
		switch (sc->sk_rev) {
		case SK_YUKON_PRM_REV_Z1:
			revstr = "Z1";
			break;
		case SK_YUKON_PRM_REV_A0:
			revstr = "A0";
			break;
		default:
			;
		}
	}

	/* Announce the product
name. */ 1337 printf(", %s", sc->sk_name); 1338 if (revstr != NULL) 1339 printf(" rev. %s", revstr); 1340 printf(" (0x%x): %s\n", sc->sk_rev, intrstr); 1341 1342 sc->sk_macs = 1; 1343 1344 hw = sk_win_read_1(sc, SK_Y2_HWRES); 1345 if ((hw & SK_Y2_HWRES_LINK_MASK) == SK_Y2_HWRES_LINK_DUAL) { 1346 if ((sk_win_read_1(sc, SK_Y2_CLKGATE) & 1347 SK_Y2_CLKGATE_LINK2_INACTIVE) == 0) 1348 sc->sk_macs++; 1349 } 1350 1351 skca.skc_port = SK_PORT_A; 1352 skca.skc_type = sc->sk_type; 1353 skca.skc_rev = sc->sk_rev; 1354 (void)config_found(&sc->sk_dev, &skca, mskcprint); 1355 1356 if (sc->sk_macs > 1) { 1357 skca.skc_port = SK_PORT_B; 1358 skca.skc_type = sc->sk_type; 1359 skca.skc_rev = sc->sk_rev; 1360 (void)config_found(&sc->sk_dev, &skca, mskcprint); 1361 } 1362 1363 /* Turn on the 'driver is loaded' LED. */ 1364 CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON); 1365 1366 return; 1367 1368 fail_4: 1369 bus_dmamem_unmap(sc->sc_dmatag, (caddr_t)sc->sk_status_ring, 1370 MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc)); 1371 fail_3: 1372 bus_dmamem_free(sc->sc_dmatag, 1373 &sc->sk_status_seg, sc->sk_status_nseg); 1374 sc->sk_status_nseg = 0; 1375 fail_5: 1376 bus_dmamap_destroy(sc->sc_dmatag, sc->sk_status_map); 1377 fail_2: 1378 pci_intr_disestablish(sc->sk_pc, sc->sk_intrhand); 1379 sc->sk_intrhand = NULL; 1380 fail_1: 1381 bus_space_unmap(sc->sk_btag, sc->sk_bhandle, sc->sk_bsize); 1382 sc->sk_bsize = 0; 1383 } 1384 1385 int 1386 mskc_detach(struct device *self, int flags) 1387 { 1388 struct sk_softc *sc = (struct sk_softc *)self; 1389 int rv; 1390 1391 if (sc->sk_intrhand) 1392 pci_intr_disestablish(sc->sk_pc, sc->sk_intrhand); 1393 1394 rv = config_detach_children(self, flags); 1395 if (rv != 0) 1396 return (rv); 1397 1398 if (sc->sk_status_nseg > 0) { 1399 bus_dmamap_destroy(sc->sc_dmatag, sc->sk_status_map); 1400 bus_dmamem_unmap(sc->sc_dmatag, (caddr_t)sc->sk_status_ring, 1401 MSK_STATUS_RING_CNT * sizeof(struct msk_status_desc)); 1402 bus_dmamem_free(sc->sc_dmatag, 1403 
	    &sc->sk_status_seg, sc->sk_status_nseg);
	}

	if (sc->sk_bsize > 0)
		bus_space_unmap(sc->sk_btag, sc->sk_bhandle, sc->sk_bsize);

	return(0);
}

/*
 * mskc_activate: controller power-management hook.  On resume the chip
 * is reset before the child ports are resumed.
 */
int
mskc_activate(struct device *self, int act)
{
	struct sk_softc *sc = (void *)self;
	int rv = 0;

	switch (act) {
	case DVACT_RESUME:
		mskc_reset(sc);
		rv = config_activate_children(self, act);
		break;
	default:
		rv = config_activate_children(self, act);
		break;
	}
	return (rv);
}

/*
 * msk_encap: load mbuf chain m into the tx ring starting at slot prod.
 * Returns the number of ring entries consumed, or 0 if the mbuf could
 * not be loaded (caller frees it).  An ADDR64 descriptor is inserted
 * whenever the high 32 bits of a segment address differ from the value
 * last given to the chip.
 */
static unsigned int
msk_encap(struct sk_if_softc *sc_if, struct mbuf *m, uint32_t prod)
{
	struct sk_softc *sc = sc_if->sk_softc;
	struct msk_ring_data *rd = sc_if->sk_rdata;
	struct msk_tx_desc *t;
	bus_dmamap_t map;
	uint64_t addr;
	uint32_t hiaddr;
	uint32_t next, last;
	uint8_t opcode;
	unsigned int entries = 0;
	int i;

	map = sc_if->sk_cdata.sk_tx_maps[prod];

	switch (bus_dmamap_load_mbuf(sc->sc_dmatag, map, m,
	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT)) {
	case 0:
		break;
	case EFBIG: /* mbuf chain is too fragmented */
		if (m_defrag(m, M_DONTWAIT) == 0 &&
		    bus_dmamap_load_mbuf(sc->sc_dmatag, map, m,
		    BUS_DMA_STREAMING | BUS_DMA_NOWAIT) == 0)
			break;
		/* FALLTHROUGH */
	default:
		return (0);
	}

	bus_dmamap_sync(sc->sc_dmatag, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* First buffer descriptor is PACKET; the rest are BUFFER. */
	opcode = SK_Y2_TXOPC_OWN | SK_Y2_TXOPC_PACKET;
	next = prod;
	for (i = 0; i < map->dm_nsegs; i++) {
		/* high 32 bits of address */
		addr = map->dm_segs[i].ds_addr;
		hiaddr = addr >> 32;
		if (sc_if->sk_cdata.sk_tx_hiaddr != hiaddr) {
			t = &rd->sk_tx_ring[next];
			htolem32(&t->sk_addr, hiaddr);
			t->sk_opcode = SK_Y2_TXOPC_OWN | SK_Y2_TXOPC_ADDR64;

			sc_if->sk_cdata.sk_tx_hiaddr = hiaddr;

			SK_INC(next, MSK_TX_RING_CNT);
			entries++;
		}

		/* low 32 bits of address + length */
		t = &rd->sk_tx_ring[next];
		htolem32(&t->sk_addr, addr);
		htolem16(&t->sk_len, map->dm_segs[i].ds_len);
		t->sk_ctl = 0;
		t->sk_opcode = opcode;

		last = next;
		SK_INC(next, MSK_TX_RING_CNT);
		entries++;

		opcode = SK_Y2_TXOPC_OWN | SK_Y2_TXOPC_BUFFER;
	}
	/* Flag the final buffer descriptor written above. */
	t->sk_ctl = SK_Y2_TXCTL_LASTFRAG;

	/*
	 * Swap the maps so the loaded map sits at the last slot, where
	 * msk_txeof() will find and unload it, and record the mbuf there.
	 */
	sc_if->sk_cdata.sk_tx_maps[prod] = sc_if->sk_cdata.sk_tx_maps[last];
	sc_if->sk_cdata.sk_tx_maps[last] = map;
	sc_if->sk_cdata.sk_tx_mbuf[last] = m;

	return (entries);
}

/*
 * msk_start: ifnet start routine.  Drains the send queue into the tx
 * ring, stopping while fewer than SK_NTXSEG * 2 free slots remain.
 */
void
msk_start(struct ifnet *ifp)
{
	struct sk_if_softc *sc_if = ifp->if_softc;
	struct mbuf *m = NULL;
	uint32_t prod, free, used;
	int post = 0;

	/* Free slots between producer and consumer, modulo ring size. */
	prod = sc_if->sk_cdata.sk_tx_prod;
	free = sc_if->sk_cdata.sk_tx_cons;
	if (free <= prod)
		free += MSK_TX_RING_CNT;
	free -= prod;

	MSK_CDTXSYNC(sc_if, 0, MSK_TX_RING_CNT, BUS_DMASYNC_POSTWRITE);

	for (;;) {
		if (free <= SK_NTXSEG * 2) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		m = ifq_dequeue(&ifp->if_snd);
		if (m == NULL)
			break;

		used = msk_encap(sc_if, m, prod);
		if (used == 0) {
			m_freem(m);
			continue;
		}

		free -= used;
		prod += used;
		prod &= MSK_TX_RING_CNT - 1;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif
		post = 1;
	}

	MSK_CDTXSYNC(sc_if, 0, MSK_TX_RING_CNT, BUS_DMASYNC_PREWRITE);

	if (post == 0)
		return;

	/* Transmit */
	sc_if->sk_cdata.sk_tx_prod = prod;
	SK_IF_WRITE_2(sc_if, 1, SK_TXQA1_Y2_PREF_PUTIDX, prod);

	/* Set a timeout in case the chip goes out to lunch.
 */
	ifp->if_timer = MSK_TX_TIMEOUT;
}

/*
 * msk_watchdog: tx timeout handler.  Reclaims completed descriptors
 * first; only if work remains outstanding is a real hang assumed.
 */
void
msk_watchdog(struct ifnet *ifp)
{
	struct sk_if_softc *sc_if = ifp->if_softc;

	/*
	 * Reclaim first as there is a possibility of losing Tx completion
	 * interrupts.
	 */
	msk_txeof(sc_if);
	if (sc_if->sk_cdata.sk_tx_prod != sc_if->sk_cdata.sk_tx_cons) {
		printf("%s: watchdog timeout\n", sc_if->sk_dev.dv_xname);

		ifp->if_oerrors++;

		/* XXX Resets both ports; we shouldn't do that. */
		mskc_reset(sc_if->sk_softc);
		msk_reset(sc_if);
		msk_init(sc_if);
	}
}

/*
 * msk_rxvalid: return 1 if the rx status word describes a good frame
 * whose reported byte count matches len, 0 otherwise.
 */
static inline int
msk_rxvalid(struct sk_softc *sc, u_int32_t stat, u_int32_t len)
{
	if ((stat & (YU_RXSTAT_CRCERR | YU_RXSTAT_LONGERR |
	    YU_RXSTAT_MIIERR | YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC |
	    YU_RXSTAT_JABBER)) != 0 ||
	    (stat & YU_RXSTAT_RXOK) != YU_RXSTAT_RXOK ||
	    YU_RXSTAT_BYTES(stat) != len)
		return (0);

	return (1);
}

/*
 * msk_rxeof: handle one rx completion from the status ring.  Advances
 * the consumer past empty slots (presumably ADDR64 descriptors carry no
 * mbuf — see the note below) to the slot holding the received mbuf,
 * validates the frame and passes it up the stack.
 */
void
msk_rxeof(struct sk_if_softc *sc_if, uint16_t len, uint32_t rxstat)
{
	struct sk_softc *sc = sc_if->sk_softc;
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m = NULL;
	int prod, cons, tail;
	bus_dmamap_t map;

	prod = sc_if->sk_cdata.sk_rx_prod;
	cons = sc_if->sk_cdata.sk_rx_cons;

	while (cons != prod) {
		tail = cons;
		SK_INC(cons, MSK_RX_RING_CNT);

		m = sc_if->sk_cdata.sk_rx_mbuf[tail];
		if (m != NULL) {
			/* found it */
			break;
		}
	}
	sc_if->sk_cdata.sk_rx_cons = cons;

	if (m == NULL) {
		/* maybe if ADDR64 is consumed? */
		return;
	}

	sc_if->sk_cdata.sk_rx_mbuf[tail] = NULL;

	map = sc_if->sk_cdata.sk_rx_maps[tail];
	if_rxr_put(&sc_if->sk_cdata.sk_rx_ring, 1);

	bus_dmamap_sync(sc_if->sk_softc->sc_dmatag, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc_if->sk_softc->sc_dmatag, map);

	/* Drop runts, giants and frames the MAC flagged as bad. */
	if (len < SK_MIN_FRAMELEN || len > SK_JUMBO_FRAMELEN ||
	    msk_rxvalid(sc, rxstat, len) == 0) {
		ifp->if_ierrors++;
		m_freem(m);
		return;
	}

	m->m_pkthdr.len = m->m_len = len;

	ml_enqueue(&ml, m);
	if_input(ifp, &ml);
}

/*
 * msk_txeof: reclaim tx descriptors up to the chip's reported read
 * index, unloading DMA maps and freeing mbufs as they complete.
 */
void
msk_txeof(struct sk_if_softc *sc_if)
{
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	struct sk_softc *sc = sc_if->sk_softc;
	uint32_t prod, cons;
	struct mbuf *m;
	bus_dmamap_t map;
	bus_size_t reg;

	/* Each port has its own read-index register. */
	if (sc_if->sk_port == SK_PORT_A)
		reg = SK_STAT_BMU_TXA1_RIDX;
	else
		reg = SK_STAT_BMU_TXA2_RIDX;

	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	cons = sc_if->sk_cdata.sk_tx_cons;
	prod = sk_win_read_2(sc, reg);

	if (cons == prod)
		return;

	while (cons != prod) {
		m = sc_if->sk_cdata.sk_tx_mbuf[cons];
		if (m != NULL) {
			sc_if->sk_cdata.sk_tx_mbuf[cons] = NULL;

			map = sc_if->sk_cdata.sk_tx_maps[cons];
			bus_dmamap_sync(sc->sc_dmatag, map, 0,
			    map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmatag, map);

			m_freem(m);
		}

		SK_INC(cons, MSK_TX_RING_CNT);
	}
	/* Ring fully drained: disarm the watchdog. */
	if (cons == sc_if->sk_cdata.sk_tx_prod)
		ifp->if_timer = 0;

	sc_if->sk_cdata.sk_tx_cons = cons;

	if (ifq_is_oactive(&ifp->if_snd))
		ifq_restart(&ifp->if_snd);
}

/*
 * msk_fill_rx_ring: top the rx ring back up with fresh buffers.  If the
 * ring is left completely empty, retry soon from a timeout so rx can
 * recover once mbufs become available again.
 */
void
msk_fill_rx_ring(struct sk_if_softc *sc_if)
{
	u_int slots, used;

	slots = if_rxr_get(&sc_if->sk_cdata.sk_rx_ring, MSK_RX_RING_CNT/2);

	MSK_CDRXSYNC(sc_if, 0, BUS_DMASYNC_POSTWRITE); /* XXX */
	while (slots > 0) {
		used = msk_newbuf(sc_if);
		if (used == 0)
			break;

		slots -= used;
	}
	MSK_CDRXSYNC(sc_if, 0, BUS_DMASYNC_PREWRITE); /* XXX */

	/* Return the slots we could not fill. */
	if_rxr_put(&sc_if->sk_cdata.sk_rx_ring, slots);
	if (if_rxr_inuse(&sc_if->sk_cdata.sk_rx_ring) == 0)
		timeout_add(&sc_if->sk_tick_rx, 1);
}

/*
 * msk_fill_rx_tick: timeout-driven retry of msk_fill_rx_ring() for the
 * ring-empty case; nudges the chip's put index if buffers were added.
 */
void
msk_fill_rx_tick(void *xsc_if)
{
	struct sk_if_softc *sc_if = xsc_if;
	int s;

	s = splnet();
	if (if_rxr_inuse(&sc_if->sk_cdata.sk_rx_ring) == 0) {
		msk_fill_rx_ring(sc_if);
		SK_IF_WRITE_2(sc_if, 0, SK_RXQ1_Y2_PREF_PUTIDX,
		    sc_if->sk_cdata.sk_rx_prod);
	}
	splx(s);
}

/* msk_tick: one-second periodic MII tick. */
void
msk_tick(void *xsc_if)
{
	struct sk_if_softc *sc_if = xsc_if;
	struct mii_data *mii = &sc_if->sk_mii;
	int s;

	s = splnet();
	mii_tick(mii);
	splx(s);
	timeout_add_sec(&sc_if->sk_tick_ch, 1);
}

/*
 * msk_intr_yukon: service per-MAC GMAC interrupts (rx FIFO overrun and
 * tx FIFO underrun) by acking them in the FIFO control/test registers.
 */
void
msk_intr_yukon(struct sk_if_softc *sc_if)
{
	u_int8_t status;

	status = SK_IF_READ_1(sc_if, 0, SK_GMAC_ISR);
	/* RX overrun */
	if ((status & SK_GMAC_INT_RX_OVER) != 0) {
		SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST,
		    SK_RFCTL_RX_FIFO_OVER);
	}
	/* TX underrun */
	if ((status & SK_GMAC_INT_TX_UNDER) != 0) {
		SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST,
		    SK_TFCTL_TX_FIFO_UNDER);
	}

	DPRINTFN(2, ("msk_intr_yukon status=%#x\n", status));
}

/*
 * msk_intr: shared interrupt handler for both ports.  Services per-MAC
 * conditions, then drains the chip-owned entries from the status ring,
 * dispatching rx and tx completions to the owning port.
 */
int
msk_intr(void *xsc)
{
	struct sk_softc *sc = xsc;
	struct sk_if_softc *sc_if;
	struct sk_if_softc *sc_if0 = sc->sk_if[SK_PORT_A];
	struct sk_if_softc *sc_if1 = sc->sk_if[SK_PORT_B];
	struct ifnet *ifp0 = NULL, *ifp1 = NULL;
	int claimed = 0, rx[2] = {0, 0};	/* per-port "rx seen" flags */
	u_int32_t status;
	struct msk_status_desc *cur_st;

	/* 0xffffffff: device gone (e.g. detached/powered off). */
	status = CSR_READ_4(sc, SK_Y2_ISSR2);
	if (status == 0xffffffff)
		return (0);
	if (status == 0) {
		CSR_WRITE_4(sc, SK_Y2_ICR, 2);
		return (0);
	}

	status = CSR_READ_4(sc, SK_ISR);

	if (sc_if0 != NULL)
		ifp0 = &sc_if0->arpcom.ac_if;
	if (sc_if1 != NULL)
		ifp1 = &sc_if1->arpcom.ac_if;

	if (sc_if0 && (status & SK_Y2_IMR_MAC1) &&
	    (ifp0->if_flags & IFF_RUNNING)) {
		msk_intr_yukon(sc_if0);
	}

	if (sc_if1 && (status & SK_Y2_IMR_MAC2) &&
	    (ifp1->if_flags & IFF_RUNNING)) {
		msk_intr_yukon(sc_if1);
	}

	MSK_CDSTSYNC(sc, sc->sk_status_idx,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	cur_st = &sc->sk_status_ring[sc->sk_status_idx];

	/* Consume status entries until we hit one we do not own. */
	while (cur_st->sk_opcode & SK_Y2_STOPC_OWN) {
		cur_st->sk_opcode &= ~SK_Y2_STOPC_OWN;
		switch (cur_st->sk_opcode) {
		case SK_Y2_STOPC_RXSTAT:
			/* sk_link low bit selects the port. */
			sc_if = sc->sk_if[cur_st->sk_link & 0x01];
			rx[cur_st->sk_link & 0x01] = 1;
			msk_rxeof(sc_if, lemtoh16(&cur_st->sk_len),
			    lemtoh32(&cur_st->sk_status));
			break;
		case SK_Y2_STOPC_TXSTAT:
			if (sc_if0)
				msk_txeof(sc_if0);
			if (sc_if1)
				msk_txeof(sc_if1);
			break;
		default:
			printf("opcode=0x%x\n", cur_st->sk_opcode);
			break;
		}
		SK_INC(sc->sk_status_idx, MSK_STATUS_RING_CNT);

		MSK_CDSTSYNC(sc, sc->sk_status_idx,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		cur_st = &sc->sk_status_ring[sc->sk_status_idx];
	}

	if (status & SK_Y2_IMR_BMU) {
		CSR_WRITE_4(sc, SK_STAT_BMU_CSR, SK_STAT_BMU_IRQ_CLEAR);
		claimed = 1;
	}

	CSR_WRITE_4(sc, SK_Y2_ICR, 2);

	/* Refill whichever rx rings we took packets from. */
	if (rx[0]) {
		msk_fill_rx_ring(sc_if0);
		SK_IF_WRITE_2(sc_if0, 0, SK_RXQ1_Y2_PREF_PUTIDX,
		    sc_if0->sk_cdata.sk_rx_prod);
	}
	if (rx[1]) {
		msk_fill_rx_ring(sc_if1);
		SK_IF_WRITE_2(sc_if1, 0, SK_RXQ1_Y2_PREF_PUTIDX,
		    sc_if1->sk_cdata.sk_rx_prod);
	}

	return (claimed);
}

/*
 * msk_init_yukon: program the Yukon GMAC for this port — MIB counters,
 * rx/tx parameters, station address, rx filter and the MAC FIFOs.
 */
void
msk_init_yukon(struct sk_if_softc *sc_if)
{
	u_int32_t v;
	u_int16_t reg;
	struct sk_softc *sc;
	int i;

	sc = sc_if->sk_softc;

	DPRINTFN(2, ("msk_init_yukon: start: sk_csr=%#x\n",
	    CSR_READ_4(sc_if->sk_softc, SK_CSR)));

	DPRINTFN(6, ("msk_init_yukon: 1\n"));

	DPRINTFN(3, ("msk_init_yukon: gmac_ctrl=%#x\n",
	    SK_IF_READ_4(sc_if, 0, SK_GMAC_CTRL)));

	DPRINTFN(6, ("msk_init_yukon: 3\n"));

	/* unused read of the interrupt source register */
	DPRINTFN(6, ("msk_init_yukon: 4\n"));
	SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR);

	DPRINTFN(6, ("msk_init_yukon: 4a\n"));
	reg = SK_YU_READ_2(sc_if, YUKON_PAR);
	DPRINTFN(6, ("msk_init_yukon: YUKON_PAR=%#x\n", reg));

	/* MIB Counter Clear Mode set */
	reg |= YU_PAR_MIB_CLR;
	DPRINTFN(6, ("msk_init_yukon: YUKON_PAR=%#x\n", reg));
	DPRINTFN(6, ("msk_init_yukon: 4b\n"));
	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);

	/* MIB Counter Clear Mode clear */
	DPRINTFN(6, ("msk_init_yukon: 5\n"));
	reg &= ~YU_PAR_MIB_CLR;
	SK_YU_WRITE_2(sc_if, YUKON_PAR, reg);

	/* receive control reg */
	DPRINTFN(6, ("msk_init_yukon: 7\n"));
	SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR);

	/* transmit parameter register */
	DPRINTFN(6, ("msk_init_yukon: 8\n"));
	SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) |
	    YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) );

	/* serial mode register */
	DPRINTFN(6, ("msk_init_yukon: 9\n"));
	reg = YU_SMR_DATA_BLIND(0x1c) |
	      YU_SMR_MFL_VLAN |
	      YU_SMR_IPG_DATA(0x1e);

	/* FE and FE+ parts cannot accept jumbo frames. */
	if (sc->sk_type != SK_YUKON_FE &&
	    sc->sk_type != SK_YUKON_FE_P)
		reg |= YU_SMR_MFL_JUMBO;

	SK_YU_WRITE_2(sc_if, YUKON_SMR, reg);

	DPRINTFN(6, ("msk_init_yukon: 10\n"));
	/* Setup Yukon's address */
	for (i = 0; i < 3; i++) {
		/* Write Source Address 1 (unicast filter) */
		SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4,
		    sc_if->arpcom.ac_enaddr[i * 2] |
		    sc_if->arpcom.ac_enaddr[i * 2 + 1] << 8);
	}

	/* Source Address 2 comes from the per-port window registers. */
	for (i = 0; i < 3; i++) {
		reg = sk_win_read_2(sc_if->sk_softc,
		    SK_MAC1_0 + i * 2 + sc_if->sk_port * 8);
		SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4, reg);
	}

	/* Program promiscuous mode and multicast filters */
	DPRINTFN(6, ("msk_init_yukon: 11\n"));
	msk_iff(sc_if);

	/* enable interrupt mask for counter overflows */
	DPRINTFN(6, ("msk_init_yukon: 12\n"));
	SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0);
	SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0);
	SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0);

	/* Configure RX MAC FIFO Flush Mask */
	v = YU_RXSTAT_FOFL | YU_RXSTAT_CRCERR | YU_RXSTAT_MIIERR |
	    YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC | YU_RXSTAT_RUNT |
	    YU_RXSTAT_JABBER;
	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_MASK, v);

	/* Configure RX MAC FIFO */
	SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR);
	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_OPERATION_ON |
	    SK_RFCTL_FIFO_FLUSH_ON);

	/* Increase flush threshold to 64 bytes */
	SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_THRESHOLD,
	    SK_RFCTL_FIFO_THRESHOLD + 1);

	/* Configure TX MAC FIFO */
	SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR);
	SK_IF_WRITE_2(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON);

#if 1
	SK_YU_WRITE_2(sc_if, YUKON_GPCR, YU_GPCR_TXEN | YU_GPCR_RXEN);
#endif
	DPRINTFN(6, ("msk_init_yukon: end\n"));
}

/*
 * Note that to properly initialize any part of the GEnesis chip,
 * you first have to take it out of reset mode.
 */
void
msk_init(void *xsc_if)
{
	struct sk_if_softc *sc_if = xsc_if;
	struct sk_softc *sc = sc_if->sk_softc;
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	struct mii_data *mii = &sc_if->sk_mii;
	int s;

	DPRINTFN(2, ("msk_init\n"));

	s = splnet();

	/* Cancel pending I/O and free all RX/TX buffers. */
	msk_stop(sc_if, 0);

	/* Configure I2C registers */

	/* Configure XMAC(s) */
	msk_init_yukon(sc_if);
	mii_mediachg(mii);

	/* Configure transmit arbiter(s) */
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_ON);
#if 0
	    SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON);
#endif

	/* Configure RAMbuffers: program the RAM windows computed at
	 * attach time and turn the buffers on. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);

	SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_CTLTST, SK_RBCTL_STORENFWD_ON);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_START, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_WR_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_RD_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_END, sc_if->sk_tx_ramend);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_CTLTST, SK_RBCTL_ON);

	/* Configure BMUs */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, 0x00000016);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, 0x00000d28);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, 0x00000080);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_WATERMARK, 0x00000600);

	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_BMU_CSR, 0x00000016);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_BMU_CSR, 0x00000d28);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_BMU_CSR, 0x00000080);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_WATERMARK, 0x00000600);

	/* Make sure the sync transmit queue is disabled. */
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET);

	/* Init descriptors */
	if (msk_init_rx_ring(sc_if) == ENOBUFS) {
		printf("%s: initialization failed: no "
		    "memory for rx buffers\n", sc_if->sk_dev.dv_xname);
		msk_stop(sc_if, 0);
		splx(s);
		return;
	}

	if (msk_init_tx_ring(sc_if) == ENOBUFS) {
		printf("%s: initialization failed: no "
		    "memory for tx buffers\n", sc_if->sk_dev.dv_xname);
		msk_stop(sc_if, 0);
		splx(s);
		return;
	}

	/* Initialize prefetch engine.
	 */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_CSR, 0x00000001);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_CSR, 0x00000002);
	SK_IF_WRITE_2(sc_if, 0, SK_RXQ1_Y2_PREF_LIDX, MSK_RX_RING_CNT - 1);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_ADDRLO,
	    MSK_RX_RING_ADDR(sc_if, 0));
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_ADDRHI,
	    (u_int64_t)MSK_RX_RING_ADDR(sc_if, 0) >> 32);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_CSR, 0x00000008);
	SK_IF_READ_4(sc_if, 0, SK_RXQ1_Y2_PREF_CSR);

	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_CSR, 0x00000001);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_CSR, 0x00000002);
	SK_IF_WRITE_2(sc_if, 1, SK_TXQA1_Y2_PREF_LIDX, MSK_TX_RING_CNT - 1);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_ADDRLO,
	    MSK_TX_RING_ADDR(sc_if, 0));
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_ADDRHI,
	    (u_int64_t)MSK_TX_RING_ADDR(sc_if, 0) >> 32);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_CSR, 0x00000008);
	SK_IF_READ_4(sc_if, 1, SK_TXQA1_Y2_PREF_CSR);

	/* Tell the chip where the producers currently stand. */
	SK_IF_WRITE_2(sc_if, 0, SK_RXQ1_Y2_PREF_PUTIDX,
	    sc_if->sk_cdata.sk_rx_prod);
	SK_IF_WRITE_2(sc_if, 1, SK_TXQA1_Y2_PREF_PUTIDX,
	    sc_if->sk_cdata.sk_tx_prod);

	/* Configure interrupt handling */
	if (sc_if->sk_port == SK_PORT_A)
		sc->sk_intrmask |= SK_Y2_INTRS1;
	else
		sc->sk_intrmask |= SK_Y2_INTRS2;
	sc->sk_intrmask |= SK_Y2_IMR_BMU;
	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	timeout_add_sec(&sc_if->sk_tick_ch, 1);

	splx(s);
}

/*
 * msk_stop: stop the interface.  If softonly is not set, the hardware
 * queues, FIFOs and LEDs are shut down as well; in both cases all rx
 * and tx mbufs still held by the driver are unloaded and freed.
 */
void
msk_stop(struct sk_if_softc *sc_if, int softonly)
{
	struct sk_softc *sc = sc_if->sk_softc;
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	struct mbuf *m;
	bus_dmamap_t map;
	int i;

	DPRINTFN(2, ("msk_stop\n"));

	timeout_del(&sc_if->sk_tick_ch);
	timeout_del(&sc_if->sk_tick_rx);

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	/* Stop transfer of Tx descriptors */

	/* Stop transfer of Rx descriptors */

	if (!softonly) {
		/* Turn off various components of this interface. */
		SK_IF_WRITE_1(sc_if,0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET);
		SK_IF_WRITE_1(sc_if,0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET);
		SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE);
		SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
		SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_BMU_CSR, SK_TXBMU_OFFLINE);
		SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF);
		SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF);
		SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP);
		SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_TXLEDCTL_COUNTER_STOP);
		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF);
		SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF);

		/* Put the prefetch engines back into reset. */
		SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_CSR, 0x00000001);
		SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_CSR, 0x00000001);

		/* Disable interrupts */
		if (sc_if->sk_port == SK_PORT_A)
			sc->sk_intrmask &= ~SK_Y2_INTRS1;
		else
			sc->sk_intrmask &= ~SK_Y2_INTRS2;
		CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);
	}

	/* Free RX and TX mbufs still in the queues.
	 */
	for (i = 0; i < MSK_RX_RING_CNT; i++) {
		m = sc_if->sk_cdata.sk_rx_mbuf[i];
		if (m == NULL)
			continue;

		map = sc_if->sk_cdata.sk_rx_maps[i];
		bus_dmamap_sync(sc->sc_dmatag, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmatag, map);

		m_freem(m);

		sc_if->sk_cdata.sk_rx_mbuf[i] = NULL;
	}

	sc_if->sk_cdata.sk_rx_prod = 0;
	sc_if->sk_cdata.sk_rx_cons = 0;

	for (i = 0; i < MSK_TX_RING_CNT; i++) {
		m = sc_if->sk_cdata.sk_tx_mbuf[i];
		if (m == NULL)
			continue;

		map = sc_if->sk_cdata.sk_tx_maps[i];
		bus_dmamap_sync(sc->sc_dmatag, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmatag, map);

		m_freem(m);

		sc_if->sk_cdata.sk_tx_mbuf[i] = NULL;
	}
}

/* Autoconf glue for the controller (mskc) and per-port (msk) devices. */
struct cfattach mskc_ca = {
	sizeof(struct sk_softc), mskc_probe, mskc_attach, mskc_detach,
	mskc_activate
};

struct cfdriver mskc_cd = {
	NULL, "mskc", DV_DULL
};

struct cfattach msk_ca = {
	sizeof(struct sk_if_softc), msk_probe, msk_attach, msk_detach,
	msk_activate
};

struct cfdriver msk_cd = {
	NULL, "msk", DV_IFNET
};

#ifdef MSK_DEBUG
/* Dump the non-zero fields of one tx descriptor. */
void
msk_dump_txdesc(struct msk_tx_desc *le, int idx)
{
#define DESC_PRINT(X)					\
	if (X)						\
		printf("txdesc[%d]." #X "=%#x\n",	\
		    idx, X);

	DESC_PRINT(letoh32(le->sk_addr));
	DESC_PRINT(letoh16(le->sk_len));
	DESC_PRINT(le->sk_ctl);
	DESC_PRINT(le->sk_opcode);
#undef DESC_PRINT
}

/* Hex+ASCII dump of a buffer, 16 bytes per row. */
void
msk_dump_bytes(const char *data, int len)
{
	int c, i, j;

	for (i = 0; i < len; i += 16) {
		printf("%08x  ", i);
		c = len - i;
		if (c > 16) c = 16;

		for (j = 0; j < c; j++) {
			printf("%02x ", data[i + j] & 0xff);
			if ((j & 0xf) == 7 && j > 0)
				printf(" ");
		}

		for (; j < 16; j++)
			printf("   ");
		printf("  ");

		for (j = 0; j < c; j++) {
			int ch = data[i + j] & 0xff;
			printf("%c", ' ' <= ch && ch <= '~' ? ch : ' ');
		}

		printf("\n");

		if (c < 16)
			break;
	}
}

/*
 * Dump an mbuf chain's contents.
 * NOTE(review): "%#d" is not a valid conversion and pointers are printed
 * with %#lx instead of %p — debug-only code, but worth fixing.
 */
void
msk_dump_mbuf(struct mbuf *m)
{
	int count = m->m_pkthdr.len;

	printf("m=%#lx, m->m_pkthdr.len=%#d\n", m, m->m_pkthdr.len);

	while (count > 0 && m) {
		printf("m=%#lx, m->m_data=%#lx, m->m_len=%d\n",
		    m, m->m_data, m->m_len);
		msk_dump_bytes(mtod(m, char *), m->m_len);

		count -= m->m_len;
		m = m->m_next;
	}
}
#endif