1 /* $OpenBSD: if_msk.c,v 1.145 2024/08/31 16:23:09 deraadt Exp $ */ 2 3 /* 4 * Copyright (c) 1997, 1998, 1999, 2000 5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Bill Paul. 18 * 4. Neither the name of the author nor the names of any co-contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 
33 * 34 * $FreeBSD: /c/ncvs/src/sys/pci/if_sk.c,v 1.20 2000/04/22 02:16:37 wpaul Exp $ 35 */ 36 37 /* 38 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu> 39 * 40 * Permission to use, copy, modify, and distribute this software for any 41 * purpose with or without fee is hereby granted, provided that the above 42 * copyright notice and this permission notice appear in all copies. 43 * 44 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 45 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 46 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 47 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 48 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 49 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 50 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 51 */ 52 53 /* 54 * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports 55 * the SK-984x series adapters, both single port and dual port. 56 * References: 57 * The XaQti XMAC II datasheet, 58 * http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf 59 * The SysKonnect GEnesis manual, http://www.syskonnect.com 60 * 61 * Note: XaQti has been acquired by Vitesse, and Vitesse does not have the 62 * XMAC II datasheet online. I have put my copy at people.freebsd.org as a 63 * convenience to others until Vitesse corrects this problem: 64 * 65 * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf 66 * 67 * Written by Bill Paul <wpaul@ee.columbia.edu> 68 * Department of Electrical Engineering 69 * Columbia University, New York City 70 */ 71 72 /* 73 * The SysKonnect gigabit ethernet adapters consist of two main 74 * components: the SysKonnect GEnesis controller chip and the XaQti Corp. 75 * XMAC II gigabit ethernet MAC. 
The XMAC provides all of the MAC 76 * components and a PHY while the GEnesis controller provides a PCI 77 * interface with DMA support. Each card may have between 512K and 78 * 2MB of SRAM on board depending on the configuration. 79 * 80 * The SysKonnect GEnesis controller can have either one or two XMAC 81 * chips connected to it, allowing single or dual port NIC configurations. 82 * SysKonnect has the distinction of being the only vendor on the market 83 * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs, 84 * dual DMA queues, packet/MAC/transmit arbiters and direct access to the 85 * XMAC registers. This driver takes advantage of these features to allow 86 * both XMACs to operate as independent interfaces. 87 */ 88 89 #include "bpfilter.h" 90 #include "kstat.h" 91 92 #include <sys/param.h> 93 #include <sys/systm.h> 94 #include <sys/sockio.h> 95 #include <sys/mbuf.h> 96 #include <sys/malloc.h> 97 #include <sys/timeout.h> 98 #include <sys/device.h> 99 #include <sys/queue.h> 100 101 #include <net/if.h> 102 103 #include <netinet/in.h> 104 #include <netinet/if_ether.h> 105 106 #include <net/if_media.h> 107 108 #if NBPFILTER > 0 109 #include <net/bpf.h> 110 #endif 111 112 #if NKSTAT > 0 113 #include <sys/kstat.h> 114 #endif 115 116 #include <dev/mii/miivar.h> 117 118 #include <dev/pci/pcireg.h> 119 #include <dev/pci/pcivar.h> 120 #include <dev/pci/pcidevs.h> 121 122 #include <dev/pci/if_skreg.h> 123 #include <dev/pci/if_mskvar.h> 124 125 #define MSK_STATUS_OWN_SHIFT 63 126 #define MSK_STATUS_OWN_MASK 0x1 127 #define MSK_STATUS_OPCODE_SHIFT 56 128 #define MSK_STATUS_OPCODE_MASK 0x7f 129 130 #define MSK_STATUS_OWN(_d) \ 131 (((_d) >> MSK_STATUS_OWN_SHIFT) & MSK_STATUS_OWN_MASK) 132 #define MSK_STATUS_OPCODE(_d) \ 133 (((_d) >> MSK_STATUS_OPCODE_SHIFT) & MSK_STATUS_OPCODE_MASK) 134 135 #define MSK_STATUS_OPCODE_RXSTAT 0x60 136 #define MSK_STATUS_OPCODE_RXTIMESTAMP 0x61 137 #define MSK_STATUS_OPCODE_RXVLAN 0x62 138 #define MSK_STATUS_OPCODE_RXCKSUM 
0x64 139 #define MSK_STATUS_OPCODE_RXCKSUMVLAN \ 140 (MSK_STATUS_OPCODE_RXVLAN | MSK_STATUS_OPCODE_RXCKSUM) 141 #define MSK_STATUS_OPCODE_RXTIMEVLAN \ 142 (MSK_STATUS_OPCODE_RXVLAN | MSK_STATUS_OPCODE_RXTIMESTAMP) 143 #define MSK_STATUS_OPCODE_RSS_HASH 0x65 144 #define MSK_STATUS_OPCODE_TXIDX 0x68 145 #define MSK_STATUS_OPCODE_MACSEC 0x6c 146 #define MSK_STATUS_OPCODE_PUTIDX 0x70 147 148 #define MSK_STATUS_RXSTAT_PORT_SHIFT 48 149 #define MSK_STATUS_RXSTAT_PORT_MASK 0x1 150 #define MSK_STATUS_RXSTAT_LEN_SHIFT 32 151 #define MSK_STATUS_RXSTAT_LEN_MASK 0xffff 152 #define MSK_STATUS_RXSTAT_STATUS_SHIFT 0 153 #define MSK_STATUS_RXSTAT_STATUS_MASK 0xffffffff 154 155 #define MSK_STATUS_RXSTAT_PORT(_d) \ 156 (((_d) >> MSK_STATUS_RXSTAT_PORT_SHIFT) & MSK_STATUS_RXSTAT_PORT_MASK) 157 #define MSK_STATUS_RXSTAT_LEN(_d) \ 158 (((_d) >> MSK_STATUS_RXSTAT_LEN_SHIFT) & MSK_STATUS_RXSTAT_LEN_MASK) 159 #define MSK_STATUS_RXSTAT_STATUS(_d) \ 160 (((_d) >> MSK_STATUS_RXSTAT_STATUS_SHIFT) & MSK_STATUS_RXSTAT_STATUS_MASK) 161 162 #define MSK_STATUS_TXIDX_PORTA_SHIFT 0 163 #define MSK_STATUS_TXIDX_PORTA_MASK 0xfff 164 #define MSK_STATUS_TXIDX_PORTB_SHIFT 24 165 #define MSK_STATUS_TXIDX_PORTB_MASK 0xfff 166 167 #define MSK_STATUS_TXIDX_PORTA(_d) \ 168 (((_d) >> MSK_STATUS_TXIDX_PORTA_SHIFT) & MSK_STATUS_TXIDX_PORTA_MASK) 169 #define MSK_STATUS_TXIDX_PORTB(_d) \ 170 (((_d) >> MSK_STATUS_TXIDX_PORTB_SHIFT) & MSK_STATUS_TXIDX_PORTB_MASK) 171 172 int mskc_probe(struct device *, void *, void *); 173 void mskc_attach(struct device *, struct device *self, void *aux); 174 int mskc_detach(struct device *, int); 175 int mskc_activate(struct device *, int); 176 void mskc_reset(struct sk_softc *); 177 int msk_probe(struct device *, void *, void *); 178 void msk_attach(struct device *, struct device *self, void *aux); 179 int msk_detach(struct device *, int); 180 int msk_activate(struct device *, int); 181 void msk_reset(struct sk_if_softc *); 182 int mskcprint(void *, const char *); 183 int 
msk_intr(void *); 184 void msk_intr_yukon(struct sk_if_softc *); 185 static inline int msk_rxvalid(struct sk_softc *, u_int32_t, u_int32_t); 186 void msk_rxeof(struct sk_if_softc *, struct mbuf_list *, uint16_t, uint32_t); 187 void msk_txeof(struct sk_if_softc *, unsigned int); 188 static unsigned int msk_encap(struct sk_if_softc *, struct mbuf *, uint32_t); 189 void msk_start(struct ifnet *); 190 int msk_ioctl(struct ifnet *, u_long, caddr_t); 191 void msk_init(void *); 192 void msk_init_yukon(struct sk_if_softc *); 193 void msk_stop(struct sk_if_softc *, int); 194 void msk_watchdog(struct ifnet *); 195 int msk_ifmedia_upd(struct ifnet *); 196 void msk_ifmedia_sts(struct ifnet *, struct ifmediareq *); 197 static int msk_newbuf(struct sk_if_softc *); 198 int msk_init_rx_ring(struct sk_if_softc *); 199 int msk_init_tx_ring(struct sk_if_softc *); 200 void msk_fill_rx_ring(struct sk_if_softc *); 201 202 int msk_miibus_readreg(struct device *, int, int); 203 void msk_miibus_writereg(struct device *, int, int, int); 204 void msk_miibus_statchg(struct device *); 205 206 void msk_iff(struct sk_if_softc *); 207 void msk_tick(void *); 208 void msk_fill_rx_tick(void *); 209 210 #ifdef MSK_DEBUG 211 #define DPRINTF(x) if (mskdebug) printf x 212 #define DPRINTFN(n,x) if (mskdebug >= (n)) printf x 213 int mskdebug = 0; 214 215 void msk_dump_txdesc(struct msk_tx_desc *, int); 216 void msk_dump_mbuf(struct mbuf *); 217 void msk_dump_bytes(const char *, int); 218 #else 219 #define DPRINTF(x) 220 #define DPRINTFN(n,x) 221 #endif 222 223 #if NKSTAT > 0 224 struct msk_mib { 225 const char *name; 226 uint32_t reg; 227 enum kstat_kv_type type; 228 enum kstat_kv_unit unit; 229 }; 230 231 #define C32 KSTAT_KV_T_COUNTER32 232 #define C64 KSTAT_KV_T_COUNTER64 233 234 #define PKTS KSTAT_KV_U_PACKETS 235 #define BYTES KSTAT_KV_U_BYTES 236 #define NONE KSTAT_KV_U_NONE 237 238 static const struct msk_mib msk_mib[] = { 239 { "InUnicasts", 0x100, C32, PKTS }, 240 { "InBroadcasts", 0x108, C32, 
PKTS }, 241 { "InPause", 0x110, C32, PKTS }, 242 { "InMulticasts", 0x118, C32, PKTS }, 243 { "InFCSErr", 0x120, C32, PKTS }, 244 { "InGoodOctets", 0x130, C64, BYTES }, 245 { "InBadOctets", 0x140, C64, BYTES }, 246 { "Undersize", 0x150, C32, PKTS }, 247 { "Fragments", 0x158, C32, PKTS }, 248 { "In64Octets", 0x160, C32, PKTS }, 249 { "In127Octets", 0x168, C32, PKTS }, 250 { "In255Octets", 0x170, C32, PKTS }, 251 { "In511Octets", 0x178, C32, PKTS }, 252 { "In1023Octets", 0x180, C32, PKTS }, 253 { "In1518Octets", 0x188, C32, PKTS }, 254 { "InMaxOctets", 0x190, C32, PKTS }, 255 { "OverSize", 0x198, C32, PKTS }, 256 { "Jabber", 0x1a8, C32, PKTS }, 257 { "Overflow", 0x1b0, C32, PKTS }, 258 259 { "OutUnicasts", 0x1c0, C32, PKTS }, 260 { "OutBroadcasts", 0x1c8, C32, PKTS }, 261 { "OutPause", 0x1d0, C32, PKTS }, 262 { "OutMulticasts", 0x1d8, C32, PKTS }, 263 { "OutOctets", 0x1e0, C64, BYTES }, 264 { "Out64Octets", 0x1f0, C32, PKTS }, 265 { "Out127Octets", 0x1f8, C32, PKTS }, 266 { "Out255Octets", 0x200, C32, PKTS }, 267 { "Out511Octets", 0x208, C32, PKTS }, 268 { "Out1023Octets", 0x210, C32, PKTS }, 269 { "Out1518Octets", 0x218, C32, PKTS }, 270 { "OutMaxOctets", 0x220, C32, PKTS }, 271 { "Collisions", 0x230, C32, NONE }, 272 { "Late", 0x238, C32, NONE }, 273 { "Excessive", 0x240, C32, PKTS }, 274 { "Multiple", 0x248, C32, PKTS }, 275 { "Single", 0x250, C32, PKTS }, 276 { "Underflow", 0x258, C32, PKTS }, 277 }; 278 279 #undef C32 280 #undef C64 281 282 #undef PKTS 283 #undef BYTES 284 #undef NONE 285 286 struct msk_kstat { 287 struct rwlock lock; 288 struct kstat *ks; 289 }; 290 291 static uint32_t msk_mib_read32(struct sk_if_softc *, uint32_t); 292 static uint64_t msk_mib_read64(struct sk_if_softc *, uint32_t); 293 294 void msk_kstat_attach(struct sk_if_softc *); 295 void msk_kstat_detach(struct sk_if_softc *); 296 int msk_kstat_read(struct kstat *ks); 297 #endif 298 299 /* supported device vendors */ 300 const struct pci_matchid mskc_devices[] = { 301 { PCI_VENDOR_DLINK, 
PCI_PRODUCT_DLINK_DGE550SX }, 302 { PCI_VENDOR_DLINK, PCI_PRODUCT_DLINK_DGE550T_B1 }, 303 { PCI_VENDOR_DLINK, PCI_PRODUCT_DLINK_DGE560SX }, 304 { PCI_VENDOR_DLINK, PCI_PRODUCT_DLINK_DGE560T }, 305 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8021CU }, 306 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8021X }, 307 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8022CU }, 308 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8022X }, 309 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8035 }, 310 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8036 }, 311 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8038 }, 312 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8039 }, 313 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8040 }, 314 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8040T }, 315 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8042 }, 316 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8048 }, 317 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8050 }, 318 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8052 }, 319 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8053 }, 320 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8055 }, 321 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8055_2 }, 322 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8056 }, 323 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8057 }, 324 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8058 }, 325 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8059 }, 326 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8061CU }, 327 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8061X }, 328 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8062CU }, 329 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8062X }, 330 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8070 }, 331 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8071 }, 332 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8072 }, 333 { PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_8075 }, 334 { PCI_VENDOR_MARVELL, 
PCI_PRODUCT_MARVELL_YUKON_8079 },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_C032 },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_C033 },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_C034 },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_C036 },
	{ PCI_VENDOR_MARVELL, PCI_PRODUCT_MARVELL_YUKON_C042 },
	{ PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK9EXX },
	{ PCI_VENDOR_SCHNEIDERKOCH, PCI_PRODUCT_SCHNEIDERKOCH_SK9SXX }
};

/*
 * Chip register accessors.  These are thin wrappers around the CSR_*
 * bus-space macros; they exist so callers can name the access width
 * explicitly (read/write 1, 2 or 4 bytes).
 */
static inline u_int32_t
sk_win_read_4(struct sk_softc *sc, u_int32_t reg)
{
	return CSR_READ_4(sc, reg);
}

static inline u_int16_t
sk_win_read_2(struct sk_softc *sc, u_int32_t reg)
{
	return CSR_READ_2(sc, reg);
}

static inline u_int8_t
sk_win_read_1(struct sk_softc *sc, u_int32_t reg)
{
	return CSR_READ_1(sc, reg);
}

static inline void
sk_win_write_4(struct sk_softc *sc, u_int32_t reg, u_int32_t x)
{
	CSR_WRITE_4(sc, reg, x);
}

static inline void
sk_win_write_2(struct sk_softc *sc, u_int32_t reg, u_int16_t x)
{
	CSR_WRITE_2(sc, reg, x);
}

static inline void
sk_win_write_1(struct sk_softc *sc, u_int32_t reg, u_int8_t x)
{
	CSR_WRITE_1(sc, reg, x);
}

/*
 * Read a PHY register through the Yukon SMI interface.
 * Busy-waits (up to SK_TIMEOUT iterations of DELAY(1)) for the read to
 * become valid; on timeout a diagnostic is printed and 0 is returned.
 */
int
msk_miibus_readreg(struct device *dev, int phy, int reg)
{
	struct sk_if_softc *sc_if = (struct sk_if_softc *)dev;
	u_int16_t val;
	int i;

	/* Kick off the read: select PHY address, register, and READ op. */
	SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
	    YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ);

	/* Poll until the SMI controller flags the data as valid. */
	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		val = SK_YU_READ_2(sc_if, YUKON_SMICR);
		if (val & YU_SMICR_READ_VALID)
			break;
	}

	if (i == SK_TIMEOUT) {
		printf("%s: phy failed to come ready\n",
		    sc_if->sk_dev.dv_xname);
		return (0);
	}

	DPRINTFN(9, ("msk_miibus_readreg: i=%d, timeout=%d\n", i,
	    SK_TIMEOUT));

	/* Fetch the data latched by the completed SMI read. */
	val = SK_YU_READ_2(sc_if, YUKON_SMIDR);

	DPRINTFN(9,
("msk_miibus_readreg phy=%d, reg=%#x, val=%#x\n", 409 phy, reg, val)); 410 411 return (val); 412 } 413 414 void 415 msk_miibus_writereg(struct device *dev, int phy, int reg, int val) 416 { 417 struct sk_if_softc *sc_if = (struct sk_if_softc *)dev; 418 int i; 419 420 DPRINTFN(9, ("msk_miibus_writereg phy=%d reg=%#x val=%#x\n", 421 phy, reg, val)); 422 423 SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val); 424 SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) | 425 YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE); 426 427 for (i = 0; i < SK_TIMEOUT; i++) { 428 DELAY(1); 429 if (!(SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY)) 430 break; 431 } 432 433 if (i == SK_TIMEOUT) 434 printf("%s: phy write timed out\n", sc_if->sk_dev.dv_xname); 435 } 436 437 void 438 msk_miibus_statchg(struct device *dev) 439 { 440 struct sk_if_softc *sc_if = (struct sk_if_softc *)dev; 441 struct mii_data *mii = &sc_if->sk_mii; 442 struct ifmedia_entry *ife = mii->mii_media.ifm_cur; 443 int gpcr; 444 445 gpcr = SK_YU_READ_2(sc_if, YUKON_GPCR); 446 gpcr &= (YU_GPCR_TXEN | YU_GPCR_RXEN); 447 448 if (IFM_SUBTYPE(ife->ifm_media) != IFM_AUTO || 449 sc_if->sk_softc->sk_type == SK_YUKON_FE_P) { 450 /* Set speed. */ 451 gpcr |= YU_GPCR_SPEED_DIS; 452 switch (IFM_SUBTYPE(mii->mii_media_active)) { 453 case IFM_1000_SX: 454 case IFM_1000_LX: 455 case IFM_1000_CX: 456 case IFM_1000_T: 457 gpcr |= (YU_GPCR_GIG | YU_GPCR_SPEED); 458 break; 459 case IFM_100_TX: 460 gpcr |= YU_GPCR_SPEED; 461 break; 462 } 463 464 /* Set duplex. */ 465 gpcr |= YU_GPCR_DPLX_DIS; 466 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) 467 gpcr |= YU_GPCR_DUPLEX; 468 469 /* Disable flow control. 
*/ 470 gpcr |= YU_GPCR_FCTL_DIS; 471 gpcr |= (YU_GPCR_FCTL_TX_DIS | YU_GPCR_FCTL_RX_DIS); 472 } 473 474 SK_YU_WRITE_2(sc_if, YUKON_GPCR, gpcr); 475 476 DPRINTFN(9, ("msk_miibus_statchg: gpcr=%x\n", 477 SK_YU_READ_2(((struct sk_if_softc *)dev), YUKON_GPCR))); 478 } 479 480 void 481 msk_iff(struct sk_if_softc *sc_if) 482 { 483 struct ifnet *ifp = &sc_if->arpcom.ac_if; 484 struct arpcom *ac = &sc_if->arpcom; 485 struct ether_multi *enm; 486 struct ether_multistep step; 487 u_int32_t hashes[2]; 488 u_int16_t rcr; 489 int h; 490 491 rcr = SK_YU_READ_2(sc_if, YUKON_RCR); 492 rcr &= ~(YU_RCR_MUFLEN | YU_RCR_UFLEN); 493 ifp->if_flags &= ~IFF_ALLMULTI; 494 495 /* 496 * Always accept frames destined to our station address. 497 */ 498 rcr |= YU_RCR_UFLEN; 499 500 if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) { 501 ifp->if_flags |= IFF_ALLMULTI; 502 if (ifp->if_flags & IFF_PROMISC) 503 rcr &= ~YU_RCR_UFLEN; 504 else 505 rcr |= YU_RCR_MUFLEN; 506 hashes[0] = hashes[1] = 0xFFFFFFFF; 507 } else { 508 rcr |= YU_RCR_MUFLEN; 509 /* Program new filter. 
*/ 510 bzero(hashes, sizeof(hashes)); 511 512 ETHER_FIRST_MULTI(step, ac, enm); 513 while (enm != NULL) { 514 h = ether_crc32_be(enm->enm_addrlo, 515 ETHER_ADDR_LEN) & ((1 << SK_HASH_BITS) - 1); 516 517 if (h < 32) 518 hashes[0] |= (1 << h); 519 else 520 hashes[1] |= (1 << (h - 32)); 521 522 ETHER_NEXT_MULTI(step, enm); 523 } 524 } 525 526 SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff); 527 SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff); 528 SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff); 529 SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff); 530 SK_YU_WRITE_2(sc_if, YUKON_RCR, rcr); 531 } 532 533 int 534 msk_init_rx_ring(struct sk_if_softc *sc_if) 535 { 536 struct msk_ring_data *rd = sc_if->sk_rdata; 537 struct msk_rx_desc *r; 538 539 memset(rd->sk_rx_ring, 0, sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT); 540 541 r = &rd->sk_rx_ring[0]; 542 r->sk_addr = htole32(0); 543 r->sk_opcode = SK_Y2_RXOPC_OWN | SK_Y2_RXOPC_ADDR64; 544 545 sc_if->sk_cdata.sk_rx_prod = 1; 546 sc_if->sk_cdata.sk_rx_cons = 0; 547 sc_if->sk_cdata.sk_rx_hiaddr = 0; 548 549 /* 550 * up to two ring entries per packet, so the effective ring size is 551 * halved 552 */ 553 if_rxr_init(&sc_if->sk_cdata.sk_rx_ring, 2, (MSK_RX_RING_CNT/2) - 1); 554 555 msk_fill_rx_ring(sc_if); 556 return (0); 557 } 558 559 int 560 msk_init_tx_ring(struct sk_if_softc *sc_if) 561 { 562 struct sk_softc *sc = sc_if->sk_softc; 563 struct msk_ring_data *rd = sc_if->sk_rdata; 564 struct msk_tx_desc *t; 565 int i; 566 567 memset(rd->sk_tx_ring, 0, sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT); 568 569 for (i = 0; i < MSK_TX_RING_CNT; i++) { 570 if (bus_dmamap_create(sc->sc_dmatag, sc_if->sk_pktlen, 571 SK_NTXSEG, sc_if->sk_pktlen, 0, 572 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT, 573 &sc_if->sk_cdata.sk_tx_maps[i])) 574 return (ENOBUFS); 575 } 576 577 t = &rd->sk_tx_ring[0]; 578 t->sk_addr = htole32(0); 579 t->sk_opcode = SK_Y2_TXOPC_OWN | SK_Y2_TXOPC_ADDR64; 580 581 
sc_if->sk_cdata.sk_tx_prod = 1;
	sc_if->sk_cdata.sk_tx_cons = 0;
	sc_if->sk_cdata.sk_tx_hiaddr = 0;

	MSK_CDTXSYNC(sc_if, 0, MSK_TX_RING_CNT, BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * Allocate and DMA-load one receive buffer and enqueue it on the RX
 * ring.  If the buffer's high 32 address bits differ from the last
 * ones given to the chip, an extra ADDR64 descriptor is emitted first
 * (hence a packet can consume two ring slots).  Returns 1 on success,
 * 0 if allocation or DMA load failed.
 */
static int
msk_newbuf(struct sk_if_softc *sc_if)
{
	struct msk_ring_data *rd = sc_if->sk_rdata;
	struct msk_rx_desc *r;
	struct mbuf *m;
	bus_dmamap_t map;
	uint64_t addr;
	uint32_t prod, head;
	uint32_t hiaddr;
	/* Over-allocate by ETHER_ALIGN so the payload can be realigned. */
	unsigned int pktlen = sc_if->sk_pktlen + ETHER_ALIGN;

	m = MCLGETL(NULL, M_DONTWAIT, pktlen);
	if (m == NULL)
		return (0);
	m->m_len = m->m_pkthdr.len = pktlen;
	m_adj(m, ETHER_ALIGN);

	prod = sc_if->sk_cdata.sk_rx_prod;
	map = sc_if->sk_cdata.sk_rx_maps[prod];

	if (bus_dmamap_load_mbuf(sc_if->sk_softc->sc_dmatag, map, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (0);
	}

	bus_dmamap_sync(sc_if->sk_softc->sc_dmatag, map, 0,
	    map->dm_mapsize, BUS_DMASYNC_PREREAD);

	head = prod;

	/* high 32 bits of address */
	addr = map->dm_segs[0].ds_addr;
	hiaddr = addr >> 32;
	if (sc_if->sk_cdata.sk_rx_hiaddr != hiaddr) {
		/* Tell the chip about the new upper address half. */
		r = &rd->sk_rx_ring[prod];
		htolem32(&r->sk_addr, hiaddr);
		r->sk_len = htole16(0);
		r->sk_ctl = 0;
		r->sk_opcode = SK_Y2_RXOPC_OWN | SK_Y2_RXOPC_ADDR64;

		sc_if->sk_cdata.sk_rx_hiaddr = hiaddr;

		SK_INC(prod, MSK_RX_RING_CNT);
	}

	r = &rd->sk_rx_ring[prod];
	htolem32(&r->sk_addr, addr);
	htolem16(&r->sk_len, map->dm_segs[0].ds_len);
	r->sk_ctl = 0;
	r->sk_opcode = SK_Y2_RXOPC_OWN | SK_Y2_RXOPC_PACKET;

	/*
	 * The loaded map must live in the slot holding the PACKET
	 * descriptor; swap it with whatever map sat in that slot.
	 */
	sc_if->sk_cdata.sk_rx_maps[head] = sc_if->sk_cdata.sk_rx_maps[prod];
	sc_if->sk_cdata.sk_rx_maps[prod] = map;

	sc_if->sk_cdata.sk_rx_mbuf[prod] = m;

	SK_INC(prod, MSK_RX_RING_CNT);
	sc_if->sk_cdata.sk_rx_prod = prod;

	return (1);
}

/*
 * Set media options.
656 */ 657 int 658 msk_ifmedia_upd(struct ifnet *ifp) 659 { 660 struct sk_if_softc *sc_if = ifp->if_softc; 661 662 mii_mediachg(&sc_if->sk_mii); 663 return (0); 664 } 665 666 /* 667 * Report current media status. 668 */ 669 void 670 msk_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 671 { 672 struct sk_if_softc *sc_if = ifp->if_softc; 673 674 mii_pollstat(&sc_if->sk_mii); 675 ifmr->ifm_active = sc_if->sk_mii.mii_media_active; 676 ifmr->ifm_status = sc_if->sk_mii.mii_media_status; 677 } 678 679 int 680 msk_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 681 { 682 struct sk_if_softc *sc_if = ifp->if_softc; 683 struct ifreq *ifr = (struct ifreq *) data; 684 struct mii_data *mii; 685 int s, error = 0; 686 687 s = splnet(); 688 689 switch(command) { 690 case SIOCSIFADDR: 691 ifp->if_flags |= IFF_UP; 692 if (!(ifp->if_flags & IFF_RUNNING)) 693 msk_init(sc_if); 694 break; 695 696 case SIOCSIFFLAGS: 697 if (ifp->if_flags & IFF_UP) { 698 if (ifp->if_flags & IFF_RUNNING) 699 error = ENETRESET; 700 else 701 msk_init(sc_if); 702 } else { 703 if (ifp->if_flags & IFF_RUNNING) 704 msk_stop(sc_if, 0); 705 } 706 break; 707 708 case SIOCGIFMEDIA: 709 case SIOCSIFMEDIA: 710 mii = &sc_if->sk_mii; 711 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 712 break; 713 714 case SIOCGIFRXR: 715 error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data, 716 NULL, sc_if->sk_pktlen, &sc_if->sk_cdata.sk_rx_ring); 717 break; 718 719 default: 720 error = ether_ioctl(ifp, &sc_if->arpcom, command, data); 721 } 722 723 if (error == ENETRESET) { 724 if (ifp->if_flags & IFF_RUNNING) 725 msk_iff(sc_if); 726 error = 0; 727 } 728 729 splx(s); 730 return (error); 731 } 732 733 /* 734 * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device 735 * IDs against our list and return a device name if we find a match. 
736 */ 737 int 738 mskc_probe(struct device *parent, void *match, void *aux) 739 { 740 return (pci_matchbyid((struct pci_attach_args *)aux, mskc_devices, 741 nitems(mskc_devices))); 742 } 743 744 /* 745 * Force the GEnesis into reset, then bring it out of reset. 746 */ 747 void 748 mskc_reset(struct sk_softc *sc) 749 { 750 u_int32_t imtimer_ticks, reg1; 751 int reg; 752 unsigned int i; 753 754 DPRINTFN(2, ("mskc_reset\n")); 755 756 CSR_WRITE_1(sc, SK_CSR, SK_CSR_SW_RESET); 757 CSR_WRITE_1(sc, SK_CSR, SK_CSR_MASTER_RESET); 758 759 DELAY(1000); 760 CSR_WRITE_1(sc, SK_CSR, SK_CSR_SW_UNRESET); 761 DELAY(2); 762 CSR_WRITE_1(sc, SK_CSR, SK_CSR_MASTER_UNRESET); 763 764 sk_win_write_1(sc, SK_TESTCTL1, 2); 765 766 if (sc->sk_type == SK_YUKON_EC_U || sc->sk_type == SK_YUKON_EX || 767 sc->sk_type >= SK_YUKON_FE_P) { 768 /* enable all clocks. */ 769 sk_win_write_4(sc, SK_Y2_PCI_REG(SK_PCI_OURREG3), 0); 770 reg1 = sk_win_read_4(sc, SK_Y2_PCI_REG(SK_PCI_OURREG4)); 771 reg1 &= (SK_Y2_REG4_FORCE_ASPM_REQUEST| 772 SK_Y2_REG4_ASPM_GPHY_LINK_DOWN| 773 SK_Y2_REG4_ASPM_INT_FIFO_EMPTY| 774 SK_Y2_REG4_ASPM_CLKRUN_REQUEST); 775 sk_win_write_4(sc, SK_Y2_PCI_REG(SK_PCI_OURREG4), reg1); 776 777 reg1 = sk_win_read_4(sc, SK_Y2_PCI_REG(SK_PCI_OURREG5)); 778 reg1 &= SK_Y2_REG5_TIM_VMAIN_AV_MASK; 779 sk_win_write_4(sc, SK_Y2_PCI_REG(SK_PCI_OURREG5), reg1); 780 sk_win_write_4(sc, SK_Y2_PCI_REG(SK_PCI_CFGREG1), 0); 781 782 /* 783 * Disable status race, workaround for Yukon EC Ultra & 784 * Yukon EX. 
785 */ 786 reg1 = sk_win_read_4(sc, SK_GPIO); 787 reg1 |= SK_Y2_GPIO_STAT_RACE_DIS; 788 sk_win_write_4(sc, SK_GPIO, reg1); 789 sk_win_read_4(sc, SK_GPIO); 790 } 791 792 reg1 = sk_win_read_4(sc, SK_Y2_PCI_REG(SK_PCI_OURREG1)); 793 if (sc->sk_type == SK_YUKON_XL && sc->sk_rev > SK_YUKON_XL_REV_A1) 794 reg1 |= (SK_Y2_REG1_PHY1_COMA | SK_Y2_REG1_PHY2_COMA); 795 else 796 reg1 &= ~(SK_Y2_REG1_PHY1_COMA | SK_Y2_REG1_PHY2_COMA); 797 sk_win_write_4(sc, SK_Y2_PCI_REG(SK_PCI_OURREG1), reg1); 798 799 if (sc->sk_type == SK_YUKON_XL && sc->sk_rev > SK_YUKON_XL_REV_A1) 800 sk_win_write_1(sc, SK_Y2_CLKGATE, 801 SK_Y2_CLKGATE_LINK1_GATE_DIS | 802 SK_Y2_CLKGATE_LINK2_GATE_DIS | 803 SK_Y2_CLKGATE_LINK1_CORE_DIS | 804 SK_Y2_CLKGATE_LINK2_CORE_DIS | 805 SK_Y2_CLKGATE_LINK1_PCI_DIS | SK_Y2_CLKGATE_LINK2_PCI_DIS); 806 else 807 sk_win_write_1(sc, SK_Y2_CLKGATE, 0); 808 809 CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET); 810 CSR_WRITE_2(sc, SK_LINK_CTRL + SK_WIN_LEN, SK_LINK_RESET_SET); 811 DELAY(1000); 812 CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR); 813 CSR_WRITE_2(sc, SK_LINK_CTRL + SK_WIN_LEN, SK_LINK_RESET_CLEAR); 814 815 if (sc->sk_type == SK_YUKON_EX || sc->sk_type == SK_YUKON_SUPR) { 816 CSR_WRITE_2(sc, SK_GMAC_CTRL, SK_GMAC_BYP_MACSECRX | 817 SK_GMAC_BYP_MACSECTX | SK_GMAC_BYP_RETR_FIFO); 818 } 819 820 sk_win_write_1(sc, SK_TESTCTL1, 1); 821 822 DPRINTFN(2, ("mskc_reset: sk_csr=%x\n", CSR_READ_1(sc, SK_CSR))); 823 DPRINTFN(2, ("mskc_reset: sk_link_ctrl=%x\n", 824 CSR_READ_2(sc, SK_LINK_CTRL))); 825 826 /* Disable ASF */ 827 CSR_WRITE_1(sc, SK_Y2_ASF_CSR, SK_Y2_ASF_RESET); 828 CSR_WRITE_2(sc, SK_CSR, SK_CSR_ASF_OFF); 829 830 /* Clear I2C IRQ noise */ 831 CSR_WRITE_4(sc, SK_I2CHWIRQ, 1); 832 833 /* Disable hardware timer */ 834 CSR_WRITE_1(sc, SK_TIMERCTL, SK_IMCTL_STOP); 835 CSR_WRITE_1(sc, SK_TIMERCTL, SK_IMCTL_IRQ_CLEAR); 836 837 /* Disable descriptor polling */ 838 CSR_WRITE_4(sc, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_STOP); 839 840 /* Disable time stamps */ 841 
CSR_WRITE_1(sc, SK_TSTAMP_CTL, SK_TSTAMP_STOP); 842 CSR_WRITE_1(sc, SK_TSTAMP_CTL, SK_TSTAMP_IRQ_CLEAR); 843 844 /* Enable RAM interface */ 845 sk_win_write_1(sc, SK_RAMCTL, SK_RAMCTL_UNRESET); 846 for (reg = SK_TO0;reg <= SK_TO11; reg++) 847 sk_win_write_1(sc, reg, 36); 848 sk_win_write_1(sc, SK_RAMCTL + (SK_WIN_LEN / 2), SK_RAMCTL_UNRESET); 849 for (reg = SK_TO0;reg <= SK_TO11; reg++) 850 sk_win_write_1(sc, reg + (SK_WIN_LEN / 2), 36); 851 852 /* 853 * Configure interrupt moderation. The moderation timer 854 * defers interrupts specified in the interrupt moderation 855 * timer mask based on the timeout specified in the interrupt 856 * moderation timer init register. Each bit in the timer 857 * register represents one tick, so to specify a timeout in 858 * microseconds, we have to multiply by the correct number of 859 * ticks-per-microsecond. 860 */ 861 switch (sc->sk_type) { 862 case SK_YUKON_EC: 863 case SK_YUKON_EC_U: 864 case SK_YUKON_EX: 865 case SK_YUKON_SUPR: 866 case SK_YUKON_ULTRA2: 867 case SK_YUKON_OPTIMA: 868 case SK_YUKON_PRM: 869 case SK_YUKON_OPTIMA2: 870 imtimer_ticks = SK_IMTIMER_TICKS_YUKON_EC; 871 break; 872 case SK_YUKON_FE: 873 imtimer_ticks = SK_IMTIMER_TICKS_YUKON_FE; 874 break; 875 case SK_YUKON_FE_P: 876 imtimer_ticks = SK_IMTIMER_TICKS_YUKON_FE_P; 877 break; 878 case SK_YUKON_XL: 879 imtimer_ticks = SK_IMTIMER_TICKS_YUKON_XL; 880 break; 881 default: 882 imtimer_ticks = SK_IMTIMER_TICKS_YUKON; 883 break; 884 } 885 886 /* Reset status ring. 
*/ 887 for (i = 0; i < MSK_STATUS_RING_CNT; i++) 888 sc->sk_status_ring[i] = htole64(0); 889 sc->sk_status_idx = 0; 890 891 sk_win_write_4(sc, SK_STAT_BMU_CSR, SK_STAT_BMU_RESET); 892 sk_win_write_4(sc, SK_STAT_BMU_CSR, SK_STAT_BMU_UNRESET); 893 894 sk_win_write_2(sc, SK_STAT_BMU_LIDX, MSK_STATUS_RING_CNT - 1); 895 sk_win_write_4(sc, SK_STAT_BMU_ADDRLO, 896 sc->sk_status_map->dm_segs[0].ds_addr); 897 sk_win_write_4(sc, SK_STAT_BMU_ADDRHI, 898 (u_int64_t)sc->sk_status_map->dm_segs[0].ds_addr >> 32); 899 sk_win_write_2(sc, SK_STAT_BMU_TX_THRESH, 10); 900 sk_win_write_1(sc, SK_STAT_BMU_FIFOWM, 16); 901 sk_win_write_1(sc, SK_STAT_BMU_FIFOIWM, 16); 902 903 #if 0 904 sk_win_write_4(sc, SK_Y2_LEV_ITIMERINIT, SK_IM_USECS(100)); 905 sk_win_write_4(sc, SK_Y2_TX_ITIMERINIT, SK_IM_USECS(1000)); 906 sk_win_write_4(sc, SK_Y2_ISR_ITIMERINIT, SK_IM_USECS(20)); 907 #else 908 sk_win_write_4(sc, SK_Y2_ISR_ITIMERINIT, SK_IM_USECS(4)); 909 #endif 910 911 sk_win_write_4(sc, SK_STAT_BMU_CSR, SK_STAT_BMU_ON); 912 913 sk_win_write_1(sc, SK_Y2_LEV_ITIMERCTL, SK_IMCTL_START); 914 sk_win_write_1(sc, SK_Y2_TX_ITIMERCTL, SK_IMCTL_START); 915 sk_win_write_1(sc, SK_Y2_ISR_ITIMERCTL, SK_IMCTL_START); 916 } 917 918 int 919 msk_probe(struct device *parent, void *match, void *aux) 920 { 921 struct skc_attach_args *sa = aux; 922 923 if (sa->skc_port != SK_PORT_A && sa->skc_port != SK_PORT_B) 924 return (0); 925 926 switch (sa->skc_type) { 927 case SK_YUKON_XL: 928 case SK_YUKON_EC_U: 929 case SK_YUKON_EX: 930 case SK_YUKON_EC: 931 case SK_YUKON_FE: 932 case SK_YUKON_FE_P: 933 case SK_YUKON_SUPR: 934 case SK_YUKON_ULTRA2: 935 case SK_YUKON_OPTIMA: 936 case SK_YUKON_PRM: 937 case SK_YUKON_OPTIMA2: 938 return (1); 939 } 940 941 return (0); 942 } 943 944 void 945 msk_reset(struct sk_if_softc *sc_if) 946 { 947 /* GMAC and GPHY Reset */ 948 SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET); 949 SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET); 950 DELAY(1000); 951 SK_IF_WRITE_4(sc_if, 0, 
SK_GPHY_CTRL, SK_GPHY_RESET_CLEAR); 952 SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF | 953 SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR); 954 } 955 956 /* 957 * Each XMAC chip is attached as a separate logical IP interface. 958 * Single port cards will have only one logical interface of course. 959 */ 960 void 961 msk_attach(struct device *parent, struct device *self, void *aux) 962 { 963 struct sk_if_softc *sc_if = (struct sk_if_softc *)self; 964 struct sk_softc *sc = (struct sk_softc *)parent; 965 struct skc_attach_args *sa = aux; 966 struct ifnet *ifp; 967 caddr_t kva; 968 int i; 969 u_int32_t chunk; 970 int mii_flags; 971 int error; 972 973 sc_if->sk_port = sa->skc_port; 974 sc_if->sk_softc = sc; 975 sc->sk_if[sa->skc_port] = sc_if; 976 977 DPRINTFN(2, ("begin msk_attach: port=%d\n", sc_if->sk_port)); 978 979 /* 980 * Get station address for this interface. Note that 981 * dual port cards actually come with three station 982 * addresses: one for each port, plus an extra. The 983 * extra one is used by the SysKonnect driver software 984 * as a 'virtual' station address for when both ports 985 * are operating in failover mode. Currently we don't 986 * use this extra address. 987 */ 988 for (i = 0; i < ETHER_ADDR_LEN; i++) 989 sc_if->arpcom.ac_enaddr[i] = 990 sk_win_read_1(sc, SK_MAC0_0 + (sa->skc_port * 8) + i); 991 992 printf(": address %s\n", 993 ether_sprintf(sc_if->arpcom.ac_enaddr)); 994 995 /* 996 * Set up RAM buffer addresses. The Yukon2 has a small amount 997 * of SRAM on it, somewhere between 4K and 48K. We need to 998 * divide this up between the transmitter and receiver. We 999 * give the receiver 2/3 of the memory (rounded down), and the 1000 * transmitter whatever remains. 
1001 */ 1002 chunk = (2 * (sc->sk_ramsize / sizeof(u_int64_t)) / 3) & ~0xff; 1003 sc_if->sk_rx_ramstart = 0; 1004 sc_if->sk_rx_ramend = sc_if->sk_rx_ramstart + chunk - 1; 1005 chunk = (sc->sk_ramsize / sizeof(u_int64_t)) - chunk; 1006 sc_if->sk_tx_ramstart = sc_if->sk_rx_ramend + 1; 1007 sc_if->sk_tx_ramend = sc_if->sk_tx_ramstart + chunk - 1; 1008 1009 DPRINTFN(2, ("msk_attach: rx_ramstart=%#x rx_ramend=%#x\n" 1010 " tx_ramstart=%#x tx_ramend=%#x\n", 1011 sc_if->sk_rx_ramstart, sc_if->sk_rx_ramend, 1012 sc_if->sk_tx_ramstart, sc_if->sk_tx_ramend)); 1013 1014 /* Allocate the descriptor queues. */ 1015 if (bus_dmamem_alloc(sc->sc_dmatag, sizeof(struct msk_ring_data), 1016 PAGE_SIZE, 0, &sc_if->sk_ring_seg, 1, &sc_if->sk_ring_nseg, 1017 BUS_DMA_NOWAIT | BUS_DMA_ZERO)) { 1018 printf(": can't alloc rx buffers\n"); 1019 goto fail; 1020 } 1021 if (bus_dmamem_map(sc->sc_dmatag, &sc_if->sk_ring_seg, 1022 sc_if->sk_ring_nseg, 1023 sizeof(struct msk_ring_data), &kva, BUS_DMA_NOWAIT)) { 1024 printf(": can't map dma buffers (%lu bytes)\n", 1025 (ulong)sizeof(struct msk_ring_data)); 1026 goto fail_1; 1027 } 1028 if (bus_dmamap_create(sc->sc_dmatag, sizeof(struct msk_ring_data), 1, 1029 sizeof(struct msk_ring_data), 0, 1030 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT, 1031 &sc_if->sk_ring_map)) { 1032 printf(": can't create dma map\n"); 1033 goto fail_2; 1034 } 1035 if (bus_dmamap_load(sc->sc_dmatag, sc_if->sk_ring_map, kva, 1036 sizeof(struct msk_ring_data), NULL, BUS_DMA_NOWAIT)) { 1037 printf(": can't load dma map\n"); 1038 goto fail_3; 1039 } 1040 sc_if->sk_rdata = (struct msk_ring_data *)kva; 1041 1042 if (sc->sk_type != SK_YUKON_FE && 1043 sc->sk_type != SK_YUKON_FE_P) 1044 sc_if->sk_pktlen = SK_JLEN; 1045 else 1046 sc_if->sk_pktlen = MCLBYTES; 1047 1048 for (i = 0; i < MSK_RX_RING_CNT; i++) { 1049 if ((error = bus_dmamap_create(sc->sc_dmatag, 1050 sc_if->sk_pktlen, 1, sc_if->sk_pktlen, 0, 1051 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT, 1052 
&sc_if->sk_cdata.sk_rx_maps[i])) != 0) { 1053 printf("\n%s: unable to create rx DMA map %d, " 1054 "error = %d\n", sc->sk_dev.dv_xname, i, error); 1055 goto fail_4; 1056 } 1057 } 1058 1059 ifp = &sc_if->arpcom.ac_if; 1060 ifp->if_softc = sc_if; 1061 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1062 ifp->if_ioctl = msk_ioctl; 1063 ifp->if_start = msk_start; 1064 ifp->if_watchdog = msk_watchdog; 1065 if (sc->sk_type != SK_YUKON_FE && 1066 sc->sk_type != SK_YUKON_FE_P) 1067 ifp->if_hardmtu = SK_JUMBO_MTU; 1068 ifq_init_maxlen(&ifp->if_snd, MSK_TX_RING_CNT - 1); 1069 bcopy(sc_if->sk_dev.dv_xname, ifp->if_xname, IFNAMSIZ); 1070 1071 ifp->if_capabilities = IFCAP_VLAN_MTU; 1072 1073 msk_reset(sc_if); 1074 1075 /* 1076 * Do miibus setup. 1077 */ 1078 msk_init_yukon(sc_if); 1079 1080 DPRINTFN(2, ("msk_attach: 1\n")); 1081 1082 sc_if->sk_mii.mii_ifp = ifp; 1083 sc_if->sk_mii.mii_readreg = msk_miibus_readreg; 1084 sc_if->sk_mii.mii_writereg = msk_miibus_writereg; 1085 sc_if->sk_mii.mii_statchg = msk_miibus_statchg; 1086 1087 ifmedia_init(&sc_if->sk_mii.mii_media, 0, 1088 msk_ifmedia_upd, msk_ifmedia_sts); 1089 mii_flags = MIIF_DOPAUSE; 1090 if (sc->sk_fibertype) 1091 mii_flags |= MIIF_HAVEFIBER; 1092 mii_attach(self, &sc_if->sk_mii, 0xffffffff, 0, 1093 MII_OFFSET_ANY, mii_flags); 1094 if (LIST_FIRST(&sc_if->sk_mii.mii_phys) == NULL) { 1095 printf("%s: no PHY found!\n", sc_if->sk_dev.dv_xname); 1096 ifmedia_add(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_MANUAL, 1097 0, NULL); 1098 ifmedia_set(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_MANUAL); 1099 } else 1100 ifmedia_set(&sc_if->sk_mii.mii_media, IFM_ETHER|IFM_AUTO); 1101 1102 timeout_set(&sc_if->sk_tick_ch, msk_tick, sc_if); 1103 timeout_set(&sc_if->sk_tick_rx, msk_fill_rx_tick, sc_if); 1104 1105 /* 1106 * Call MI attach routines. 
1107 */ 1108 if_attach(ifp); 1109 ether_ifattach(ifp); 1110 1111 #if NKSTAT > 0 1112 msk_kstat_attach(sc_if); 1113 #endif 1114 1115 DPRINTFN(2, ("msk_attach: end\n")); 1116 return; 1117 1118 fail_4: 1119 for (i = 0; i < MSK_RX_RING_CNT; i++) { 1120 if (sc_if->sk_cdata.sk_rx_maps[i] != NULL) 1121 bus_dmamap_destroy(sc->sc_dmatag, 1122 sc_if->sk_cdata.sk_rx_maps[i]); 1123 } 1124 1125 fail_3: 1126 bus_dmamap_destroy(sc->sc_dmatag, sc_if->sk_ring_map); 1127 fail_2: 1128 bus_dmamem_unmap(sc->sc_dmatag, kva, sizeof(struct msk_ring_data)); 1129 fail_1: 1130 bus_dmamem_free(sc->sc_dmatag, &sc_if->sk_ring_seg, sc_if->sk_ring_nseg); 1131 fail: 1132 sc->sk_if[sa->skc_port] = NULL; 1133 } 1134 1135 int 1136 msk_detach(struct device *self, int flags) 1137 { 1138 struct sk_if_softc *sc_if = (struct sk_if_softc *)self; 1139 struct sk_softc *sc = sc_if->sk_softc; 1140 struct ifnet *ifp= &sc_if->arpcom.ac_if; 1141 1142 if (sc->sk_if[sc_if->sk_port] == NULL) 1143 return (0); 1144 1145 msk_stop(sc_if, 1); 1146 1147 #if NKSTAT > 0 1148 msk_kstat_detach(sc_if); 1149 #endif 1150 1151 /* Detach any PHYs we might have. */ 1152 if (LIST_FIRST(&sc_if->sk_mii.mii_phys) != NULL) 1153 mii_detach(&sc_if->sk_mii, MII_PHY_ANY, MII_OFFSET_ANY); 1154 1155 /* Delete any remaining media. 
*/ 1156 ifmedia_delete_instance(&sc_if->sk_mii.mii_media, IFM_INST_ANY); 1157 1158 ether_ifdetach(ifp); 1159 if_detach(ifp); 1160 1161 bus_dmamem_unmap(sc->sc_dmatag, (caddr_t)sc_if->sk_rdata, 1162 sizeof(struct msk_ring_data)); 1163 bus_dmamem_free(sc->sc_dmatag, 1164 &sc_if->sk_ring_seg, sc_if->sk_ring_nseg); 1165 bus_dmamap_destroy(sc->sc_dmatag, sc_if->sk_ring_map); 1166 sc->sk_if[sc_if->sk_port] = NULL; 1167 1168 return (0); 1169 } 1170 1171 int 1172 msk_activate(struct device *self, int act) 1173 { 1174 struct sk_if_softc *sc_if = (void *)self; 1175 struct ifnet *ifp = &sc_if->arpcom.ac_if; 1176 1177 switch (act) { 1178 case DVACT_RESUME: 1179 msk_reset(sc_if); 1180 if (ifp->if_flags & IFF_RUNNING) 1181 msk_init(sc_if); 1182 break; 1183 } 1184 return (0); 1185 } 1186 1187 int 1188 mskcprint(void *aux, const char *pnp) 1189 { 1190 struct skc_attach_args *sa = aux; 1191 1192 if (pnp) 1193 printf("msk port %c at %s", 1194 (sa->skc_port == SK_PORT_A) ? 'A' : 'B', pnp); 1195 else 1196 printf(" port %c", (sa->skc_port == SK_PORT_A) ? 'A' : 'B'); 1197 return (UNCONF); 1198 } 1199 1200 /* 1201 * Attach the interface. Allocate softc structures, do ifmedia 1202 * setup and ethernet/BPF attach. 1203 */ 1204 void 1205 mskc_attach(struct device *parent, struct device *self, void *aux) 1206 { 1207 struct sk_softc *sc = (struct sk_softc *)self; 1208 struct pci_attach_args *pa = aux; 1209 struct skc_attach_args skca; 1210 pci_chipset_tag_t pc = pa->pa_pc; 1211 pcireg_t memtype; 1212 pci_intr_handle_t ih; 1213 const char *intrstr = NULL; 1214 u_int8_t hw, pmd; 1215 char *revstr = NULL; 1216 caddr_t kva; 1217 1218 DPRINTFN(2, ("begin mskc_attach\n")); 1219 1220 pci_set_powerstate(pa->pa_pc, pa->pa_tag, PCI_PMCSR_STATE_D0); 1221 1222 /* 1223 * Map control/status registers. 
1224 */ 1225 memtype = pci_mapreg_type(pc, pa->pa_tag, SK_PCI_LOMEM); 1226 if (pci_mapreg_map(pa, SK_PCI_LOMEM, memtype, 0, &sc->sk_btag, 1227 &sc->sk_bhandle, NULL, &sc->sk_bsize, 0)) { 1228 printf(": can't map mem space\n"); 1229 return; 1230 } 1231 1232 sc->sc_dmatag = pa->pa_dmat; 1233 1234 sc->sk_type = sk_win_read_1(sc, SK_CHIPVER); 1235 sc->sk_rev = (sk_win_read_1(sc, SK_CONFIG) >> 4); 1236 1237 /* bail out here if chip is not recognized */ 1238 if (!(SK_IS_YUKON2(sc))) { 1239 printf(": unknown chip type: %d\n", sc->sk_type); 1240 goto fail_1; 1241 } 1242 DPRINTFN(2, ("mskc_attach: allocate interrupt\n")); 1243 1244 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_MARVELL) { 1245 switch (PCI_PRODUCT(pa->pa_id)) { 1246 case PCI_PRODUCT_MARVELL_YUKON_8036: 1247 case PCI_PRODUCT_MARVELL_YUKON_8053: 1248 pa->pa_flags &= ~PCI_FLAGS_MSI_ENABLED; 1249 } 1250 } 1251 1252 /* Allocate interrupt */ 1253 if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) { 1254 printf(": couldn't map interrupt\n"); 1255 goto fail_1; 1256 } 1257 1258 intrstr = pci_intr_string(pc, ih); 1259 sc->sk_intrhand = pci_intr_establish(pc, ih, IPL_NET, msk_intr, sc, 1260 self->dv_xname); 1261 if (sc->sk_intrhand == NULL) { 1262 printf(": couldn't establish interrupt"); 1263 if (intrstr != NULL) 1264 printf(" at %s", intrstr); 1265 printf("\n"); 1266 goto fail_1; 1267 } 1268 sc->sk_pc = pc; 1269 1270 if (bus_dmamem_alloc(sc->sc_dmatag, 1271 MSK_STATUS_RING_CNT * sizeof(uint64_t), 1272 MSK_STATUS_RING_CNT * sizeof(uint64_t), 1273 0, &sc->sk_status_seg, 1, &sc->sk_status_nseg, 1274 BUS_DMA_NOWAIT | BUS_DMA_ZERO)) { 1275 printf(": can't alloc status buffers\n"); 1276 goto fail_2; 1277 } 1278 1279 if (bus_dmamem_map(sc->sc_dmatag, 1280 &sc->sk_status_seg, sc->sk_status_nseg, 1281 MSK_STATUS_RING_CNT * sizeof(uint64_t), 1282 &kva, BUS_DMA_NOWAIT)) { 1283 printf(": can't map dma buffers (%zu bytes)\n", 1284 MSK_STATUS_RING_CNT * sizeof(uint64_t)); 1285 goto fail_3; 1286 } 1287 if 
(bus_dmamap_create(sc->sc_dmatag, 1288 MSK_STATUS_RING_CNT * sizeof(uint64_t), 1, 1289 MSK_STATUS_RING_CNT * sizeof(uint64_t), 0, 1290 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT, 1291 &sc->sk_status_map)) { 1292 printf(": can't create dma map\n"); 1293 goto fail_4; 1294 } 1295 if (bus_dmamap_load(sc->sc_dmatag, sc->sk_status_map, kva, 1296 MSK_STATUS_RING_CNT * sizeof(uint64_t), 1297 NULL, BUS_DMA_NOWAIT)) { 1298 printf(": can't load dma map\n"); 1299 goto fail_5; 1300 } 1301 sc->sk_status_ring = (uint64_t *)kva; 1302 1303 /* Reset the adapter. */ 1304 mskc_reset(sc); 1305 1306 sc->sk_ramsize = sk_win_read_1(sc, SK_EPROM0) * 4096; 1307 DPRINTFN(2, ("mskc_attach: ramsize=%dK\n", sc->sk_ramsize / 1024)); 1308 1309 pmd = sk_win_read_1(sc, SK_PMDTYPE); 1310 if (pmd == 'L' || pmd == 'S' || pmd == 'P') 1311 sc->sk_fibertype = 1; 1312 1313 switch (sc->sk_type) { 1314 case SK_YUKON_XL: 1315 sc->sk_name = "Yukon-2 XL"; 1316 break; 1317 case SK_YUKON_EC_U: 1318 sc->sk_name = "Yukon-2 EC Ultra"; 1319 break; 1320 case SK_YUKON_EX: 1321 sc->sk_name = "Yukon-2 Extreme"; 1322 break; 1323 case SK_YUKON_EC: 1324 sc->sk_name = "Yukon-2 EC"; 1325 break; 1326 case SK_YUKON_FE: 1327 sc->sk_name = "Yukon-2 FE"; 1328 break; 1329 case SK_YUKON_FE_P: 1330 sc->sk_name = "Yukon-2 FE+"; 1331 break; 1332 case SK_YUKON_SUPR: 1333 sc->sk_name = "Yukon-2 Supreme"; 1334 break; 1335 case SK_YUKON_ULTRA2: 1336 sc->sk_name = "Yukon-2 Ultra 2"; 1337 break; 1338 case SK_YUKON_OPTIMA: 1339 sc->sk_name = "Yukon-2 Optima"; 1340 break; 1341 case SK_YUKON_PRM: 1342 sc->sk_name = "Yukon-2 Optima Prime"; 1343 break; 1344 case SK_YUKON_OPTIMA2: 1345 sc->sk_name = "Yukon-2 Optima 2"; 1346 break; 1347 default: 1348 sc->sk_name = "Yukon (Unknown)"; 1349 } 1350 1351 if (sc->sk_type == SK_YUKON_XL) { 1352 switch (sc->sk_rev) { 1353 case SK_YUKON_XL_REV_A0: 1354 revstr = "A0"; 1355 break; 1356 case SK_YUKON_XL_REV_A1: 1357 revstr = "A1"; 1358 break; 1359 case SK_YUKON_XL_REV_A2: 1360 revstr = "A2"; 1361 
break; 1362 case SK_YUKON_XL_REV_A3: 1363 revstr = "A3"; 1364 break; 1365 default: 1366 ; 1367 } 1368 } 1369 1370 if (sc->sk_type == SK_YUKON_EC) { 1371 switch (sc->sk_rev) { 1372 case SK_YUKON_EC_REV_A1: 1373 revstr = "A1"; 1374 break; 1375 case SK_YUKON_EC_REV_A2: 1376 revstr = "A2"; 1377 break; 1378 case SK_YUKON_EC_REV_A3: 1379 revstr = "A3"; 1380 break; 1381 default: 1382 ; 1383 } 1384 } 1385 1386 if (sc->sk_type == SK_YUKON_EC_U) { 1387 switch (sc->sk_rev) { 1388 case SK_YUKON_EC_U_REV_A0: 1389 revstr = "A0"; 1390 break; 1391 case SK_YUKON_EC_U_REV_A1: 1392 revstr = "A1"; 1393 break; 1394 case SK_YUKON_EC_U_REV_B0: 1395 revstr = "B0"; 1396 break; 1397 case SK_YUKON_EC_U_REV_B1: 1398 revstr = "B1"; 1399 break; 1400 default: 1401 ; 1402 } 1403 } 1404 1405 if (sc->sk_type == SK_YUKON_FE) { 1406 switch (sc->sk_rev) { 1407 case SK_YUKON_FE_REV_A1: 1408 revstr = "A1"; 1409 break; 1410 case SK_YUKON_FE_REV_A2: 1411 revstr = "A2"; 1412 break; 1413 default: 1414 ; 1415 } 1416 } 1417 1418 if (sc->sk_type == SK_YUKON_FE_P && sc->sk_rev == SK_YUKON_FE_P_REV_A0) 1419 revstr = "A0"; 1420 1421 if (sc->sk_type == SK_YUKON_EX) { 1422 switch (sc->sk_rev) { 1423 case SK_YUKON_EX_REV_A0: 1424 revstr = "A0"; 1425 break; 1426 case SK_YUKON_EX_REV_B0: 1427 revstr = "B0"; 1428 break; 1429 default: 1430 ; 1431 } 1432 } 1433 1434 if (sc->sk_type == SK_YUKON_SUPR) { 1435 switch (sc->sk_rev) { 1436 case SK_YUKON_SUPR_REV_A0: 1437 revstr = "A0"; 1438 break; 1439 case SK_YUKON_SUPR_REV_B0: 1440 revstr = "B0"; 1441 break; 1442 case SK_YUKON_SUPR_REV_B1: 1443 revstr = "B1"; 1444 break; 1445 default: 1446 ; 1447 } 1448 } 1449 1450 if (sc->sk_type == SK_YUKON_PRM) { 1451 switch (sc->sk_rev) { 1452 case SK_YUKON_PRM_REV_Z1: 1453 revstr = "Z1"; 1454 break; 1455 case SK_YUKON_PRM_REV_A0: 1456 revstr = "A0"; 1457 break; 1458 default: 1459 ; 1460 } 1461 } 1462 1463 /* Announce the product name. */ 1464 printf(", %s", sc->sk_name); 1465 if (revstr != NULL) 1466 printf(" rev. 
%s", revstr); 1467 printf(" (0x%x): %s\n", sc->sk_rev, intrstr); 1468 1469 sc->sk_macs = 1; 1470 1471 hw = sk_win_read_1(sc, SK_Y2_HWRES); 1472 if ((hw & SK_Y2_HWRES_LINK_MASK) == SK_Y2_HWRES_LINK_DUAL) { 1473 if ((sk_win_read_1(sc, SK_Y2_CLKGATE) & 1474 SK_Y2_CLKGATE_LINK2_INACTIVE) == 0) 1475 sc->sk_macs++; 1476 } 1477 1478 skca.skc_port = SK_PORT_A; 1479 skca.skc_type = sc->sk_type; 1480 skca.skc_rev = sc->sk_rev; 1481 (void)config_found(&sc->sk_dev, &skca, mskcprint); 1482 1483 if (sc->sk_macs > 1) { 1484 skca.skc_port = SK_PORT_B; 1485 skca.skc_type = sc->sk_type; 1486 skca.skc_rev = sc->sk_rev; 1487 (void)config_found(&sc->sk_dev, &skca, mskcprint); 1488 } 1489 1490 /* Turn on the 'driver is loaded' LED. */ 1491 CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON); 1492 1493 return; 1494 1495 fail_4: 1496 bus_dmamem_unmap(sc->sc_dmatag, (caddr_t)sc->sk_status_ring, 1497 MSK_STATUS_RING_CNT * sizeof(uint64_t)); 1498 fail_3: 1499 bus_dmamem_free(sc->sc_dmatag, 1500 &sc->sk_status_seg, sc->sk_status_nseg); 1501 sc->sk_status_nseg = 0; 1502 fail_5: 1503 bus_dmamap_destroy(sc->sc_dmatag, sc->sk_status_map); 1504 fail_2: 1505 pci_intr_disestablish(sc->sk_pc, sc->sk_intrhand); 1506 sc->sk_intrhand = NULL; 1507 fail_1: 1508 bus_space_unmap(sc->sk_btag, sc->sk_bhandle, sc->sk_bsize); 1509 sc->sk_bsize = 0; 1510 } 1511 1512 int 1513 mskc_detach(struct device *self, int flags) 1514 { 1515 struct sk_softc *sc = (struct sk_softc *)self; 1516 int rv; 1517 1518 if (sc->sk_intrhand) 1519 pci_intr_disestablish(sc->sk_pc, sc->sk_intrhand); 1520 1521 rv = config_detach_children(self, flags); 1522 if (rv != 0) 1523 return (rv); 1524 1525 if (sc->sk_status_nseg > 0) { 1526 bus_dmamap_destroy(sc->sc_dmatag, sc->sk_status_map); 1527 bus_dmamem_unmap(sc->sc_dmatag, (caddr_t)sc->sk_status_ring, 1528 MSK_STATUS_RING_CNT * sizeof(uint64_t)); 1529 bus_dmamem_free(sc->sc_dmatag, 1530 &sc->sk_status_seg, sc->sk_status_nseg); 1531 } 1532 1533 if (sc->sk_bsize > 0) 1534 bus_space_unmap(sc->sk_btag, 
sc->sk_bhandle, sc->sk_bsize); 1535 1536 return(0); 1537 } 1538 1539 int 1540 mskc_activate(struct device *self, int act) 1541 { 1542 struct sk_softc *sc = (void *)self; 1543 int rv = 0; 1544 1545 switch (act) { 1546 case DVACT_RESUME: 1547 mskc_reset(sc); 1548 rv = config_activate_children(self, act); 1549 break; 1550 default: 1551 rv = config_activate_children(self, act); 1552 break; 1553 } 1554 return (rv); 1555 } 1556 1557 static unsigned int 1558 msk_encap(struct sk_if_softc *sc_if, struct mbuf *m, uint32_t prod) 1559 { 1560 struct sk_softc *sc = sc_if->sk_softc; 1561 struct msk_ring_data *rd = sc_if->sk_rdata; 1562 struct msk_tx_desc *t; 1563 bus_dmamap_t map; 1564 uint64_t addr; 1565 uint32_t hiaddr; 1566 uint32_t next, last; 1567 uint8_t opcode; 1568 unsigned int entries = 0; 1569 int i; 1570 1571 map = sc_if->sk_cdata.sk_tx_maps[prod]; 1572 1573 switch (bus_dmamap_load_mbuf(sc->sc_dmatag, map, m, 1574 BUS_DMA_STREAMING | BUS_DMA_NOWAIT)) { 1575 case 0: 1576 break; 1577 case EFBIG: /* mbuf chain is too fragmented */ 1578 if (m_defrag(m, M_DONTWAIT) == 0 && 1579 bus_dmamap_load_mbuf(sc->sc_dmatag, map, m, 1580 BUS_DMA_STREAMING | BUS_DMA_NOWAIT) == 0) 1581 break; 1582 /* FALLTHROUGH */ 1583 default: 1584 return (0); 1585 } 1586 1587 bus_dmamap_sync(sc->sc_dmatag, map, 0, map->dm_mapsize, 1588 BUS_DMASYNC_PREWRITE); 1589 1590 opcode = SK_Y2_TXOPC_OWN | SK_Y2_TXOPC_PACKET; 1591 next = prod; 1592 for (i = 0; i < map->dm_nsegs; i++) { 1593 /* high 32 bits of address */ 1594 addr = map->dm_segs[i].ds_addr; 1595 hiaddr = addr >> 32; 1596 if (sc_if->sk_cdata.sk_tx_hiaddr != hiaddr) { 1597 t = &rd->sk_tx_ring[next]; 1598 htolem32(&t->sk_addr, hiaddr); 1599 t->sk_opcode = SK_Y2_TXOPC_OWN | SK_Y2_TXOPC_ADDR64; 1600 1601 sc_if->sk_cdata.sk_tx_hiaddr = hiaddr; 1602 1603 SK_INC(next, MSK_TX_RING_CNT); 1604 entries++; 1605 } 1606 1607 /* low 32 bits of address + length */ 1608 t = &rd->sk_tx_ring[next]; 1609 htolem32(&t->sk_addr, addr); 1610 htolem16(&t->sk_len, 
map->dm_segs[i].ds_len); 1611 t->sk_ctl = 0; 1612 t->sk_opcode = opcode; 1613 1614 last = next; 1615 SK_INC(next, MSK_TX_RING_CNT); 1616 entries++; 1617 1618 opcode = SK_Y2_TXOPC_OWN | SK_Y2_TXOPC_BUFFER; 1619 } 1620 t->sk_ctl = SK_Y2_TXCTL_LASTFRAG; 1621 1622 sc_if->sk_cdata.sk_tx_maps[prod] = sc_if->sk_cdata.sk_tx_maps[last]; 1623 sc_if->sk_cdata.sk_tx_maps[last] = map; 1624 sc_if->sk_cdata.sk_tx_mbuf[last] = m; 1625 1626 return (entries); 1627 } 1628 1629 void 1630 msk_start(struct ifnet *ifp) 1631 { 1632 struct sk_if_softc *sc_if = ifp->if_softc; 1633 struct mbuf *m = NULL; 1634 uint32_t prod, free, used; 1635 int post = 0; 1636 1637 prod = sc_if->sk_cdata.sk_tx_prod; 1638 free = sc_if->sk_cdata.sk_tx_cons; 1639 if (free <= prod) 1640 free += MSK_TX_RING_CNT; 1641 free -= prod; 1642 1643 MSK_CDTXSYNC(sc_if, 0, MSK_TX_RING_CNT, BUS_DMASYNC_POSTWRITE); 1644 1645 for (;;) { 1646 if (free <= SK_NTXSEG * 2) { 1647 ifq_set_oactive(&ifp->if_snd); 1648 break; 1649 } 1650 1651 m = ifq_dequeue(&ifp->if_snd); 1652 if (m == NULL) 1653 break; 1654 1655 used = msk_encap(sc_if, m, prod); 1656 if (used == 0) { 1657 m_freem(m); 1658 continue; 1659 } 1660 1661 free -= used; 1662 prod += used; 1663 prod &= MSK_TX_RING_CNT - 1; 1664 1665 #if NBPFILTER > 0 1666 if (ifp->if_bpf) 1667 bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT); 1668 #endif 1669 post = 1; 1670 } 1671 1672 MSK_CDTXSYNC(sc_if, 0, MSK_TX_RING_CNT, BUS_DMASYNC_PREWRITE); 1673 1674 if (post == 0) 1675 return; 1676 1677 /* Transmit */ 1678 sc_if->sk_cdata.sk_tx_prod = prod; 1679 SK_IF_WRITE_2(sc_if, 1, SK_TXQA1_Y2_PREF_PUTIDX, prod); 1680 1681 /* Set a timeout in case the chip goes out to lunch. 
*/ 1682 ifp->if_timer = MSK_TX_TIMEOUT; 1683 } 1684 1685 void 1686 msk_watchdog(struct ifnet *ifp) 1687 { 1688 struct sk_if_softc *sc_if = ifp->if_softc; 1689 1690 if (sc_if->sk_cdata.sk_tx_prod != sc_if->sk_cdata.sk_tx_cons) { 1691 printf("%s: watchdog timeout\n", sc_if->sk_dev.dv_xname); 1692 1693 ifp->if_oerrors++; 1694 1695 /* XXX Resets both ports; we shouldn't do that. */ 1696 mskc_reset(sc_if->sk_softc); 1697 msk_reset(sc_if); 1698 msk_init(sc_if); 1699 } 1700 } 1701 1702 static inline int 1703 msk_rxvalid(struct sk_softc *sc, u_int32_t stat, u_int32_t len) 1704 { 1705 if ((stat & (YU_RXSTAT_CRCERR | YU_RXSTAT_LONGERR | 1706 YU_RXSTAT_MIIERR | YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC | 1707 YU_RXSTAT_JABBER)) != 0 || 1708 (stat & YU_RXSTAT_RXOK) != YU_RXSTAT_RXOK || 1709 YU_RXSTAT_BYTES(stat) != len) 1710 return (0); 1711 1712 return (1); 1713 } 1714 1715 void 1716 msk_rxeof(struct sk_if_softc *sc_if, struct mbuf_list *ml, 1717 uint16_t len, uint32_t rxstat) 1718 { 1719 struct sk_softc *sc = sc_if->sk_softc; 1720 struct ifnet *ifp = &sc_if->arpcom.ac_if; 1721 struct mbuf *m = NULL; 1722 int prod, cons, tail; 1723 bus_dmamap_t map; 1724 1725 prod = sc_if->sk_cdata.sk_rx_prod; 1726 cons = sc_if->sk_cdata.sk_rx_cons; 1727 1728 while (cons != prod) { 1729 tail = cons; 1730 SK_INC(cons, MSK_RX_RING_CNT); 1731 1732 m = sc_if->sk_cdata.sk_rx_mbuf[tail]; 1733 if (m != NULL) { 1734 /* found it */ 1735 break; 1736 } 1737 } 1738 sc_if->sk_cdata.sk_rx_cons = cons; 1739 1740 if (m == NULL) { 1741 /* maybe if ADDR64 is consumed? 
*/ 1742 return; 1743 } 1744 1745 sc_if->sk_cdata.sk_rx_mbuf[tail] = NULL; 1746 1747 map = sc_if->sk_cdata.sk_rx_maps[tail]; 1748 if_rxr_put(&sc_if->sk_cdata.sk_rx_ring, 1); 1749 1750 bus_dmamap_sync(sc_if->sk_softc->sc_dmatag, map, 0, map->dm_mapsize, 1751 BUS_DMASYNC_POSTREAD); 1752 bus_dmamap_unload(sc_if->sk_softc->sc_dmatag, map); 1753 1754 if (len < SK_MIN_FRAMELEN || len > SK_JUMBO_FRAMELEN || 1755 msk_rxvalid(sc, rxstat, len) == 0) { 1756 ifp->if_ierrors++; 1757 m_freem(m); 1758 return; 1759 } 1760 1761 m->m_pkthdr.len = m->m_len = len; 1762 1763 ml_enqueue(ml, m); 1764 } 1765 1766 void 1767 msk_txeof(struct sk_if_softc *sc_if, unsigned int prod) 1768 { 1769 struct ifnet *ifp = &sc_if->arpcom.ac_if; 1770 struct sk_softc *sc = sc_if->sk_softc; 1771 uint32_t cons; 1772 struct mbuf *m; 1773 bus_dmamap_t map; 1774 1775 /* 1776 * Go through our tx ring and free mbufs for those 1777 * frames that have been sent. 1778 */ 1779 cons = sc_if->sk_cdata.sk_tx_cons; 1780 1781 if (cons == prod) 1782 return; 1783 1784 while (cons != prod) { 1785 m = sc_if->sk_cdata.sk_tx_mbuf[cons]; 1786 if (m != NULL) { 1787 sc_if->sk_cdata.sk_tx_mbuf[cons] = NULL; 1788 1789 map = sc_if->sk_cdata.sk_tx_maps[cons]; 1790 bus_dmamap_sync(sc->sc_dmatag, map, 0, 1791 map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1792 bus_dmamap_unload(sc->sc_dmatag, map); 1793 1794 m_freem(m); 1795 } 1796 1797 SK_INC(cons, MSK_TX_RING_CNT); 1798 } 1799 if (cons == sc_if->sk_cdata.sk_tx_prod) 1800 ifp->if_timer = 0; 1801 1802 sc_if->sk_cdata.sk_tx_cons = cons; 1803 1804 if (ifq_is_oactive(&ifp->if_snd)) 1805 ifq_restart(&ifp->if_snd); 1806 } 1807 1808 void 1809 msk_fill_rx_ring(struct sk_if_softc *sc_if) 1810 { 1811 u_int slots, used; 1812 1813 slots = if_rxr_get(&sc_if->sk_cdata.sk_rx_ring, MSK_RX_RING_CNT/2); 1814 1815 MSK_CDRXSYNC(sc_if, 0, BUS_DMASYNC_POSTWRITE); /* XXX */ 1816 while (slots > 0) { 1817 used = msk_newbuf(sc_if); 1818 if (used == 0) 1819 break; 1820 1821 slots -= used; 1822 } 1823 
MSK_CDRXSYNC(sc_if, 0, BUS_DMASYNC_PREWRITE);	/* XXX */

	/*
	 * Return the slots we could not fill.  If the ring ended up
	 * completely empty, retry from a timeout rather than waiting
	 * for rx completions that can never arrive.
	 */
	if_rxr_put(&sc_if->sk_cdata.sk_rx_ring, slots);
	if (if_rxr_inuse(&sc_if->sk_cdata.sk_rx_ring) == 0)
		timeout_add(&sc_if->sk_tick_rx, 1);
}

/*
 * Timeout handler: if the rx ring is still empty, try to refill it
 * and hand the new producer index to the prefetch unit.
 */
void
msk_fill_rx_tick(void *xsc_if)
{
	struct sk_if_softc *sc_if = xsc_if;
	int s;

	s = splnet();
	if (if_rxr_inuse(&sc_if->sk_cdata.sk_rx_ring) == 0) {
		msk_fill_rx_ring(sc_if);
		SK_IF_WRITE_2(sc_if, 0, SK_RXQ1_Y2_PREF_PUTIDX,
		    sc_if->sk_cdata.sk_rx_prod);
	}
	splx(s);
}

/*
 * Once-a-second tick: drive the MII state machine and reschedule.
 */
void
msk_tick(void *xsc_if)
{
	struct sk_if_softc *sc_if = xsc_if;
	struct mii_data *mii = &sc_if->sk_mii;
	int s;

	s = splnet();
	mii_tick(mii);
	splx(s);
	timeout_add_sec(&sc_if->sk_tick_ch, 1);
}

/*
 * Service per-MAC (GMAC) interrupt causes: acknowledge rx FIFO
 * overruns and tx FIFO underruns via the MAC FIFO test registers.
 */
void
msk_intr_yukon(struct sk_if_softc *sc_if)
{
	u_int8_t status;

	status = SK_IF_READ_1(sc_if, 0, SK_GMAC_ISR);
	/* RX overrun */
	if ((status & SK_GMAC_INT_RX_OVER) != 0) {
		SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST,
		    SK_RFCTL_RX_FIFO_OVER);
	}
	/* TX underrun */
	if ((status & SK_GMAC_INT_TX_UNDER) != 0) {
		SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST,
		    SK_TFCTL_TX_FIFO_UNDER);
	}

	DPRINTFN(2, ("msk_intr_yukon status=%#x\n", status));
}

/*
 * Main interrupt handler, shared by both ports.  Walks the status
 * ring the chip DMAs event descriptors into, dispatches rx/tx
 * completions per port, then refills the rx rings of any port that
 * received frames.
 */
int
msk_intr(void *xsc)
{
	struct sk_softc *sc = xsc;
	struct sk_if_softc *sc_if0 = sc->sk_if[SK_PORT_A];
	struct sk_if_softc *sc_if1 = sc->sk_if[SK_PORT_B];
	struct mbuf_list ml[2] = {
		MBUF_LIST_INITIALIZER(),
		MBUF_LIST_INITIALIZER(),
	};
	struct ifnet *ifp0 = NULL, *ifp1 = NULL;
	int claimed = 0;
	u_int32_t status;
	uint64_t *ring = sc->sk_status_ring;
	uint64_t desc;

	/*
	 * All-ones presumably means the device has disappeared from
	 * the bus; zero means the interrupt is not ours.
	 */
	status = CSR_READ_4(sc, SK_Y2_ISSR2);
	if (status == 0xffffffff)
		return (0);
	if (status == 0) {
		CSR_WRITE_4(sc, SK_Y2_ICR, 2);
		return (0);
	}

	status =
CSR_READ_4(sc, SK_ISR); 1903 1904 if (sc_if0 != NULL) 1905 ifp0 = &sc_if0->arpcom.ac_if; 1906 if (sc_if1 != NULL) 1907 ifp1 = &sc_if1->arpcom.ac_if; 1908 1909 if (sc_if0 && (status & SK_Y2_IMR_MAC1) && 1910 (ifp0->if_flags & IFF_RUNNING)) { 1911 msk_intr_yukon(sc_if0); 1912 } 1913 1914 if (sc_if1 && (status & SK_Y2_IMR_MAC2) && 1915 (ifp1->if_flags & IFF_RUNNING)) { 1916 msk_intr_yukon(sc_if1); 1917 } 1918 1919 MSK_CDSTSYNC(sc, sc->sk_status_idx, 1920 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1921 1922 while (MSK_STATUS_OWN(desc = lemtoh64(&ring[sc->sk_status_idx]))) { 1923 unsigned int opcode, port; 1924 1925 ring[sc->sk_status_idx] = htole64(0); /* clear ownership */ 1926 1927 opcode = MSK_STATUS_OPCODE(desc); 1928 switch (opcode) { 1929 case MSK_STATUS_OPCODE_RXSTAT: 1930 port = MSK_STATUS_RXSTAT_PORT(desc); 1931 msk_rxeof(sc->sk_if[port], &ml[port], 1932 MSK_STATUS_RXSTAT_LEN(desc), 1933 MSK_STATUS_RXSTAT_STATUS(desc)); 1934 break; 1935 case SK_Y2_STOPC_TXSTAT: 1936 if (sc_if0) { 1937 msk_txeof(sc_if0, 1938 MSK_STATUS_TXIDX_PORTA(desc)); 1939 } 1940 if (sc_if1) { 1941 msk_txeof(sc_if1, 1942 MSK_STATUS_TXIDX_PORTB(desc)); 1943 } 1944 break; 1945 default: 1946 printf("opcode=0x%x\n", opcode); 1947 break; 1948 } 1949 1950 SK_INC(sc->sk_status_idx, MSK_STATUS_RING_CNT); 1951 } 1952 1953 MSK_CDSTSYNC(sc, sc->sk_status_idx, 1954 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1955 1956 if (status & SK_Y2_IMR_BMU) { 1957 CSR_WRITE_4(sc, SK_STAT_BMU_CSR, SK_STAT_BMU_IRQ_CLEAR); 1958 claimed = 1; 1959 } 1960 1961 CSR_WRITE_4(sc, SK_Y2_ICR, 2); 1962 1963 if (!ml_empty(&ml[0])) { 1964 if (ifiq_input(&ifp0->if_rcv, &ml[0])) 1965 if_rxr_livelocked(&sc_if0->sk_cdata.sk_rx_ring); 1966 msk_fill_rx_ring(sc_if0); 1967 SK_IF_WRITE_2(sc_if0, 0, SK_RXQ1_Y2_PREF_PUTIDX, 1968 sc_if0->sk_cdata.sk_rx_prod); 1969 } 1970 if (!ml_empty(&ml[1])) { 1971 if (ifiq_input(&ifp1->if_rcv, &ml[1])) 1972 if_rxr_livelocked(&sc_if1->sk_cdata.sk_rx_ring); 1973 msk_fill_rx_ring(sc_if1); 1974 
SK_IF_WRITE_2(sc_if1, 0, SK_RXQ1_Y2_PREF_PUTIDX, 1975 sc_if1->sk_cdata.sk_rx_prod); 1976 } 1977 1978 return (claimed); 1979 } 1980 1981 void 1982 msk_init_yukon(struct sk_if_softc *sc_if) 1983 { 1984 u_int32_t v; 1985 u_int16_t reg; 1986 struct sk_softc *sc; 1987 int i; 1988 1989 sc = sc_if->sk_softc; 1990 1991 DPRINTFN(2, ("msk_init_yukon: start: sk_csr=%#x\n", 1992 CSR_READ_4(sc_if->sk_softc, SK_CSR))); 1993 1994 DPRINTFN(6, ("msk_init_yukon: 1\n")); 1995 1996 DPRINTFN(3, ("msk_init_yukon: gmac_ctrl=%#x\n", 1997 SK_IF_READ_4(sc_if, 0, SK_GMAC_CTRL))); 1998 1999 DPRINTFN(6, ("msk_init_yukon: 3\n")); 2000 2001 /* unused read of the interrupt source register */ 2002 DPRINTFN(6, ("msk_init_yukon: 4\n")); 2003 SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR); 2004 2005 DPRINTFN(6, ("msk_init_yukon: 4a\n")); 2006 reg = SK_YU_READ_2(sc_if, YUKON_PAR); 2007 DPRINTFN(6, ("msk_init_yukon: YUKON_PAR=%#x\n", reg)); 2008 2009 /* MIB Counter Clear Mode set */ 2010 reg |= YU_PAR_MIB_CLR; 2011 DPRINTFN(6, ("msk_init_yukon: YUKON_PAR=%#x\n", reg)); 2012 DPRINTFN(6, ("msk_init_yukon: 4b\n")); 2013 SK_YU_WRITE_2(sc_if, YUKON_PAR, reg); 2014 2015 /* MIB Counter Clear Mode clear */ 2016 DPRINTFN(6, ("msk_init_yukon: 5\n")); 2017 reg &= ~YU_PAR_MIB_CLR; 2018 SK_YU_WRITE_2(sc_if, YUKON_PAR, reg); 2019 2020 /* receive control reg */ 2021 DPRINTFN(6, ("msk_init_yukon: 7\n")); 2022 SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR); 2023 2024 /* transmit parameter register */ 2025 DPRINTFN(6, ("msk_init_yukon: 8\n")); 2026 SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) | 2027 YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) ); 2028 2029 /* serial mode register */ 2030 DPRINTFN(6, ("msk_init_yukon: 9\n")); 2031 reg = YU_SMR_DATA_BLIND(0x1c) | 2032 YU_SMR_MFL_VLAN | 2033 YU_SMR_IPG_DATA(0x1e); 2034 2035 if (sc->sk_type != SK_YUKON_FE && 2036 sc->sk_type != SK_YUKON_FE_P) 2037 reg |= YU_SMR_MFL_JUMBO; 2038 2039 SK_YU_WRITE_2(sc_if, YUKON_SMR, reg); 2040 2041 DPRINTFN(6, ("msk_init_yukon: 10\n")); 2042 /* 
Setup Yukon's address */ 2043 for (i = 0; i < 3; i++) { 2044 /* Write Source Address 1 (unicast filter) */ 2045 SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4, 2046 sc_if->arpcom.ac_enaddr[i * 2] | 2047 sc_if->arpcom.ac_enaddr[i * 2 + 1] << 8); 2048 } 2049 2050 for (i = 0; i < 3; i++) { 2051 reg = sk_win_read_2(sc_if->sk_softc, 2052 SK_MAC1_0 + i * 2 + sc_if->sk_port * 8); 2053 SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4, reg); 2054 } 2055 2056 /* Program promiscuous mode and multicast filters */ 2057 DPRINTFN(6, ("msk_init_yukon: 11\n")); 2058 msk_iff(sc_if); 2059 2060 /* enable interrupt mask for counter overflows */ 2061 DPRINTFN(6, ("msk_init_yukon: 12\n")); 2062 SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0); 2063 SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0); 2064 SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0); 2065 2066 /* Configure RX MAC FIFO Flush Mask */ 2067 v = YU_RXSTAT_FOFL | YU_RXSTAT_CRCERR | YU_RXSTAT_MIIERR | 2068 YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC | YU_RXSTAT_RUNT | 2069 YU_RXSTAT_JABBER; 2070 SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_MASK, v); 2071 2072 /* Configure RX MAC FIFO */ 2073 SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR); 2074 SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_OPERATION_ON | 2075 SK_RFCTL_FIFO_FLUSH_ON); 2076 2077 /* Increase flush threshold to 64 bytes */ 2078 SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_THRESHOLD, 2079 SK_RFCTL_FIFO_THRESHOLD + 1); 2080 2081 /* Configure TX MAC FIFO */ 2082 SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR); 2083 SK_IF_WRITE_2(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON); 2084 2085 #if 1 2086 SK_YU_WRITE_2(sc_if, YUKON_GPCR, YU_GPCR_TXEN | YU_GPCR_RXEN); 2087 #endif 2088 DPRINTFN(6, ("msk_init_yukon: end\n")); 2089 } 2090 2091 /* 2092 * Note that to properly initialize any part of the GEnesis chip, 2093 * you first have to take it out of reset mode. 
2094 */ 2095 void 2096 msk_init(void *xsc_if) 2097 { 2098 struct sk_if_softc *sc_if = xsc_if; 2099 struct sk_softc *sc = sc_if->sk_softc; 2100 struct ifnet *ifp = &sc_if->arpcom.ac_if; 2101 struct mii_data *mii = &sc_if->sk_mii; 2102 int s; 2103 2104 DPRINTFN(2, ("msk_init\n")); 2105 2106 s = splnet(); 2107 2108 /* Cancel pending I/O and free all RX/TX buffers. */ 2109 msk_stop(sc_if, 0); 2110 2111 /* Configure I2C registers */ 2112 2113 /* Configure XMAC(s) */ 2114 msk_init_yukon(sc_if); 2115 mii_mediachg(mii); 2116 2117 /* Configure transmit arbiter(s) */ 2118 SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_ON); 2119 #if 0 2120 SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON); 2121 #endif 2122 2123 /* Configure RAMbuffers */ 2124 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET); 2125 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart); 2126 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart); 2127 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart); 2128 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend); 2129 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON); 2130 2131 SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_CTLTST, SK_RBCTL_UNRESET); 2132 SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_CTLTST, SK_RBCTL_STORENFWD_ON); 2133 SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_START, sc_if->sk_tx_ramstart); 2134 SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_WR_PTR, sc_if->sk_tx_ramstart); 2135 SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_RD_PTR, sc_if->sk_tx_ramstart); 2136 SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_END, sc_if->sk_tx_ramend); 2137 SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_CTLTST, SK_RBCTL_ON); 2138 2139 /* Configure BMUs */ 2140 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, 0x00000016); 2141 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, 0x00000d28); 2142 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, 0x00000080); 2143 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_WATERMARK, 0x00000600); 2144 2145 SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_BMU_CSR, 0x00000016); 2146 SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_BMU_CSR, 
0x00000d28); 2147 SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_BMU_CSR, 0x00000080); 2148 SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_WATERMARK, 0x00000600); 2149 2150 /* Make sure the sync transmit queue is disabled. */ 2151 SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET); 2152 2153 /* Init descriptors */ 2154 if (msk_init_rx_ring(sc_if) == ENOBUFS) { 2155 printf("%s: initialization failed: no " 2156 "memory for rx buffers\n", sc_if->sk_dev.dv_xname); 2157 msk_stop(sc_if, 0); 2158 splx(s); 2159 return; 2160 } 2161 2162 if (msk_init_tx_ring(sc_if) == ENOBUFS) { 2163 printf("%s: initialization failed: no " 2164 "memory for tx buffers\n", sc_if->sk_dev.dv_xname); 2165 msk_stop(sc_if, 0); 2166 splx(s); 2167 return; 2168 } 2169 2170 /* Initialize prefetch engine. */ 2171 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_CSR, 0x00000001); 2172 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_CSR, 0x00000002); 2173 SK_IF_WRITE_2(sc_if, 0, SK_RXQ1_Y2_PREF_LIDX, MSK_RX_RING_CNT - 1); 2174 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_ADDRLO, 2175 MSK_RX_RING_ADDR(sc_if, 0)); 2176 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_ADDRHI, 2177 (u_int64_t)MSK_RX_RING_ADDR(sc_if, 0) >> 32); 2178 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_CSR, 0x00000008); 2179 SK_IF_READ_4(sc_if, 0, SK_RXQ1_Y2_PREF_CSR); 2180 2181 SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_CSR, 0x00000001); 2182 SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_CSR, 0x00000002); 2183 SK_IF_WRITE_2(sc_if, 1, SK_TXQA1_Y2_PREF_LIDX, MSK_TX_RING_CNT - 1); 2184 SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_ADDRLO, 2185 MSK_TX_RING_ADDR(sc_if, 0)); 2186 SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_ADDRHI, 2187 (u_int64_t)MSK_TX_RING_ADDR(sc_if, 0) >> 32); 2188 SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_CSR, 0x00000008); 2189 SK_IF_READ_4(sc_if, 1, SK_TXQA1_Y2_PREF_CSR); 2190 2191 SK_IF_WRITE_2(sc_if, 0, SK_RXQ1_Y2_PREF_PUTIDX, 2192 sc_if->sk_cdata.sk_rx_prod); 2193 2194 /* 2195 * tell the chip the tx ring is empty for now. 
the first 2196 * msk_start will end up posting the ADDR64 tx descriptor 2197 * that resets the high address. 2198 */ 2199 SK_IF_WRITE_2(sc_if, 1, SK_TXQA1_Y2_PREF_PUTIDX, 0); 2200 2201 /* Configure interrupt handling */ 2202 if (sc_if->sk_port == SK_PORT_A) 2203 sc->sk_intrmask |= SK_Y2_INTRS1; 2204 else 2205 sc->sk_intrmask |= SK_Y2_INTRS2; 2206 sc->sk_intrmask |= SK_Y2_IMR_BMU; 2207 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask); 2208 2209 ifp->if_flags |= IFF_RUNNING; 2210 ifq_clr_oactive(&ifp->if_snd); 2211 2212 timeout_add_sec(&sc_if->sk_tick_ch, 1); 2213 2214 splx(s); 2215 } 2216 2217 void 2218 msk_stop(struct sk_if_softc *sc_if, int softonly) 2219 { 2220 struct sk_softc *sc = sc_if->sk_softc; 2221 struct ifnet *ifp = &sc_if->arpcom.ac_if; 2222 struct mbuf *m; 2223 bus_dmamap_t map; 2224 int i; 2225 2226 DPRINTFN(2, ("msk_stop\n")); 2227 2228 timeout_del(&sc_if->sk_tick_ch); 2229 timeout_del(&sc_if->sk_tick_rx); 2230 2231 ifp->if_flags &= ~IFF_RUNNING; 2232 ifq_clr_oactive(&ifp->if_snd); 2233 2234 /* Stop transfer of Tx descriptors */ 2235 2236 /* Stop transfer of Rx descriptors */ 2237 2238 if (!softonly) { 2239 /* Turn off various components of this interface. 
*/ 2240 SK_IF_WRITE_1(sc_if,0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET); 2241 SK_IF_WRITE_1(sc_if,0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET); 2242 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE); 2243 SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF); 2244 SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_BMU_CSR, SK_TXBMU_OFFLINE); 2245 SK_IF_WRITE_4(sc_if, 1, SK_TXRBA1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF); 2246 SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF); 2247 SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP); 2248 SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_TXLEDCTL_COUNTER_STOP); 2249 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF); 2250 SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF); 2251 2252 SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_Y2_PREF_CSR, 0x00000001); 2253 SK_IF_WRITE_4(sc_if, 1, SK_TXQA1_Y2_PREF_CSR, 0x00000001); 2254 2255 /* Disable interrupts */ 2256 if (sc_if->sk_port == SK_PORT_A) 2257 sc->sk_intrmask &= ~SK_Y2_INTRS1; 2258 else 2259 sc->sk_intrmask &= ~SK_Y2_INTRS2; 2260 CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask); 2261 } 2262 2263 /* Free RX and TX mbufs still in the queues. 
*/ 2264 for (i = 0; i < MSK_RX_RING_CNT; i++) { 2265 m = sc_if->sk_cdata.sk_rx_mbuf[i]; 2266 if (m == NULL) 2267 continue; 2268 2269 map = sc_if->sk_cdata.sk_rx_maps[i]; 2270 bus_dmamap_sync(sc->sc_dmatag, map, 0, map->dm_mapsize, 2271 BUS_DMASYNC_POSTREAD); 2272 bus_dmamap_unload(sc->sc_dmatag, map); 2273 2274 m_freem(m); 2275 2276 sc_if->sk_cdata.sk_rx_mbuf[i] = NULL; 2277 } 2278 2279 sc_if->sk_cdata.sk_rx_prod = 0; 2280 sc_if->sk_cdata.sk_rx_cons = 0; 2281 2282 for (i = 0; i < MSK_TX_RING_CNT; i++) { 2283 m = sc_if->sk_cdata.sk_tx_mbuf[i]; 2284 if (m == NULL) 2285 continue; 2286 2287 map = sc_if->sk_cdata.sk_tx_maps[i]; 2288 bus_dmamap_sync(sc->sc_dmatag, map, 0, map->dm_mapsize, 2289 BUS_DMASYNC_POSTREAD); 2290 bus_dmamap_unload(sc->sc_dmatag, map); 2291 2292 m_freem(m); 2293 2294 sc_if->sk_cdata.sk_tx_mbuf[i] = NULL; 2295 } 2296 } 2297 2298 const struct cfattach mskc_ca = { 2299 sizeof(struct sk_softc), mskc_probe, mskc_attach, mskc_detach, 2300 mskc_activate 2301 }; 2302 2303 struct cfdriver mskc_cd = { 2304 NULL, "mskc", DV_DULL 2305 }; 2306 2307 const struct cfattach msk_ca = { 2308 sizeof(struct sk_if_softc), msk_probe, msk_attach, msk_detach, 2309 msk_activate 2310 }; 2311 2312 struct cfdriver msk_cd = { 2313 NULL, "msk", DV_IFNET 2314 }; 2315 2316 #if NKSTAT > 0 2317 static uint32_t 2318 msk_mib_read32(struct sk_if_softc *sc_if, uint32_t r) 2319 { 2320 uint16_t hi, lo, xx; 2321 2322 hi = SK_YU_READ_2(sc_if, r + 4); 2323 for (;;) { 2324 /* XXX barriers? 
*/ 2325 lo = SK_YU_READ_2(sc_if, r); 2326 xx = SK_YU_READ_2(sc_if, r + 4); 2327 2328 if (hi == xx) 2329 break; 2330 2331 hi = xx; 2332 } 2333 2334 return (((uint32_t)hi << 16) | (uint32_t) lo); 2335 } 2336 2337 static uint64_t 2338 msk_mib_read64(struct sk_if_softc *sc_if, uint32_t r) 2339 { 2340 uint32_t hi, lo, xx; 2341 2342 hi = msk_mib_read32(sc_if, r + 8); 2343 for (;;) { 2344 lo = msk_mib_read32(sc_if, r); 2345 xx = msk_mib_read32(sc_if, r + 8); 2346 2347 if (hi == xx) 2348 break; 2349 2350 hi = xx; 2351 } 2352 2353 return (((uint64_t)hi << 32) | (uint64_t)lo); 2354 } 2355 2356 void 2357 msk_kstat_attach(struct sk_if_softc *sc_if) 2358 { 2359 struct kstat *ks; 2360 struct kstat_kv *kvs; 2361 struct msk_kstat *mks; 2362 size_t i; 2363 2364 ks = kstat_create(sc_if->sk_dev.dv_xname, 0, "msk-mib", 0, 2365 KSTAT_T_KV, 0); 2366 if (ks == NULL) { 2367 /* oh well */ 2368 return; 2369 } 2370 2371 mks = malloc(sizeof(*mks), M_DEVBUF, M_WAITOK); 2372 rw_init(&mks->lock, "mskstat"); 2373 mks->ks = ks; 2374 2375 kvs = mallocarray(nitems(msk_mib), sizeof(*kvs), 2376 M_DEVBUF, M_WAITOK|M_ZERO); 2377 for (i = 0; i < nitems(msk_mib); i++) { 2378 const struct msk_mib *m = &msk_mib[i]; 2379 kstat_kv_unit_init(&kvs[i], m->name, m->type, m->unit); 2380 } 2381 2382 ks->ks_softc = sc_if; 2383 ks->ks_data = kvs; 2384 ks->ks_datalen = nitems(msk_mib) * sizeof(*kvs); 2385 ks->ks_read = msk_kstat_read; 2386 kstat_set_wlock(ks, &mks->lock); 2387 2388 kstat_install(ks); 2389 2390 sc_if->sk_kstat = mks; 2391 } 2392 2393 void 2394 msk_kstat_detach(struct sk_if_softc *sc_if) 2395 { 2396 struct msk_kstat *mks = sc_if->sk_kstat; 2397 struct kstat_kv *kvs; 2398 size_t kvslen; 2399 2400 if (mks == NULL) 2401 return; 2402 2403 sc_if->sk_kstat = NULL; 2404 2405 kvs = mks->ks->ks_data; 2406 kvslen = mks->ks->ks_datalen; 2407 2408 kstat_destroy(mks->ks); 2409 free(kvs, M_DEVBUF, kvslen); 2410 free(mks, M_DEVBUF, sizeof(*mks)); 2411 } 2412 2413 int 2414 msk_kstat_read(struct kstat *ks) 2415 { 2416 
struct sk_if_softc *sc_if = ks->ks_softc; 2417 struct kstat_kv *kvs = ks->ks_data; 2418 size_t i; 2419 2420 nanouptime(&ks->ks_updated); 2421 2422 for (i = 0; i < nitems(msk_mib); i++) { 2423 const struct msk_mib *m = &msk_mib[i]; 2424 2425 switch (m->type) { 2426 case KSTAT_KV_T_COUNTER32: 2427 kstat_kv_u32(&kvs[i]) = msk_mib_read32(sc_if, m->reg); 2428 break; 2429 case KSTAT_KV_T_COUNTER64: 2430 kstat_kv_u64(&kvs[i]) = msk_mib_read64(sc_if, m->reg); 2431 break; 2432 default: 2433 panic("unexpected msk_mib type"); 2434 /* NOTREACHED */ 2435 } 2436 } 2437 2438 return (0); 2439 } 2440 #endif /* NKSTAT */ 2441 2442 #ifdef MSK_DEBUG 2443 void 2444 msk_dump_txdesc(struct msk_tx_desc *le, int idx) 2445 { 2446 #define DESC_PRINT(X) \ 2447 if (X) \ 2448 printf("txdesc[%d]." #X "=%#x\n", \ 2449 idx, X); 2450 2451 DESC_PRINT(letoh32(le->sk_addr)); 2452 DESC_PRINT(letoh16(le->sk_len)); 2453 DESC_PRINT(le->sk_ctl); 2454 DESC_PRINT(le->sk_opcode); 2455 #undef DESC_PRINT 2456 } 2457 2458 void 2459 msk_dump_bytes(const char *data, int len) 2460 { 2461 int c, i, j; 2462 2463 for (i = 0; i < len; i += 16) { 2464 printf("%08x ", i); 2465 c = len - i; 2466 if (c > 16) c = 16; 2467 2468 for (j = 0; j < c; j++) { 2469 printf("%02x ", data[i + j] & 0xff); 2470 if ((j & 0xf) == 7 && j > 0) 2471 printf(" "); 2472 } 2473 2474 for (; j < 16; j++) 2475 printf(" "); 2476 printf(" "); 2477 2478 for (j = 0; j < c; j++) { 2479 int ch = data[i + j] & 0xff; 2480 printf("%c", ' ' <= ch && ch <= '~' ? ch : ' '); 2481 } 2482 2483 printf("\n"); 2484 2485 if (c < 16) 2486 break; 2487 } 2488 } 2489 2490 void 2491 msk_dump_mbuf(struct mbuf *m) 2492 { 2493 int count = m->m_pkthdr.len; 2494 2495 printf("m=%#lx, m->m_pkthdr.len=%#d\n", m, m->m_pkthdr.len); 2496 2497 while (count > 0 && m) { 2498 printf("m=%#lx, m->m_data=%#lx, m->m_len=%d\n", 2499 m, m->m_data, m->m_len); 2500 msk_dump_bytes(mtod(m, char *), m->m_len); 2501 2502 count -= m->m_len; 2503 m = m->m_next; 2504 } 2505 } 2506 #endif 2507