1 /****************************************************************************** 2 * 3 * Name : sky2.c 4 * Project: Gigabit Ethernet Driver for FreeBSD 5.x/6.x 5 * Version: $Revision: 1.23 $ 6 * Date : $Date: 2005/12/22 09:04:11 $ 7 * Purpose: Main driver source file 8 * 9 *****************************************************************************/ 10 11 /****************************************************************************** 12 * 13 * LICENSE: 14 * Copyright (C) Marvell International Ltd. and/or its affiliates 15 * 16 * The computer program files contained in this folder ("Files") 17 * are provided to you under the BSD-type license terms provided 18 * below, and any use of such Files and any derivative works 19 * thereof created by you shall be governed by the following terms 20 * and conditions: 21 * 22 * - Redistributions of source code must retain the above copyright 23 * notice, this list of conditions and the following disclaimer. 24 * - Redistributions in binary form must reproduce the above 25 * copyright notice, this list of conditions and the following 26 * disclaimer in the documentation and/or other materials provided 27 * with the distribution. 28 * - Neither the name of Marvell nor the names of its contributors 29 * may be used to endorse or promote products derived from this 30 * software without specific prior written permission. 31 * 32 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 33 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 34 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 35 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 36 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 37 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 38 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 39 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 40 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 41 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 42 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED 43 * OF THE POSSIBILITY OF SUCH DAMAGE. 44 * /LICENSE 45 * 46 *****************************************************************************/ 47 48 /*- 49 * Copyright (c) 1997, 1998, 1999, 2000 50 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 51 * 52 * Redistribution and use in source and binary forms, with or without 53 * modification, are permitted provided that the following conditions 54 * are met: 55 * 1. Redistributions of source code must retain the above copyright 56 * notice, this list of conditions and the following disclaimer. 57 * 2. Redistributions in binary form must reproduce the above copyright 58 * notice, this list of conditions and the following disclaimer in the 59 * documentation and/or other materials provided with the distribution. 60 * 3. All advertising materials mentioning features or use of this software 61 * must display the following acknowledgement: 62 * This product includes software developed by Bill Paul. 63 * 4. Neither the name of the author nor the names of any co-contributors 64 * may be used to endorse or promote products derived from this software 65 * without specific prior written permission. 
66 * 67 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 68 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 69 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 70 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 71 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 72 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 73 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 74 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 75 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 76 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 77 * THE POSSIBILITY OF SUCH DAMAGE. 78 */ 79 /*- 80 * Copyright (c) 2003 Nathan L. Binkert <binkertn@umich.edu> 81 * 82 * Permission to use, copy, modify, and distribute this software for any 83 * purpose with or without fee is hereby granted, provided that the above 84 * copyright notice and this permission notice appear in all copies. 85 * 86 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 87 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 88 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 89 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 90 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 91 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 92 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 93 */ 94 95 /* $FreeBSD: src/sys/dev/msk/if_msk.c,v 1.26 2007/12/05 09:41:58 remko Exp $ */ 96 97 /* 98 * Device driver for the Marvell Yukon II Ethernet controller. 99 * Due to lack of documentation, this driver is based on the code from 100 * sk(4) and Marvell's myk(4) driver for FreeBSD 5.x. 101 */ 102 103 #include <sys/param.h> 104 #include <sys/endian.h> 105 #include <sys/kernel.h> 106 #include <sys/bus.h> 107 #include <sys/in_cksum.h> 108 #include <sys/interrupt.h> 109 #include <sys/malloc.h> 110 #include <sys/proc.h> 111 #include <sys/rman.h> 112 #include <sys/serialize.h> 113 #include <sys/socket.h> 114 #include <sys/sockio.h> 115 #include <sys/sysctl.h> 116 117 #include <net/ethernet.h> 118 #include <net/if.h> 119 #include <net/bpf.h> 120 #include <net/if_arp.h> 121 #include <net/if_dl.h> 122 #include <net/if_media.h> 123 #include <net/ifq_var.h> 124 #include <net/vlan/if_vlan_var.h> 125 126 #include <netinet/ip.h> 127 #include <netinet/ip_var.h> 128 129 #include <dev/netif/mii_layer/miivar.h> 130 131 #include <bus/pci/pcireg.h> 132 #include <bus/pci/pcivar.h> 133 134 #include "if_mskreg.h" 135 136 /* "device miibus" required. See GENERIC if you get errors here. */ 137 #include "miibus_if.h" 138 139 #define MSK_CSUM_FEATURES (CSUM_TCP | CSUM_UDP) 140 141 /* 142 * Devices supported by this driver. 
143 */ 144 static const struct msk_product { 145 uint16_t msk_vendorid; 146 uint16_t msk_deviceid; 147 const char *msk_name; 148 } msk_products[] = { 149 { VENDORID_SK, DEVICEID_SK_YUKON2, 150 "SK-9Sxx Gigabit Ethernet" }, 151 { VENDORID_SK, DEVICEID_SK_YUKON2_EXPR, 152 "SK-9Exx Gigabit Ethernet"}, 153 { VENDORID_MARVELL, DEVICEID_MRVL_8021CU, 154 "Marvell Yukon 88E8021CU Gigabit Ethernet" }, 155 { VENDORID_MARVELL, DEVICEID_MRVL_8021X, 156 "Marvell Yukon 88E8021 SX/LX Gigabit Ethernet" }, 157 { VENDORID_MARVELL, DEVICEID_MRVL_8022CU, 158 "Marvell Yukon 88E8022CU Gigabit Ethernet" }, 159 { VENDORID_MARVELL, DEVICEID_MRVL_8022X, 160 "Marvell Yukon 88E8022 SX/LX Gigabit Ethernet" }, 161 { VENDORID_MARVELL, DEVICEID_MRVL_8061CU, 162 "Marvell Yukon 88E8061CU Gigabit Ethernet" }, 163 { VENDORID_MARVELL, DEVICEID_MRVL_8061X, 164 "Marvell Yukon 88E8061 SX/LX Gigabit Ethernet" }, 165 { VENDORID_MARVELL, DEVICEID_MRVL_8062CU, 166 "Marvell Yukon 88E8062CU Gigabit Ethernet" }, 167 { VENDORID_MARVELL, DEVICEID_MRVL_8062X, 168 "Marvell Yukon 88E8062 SX/LX Gigabit Ethernet" }, 169 { VENDORID_MARVELL, DEVICEID_MRVL_8035, 170 "Marvell Yukon 88E8035 Fast Ethernet" }, 171 { VENDORID_MARVELL, DEVICEID_MRVL_8036, 172 "Marvell Yukon 88E8036 Fast Ethernet" }, 173 { VENDORID_MARVELL, DEVICEID_MRVL_8038, 174 "Marvell Yukon 88E8038 Fast Ethernet" }, 175 { VENDORID_MARVELL, DEVICEID_MRVL_8039, 176 "Marvell Yukon 88E8039 Fast Ethernet" }, 177 { VENDORID_MARVELL, DEVICEID_MRVL_8040, 178 "Marvell Yukon 88E8040 Fast Ethernet" }, 179 { VENDORID_MARVELL, DEVICEID_MRVL_8040T, 180 "Marvell Yukon 88E8040T Fast Ethernet" }, 181 { VENDORID_MARVELL, DEVICEID_MRVL_8042, 182 "Marvell Yukon 88E8042 Fast Ethernet" }, 183 { VENDORID_MARVELL, DEVICEID_MRVL_8048, 184 "Marvell Yukon 88E8048 Fast Ethernet" }, 185 { VENDORID_MARVELL, DEVICEID_MRVL_4361, 186 "Marvell Yukon 88E8050 Gigabit Ethernet" }, 187 { VENDORID_MARVELL, DEVICEID_MRVL_4360, 188 "Marvell Yukon 88E8052 Gigabit Ethernet" }, 189 { VENDORID_MARVELL, DEVICEID_MRVL_4362, 190 "Marvell Yukon 88E8053 Gigabit Ethernet" }, 191 { VENDORID_MARVELL, DEVICEID_MRVL_4363, 192 "Marvell Yukon 88E8055 Gigabit Ethernet" }, 193 { VENDORID_MARVELL, DEVICEID_MRVL_4364, 194 "Marvell Yukon 88E8056 Gigabit Ethernet" }, 195 { VENDORID_MARVELL, DEVICEID_MRVL_4365, 196 "Marvell Yukon 88E8070 Gigabit Ethernet" }, 197 { VENDORID_MARVELL, DEVICEID_MRVL_436A, 198 "Marvell Yukon 88E8058 Gigabit Ethernet" }, 199 { VENDORID_MARVELL, DEVICEID_MRVL_436B, 200 "Marvell Yukon 88E8071 Gigabit Ethernet" }, 201 { VENDORID_MARVELL, DEVICEID_MRVL_436C, 202 "Marvell Yukon 88E8072 Gigabit Ethernet" }, 203 { VENDORID_MARVELL, DEVICEID_MRVL_436D, 204 "Marvell Yukon 88E8055 Gigabit Ethernet" }, 205 { VENDORID_MARVELL, DEVICEID_MRVL_4370, 206 "Marvell Yukon 88E8075 Gigabit Ethernet" }, 207 { VENDORID_MARVELL, DEVICEID_MRVL_4380, 208 "Marvell Yukon 88E8057 Gigabit Ethernet" }, 209 { VENDORID_MARVELL, DEVICEID_MRVL_4381, 210 "Marvell Yukon 88E8059 Gigabit Ethernet" }, 211 { VENDORID_DLINK, DEVICEID_DLINK_DGE550SX, 212 "D-Link 550SX Gigabit Ethernet" }, 213 { VENDORID_DLINK, DEVICEID_DLINK_DGE560T, 214 "D-Link 560T Gigabit Ethernet" }, 215 { 0, 0, NULL } 216 }; 217 218 static const char *model_name[] = { 219 "Yukon XL", 220 "Yukon EC Ultra", 221 "Yukon EX", 222 "Yukon EC", 223 "Yukon FE", 224 "Yukon FE+", 225 "Yukon Supreme", 226 "Yukon Ultra 2", 227 "Yukon Unknown", 228 "Yukon Optima" 229 }; 230 231 static int mskc_probe(device_t); 232 static int mskc_attach(device_t); 233 static int mskc_detach(device_t); 234 static int 
mskc_shutdown(device_t); 235 static int mskc_suspend(device_t); 236 static int mskc_resume(device_t); 237 static void mskc_intr(void *); 238 239 static void mskc_reset(struct msk_softc *); 240 static void mskc_set_imtimer(struct msk_softc *); 241 static void mskc_intr_hwerr(struct msk_softc *); 242 static int mskc_handle_events(struct msk_softc *); 243 static void mskc_phy_power(struct msk_softc *, int); 244 static int mskc_setup_rambuffer(struct msk_softc *); 245 static int mskc_status_dma_alloc(struct msk_softc *); 246 static void mskc_status_dma_free(struct msk_softc *); 247 static int mskc_sysctl_proc_limit(SYSCTL_HANDLER_ARGS); 248 static int mskc_sysctl_intr_rate(SYSCTL_HANDLER_ARGS); 249 250 static int msk_probe(device_t); 251 static int msk_attach(device_t); 252 static int msk_detach(device_t); 253 static int msk_miibus_readreg(device_t, int, int); 254 static int msk_miibus_writereg(device_t, int, int, int); 255 static void msk_miibus_statchg(device_t); 256 257 static void msk_init(void *); 258 static int msk_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 259 static void msk_start(struct ifnet *); 260 static void msk_watchdog(struct ifnet *); 261 static int msk_mediachange(struct ifnet *); 262 static void msk_mediastatus(struct ifnet *, struct ifmediareq *); 263 264 static void msk_tick(void *); 265 static void msk_intr_phy(struct msk_if_softc *); 266 static void msk_intr_gmac(struct msk_if_softc *); 267 static __inline void 268 msk_rxput(struct msk_if_softc *); 269 static void msk_handle_hwerr(struct msk_if_softc *, uint32_t); 270 static void msk_rxeof(struct msk_if_softc *, uint32_t, int); 271 static void msk_txeof(struct msk_if_softc *, int); 272 static void msk_set_prefetch(struct msk_softc *, int, bus_addr_t, uint32_t); 273 static void msk_set_rambuffer(struct msk_if_softc *); 274 static void msk_stop(struct msk_if_softc *); 275 276 static int msk_txrx_dma_alloc(struct msk_if_softc *); 277 static void msk_txrx_dma_free(struct msk_if_softc *); 278 static int msk_init_rx_ring(struct msk_if_softc *); 279 static void msk_init_tx_ring(struct msk_if_softc *); 280 static __inline void 281 msk_discard_rxbuf(struct msk_if_softc *, int); 282 static int msk_newbuf(struct msk_if_softc *, int, int); 283 static int msk_encap(struct msk_if_softc *, struct mbuf **); 284 285 #ifdef MSK_JUMBO 286 static int msk_init_jumbo_rx_ring(struct msk_if_softc *); 287 static __inline void msk_discard_jumbo_rxbuf(struct msk_if_softc *, int); 288 static int msk_jumbo_newbuf(struct msk_if_softc *, int); 289 static void msk_jumbo_rxeof(struct msk_if_softc *, uint32_t, int); 290 static void *msk_jalloc(struct msk_if_softc *); 291 static void msk_jfree(void *, void *); 292 #endif 293 294 static int msk_phy_readreg(struct msk_if_softc *, int, int); 295 static int msk_phy_writereg(struct msk_if_softc *, int, int, int); 296 297 static void msk_rxfilter(struct msk_if_softc *); 298 static void msk_setvlan(struct msk_if_softc *, struct ifnet *); 299 static void msk_set_tx_stfwd(struct msk_if_softc *); 300 301 static int msk_dmamem_create(device_t, bus_size_t, bus_dma_tag_t *, 302 void **, bus_addr_t *, bus_dmamap_t *); 303 static void msk_dmamem_destroy(bus_dma_tag_t, void *, bus_dmamap_t); 304 305 static device_method_t mskc_methods[] = { 306 /* Device interface */ 307 DEVMETHOD(device_probe, mskc_probe), 308 DEVMETHOD(device_attach, mskc_attach), 309 DEVMETHOD(device_detach, mskc_detach), 310 DEVMETHOD(device_suspend, mskc_suspend), 311 DEVMETHOD(device_resume, mskc_resume), 312 DEVMETHOD(device_shutdown, 
mskc_shutdown), 313 314 /* bus interface */ 315 DEVMETHOD(bus_print_child, bus_generic_print_child), 316 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 317 318 { NULL, NULL } 319 }; 320 321 static DEFINE_CLASS_0(mskc, mskc_driver, mskc_methods, sizeof(struct msk_softc)); 322 static devclass_t mskc_devclass; 323 324 static device_method_t msk_methods[] = { 325 /* Device interface */ 326 DEVMETHOD(device_probe, msk_probe), 327 DEVMETHOD(device_attach, msk_attach), 328 DEVMETHOD(device_detach, msk_detach), 329 DEVMETHOD(device_shutdown, bus_generic_shutdown), 330 331 /* bus interface */ 332 DEVMETHOD(bus_print_child, bus_generic_print_child), 333 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 334 335 /* MII interface */ 336 DEVMETHOD(miibus_readreg, msk_miibus_readreg), 337 DEVMETHOD(miibus_writereg, msk_miibus_writereg), 338 DEVMETHOD(miibus_statchg, msk_miibus_statchg), 339 340 { NULL, NULL } 341 }; 342 343 static DEFINE_CLASS_0(msk, msk_driver, msk_methods, sizeof(struct msk_if_softc)); 344 static devclass_t msk_devclass; 345 346 DECLARE_DUMMY_MODULE(if_msk); 347 DRIVER_MODULE(if_msk, pci, mskc_driver, mskc_devclass, NULL, NULL); 348 DRIVER_MODULE(if_msk, mskc, msk_driver, msk_devclass, NULL, NULL); 349 DRIVER_MODULE(miibus, msk, miibus_driver, miibus_devclass, NULL, NULL); 350 351 static int mskc_intr_rate = 0; 352 static int mskc_process_limit = MSK_PROC_DEFAULT; 353 354 TUNABLE_INT("hw.mskc.intr_rate", &mskc_intr_rate); 355 TUNABLE_INT("hw.mskc.process_limit", &mskc_process_limit); 356 357 static int 358 msk_miibus_readreg(device_t dev, int phy, int reg) 359 { 360 struct msk_if_softc *sc_if; 361 362 if (phy != PHY_ADDR_MARV) 363 return (0); 364 365 sc_if = device_get_softc(dev); 366 367 return (msk_phy_readreg(sc_if, phy, reg)); 368 } 369 370 static int 371 msk_phy_readreg(struct msk_if_softc *sc_if, int phy, int reg) 372 { 373 struct msk_softc *sc; 374 int i, val; 375 376 sc = sc_if->msk_softc; 377 378 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL, 379 GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD); 380 381 for (i = 0; i < MSK_TIMEOUT; i++) { 382 DELAY(1); 383 val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL); 384 if ((val & GM_SMI_CT_RD_VAL) != 0) { 385 val = GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_DATA); 386 break; 387 } 388 } 389 390 if (i == MSK_TIMEOUT) { 391 if_printf(sc_if->msk_ifp, "phy failed to come ready\n"); 392 val = 0; 393 } 394 395 return (val); 396 } 397 398 static int 399 msk_miibus_writereg(device_t dev, int phy, int reg, int val) 400 { 401 struct msk_if_softc *sc_if; 402 403 if (phy != PHY_ADDR_MARV) 404 return (0); 405 406 sc_if = device_get_softc(dev); 407 408 return (msk_phy_writereg(sc_if, phy, reg, val)); 409 } 410 411 static int 412 msk_phy_writereg(struct msk_if_softc *sc_if, int phy, int reg, int val) 413 { 414 struct msk_softc *sc; 415 int i; 416 417 sc = sc_if->msk_softc; 418 419 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_DATA, val); 420 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SMI_CTRL, 421 GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg)); 422 for (i = 0; i < MSK_TIMEOUT; i++) { 423 DELAY(1); 424 if ((GMAC_READ_2(sc, sc_if->msk_port, GM_SMI_CTRL) & 425 GM_SMI_CT_BUSY) == 0) 426 break; 427 } 428 if (i == MSK_TIMEOUT) 429 if_printf(sc_if->msk_ifp, "phy write timeout\n"); 430 431 return (0); 432 } 433 434 static void 435 msk_miibus_statchg(device_t dev) 436 { 437 struct msk_if_softc *sc_if; 438 struct msk_softc *sc; 439 struct mii_data *mii; 440 struct ifnet *ifp; 441 uint32_t gmac; 442 443 sc_if = device_get_softc(dev); 444 sc = 
sc_if->msk_softc; 445 446 mii = device_get_softc(sc_if->msk_miibus); 447 ifp = sc_if->msk_ifp; 448 449 sc_if->msk_link = 0; 450 if ((mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) == 451 (IFM_AVALID | IFM_ACTIVE)) { 452 switch (IFM_SUBTYPE(mii->mii_media_active)) { 453 case IFM_10_T: 454 case IFM_100_TX: 455 sc_if->msk_link = 1; 456 break; 457 case IFM_1000_T: 458 case IFM_1000_SX: 459 case IFM_1000_LX: 460 case IFM_1000_CX: 461 if ((sc_if->msk_flags & MSK_FLAG_FASTETHER) == 0) 462 sc_if->msk_link = 1; 463 break; 464 } 465 } 466 467 if (sc_if->msk_link != 0) { 468 /* Enable Tx FIFO Underrun. */ 469 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK), 470 GM_IS_TX_FF_UR | GM_IS_RX_FF_OR); 471 /* 472 * Because mii(4) notify msk(4) that it detected link status 473 * change, there is no need to enable automatic 474 * speed/flow-control/duplex updates. 475 */ 476 gmac = GM_GPCR_AU_ALL_DIS; 477 switch (IFM_SUBTYPE(mii->mii_media_active)) { 478 case IFM_1000_SX: 479 case IFM_1000_T: 480 gmac |= GM_GPCR_SPEED_1000; 481 break; 482 case IFM_100_TX: 483 gmac |= GM_GPCR_SPEED_100; 484 break; 485 case IFM_10_T: 486 break; 487 } 488 489 if ((mii->mii_media_active & IFM_GMASK) & IFM_FDX) 490 gmac |= GM_GPCR_DUP_FULL; 491 else 492 gmac |= GM_GPCR_FC_RX_DIS | GM_GPCR_FC_TX_DIS; 493 /* Disable Rx flow control. */ 494 if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG0) == 0) 495 gmac |= GM_GPCR_FC_RX_DIS; 496 /* Disable Tx flow control. */ 497 if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG1) == 0) 498 gmac |= GM_GPCR_FC_TX_DIS; 499 gmac |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA; 500 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac); 501 /* Read again to ensure writing. */ 502 GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL); 503 504 gmac = GMC_PAUSE_OFF; 505 if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG0) && 506 ((mii->mii_media_active & IFM_GMASK) & IFM_FDX)) 507 gmac = GMC_PAUSE_ON; 508 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), gmac); 509 510 /* Enable PHY interrupt for FIFO underrun/overflow. */ 511 msk_phy_writereg(sc_if, PHY_ADDR_MARV, 512 PHY_MARV_INT_MASK, PHY_M_IS_FIFO_ERROR); 513 } else { 514 /* 515 * Link state changed to down. 516 * Disable PHY interrupts. 517 */ 518 msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0); 519 /* Disable Rx/Tx MAC. */ 520 gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL); 521 if (gmac & (GM_GPCR_RX_ENA | GM_GPCR_TX_ENA)) { 522 gmac &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA); 523 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, gmac); 524 /* Read again to ensure writing. */ 525 GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL); 526 } 527 } 528 } 529 530 static void 531 msk_rxfilter(struct msk_if_softc *sc_if) 532 { 533 struct msk_softc *sc; 534 struct ifnet *ifp; 535 struct ifmultiaddr *ifma; 536 uint32_t mchash[2]; 537 uint32_t crc; 538 uint16_t mode; 539 540 sc = sc_if->msk_softc; 541 ifp = sc_if->msk_ifp; 542 543 bzero(mchash, sizeof(mchash)); 544 mode = GMAC_READ_2(sc, sc_if->msk_port, GM_RX_CTRL); 545 if ((ifp->if_flags & IFF_PROMISC) != 0) { 546 mode &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); 547 } else if ((ifp->if_flags & IFF_ALLMULTI) != 0) { 548 mode |= (GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); 549 mchash[0] = 0xffff; 550 mchash[1] = 0xffff; 551 } else { 552 mode |= GM_RXCR_UCF_ENA; 553 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 554 if (ifma->ifma_addr->sa_family != AF_LINK) 555 continue; 556 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *) 557 ifma->ifma_addr), ETHER_ADDR_LEN); 558 /* Just want the 6 least significant bits. 
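		 * (The multicast hash written to GM_MC_ADDR_H1..H4 below is
		 * 64 bits wide, so a 6-bit value is enough to pick one bit
		 * of mchash[].)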
*/ 559 crc &= 0x3f; 560 /* Set the corresponding bit in the hash table. */ 561 mchash[crc >> 5] |= 1 << (crc & 0x1f); 562 } 563 if (mchash[0] != 0 || mchash[1] != 0) 564 mode |= GM_RXCR_MCF_ENA; 565 } 566 567 GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H1, 568 mchash[0] & 0xffff); 569 GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H2, 570 (mchash[0] >> 16) & 0xffff); 571 GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H3, 572 mchash[1] & 0xffff); 573 GMAC_WRITE_2(sc, sc_if->msk_port, GM_MC_ADDR_H4, 574 (mchash[1] >> 16) & 0xffff); 575 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, mode); 576 } 577 578 static void 579 msk_setvlan(struct msk_if_softc *sc_if, struct ifnet *ifp) 580 { 581 struct msk_softc *sc; 582 583 sc = sc_if->msk_softc; 584 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { 585 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), 586 RX_VLAN_STRIP_ON); 587 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 588 TX_VLAN_TAG_ON); 589 } else { 590 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), 591 RX_VLAN_STRIP_OFF); 592 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 593 TX_VLAN_TAG_OFF); 594 } 595 } 596 597 static int 598 msk_init_rx_ring(struct msk_if_softc *sc_if) 599 { 600 struct msk_ring_data *rd; 601 struct msk_rxdesc *rxd; 602 int i, prod; 603 604 sc_if->msk_cdata.msk_rx_cons = 0; 605 sc_if->msk_cdata.msk_rx_prod = 0; 606 sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM; 607 608 rd = &sc_if->msk_rdata; 609 bzero(rd->msk_rx_ring, sizeof(struct msk_rx_desc) * MSK_RX_RING_CNT); 610 prod = sc_if->msk_cdata.msk_rx_prod; 611 for (i = 0; i < MSK_RX_RING_CNT; i++) { 612 rxd = &sc_if->msk_cdata.msk_rxdesc[prod]; 613 rxd->rx_m = NULL; 614 rxd->rx_le = &rd->msk_rx_ring[prod]; 615 if (msk_newbuf(sc_if, prod, 1) != 0) 616 return (ENOBUFS); 617 MSK_INC(prod, MSK_RX_RING_CNT); 618 } 619 620 /* Update prefetch unit. 
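	 * (Writing the put index below presumably tells the RX queue's
	 * prefetch unit how many initialized list elements it may fetch.)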
*/ 621 sc_if->msk_cdata.msk_rx_prod = MSK_RX_RING_CNT - 1; 622 CSR_WRITE_2(sc_if->msk_softc, 623 Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG), 624 sc_if->msk_cdata.msk_rx_prod); 625 626 return (0); 627 } 628 629 #ifdef MSK_JUMBO 630 static int 631 msk_init_jumbo_rx_ring(struct msk_if_softc *sc_if) 632 { 633 struct msk_ring_data *rd; 634 struct msk_rxdesc *rxd; 635 int i, prod; 636 637 MSK_IF_LOCK_ASSERT(sc_if); 638 639 sc_if->msk_cdata.msk_rx_cons = 0; 640 sc_if->msk_cdata.msk_rx_prod = 0; 641 sc_if->msk_cdata.msk_rx_putwm = MSK_PUT_WM; 642 643 rd = &sc_if->msk_rdata; 644 bzero(rd->msk_jumbo_rx_ring, 645 sizeof(struct msk_rx_desc) * MSK_JUMBO_RX_RING_CNT); 646 prod = sc_if->msk_cdata.msk_rx_prod; 647 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) { 648 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[prod]; 649 rxd->rx_m = NULL; 650 rxd->rx_le = &rd->msk_jumbo_rx_ring[prod]; 651 if (msk_jumbo_newbuf(sc_if, prod) != 0) 652 return (ENOBUFS); 653 MSK_INC(prod, MSK_JUMBO_RX_RING_CNT); 654 } 655 656 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 657 sc_if->msk_cdata.msk_jumbo_rx_ring_map, 658 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 659 660 sc_if->msk_cdata.msk_rx_prod = MSK_JUMBO_RX_RING_CNT - 1; 661 CSR_WRITE_2(sc_if->msk_softc, 662 Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_PUT_IDX_REG), 663 sc_if->msk_cdata.msk_rx_prod); 664 665 return (0); 666 } 667 #endif 668 669 static void 670 msk_init_tx_ring(struct msk_if_softc *sc_if) 671 { 672 struct msk_ring_data *rd; 673 struct msk_txdesc *txd; 674 int i; 675 676 sc_if->msk_cdata.msk_tx_prod = 0; 677 sc_if->msk_cdata.msk_tx_cons = 0; 678 sc_if->msk_cdata.msk_tx_cnt = 0; 679 680 rd = &sc_if->msk_rdata; 681 bzero(rd->msk_tx_ring, sizeof(struct msk_tx_desc) * MSK_TX_RING_CNT); 682 for (i = 0; i < MSK_TX_RING_CNT; i++) { 683 txd = &sc_if->msk_cdata.msk_txdesc[i]; 684 txd->tx_m = NULL; 685 txd->tx_le = &rd->msk_tx_ring[i]; 686 } 687 } 688 689 static __inline void 690 msk_discard_rxbuf(struct msk_if_softc *sc_if, int idx) 691 { 692 struct msk_rx_desc *rx_le; 693 struct msk_rxdesc *rxd; 694 struct mbuf *m; 695 696 rxd = &sc_if->msk_cdata.msk_rxdesc[idx]; 697 m = rxd->rx_m; 698 rx_le = rxd->rx_le; 699 rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER); 700 } 701 702 #ifdef MSK_JUMBO 703 static __inline void 704 msk_discard_jumbo_rxbuf(struct msk_if_softc *sc_if, int idx) 705 { 706 struct msk_rx_desc *rx_le; 707 struct msk_rxdesc *rxd; 708 struct mbuf *m; 709 710 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx]; 711 m = rxd->rx_m; 712 rx_le = rxd->rx_le; 713 rx_le->msk_control = htole32(m->m_len | OP_PACKET | HW_OWNER); 714 } 715 #endif 716 717 static int 718 msk_newbuf(struct msk_if_softc *sc_if, int idx, int init) 719 { 720 struct msk_rx_desc *rx_le; 721 struct msk_rxdesc *rxd; 722 struct mbuf *m; 723 bus_dma_segment_t seg; 724 bus_dmamap_t map; 725 int error, nseg; 726 727 m = m_getcl(init ? 
MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR); 728 if (m == NULL) 729 return (ENOBUFS); 730 731 m->m_len = m->m_pkthdr.len = MCLBYTES; 732 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0) 733 m_adj(m, ETHER_ALIGN); 734 735 error = bus_dmamap_load_mbuf_segment(sc_if->msk_cdata.msk_rx_tag, 736 sc_if->msk_cdata.msk_rx_sparemap, 737 m, &seg, 1, &nseg, BUS_DMA_NOWAIT); 738 if (error) { 739 m_freem(m); 740 if (init) 741 if_printf(&sc_if->arpcom.ac_if, "can't load RX mbuf\n"); 742 return (error); 743 } 744 745 rxd = &sc_if->msk_cdata.msk_rxdesc[idx]; 746 if (rxd->rx_m != NULL) { 747 bus_dmamap_sync(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap, 748 BUS_DMASYNC_POSTREAD); 749 bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag, rxd->rx_dmamap); 750 } 751 752 map = rxd->rx_dmamap; 753 rxd->rx_dmamap = sc_if->msk_cdata.msk_rx_sparemap; 754 sc_if->msk_cdata.msk_rx_sparemap = map; 755 756 rxd->rx_m = m; 757 rx_le = rxd->rx_le; 758 rx_le->msk_addr = htole32(MSK_ADDR_LO(seg.ds_addr)); 759 rx_le->msk_control = htole32(seg.ds_len | OP_PACKET | HW_OWNER); 760 761 return (0); 762 } 763 764 #ifdef MSK_JUMBO 765 static int 766 msk_jumbo_newbuf(struct msk_if_softc *sc_if, int idx) 767 { 768 struct msk_rx_desc *rx_le; 769 struct msk_rxdesc *rxd; 770 struct mbuf *m; 771 bus_dma_segment_t segs[1]; 772 bus_dmamap_t map; 773 int nsegs; 774 void *buf; 775 776 MGETHDR(m, M_DONTWAIT, MT_DATA); 777 if (m == NULL) 778 return (ENOBUFS); 779 buf = msk_jalloc(sc_if); 780 if (buf == NULL) { 781 m_freem(m); 782 return (ENOBUFS); 783 } 784 /* Attach the buffer to the mbuf. */ 785 MEXTADD(m, buf, MSK_JLEN, msk_jfree, (struct msk_if_softc *)sc_if, 0, 786 EXT_NET_DRV); 787 if ((m->m_flags & M_EXT) == 0) { 788 m_freem(m); 789 return (ENOBUFS); 790 } 791 m->m_pkthdr.len = m->m_len = MSK_JLEN; 792 m_adj(m, ETHER_ALIGN); 793 794 if (bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_jumbo_rx_tag, 795 sc_if->msk_cdata.msk_jumbo_rx_sparemap, m, segs, &nsegs, 796 BUS_DMA_NOWAIT) != 0) { 797 m_freem(m); 798 return (ENOBUFS); 799 } 800 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 801 802 rxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[idx]; 803 if (rxd->rx_m != NULL) { 804 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, 805 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 806 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag, 807 rxd->rx_dmamap); 808 } 809 map = rxd->rx_dmamap; 810 rxd->rx_dmamap = sc_if->msk_cdata.msk_jumbo_rx_sparemap; 811 sc_if->msk_cdata.msk_jumbo_rx_sparemap = map; 812 bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag, rxd->rx_dmamap, 813 BUS_DMASYNC_PREREAD); 814 rxd->rx_m = m; 815 rx_le = rxd->rx_le; 816 rx_le->msk_addr = htole32(MSK_ADDR_LO(segs[0].ds_addr)); 817 rx_le->msk_control = 818 htole32(segs[0].ds_len | OP_PACKET | HW_OWNER); 819 820 return (0); 821 } 822 #endif 823 824 /* 825 * Set media options. 826 */ 827 static int 828 msk_mediachange(struct ifnet *ifp) 829 { 830 struct msk_if_softc *sc_if = ifp->if_softc; 831 struct mii_data *mii; 832 int error; 833 834 mii = device_get_softc(sc_if->msk_miibus); 835 error = mii_mediachg(mii); 836 837 return (error); 838 } 839 840 /* 841 * Report current media status. 
842 */ 843 static void 844 msk_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 845 { 846 struct msk_if_softc *sc_if = ifp->if_softc; 847 struct mii_data *mii; 848 849 mii = device_get_softc(sc_if->msk_miibus); 850 mii_pollstat(mii); 851 852 ifmr->ifm_active = mii->mii_media_active; 853 ifmr->ifm_status = mii->mii_media_status; 854 } 855 856 static int 857 msk_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr) 858 { 859 struct msk_if_softc *sc_if; 860 struct ifreq *ifr; 861 struct mii_data *mii; 862 int error, mask; 863 864 sc_if = ifp->if_softc; 865 ifr = (struct ifreq *)data; 866 error = 0; 867 868 switch(command) { 869 case SIOCSIFMTU: 870 #ifdef MSK_JUMBO 871 if (ifr->ifr_mtu > MSK_JUMBO_MTU || ifr->ifr_mtu < ETHERMIN) { 872 error = EINVAL; 873 break; 874 } 875 if (sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_FE && 876 ifr->ifr_mtu > MSK_MAX_FRAMELEN) { 877 error = EINVAL; 878 break; 879 } 880 ifp->if_mtu = ifr->ifr_mtu; 881 if ((ifp->if_flags & IFF_RUNNING) != 0) 882 msk_init(sc_if); 883 #else 884 error = EOPNOTSUPP; 885 #endif 886 break; 887 888 case SIOCSIFFLAGS: 889 if (ifp->if_flags & IFF_UP) { 890 if (ifp->if_flags & IFF_RUNNING) { 891 if (((ifp->if_flags ^ sc_if->msk_if_flags) 892 & (IFF_PROMISC | IFF_ALLMULTI)) != 0) 893 msk_rxfilter(sc_if); 894 } else { 895 if (sc_if->msk_detach == 0) 896 msk_init(sc_if); 897 } 898 } else { 899 if (ifp->if_flags & IFF_RUNNING) 900 msk_stop(sc_if); 901 } 902 sc_if->msk_if_flags = ifp->if_flags; 903 break; 904 905 case SIOCADDMULTI: 906 case SIOCDELMULTI: 907 if (ifp->if_flags & IFF_RUNNING) 908 msk_rxfilter(sc_if); 909 break; 910 911 case SIOCGIFMEDIA: 912 case SIOCSIFMEDIA: 913 mii = device_get_softc(sc_if->msk_miibus); 914 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 915 break; 916 917 case SIOCSIFCAP: 918 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 919 if ((mask & IFCAP_TXCSUM) != 0) { 920 ifp->if_capenable ^= IFCAP_TXCSUM; 921 if ((IFCAP_TXCSUM & ifp->if_capenable) != 0 && 922 (IFCAP_TXCSUM & ifp->if_capabilities) != 0) 923 ifp->if_hwassist |= MSK_CSUM_FEATURES; 924 else 925 ifp->if_hwassist &= ~MSK_CSUM_FEATURES; 926 } 927 #ifdef notyet 928 if ((mask & IFCAP_VLAN_HWTAGGING) != 0) { 929 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 930 msk_setvlan(sc_if, ifp); 931 } 932 #endif 933 934 if (sc_if->msk_framesize > MSK_MAX_FRAMELEN && 935 sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_EC_U) { 936 /* 937 * In Yukon EC Ultra, TSO & checksum offload is not 938 * supported for jumbo frame. 939 */ 940 ifp->if_hwassist &= ~MSK_CSUM_FEATURES; 941 ifp->if_capenable &= ~IFCAP_TXCSUM; 942 } 943 break; 944 945 default: 946 error = ether_ioctl(ifp, command, data); 947 break; 948 } 949 950 return (error); 951 } 952 953 static int 954 mskc_probe(device_t dev) 955 { 956 const struct msk_product *mp; 957 uint16_t vendor, devid; 958 959 vendor = pci_get_vendor(dev); 960 devid = pci_get_device(dev); 961 for (mp = msk_products; mp->msk_name != NULL; ++mp) { 962 if (vendor == mp->msk_vendorid && devid == mp->msk_deviceid) { 963 device_set_desc(dev, mp->msk_name); 964 return (0); 965 } 966 } 967 return (ENXIO); 968 } 969 970 static int 971 mskc_setup_rambuffer(struct msk_softc *sc) 972 { 973 int next; 974 int i; 975 976 /* Get adapter SRAM size. 
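 * (B2_E_0 appears to report the size in 4KB units; msk_ramsize is
 * kept in KB, hence the multiply by 4 below.)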
*/ 977 sc->msk_ramsize = CSR_READ_1(sc, B2_E_0) * 4; 978 if (bootverbose) { 979 device_printf(sc->msk_dev, 980 "RAM buffer size : %dKB\n", sc->msk_ramsize); 981 } 982 if (sc->msk_ramsize == 0) 983 return (0); 984 sc->msk_pflags |= MSK_FLAG_RAMBUF; 985 986 /* 987 * Give receiver 2/3 of memory and round down to the multiple 988 * of 1024. Tx/Rx RAM buffer size of Yukon II shoud be multiple 989 * of 1024. 990 */ 991 sc->msk_rxqsize = rounddown((sc->msk_ramsize * 1024 * 2) / 3, 1024); 992 sc->msk_txqsize = (sc->msk_ramsize * 1024) - sc->msk_rxqsize; 993 for (i = 0, next = 0; i < sc->msk_num_port; i++) { 994 sc->msk_rxqstart[i] = next; 995 sc->msk_rxqend[i] = next + sc->msk_rxqsize - 1; 996 next = sc->msk_rxqend[i] + 1; 997 sc->msk_txqstart[i] = next; 998 sc->msk_txqend[i] = next + sc->msk_txqsize - 1; 999 next = sc->msk_txqend[i] + 1; 1000 if (bootverbose) { 1001 device_printf(sc->msk_dev, 1002 "Port %d : Rx Queue %dKB(0x%08x:0x%08x)\n", i, 1003 sc->msk_rxqsize / 1024, sc->msk_rxqstart[i], 1004 sc->msk_rxqend[i]); 1005 device_printf(sc->msk_dev, 1006 "Port %d : Tx Queue %dKB(0x%08x:0x%08x)\n", i, 1007 sc->msk_txqsize / 1024, sc->msk_txqstart[i], 1008 sc->msk_txqend[i]); 1009 } 1010 } 1011 1012 return (0); 1013 } 1014 1015 static void 1016 mskc_phy_power(struct msk_softc *sc, int mode) 1017 { 1018 uint32_t our, val; 1019 int i; 1020 1021 switch (mode) { 1022 case MSK_PHY_POWERUP: 1023 /* Switch power to VCC (WA for VAUX problem). */ 1024 CSR_WRITE_1(sc, B0_POWER_CTRL, 1025 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON); 1026 /* Disable Core Clock Division, set Clock Select to 0. */ 1027 CSR_WRITE_4(sc, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS); 1028 1029 val = 0; 1030 if (sc->msk_hw_id == CHIP_ID_YUKON_XL && 1031 sc->msk_hw_rev > CHIP_REV_YU_XL_A1) { 1032 /* Enable bits are inverted. */ 1033 val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS | 1034 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS | 1035 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS; 1036 } 1037 /* 1038 * Enable PCI & Core Clock, enable clock gating for both Links. 1039 */ 1040 CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val); 1041 1042 our = CSR_PCI_READ_4(sc, PCI_OUR_REG_1); 1043 our &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD); 1044 if (sc->msk_hw_id == CHIP_ID_YUKON_XL) { 1045 if (sc->msk_hw_rev > CHIP_REV_YU_XL_A1) { 1046 /* Deassert Low Power for 1st PHY. */ 1047 our |= PCI_Y2_PHY1_COMA; 1048 if (sc->msk_num_port > 1) 1049 our |= PCI_Y2_PHY2_COMA; 1050 } 1051 } 1052 if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U || 1053 sc->msk_hw_id == CHIP_ID_YUKON_EX || 1054 sc->msk_hw_id >= CHIP_ID_YUKON_FE_P) { 1055 val = CSR_PCI_READ_4(sc, PCI_OUR_REG_4); 1056 val &= (PCI_FORCE_ASPM_REQUEST | 1057 PCI_ASPM_GPHY_LINK_DOWN | PCI_ASPM_INT_FIFO_EMPTY | 1058 PCI_ASPM_CLKRUN_REQUEST); 1059 /* Set all bits to 0 except bits 15..12. */ 1060 CSR_PCI_WRITE_4(sc, PCI_OUR_REG_4, val); 1061 val = CSR_PCI_READ_4(sc, PCI_OUR_REG_5); 1062 val &= PCI_CTL_TIM_VMAIN_AV_MSK; 1063 CSR_PCI_WRITE_4(sc, PCI_OUR_REG_5, val); 1064 CSR_PCI_WRITE_4(sc, PCI_CFG_REG_1, 0); 1065 CSR_WRITE_2(sc, B0_CTST, Y2_HW_WOL_ON); 1066 /* 1067 * Disable status race, workaround for 1068 * Yukon EC Ultra & Yukon EX. 1069 */ 1070 val = CSR_READ_4(sc, B2_GP_IO); 1071 val |= GLB_GPIO_STAT_RACE_DIS; 1072 CSR_WRITE_4(sc, B2_GP_IO, val); 1073 CSR_READ_4(sc, B2_GP_IO); 1074 } 1075 /* Release PHY from PowerDown/COMA mode. 
*/ 1076 CSR_PCI_WRITE_4(sc, PCI_OUR_REG_1, our); 1077 1078 for (i = 0; i < sc->msk_num_port; i++) { 1079 CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL), 1080 GMLC_RST_SET); 1081 CSR_WRITE_2(sc, MR_ADDR(i, GMAC_LINK_CTRL), 1082 GMLC_RST_CLR); 1083 } 1084 break; 1085 case MSK_PHY_POWERDOWN: 1086 val = CSR_PCI_READ_4(sc, PCI_OUR_REG_1); 1087 val |= PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD; 1088 if (sc->msk_hw_id == CHIP_ID_YUKON_XL && 1089 sc->msk_hw_rev > CHIP_REV_YU_XL_A1) { 1090 val &= ~PCI_Y2_PHY1_COMA; 1091 if (sc->msk_num_port > 1) 1092 val &= ~PCI_Y2_PHY2_COMA; 1093 } 1094 CSR_PCI_WRITE_4(sc, PCI_OUR_REG_1, val); 1095 1096 val = Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS | 1097 Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS | 1098 Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS; 1099 if (sc->msk_hw_id == CHIP_ID_YUKON_XL && 1100 sc->msk_hw_rev > CHIP_REV_YU_XL_A1) { 1101 /* Enable bits are inverted. */ 1102 val = 0; 1103 } 1104 /* 1105 * Disable PCI & Core Clock, disable clock gating for 1106 * both Links. 1107 */ 1108 CSR_WRITE_1(sc, B2_Y2_CLK_GATE, val); 1109 CSR_WRITE_1(sc, B0_POWER_CTRL, 1110 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF); 1111 break; 1112 default: 1113 break; 1114 } 1115 } 1116 1117 static void 1118 mskc_reset(struct msk_softc *sc) 1119 { 1120 bus_addr_t addr; 1121 uint16_t status; 1122 uint32_t val; 1123 int i; 1124 1125 /* Disable ASF. */ 1126 if (sc->msk_hw_id >= CHIP_ID_YUKON_XL && 1127 sc->msk_hw_id <= CHIP_ID_YUKON_SUPR) { 1128 if (sc->msk_hw_id == CHIP_ID_YUKON_EX || 1129 sc->msk_hw_id == CHIP_ID_YUKON_SUPR) { 1130 CSR_WRITE_4(sc, B28_Y2_CPU_WDOG, 0); 1131 status = CSR_READ_2(sc, B28_Y2_ASF_HCU_CCSR); 1132 /* Clear AHB bridge & microcontroller reset. */ 1133 status &= ~(Y2_ASF_HCU_CCSR_AHB_RST | 1134 Y2_ASF_HCU_CCSR_CPU_RST_MODE); 1135 /* Clear ASF microcontroller state. */ 1136 status &= ~Y2_ASF_HCU_CCSR_UC_STATE_MSK; 1137 status &= ~Y2_ASF_HCU_CCSR_CPU_CLK_DIVIDE_MSK; 1138 CSR_WRITE_2(sc, B28_Y2_ASF_HCU_CCSR, status); 1139 CSR_WRITE_4(sc, B28_Y2_CPU_WDOG, 0); 1140 } else { 1141 CSR_WRITE_1(sc, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET); 1142 } 1143 CSR_WRITE_2(sc, B0_CTST, Y2_ASF_DISABLE); 1144 /* 1145 * Since we disabled ASF, S/W reset is required for 1146 * Power Management. 1147 */ 1148 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); 1149 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR); 1150 } 1151 1152 /* Clear all error bits in the PCI status register. */ 1153 status = pci_read_config(sc->msk_dev, PCIR_STATUS, 2); 1154 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON); 1155 1156 pci_write_config(sc->msk_dev, PCIR_STATUS, status | 1157 PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT | 1158 PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2); 1159 CSR_WRITE_2(sc, B0_CTST, CS_MRST_CLR); 1160 1161 switch (sc->msk_bustype) { 1162 case MSK_PEX_BUS: 1163 /* Clear all PEX errors. */ 1164 CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff); 1165 val = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT); 1166 if ((val & PEX_RX_OV) != 0) { 1167 sc->msk_intrmask &= ~Y2_IS_HW_ERR; 1168 sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP; 1169 } 1170 break; 1171 case MSK_PCI_BUS: 1172 case MSK_PCIX_BUS: 1173 /* Set Cache Line Size to 2(8bytes) if configured to 0. */ 1174 val = pci_read_config(sc->msk_dev, PCIR_CACHELNSZ, 1); 1175 if (val == 0) 1176 pci_write_config(sc->msk_dev, PCIR_CACHELNSZ, 2, 1); 1177 if (sc->msk_bustype == MSK_PCIX_BUS) { 1178 /* Set Cache Line Size opt. 
*/ 1179 val = CSR_PCI_READ_4(sc, PCI_OUR_REG_1); 1180 val |= PCI_CLS_OPT; 1181 CSR_PCI_WRITE_4(sc, PCI_OUR_REG_1, val); 1182 } 1183 break; 1184 } 1185 /* Set PHY power state. */ 1186 mskc_phy_power(sc, MSK_PHY_POWERUP); 1187 1188 /* Reset GPHY/GMAC Control */ 1189 for (i = 0; i < sc->msk_num_port; i++) { 1190 /* GPHY Control reset. */ 1191 CSR_WRITE_1(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_SET); 1192 CSR_WRITE_1(sc, MR_ADDR(i, GPHY_CTRL), GPC_RST_CLR); 1193 /* GMAC Control reset. */ 1194 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_SET); 1195 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_RST_CLR); 1196 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), GMC_F_LOOPB_OFF); 1197 if (sc->msk_hw_id == CHIP_ID_YUKON_EX || 1198 sc->msk_hw_id == CHIP_ID_YUKON_SUPR) { 1199 CSR_WRITE_4(sc, MR_ADDR(i, GMAC_CTRL), 1200 GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON | 1201 GMC_BYP_RETR_ON); 1202 } 1203 } 1204 1205 if (sc->msk_hw_id == CHIP_ID_YUKON_SUPR && 1206 sc->msk_hw_rev > CHIP_REV_YU_SU_B0) 1207 CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, PCI_CLK_MACSEC_DIS); 1208 if (sc->msk_hw_id == CHIP_ID_YUKON_OPT && sc->msk_hw_rev == 0) { 1209 /* Disable PCIe PHY powerdown(reg 0x80, bit7). */ 1210 CSR_WRITE_4(sc, Y2_PEX_PHY_DATA, (0x0080 << 16) | 0x0080); 1211 } 1212 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 1213 1214 /* LED On. */ 1215 CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_ON); 1216 1217 /* Clear TWSI IRQ. */ 1218 CSR_WRITE_4(sc, B2_I2C_IRQ, I2C_CLR_IRQ); 1219 1220 /* Turn off hardware timer. */ 1221 CSR_WRITE_1(sc, B2_TI_CTRL, TIM_STOP); 1222 CSR_WRITE_1(sc, B2_TI_CTRL, TIM_CLR_IRQ); 1223 1224 /* Turn off descriptor polling. */ 1225 CSR_WRITE_1(sc, B28_DPT_CTRL, DPT_STOP); 1226 1227 /* Turn off time stamps. */ 1228 CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_STOP); 1229 CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ); 1230 1231 if (sc->msk_hw_id == CHIP_ID_YUKON_XL || 1232 sc->msk_hw_id == CHIP_ID_YUKON_EC || 1233 sc->msk_hw_id == CHIP_ID_YUKON_FE) { 1234 /* Configure timeout values. */ 1235 for (i = 0; i < sc->msk_num_port; i++) { 1236 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), 1237 RI_RST_SET); 1238 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(i, B3_RI_CTRL), 1239 RI_RST_CLR); 1240 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R1), 1241 MSK_RI_TO_53); 1242 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA1), 1243 MSK_RI_TO_53); 1244 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS1), 1245 MSK_RI_TO_53); 1246 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R1), 1247 MSK_RI_TO_53); 1248 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA1), 1249 MSK_RI_TO_53); 1250 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS1), 1251 MSK_RI_TO_53); 1252 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_R2), 1253 MSK_RI_TO_53); 1254 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XA2), 1255 MSK_RI_TO_53); 1256 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_WTO_XS2), 1257 MSK_RI_TO_53); 1258 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_R2), 1259 MSK_RI_TO_53); 1260 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XA2), 1261 MSK_RI_TO_53); 1262 CSR_WRITE_1(sc, SELECT_RAM_BUFFER(i, B3_RI_RTO_XS2), 1263 MSK_RI_TO_53); 1264 } 1265 } 1266 1267 /* Disable all interrupts. */ 1268 CSR_WRITE_4(sc, B0_HWE_IMSK, 0); 1269 CSR_READ_4(sc, B0_HWE_IMSK); 1270 CSR_WRITE_4(sc, B0_IMSK, 0); 1271 CSR_READ_4(sc, B0_IMSK); 1272 1273 /* 1274 * On dual port PCI-X card, there is an problem where status 1275 * can be received out of order due to split transactions. 
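	 * The workaround below limits outstanding split transactions by
	 * clearing the maximum-outstanding-splits field of the PCI-X
	 * command register.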
1276 */ 1277 if (sc->msk_pcixcap != 0 && sc->msk_num_port > 1) { 1278 uint16_t pcix_cmd; 1279 1280 pcix_cmd = pci_read_config(sc->msk_dev, 1281 sc->msk_pcixcap + PCIXR_COMMAND, 2); 1282 /* Clear Max Outstanding Split Transactions. */ 1283 pcix_cmd &= ~PCIXM_COMMAND_MAX_SPLITS; 1284 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON); 1285 pci_write_config(sc->msk_dev, 1286 sc->msk_pcixcap + PCIXR_COMMAND, pcix_cmd, 2); 1287 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 1288 } 1289 if (sc->msk_pciecap != 0) { 1290 /* Change Max. Read Request Size to 2048 bytes. */ 1291 if (pcie_get_max_readrq(sc->msk_dev) == 1292 PCIEM_DEVCTL_MAX_READRQ_512) { 1293 pcie_set_max_readrq(sc->msk_dev, 1294 PCIEM_DEVCTL_MAX_READRQ_2048); 1295 } 1296 } 1297 1298 /* Clear status list. */ 1299 bzero(sc->msk_stat_ring, 1300 sizeof(struct msk_stat_desc) * MSK_STAT_RING_CNT); 1301 sc->msk_stat_cons = 0; 1302 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_SET); 1303 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_RST_CLR); 1304 /* Set the status list base address. */ 1305 addr = sc->msk_stat_ring_paddr; 1306 CSR_WRITE_4(sc, STAT_LIST_ADDR_LO, MSK_ADDR_LO(addr)); 1307 CSR_WRITE_4(sc, STAT_LIST_ADDR_HI, MSK_ADDR_HI(addr)); 1308 /* Set the status list last index. */ 1309 CSR_WRITE_2(sc, STAT_LAST_IDX, MSK_STAT_RING_CNT - 1); 1310 if (sc->msk_hw_id == CHIP_ID_YUKON_EC && 1311 sc->msk_hw_rev == CHIP_REV_YU_EC_A1) { 1312 /* WA for dev. #4.3 */ 1313 CSR_WRITE_2(sc, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK); 1314 /* WA for dev. #4.18 */ 1315 CSR_WRITE_1(sc, STAT_FIFO_WM, 0x21); 1316 CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x07); 1317 } else { 1318 CSR_WRITE_2(sc, STAT_TX_IDX_TH, 0x0a); 1319 CSR_WRITE_1(sc, STAT_FIFO_WM, 0x10); 1320 if (sc->msk_hw_id == CHIP_ID_YUKON_XL && 1321 sc->msk_hw_rev == CHIP_REV_YU_XL_A0) 1322 CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x04); 1323 else 1324 CSR_WRITE_1(sc, STAT_FIFO_ISR_WM, 0x10); 1325 CSR_WRITE_4(sc, STAT_ISR_TIMER_INI, 0x0190); 1326 } 1327 /* 1328 * Use default value for STAT_ISR_TIMER_INI, STAT_LEV_TIMER_INI. 1329 */ 1330 CSR_WRITE_4(sc, STAT_TX_TIMER_INI, MSK_USECS(sc, 1000)); 1331 1332 /* Enable status unit. */ 1333 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_OP_ON); 1334 1335 CSR_WRITE_1(sc, STAT_TX_TIMER_CTRL, TIM_START); 1336 CSR_WRITE_1(sc, STAT_LEV_TIMER_CTRL, TIM_START); 1337 CSR_WRITE_1(sc, STAT_ISR_TIMER_CTRL, TIM_START); 1338 } 1339 1340 static int 1341 msk_probe(device_t dev) 1342 { 1343 struct msk_softc *sc = device_get_softc(device_get_parent(dev)); 1344 char desc[100]; 1345 1346 /* 1347 * Not much to do here. We always know there will be 1348 * at least one GMAC present, and if there are two, 1349 * mskc_attach() will create a second device instance 1350 * for us. 1351 */ 1352 ksnprintf(desc, sizeof(desc), 1353 "Marvell Technology Group Ltd. 
	    "%s Id 0x%02x Rev 0x%02x",
	    model_name[sc->msk_hw_id - CHIP_ID_YUKON_XL], sc->msk_hw_id,
	    sc->msk_hw_rev);
	device_set_desc_copy(dev, desc);

	return (0);
}

static int
msk_attach(device_t dev)
{
	struct msk_softc *sc = device_get_softc(device_get_parent(dev));
	struct msk_if_softc *sc_if = device_get_softc(dev);
	struct ifnet *ifp = &sc_if->arpcom.ac_if;
	int i, port, error;
	uint8_t eaddr[ETHER_ADDR_LEN];

	port = *(int *)device_get_ivars(dev);
	KKASSERT(port == MSK_PORT_A || port == MSK_PORT_B);

	kfree(device_get_ivars(dev), M_DEVBUF);
	device_set_ivars(dev, NULL);

	callout_init(&sc_if->msk_tick_ch);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	sc_if->msk_if_dev = dev;
	sc_if->msk_port = port;
	sc_if->msk_softc = sc;
	sc_if->msk_ifp = ifp;
	sc_if->msk_flags = sc->msk_pflags;
	sc->msk_if[port] = sc_if;

	/* Setup Tx/Rx queue register offsets. */
	if (port == MSK_PORT_A) {
		sc_if->msk_txq = Q_XA1;
		sc_if->msk_txsq = Q_XS1;
		sc_if->msk_rxq = Q_R1;
	} else {
		sc_if->msk_txq = Q_XA2;
		sc_if->msk_txsq = Q_XS2;
		sc_if->msk_rxq = Q_R2;
	}

	error = msk_txrx_dma_alloc(sc_if);
	if (error)
		goto fail;

	ifp->if_softc = sc_if;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = msk_init;
	ifp->if_ioctl = msk_ioctl;
	ifp->if_start = msk_start;
	ifp->if_watchdog = msk_watchdog;
	ifq_set_maxlen(&ifp->if_snd, MSK_TX_RING_CNT - 1);
	ifq_set_ready(&ifp->if_snd);

#ifdef notyet
	/*
	 * The IFCAP_RXCSUM capability is intentionally disabled because the
	 * hardware has a serious bug in Rx checksum offload for all Yukon II
	 * family hardware.  There seems to be a workaround that makes it work
	 * sometimes, but the workaround also has to check OP code sequences
	 * to verify that the OP code is correct, and sometimes it has to
	 * compute the IP/TCP/UDP checksum in the driver to verify the
	 * checksum computed by the hardware.  If the checksum has to be
	 * recomputed in software anyway, there is no point in having the
	 * hardware compute it, so no effort is spent making Rx checksum
	 * offload work on Yukon II hardware.
	 */
	ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_MTU |
	    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
	ifp->if_hwassist = MSK_CSUM_FEATURES;
	ifp->if_capenable = ifp->if_capabilities;
#endif

	/*
	 * Get station address for this interface. Note that
	 * dual port cards actually come with three station
	 * addresses: one for each port, plus an extra. The
	 * extra one is used by the SysKonnect driver software
	 * as a 'virtual' station address for when both ports
	 * are operating in failover mode. Currently we don't
	 * use this extra address.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		eaddr[i] = CSR_READ_1(sc, B2_MAC_1 + (port * 8) + i);

	sc_if->msk_framesize = ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN;

	/*
	 * Do miibus setup.
	 */
	error = mii_phy_probe(dev, &sc_if->msk_miibus,
	    msk_mediachange, msk_mediastatus);
	if (error) {
		device_printf(sc_if->msk_if_dev, "no PHY found!\n");
		goto fail;
	}

	/*
	 * Call MI attach routine. Can't hold locks when calling into ether_*.
1456 */ 1457 ether_ifattach(ifp, eaddr, &sc->msk_serializer); 1458 #if 0 1459 /* 1460 * Tell the upper layer(s) we support long frames. 1461 * Must appear after the call to ether_ifattach() because 1462 * ether_ifattach() sets ifi_hdrlen to the default value. 1463 */ 1464 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 1465 #endif 1466 1467 return 0; 1468 fail: 1469 msk_detach(dev); 1470 sc->msk_if[port] = NULL; 1471 return (error); 1472 } 1473 1474 /* 1475 * Attach the interface. Allocate softc structures, do ifmedia 1476 * setup and ethernet/BPF attach. 1477 */ 1478 static int 1479 mskc_attach(device_t dev) 1480 { 1481 struct msk_softc *sc; 1482 int error, *port, cpuid; 1483 1484 sc = device_get_softc(dev); 1485 sc->msk_dev = dev; 1486 lwkt_serialize_init(&sc->msk_serializer); 1487 1488 /* 1489 * Initailize sysctl variables 1490 */ 1491 sc->msk_process_limit = mskc_process_limit; 1492 sc->msk_intr_rate = mskc_intr_rate; 1493 1494 #ifndef BURN_BRIDGES 1495 /* 1496 * Handle power management nonsense. 1497 */ 1498 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) { 1499 uint32_t irq, bar0, bar1; 1500 1501 /* Save important PCI config data. */ 1502 bar0 = pci_read_config(dev, PCIR_BAR(0), 4); 1503 bar1 = pci_read_config(dev, PCIR_BAR(1), 4); 1504 irq = pci_read_config(dev, PCIR_INTLINE, 4); 1505 1506 /* Reset the power state. */ 1507 device_printf(dev, "chip is in D%d power mode " 1508 "-- setting to D0\n", pci_get_powerstate(dev)); 1509 1510 pci_set_powerstate(dev, PCI_POWERSTATE_D0); 1511 1512 /* Restore PCI config data. */ 1513 pci_write_config(dev, PCIR_BAR(0), bar0, 4); 1514 pci_write_config(dev, PCIR_BAR(1), bar1, 4); 1515 pci_write_config(dev, PCIR_INTLINE, irq, 4); 1516 } 1517 #endif /* BURN_BRIDGES */ 1518 1519 /* 1520 * Map control/status registers. 1521 */ 1522 pci_enable_busmaster(dev); 1523 1524 /* 1525 * Allocate I/O resource 1526 */ 1527 #ifdef MSK_USEIOSPACE 1528 sc->msk_res_type = SYS_RES_IOPORT; 1529 sc->msk_res_rid = PCIR_BAR(1); 1530 #else 1531 sc->msk_res_type = SYS_RES_MEMORY; 1532 sc->msk_res_rid = PCIR_BAR(0); 1533 #endif 1534 sc->msk_res = bus_alloc_resource_any(dev, sc->msk_res_type, 1535 &sc->msk_res_rid, RF_ACTIVE); 1536 if (sc->msk_res == NULL) { 1537 if (sc->msk_res_type == SYS_RES_MEMORY) { 1538 sc->msk_res_type = SYS_RES_IOPORT; 1539 sc->msk_res_rid = PCIR_BAR(1); 1540 } else { 1541 sc->msk_res_type = SYS_RES_MEMORY; 1542 sc->msk_res_rid = PCIR_BAR(0); 1543 } 1544 sc->msk_res = bus_alloc_resource_any(dev, sc->msk_res_type, 1545 &sc->msk_res_rid, 1546 RF_ACTIVE); 1547 if (sc->msk_res == NULL) { 1548 device_printf(dev, "couldn't allocate %s resources\n", 1549 sc->msk_res_type == SYS_RES_MEMORY ? "memory" : "I/O"); 1550 return (ENXIO); 1551 } 1552 } 1553 sc->msk_res_bt = rman_get_bustag(sc->msk_res); 1554 sc->msk_res_bh = rman_get_bushandle(sc->msk_res); 1555 1556 /* 1557 * Allocate IRQ 1558 */ 1559 sc->msk_irq_rid = 0; 1560 sc->msk_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, 1561 &sc->msk_irq_rid, 1562 RF_SHAREABLE | RF_ACTIVE); 1563 if (sc->msk_irq == NULL) { 1564 device_printf(dev, "couldn't allocate IRQ resources\n"); 1565 error = ENXIO; 1566 goto fail; 1567 } 1568 1569 /* Enable all clocks before accessing any registers. */ 1570 CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, 0); 1571 1572 CSR_WRITE_2(sc, B0_CTST, CS_RST_CLR); 1573 sc->msk_hw_id = CSR_READ_1(sc, B2_CHIP_ID); 1574 sc->msk_hw_rev = (CSR_READ_1(sc, B2_MAC_CFG) >> 4) & 0x0f; 1575 /* Bail out if chip is not recognized. 
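	 * Anything outside the Yukon XL..Yukon Optima ID range, or the
	 * explicit "unknown" ID, is rejected.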
 */
	if (sc->msk_hw_id < CHIP_ID_YUKON_XL ||
	    sc->msk_hw_id > CHIP_ID_YUKON_OPT ||
	    sc->msk_hw_id == CHIP_ID_YUKON_UNKNOWN) {
		device_printf(dev, "unknown device: id=0x%02x, rev=0x%02x\n",
		    sc->msk_hw_id, sc->msk_hw_rev);
		error = ENXIO;
		goto fail;
	}

	/*
	 * Create sysctl tree
	 */
	sysctl_ctx_init(&sc->msk_sysctl_ctx);
	sc->msk_sysctl_tree = SYSCTL_ADD_NODE(&sc->msk_sysctl_ctx,
					      SYSCTL_STATIC_CHILDREN(_hw),
					      OID_AUTO,
					      device_get_nameunit(dev),
					      CTLFLAG_RD, 0, "");
	if (sc->msk_sysctl_tree == NULL) {
		device_printf(dev, "can't add sysctl node\n");
		error = ENXIO;
		goto fail;
	}

	SYSCTL_ADD_PROC(&sc->msk_sysctl_ctx,
			SYSCTL_CHILDREN(sc->msk_sysctl_tree),
			OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
			&sc->msk_process_limit, 0, mskc_sysctl_proc_limit,
			"I", "max number of Rx events to process");
	SYSCTL_ADD_PROC(&sc->msk_sysctl_ctx,
			SYSCTL_CHILDREN(sc->msk_sysctl_tree),
			OID_AUTO, "intr_rate", CTLTYPE_INT | CTLFLAG_RW,
			sc, 0, mskc_sysctl_intr_rate,
			"I", "max number of interrupts per second");
	SYSCTL_ADD_INT(&sc->msk_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->msk_sysctl_tree), OID_AUTO,
		       "defrag_avoided", CTLFLAG_RW, &sc->msk_defrag_avoided,
		       0, "# of avoided m_defrag on TX path");
	SYSCTL_ADD_INT(&sc->msk_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->msk_sysctl_tree), OID_AUTO,
		       "leading_copied", CTLFLAG_RW, &sc->msk_leading_copied,
		       0, "# of leading copies on TX path");
	SYSCTL_ADD_INT(&sc->msk_sysctl_ctx,
		       SYSCTL_CHILDREN(sc->msk_sysctl_tree), OID_AUTO,
		       "trailing_copied", CTLFLAG_RW, &sc->msk_trailing_copied,
		       0, "# of trailing copies on TX path");

	sc->msk_pmd = CSR_READ_1(sc, B2_PMD_TYP);
	if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S')
		sc->msk_coppertype = 0;
	else
		sc->msk_coppertype = 1;
	/* Check number of MACs. */
	sc->msk_num_port = 1;
	if ((CSR_READ_1(sc, B2_Y2_HW_RES) & CFG_DUAL_MAC_MSK) ==
	    CFG_DUAL_MAC_MSK) {
		if (!(CSR_READ_1(sc, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
			sc->msk_num_port++;
	}

	/* Check bus type. */
	if (pci_is_pcie(sc->msk_dev) == 0) {
		sc->msk_bustype = MSK_PEX_BUS;
		sc->msk_pciecap = pci_get_pciecap_ptr(sc->msk_dev);
	} else if (pci_is_pcix(sc->msk_dev) == 0) {
		sc->msk_bustype = MSK_PCIX_BUS;
		sc->msk_pcixcap = pci_get_pcixcap_ptr(sc->msk_dev);
	} else {
		sc->msk_bustype = MSK_PCI_BUS;
	}

	switch (sc->msk_hw_id) {
	case CHIP_ID_YUKON_EC:
	case CHIP_ID_YUKON_EC_U:
		sc->msk_clock = 125;	/* 125 MHz */
		break;
	case CHIP_ID_YUKON_EX:
		sc->msk_clock = 125;	/* 125 MHz */
		break;
	case CHIP_ID_YUKON_FE:
		sc->msk_clock = 100;	/* 100 MHz */
		sc->msk_pflags |= MSK_FLAG_FASTETHER;
		break;
	case CHIP_ID_YUKON_FE_P:
		sc->msk_clock = 50;	/* 50 MHz */
		/* DESCV2 */
		sc->msk_pflags |= MSK_FLAG_FASTETHER;
		if (sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) {
			/*
			 * XXX
			 * FE+ A0 has a status LE writeback bug, so msk(4)
			 * does not rely on the status word of received frames
			 * in msk_rxeof(), which in turn disables use of the
			 * hardware assistance bits reported by the status
			 * word as well as the validity check of the received
			 * frame.  Just pass received frames to the upper
			 * stack with minimal checks and let the upper stack
			 * handle them.
1673 */ 1674 sc->msk_pflags |= MSK_FLAG_NORXCHK; 1675 } 1676 break; 1677 case CHIP_ID_YUKON_XL: 1678 sc->msk_clock = 156; /* 156 Mhz */ 1679 break; 1680 case CHIP_ID_YUKON_SUPR: 1681 sc->msk_clock = 125; /* 125 MHz */ 1682 break; 1683 case CHIP_ID_YUKON_UL_2: 1684 sc->msk_clock = 125; /* 125 Mhz */ 1685 break; 1686 case CHIP_ID_YUKON_OPT: 1687 sc->msk_clock = 125; /* 125 MHz */ 1688 break; 1689 default: 1690 sc->msk_clock = 156; /* 156 Mhz */ 1691 break; 1692 } 1693 1694 error = mskc_status_dma_alloc(sc); 1695 if (error) 1696 goto fail; 1697 1698 /* Set base interrupt mask. */ 1699 sc->msk_intrmask = Y2_IS_HW_ERR | Y2_IS_STAT_BMU; 1700 sc->msk_intrhwemask = Y2_IS_TIST_OV | Y2_IS_MST_ERR | 1701 Y2_IS_IRQ_STAT | Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP; 1702 1703 /* Reset the adapter. */ 1704 mskc_reset(sc); 1705 1706 error = mskc_setup_rambuffer(sc); 1707 if (error) 1708 goto fail; 1709 1710 sc->msk_devs[MSK_PORT_A] = device_add_child(dev, "msk", -1); 1711 if (sc->msk_devs[MSK_PORT_A] == NULL) { 1712 device_printf(dev, "failed to add child for PORT_A\n"); 1713 error = ENXIO; 1714 goto fail; 1715 } 1716 port = kmalloc(sizeof(*port), M_DEVBUF, M_WAITOK); 1717 *port = MSK_PORT_A; 1718 device_set_ivars(sc->msk_devs[MSK_PORT_A], port); 1719 1720 if (sc->msk_num_port > 1) { 1721 sc->msk_devs[MSK_PORT_B] = device_add_child(dev, "msk", -1); 1722 if (sc->msk_devs[MSK_PORT_B] == NULL) { 1723 device_printf(dev, "failed to add child for PORT_B\n"); 1724 error = ENXIO; 1725 goto fail; 1726 } 1727 port = kmalloc(sizeof(*port), M_DEVBUF, M_WAITOK); 1728 *port = MSK_PORT_B; 1729 device_set_ivars(sc->msk_devs[MSK_PORT_B], port); 1730 } 1731 1732 bus_generic_attach(dev); 1733 1734 error = bus_setup_intr(dev, sc->msk_irq, INTR_MPSAFE, 1735 mskc_intr, sc, &sc->msk_intrhand, 1736 &sc->msk_serializer); 1737 if (error) { 1738 device_printf(dev, "couldn't set up interrupt handler\n"); 1739 goto fail; 1740 } 1741 1742 cpuid = rman_get_cpuid(sc->msk_irq); 1743 KKASSERT(cpuid >= 0 && cpuid < ncpus); 1744 1745 if (sc->msk_if[0] != NULL) 1746 sc->msk_if[0]->msk_ifp->if_cpuid = cpuid; 1747 if (sc->msk_if[1] != NULL) 1748 sc->msk_if[1]->msk_ifp->if_cpuid = cpuid; 1749 return 0; 1750 fail: 1751 mskc_detach(dev); 1752 return (error); 1753 } 1754 1755 /* 1756 * Shutdown hardware and free up resources. This can be called any 1757 * time after the mutex has been initialized. It is called in both 1758 * the error case in attach and the normal detach case so it needs 1759 * to be careful about only freeing resources that have actually been 1760 * allocated. 
1761 */ 1762 static int 1763 msk_detach(device_t dev) 1764 { 1765 struct msk_if_softc *sc_if = device_get_softc(dev); 1766 1767 if (device_is_attached(dev)) { 1768 struct msk_softc *sc = sc_if->msk_softc; 1769 struct ifnet *ifp = &sc_if->arpcom.ac_if; 1770 1771 lwkt_serialize_enter(ifp->if_serializer); 1772 1773 if (sc->msk_intrhand != NULL) { 1774 if (sc->msk_if[MSK_PORT_A] != NULL) 1775 msk_stop(sc->msk_if[MSK_PORT_A]); 1776 if (sc->msk_if[MSK_PORT_B] != NULL) 1777 msk_stop(sc->msk_if[MSK_PORT_B]); 1778 1779 bus_teardown_intr(sc->msk_dev, sc->msk_irq, 1780 sc->msk_intrhand); 1781 sc->msk_intrhand = NULL; 1782 } 1783 1784 lwkt_serialize_exit(ifp->if_serializer); 1785 1786 ether_ifdetach(ifp); 1787 } 1788 1789 if (sc_if->msk_miibus != NULL) 1790 device_delete_child(dev, sc_if->msk_miibus); 1791 1792 msk_txrx_dma_free(sc_if); 1793 return (0); 1794 } 1795 1796 static int 1797 mskc_detach(device_t dev) 1798 { 1799 struct msk_softc *sc = device_get_softc(dev); 1800 int *port, i; 1801 1802 #ifdef INVARIANTS 1803 if (device_is_attached(dev)) { 1804 KASSERT(sc->msk_intrhand == NULL, 1805 ("intr is not torn down yet")); 1806 } 1807 #endif 1808 1809 for (i = 0; i < sc->msk_num_port; ++i) { 1810 if (sc->msk_devs[i] != NULL) { 1811 port = device_get_ivars(sc->msk_devs[i]); 1812 if (port != NULL) { 1813 kfree(port, M_DEVBUF); 1814 device_set_ivars(sc->msk_devs[i], NULL); 1815 } 1816 device_delete_child(dev, sc->msk_devs[i]); 1817 } 1818 } 1819 1820 /* Disable all interrupts. */ 1821 CSR_WRITE_4(sc, B0_IMSK, 0); 1822 CSR_READ_4(sc, B0_IMSK); 1823 CSR_WRITE_4(sc, B0_HWE_IMSK, 0); 1824 CSR_READ_4(sc, B0_HWE_IMSK); 1825 1826 /* LED Off. */ 1827 CSR_WRITE_2(sc, B0_CTST, Y2_LED_STAT_OFF); 1828 1829 /* Put hardware reset. */ 1830 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); 1831 1832 mskc_status_dma_free(sc); 1833 1834 if (sc->msk_irq != NULL) { 1835 bus_release_resource(dev, SYS_RES_IRQ, sc->msk_irq_rid, 1836 sc->msk_irq); 1837 } 1838 if (sc->msk_res != NULL) { 1839 bus_release_resource(dev, sc->msk_res_type, sc->msk_res_rid, 1840 sc->msk_res); 1841 } 1842 1843 if (sc->msk_sysctl_tree != NULL) 1844 sysctl_ctx_free(&sc->msk_sysctl_ctx); 1845 1846 return (0); 1847 } 1848 1849 /* Create status DMA region. */ 1850 static int 1851 mskc_status_dma_alloc(struct msk_softc *sc) 1852 { 1853 bus_dmamem_t dmem; 1854 int error; 1855 1856 error = bus_dmamem_coherent(NULL/* XXX parent */, MSK_STAT_ALIGN, 0, 1857 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 1858 MSK_STAT_RING_SZ, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem); 1859 if (error) { 1860 device_printf(sc->msk_dev, 1861 "failed to create status coherent DMA memory\n"); 1862 return error; 1863 } 1864 sc->msk_stat_tag = dmem.dmem_tag; 1865 sc->msk_stat_map = dmem.dmem_map; 1866 sc->msk_stat_ring = dmem.dmem_addr; 1867 sc->msk_stat_ring_paddr = dmem.dmem_busaddr; 1868 1869 return (0); 1870 } 1871 1872 static void 1873 mskc_status_dma_free(struct msk_softc *sc) 1874 { 1875 /* Destroy status block. */ 1876 if (sc->msk_stat_tag) { 1877 bus_dmamap_unload(sc->msk_stat_tag, sc->msk_stat_map); 1878 bus_dmamem_free(sc->msk_stat_tag, sc->msk_stat_ring, 1879 sc->msk_stat_map); 1880 bus_dma_tag_destroy(sc->msk_stat_tag); 1881 sc->msk_stat_tag = NULL; 1882 } 1883 } 1884 1885 static int 1886 msk_txrx_dma_alloc(struct msk_if_softc *sc_if) 1887 { 1888 int error, i, j; 1889 #ifdef MSK_JUMBO 1890 struct msk_rxdesc *jrxd; 1891 struct msk_jpool_entry *entry; 1892 uint8_t *ptr; 1893 #endif 1894 bus_size_t rxalign; 1895 1896 /* Create parent DMA tag. 
*/ 1897 /* 1898 * XXX 1899 * It seems that Yukon II supports full 64bits DMA operations. But 1900 * it needs two descriptors(list elements) for 64bits DMA operations. 1901 * Since we don't know what DMA address mappings(32bits or 64bits) 1902 * would be used in advance for each mbufs, we limits its DMA space 1903 * to be in range of 32bits address space. Otherwise, we should check 1904 * what DMA address is used and chain another descriptor for the 1905 * 64bits DMA operation. This also means descriptor ring size is 1906 * variable. Limiting DMA address to be in 32bit address space greatly 1907 * simplyfies descriptor handling and possibly would increase 1908 * performance a bit due to efficient handling of descriptors. 1909 * Apart from harassing checksum offloading mechanisms, it seems 1910 * it's really bad idea to use a seperate descriptor for 64bit 1911 * DMA operation to save small descriptor memory. Anyway, I've 1912 * never seen these exotic scheme on ethernet interface hardware. 1913 */ 1914 error = bus_dma_tag_create( 1915 NULL, /* parent */ 1916 1, 0, /* alignment, boundary */ 1917 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 1918 BUS_SPACE_MAXADDR, /* highaddr */ 1919 NULL, NULL, /* filter, filterarg */ 1920 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 1921 0, /* nsegments */ 1922 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 1923 0, /* flags */ 1924 &sc_if->msk_cdata.msk_parent_tag); 1925 if (error) { 1926 device_printf(sc_if->msk_if_dev, 1927 "failed to create parent DMA tag\n"); 1928 return error; 1929 } 1930 1931 /* Create DMA stuffs for Tx ring. */ 1932 error = msk_dmamem_create(sc_if->msk_if_dev, MSK_TX_RING_SZ, 1933 &sc_if->msk_cdata.msk_tx_ring_tag, 1934 (void *)&sc_if->msk_rdata.msk_tx_ring, 1935 &sc_if->msk_rdata.msk_tx_ring_paddr, 1936 &sc_if->msk_cdata.msk_tx_ring_map); 1937 if (error) { 1938 device_printf(sc_if->msk_if_dev, 1939 "failed to create TX ring DMA stuffs\n"); 1940 return error; 1941 } 1942 1943 /* Create DMA stuffs for Rx ring. */ 1944 error = msk_dmamem_create(sc_if->msk_if_dev, MSK_RX_RING_SZ, 1945 &sc_if->msk_cdata.msk_rx_ring_tag, 1946 (void *)&sc_if->msk_rdata.msk_rx_ring, 1947 &sc_if->msk_rdata.msk_rx_ring_paddr, 1948 &sc_if->msk_cdata.msk_rx_ring_map); 1949 if (error) { 1950 device_printf(sc_if->msk_if_dev, 1951 "failed to create RX ring DMA stuffs\n"); 1952 return error; 1953 } 1954 1955 /* Create tag for Tx buffers. */ 1956 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 1957 1, 0, /* alignment, boundary */ 1958 BUS_SPACE_MAXADDR, /* lowaddr */ 1959 BUS_SPACE_MAXADDR, /* highaddr */ 1960 NULL, NULL, /* filter, filterarg */ 1961 MSK_JUMBO_FRAMELEN, /* maxsize */ 1962 MSK_MAXTXSEGS, /* nsegments */ 1963 MSK_MAXSGSIZE, /* maxsegsize */ 1964 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | 1965 BUS_DMA_ONEBPAGE, /* flags */ 1966 &sc_if->msk_cdata.msk_tx_tag); 1967 if (error) { 1968 device_printf(sc_if->msk_if_dev, 1969 "failed to create Tx DMA tag\n"); 1970 return error; 1971 } 1972 1973 /* Create DMA maps for Tx buffers. 
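 * One map is created per MSK_TX_RING_CNT slot.  If any
 * bus_dmamap_create() call fails, the maps created so far are
 * destroyed along with the Tx buffer tag before the error is
 * returned, so nothing is leaked on a partial failure.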
*/ 1974 for (i = 0; i < MSK_TX_RING_CNT; i++) { 1975 struct msk_txdesc *txd = &sc_if->msk_cdata.msk_txdesc[i]; 1976 1977 error = bus_dmamap_create(sc_if->msk_cdata.msk_tx_tag, 1978 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, 1979 &txd->tx_dmamap); 1980 if (error) { 1981 device_printf(sc_if->msk_if_dev, 1982 "failed to create %dth Tx dmamap\n", i); 1983 1984 for (j = 0; j < i; ++j) { 1985 txd = &sc_if->msk_cdata.msk_txdesc[j]; 1986 bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag, 1987 txd->tx_dmamap); 1988 } 1989 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag); 1990 sc_if->msk_cdata.msk_tx_tag = NULL; 1991 1992 return error; 1993 } 1994 } 1995 1996 /* 1997 * Workaround hardware hang which seems to happen when Rx buffer 1998 * is not aligned on multiple of FIFO word(8 bytes). 1999 */ 2000 if (sc_if->msk_flags & MSK_FLAG_RAMBUF) 2001 rxalign = MSK_RX_BUF_ALIGN; 2002 else 2003 rxalign = 1; 2004 2005 /* Create tag for Rx buffers. */ 2006 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2007 rxalign, 0, /* alignment, boundary */ 2008 BUS_SPACE_MAXADDR, /* lowaddr */ 2009 BUS_SPACE_MAXADDR, /* highaddr */ 2010 NULL, NULL, /* filter, filterarg */ 2011 MCLBYTES, /* maxsize */ 2012 1, /* nsegments */ 2013 MCLBYTES, /* maxsegsize */ 2014 BUS_DMA_ALLOCNOW | BUS_DMA_ALIGNED | 2015 BUS_DMA_WAITOK, /* flags */ 2016 &sc_if->msk_cdata.msk_rx_tag); 2017 if (error) { 2018 device_printf(sc_if->msk_if_dev, 2019 "failed to create Rx DMA tag\n"); 2020 return error; 2021 } 2022 2023 /* Create DMA maps for Rx buffers. */ 2024 error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, BUS_DMA_WAITOK, 2025 &sc_if->msk_cdata.msk_rx_sparemap); 2026 if (error) { 2027 device_printf(sc_if->msk_if_dev, 2028 "failed to create spare Rx dmamap\n"); 2029 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag); 2030 sc_if->msk_cdata.msk_rx_tag = NULL; 2031 return error; 2032 } 2033 for (i = 0; i < MSK_RX_RING_CNT; i++) { 2034 struct msk_rxdesc *rxd = &sc_if->msk_cdata.msk_rxdesc[i]; 2035 2036 error = bus_dmamap_create(sc_if->msk_cdata.msk_rx_tag, 2037 BUS_DMA_WAITOK, &rxd->rx_dmamap); 2038 if (error) { 2039 device_printf(sc_if->msk_if_dev, 2040 "failed to create %dth Rx dmamap\n", i); 2041 2042 for (j = 0; j < i; ++j) { 2043 rxd = &sc_if->msk_cdata.msk_rxdesc[j]; 2044 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag, 2045 rxd->rx_dmamap); 2046 } 2047 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag, 2048 sc_if->msk_cdata.msk_rx_sparemap); 2049 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag); 2050 sc_if->msk_cdata.msk_rx_tag = NULL; 2051 2052 return error; 2053 } 2054 } 2055 2056 #ifdef MSK_JUMBO 2057 SLIST_INIT(&sc_if->msk_jfree_listhead); 2058 SLIST_INIT(&sc_if->msk_jinuse_listhead); 2059 2060 /* Create tag for jumbo Rx ring. */ 2061 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2062 MSK_RING_ALIGN, 0, /* alignment, boundary */ 2063 BUS_SPACE_MAXADDR, /* lowaddr */ 2064 BUS_SPACE_MAXADDR, /* highaddr */ 2065 NULL, NULL, /* filter, filterarg */ 2066 MSK_JUMBO_RX_RING_SZ, /* maxsize */ 2067 1, /* nsegments */ 2068 MSK_JUMBO_RX_RING_SZ, /* maxsegsize */ 2069 0, /* flags */ 2070 NULL, NULL, /* lockfunc, lockarg */ 2071 &sc_if->msk_cdata.msk_jumbo_rx_ring_tag); 2072 if (error != 0) { 2073 device_printf(sc_if->msk_if_dev, 2074 "failed to create jumbo Rx ring DMA tag\n"); 2075 goto fail; 2076 } 2077 2078 /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. 
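 * XXX
 * This MSK_JUMBO-only block still looks like unported FreeBSD code:
 * it references a local 'ctx' that is never declared in this
 * function and uses malloc()/free() rather than kmalloc()/kfree(),
 * so it would not build if MSK_JUMBO were ever defined.  The
 * msk_dmamap_cb() callback is expected to hand back the ring's bus
 * address through ctx.msk_busaddr once the load completes.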
*/ 2079 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 2080 (void **)&sc_if->msk_rdata.msk_jumbo_rx_ring, 2081 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, 2082 &sc_if->msk_cdata.msk_jumbo_rx_ring_map); 2083 if (error != 0) { 2084 device_printf(sc_if->msk_if_dev, 2085 "failed to allocate DMA'able memory for jumbo Rx ring\n"); 2086 goto fail; 2087 } 2088 2089 ctx.msk_busaddr = 0; 2090 error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 2091 sc_if->msk_cdata.msk_jumbo_rx_ring_map, 2092 sc_if->msk_rdata.msk_jumbo_rx_ring, MSK_JUMBO_RX_RING_SZ, 2093 msk_dmamap_cb, &ctx, 0); 2094 if (error != 0) { 2095 device_printf(sc_if->msk_if_dev, 2096 "failed to load DMA'able memory for jumbo Rx ring\n"); 2097 goto fail; 2098 } 2099 sc_if->msk_rdata.msk_jumbo_rx_ring_paddr = ctx.msk_busaddr; 2100 2101 /* Create tag for jumbo buffer blocks. */ 2102 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2103 PAGE_SIZE, 0, /* alignment, boundary */ 2104 BUS_SPACE_MAXADDR, /* lowaddr */ 2105 BUS_SPACE_MAXADDR, /* highaddr */ 2106 NULL, NULL, /* filter, filterarg */ 2107 MSK_JMEM, /* maxsize */ 2108 1, /* nsegments */ 2109 MSK_JMEM, /* maxsegsize */ 2110 0, /* flags */ 2111 NULL, NULL, /* lockfunc, lockarg */ 2112 &sc_if->msk_cdata.msk_jumbo_tag); 2113 if (error != 0) { 2114 device_printf(sc_if->msk_if_dev, 2115 "failed to create jumbo Rx buffer block DMA tag\n"); 2116 goto fail; 2117 } 2118 2119 /* Create tag for jumbo Rx buffers. */ 2120 error = bus_dma_tag_create(sc_if->msk_cdata.msk_parent_tag,/* parent */ 2121 PAGE_SIZE, 0, /* alignment, boundary */ 2122 BUS_SPACE_MAXADDR, /* lowaddr */ 2123 BUS_SPACE_MAXADDR, /* highaddr */ 2124 NULL, NULL, /* filter, filterarg */ 2125 MCLBYTES * MSK_MAXRXSEGS, /* maxsize */ 2126 MSK_MAXRXSEGS, /* nsegments */ 2127 MSK_JLEN, /* maxsegsize */ 2128 0, /* flags */ 2129 NULL, NULL, /* lockfunc, lockarg */ 2130 &sc_if->msk_cdata.msk_jumbo_rx_tag); 2131 if (error != 0) { 2132 device_printf(sc_if->msk_if_dev, 2133 "failed to create jumbo Rx DMA tag\n"); 2134 goto fail; 2135 } 2136 2137 /* Create DMA maps for jumbo Rx buffers. */ 2138 if ((error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0, 2139 &sc_if->msk_cdata.msk_jumbo_rx_sparemap)) != 0) { 2140 device_printf(sc_if->msk_if_dev, 2141 "failed to create spare jumbo Rx dmamap\n"); 2142 goto fail; 2143 } 2144 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) { 2145 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i]; 2146 jrxd->rx_m = NULL; 2147 jrxd->rx_dmamap = NULL; 2148 error = bus_dmamap_create(sc_if->msk_cdata.msk_jumbo_rx_tag, 0, 2149 &jrxd->rx_dmamap); 2150 if (error != 0) { 2151 device_printf(sc_if->msk_if_dev, 2152 "failed to create jumbo Rx dmamap\n"); 2153 goto fail; 2154 } 2155 } 2156 2157 /* Allocate DMA'able memory and load the DMA map for jumbo buf. 
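 * Once loaded, the block is carved into MSK_JSLOTS slots of MSK_JLEN
 * bytes each; the slot addresses are recorded in msk_jslots[] and
 * every slot initially sits on the jumbo free list.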
*/ 2158 error = bus_dmamem_alloc(sc_if->msk_cdata.msk_jumbo_tag, 2159 (void **)&sc_if->msk_rdata.msk_jumbo_buf, 2160 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, 2161 &sc_if->msk_cdata.msk_jumbo_map); 2162 if (error != 0) { 2163 device_printf(sc_if->msk_if_dev, 2164 "failed to allocate DMA'able memory for jumbo buf\n"); 2165 goto fail; 2166 } 2167 2168 ctx.msk_busaddr = 0; 2169 error = bus_dmamap_load(sc_if->msk_cdata.msk_jumbo_tag, 2170 sc_if->msk_cdata.msk_jumbo_map, sc_if->msk_rdata.msk_jumbo_buf, 2171 MSK_JMEM, msk_dmamap_cb, &ctx, 0); 2172 if (error != 0) { 2173 device_printf(sc_if->msk_if_dev, 2174 "failed to load DMA'able memory for jumbobuf\n"); 2175 goto fail; 2176 } 2177 sc_if->msk_rdata.msk_jumbo_buf_paddr = ctx.msk_busaddr; 2178 2179 /* 2180 * Now divide it up into 9K pieces and save the addresses 2181 * in an array. 2182 */ 2183 ptr = sc_if->msk_rdata.msk_jumbo_buf; 2184 for (i = 0; i < MSK_JSLOTS; i++) { 2185 sc_if->msk_cdata.msk_jslots[i] = ptr; 2186 ptr += MSK_JLEN; 2187 entry = malloc(sizeof(struct msk_jpool_entry), 2188 M_DEVBUF, M_WAITOK); 2189 if (entry == NULL) { 2190 device_printf(sc_if->msk_if_dev, 2191 "no memory for jumbo buffers!\n"); 2192 error = ENOMEM; 2193 goto fail; 2194 } 2195 entry->slot = i; 2196 SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry, 2197 jpool_entries); 2198 } 2199 #endif 2200 return 0; 2201 } 2202 2203 static void 2204 msk_txrx_dma_free(struct msk_if_softc *sc_if) 2205 { 2206 struct msk_txdesc *txd; 2207 struct msk_rxdesc *rxd; 2208 #ifdef MSK_JUMBO 2209 struct msk_rxdesc *jrxd; 2210 struct msk_jpool_entry *entry; 2211 #endif 2212 int i; 2213 2214 #ifdef MSK_JUMBO 2215 MSK_JLIST_LOCK(sc_if); 2216 while ((entry = SLIST_FIRST(&sc_if->msk_jinuse_listhead))) { 2217 device_printf(sc_if->msk_if_dev, 2218 "asked to free buffer that is in use!\n"); 2219 SLIST_REMOVE_HEAD(&sc_if->msk_jinuse_listhead, jpool_entries); 2220 SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry, 2221 jpool_entries); 2222 } 2223 2224 while (!SLIST_EMPTY(&sc_if->msk_jfree_listhead)) { 2225 entry = SLIST_FIRST(&sc_if->msk_jfree_listhead); 2226 SLIST_REMOVE_HEAD(&sc_if->msk_jfree_listhead, jpool_entries); 2227 free(entry, M_DEVBUF); 2228 } 2229 MSK_JLIST_UNLOCK(sc_if); 2230 2231 /* Destroy jumbo buffer block. */ 2232 if (sc_if->msk_cdata.msk_jumbo_map) 2233 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_tag, 2234 sc_if->msk_cdata.msk_jumbo_map); 2235 2236 if (sc_if->msk_rdata.msk_jumbo_buf) { 2237 bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_tag, 2238 sc_if->msk_rdata.msk_jumbo_buf, 2239 sc_if->msk_cdata.msk_jumbo_map); 2240 sc_if->msk_rdata.msk_jumbo_buf = NULL; 2241 sc_if->msk_cdata.msk_jumbo_map = NULL; 2242 } 2243 2244 /* Jumbo Rx ring. */ 2245 if (sc_if->msk_cdata.msk_jumbo_rx_ring_tag) { 2246 if (sc_if->msk_cdata.msk_jumbo_rx_ring_map) 2247 bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 2248 sc_if->msk_cdata.msk_jumbo_rx_ring_map); 2249 if (sc_if->msk_cdata.msk_jumbo_rx_ring_map && 2250 sc_if->msk_rdata.msk_jumbo_rx_ring) 2251 bus_dmamem_free(sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 2252 sc_if->msk_rdata.msk_jumbo_rx_ring, 2253 sc_if->msk_cdata.msk_jumbo_rx_ring_map); 2254 sc_if->msk_rdata.msk_jumbo_rx_ring = NULL; 2255 sc_if->msk_cdata.msk_jumbo_rx_ring_map = NULL; 2256 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_ring_tag); 2257 sc_if->msk_cdata.msk_jumbo_rx_ring_tag = NULL; 2258 } 2259 2260 /* Jumbo Rx buffers. 
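 * Teardown mirrors the allocation in reverse: the per-descriptor
 * maps go first, then the spare map, and finally the jumbo Rx tag
 * itself.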
*/ 2261 if (sc_if->msk_cdata.msk_jumbo_rx_tag) { 2262 for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) { 2263 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i]; 2264 if (jrxd->rx_dmamap) { 2265 bus_dmamap_destroy( 2266 sc_if->msk_cdata.msk_jumbo_rx_tag, 2267 jrxd->rx_dmamap); 2268 jrxd->rx_dmamap = NULL; 2269 } 2270 } 2271 if (sc_if->msk_cdata.msk_jumbo_rx_sparemap) { 2272 bus_dmamap_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag, 2273 sc_if->msk_cdata.msk_jumbo_rx_sparemap); 2274 sc_if->msk_cdata.msk_jumbo_rx_sparemap = 0; 2275 } 2276 bus_dma_tag_destroy(sc_if->msk_cdata.msk_jumbo_rx_tag); 2277 sc_if->msk_cdata.msk_jumbo_rx_tag = NULL; 2278 } 2279 #endif 2280 2281 /* Tx ring. */ 2282 msk_dmamem_destroy(sc_if->msk_cdata.msk_tx_ring_tag, 2283 sc_if->msk_rdata.msk_tx_ring, 2284 sc_if->msk_cdata.msk_tx_ring_map); 2285 2286 /* Rx ring. */ 2287 msk_dmamem_destroy(sc_if->msk_cdata.msk_rx_ring_tag, 2288 sc_if->msk_rdata.msk_rx_ring, 2289 sc_if->msk_cdata.msk_rx_ring_map); 2290 2291 /* Tx buffers. */ 2292 if (sc_if->msk_cdata.msk_tx_tag) { 2293 for (i = 0; i < MSK_TX_RING_CNT; i++) { 2294 txd = &sc_if->msk_cdata.msk_txdesc[i]; 2295 bus_dmamap_destroy(sc_if->msk_cdata.msk_tx_tag, 2296 txd->tx_dmamap); 2297 } 2298 bus_dma_tag_destroy(sc_if->msk_cdata.msk_tx_tag); 2299 sc_if->msk_cdata.msk_tx_tag = NULL; 2300 } 2301 2302 /* Rx buffers. */ 2303 if (sc_if->msk_cdata.msk_rx_tag) { 2304 for (i = 0; i < MSK_RX_RING_CNT; i++) { 2305 rxd = &sc_if->msk_cdata.msk_rxdesc[i]; 2306 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag, 2307 rxd->rx_dmamap); 2308 } 2309 bus_dmamap_destroy(sc_if->msk_cdata.msk_rx_tag, 2310 sc_if->msk_cdata.msk_rx_sparemap); 2311 bus_dma_tag_destroy(sc_if->msk_cdata.msk_rx_tag); 2312 sc_if->msk_cdata.msk_rx_tag = NULL; 2313 } 2314 2315 if (sc_if->msk_cdata.msk_parent_tag) { 2316 bus_dma_tag_destroy(sc_if->msk_cdata.msk_parent_tag); 2317 sc_if->msk_cdata.msk_parent_tag = NULL; 2318 } 2319 } 2320 2321 #ifdef MSK_JUMBO 2322 /* 2323 * Allocate a jumbo buffer. 2324 */ 2325 static void * 2326 msk_jalloc(struct msk_if_softc *sc_if) 2327 { 2328 struct msk_jpool_entry *entry; 2329 2330 MSK_JLIST_LOCK(sc_if); 2331 2332 entry = SLIST_FIRST(&sc_if->msk_jfree_listhead); 2333 2334 if (entry == NULL) { 2335 MSK_JLIST_UNLOCK(sc_if); 2336 return (NULL); 2337 } 2338 2339 SLIST_REMOVE_HEAD(&sc_if->msk_jfree_listhead, jpool_entries); 2340 SLIST_INSERT_HEAD(&sc_if->msk_jinuse_listhead, entry, jpool_entries); 2341 2342 MSK_JLIST_UNLOCK(sc_if); 2343 2344 return (sc_if->msk_cdata.msk_jslots[entry->slot]); 2345 } 2346 2347 /* 2348 * Release a jumbo buffer. 2349 */ 2350 static void 2351 msk_jfree(void *buf, void *args) 2352 { 2353 struct msk_if_softc *sc_if; 2354 struct msk_jpool_entry *entry; 2355 int i; 2356 2357 /* Extract the softc struct pointer. */ 2358 sc_if = (struct msk_if_softc *)args; 2359 KASSERT(sc_if != NULL, ("%s: can't find softc pointer!", __func__)); 2360 2361 MSK_JLIST_LOCK(sc_if); 2362 /* Calculate the slot this buffer belongs to. 
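 * The jumbo area is one contiguous allocation split into fixed size
 * MSK_JLEN slots, so the slot index is simply the buffer's byte
 * offset from the base of the area:
 *   slot = (buf - msk_jumbo_buf) / MSK_JLEN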
*/ 2363 i = ((vm_offset_t)buf 2364 - (vm_offset_t)sc_if->msk_rdata.msk_jumbo_buf) / MSK_JLEN; 2365 KASSERT(i >= 0 && i < MSK_JSLOTS, 2366 ("%s: asked to free buffer that we don't manage!", __func__)); 2367 2368 entry = SLIST_FIRST(&sc_if->msk_jinuse_listhead); 2369 KASSERT(entry != NULL, ("%s: buffer not in use!", __func__)); 2370 entry->slot = i; 2371 SLIST_REMOVE_HEAD(&sc_if->msk_jinuse_listhead, jpool_entries); 2372 SLIST_INSERT_HEAD(&sc_if->msk_jfree_listhead, entry, jpool_entries); 2373 if (SLIST_EMPTY(&sc_if->msk_jinuse_listhead)) 2374 wakeup(sc_if); 2375 2376 MSK_JLIST_UNLOCK(sc_if); 2377 } 2378 #endif 2379 2380 static int 2381 msk_encap(struct msk_if_softc *sc_if, struct mbuf **m_head) 2382 { 2383 struct msk_txdesc *txd, *txd_last; 2384 struct msk_tx_desc *tx_le; 2385 struct mbuf *m; 2386 bus_dmamap_t map; 2387 bus_dma_segment_t txsegs[MSK_MAXTXSEGS]; 2388 uint32_t control, prod, si; 2389 uint16_t offset, tcp_offset; 2390 int error, i, nsegs, maxsegs, defrag; 2391 2392 maxsegs = MSK_TX_RING_CNT - sc_if->msk_cdata.msk_tx_cnt - 2393 MSK_RESERVED_TX_DESC_CNT; 2394 KASSERT(maxsegs >= MSK_SPARE_TX_DESC_CNT, 2395 ("not enough spare TX desc")); 2396 if (maxsegs > MSK_MAXTXSEGS) 2397 maxsegs = MSK_MAXTXSEGS; 2398 2399 /* 2400 * Align TX buffer to 64bytes boundary. This greately improves 2401 * bulk data TX performance on my 88E8053 (+100Mbps) at least. 2402 * Try avoiding m_defrag(), if the mbufs are not chained together 2403 * by m_next (i.e. m->m_len == m->m_pkthdr.len). 2404 */ 2405 2406 #define MSK_TXBUF_ALIGN 64 2407 #define MSK_TXBUF_MASK (MSK_TXBUF_ALIGN - 1) 2408 2409 defrag = 1; 2410 m = *m_head; 2411 if (m->m_len == m->m_pkthdr.len) { 2412 int space; 2413 2414 space = ((uintptr_t)m->m_data & MSK_TXBUF_MASK); 2415 if (space) { 2416 if (M_WRITABLE(m)) { 2417 if (M_TRAILINGSPACE(m) >= space) { 2418 /* e.g. TCP ACKs */ 2419 bcopy(m->m_data, m->m_data + space, 2420 m->m_len); 2421 m->m_data += space; 2422 defrag = 0; 2423 sc_if->msk_softc->msk_trailing_copied++; 2424 } else { 2425 space = MSK_TXBUF_ALIGN - space; 2426 if (M_LEADINGSPACE(m) >= space) { 2427 /* e.g. Small UDP datagrams */ 2428 bcopy(m->m_data, 2429 m->m_data - space, 2430 m->m_len); 2431 m->m_data -= space; 2432 defrag = 0; 2433 sc_if->msk_softc-> 2434 msk_leading_copied++; 2435 } 2436 } 2437 } 2438 } else { 2439 /* e.g. on forwarding path */ 2440 defrag = 0; 2441 } 2442 } 2443 if (defrag) { 2444 m = m_defrag(*m_head, MB_DONTWAIT); 2445 if (m == NULL) { 2446 m_freem(*m_head); 2447 *m_head = NULL; 2448 return ENOBUFS; 2449 } 2450 *m_head = m; 2451 } else { 2452 sc_if->msk_softc->msk_defrag_avoided++; 2453 } 2454 2455 #undef MSK_TXBUF_MASK 2456 #undef MSK_TXBUF_ALIGN 2457 2458 tcp_offset = offset = 0; 2459 if (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) { 2460 /* 2461 * Since mbuf has no protocol specific structure information 2462 * in it we have to inspect protocol information here to 2463 * setup TSO and checksum offload. I don't know why Marvell 2464 * made a such decision in chip design because other GigE 2465 * hardwares normally takes care of all these chores in 2466 * hardware. However, TSO performance of Yukon II is very 2467 * good such that it's worth to implement it. 2468 */ 2469 struct ether_header *eh; 2470 struct ip *ip; 2471 2472 /* TODO check for M_WRITABLE(m) */ 2473 2474 offset = sizeof(struct ether_header); 2475 m = m_pullup(m, offset); 2476 if (m == NULL) { 2477 *m_head = NULL; 2478 return (ENOBUFS); 2479 } 2480 eh = mtod(m, struct ether_header *); 2481 /* Check if hardware VLAN insertion is off. 
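 * If the frame already carries a software inserted 802.1Q header,
 * the IP header starts after the larger ether_vlan_header, so the
 * pullup is redone with that offset before the IP header length and
 * the checksum offsets are extracted.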
*/ 2482 if (eh->ether_type == htons(ETHERTYPE_VLAN)) { 2483 offset = sizeof(struct ether_vlan_header); 2484 m = m_pullup(m, offset); 2485 if (m == NULL) { 2486 *m_head = NULL; 2487 return (ENOBUFS); 2488 } 2489 } 2490 m = m_pullup(m, offset + sizeof(struct ip)); 2491 if (m == NULL) { 2492 *m_head = NULL; 2493 return (ENOBUFS); 2494 } 2495 ip = (struct ip *)(mtod(m, char *) + offset); 2496 offset += (ip->ip_hl << 2); 2497 tcp_offset = offset; 2498 /* 2499 * It seems that Yukon II has Tx checksum offload bug for 2500 * small TCP packets that's less than 60 bytes in size 2501 * (e.g. TCP window probe packet, pure ACK packet). 2502 * Common work around like padding with zeros to make the 2503 * frame minimum ethernet frame size didn't work at all. 2504 * Instead of disabling checksum offload completely we 2505 * resort to S/W checksum routine when we encounter short 2506 * TCP frames. 2507 * Short UDP packets appear to be handled correctly by 2508 * Yukon II. 2509 */ 2510 if (m->m_pkthdr.len < MSK_MIN_FRAMELEN && 2511 (m->m_pkthdr.csum_flags & CSUM_TCP) != 0) { 2512 uint16_t csum; 2513 2514 csum = in_cksum_skip(m, ntohs(ip->ip_len) + offset - 2515 (ip->ip_hl << 2), offset); 2516 *(uint16_t *)(m->m_data + offset + 2517 m->m_pkthdr.csum_data) = csum; 2518 m->m_pkthdr.csum_flags &= ~CSUM_TCP; 2519 } 2520 *m_head = m; 2521 } 2522 2523 prod = sc_if->msk_cdata.msk_tx_prod; 2524 txd = &sc_if->msk_cdata.msk_txdesc[prod]; 2525 txd_last = txd; 2526 map = txd->tx_dmamap; 2527 2528 error = bus_dmamap_load_mbuf_defrag(sc_if->msk_cdata.msk_tx_tag, map, 2529 m_head, txsegs, maxsegs, &nsegs, BUS_DMA_NOWAIT); 2530 if (error) { 2531 m_freem(*m_head); 2532 *m_head = NULL; 2533 return error; 2534 } 2535 bus_dmamap_sync(sc_if->msk_cdata.msk_tx_tag, map, BUS_DMASYNC_PREWRITE); 2536 2537 m = *m_head; 2538 control = 0; 2539 tx_le = NULL; 2540 2541 #ifdef notyet 2542 /* Check if we have a VLAN tag to insert. */ 2543 if ((m->m_flags & M_VLANTAG) != 0) { 2544 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; 2545 tx_le->msk_addr = htole32(0); 2546 tx_le->msk_control = htole32(OP_VLAN | HW_OWNER | 2547 htons(m->m_pkthdr.ether_vtag)); 2548 sc_if->msk_cdata.msk_tx_cnt++; 2549 MSK_INC(prod, MSK_TX_RING_CNT); 2550 control |= INS_VLAN; 2551 } 2552 #endif 2553 /* Check if we have to handle checksum offload. */ 2554 if (m->m_pkthdr.csum_flags & MSK_CSUM_FEATURES) { 2555 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; 2556 tx_le->msk_addr = htole32(((tcp_offset + m->m_pkthdr.csum_data) 2557 & 0xffff) | ((uint32_t)tcp_offset << 16)); 2558 tx_le->msk_control = htole32(1 << 16 | (OP_TCPLISW | HW_OWNER)); 2559 control = CALSUM | WR_SUM | INIT_SUM | LOCK_SUM; 2560 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0) 2561 control |= UDPTCP; 2562 sc_if->msk_cdata.msk_tx_cnt++; 2563 MSK_INC(prod, MSK_TX_RING_CNT); 2564 } 2565 2566 si = prod; 2567 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; 2568 tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[0].ds_addr)); 2569 tx_le->msk_control = htole32(txsegs[0].ds_len | control | 2570 OP_PACKET); 2571 sc_if->msk_cdata.msk_tx_cnt++; 2572 MSK_INC(prod, MSK_TX_RING_CNT); 2573 2574 for (i = 1; i < nsegs; i++) { 2575 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; 2576 tx_le->msk_addr = htole32(MSK_ADDR_LO(txsegs[i].ds_addr)); 2577 tx_le->msk_control = htole32(txsegs[i].ds_len | control | 2578 OP_BUFFER | HW_OWNER); 2579 sc_if->msk_cdata.msk_tx_cnt++; 2580 MSK_INC(prod, MSK_TX_RING_CNT); 2581 } 2582 /* Update producer index. */ 2583 sc_if->msk_cdata.msk_tx_prod = prod; 2584 2585 /* Set EOP on the last desciptor. 
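 * The producer index already points one slot past the last list
 * element written above, so step back one entry (modulo
 * MSK_TX_RING_CNT) to tag EOP.  Ownership of the first element is
 * flipped to the hardware only after the whole chain is built, so
 * the chip never sees a partially constructed descriptor chain.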
*/ 2586 prod = (prod + MSK_TX_RING_CNT - 1) % MSK_TX_RING_CNT; 2587 tx_le = &sc_if->msk_rdata.msk_tx_ring[prod]; 2588 tx_le->msk_control |= htole32(EOP); 2589 2590 /* Turn the first descriptor ownership to hardware. */ 2591 tx_le = &sc_if->msk_rdata.msk_tx_ring[si]; 2592 tx_le->msk_control |= htole32(HW_OWNER); 2593 2594 txd = &sc_if->msk_cdata.msk_txdesc[prod]; 2595 map = txd_last->tx_dmamap; 2596 txd_last->tx_dmamap = txd->tx_dmamap; 2597 txd->tx_dmamap = map; 2598 txd->tx_m = m; 2599 2600 return (0); 2601 } 2602 2603 static void 2604 msk_start(struct ifnet *ifp) 2605 { 2606 struct msk_if_softc *sc_if; 2607 struct mbuf *m_head; 2608 int enq; 2609 2610 sc_if = ifp->if_softc; 2611 2612 ASSERT_SERIALIZED(ifp->if_serializer); 2613 2614 if (!sc_if->msk_link) { 2615 ifq_purge(&ifp->if_snd); 2616 return; 2617 } 2618 2619 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 2620 return; 2621 2622 enq = 0; 2623 while (!ifq_is_empty(&ifp->if_snd)) { 2624 if (MSK_IS_OACTIVE(sc_if)) { 2625 ifp->if_flags |= IFF_OACTIVE; 2626 break; 2627 } 2628 2629 m_head = ifq_dequeue(&ifp->if_snd, NULL); 2630 if (m_head == NULL) 2631 break; 2632 2633 /* 2634 * Pack the data into the transmit ring. If we 2635 * don't have room, set the OACTIVE flag and wait 2636 * for the NIC to drain the ring. 2637 */ 2638 if (msk_encap(sc_if, &m_head) != 0) { 2639 ifp->if_oerrors++; 2640 if (sc_if->msk_cdata.msk_tx_cnt == 0) { 2641 continue; 2642 } else { 2643 ifp->if_flags |= IFF_OACTIVE; 2644 break; 2645 } 2646 } 2647 enq = 1; 2648 2649 /* 2650 * If there's a BPF listener, bounce a copy of this frame 2651 * to him. 2652 */ 2653 BPF_MTAP(ifp, m_head); 2654 } 2655 2656 if (enq) { 2657 /* Transmit */ 2658 CSR_WRITE_2(sc_if->msk_softc, 2659 Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_PUT_IDX_REG), 2660 sc_if->msk_cdata.msk_tx_prod); 2661 2662 /* Set a timeout in case the chip goes out to lunch. */ 2663 ifp->if_timer = MSK_TX_TIMEOUT; 2664 } 2665 } 2666 2667 static void 2668 msk_watchdog(struct ifnet *ifp) 2669 { 2670 struct msk_if_softc *sc_if = ifp->if_softc; 2671 uint32_t ridx; 2672 int idx; 2673 2674 ASSERT_SERIALIZED(ifp->if_serializer); 2675 2676 if (sc_if->msk_link == 0) { 2677 if (bootverbose) 2678 if_printf(sc_if->msk_ifp, "watchdog timeout " 2679 "(missed link)\n"); 2680 ifp->if_oerrors++; 2681 msk_init(sc_if); 2682 return; 2683 } 2684 2685 /* 2686 * Reclaim first as there is a possibility of losing Tx completion 2687 * interrupts. 2688 */ 2689 ridx = sc_if->msk_port == MSK_PORT_A ? STAT_TXA1_RIDX : STAT_TXA2_RIDX; 2690 idx = CSR_READ_2(sc_if->msk_softc, ridx); 2691 if (sc_if->msk_cdata.msk_tx_cons != idx) { 2692 msk_txeof(sc_if, idx); 2693 if (sc_if->msk_cdata.msk_tx_cnt == 0) { 2694 if_printf(ifp, "watchdog timeout (missed Tx interrupts) " 2695 "-- recovering\n"); 2696 if (!ifq_is_empty(&ifp->if_snd)) 2697 if_devstart(ifp); 2698 return; 2699 } 2700 } 2701 2702 if_printf(ifp, "watchdog timeout\n"); 2703 ifp->if_oerrors++; 2704 msk_init(sc_if); 2705 if (!ifq_is_empty(&ifp->if_snd)) 2706 if_devstart(ifp); 2707 } 2708 2709 static int 2710 mskc_shutdown(device_t dev) 2711 { 2712 struct msk_softc *sc = device_get_softc(dev); 2713 int i; 2714 2715 lwkt_serialize_enter(&sc->msk_serializer); 2716 2717 for (i = 0; i < sc->msk_num_port; i++) { 2718 if (sc->msk_if[i] != NULL) 2719 msk_stop(sc->msk_if[i]); 2720 } 2721 2722 /* Put hardware reset. 
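 * CS_RST_SET leaves the controller held in reset; it is presumably
 * released again by the matching CS_RST_CLR issued from mskc_reset()
 * on the next attach or resume.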
*/ 2723 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); 2724 2725 lwkt_serialize_exit(&sc->msk_serializer); 2726 return (0); 2727 } 2728 2729 static int 2730 mskc_suspend(device_t dev) 2731 { 2732 struct msk_softc *sc = device_get_softc(dev); 2733 int i; 2734 2735 lwkt_serialize_enter(&sc->msk_serializer); 2736 2737 for (i = 0; i < sc->msk_num_port; i++) { 2738 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL && 2739 ((sc->msk_if[i]->msk_ifp->if_flags & IFF_RUNNING) != 0)) 2740 msk_stop(sc->msk_if[i]); 2741 } 2742 2743 /* Disable all interrupts. */ 2744 CSR_WRITE_4(sc, B0_IMSK, 0); 2745 CSR_READ_4(sc, B0_IMSK); 2746 CSR_WRITE_4(sc, B0_HWE_IMSK, 0); 2747 CSR_READ_4(sc, B0_HWE_IMSK); 2748 2749 mskc_phy_power(sc, MSK_PHY_POWERDOWN); 2750 2751 /* Put hardware reset. */ 2752 CSR_WRITE_2(sc, B0_CTST, CS_RST_SET); 2753 sc->msk_suspended = 1; 2754 2755 lwkt_serialize_exit(&sc->msk_serializer); 2756 2757 return (0); 2758 } 2759 2760 static int 2761 mskc_resume(device_t dev) 2762 { 2763 struct msk_softc *sc = device_get_softc(dev); 2764 int i; 2765 2766 lwkt_serialize_enter(&sc->msk_serializer); 2767 2768 /* Enable all clocks before accessing any registers. */ 2769 CSR_PCI_WRITE_4(sc, PCI_OUR_REG_3, 0); 2770 mskc_reset(sc); 2771 for (i = 0; i < sc->msk_num_port; i++) { 2772 if (sc->msk_if[i] != NULL && sc->msk_if[i]->msk_ifp != NULL && 2773 ((sc->msk_if[i]->msk_ifp->if_flags & IFF_UP) != 0)) 2774 msk_init(sc->msk_if[i]); 2775 } 2776 sc->msk_suspended = 0; 2777 2778 lwkt_serialize_exit(&sc->msk_serializer); 2779 2780 return (0); 2781 } 2782 2783 static void 2784 msk_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len) 2785 { 2786 struct mbuf *m; 2787 struct ifnet *ifp; 2788 struct msk_rxdesc *rxd; 2789 int cons, rxlen; 2790 2791 ifp = sc_if->msk_ifp; 2792 2793 cons = sc_if->msk_cdata.msk_rx_cons; 2794 do { 2795 rxlen = status >> 16; 2796 if ((status & GMR_FS_VLAN) != 0 && 2797 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 2798 rxlen -= EVL_ENCAPLEN; 2799 if (sc_if->msk_flags & MSK_FLAG_NORXCHK) { 2800 /* 2801 * For controllers that returns bogus status code 2802 * just do minimal check and let upper stack 2803 * handle this frame. 2804 */ 2805 if (len > MSK_MAX_FRAMELEN || len < ETHER_HDR_LEN) { 2806 ifp->if_ierrors++; 2807 msk_discard_rxbuf(sc_if, cons); 2808 break; 2809 } 2810 } else if (len > sc_if->msk_framesize || 2811 ((status & GMR_FS_ANY_ERR) != 0) || 2812 ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) { 2813 /* Don't count flow-control packet as errors. */ 2814 if ((status & GMR_FS_GOOD_FC) == 0) 2815 ifp->if_ierrors++; 2816 msk_discard_rxbuf(sc_if, cons); 2817 break; 2818 } 2819 rxd = &sc_if->msk_cdata.msk_rxdesc[cons]; 2820 m = rxd->rx_m; 2821 if (msk_newbuf(sc_if, cons, 0) != 0) { 2822 ifp->if_iqdrops++; 2823 /* Reuse old buffer. */ 2824 msk_discard_rxbuf(sc_if, cons); 2825 break; 2826 } 2827 m->m_pkthdr.rcvif = ifp; 2828 m->m_pkthdr.len = m->m_len = len; 2829 ifp->if_ipackets++; 2830 #ifdef notyet 2831 /* Check for VLAN tagged packets. 
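 * The VLAN tag is not part of this status word; it is delivered in a
 * separate OP_RXVLAN/OP_RXCHKSVLAN status LE and cached in
 * sc_if->msk_vtag by mskc_handle_events(), which is presumably why
 * this block is still under notyet.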
*/ 2832 if ((status & GMR_FS_VLAN) != 0 && 2833 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { 2834 m->m_pkthdr.ether_vtag = sc_if->msk_vtag; 2835 m->m_flags |= M_VLANTAG; 2836 } 2837 #endif 2838 2839 ifp->if_input(ifp, m); 2840 } while (0); 2841 2842 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_RX_RING_CNT); 2843 MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_RX_RING_CNT); 2844 } 2845 2846 #ifdef MSK_JUMBO 2847 static void 2848 msk_jumbo_rxeof(struct msk_if_softc *sc_if, uint32_t status, int len) 2849 { 2850 struct mbuf *m; 2851 struct ifnet *ifp; 2852 struct msk_rxdesc *jrxd; 2853 int cons, rxlen; 2854 2855 ifp = sc_if->msk_ifp; 2856 2857 MSK_IF_LOCK_ASSERT(sc_if); 2858 2859 cons = sc_if->msk_cdata.msk_rx_cons; 2860 do { 2861 rxlen = status >> 16; 2862 if ((status & GMR_FS_VLAN) != 0 && 2863 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 2864 rxlen -= ETHER_VLAN_ENCAP_LEN; 2865 if (len > sc_if->msk_framesize || 2866 ((status & GMR_FS_ANY_ERR) != 0) || 2867 ((status & GMR_FS_RX_OK) == 0) || (rxlen != len)) { 2868 /* Don't count flow-control packet as errors. */ 2869 if ((status & GMR_FS_GOOD_FC) == 0) 2870 ifp->if_ierrors++; 2871 msk_discard_jumbo_rxbuf(sc_if, cons); 2872 break; 2873 } 2874 jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[cons]; 2875 m = jrxd->rx_m; 2876 if (msk_jumbo_newbuf(sc_if, cons) != 0) { 2877 ifp->if_iqdrops++; 2878 /* Reuse old buffer. */ 2879 msk_discard_jumbo_rxbuf(sc_if, cons); 2880 break; 2881 } 2882 m->m_pkthdr.rcvif = ifp; 2883 m->m_pkthdr.len = m->m_len = len; 2884 ifp->if_ipackets++; 2885 /* Check for VLAN tagged packets. */ 2886 if ((status & GMR_FS_VLAN) != 0 && 2887 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { 2888 m->m_pkthdr.ether_vtag = sc_if->msk_vtag; 2889 m->m_flags |= M_VLANTAG; 2890 } 2891 MSK_IF_UNLOCK(sc_if); 2892 (*ifp->if_input)(ifp, m); 2893 MSK_IF_LOCK(sc_if); 2894 } while (0); 2895 2896 MSK_INC(sc_if->msk_cdata.msk_rx_cons, MSK_JUMBO_RX_RING_CNT); 2897 MSK_INC(sc_if->msk_cdata.msk_rx_prod, MSK_JUMBO_RX_RING_CNT); 2898 } 2899 #endif 2900 2901 static void 2902 msk_txeof(struct msk_if_softc *sc_if, int idx) 2903 { 2904 struct msk_txdesc *txd; 2905 struct msk_tx_desc *cur_tx; 2906 struct ifnet *ifp; 2907 uint32_t control; 2908 int cons, prog; 2909 2910 ifp = sc_if->msk_ifp; 2911 2912 /* 2913 * Go through our tx ring and free mbufs for those 2914 * frames that have been sent. 2915 */ 2916 cons = sc_if->msk_cdata.msk_tx_cons; 2917 prog = 0; 2918 for (; cons != idx; MSK_INC(cons, MSK_TX_RING_CNT)) { 2919 if (sc_if->msk_cdata.msk_tx_cnt <= 0) 2920 break; 2921 prog++; 2922 cur_tx = &sc_if->msk_rdata.msk_tx_ring[cons]; 2923 control = le32toh(cur_tx->msk_control); 2924 sc_if->msk_cdata.msk_tx_cnt--; 2925 if ((control & EOP) == 0) 2926 continue; 2927 txd = &sc_if->msk_cdata.msk_txdesc[cons]; 2928 bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag, txd->tx_dmamap); 2929 2930 ifp->if_opackets++; 2931 KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!", 2932 __func__)); 2933 m_freem(txd->tx_m); 2934 txd->tx_m = NULL; 2935 } 2936 2937 if (prog > 0) { 2938 sc_if->msk_cdata.msk_tx_cons = cons; 2939 if (!MSK_IS_OACTIVE(sc_if)) 2940 ifp->if_flags &= ~IFF_OACTIVE; 2941 if (sc_if->msk_cdata.msk_tx_cnt == 0) 2942 ifp->if_timer = 0; 2943 /* No need to sync LEs as we didn't update LEs. 
*/ 2944 } 2945 } 2946 2947 static void 2948 msk_tick(void *xsc_if) 2949 { 2950 struct msk_if_softc *sc_if = xsc_if; 2951 struct ifnet *ifp = &sc_if->arpcom.ac_if; 2952 struct mii_data *mii; 2953 2954 lwkt_serialize_enter(ifp->if_serializer); 2955 2956 mii = device_get_softc(sc_if->msk_miibus); 2957 2958 mii_tick(mii); 2959 if (!sc_if->msk_link) 2960 msk_miibus_statchg(sc_if->msk_if_dev); 2961 callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if); 2962 2963 lwkt_serialize_exit(ifp->if_serializer); 2964 } 2965 2966 static void 2967 msk_intr_phy(struct msk_if_softc *sc_if) 2968 { 2969 uint16_t status; 2970 2971 msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT); 2972 status = msk_phy_readreg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_STAT); 2973 /* Handle FIFO Underrun/Overflow? */ 2974 if (status & PHY_M_IS_FIFO_ERROR) { 2975 device_printf(sc_if->msk_if_dev, 2976 "PHY FIFO underrun/overflow.\n"); 2977 } 2978 } 2979 2980 static void 2981 msk_intr_gmac(struct msk_if_softc *sc_if) 2982 { 2983 struct msk_softc *sc; 2984 uint8_t status; 2985 2986 sc = sc_if->msk_softc; 2987 status = CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC)); 2988 2989 /* GMAC Rx FIFO overrun. */ 2990 if ((status & GM_IS_RX_FF_OR) != 0) { 2991 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), 2992 GMF_CLI_RX_FO); 2993 } 2994 /* GMAC Tx FIFO underrun. */ 2995 if ((status & GM_IS_TX_FF_UR) != 0) { 2996 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 2997 GMF_CLI_TX_FU); 2998 device_printf(sc_if->msk_if_dev, "Tx FIFO underrun!\n"); 2999 /* 3000 * XXX 3001 * In case of Tx underrun, we may need to flush/reset 3002 * Tx MAC but that would also require resynchronization 3003 * with status LEs. Reintializing status LEs would 3004 * affect other port in dual MAC configuration so it 3005 * should be avoided as possible as we can. 3006 * Due to lack of documentation it's all vague guess but 3007 * it needs more investigation. 3008 */ 3009 } 3010 } 3011 3012 static void 3013 msk_handle_hwerr(struct msk_if_softc *sc_if, uint32_t status) 3014 { 3015 struct msk_softc *sc; 3016 3017 sc = sc_if->msk_softc; 3018 if ((status & Y2_IS_PAR_RD1) != 0) { 3019 device_printf(sc_if->msk_if_dev, 3020 "RAM buffer read parity error\n"); 3021 /* Clear IRQ. */ 3022 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL), 3023 RI_CLR_RD_PERR); 3024 } 3025 if ((status & Y2_IS_PAR_WR1) != 0) { 3026 device_printf(sc_if->msk_if_dev, 3027 "RAM buffer write parity error\n"); 3028 /* Clear IRQ. */ 3029 CSR_WRITE_2(sc, SELECT_RAM_BUFFER(sc_if->msk_port, B3_RI_CTRL), 3030 RI_CLR_WR_PERR); 3031 } 3032 if ((status & Y2_IS_PAR_MAC1) != 0) { 3033 device_printf(sc_if->msk_if_dev, "Tx MAC parity error\n"); 3034 /* Clear IRQ. */ 3035 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 3036 GMF_CLI_TX_PE); 3037 } 3038 if ((status & Y2_IS_PAR_RX1) != 0) { 3039 device_printf(sc_if->msk_if_dev, "Rx parity error\n"); 3040 /* Clear IRQ. */ 3041 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_IRQ_PAR); 3042 } 3043 if ((status & (Y2_IS_TCP_TXS1 | Y2_IS_TCP_TXA1)) != 0) { 3044 device_printf(sc_if->msk_if_dev, "TCP segmentation error\n"); 3045 /* Clear IRQ. */ 3046 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_IRQ_TCP); 3047 } 3048 } 3049 3050 static void 3051 mskc_intr_hwerr(struct msk_softc *sc) 3052 { 3053 uint32_t status; 3054 uint32_t tlphead[4]; 3055 3056 status = CSR_READ_4(sc, B0_HWE_ISRC); 3057 /* Time Stamp timer overflow. 
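 * A timestamp timer overflow is harmless and only needs its IRQ
 * acknowledged; the cases below deal with real PCI and PCI Express
 * error reporting.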
*/ 3058 if ((status & Y2_IS_TIST_OV) != 0) 3059 CSR_WRITE_1(sc, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ); 3060 if ((status & Y2_IS_PCI_NEXP) != 0) { 3061 /* 3062 * PCI Express Error occured which is not described in PEX 3063 * spec. 3064 * This error is also mapped either to Master Abort( 3065 * Y2_IS_MST_ERR) or Target Abort (Y2_IS_IRQ_STAT) bit and 3066 * can only be cleared there. 3067 */ 3068 device_printf(sc->msk_dev, 3069 "PCI Express protocol violation error\n"); 3070 } 3071 3072 if ((status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) != 0) { 3073 uint16_t v16; 3074 3075 if ((status & Y2_IS_MST_ERR) != 0) 3076 device_printf(sc->msk_dev, 3077 "unexpected IRQ Status error\n"); 3078 else 3079 device_printf(sc->msk_dev, 3080 "unexpected IRQ Master error\n"); 3081 /* Reset all bits in the PCI status register. */ 3082 v16 = pci_read_config(sc->msk_dev, PCIR_STATUS, 2); 3083 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON); 3084 pci_write_config(sc->msk_dev, PCIR_STATUS, v16 | 3085 PCIM_STATUS_PERR | PCIM_STATUS_SERR | PCIM_STATUS_RMABORT | 3086 PCIM_STATUS_RTABORT | PCIM_STATUS_PERRREPORT, 2); 3087 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 3088 } 3089 3090 /* Check for PCI Express Uncorrectable Error. */ 3091 if ((status & Y2_IS_PCI_EXP) != 0) { 3092 uint32_t v32; 3093 3094 /* 3095 * On PCI Express bus bridges are called root complexes (RC). 3096 * PCI Express errors are recognized by the root complex too, 3097 * which requests the system to handle the problem. After 3098 * error occurence it may be that no access to the adapter 3099 * may be performed any longer. 3100 */ 3101 3102 v32 = CSR_PCI_READ_4(sc, PEX_UNC_ERR_STAT); 3103 if ((v32 & PEX_UNSUP_REQ) != 0) { 3104 /* Ignore unsupported request error. */ 3105 if (bootverbose) { 3106 device_printf(sc->msk_dev, 3107 "Uncorrectable PCI Express error\n"); 3108 } 3109 } 3110 if ((v32 & (PEX_FATAL_ERRORS | PEX_POIS_TLP)) != 0) { 3111 int i; 3112 3113 /* Get TLP header form Log Registers. */ 3114 for (i = 0; i < 4; i++) 3115 tlphead[i] = CSR_PCI_READ_4(sc, 3116 PEX_HEADER_LOG + i * 4); 3117 /* Check for vendor defined broadcast message. */ 3118 if (!(tlphead[0] == 0x73004001 && tlphead[1] == 0x7f)) { 3119 sc->msk_intrhwemask &= ~Y2_IS_PCI_EXP; 3120 CSR_WRITE_4(sc, B0_HWE_IMSK, 3121 sc->msk_intrhwemask); 3122 CSR_READ_4(sc, B0_HWE_IMSK); 3123 } 3124 } 3125 /* Clear the interrupt. 
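 * PEX_UNC_ERR_STAT appears to be write-one-to-clear and is only
 * writable while TST_CFG_WRITE_ON is in effect, hence the write of
 * all ones bracketed by the two B2_TST_CTRL1 accesses below.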
*/ 3126 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_ON); 3127 CSR_PCI_WRITE_4(sc, PEX_UNC_ERR_STAT, 0xffffffff); 3128 CSR_WRITE_1(sc, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 3129 } 3130 3131 if ((status & Y2_HWE_L1_MASK) != 0 && sc->msk_if[MSK_PORT_A] != NULL) 3132 msk_handle_hwerr(sc->msk_if[MSK_PORT_A], status); 3133 if ((status & Y2_HWE_L2_MASK) != 0 && sc->msk_if[MSK_PORT_B] != NULL) 3134 msk_handle_hwerr(sc->msk_if[MSK_PORT_B], status >> 8); 3135 } 3136 3137 static __inline void 3138 msk_rxput(struct msk_if_softc *sc_if) 3139 { 3140 struct msk_softc *sc; 3141 3142 sc = sc_if->msk_softc; 3143 #ifdef MSK_JUMBO 3144 if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN)) { 3145 bus_dmamap_sync( 3146 sc_if->msk_cdata.msk_jumbo_rx_ring_tag, 3147 sc_if->msk_cdata.msk_jumbo_rx_ring_map, 3148 BUS_DMASYNC_PREWRITE); 3149 } 3150 #endif 3151 CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, 3152 PREF_UNIT_PUT_IDX_REG), sc_if->msk_cdata.msk_rx_prod); 3153 } 3154 3155 static int 3156 mskc_handle_events(struct msk_softc *sc) 3157 { 3158 struct msk_if_softc *sc_if; 3159 int rxput[2]; 3160 struct msk_stat_desc *sd; 3161 uint32_t control, status; 3162 int cons, idx, len, port, rxprog; 3163 3164 idx = CSR_READ_2(sc, STAT_PUT_IDX); 3165 if (idx == sc->msk_stat_cons) 3166 return (0); 3167 3168 rxput[MSK_PORT_A] = rxput[MSK_PORT_B] = 0; 3169 3170 rxprog = 0; 3171 for (cons = sc->msk_stat_cons; cons != idx;) { 3172 sd = &sc->msk_stat_ring[cons]; 3173 control = le32toh(sd->msk_control); 3174 if ((control & HW_OWNER) == 0) 3175 break; 3176 /* 3177 * Marvell's FreeBSD driver updates status LE after clearing 3178 * HW_OWNER. However we don't have a way to sync single LE 3179 * with bus_dma(9) API. bus_dma(9) provides a way to sync 3180 * an entire DMA map. So don't sync LE until we have a better 3181 * way to sync LEs. 3182 */ 3183 control &= ~HW_OWNER; 3184 sd->msk_control = htole32(control); 3185 status = le32toh(sd->msk_status); 3186 len = control & STLE_LEN_MASK; 3187 port = (control >> 16) & 0x01; 3188 sc_if = sc->msk_if[port]; 3189 if (sc_if == NULL) { 3190 device_printf(sc->msk_dev, "invalid port opcode " 3191 "0x%08x\n", control & STLE_OP_MASK); 3192 continue; 3193 } 3194 3195 switch (control & STLE_OP_MASK) { 3196 case OP_RXVLAN: 3197 sc_if->msk_vtag = ntohs(len); 3198 break; 3199 case OP_RXCHKSVLAN: 3200 sc_if->msk_vtag = ntohs(len); 3201 break; 3202 case OP_RXSTAT: 3203 if ((sc_if->msk_ifp->if_flags & IFF_RUNNING) == 0) 3204 break; 3205 #ifdef MSK_JUMBO 3206 if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN)) 3207 msk_jumbo_rxeof(sc_if, status, len); 3208 else 3209 #endif 3210 msk_rxeof(sc_if, status, len); 3211 rxprog++; 3212 /* 3213 * Because there is no way to sync single Rx LE 3214 * put the DMA sync operation off until the end of 3215 * event processing. 3216 */ 3217 rxput[port]++; 3218 /* Update prefetch unit if we've passed water mark. 
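 * Rx put index updates are batched: the prefetch unit is only told
 * about replenished buffers once the per-port watermark is reached
 * here, and any remainder is flushed via the rxput[] counters after
 * the event loop finishes.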
*/ 3219 if (rxput[port] >= sc_if->msk_cdata.msk_rx_putwm) { 3220 msk_rxput(sc_if); 3221 rxput[port] = 0; 3222 } 3223 break; 3224 case OP_TXINDEXLE: 3225 if (sc->msk_if[MSK_PORT_A] != NULL) { 3226 msk_txeof(sc->msk_if[MSK_PORT_A], 3227 status & STLE_TXA1_MSKL); 3228 } 3229 if (sc->msk_if[MSK_PORT_B] != NULL) { 3230 msk_txeof(sc->msk_if[MSK_PORT_B], 3231 ((status & STLE_TXA2_MSKL) >> 3232 STLE_TXA2_SHIFTL) | 3233 ((len & STLE_TXA2_MSKH) << 3234 STLE_TXA2_SHIFTH)); 3235 } 3236 break; 3237 default: 3238 device_printf(sc->msk_dev, "unhandled opcode 0x%08x\n", 3239 control & STLE_OP_MASK); 3240 break; 3241 } 3242 MSK_INC(cons, MSK_STAT_RING_CNT); 3243 if (rxprog > sc->msk_process_limit) 3244 break; 3245 } 3246 3247 sc->msk_stat_cons = cons; 3248 /* XXX We should sync status LEs here. See above notes. */ 3249 3250 if (rxput[MSK_PORT_A] > 0) 3251 msk_rxput(sc->msk_if[MSK_PORT_A]); 3252 if (rxput[MSK_PORT_B] > 0) 3253 msk_rxput(sc->msk_if[MSK_PORT_B]); 3254 3255 return (sc->msk_stat_cons != CSR_READ_2(sc, STAT_PUT_IDX)); 3256 } 3257 3258 /* Legacy interrupt handler for shared interrupt. */ 3259 static void 3260 mskc_intr(void *xsc) 3261 { 3262 struct msk_softc *sc; 3263 struct msk_if_softc *sc_if0, *sc_if1; 3264 struct ifnet *ifp0, *ifp1; 3265 uint32_t status; 3266 3267 sc = xsc; 3268 ASSERT_SERIALIZED(&sc->msk_serializer); 3269 3270 /* Reading B0_Y2_SP_ISRC2 masks further interrupts. */ 3271 status = CSR_READ_4(sc, B0_Y2_SP_ISRC2); 3272 if (status == 0 || status == 0xffffffff || sc->msk_suspended != 0 || 3273 (status & sc->msk_intrmask) == 0) { 3274 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2); 3275 return; 3276 } 3277 3278 sc_if0 = sc->msk_if[MSK_PORT_A]; 3279 sc_if1 = sc->msk_if[MSK_PORT_B]; 3280 ifp0 = ifp1 = NULL; 3281 if (sc_if0 != NULL) 3282 ifp0 = sc_if0->msk_ifp; 3283 if (sc_if1 != NULL) 3284 ifp1 = sc_if1->msk_ifp; 3285 3286 if ((status & Y2_IS_IRQ_PHY1) != 0 && sc_if0 != NULL) 3287 msk_intr_phy(sc_if0); 3288 if ((status & Y2_IS_IRQ_PHY2) != 0 && sc_if1 != NULL) 3289 msk_intr_phy(sc_if1); 3290 if ((status & Y2_IS_IRQ_MAC1) != 0 && sc_if0 != NULL) 3291 msk_intr_gmac(sc_if0); 3292 if ((status & Y2_IS_IRQ_MAC2) != 0 && sc_if1 != NULL) 3293 msk_intr_gmac(sc_if1); 3294 if ((status & (Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2)) != 0) { 3295 device_printf(sc->msk_dev, "Rx descriptor error\n"); 3296 sc->msk_intrmask &= ~(Y2_IS_CHK_RX1 | Y2_IS_CHK_RX2); 3297 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask); 3298 CSR_READ_4(sc, B0_IMSK); 3299 } 3300 if ((status & (Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2)) != 0) { 3301 device_printf(sc->msk_dev, "Tx descriptor error\n"); 3302 sc->msk_intrmask &= ~(Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXA2); 3303 CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask); 3304 CSR_READ_4(sc, B0_IMSK); 3305 } 3306 if ((status & Y2_IS_HW_ERR) != 0) 3307 mskc_intr_hwerr(sc); 3308 3309 while (mskc_handle_events(sc) != 0) 3310 ; 3311 if ((status & Y2_IS_STAT_BMU) != 0) 3312 CSR_WRITE_4(sc, STAT_CTRL, SC_STAT_CLR_IRQ); 3313 3314 /* Reenable interrupts. 
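 * Reading B0_Y2_SP_ISRC2 at the top of the handler masked further
 * interrupts; writing 2 to B0_Y2_SP_ICR re-arms them.  Any frames
 * queued while the handler ran are then kicked out through
 * if_devstart().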
*/ 3315 CSR_WRITE_4(sc, B0_Y2_SP_ICR, 2); 3316 3317 if (ifp0 != NULL && (ifp0->if_flags & IFF_RUNNING) != 0 && 3318 !ifq_is_empty(&ifp0->if_snd)) 3319 if_devstart(ifp0); 3320 if (ifp1 != NULL && (ifp1->if_flags & IFF_RUNNING) != 0 && 3321 !ifq_is_empty(&ifp1->if_snd)) 3322 if_devstart(ifp1); 3323 } 3324 3325 static void 3326 msk_set_tx_stfwd(struct msk_if_softc *sc_if) 3327 { 3328 struct msk_softc *sc = sc_if->msk_softc; 3329 struct ifnet *ifp = sc_if->msk_ifp; 3330 3331 if ((sc->msk_hw_id == CHIP_ID_YUKON_EX && 3332 sc->msk_hw_rev != CHIP_REV_YU_EX_A0) || 3333 sc->msk_hw_id >= CHIP_ID_YUKON_SUPR) { 3334 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 3335 TX_STFW_ENA); 3336 } else { 3337 if (ifp->if_mtu > ETHERMTU) { 3338 /* Set Tx GMAC FIFO Almost Empty Threshold. */ 3339 CSR_WRITE_4(sc, 3340 MR_ADDR(sc_if->msk_port, TX_GMF_AE_THR), 3341 MSK_ECU_JUMBO_WM << 16 | MSK_ECU_AE_THR); 3342 /* Disable Store & Forward mode for Tx. */ 3343 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 3344 TX_STFW_DIS); 3345 } else { 3346 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), 3347 TX_STFW_ENA); 3348 } 3349 } 3350 } 3351 3352 static void 3353 msk_init(void *xsc) 3354 { 3355 struct msk_if_softc *sc_if = xsc; 3356 struct msk_softc *sc = sc_if->msk_softc; 3357 struct ifnet *ifp = sc_if->msk_ifp; 3358 struct mii_data *mii; 3359 uint16_t eaddr[ETHER_ADDR_LEN / 2]; 3360 uint16_t gmac; 3361 uint32_t reg; 3362 int error, i; 3363 3364 ASSERT_SERIALIZED(ifp->if_serializer); 3365 3366 mii = device_get_softc(sc_if->msk_miibus); 3367 3368 error = 0; 3369 /* Cancel pending I/O and free all Rx/Tx buffers. */ 3370 msk_stop(sc_if); 3371 3372 sc_if->msk_framesize = ifp->if_mtu + ETHER_HDR_LEN + EVL_ENCAPLEN; 3373 if (sc_if->msk_framesize > MSK_MAX_FRAMELEN && 3374 sc_if->msk_softc->msk_hw_id == CHIP_ID_YUKON_EC_U) { 3375 /* 3376 * In Yukon EC Ultra, TSO & checksum offload is not 3377 * supported for jumbo frame. 3378 */ 3379 ifp->if_hwassist &= ~MSK_CSUM_FEATURES; 3380 ifp->if_capenable &= ~IFCAP_TXCSUM; 3381 } 3382 3383 /* GMAC Control reset. */ 3384 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_SET); 3385 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_RST_CLR); 3386 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_F_LOOPB_OFF); 3387 if (sc->msk_hw_id == CHIP_ID_YUKON_EX || 3388 sc->msk_hw_id == CHIP_ID_YUKON_SUPR) { 3389 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), 3390 GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON | 3391 GMC_BYP_RETR_ON); 3392 } 3393 3394 /* 3395 * Initialize GMAC first such that speed/duplex/flow-control 3396 * parameters are renegotiated when interface is brought up. 3397 */ 3398 GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, 0); 3399 3400 /* Dummy read the Interrupt Source Register. */ 3401 CSR_READ_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_SRC)); 3402 3403 /* Set MIB Clear Counter Mode. */ 3404 gmac = GMAC_READ_2(sc, sc_if->msk_port, GM_PHY_ADDR); 3405 GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac | GM_PAR_MIB_CLR); 3406 /* Read all MIB Counters with Clear Mode set. */ 3407 for (i = 0; i < GM_MIB_CNT_SIZE; i++) 3408 GMAC_READ_2(sc, sc_if->msk_port, GM_MIB_CNT_BASE + 8 * i); 3409 /* Clear MIB Clear Counter Mode. */ 3410 gmac &= ~GM_PAR_MIB_CLR; 3411 GMAC_WRITE_2(sc, sc_if->msk_port, GM_PHY_ADDR, gmac); 3412 3413 /* Disable FCS. */ 3414 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_CTRL, GM_RXCR_CRC_DIS); 3415 3416 /* Setup Transmit Control Register. 
*/ 3417 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF)); 3418 3419 /* Setup Transmit Flow Control Register. */ 3420 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_FLOW_CTRL, 0xffff); 3421 3422 /* Setup Transmit Parameter Register. */ 3423 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_PARAM, 3424 TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) | 3425 TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) | TX_BACK_OFF_LIM(TX_BOF_LIM_DEF)); 3426 3427 gmac = DATA_BLIND_VAL(DATA_BLIND_DEF) | 3428 GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF); 3429 3430 if (sc_if->msk_framesize > MSK_MAX_FRAMELEN) 3431 gmac |= GM_SMOD_JUMBO_ENA; 3432 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SERIAL_MODE, gmac); 3433 3434 /* Set station address. */ 3435 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN); 3436 for (i = 0; i < ETHER_ADDR_LEN /2; i++) 3437 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_1L + i * 4, 3438 eaddr[i]); 3439 for (i = 0; i < ETHER_ADDR_LEN /2; i++) 3440 GMAC_WRITE_2(sc, sc_if->msk_port, GM_SRC_ADDR_2L + i * 4, 3441 eaddr[i]); 3442 3443 /* Disable interrupts for counter overflows. */ 3444 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TX_IRQ_MSK, 0); 3445 GMAC_WRITE_2(sc, sc_if->msk_port, GM_RX_IRQ_MSK, 0); 3446 GMAC_WRITE_2(sc, sc_if->msk_port, GM_TR_IRQ_MSK, 0); 3447 3448 /* Configure Rx MAC FIFO. */ 3449 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET); 3450 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_CLR); 3451 reg = GMF_OPER_ON | GMF_RX_F_FL_ON; 3452 if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P || 3453 sc->msk_hw_id == CHIP_ID_YUKON_EX) 3454 reg |= GMF_RX_OVER_ON; 3455 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), reg); 3456 3457 /* Set receive filter. */ 3458 msk_rxfilter(sc_if); 3459 3460 if (sc->msk_hw_id == CHIP_ID_YUKON_XL) { 3461 /* Clear flush mask - HW bug. */ 3462 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK), 0); 3463 } else { 3464 /* Flush Rx MAC FIFO on any flow control or error. */ 3465 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_MSK), 3466 GMR_FS_ANY_ERR); 3467 } 3468 3469 /* 3470 * Set Rx FIFO flush threshold to 64 bytes 1 FIFO word 3471 * due to hardware hang on receipt of pause frames. 3472 */ 3473 reg = RX_GMF_FL_THR_DEF + 1; 3474 /* Another magic for Yukon FE+ - From Linux. */ 3475 if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P && 3476 sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) 3477 reg = 0x178; 3478 CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_FL_THR), reg); 3479 3480 3481 /* Configure Tx MAC FIFO. */ 3482 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET); 3483 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_CLR); 3484 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_OPER_ON); 3485 3486 /* Configure hardware VLAN tag insertion/stripping. */ 3487 msk_setvlan(sc_if, ifp); 3488 3489 if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0) { 3490 /* Set Rx Pause threshould. */ 3491 CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_LP_THR), 3492 MSK_ECU_LLPP); 3493 CSR_WRITE_2(sc, MR_ADDR(sc_if->msk_port, RX_GMF_UP_THR), 3494 MSK_ECU_ULPP); 3495 /* Configure store-and-forward for Tx. */ 3496 msk_set_tx_stfwd(sc_if); 3497 } 3498 3499 if (sc->msk_hw_id == CHIP_ID_YUKON_FE_P && 3500 sc->msk_hw_rev == CHIP_REV_YU_FE_P_A0) { 3501 /* Disable dynamic watermark - from Linux. 
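 * Clearing the low two bits of TX_GMF_EA turns the dynamic
 * watermark logic off on FE+ A0; the Linux sky2 driver applies the
 * same workaround.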
*/ 3502 reg = CSR_READ_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA)); 3503 reg &= ~0x03; 3504 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_EA), reg); 3505 } 3506 3507 /* 3508 * Disable Force Sync bit and Alloc bit in Tx RAM interface 3509 * arbiter as we don't use Sync Tx queue. 3510 */ 3511 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), 3512 TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC); 3513 /* Enable the RAM Interface Arbiter. */ 3514 CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_ENA_ARB); 3515 3516 /* Setup RAM buffer. */ 3517 msk_set_rambuffer(sc_if); 3518 3519 /* Disable Tx sync Queue. */ 3520 CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txsq, RB_CTRL), RB_RST_SET); 3521 3522 /* Setup Tx Queue Bus Memory Interface. */ 3523 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_CLR_RESET); 3524 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_OPER_INIT); 3525 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_FIFO_OP_ON); 3526 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_WM), MSK_BMU_TX_WM); 3527 switch (sc->msk_hw_id) { 3528 case CHIP_ID_YUKON_EC_U: 3529 if (sc->msk_hw_rev == CHIP_REV_YU_EC_U_A0) { 3530 /* Fix for Yukon-EC Ultra: set BMU FIFO level */ 3531 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_txq, Q_AL), 3532 MSK_ECU_TXFF_LEV); 3533 } 3534 break; 3535 case CHIP_ID_YUKON_EX: 3536 /* 3537 * Yukon Extreme seems to have silicon bug for 3538 * automatic Tx checksum calculation capability. 3539 */ 3540 if (sc->msk_hw_rev == CHIP_REV_YU_EX_B0) { 3541 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_F), 3542 F_TX_CHK_AUTO_OFF); 3543 } 3544 break; 3545 } 3546 3547 /* Setup Rx Queue Bus Memory Interface. */ 3548 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_CLR_RESET); 3549 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_OPER_INIT); 3550 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), BMU_FIFO_OP_ON); 3551 CSR_WRITE_2(sc, Q_ADDR(sc_if->msk_rxq, Q_WM), MSK_BMU_RX_WM); 3552 if (sc->msk_hw_id == CHIP_ID_YUKON_EC_U && 3553 sc->msk_hw_rev >= CHIP_REV_YU_EC_U_A1) { 3554 /* MAC Rx RAM Read is controlled by hardware. */ 3555 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_F), F_M_RX_RAM_DIS); 3556 } 3557 3558 msk_set_prefetch(sc, sc_if->msk_txq, 3559 sc_if->msk_rdata.msk_tx_ring_paddr, MSK_TX_RING_CNT - 1); 3560 msk_init_tx_ring(sc_if); 3561 3562 /* Disable Rx checksum offload and RSS hash. */ 3563 CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR), 3564 BMU_DIS_RX_CHKSUM | BMU_DIS_RX_RSS_HASH); 3565 #ifdef MSK_JUMBO 3566 if (sc_if->msk_framesize > (MCLBYTES - ETHER_HDR_LEN)) { 3567 msk_set_prefetch(sc, sc_if->msk_rxq, 3568 sc_if->msk_rdata.msk_jumbo_rx_ring_paddr, 3569 MSK_JUMBO_RX_RING_CNT - 1); 3570 error = msk_init_jumbo_rx_ring(sc_if); 3571 } else 3572 #endif 3573 { 3574 msk_set_prefetch(sc, sc_if->msk_rxq, 3575 sc_if->msk_rdata.msk_rx_ring_paddr, 3576 MSK_RX_RING_CNT - 1); 3577 error = msk_init_rx_ring(sc_if); 3578 } 3579 if (error != 0) { 3580 device_printf(sc_if->msk_if_dev, 3581 "initialization failed: no memory for Rx buffers\n"); 3582 msk_stop(sc_if); 3583 return; 3584 } 3585 if (sc->msk_hw_id == CHIP_ID_YUKON_EX || 3586 sc->msk_hw_id == CHIP_ID_YUKON_SUPR) { 3587 /* Disable flushing of non-ASF packets. */ 3588 CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), 3589 GMF_RX_MACSEC_FLUSH_OFF); 3590 } 3591 3592 /* Configure interrupt handling. 
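 * Merge this port's bits into the shared masks and write them back;
 * the read after each write pushes the update to the chip before
 * the link is renegotiated via mii_mediachg() below.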
	/* Configure interrupt handling. */
	if (sc_if->msk_port == MSK_PORT_A) {
		sc->msk_intrmask |= Y2_IS_PORT_A;
		sc->msk_intrhwemask |= Y2_HWE_L1_MASK;
	} else {
		sc->msk_intrmask |= Y2_IS_PORT_B;
		sc->msk_intrhwemask |= Y2_HWE_L2_MASK;
	}
	CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
	CSR_READ_4(sc, B0_HWE_IMSK);
	CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
	CSR_READ_4(sc, B0_IMSK);

	sc_if->msk_link = 0;
	mii_mediachg(mii);

	mskc_set_imtimer(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	callout_reset(&sc_if->msk_tick_ch, hz, msk_tick, sc_if);
}

static void
msk_set_rambuffer(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc;
	int ltpp, utpp;

	if ((sc_if->msk_flags & MSK_FLAG_RAMBUF) == 0)
		return;

	sc = sc_if->msk_softc;

	/* Setup Rx Queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_CLR);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_START),
	    sc->msk_rxqstart[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_END),
	    sc->msk_rxqend[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_WP),
	    sc->msk_rxqstart[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RP),
	    sc->msk_rxqstart[sc_if->msk_port] / 8);

	utpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
	    sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_ULPP) / 8;
	ltpp = (sc->msk_rxqend[sc_if->msk_port] + 1 -
	    sc->msk_rxqstart[sc_if->msk_port] - MSK_RB_LLPP_B) / 8;
	if (sc->msk_rxqsize < MSK_MIN_RXQ_SIZE)
		ltpp += (MSK_RB_LLPP_B - MSK_RB_LLPP_S) / 8;
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_UTPP), utpp);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_rxq, RB_RX_LTPP), ltpp);
	/* Set Rx priority (RB_RX_UTHP/RB_RX_LTHP) thresholds? */

	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_ENA_OP_MD);
	CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL));

	/* Setup Tx Queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_CLR);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_START),
	    sc->msk_txqstart[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_END),
	    sc->msk_txqend[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_WP),
	    sc->msk_txqstart[sc_if->msk_port] / 8);
	CSR_WRITE_4(sc, RB_ADDR(sc_if->msk_txq, RB_RP),
	    sc->msk_txqstart[sc_if->msk_port] / 8);
	/* Enable Store & Forward for Tx side. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_STFWD);
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_ENA_OP_MD);
	CSR_READ_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL));
}
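/*
 * msk_set_rambuffer() above programs the on-chip RAM buffer in 8-byte units
 * (hence the divisions by 8), while msk_set_prefetch() below hands a ring's
 * 64-bit bus address to the prefetch unit as two 32-bit halves.  A minimal,
 * self-contained sketch of that address split; MSK_ADDR_LO()/MSK_ADDR_HI()
 * from the register header are assumed to behave like the mask and shift
 * shown here (illustrative only, not driver code).
 */
#if 0
static void
msk_split_busaddr_example(uint64_t busaddr, uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)(busaddr & 0xffffffffULL);	/* PREF_UNIT_ADDR_LOW_REG */
	*hi = (uint32_t)(busaddr >> 32);		/* PREF_UNIT_ADDR_HI_REG */
}
#endif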
static void
msk_set_prefetch(struct msk_softc *sc, int qaddr, bus_addr_t addr,
    uint32_t count)
{
	/* Reset the prefetch unit. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_SET);
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_CLR);
	/* Set LE base address. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_LOW_REG),
	    MSK_ADDR_LO(addr));
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_ADDR_HI_REG),
	    MSK_ADDR_HI(addr));
	/* Set the list last index. */
	CSR_WRITE_2(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_LAST_IDX_REG),
	    count);
	/* Turn on prefetch unit. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_OP_ON);
	/* Dummy read to ensure write. */
	CSR_READ_4(sc, Y2_PREF_Q_ADDR(qaddr, PREF_UNIT_CTRL_REG));
}

static void
msk_stop(struct msk_if_softc *sc_if)
{
	struct msk_softc *sc = sc_if->msk_softc;
	struct ifnet *ifp = sc_if->msk_ifp;
	struct msk_txdesc *txd;
	struct msk_rxdesc *rxd;
#ifdef MSK_JUMBO
	struct msk_rxdesc *jrxd;
#endif
	uint32_t val;
	int i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	callout_stop(&sc_if->msk_tick_ch);
	ifp->if_timer = 0;

	/* Disable interrupts. */
	if (sc_if->msk_port == MSK_PORT_A) {
		sc->msk_intrmask &= ~Y2_IS_PORT_A;
		sc->msk_intrhwemask &= ~Y2_HWE_L1_MASK;
	} else {
		sc->msk_intrmask &= ~Y2_IS_PORT_B;
		sc->msk_intrhwemask &= ~Y2_HWE_L2_MASK;
	}
	CSR_WRITE_4(sc, B0_HWE_IMSK, sc->msk_intrhwemask);
	CSR_READ_4(sc, B0_HWE_IMSK);
	CSR_WRITE_4(sc, B0_IMSK, sc->msk_intrmask);
	CSR_READ_4(sc, B0_IMSK);

	/* Disable Tx/Rx MAC. */
	val = GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);
	val &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
	GMAC_WRITE_2(sc, sc_if->msk_port, GM_GP_CTRL, val);
	/* Read back to ensure the write completed. */
	GMAC_READ_2(sc, sc_if->msk_port, GM_GP_CTRL);

	/* Stop Tx BMU. */
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR), BMU_STOP);
	val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
	for (i = 0; i < MSK_TIMEOUT; i++) {
		if ((val & (BMU_STOP | BMU_IDLE)) == 0) {
			CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
			    BMU_STOP);
			val = CSR_READ_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR));
		} else
			break;
		DELAY(1);
	}
	if (i == MSK_TIMEOUT)
		device_printf(sc_if->msk_if_dev, "Tx BMU stop failed\n");
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL),
	    RB_RST_SET | RB_DIS_OP_MD);

	/* Disable all GMAC interrupts. */
	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, GMAC_IRQ_MSK), 0);
	/* Disable PHY interrupt. */
	msk_phy_writereg(sc_if, PHY_ADDR_MARV, PHY_MARV_INT_MASK, 0);

	/* Disable the RAM Interface Arbiter. */
	CSR_WRITE_1(sc, MR_ADDR(sc_if->msk_port, TXA_CTRL), TXA_DIS_ARB);

	/* Reset the PCI FIFO of the async Tx queue. */
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_txq, Q_CSR),
	    BMU_RST_SET | BMU_FIFO_RST);

	/* Reset the Tx prefetch units. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_txq, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_SET);

	/* Reset the RAM Buffer async Tx queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_txq, RB_CTRL), RB_RST_SET);

	/* Reset Tx MAC FIFO. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, TX_GMF_CTRL_T), GMF_RST_SET);
	/* Set Pause Off. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, GMAC_CTRL), GMC_PAUSE_OFF);

	/*
	 * The Rx Stop command will not work on Yukon-2 if the BMU has not
	 * reached the end of a packet, and since we cannot guarantee that no
	 * data is incoming, the BMU must only be reset while it is not in the
	 * middle of a DMA transfer.  Because the Rx path may still be active,
	 * the Rx RAM buffer is stopped first so that incoming data can no
	 * longer trigger a DMA.  Once the RAM buffer is stopped, the BMU is
	 * polled until any DMA in progress has finished, and only then is it
	 * reset.
	 */
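	/*
	 * Concretely, the sequence below is: take the Rx RAM buffer out of
	 * operational mode (RB_DIS_OP_MD), poll until its two level registers
	 * (Q_RSL/Q_RL) agree so that no data is still draining into the BMU,
	 * then reset the BMU and its FIFO, the Rx prefetch unit, the RAM
	 * buffer and finally the Rx MAC FIFO.
	 */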
	/* Disable the RAM Buffer receive queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_DIS_OP_MD);
	for (i = 0; i < MSK_TIMEOUT; i++) {
		if (CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RSL)) ==
		    CSR_READ_1(sc, RB_ADDR(sc_if->msk_rxq, Q_RL)))
			break;
		DELAY(1);
	}
	if (i == MSK_TIMEOUT)
		device_printf(sc_if->msk_if_dev, "Rx BMU stop failed\n");
	CSR_WRITE_4(sc, Q_ADDR(sc_if->msk_rxq, Q_CSR),
	    BMU_RST_SET | BMU_FIFO_RST);
	/* Reset the Rx prefetch unit. */
	CSR_WRITE_4(sc, Y2_PREF_Q_ADDR(sc_if->msk_rxq, PREF_UNIT_CTRL_REG),
	    PREF_UNIT_RST_SET);
	/* Reset the RAM Buffer receive queue. */
	CSR_WRITE_1(sc, RB_ADDR(sc_if->msk_rxq, RB_CTRL), RB_RST_SET);
	/* Reset Rx MAC FIFO. */
	CSR_WRITE_4(sc, MR_ADDR(sc_if->msk_port, RX_GMF_CTRL_T), GMF_RST_SET);

	/* Free Rx and Tx mbufs still in the queues. */
	for (i = 0; i < MSK_RX_RING_CNT; i++) {
		rxd = &sc_if->msk_cdata.msk_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_unload(sc_if->msk_cdata.msk_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
#ifdef MSK_JUMBO
	for (i = 0; i < MSK_JUMBO_RX_RING_CNT; i++) {
		jrxd = &sc_if->msk_cdata.msk_jumbo_rxdesc[i];
		if (jrxd->rx_m != NULL) {
			bus_dmamap_sync(sc_if->msk_cdata.msk_jumbo_rx_tag,
			    jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc_if->msk_cdata.msk_jumbo_rx_tag,
			    jrxd->rx_dmamap);
			m_freem(jrxd->rx_m);
			jrxd->rx_m = NULL;
		}
	}
#endif
	for (i = 0; i < MSK_TX_RING_CNT; i++) {
		txd = &sc_if->msk_cdata.msk_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_unload(sc_if->msk_cdata.msk_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}

	/* Mark the interface down. */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	sc_if->msk_link = 0;
}
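/*
 * Sysctl glue: mskc_sysctl_proc_limit() clamps the per-interrupt processing
 * limit to [MSK_PROC_MIN, MSK_PROC_MAX], and mskc_sysctl_intr_rate() updates
 * the interrupt moderation rate under the driver serializer, re-arming the
 * timer only while at least one port is marked IFF_RUNNING.  Handlers of this
 * shape are normally registered at attach time along the lines of the sketch
 * below; the context/tree variables and the node name are hypothetical, not
 * taken from this driver.
 */
#if 0
	SYSCTL_ADD_PROC(&sc->msk_sysctl_ctx,
	    SYSCTL_CHILDREN(sc->msk_sysctl_tree), OID_AUTO, "intr_rate",
	    CTLTYPE_INT | CTLFLAG_RW, sc, 0,
	    mskc_sysctl_intr_rate, "I", "Interrupt moderation rate (Hz)");
#endif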
static int
mskc_sysctl_proc_limit(SYSCTL_HANDLER_ARGS)
{
	return sysctl_int_range(oidp, arg1, arg2, req,
	    MSK_PROC_MIN, MSK_PROC_MAX);
}

static int
mskc_sysctl_intr_rate(SYSCTL_HANDLER_ARGS)
{
	struct msk_softc *sc = arg1;
	struct lwkt_serialize *serializer = &sc->msk_serializer;
	int error = 0, v;

	lwkt_serialize_enter(serializer);

	v = sc->msk_intr_rate;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;
	if (v < 0) {
		error = EINVAL;
		goto back;
	}

	if (sc->msk_intr_rate != v) {
		int flag = 0, i;

		sc->msk_intr_rate = v;
		for (i = 0; i < 2; ++i) {
			if (sc->msk_if[i] != NULL) {
				flag |= sc->msk_if[i]->
				    arpcom.ac_if.if_flags & IFF_RUNNING;
			}
		}
		if (flag)
			mskc_set_imtimer(sc);
	}
back:
	lwkt_serialize_exit(serializer);
	return error;
}

static int
msk_dmamem_create(device_t dev, bus_size_t size, bus_dma_tag_t *dtag,
    void **addr, bus_addr_t *paddr, bus_dmamap_t *dmap)
{
	struct msk_if_softc *sc_if = device_get_softc(dev);
	bus_dmamem_t dmem;
	int error;

	error = bus_dmamem_coherent(sc_if->msk_cdata.msk_parent_tag,
	    MSK_RING_ALIGN, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    size, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem);
	if (error) {
		device_printf(dev, "can't create coherent DMA memory\n");
		return error;
	}

	*dtag = dmem.dmem_tag;
	*dmap = dmem.dmem_map;
	*addr = dmem.dmem_addr;
	*paddr = dmem.dmem_busaddr;

	return 0;
}

static void
msk_dmamem_destroy(bus_dma_tag_t dtag, void *addr, bus_dmamap_t dmap)
{
	if (dtag != NULL) {
		bus_dmamap_unload(dtag, dmap);
		bus_dmamem_free(dtag, addr, dmap);
		bus_dma_tag_destroy(dtag);
	}
}

static void
mskc_set_imtimer(struct msk_softc *sc)
{
	if (sc->msk_intr_rate > 0) {
		/*
		 * XXX myk(4) seems to use 125MHz for EC/FE/XL
		 * and 78.125MHz for the rest of the chip types.
		 */
		CSR_WRITE_4(sc, B2_IRQM_INI,
		    MSK_USECS(sc, 1000000 / sc->msk_intr_rate));
		CSR_WRITE_4(sc, B2_IRQM_MSK, sc->msk_intrmask);
		CSR_WRITE_4(sc, B2_IRQM_CTRL, TIM_START);
	} else {
		CSR_WRITE_4(sc, B2_IRQM_CTRL, TIM_STOP);
	}
}
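/*
 * Worked example for the moderation timer above (hypothetical numbers): with
 * msk_intr_rate set to 10000 interrupts/s, B2_IRQM_INI is loaded with
 * MSK_USECS(sc, 1000000 / 10000) = MSK_USECS(sc, 100), i.e. the tick count
 * corresponding to 100us; on a chip whose moderation clock runs at 125MHz
 * that works out to 100 * 125 = 12500 ticks.  Setting msk_intr_rate to 0
 * stops the timer (TIM_STOP) and disables moderation entirely.
 */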