/*
 * Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>.  All rights reserved.
 *
 * Copyright (c) 2001-2008, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  1. Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  3. Neither the name of the Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived from
 *     this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * Copyright (c) 2005 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include "opt_polling.h"
#include "opt_serializer.h"
#include "opt_rss.h"
#include "opt_emx.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

#include <dev/netif/ig_hal/e1000_api.h>
#include <dev/netif/ig_hal/e1000_82571.h>
#include <dev/netif/emx/if_emx.h>

#ifdef EMX_RSS_DEBUG
#define EMX_RSS_DPRINTF(sc, lvl, fmt, ...) \
do { \
	if (sc->rss_debug > lvl) \
		if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \
} while (0)
#else	/* !EMX_RSS_DEBUG */
#define EMX_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
#endif	/* EMX_RSS_DEBUG */

#define EMX_NAME	"Intel(R) PRO/1000 "

#define EMX_DEVICE(id)	\
	{ EMX_VENDOR_ID, E1000_DEV_ID_##id, EMX_NAME #id }
#define EMX_DEVICE_NULL	{ 0, 0, NULL }

static const struct emx_device {
	uint16_t	vid;
	uint16_t	did;
	const char	*desc;
} emx_devices[] = {
	EMX_DEVICE(82571EB_COPPER),
	EMX_DEVICE(82571EB_FIBER),
	EMX_DEVICE(82571EB_SERDES),
	EMX_DEVICE(82571EB_SERDES_DUAL),
	EMX_DEVICE(82571EB_SERDES_QUAD),
	EMX_DEVICE(82571EB_QUAD_COPPER),
	EMX_DEVICE(82571EB_QUAD_COPPER_LP),
	EMX_DEVICE(82571EB_QUAD_FIBER),
	EMX_DEVICE(82571PT_QUAD_COPPER),

	EMX_DEVICE(82572EI_COPPER),
	EMX_DEVICE(82572EI_FIBER),
	EMX_DEVICE(82572EI_SERDES),
	EMX_DEVICE(82572EI),

	EMX_DEVICE(82573E),
	EMX_DEVICE(82573E_IAMT),
	EMX_DEVICE(82573L),

	EMX_DEVICE(80003ES2LAN_COPPER_SPT),
	EMX_DEVICE(80003ES2LAN_SERDES_SPT),
	EMX_DEVICE(80003ES2LAN_COPPER_DPT),
	EMX_DEVICE(80003ES2LAN_SERDES_DPT),

	EMX_DEVICE(82574L),

	/* required last entry */
	EMX_DEVICE_NULL
};
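/*
 * Editorial note (not part of the original source): the probe table above
 * is built entirely from the EMX_DEVICE() macro, which token-pastes the
 * device id and stringizes it for the description.  As a rough
 * illustration,
 *
 *	EMX_DEVICE(82571EB_COPPER)
 *
 * expands to
 *
 *	{ EMX_VENDOR_ID, E1000_DEV_ID_82571EB_COPPER,
 *	  "Intel(R) PRO/1000 " "82571EB_COPPER" }
 *
 * i.e. vendor id, device id and a human readable description, which is
 * what emx_probe() matches against and hands to device_set_desc().
 */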
static int	emx_probe(device_t);
static int	emx_attach(device_t);
static int	emx_detach(device_t);
static int	emx_shutdown(device_t);
static int	emx_suspend(device_t);
static int	emx_resume(device_t);

static void	emx_init(void *);
static void	emx_stop(struct emx_softc *);
static int	emx_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	emx_start(struct ifnet *);
#ifdef DEVICE_POLLING
static void	emx_poll(struct ifnet *, enum poll_cmd, int);
#endif
static void	emx_watchdog(struct ifnet *);
static void	emx_media_status(struct ifnet *, struct ifmediareq *);
static int	emx_media_change(struct ifnet *);
static void	emx_timer(void *);

static void	emx_intr(void *);
static void	emx_rxeof(struct emx_softc *, int, int);
static void	emx_txeof(struct emx_softc *);
static void	emx_tx_collect(struct emx_softc *);
static void	emx_tx_purge(struct emx_softc *);
static void	emx_enable_intr(struct emx_softc *);
static void	emx_disable_intr(struct emx_softc *);

static int	emx_dma_alloc(struct emx_softc *);
static void	emx_dma_free(struct emx_softc *);
static void	emx_init_tx_ring(struct emx_softc *);
static int	emx_init_rx_ring(struct emx_softc *, struct emx_rxdata *);
static void	emx_free_rx_ring(struct emx_softc *, struct emx_rxdata *);
static int	emx_create_tx_ring(struct emx_softc *);
static int	emx_create_rx_ring(struct emx_softc *, struct emx_rxdata *);
static void	emx_destroy_tx_ring(struct emx_softc *, int);
static void	emx_destroy_rx_ring(struct emx_softc *,
		    struct emx_rxdata *, int);
static int	emx_newbuf(struct emx_softc *, struct emx_rxdata *, int, int);
static int	emx_encap(struct emx_softc *, struct mbuf **);
static int	emx_txcsum_pullup(struct emx_softc *, struct mbuf **);
static int	emx_txcsum(struct emx_softc *, struct mbuf *,
		    uint32_t *, uint32_t *);

static int	emx_is_valid_eaddr(const uint8_t *);
static int	emx_hw_init(struct emx_softc *);
static void	emx_setup_ifp(struct emx_softc *);
static void	emx_init_tx_unit(struct emx_softc *);
static void	emx_init_rx_unit(struct emx_softc *);
static void	emx_update_stats(struct emx_softc *);
static void	emx_set_promisc(struct emx_softc *);
static void	emx_disable_promisc(struct emx_softc *);
static void	emx_set_multi(struct emx_softc *);
static void	emx_update_link_status(struct emx_softc *);
static void	emx_smartspeed(struct emx_softc *);

static void	emx_print_debug_info(struct emx_softc *);
static void	emx_print_nvm_info(struct emx_softc *);
static void	emx_print_hw_stats(struct emx_softc *);

static int	emx_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_int_throttle(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_int_tx_nsegs(SYSCTL_HANDLER_ARGS);
static void	emx_add_sysctl(struct emx_softc *);

/* Management and WOL Support */
static void	emx_get_mgmt(struct emx_softc *);
static void	emx_rel_mgmt(struct emx_softc *);
static void	emx_get_hw_control(struct emx_softc *);
static void	emx_rel_hw_control(struct emx_softc *);
static void	emx_enable_wol(device_t);

static device_method_t emx_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		emx_probe),
	DEVMETHOD(device_attach,	emx_attach),
	DEVMETHOD(device_detach,	emx_detach),
	DEVMETHOD(device_shutdown,	emx_shutdown),
	DEVMETHOD(device_suspend,	emx_suspend),
	DEVMETHOD(device_resume,	emx_resume),
	{ 0, 0 }
};

static driver_t emx_driver = {
	"emx",
	emx_methods,
	sizeof(struct emx_softc),
};

static devclass_t emx_devclass;

DECLARE_DUMMY_MODULE(if_emx);
MODULE_DEPEND(emx, ig_hal, 1, 1, 1);
DRIVER_MODULE(if_emx, pci, emx_driver, emx_devclass, 0, 0);

/*
 * Tunables
 */
static int	emx_int_throttle_ceil = EMX_DEFAULT_ITR;
static int	emx_rxd = EMX_DEFAULT_RXD;
static int	emx_txd = EMX_DEFAULT_TXD;
static int	emx_smart_pwr_down = FALSE;

/* Controls whether promiscuous also shows bad packets */
static int	emx_debug_sbp = FALSE;

static int	emx_82573_workaround = TRUE;

TUNABLE_INT("hw.emx.int_throttle_ceil", &emx_int_throttle_ceil);
TUNABLE_INT("hw.emx.rxd", &emx_rxd);
TUNABLE_INT("hw.emx.txd", &emx_txd);
TUNABLE_INT("hw.emx.smart_pwr_down", &emx_smart_pwr_down);
TUNABLE_INT("hw.emx.sbp", &emx_debug_sbp);
TUNABLE_INT("hw.emx.82573_workaround", &emx_82573_workaround);
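/*
 * Editorial note (not part of the original source): the TUNABLE_INT()
 * knobs above are read from the kernel environment when the module is
 * loaded, so they are normally set from the boot loader rather than at
 * runtime.  A sketch of /boot/loader.conf usage, with illustrative
 * (non-default) values:
 *
 *	hw.emx.int_throttle_ceil="6000"	# interrupt rate ceiling, 0 disables
 *	hw.emx.rxd="512"		# RX descriptors per ring
 *	hw.emx.txd="512"		# TX descriptors per ring
 *	hw.emx.sbp="1"			# show bad packets in promiscuous mode
 */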
/* Global used in WOL setup with multiport cards */
static int	emx_global_quad_port_a = 0;

/* Set this to one to display debug statistics */
static int	emx_display_debug_stats = 0;

#if !defined(KTR_IF_EMX)
#define KTR_IF_EMX	KTR_ALL
#endif
KTR_INFO_MASTER(if_emx);
KTR_INFO(KTR_IF_EMX, if_emx, intr_beg, 0, "intr begin", 0);
KTR_INFO(KTR_IF_EMX, if_emx, intr_end, 1, "intr end", 0);
KTR_INFO(KTR_IF_EMX, if_emx, pkt_receive, 4, "rx packet", 0);
KTR_INFO(KTR_IF_EMX, if_emx, pkt_txqueue, 5, "tx packet", 0);
KTR_INFO(KTR_IF_EMX, if_emx, pkt_txclean, 6, "tx clean", 0);
#define logif(name)	KTR_LOG(if_emx_ ## name)

static __inline void
emx_setup_rxdesc(emx_rxdesc_t *rxd, const struct emx_rxbuf *rxbuf)
{
	rxd->rxd_bufaddr = htole64(rxbuf->paddr);
	/* DD bit must be cleared */
	rxd->rxd_staterr = 0;
}

static __inline void
emx_rxcsum(uint32_t staterr, struct mbuf *mp)
{
	/* Ignore Checksum bit is set */
	if (staterr & E1000_RXD_STAT_IXSM)
		return;

	if ((staterr & (E1000_RXD_STAT_IPCS | E1000_RXDEXT_STATERR_IPE)) ==
	    E1000_RXD_STAT_IPCS)
		mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;

	if ((staterr & (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) ==
	    E1000_RXD_STAT_TCPCS) {
		mp->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
					   CSUM_PSEUDO_HDR |
					   CSUM_FRAG_NOT_CHECKED;
		mp->m_pkthdr.csum_data = htons(0xffff);
	}
}

static int
emx_probe(device_t dev)
{
	const struct emx_device *d;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);

	for (d = emx_devices; d->desc != NULL; ++d) {
		if (vid == d->vid && did == d->did) {
			device_set_desc(dev, d->desc);
			device_set_async_attach(dev, TRUE);
			return 0;
		}
	}
	return ENXIO;
}

static int
emx_attach(device_t dev)
{
	struct emx_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error = 0;
	uint16_t eeprom_data, device_id;

	callout_init(&sc->timer);

	sc->dev = sc->osdep.dev = dev;

	/*
	 * Determine hardware and mac type
	 */
	sc->hw.vendor_id = pci_get_vendor(dev);
	sc->hw.device_id = pci_get_device(dev);
	sc->hw.revision_id = pci_get_revid(dev);
	sc->hw.subsystem_vendor_id = pci_get_subvendor(dev);
	sc->hw.subsystem_device_id = pci_get_subdevice(dev);

	if (e1000_set_mac_type(&sc->hw))
		return ENXIO;

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate IO memory
	 */
	sc->memory_rid = EMX_BAR_MEM;
	sc->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
					    &sc->memory_rid, RF_ACTIVE);
	if (sc->memory == NULL) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		error = ENXIO;
		goto fail;
	}
	sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->memory);
	sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->memory);

	/* XXX This is quite goofy, it is not actually used */
	sc->hw.hw_addr = (uint8_t *)&sc->osdep.mem_bus_space_handle;

	/*
	 * Allocate interrupt
	 */
	sc->intr_rid = 0;
	sc->intr_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->intr_rid,
					      RF_SHAREABLE | RF_ACTIVE);
	if (sc->intr_res == NULL) {
		device_printf(dev, "Unable to allocate bus resource: "
			      "interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Save PCI command register for Shared Code */
	sc->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	sc->hw.back = &sc->osdep;

	/* Do Shared Code initialization */
	if (e1000_setup_init_funcs(&sc->hw, TRUE)) {
		device_printf(dev, "Setup of Shared code failed\n");
		error = ENXIO;
		goto fail;
	}
	e1000_get_bus_info(&sc->hw);

	sc->hw.mac.autoneg = EMX_DO_AUTO_NEG;
	sc->hw.phy.autoneg_wait_to_complete = FALSE;
	sc->hw.phy.autoneg_advertised = EMX_AUTONEG_ADV_DEFAULT;

	/*
	 * Interrupt throttle rate
	 */
	if (emx_int_throttle_ceil == 0) {
		sc->int_throttle_ceil = 0;
	} else {
		int throttle = emx_int_throttle_ceil;

		if (throttle < 0)
			throttle = EMX_DEFAULT_ITR;

		/* Recalculate the tunable value to get the exact frequency. */
		throttle = 1000000000 / 256 / throttle;

		/* Upper 16bits of ITR is reserved and should be zero */
		if (throttle & 0xffff0000)
			throttle = 1000000000 / 256 / EMX_DEFAULT_ITR;

		sc->int_throttle_ceil = 1000000000 / 256 / throttle;
	}
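	/*
	 * Editorial worked example (not part of the original source): with
	 * the 256ns register granularity used above, a requested ceiling of
	 * e.g. 10000 interrupts/sec becomes
	 *
	 *	throttle = 1000000000 / 256 / 10000 = 390 (register units)
	 *
	 * and the value stored back in int_throttle_ceil is
	 *
	 *	1000000000 / 256 / 390 = 10016 interrupts/sec,
	 *
	 * i.e. the closest frequency the 16-bit ITR register can actually
	 * express, which is why the tunable is recalculated here.
	 */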
	e1000_init_script_state_82541(&sc->hw, TRUE);
	e1000_set_tbi_compatibility_82543(&sc->hw, TRUE);

	/* Copper options */
	if (sc->hw.phy.media_type == e1000_media_type_copper) {
		sc->hw.phy.mdix = EMX_AUTO_ALL_MODES;
		sc->hw.phy.disable_polarity_correction = FALSE;
		sc->hw.phy.ms_type = EMX_MASTER_SLAVE;
	}

	/* Set the frame limits assuming standard ethernet sized frames. */
	sc->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
	sc->min_frame_size = ETHER_MIN_LEN;

	/* This controls when hardware reports transmit completion status. */
	sc->hw.mac.report_tx_early = 1;

#ifdef RSS
	/* Calculate # of RX rings */
	if (ncpus > 1)
		sc->rx_ring_cnt = EMX_NRX_RING;
	else
#endif
		sc->rx_ring_cnt = 1;
	sc->rx_ring_inuse = sc->rx_ring_cnt;

	/* Allocate RX/TX rings' busdma(9) stuffs */
	error = emx_dma_alloc(sc);
	if (error)
		goto fail;

	/* Make sure we have a good EEPROM before we read from it */
	if (e1000_validate_nvm_checksum(&sc->hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state, call it again,
		 * if it fails a second time it's a real issue.
472 */ 473 if (e1000_validate_nvm_checksum(&sc->hw) < 0) { 474 device_printf(dev, 475 "The EEPROM Checksum Is Not Valid\n"); 476 error = EIO; 477 goto fail; 478 } 479 } 480 481 /* Initialize the hardware */ 482 error = emx_hw_init(sc); 483 if (error) { 484 device_printf(dev, "Unable to initialize the hardware\n"); 485 goto fail; 486 } 487 488 /* Copy the permanent MAC address out of the EEPROM */ 489 if (e1000_read_mac_addr(&sc->hw) < 0) { 490 device_printf(dev, "EEPROM read error while reading MAC" 491 " address\n"); 492 error = EIO; 493 goto fail; 494 } 495 if (!emx_is_valid_eaddr(sc->hw.mac.addr)) { 496 device_printf(dev, "Invalid MAC address\n"); 497 error = EIO; 498 goto fail; 499 } 500 501 /* Manually turn off all interrupts */ 502 E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff); 503 504 /* Setup OS specific network interface */ 505 emx_setup_ifp(sc); 506 507 /* Add sysctl tree, must after emx_setup_ifp() */ 508 emx_add_sysctl(sc); 509 510 /* Initialize statistics */ 511 emx_update_stats(sc); 512 513 sc->hw.mac.get_link_status = 1; 514 emx_update_link_status(sc); 515 516 /* Indicate SOL/IDER usage */ 517 if (e1000_check_reset_block(&sc->hw)) { 518 device_printf(dev, 519 "PHY reset is blocked due to SOL/IDER session.\n"); 520 } 521 522 /* Determine if we have to control management hardware */ 523 sc->has_manage = e1000_enable_mng_pass_thru(&sc->hw); 524 525 /* 526 * Setup Wake-on-Lan 527 */ 528 switch (sc->hw.mac.type) { 529 case e1000_82571: 530 case e1000_80003es2lan: 531 if (sc->hw.bus.func == 1) { 532 e1000_read_nvm(&sc->hw, 533 NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); 534 } else { 535 e1000_read_nvm(&sc->hw, 536 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); 537 } 538 eeprom_data &= EMX_EEPROM_APME; 539 break; 540 541 default: 542 /* APME bit in EEPROM is mapped to WUC.APME */ 543 eeprom_data = 544 E1000_READ_REG(&sc->hw, E1000_WUC) & E1000_WUC_APME; 545 break; 546 } 547 if (eeprom_data) 548 sc->wol = E1000_WUFC_MAG; 549 /* 550 * We have the eeprom settings, now apply the special cases 551 * where the eeprom may be wrong or the board won't support 552 * wake on lan on a particular port 553 */ 554 device_id = pci_get_device(dev); 555 switch (device_id) { 556 case E1000_DEV_ID_82571EB_FIBER: 557 /* 558 * Wake events only supported on port A for dual fiber 559 * regardless of eeprom setting 560 */ 561 if (E1000_READ_REG(&sc->hw, E1000_STATUS) & 562 E1000_STATUS_FUNC_1) 563 sc->wol = 0; 564 break; 565 566 case E1000_DEV_ID_82571EB_QUAD_COPPER: 567 case E1000_DEV_ID_82571EB_QUAD_FIBER: 568 case E1000_DEV_ID_82571EB_QUAD_COPPER_LP: 569 /* if quad port sc, disable WoL on all but port A */ 570 if (emx_global_quad_port_a != 0) 571 sc->wol = 0; 572 /* Reset for multiple quad port adapters */ 573 if (++emx_global_quad_port_a == 4) 574 emx_global_quad_port_a = 0; 575 break; 576 } 577 578 /* XXX disable wol */ 579 sc->wol = 0; 580 581 sc->spare_tx_desc = EMX_TX_SPARE; 582 583 /* 584 * Keep following relationship between spare_tx_desc, oact_tx_desc 585 * and tx_int_nsegs: 586 * (spare_tx_desc + EMX_TX_RESERVED) <= 587 * oact_tx_desc <= EMX_TX_OACTIVE_MAX <= tx_int_nsegs 588 */ 589 sc->oact_tx_desc = sc->num_tx_desc / 8; 590 if (sc->oact_tx_desc > EMX_TX_OACTIVE_MAX) 591 sc->oact_tx_desc = EMX_TX_OACTIVE_MAX; 592 if (sc->oact_tx_desc < sc->spare_tx_desc + EMX_TX_RESERVED) 593 sc->oact_tx_desc = sc->spare_tx_desc + EMX_TX_RESERVED; 594 595 sc->tx_int_nsegs = sc->num_tx_desc / 16; 596 if (sc->tx_int_nsegs < sc->oact_tx_desc) 597 sc->tx_int_nsegs = sc->oact_tx_desc; 598 599 error = 
bus_setup_intr(dev, sc->intr_res, INTR_MPSAFE, emx_intr, sc, 600 &sc->intr_tag, ifp->if_serializer); 601 if (error) { 602 device_printf(dev, "Failed to register interrupt handler"); 603 ether_ifdetach(&sc->arpcom.ac_if); 604 goto fail; 605 } 606 607 ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->intr_res)); 608 KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus); 609 return (0); 610 fail: 611 emx_detach(dev); 612 return (error); 613 } 614 615 static int 616 emx_detach(device_t dev) 617 { 618 struct emx_softc *sc = device_get_softc(dev); 619 620 if (device_is_attached(dev)) { 621 struct ifnet *ifp = &sc->arpcom.ac_if; 622 623 lwkt_serialize_enter(ifp->if_serializer); 624 625 emx_stop(sc); 626 627 e1000_phy_hw_reset(&sc->hw); 628 629 emx_rel_mgmt(sc); 630 631 if (sc->hw.mac.type == e1000_82573 && 632 e1000_check_mng_mode(&sc->hw)) 633 emx_rel_hw_control(sc); 634 635 if (sc->wol) { 636 E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN); 637 E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol); 638 emx_enable_wol(dev); 639 } 640 641 bus_teardown_intr(dev, sc->intr_res, sc->intr_tag); 642 643 lwkt_serialize_exit(ifp->if_serializer); 644 645 ether_ifdetach(ifp); 646 } 647 bus_generic_detach(dev); 648 649 if (sc->intr_res != NULL) { 650 bus_release_resource(dev, SYS_RES_IRQ, sc->intr_rid, 651 sc->intr_res); 652 } 653 654 if (sc->memory != NULL) { 655 bus_release_resource(dev, SYS_RES_MEMORY, sc->memory_rid, 656 sc->memory); 657 } 658 659 emx_dma_free(sc); 660 661 /* Free sysctl tree */ 662 if (sc->sysctl_tree != NULL) 663 sysctl_ctx_free(&sc->sysctl_ctx); 664 665 return (0); 666 } 667 668 static int 669 emx_shutdown(device_t dev) 670 { 671 return emx_suspend(dev); 672 } 673 674 static int 675 emx_suspend(device_t dev) 676 { 677 struct emx_softc *sc = device_get_softc(dev); 678 struct ifnet *ifp = &sc->arpcom.ac_if; 679 680 lwkt_serialize_enter(ifp->if_serializer); 681 682 emx_stop(sc); 683 684 emx_rel_mgmt(sc); 685 686 if (sc->hw.mac.type == e1000_82573 && 687 e1000_check_mng_mode(&sc->hw)) 688 emx_rel_hw_control(sc); 689 690 if (sc->wol) { 691 E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN); 692 E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol); 693 emx_enable_wol(dev); 694 } 695 696 lwkt_serialize_exit(ifp->if_serializer); 697 698 return bus_generic_suspend(dev); 699 } 700 701 static int 702 emx_resume(device_t dev) 703 { 704 struct emx_softc *sc = device_get_softc(dev); 705 struct ifnet *ifp = &sc->arpcom.ac_if; 706 707 lwkt_serialize_enter(ifp->if_serializer); 708 709 emx_init(sc); 710 emx_get_mgmt(sc); 711 if_devstart(ifp); 712 713 lwkt_serialize_exit(ifp->if_serializer); 714 715 return bus_generic_resume(dev); 716 } 717 718 static void 719 emx_start(struct ifnet *ifp) 720 { 721 struct emx_softc *sc = ifp->if_softc; 722 struct mbuf *m_head; 723 724 ASSERT_SERIALIZED(ifp->if_serializer); 725 726 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 727 return; 728 729 if (!sc->link_active) { 730 ifq_purge(&ifp->if_snd); 731 return; 732 } 733 734 while (!ifq_is_empty(&ifp->if_snd)) { 735 /* Now do we at least have a minimal? 
*/ 736 if (EMX_IS_OACTIVE(sc)) { 737 emx_tx_collect(sc); 738 if (EMX_IS_OACTIVE(sc)) { 739 ifp->if_flags |= IFF_OACTIVE; 740 sc->no_tx_desc_avail1++; 741 break; 742 } 743 } 744 745 logif(pkt_txqueue); 746 m_head = ifq_dequeue(&ifp->if_snd, NULL); 747 if (m_head == NULL) 748 break; 749 750 if (emx_encap(sc, &m_head)) { 751 ifp->if_oerrors++; 752 emx_tx_collect(sc); 753 continue; 754 } 755 756 /* Send a copy of the frame to the BPF listener */ 757 ETHER_BPF_MTAP(ifp, m_head); 758 759 /* Set timeout in case hardware has problems transmitting. */ 760 ifp->if_timer = EMX_TX_TIMEOUT; 761 } 762 } 763 764 static int 765 emx_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr) 766 { 767 struct emx_softc *sc = ifp->if_softc; 768 struct ifreq *ifr = (struct ifreq *)data; 769 uint16_t eeprom_data = 0; 770 int max_frame_size, mask, reinit; 771 int error = 0; 772 773 ASSERT_SERIALIZED(ifp->if_serializer); 774 775 switch (command) { 776 case SIOCSIFMTU: 777 switch (sc->hw.mac.type) { 778 case e1000_82573: 779 /* 780 * 82573 only supports jumbo frames 781 * if ASPM is disabled. 782 */ 783 e1000_read_nvm(&sc->hw, NVM_INIT_3GIO_3, 1, 784 &eeprom_data); 785 if (eeprom_data & NVM_WORD1A_ASPM_MASK) { 786 max_frame_size = ETHER_MAX_LEN; 787 break; 788 } 789 /* FALL THROUGH */ 790 791 /* Limit Jumbo Frame size */ 792 case e1000_82571: 793 case e1000_82572: 794 case e1000_82574: 795 case e1000_80003es2lan: 796 max_frame_size = 9234; 797 break; 798 799 default: 800 max_frame_size = MAX_JUMBO_FRAME_SIZE; 801 break; 802 } 803 if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN - 804 ETHER_CRC_LEN) { 805 error = EINVAL; 806 break; 807 } 808 809 ifp->if_mtu = ifr->ifr_mtu; 810 sc->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + 811 ETHER_CRC_LEN; 812 813 if (ifp->if_flags & IFF_RUNNING) 814 emx_init(sc); 815 break; 816 817 case SIOCSIFFLAGS: 818 if (ifp->if_flags & IFF_UP) { 819 if ((ifp->if_flags & IFF_RUNNING)) { 820 if ((ifp->if_flags ^ sc->if_flags) & 821 (IFF_PROMISC | IFF_ALLMULTI)) { 822 emx_disable_promisc(sc); 823 emx_set_promisc(sc); 824 } 825 } else { 826 emx_init(sc); 827 } 828 } else if (ifp->if_flags & IFF_RUNNING) { 829 emx_stop(sc); 830 } 831 sc->if_flags = ifp->if_flags; 832 break; 833 834 case SIOCADDMULTI: 835 case SIOCDELMULTI: 836 if (ifp->if_flags & IFF_RUNNING) { 837 emx_disable_intr(sc); 838 emx_set_multi(sc); 839 #ifdef DEVICE_POLLING 840 if (!(ifp->if_flags & IFF_POLLING)) 841 #endif 842 emx_enable_intr(sc); 843 } 844 break; 845 846 case SIOCSIFMEDIA: 847 /* Check SOL/IDER usage */ 848 if (e1000_check_reset_block(&sc->hw)) { 849 device_printf(sc->dev, "Media change is" 850 " blocked due to SOL/IDER session.\n"); 851 break; 852 } 853 /* FALL THROUGH */ 854 855 case SIOCGIFMEDIA: 856 error = ifmedia_ioctl(ifp, ifr, &sc->media, command); 857 break; 858 859 case SIOCSIFCAP: 860 reinit = 0; 861 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 862 if (mask & IFCAP_HWCSUM) { 863 ifp->if_capenable ^= (mask & IFCAP_HWCSUM); 864 reinit = 1; 865 } 866 if (mask & IFCAP_VLAN_HWTAGGING) { 867 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 868 reinit = 1; 869 } 870 if (mask & IFCAP_RSS) { 871 ifp->if_capenable ^= IFCAP_RSS; 872 reinit = 1; 873 } 874 if (reinit && (ifp->if_flags & IFF_RUNNING)) 875 emx_init(sc); 876 break; 877 878 default: 879 error = ether_ioctl(ifp, command, data); 880 break; 881 } 882 return (error); 883 } 884 885 static void 886 emx_watchdog(struct ifnet *ifp) 887 { 888 struct emx_softc *sc = ifp->if_softc; 889 890 ASSERT_SERIALIZED(ifp->if_serializer); 891 892 /* 893 * The timer 
is set to 5 every time start queues a packet. 894 * Then txeof keeps resetting it as long as it cleans at 895 * least one descriptor. 896 * Finally, anytime all descriptors are clean the timer is 897 * set to 0. 898 */ 899 900 if (E1000_READ_REG(&sc->hw, E1000_TDT(0)) == 901 E1000_READ_REG(&sc->hw, E1000_TDH(0))) { 902 /* 903 * If we reach here, all TX jobs are completed and 904 * the TX engine should have been idled for some time. 905 * We don't need to call if_devstart() here. 906 */ 907 ifp->if_flags &= ~IFF_OACTIVE; 908 ifp->if_timer = 0; 909 return; 910 } 911 912 /* 913 * If we are in this routine because of pause frames, then 914 * don't reset the hardware. 915 */ 916 if (E1000_READ_REG(&sc->hw, E1000_STATUS) & E1000_STATUS_TXOFF) { 917 ifp->if_timer = EMX_TX_TIMEOUT; 918 return; 919 } 920 921 if (e1000_check_for_link(&sc->hw) == 0) 922 if_printf(ifp, "watchdog timeout -- resetting\n"); 923 924 ifp->if_oerrors++; 925 sc->watchdog_events++; 926 927 emx_init(sc); 928 929 if (!ifq_is_empty(&ifp->if_snd)) 930 if_devstart(ifp); 931 } 932 933 static void 934 emx_init(void *xsc) 935 { 936 struct emx_softc *sc = xsc; 937 struct ifnet *ifp = &sc->arpcom.ac_if; 938 device_t dev = sc->dev; 939 uint32_t pba; 940 int i; 941 942 ASSERT_SERIALIZED(ifp->if_serializer); 943 944 emx_stop(sc); 945 946 /* 947 * Packet Buffer Allocation (PBA) 948 * Writing PBA sets the receive portion of the buffer 949 * the remainder is used for the transmit buffer. 950 */ 951 switch (sc->hw.mac.type) { 952 /* Total Packet Buffer on these is 48K */ 953 case e1000_82571: 954 case e1000_82572: 955 case e1000_80003es2lan: 956 pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */ 957 break; 958 959 case e1000_82573: /* 82573: Total Packet Buffer is 32K */ 960 pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */ 961 break; 962 963 case e1000_82574: 964 pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */ 965 break; 966 967 default: 968 /* Devices before 82547 had a Packet Buffer of 64K. */ 969 if (sc->max_frame_size > 8192) 970 pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */ 971 else 972 pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */ 973 } 974 E1000_WRITE_REG(&sc->hw, E1000_PBA, pba); 975 976 /* Get the latest mac address, User can use a LAA */ 977 bcopy(IF_LLADDR(ifp), sc->hw.mac.addr, ETHER_ADDR_LEN); 978 979 /* Put the address into the Receive Address Array */ 980 e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0); 981 982 /* 983 * With the 82571 sc, RAR[0] may be overwritten 984 * when the other port is reset, we make a duplicate 985 * in RAR[14] for that eventuality, this assures 986 * the interface continues to function. 987 */ 988 if (sc->hw.mac.type == e1000_82571) { 989 e1000_set_laa_state_82571(&sc->hw, TRUE); 990 e1000_rar_set(&sc->hw, sc->hw.mac.addr, 991 E1000_RAR_ENTRIES - 1); 992 } 993 994 /* Initialize the hardware */ 995 if (emx_hw_init(sc)) { 996 device_printf(dev, "Unable to initialize the hardware\n"); 997 /* XXX emx_stop()? 
*/ 998 return; 999 } 1000 emx_update_link_status(sc); 1001 1002 /* Setup VLAN support, basic and offload if available */ 1003 E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN); 1004 1005 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) { 1006 uint32_t ctrl; 1007 1008 ctrl = E1000_READ_REG(&sc->hw, E1000_CTRL); 1009 ctrl |= E1000_CTRL_VME; 1010 E1000_WRITE_REG(&sc->hw, E1000_CTRL, ctrl); 1011 } 1012 1013 /* Set hardware offload abilities */ 1014 if (ifp->if_capenable & IFCAP_TXCSUM) 1015 ifp->if_hwassist = EMX_CSUM_FEATURES; 1016 else 1017 ifp->if_hwassist = 0; 1018 1019 /* Configure for OS presence */ 1020 emx_get_mgmt(sc); 1021 1022 /* Prepare transmit descriptors and buffers */ 1023 emx_init_tx_ring(sc); 1024 emx_init_tx_unit(sc); 1025 1026 /* Setup Multicast table */ 1027 emx_set_multi(sc); 1028 1029 /* 1030 * Adjust # of RX ring to be used based on IFCAP_RSS 1031 */ 1032 if (ifp->if_capenable & IFCAP_RSS) 1033 sc->rx_ring_inuse = sc->rx_ring_cnt; 1034 else 1035 sc->rx_ring_inuse = 1; 1036 1037 /* Prepare receive descriptors and buffers */ 1038 for (i = 0; i < sc->rx_ring_inuse; ++i) { 1039 if (emx_init_rx_ring(sc, &sc->rx_data[i])) { 1040 device_printf(dev, 1041 "Could not setup receive structures\n"); 1042 emx_stop(sc); 1043 return; 1044 } 1045 } 1046 emx_init_rx_unit(sc); 1047 1048 /* Don't lose promiscuous settings */ 1049 emx_set_promisc(sc); 1050 1051 ifp->if_flags |= IFF_RUNNING; 1052 ifp->if_flags &= ~IFF_OACTIVE; 1053 1054 callout_reset(&sc->timer, hz, emx_timer, sc); 1055 e1000_clear_hw_cntrs_base_generic(&sc->hw); 1056 1057 /* MSI/X configuration for 82574 */ 1058 if (sc->hw.mac.type == e1000_82574) { 1059 int tmp; 1060 1061 tmp = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT); 1062 tmp |= E1000_CTRL_EXT_PBA_CLR; 1063 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, tmp); 1064 /* 1065 * Set the IVAR - interrupt vector routing. 1066 * Each nibble represents a vector, high bit 1067 * is enable, other 3 bits are the MSIX table 1068 * entry, we map RXQ0 to 0, TXQ0 to 1, and 1069 * Link (other) to 2, hence the magic number. 1070 */ 1071 E1000_WRITE_REG(&sc->hw, E1000_IVAR, 0x800A0908); 1072 } 1073 1074 #ifdef DEVICE_POLLING 1075 /* 1076 * Only enable interrupts if we are not polling, make sure 1077 * they are off otherwise. 
1078 */ 1079 if (ifp->if_flags & IFF_POLLING) 1080 emx_disable_intr(sc); 1081 else 1082 #endif /* DEVICE_POLLING */ 1083 emx_enable_intr(sc); 1084 1085 /* Don't reset the phy next time init gets called */ 1086 sc->hw.phy.reset_disable = TRUE; 1087 } 1088 1089 #ifdef DEVICE_POLLING 1090 1091 static void 1092 emx_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 1093 { 1094 struct emx_softc *sc = ifp->if_softc; 1095 uint32_t reg_icr; 1096 1097 ASSERT_SERIALIZED(ifp->if_serializer); 1098 1099 switch (cmd) { 1100 case POLL_REGISTER: 1101 emx_disable_intr(sc); 1102 break; 1103 1104 case POLL_DEREGISTER: 1105 emx_enable_intr(sc); 1106 break; 1107 1108 case POLL_AND_CHECK_STATUS: 1109 reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR); 1110 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { 1111 callout_stop(&sc->timer); 1112 sc->hw.mac.get_link_status = 1; 1113 emx_update_link_status(sc); 1114 callout_reset(&sc->timer, hz, emx_timer, sc); 1115 } 1116 /* FALL THROUGH */ 1117 case POLL_ONLY: 1118 if (ifp->if_flags & IFF_RUNNING) { 1119 int i; 1120 1121 for (i = 0; i < sc->rx_ring_inuse; ++i) 1122 emx_rxeof(sc, i, count); 1123 1124 emx_txeof(sc); 1125 if (!ifq_is_empty(&ifp->if_snd)) 1126 if_devstart(ifp); 1127 } 1128 break; 1129 } 1130 } 1131 1132 #endif /* DEVICE_POLLING */ 1133 1134 static void 1135 emx_intr(void *xsc) 1136 { 1137 struct emx_softc *sc = xsc; 1138 struct ifnet *ifp = &sc->arpcom.ac_if; 1139 uint32_t reg_icr; 1140 1141 logif(intr_beg); 1142 ASSERT_SERIALIZED(ifp->if_serializer); 1143 1144 reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR); 1145 1146 if ((reg_icr & E1000_ICR_INT_ASSERTED) == 0) { 1147 logif(intr_end); 1148 return; 1149 } 1150 1151 /* 1152 * XXX: some laptops trigger several spurious interrupts 1153 * on emx(4) when in the resume cycle. The ICR register 1154 * reports all-ones value in this case. Processing such 1155 * interrupts would lead to a freeze. I don't know why. 
1156 */ 1157 if (reg_icr == 0xffffffff) { 1158 logif(intr_end); 1159 return; 1160 } 1161 1162 if (ifp->if_flags & IFF_RUNNING) { 1163 if (reg_icr & 1164 (E1000_ICR_RXT0 | E1000_ICR_RXDMT0 | E1000_ICR_RXO)) { 1165 int i; 1166 1167 for (i = 0; i < sc->rx_ring_inuse; ++i) 1168 emx_rxeof(sc, i, -1); 1169 } 1170 if (reg_icr & E1000_ICR_TXDW) { 1171 emx_txeof(sc); 1172 if (!ifq_is_empty(&ifp->if_snd)) 1173 if_devstart(ifp); 1174 } 1175 } 1176 1177 /* Link status change */ 1178 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { 1179 callout_stop(&sc->timer); 1180 sc->hw.mac.get_link_status = 1; 1181 emx_update_link_status(sc); 1182 1183 /* Deal with TX cruft when link lost */ 1184 emx_tx_purge(sc); 1185 1186 callout_reset(&sc->timer, hz, emx_timer, sc); 1187 } 1188 1189 if (reg_icr & E1000_ICR_RXO) 1190 sc->rx_overruns++; 1191 1192 logif(intr_end); 1193 } 1194 1195 static void 1196 emx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 1197 { 1198 struct emx_softc *sc = ifp->if_softc; 1199 1200 ASSERT_SERIALIZED(ifp->if_serializer); 1201 1202 emx_update_link_status(sc); 1203 1204 ifmr->ifm_status = IFM_AVALID; 1205 ifmr->ifm_active = IFM_ETHER; 1206 1207 if (!sc->link_active) 1208 return; 1209 1210 ifmr->ifm_status |= IFM_ACTIVE; 1211 1212 if (sc->hw.phy.media_type == e1000_media_type_fiber || 1213 sc->hw.phy.media_type == e1000_media_type_internal_serdes) { 1214 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX; 1215 } else { 1216 switch (sc->link_speed) { 1217 case 10: 1218 ifmr->ifm_active |= IFM_10_T; 1219 break; 1220 case 100: 1221 ifmr->ifm_active |= IFM_100_TX; 1222 break; 1223 1224 case 1000: 1225 ifmr->ifm_active |= IFM_1000_T; 1226 break; 1227 } 1228 if (sc->link_duplex == FULL_DUPLEX) 1229 ifmr->ifm_active |= IFM_FDX; 1230 else 1231 ifmr->ifm_active |= IFM_HDX; 1232 } 1233 } 1234 1235 static int 1236 emx_media_change(struct ifnet *ifp) 1237 { 1238 struct emx_softc *sc = ifp->if_softc; 1239 struct ifmedia *ifm = &sc->media; 1240 1241 ASSERT_SERIALIZED(ifp->if_serializer); 1242 1243 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 1244 return (EINVAL); 1245 1246 switch (IFM_SUBTYPE(ifm->ifm_media)) { 1247 case IFM_AUTO: 1248 sc->hw.mac.autoneg = EMX_DO_AUTO_NEG; 1249 sc->hw.phy.autoneg_advertised = EMX_AUTONEG_ADV_DEFAULT; 1250 break; 1251 1252 case IFM_1000_LX: 1253 case IFM_1000_SX: 1254 case IFM_1000_T: 1255 sc->hw.mac.autoneg = EMX_DO_AUTO_NEG; 1256 sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; 1257 break; 1258 1259 case IFM_100_TX: 1260 sc->hw.mac.autoneg = FALSE; 1261 sc->hw.phy.autoneg_advertised = 0; 1262 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) 1263 sc->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL; 1264 else 1265 sc->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF; 1266 break; 1267 1268 case IFM_10_T: 1269 sc->hw.mac.autoneg = FALSE; 1270 sc->hw.phy.autoneg_advertised = 0; 1271 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) 1272 sc->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL; 1273 else 1274 sc->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF; 1275 break; 1276 1277 default: 1278 if_printf(ifp, "Unsupported media type\n"); 1279 break; 1280 } 1281 1282 /* 1283 * As the speed/duplex settings my have changed we need to 1284 * reset the PHY. 
1285 */ 1286 sc->hw.phy.reset_disable = FALSE; 1287 1288 emx_init(sc); 1289 1290 return (0); 1291 } 1292 1293 static int 1294 emx_encap(struct emx_softc *sc, struct mbuf **m_headp) 1295 { 1296 bus_dma_segment_t segs[EMX_MAX_SCATTER]; 1297 bus_dmamap_t map; 1298 struct emx_txbuf *tx_buffer, *tx_buffer_mapped; 1299 struct e1000_tx_desc *ctxd = NULL; 1300 struct mbuf *m_head = *m_headp; 1301 uint32_t txd_upper, txd_lower, cmd = 0; 1302 int maxsegs, nsegs, i, j, first, last = 0, error; 1303 1304 if (m_head->m_len < EMX_TXCSUM_MINHL && 1305 (m_head->m_flags & EMX_CSUM_FEATURES)) { 1306 /* 1307 * Make sure that ethernet header and ip.ip_hl are in 1308 * contiguous memory, since if TXCSUM is enabled, later 1309 * TX context descriptor's setup need to access ip.ip_hl. 1310 */ 1311 error = emx_txcsum_pullup(sc, m_headp); 1312 if (error) { 1313 KKASSERT(*m_headp == NULL); 1314 return error; 1315 } 1316 m_head = *m_headp; 1317 } 1318 1319 txd_upper = txd_lower = 0; 1320 1321 /* 1322 * Capture the first descriptor index, this descriptor 1323 * will have the index of the EOP which is the only one 1324 * that now gets a DONE bit writeback. 1325 */ 1326 first = sc->next_avail_tx_desc; 1327 tx_buffer = &sc->tx_buf[first]; 1328 tx_buffer_mapped = tx_buffer; 1329 map = tx_buffer->map; 1330 1331 maxsegs = sc->num_tx_desc_avail - EMX_TX_RESERVED; 1332 KASSERT(maxsegs >= sc->spare_tx_desc, ("not enough spare TX desc\n")); 1333 if (maxsegs > EMX_MAX_SCATTER) 1334 maxsegs = EMX_MAX_SCATTER; 1335 1336 error = bus_dmamap_load_mbuf_defrag(sc->txtag, map, m_headp, 1337 segs, maxsegs, &nsegs, BUS_DMA_NOWAIT); 1338 if (error) { 1339 if (error == ENOBUFS) 1340 sc->mbuf_alloc_failed++; 1341 else 1342 sc->no_tx_dma_setup++; 1343 1344 m_freem(*m_headp); 1345 *m_headp = NULL; 1346 return error; 1347 } 1348 bus_dmamap_sync(sc->txtag, map, BUS_DMASYNC_PREWRITE); 1349 1350 m_head = *m_headp; 1351 sc->tx_nsegs += nsegs; 1352 1353 if (m_head->m_pkthdr.csum_flags & EMX_CSUM_FEATURES) { 1354 /* TX csum offloading will consume one TX desc */ 1355 sc->tx_nsegs += emx_txcsum(sc, m_head, &txd_upper, &txd_lower); 1356 } 1357 i = sc->next_avail_tx_desc; 1358 1359 /* Set up our transmit descriptors */ 1360 for (j = 0; j < nsegs; j++) { 1361 tx_buffer = &sc->tx_buf[i]; 1362 ctxd = &sc->tx_desc_base[i]; 1363 1364 ctxd->buffer_addr = htole64(segs[j].ds_addr); 1365 ctxd->lower.data = htole32(E1000_TXD_CMD_IFCS | 1366 txd_lower | segs[j].ds_len); 1367 ctxd->upper.data = htole32(txd_upper); 1368 1369 last = i; 1370 if (++i == sc->num_tx_desc) 1371 i = 0; 1372 } 1373 1374 sc->next_avail_tx_desc = i; 1375 1376 KKASSERT(sc->num_tx_desc_avail > nsegs); 1377 sc->num_tx_desc_avail -= nsegs; 1378 1379 /* Handle VLAN tag */ 1380 if (m_head->m_flags & M_VLANTAG) { 1381 /* Set the vlan id. */ 1382 ctxd->upper.fields.special = 1383 htole16(m_head->m_pkthdr.ether_vlantag); 1384 1385 /* Tell hardware to add tag */ 1386 ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE); 1387 } 1388 1389 tx_buffer->m_head = m_head; 1390 tx_buffer_mapped->map = tx_buffer->map; 1391 tx_buffer->map = map; 1392 1393 if (sc->tx_nsegs >= sc->tx_int_nsegs) { 1394 sc->tx_nsegs = 0; 1395 1396 /* 1397 * Report Status (RS) is turned on 1398 * every tx_int_nsegs descriptors. 1399 */ 1400 cmd = E1000_TXD_CMD_RS; 1401 1402 /* 1403 * Keep track of the descriptor, which will 1404 * be written back by hardware. 
1405 */ 1406 sc->tx_dd[sc->tx_dd_tail] = last; 1407 EMX_INC_TXDD_IDX(sc->tx_dd_tail); 1408 KKASSERT(sc->tx_dd_tail != sc->tx_dd_head); 1409 } 1410 1411 /* 1412 * Last Descriptor of Packet needs End Of Packet (EOP) 1413 */ 1414 ctxd->lower.data |= htole32(E1000_TXD_CMD_EOP | cmd); 1415 1416 /* 1417 * Advance the Transmit Descriptor Tail (TDT), this tells 1418 * the E1000 that this frame is available to transmit. 1419 */ 1420 E1000_WRITE_REG(&sc->hw, E1000_TDT(0), i); 1421 1422 return (0); 1423 } 1424 1425 static void 1426 emx_set_promisc(struct emx_softc *sc) 1427 { 1428 struct ifnet *ifp = &sc->arpcom.ac_if; 1429 uint32_t reg_rctl; 1430 1431 reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL); 1432 1433 if (ifp->if_flags & IFF_PROMISC) { 1434 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); 1435 /* Turn this on if you want to see bad packets */ 1436 if (emx_debug_sbp) 1437 reg_rctl |= E1000_RCTL_SBP; 1438 E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl); 1439 } else if (ifp->if_flags & IFF_ALLMULTI) { 1440 reg_rctl |= E1000_RCTL_MPE; 1441 reg_rctl &= ~E1000_RCTL_UPE; 1442 E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl); 1443 } 1444 } 1445 1446 static void 1447 emx_disable_promisc(struct emx_softc *sc) 1448 { 1449 uint32_t reg_rctl; 1450 1451 reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL); 1452 1453 reg_rctl &= ~E1000_RCTL_UPE; 1454 reg_rctl &= ~E1000_RCTL_MPE; 1455 reg_rctl &= ~E1000_RCTL_SBP; 1456 E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl); 1457 } 1458 1459 static void 1460 emx_set_multi(struct emx_softc *sc) 1461 { 1462 struct ifnet *ifp = &sc->arpcom.ac_if; 1463 struct ifmultiaddr *ifma; 1464 uint32_t reg_rctl = 0; 1465 uint8_t mta[512]; /* Largest MTS is 4096 bits */ 1466 int mcnt = 0; 1467 1468 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1469 if (ifma->ifma_addr->sa_family != AF_LINK) 1470 continue; 1471 1472 if (mcnt == EMX_MCAST_ADDR_MAX) 1473 break; 1474 1475 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 1476 &mta[mcnt * ETHER_ADDR_LEN], ETHER_ADDR_LEN); 1477 mcnt++; 1478 } 1479 1480 if (mcnt >= EMX_MCAST_ADDR_MAX) { 1481 reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL); 1482 reg_rctl |= E1000_RCTL_MPE; 1483 E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl); 1484 } else { 1485 e1000_update_mc_addr_list(&sc->hw, mta, 1486 mcnt, 1, sc->hw.mac.rar_entry_count); 1487 } 1488 } 1489 1490 /* 1491 * This routine checks for link status and updates statistics. 
 */
static void
emx_timer(void *xsc)
{
	struct emx_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);

	emx_update_link_status(sc);
	emx_update_stats(sc);

	/* Reset LAA into RAR[0] on 82571 */
	if (e1000_get_laa_state_82571(&sc->hw) == TRUE)
		e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0);

	if (emx_display_debug_stats && (ifp->if_flags & IFF_RUNNING))
		emx_print_hw_stats(sc);

	emx_smartspeed(sc);

	callout_reset(&sc->timer, hz, emx_timer, sc);

	lwkt_serialize_exit(ifp->if_serializer);
}

static void
emx_update_link_status(struct emx_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	device_t dev = sc->dev;
	uint32_t link_check = 0;

	/* Get the cached link value or read phy for real */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			/* Do the work to read phy */
			e1000_check_for_link(hw);
			link_check = !hw->mac.get_link_status;
			if (link_check)	/* ESB2 fix */
				e1000_cfg_on_link_up(hw);
		} else {
			link_check = TRUE;
		}
		break;

	case e1000_media_type_fiber:
		e1000_check_for_link(hw);
		link_check = E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU;
		break;

	case e1000_media_type_internal_serdes:
		e1000_check_for_link(hw);
		link_check = sc->hw.mac.serdes_has_link;
		break;

	case e1000_media_type_unknown:
	default:
		break;
	}

	/* Now check for a transition */
	if (link_check && sc->link_active == 0) {
		e1000_get_speed_and_duplex(hw, &sc->link_speed,
		    &sc->link_duplex);

		/*
		 * Check if we should enable/disable SPEED_MODE bit on
		 * 82571EB/82572EI
		 */
		if (hw->mac.type == e1000_82571 ||
		    hw->mac.type == e1000_82572) {
			int tarc0;

			tarc0 = E1000_READ_REG(hw, E1000_TARC(0));
			if (sc->link_speed != SPEED_1000)
				tarc0 &= ~EMX_TARC_SPEED_MODE;
			else
				tarc0 |= EMX_TARC_SPEED_MODE;
			E1000_WRITE_REG(hw, E1000_TARC(0), tarc0);
		}
		if (bootverbose) {
			device_printf(dev, "Link is up %d Mbps %s\n",
			    sc->link_speed,
			    ((sc->link_duplex == FULL_DUPLEX) ?
			    "Full Duplex" : "Half Duplex"));
		}
		sc->link_active = 1;
		sc->smartspeed = 0;
		ifp->if_baudrate = sc->link_speed * 1000000;
		ifp->if_link_state = LINK_STATE_UP;
		if_link_state_change(ifp);
	} else if (!link_check && sc->link_active == 1) {
		ifp->if_baudrate = sc->link_speed = 0;
		sc->link_duplex = 0;
		if (bootverbose)
			device_printf(dev, "Link is Down\n");
		sc->link_active = 0;
#if 0
		/* Link down, disable watchdog */
		ifp->if_timer = 0;
#endif
		ifp->if_link_state = LINK_STATE_DOWN;
		if_link_state_change(ifp);
	}
}
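/*
 * Editorial note (not part of the original source): a compact way to read
 * the transition logic above, where link_check reflects what the PHY/MAC
 * just reported and link_active is the driver's cached state:
 *
 *	link_check  link_active  action
 *	----------  -----------  -------------------------------------
 *	    1            0       link came up: latch speed/duplex,
 *	                         report LINK_STATE_UP
 *	    0            1       link went down: clear speed/duplex,
 *	                         report LINK_STATE_DOWN
 *	  other        other     no transition, nothing to do
 */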
static void
emx_stop(struct emx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	emx_disable_intr(sc);

	callout_stop(&sc->timer);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	/*
	 * Disable multiple receive queues.
	 *
	 * NOTE:
	 * We should disable multiple receive queues before
	 * resetting the hardware.
	 */
	E1000_WRITE_REG(&sc->hw, E1000_MRQC, 0);

	e1000_reset_hw(&sc->hw);
	E1000_WRITE_REG(&sc->hw, E1000_WUC, 0);

	for (i = 0; i < sc->num_tx_desc; i++) {
		struct emx_txbuf *tx_buffer = &sc->tx_buf[i];

		if (tx_buffer->m_head != NULL) {
			bus_dmamap_unload(sc->txtag, tx_buffer->map);
			m_freem(tx_buffer->m_head);
			tx_buffer->m_head = NULL;
		}
	}

	for (i = 0; i < sc->rx_ring_inuse; ++i)
		emx_free_rx_ring(sc, &sc->rx_data[i]);

	sc->csum_flags = 0;
	sc->csum_ehlen = 0;
	sc->csum_iphlen = 0;

	sc->tx_dd_head = 0;
	sc->tx_dd_tail = 0;
	sc->tx_nsegs = 0;
}

static int
emx_hw_init(struct emx_softc *sc)
{
	device_t dev = sc->dev;
	uint16_t rx_buffer_size;

	/* Issue a global reset */
	e1000_reset_hw(&sc->hw);

	/* Get control from any management/hw control */
	if (sc->hw.mac.type == e1000_82573 &&
	    e1000_check_mng_mode(&sc->hw))
		emx_get_hw_control(sc);

	/* Set up smart power down as default off on newer adapters. */
	if (!emx_smart_pwr_down &&
	    (sc->hw.mac.type == e1000_82571 ||
	     sc->hw.mac.type == e1000_82572)) {
		uint16_t phy_tmp = 0;

		/* Speed up time to link by disabling smart power down. */
		e1000_read_phy_reg(&sc->hw,
		    IGP02E1000_PHY_POWER_MGMT, &phy_tmp);
		phy_tmp &= ~IGP02E1000_PM_SPD;
		e1000_write_phy_reg(&sc->hw,
		    IGP02E1000_PHY_POWER_MGMT, phy_tmp);
	}

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two frames to be
	 *   received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water
	 *   mark.  This allows the receiver to restart by sending XON when
	 *   it has drained a bit.  Here we use an arbitrary value of 1500
	 *   which will restart after one full frame is pulled from the
	 *   buffer.  There could be several smaller frames in the buffer
	 *   and if so they will not trigger the XON until their total
	 *   number reduces the buffer by 1500.
	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
	 */
	rx_buffer_size = (E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff) << 10;

	sc->hw.fc.high_water = rx_buffer_size -
			       roundup2(sc->max_frame_size, 1024);
	sc->hw.fc.low_water = sc->hw.fc.high_water - 1500;

	if (sc->hw.mac.type == e1000_80003es2lan)
		sc->hw.fc.pause_time = 0xFFFF;
	else
		sc->hw.fc.pause_time = EMX_FC_PAUSE_TIME;
	sc->hw.fc.send_xon = TRUE;
	sc->hw.fc.requested_mode = e1000_fc_full;
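	/*
	 * Editorial worked example (not part of the original source): on a
	 * part whose PBA low word reads 0x0020 (32KB of RX packet buffer)
	 * with a standard 1518-byte max frame, the values computed above
	 * would be
	 *
	 *	rx_buffer_size = 0x20 << 10                 = 32768 bytes
	 *	high_water     = 32768 - roundup2(1518, 1024) = 30720 bytes
	 *	low_water      = 30720 - 1500                 = 29220 bytes
	 *
	 * so XOFF is sent once the buffer fills past 30720 bytes and XON is
	 * sent again after roughly one full frame has been drained.
	 */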
	if (e1000_init_hw(&sc->hw) < 0) {
		device_printf(dev, "Hardware Initialization Failed\n");
		return (EIO);
	}

	e1000_check_for_link(&sc->hw);

	return (0);
}

static void
emx_setup_ifp(struct emx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	if_initname(ifp, device_get_name(sc->dev),
		    device_get_unit(sc->dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = emx_init;
	ifp->if_ioctl = emx_ioctl;
	ifp->if_start = emx_start;
#ifdef DEVICE_POLLING
	ifp->if_poll = emx_poll;
#endif
	ifp->if_watchdog = emx_watchdog;
	ifq_set_maxlen(&ifp->if_snd, sc->num_tx_desc - 1);
	ifq_set_ready(&ifp->if_snd);

	ether_ifattach(ifp, sc->hw.mac.addr, NULL);

	ifp->if_capabilities = IFCAP_HWCSUM |
			       IFCAP_VLAN_HWTAGGING |
			       IFCAP_VLAN_MTU;
	if (sc->rx_ring_cnt > 1)
		ifp->if_capabilities |= IFCAP_RSS;
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_hwassist = EMX_CSUM_FEATURES;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/*
	 * Specify the media types supported by this sc and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&sc->media, IFM_IMASK,
		     emx_media_change, emx_media_status);
	if (sc->hw.phy.media_type == e1000_media_type_fiber ||
	    sc->hw.phy.media_type == e1000_media_type_internal_serdes) {
		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
			    0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
	} else {
		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX,
			    0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
			    0, NULL);
		if (sc->hw.phy.type != e1000_phy_ife) {
			ifmedia_add(&sc->media,
				    IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
			ifmedia_add(&sc->media,
				    IFM_ETHER | IFM_1000_T, 0, NULL);
		}
	}
	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
}

/*
 * Workaround for SmartSpeed on 82541 and 82547 controllers
 */
static void
emx_smartspeed(struct emx_softc *sc)
{
	uint16_t phy_tmp;

	if (sc->link_active || sc->hw.phy.type != e1000_phy_igp ||
	    sc->hw.mac.autoneg == 0 ||
	    (sc->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
		return;

	if (sc->smartspeed == 0) {
		/*
		 * If Master/Slave config fault is asserted twice,
		 * we assume back-to-back
		 */
		e1000_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp);
		if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
			return;
		e1000_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp);
		if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
			e1000_read_phy_reg(&sc->hw,
1802 PHY_1000T_CTRL, &phy_tmp); 1803 if (phy_tmp & CR_1000T_MS_ENABLE) { 1804 phy_tmp &= ~CR_1000T_MS_ENABLE; 1805 e1000_write_phy_reg(&sc->hw, 1806 PHY_1000T_CTRL, phy_tmp); 1807 sc->smartspeed++; 1808 if (sc->hw.mac.autoneg && 1809 !e1000_phy_setup_autoneg(&sc->hw) && 1810 !e1000_read_phy_reg(&sc->hw, 1811 PHY_CONTROL, &phy_tmp)) { 1812 phy_tmp |= MII_CR_AUTO_NEG_EN | 1813 MII_CR_RESTART_AUTO_NEG; 1814 e1000_write_phy_reg(&sc->hw, 1815 PHY_CONTROL, phy_tmp); 1816 } 1817 } 1818 } 1819 return; 1820 } else if (sc->smartspeed == EMX_SMARTSPEED_DOWNSHIFT) { 1821 /* If still no link, perhaps using 2/3 pair cable */ 1822 e1000_read_phy_reg(&sc->hw, PHY_1000T_CTRL, &phy_tmp); 1823 phy_tmp |= CR_1000T_MS_ENABLE; 1824 e1000_write_phy_reg(&sc->hw, PHY_1000T_CTRL, phy_tmp); 1825 if (sc->hw.mac.autoneg && 1826 !e1000_phy_setup_autoneg(&sc->hw) && 1827 !e1000_read_phy_reg(&sc->hw, PHY_CONTROL, &phy_tmp)) { 1828 phy_tmp |= MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG; 1829 e1000_write_phy_reg(&sc->hw, PHY_CONTROL, phy_tmp); 1830 } 1831 } 1832 1833 /* Restart process after EMX_SMARTSPEED_MAX iterations */ 1834 if (sc->smartspeed++ == EMX_SMARTSPEED_MAX) 1835 sc->smartspeed = 0; 1836 } 1837 1838 static int 1839 emx_create_tx_ring(struct emx_softc *sc) 1840 { 1841 device_t dev = sc->dev; 1842 struct emx_txbuf *tx_buffer; 1843 int error, i, tsize; 1844 1845 /* 1846 * Validate number of transmit descriptors. It must not exceed 1847 * hardware maximum, and must be multiple of E1000_DBA_ALIGN. 1848 */ 1849 if ((emx_txd * sizeof(struct e1000_tx_desc)) % EMX_DBA_ALIGN != 0 || 1850 emx_txd > EMX_MAX_TXD || emx_txd < EMX_MIN_TXD) { 1851 device_printf(dev, "Using %d TX descriptors instead of %d!\n", 1852 EMX_DEFAULT_TXD, emx_txd); 1853 sc->num_tx_desc = EMX_DEFAULT_TXD; 1854 } else { 1855 sc->num_tx_desc = emx_txd; 1856 } 1857 1858 /* 1859 * Allocate Transmit Descriptor ring 1860 */ 1861 tsize = roundup2(sc->num_tx_desc * sizeof(struct e1000_tx_desc), 1862 EMX_DBA_ALIGN); 1863 sc->tx_desc_base = bus_dmamem_coherent_any(sc->parent_dtag, 1864 EMX_DBA_ALIGN, tsize, BUS_DMA_WAITOK, 1865 &sc->tx_desc_dtag, &sc->tx_desc_dmap, 1866 &sc->tx_desc_paddr); 1867 if (sc->tx_desc_base == NULL) { 1868 device_printf(dev, "Unable to allocate tx_desc memory\n"); 1869 return ENOMEM; 1870 } 1871 1872 sc->tx_buf = kmalloc(sizeof(struct emx_txbuf) * sc->num_tx_desc, 1873 M_DEVBUF, M_WAITOK | M_ZERO); 1874 1875 /* 1876 * Create DMA tags for tx buffers 1877 */ 1878 error = bus_dma_tag_create(sc->parent_dtag, /* parent */ 1879 1, 0, /* alignment, bounds */ 1880 BUS_SPACE_MAXADDR, /* lowaddr */ 1881 BUS_SPACE_MAXADDR, /* highaddr */ 1882 NULL, NULL, /* filter, filterarg */ 1883 EMX_TSO_SIZE, /* maxsize */ 1884 EMX_MAX_SCATTER, /* nsegments */ 1885 EMX_MAX_SEGSIZE, /* maxsegsize */ 1886 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | 1887 BUS_DMA_ONEBPAGE, /* flags */ 1888 &sc->txtag); 1889 if (error) { 1890 device_printf(dev, "Unable to allocate TX DMA tag\n"); 1891 kfree(sc->tx_buf, M_DEVBUF); 1892 sc->tx_buf = NULL; 1893 return error; 1894 } 1895 1896 /* 1897 * Create DMA maps for tx buffers 1898 */ 1899 for (i = 0; i < sc->num_tx_desc; i++) { 1900 tx_buffer = &sc->tx_buf[i]; 1901 1902 error = bus_dmamap_create(sc->txtag, 1903 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, 1904 &tx_buffer->map); 1905 if (error) { 1906 device_printf(dev, "Unable to create TX DMA map\n"); 1907 emx_destroy_tx_ring(sc, i); 1908 return error; 1909 } 1910 } 1911 return (0); 1912 } 1913 1914 static void 1915 emx_init_tx_ring(struct emx_softc *sc) 1916 { 1917 /* Clear the old ring 
contents */ 1918 bzero(sc->tx_desc_base, 1919 sizeof(struct e1000_tx_desc) * sc->num_tx_desc); 1920 1921 /* Reset state */ 1922 sc->next_avail_tx_desc = 0; 1923 sc->next_tx_to_clean = 0; 1924 sc->num_tx_desc_avail = sc->num_tx_desc; 1925 } 1926 1927 static void 1928 emx_init_tx_unit(struct emx_softc *sc) 1929 { 1930 uint32_t tctl, tarc, tipg = 0; 1931 uint64_t bus_addr; 1932 1933 /* Setup the Base and Length of the Tx Descriptor Ring */ 1934 bus_addr = sc->tx_desc_paddr; 1935 E1000_WRITE_REG(&sc->hw, E1000_TDLEN(0), 1936 sc->num_tx_desc * sizeof(struct e1000_tx_desc)); 1937 E1000_WRITE_REG(&sc->hw, E1000_TDBAH(0), 1938 (uint32_t)(bus_addr >> 32)); 1939 E1000_WRITE_REG(&sc->hw, E1000_TDBAL(0), 1940 (uint32_t)bus_addr); 1941 /* Setup the HW Tx Head and Tail descriptor pointers */ 1942 E1000_WRITE_REG(&sc->hw, E1000_TDT(0), 0); 1943 E1000_WRITE_REG(&sc->hw, E1000_TDH(0), 0); 1944 1945 /* Set the default values for the Tx Inter Packet Gap timer */ 1946 switch (sc->hw.mac.type) { 1947 case e1000_80003es2lan: 1948 tipg = DEFAULT_82543_TIPG_IPGR1; 1949 tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 << 1950 E1000_TIPG_IPGR2_SHIFT; 1951 break; 1952 1953 default: 1954 if (sc->hw.phy.media_type == e1000_media_type_fiber || 1955 sc->hw.phy.media_type == e1000_media_type_internal_serdes) 1956 tipg = DEFAULT_82543_TIPG_IPGT_FIBER; 1957 else 1958 tipg = DEFAULT_82543_TIPG_IPGT_COPPER; 1959 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT; 1960 tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT; 1961 break; 1962 } 1963 1964 E1000_WRITE_REG(&sc->hw, E1000_TIPG, tipg); 1965 1966 /* NOTE: 0 is not allowed for TIDV */ 1967 E1000_WRITE_REG(&sc->hw, E1000_TIDV, 1); 1968 E1000_WRITE_REG(&sc->hw, E1000_TADV, 0); 1969 1970 if (sc->hw.mac.type == e1000_82571 || 1971 sc->hw.mac.type == e1000_82572) { 1972 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0)); 1973 tarc |= EMX_TARC_SPEED_MODE; 1974 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc); 1975 } else if (sc->hw.mac.type == e1000_80003es2lan) { 1976 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0)); 1977 tarc |= 1; 1978 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc); 1979 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1)); 1980 tarc |= 1; 1981 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc); 1982 } 1983 1984 /* Program the Transmit Control Register */ 1985 tctl = E1000_READ_REG(&sc->hw, E1000_TCTL); 1986 tctl &= ~E1000_TCTL_CT; 1987 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN | 1988 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); 1989 tctl |= E1000_TCTL_MULR; 1990 1991 /* This write will effectively turn on the transmit unit. 
*/ 1992 E1000_WRITE_REG(&sc->hw, E1000_TCTL, tctl); 1993 } 1994 1995 static void 1996 emx_destroy_tx_ring(struct emx_softc *sc, int ndesc) 1997 { 1998 struct emx_txbuf *tx_buffer; 1999 int i; 2000 2001 /* Free Transmit Descriptor ring */ 2002 if (sc->tx_desc_base) { 2003 bus_dmamap_unload(sc->tx_desc_dtag, sc->tx_desc_dmap); 2004 bus_dmamem_free(sc->tx_desc_dtag, sc->tx_desc_base, 2005 sc->tx_desc_dmap); 2006 bus_dma_tag_destroy(sc->tx_desc_dtag); 2007 2008 sc->tx_desc_base = NULL; 2009 } 2010 2011 if (sc->tx_buf == NULL) 2012 return; 2013 2014 for (i = 0; i < ndesc; i++) { 2015 tx_buffer = &sc->tx_buf[i]; 2016 2017 KKASSERT(tx_buffer->m_head == NULL); 2018 bus_dmamap_destroy(sc->txtag, tx_buffer->map); 2019 } 2020 bus_dma_tag_destroy(sc->txtag); 2021 2022 kfree(sc->tx_buf, M_DEVBUF); 2023 sc->tx_buf = NULL; 2024 } 2025 2026 /* 2027 * The offload context needs to be set when we transfer the first 2028 * packet of a particular protocol (TCP/UDP). This routine has been 2029 * enhanced to deal with inserted VLAN headers. 2030 * 2031 * If the new packet's ether header length, ip header length and 2032 * csum offloading type are same as the previous packet, we should 2033 * avoid allocating a new csum context descriptor; mainly to take 2034 * advantage of the pipeline effect of the TX data read request. 2035 * 2036 * This function returns number of TX descrptors allocated for 2037 * csum context. 2038 */ 2039 static int 2040 emx_txcsum(struct emx_softc *sc, struct mbuf *mp, 2041 uint32_t *txd_upper, uint32_t *txd_lower) 2042 { 2043 struct e1000_context_desc *TXD; 2044 struct emx_txbuf *tx_buffer; 2045 struct ether_vlan_header *eh; 2046 struct ip *ip; 2047 int curr_txd, ehdrlen, csum_flags; 2048 uint32_t cmd, hdr_len, ip_hlen; 2049 uint16_t etype; 2050 2051 /* 2052 * Determine where frame payload starts. 2053 * Jump over vlan headers if already present, 2054 * helpful for QinQ too. 2055 */ 2056 KASSERT(mp->m_len >= ETHER_HDR_LEN, 2057 ("emx_txcsum_pullup is not called (eh)?\n")); 2058 eh = mtod(mp, struct ether_vlan_header *); 2059 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 2060 KASSERT(mp->m_len >= ETHER_HDR_LEN + EVL_ENCAPLEN, 2061 ("emx_txcsum_pullup is not called (evh)?\n")); 2062 etype = ntohs(eh->evl_proto); 2063 ehdrlen = ETHER_HDR_LEN + EVL_ENCAPLEN; 2064 } else { 2065 etype = ntohs(eh->evl_encap_proto); 2066 ehdrlen = ETHER_HDR_LEN; 2067 } 2068 2069 /* 2070 * We only support TCP/UDP for IPv4 for the moment. 2071 * TODO: Support SCTP too when it hits the tree. 2072 */ 2073 if (etype != ETHERTYPE_IP) 2074 return 0; 2075 2076 KASSERT(mp->m_len >= ehdrlen + EMX_IPVHL_SIZE, 2077 ("emx_txcsum_pullup is not called (eh+ip_vhl)?\n")); 2078 2079 /* NOTE: We could only safely access ip.ip_vhl part */ 2080 ip = (struct ip *)(mp->m_data + ehdrlen); 2081 ip_hlen = ip->ip_hl << 2; 2082 2083 csum_flags = mp->m_pkthdr.csum_flags & EMX_CSUM_FEATURES; 2084 2085 if (sc->csum_ehlen == ehdrlen && sc->csum_iphlen == ip_hlen && 2086 sc->csum_flags == csum_flags) { 2087 /* 2088 * Same csum offload context as the previous packets; 2089 * just return. 2090 */ 2091 *txd_upper = sc->csum_txd_upper; 2092 *txd_lower = sc->csum_txd_lower; 2093 return 0; 2094 } 2095 2096 /* 2097 * Setup a new csum offload context. 2098 */ 2099 2100 curr_txd = sc->next_avail_tx_desc; 2101 tx_buffer = &sc->tx_buf[curr_txd]; 2102 TXD = (struct e1000_context_desc *)&sc->tx_desc_base[curr_txd]; 2103 2104 cmd = 0; 2105 2106 /* Setup of IP header checksum. 
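 * As a worked example (not in the original source): for a plain untagged
 * IPv4 frame, ehdrlen is ETHER_HDR_LEN (14) and ip_hlen is typically 20,
 * so the fields below become ipcss = 14, ipcse = htole16(14 + 20 - 1) = 33
 * and ipcso = 14 + offsetof(struct ip, ip_sum) = 24.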
*/ 2107 if (csum_flags & CSUM_IP) { 2108 /* 2109 * Start offset for header checksum calculation. 2110 * End offset for header checksum calculation. 2111 * Offset of place to put the checksum. 2112 */ 2113 TXD->lower_setup.ip_fields.ipcss = ehdrlen; 2114 TXD->lower_setup.ip_fields.ipcse = 2115 htole16(ehdrlen + ip_hlen - 1); 2116 TXD->lower_setup.ip_fields.ipcso = 2117 ehdrlen + offsetof(struct ip, ip_sum); 2118 cmd |= E1000_TXD_CMD_IP; 2119 *txd_upper |= E1000_TXD_POPTS_IXSM << 8; 2120 } 2121 hdr_len = ehdrlen + ip_hlen; 2122 2123 if (csum_flags & CSUM_TCP) { 2124 /* 2125 * Start offset for payload checksum calculation. 2126 * End offset for payload checksum calculation. 2127 * Offset of place to put the checksum. 2128 */ 2129 TXD->upper_setup.tcp_fields.tucss = hdr_len; 2130 TXD->upper_setup.tcp_fields.tucse = htole16(0); 2131 TXD->upper_setup.tcp_fields.tucso = 2132 hdr_len + offsetof(struct tcphdr, th_sum); 2133 cmd |= E1000_TXD_CMD_TCP; 2134 *txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2135 } else if (csum_flags & CSUM_UDP) { 2136 /* 2137 * Start offset for header checksum calculation. 2138 * End offset for header checksum calculation. 2139 * Offset of place to put the checksum. 2140 */ 2141 TXD->upper_setup.tcp_fields.tucss = hdr_len; 2142 TXD->upper_setup.tcp_fields.tucse = htole16(0); 2143 TXD->upper_setup.tcp_fields.tucso = 2144 hdr_len + offsetof(struct udphdr, uh_sum); 2145 *txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2146 } 2147 2148 *txd_lower = E1000_TXD_CMD_DEXT | /* Extended descr type */ 2149 E1000_TXD_DTYP_D; /* Data descr */ 2150 2151 /* Save the information for this csum offloading context */ 2152 sc->csum_ehlen = ehdrlen; 2153 sc->csum_iphlen = ip_hlen; 2154 sc->csum_flags = csum_flags; 2155 sc->csum_txd_upper = *txd_upper; 2156 sc->csum_txd_lower = *txd_lower; 2157 2158 TXD->tcp_seg_setup.data = htole32(0); 2159 TXD->cmd_and_length = 2160 htole32(E1000_TXD_CMD_IFCS | E1000_TXD_CMD_DEXT | cmd); 2161 2162 if (++curr_txd == sc->num_tx_desc) 2163 curr_txd = 0; 2164 2165 KKASSERT(sc->num_tx_desc_avail > 0); 2166 sc->num_tx_desc_avail--; 2167 2168 sc->next_avail_tx_desc = curr_txd; 2169 return 1; 2170 } 2171 2172 static int 2173 emx_txcsum_pullup(struct emx_softc *sc, struct mbuf **m0) 2174 { 2175 struct mbuf *m = *m0; 2176 struct ether_header *eh; 2177 int len; 2178 2179 sc->tx_csum_try_pullup++; 2180 2181 len = ETHER_HDR_LEN + EMX_IPVHL_SIZE; 2182 2183 if (__predict_false(!M_WRITABLE(m))) { 2184 if (__predict_false(m->m_len < ETHER_HDR_LEN)) { 2185 sc->tx_csum_drop1++; 2186 m_freem(m); 2187 *m0 = NULL; 2188 return ENOBUFS; 2189 } 2190 eh = mtod(m, struct ether_header *); 2191 2192 if (eh->ether_type == htons(ETHERTYPE_VLAN)) 2193 len += EVL_ENCAPLEN; 2194 2195 if (m->m_len < len) { 2196 sc->tx_csum_drop2++; 2197 m_freem(m); 2198 *m0 = NULL; 2199 return ENOBUFS; 2200 } 2201 return 0; 2202 } 2203 2204 if (__predict_false(m->m_len < ETHER_HDR_LEN)) { 2205 sc->tx_csum_pullup1++; 2206 m = m_pullup(m, ETHER_HDR_LEN); 2207 if (m == NULL) { 2208 sc->tx_csum_pullup1_failed++; 2209 *m0 = NULL; 2210 return ENOBUFS; 2211 } 2212 *m0 = m; 2213 } 2214 eh = mtod(m, struct ether_header *); 2215 2216 if (eh->ether_type == htons(ETHERTYPE_VLAN)) 2217 len += EVL_ENCAPLEN; 2218 2219 if (m->m_len < len) { 2220 sc->tx_csum_pullup2++; 2221 m = m_pullup(m, len); 2222 if (m == NULL) { 2223 sc->tx_csum_pullup2_failed++; 2224 *m0 = NULL; 2225 return ENOBUFS; 2226 } 2227 *m0 = m; 2228 } 2229 return 0; 2230 } 2231 2232 static void 2233 emx_txeof(struct emx_softc *sc) 2234 { 2235 struct ifnet *ifp = 
&sc->arpcom.ac_if; 2236 struct emx_txbuf *tx_buffer; 2237 int first, num_avail; 2238 2239 if (sc->tx_dd_head == sc->tx_dd_tail) 2240 return; 2241 2242 if (sc->num_tx_desc_avail == sc->num_tx_desc) 2243 return; 2244 2245 num_avail = sc->num_tx_desc_avail; 2246 first = sc->next_tx_to_clean; 2247 2248 while (sc->tx_dd_head != sc->tx_dd_tail) { 2249 int dd_idx = sc->tx_dd[sc->tx_dd_head]; 2250 struct e1000_tx_desc *tx_desc; 2251 2252 tx_desc = &sc->tx_desc_base[dd_idx]; 2253 if (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) { 2254 EMX_INC_TXDD_IDX(sc->tx_dd_head); 2255 2256 if (++dd_idx == sc->num_tx_desc) 2257 dd_idx = 0; 2258 2259 while (first != dd_idx) { 2260 logif(pkt_txclean); 2261 2262 num_avail++; 2263 2264 tx_buffer = &sc->tx_buf[first]; 2265 if (tx_buffer->m_head) { 2266 ifp->if_opackets++; 2267 bus_dmamap_unload(sc->txtag, 2268 tx_buffer->map); 2269 m_freem(tx_buffer->m_head); 2270 tx_buffer->m_head = NULL; 2271 } 2272 2273 if (++first == sc->num_tx_desc) 2274 first = 0; 2275 } 2276 } else { 2277 break; 2278 } 2279 } 2280 sc->next_tx_to_clean = first; 2281 sc->num_tx_desc_avail = num_avail; 2282 2283 if (sc->tx_dd_head == sc->tx_dd_tail) { 2284 sc->tx_dd_head = 0; 2285 sc->tx_dd_tail = 0; 2286 } 2287 2288 if (!EMX_IS_OACTIVE(sc)) { 2289 ifp->if_flags &= ~IFF_OACTIVE; 2290 2291 /* All clean, turn off the timer */ 2292 if (sc->num_tx_desc_avail == sc->num_tx_desc) 2293 ifp->if_timer = 0; 2294 } 2295 } 2296 2297 static void 2298 emx_tx_collect(struct emx_softc *sc) 2299 { 2300 struct ifnet *ifp = &sc->arpcom.ac_if; 2301 struct emx_txbuf *tx_buffer; 2302 int tdh, first, num_avail, dd_idx = -1; 2303 2304 if (sc->num_tx_desc_avail == sc->num_tx_desc) 2305 return; 2306 2307 tdh = E1000_READ_REG(&sc->hw, E1000_TDH(0)); 2308 if (tdh == sc->next_tx_to_clean) 2309 return; 2310 2311 if (sc->tx_dd_head != sc->tx_dd_tail) 2312 dd_idx = sc->tx_dd[sc->tx_dd_head]; 2313 2314 num_avail = sc->num_tx_desc_avail; 2315 first = sc->next_tx_to_clean; 2316 2317 while (first != tdh) { 2318 logif(pkt_txclean); 2319 2320 num_avail++; 2321 2322 tx_buffer = &sc->tx_buf[first]; 2323 if (tx_buffer->m_head) { 2324 ifp->if_opackets++; 2325 bus_dmamap_unload(sc->txtag, 2326 tx_buffer->map); 2327 m_freem(tx_buffer->m_head); 2328 tx_buffer->m_head = NULL; 2329 } 2330 2331 if (first == dd_idx) { 2332 EMX_INC_TXDD_IDX(sc->tx_dd_head); 2333 if (sc->tx_dd_head == sc->tx_dd_tail) { 2334 sc->tx_dd_head = 0; 2335 sc->tx_dd_tail = 0; 2336 dd_idx = -1; 2337 } else { 2338 dd_idx = sc->tx_dd[sc->tx_dd_head]; 2339 } 2340 } 2341 2342 if (++first == sc->num_tx_desc) 2343 first = 0; 2344 } 2345 sc->next_tx_to_clean = first; 2346 sc->num_tx_desc_avail = num_avail; 2347 2348 if (!EMX_IS_OACTIVE(sc)) { 2349 ifp->if_flags &= ~IFF_OACTIVE; 2350 2351 /* All clean, turn off the timer */ 2352 if (sc->num_tx_desc_avail == sc->num_tx_desc) 2353 ifp->if_timer = 0; 2354 } 2355 } 2356 2357 /* 2358 * When Link is lost sometimes there is work still in the TX ring 2359 * which will result in a watchdog, rather than allow that do an 2360 * attempted cleanup and then reinit here. Note that this has been 2361 * seens mostly with fiber adapters. 
2362 */ 2363 static void 2364 emx_tx_purge(struct emx_softc *sc) 2365 { 2366 struct ifnet *ifp = &sc->arpcom.ac_if; 2367 2368 if (!sc->link_active && ifp->if_timer) { 2369 emx_tx_collect(sc); 2370 if (ifp->if_timer) { 2371 if_printf(ifp, "Link lost, TX pending, reinit\n"); 2372 ifp->if_timer = 0; 2373 emx_init(sc); 2374 } 2375 } 2376 } 2377 2378 static int 2379 emx_newbuf(struct emx_softc *sc, struct emx_rxdata *rdata, int i, int init) 2380 { 2381 struct mbuf *m; 2382 bus_dma_segment_t seg; 2383 bus_dmamap_t map; 2384 struct emx_rxbuf *rx_buffer; 2385 int error, nseg; 2386 2387 m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR); 2388 if (m == NULL) { 2389 rdata->mbuf_cluster_failed++; 2390 if (init) { 2391 if_printf(&sc->arpcom.ac_if, 2392 "Unable to allocate RX mbuf\n"); 2393 } 2394 return (ENOBUFS); 2395 } 2396 m->m_len = m->m_pkthdr.len = MCLBYTES; 2397 2398 if (sc->max_frame_size <= MCLBYTES - ETHER_ALIGN) 2399 m_adj(m, ETHER_ALIGN); 2400 2401 error = bus_dmamap_load_mbuf_segment(rdata->rxtag, 2402 rdata->rx_sparemap, m, 2403 &seg, 1, &nseg, BUS_DMA_NOWAIT); 2404 if (error) { 2405 m_freem(m); 2406 if (init) { 2407 if_printf(&sc->arpcom.ac_if, 2408 "Unable to load RX mbuf\n"); 2409 } 2410 return (error); 2411 } 2412 2413 rx_buffer = &rdata->rx_buf[i]; 2414 if (rx_buffer->m_head != NULL) 2415 bus_dmamap_unload(rdata->rxtag, rx_buffer->map); 2416 2417 map = rx_buffer->map; 2418 rx_buffer->map = rdata->rx_sparemap; 2419 rdata->rx_sparemap = map; 2420 2421 rx_buffer->m_head = m; 2422 rx_buffer->paddr = seg.ds_addr; 2423 2424 emx_setup_rxdesc(&rdata->rx_desc[i], rx_buffer); 2425 return (0); 2426 } 2427 2428 static int 2429 emx_create_rx_ring(struct emx_softc *sc, struct emx_rxdata *rdata) 2430 { 2431 device_t dev = sc->dev; 2432 struct emx_rxbuf *rx_buffer; 2433 int i, error, rsize; 2434 2435 /* 2436 * Validate number of receive descriptors. It must not exceed 2437 * hardware maximum, and must be multiple of E1000_DBA_ALIGN. 
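 * (For instance, assuming each extended RX descriptor is 16 bytes and
 * EMX_DBA_ALIGN is 128, emx_rxd has to be a multiple of 8; anything else,
 * or a value outside [EMX_MIN_RXD, EMX_MAX_RXD], falls back to
 * EMX_DEFAULT_RXD below.)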
2438 */ 2439 if ((emx_rxd * sizeof(emx_rxdesc_t)) % EMX_DBA_ALIGN != 0 || 2440 emx_rxd > EMX_MAX_RXD || emx_rxd < EMX_MIN_RXD) { 2441 device_printf(dev, "Using %d RX descriptors instead of %d!\n", 2442 EMX_DEFAULT_RXD, emx_rxd); 2443 rdata->num_rx_desc = EMX_DEFAULT_RXD; 2444 } else { 2445 rdata->num_rx_desc = emx_rxd; 2446 } 2447 2448 /* 2449 * Allocate Receive Descriptor ring 2450 */ 2451 rsize = roundup2(rdata->num_rx_desc * sizeof(emx_rxdesc_t), 2452 EMX_DBA_ALIGN); 2453 rdata->rx_desc = bus_dmamem_coherent_any(sc->parent_dtag, 2454 EMX_DBA_ALIGN, rsize, BUS_DMA_WAITOK, 2455 &rdata->rx_desc_dtag, &rdata->rx_desc_dmap, 2456 &rdata->rx_desc_paddr); 2457 if (rdata->rx_desc == NULL) { 2458 device_printf(dev, "Unable to allocate rx_desc memory\n"); 2459 return ENOMEM; 2460 } 2461 2462 rdata->rx_buf = kmalloc(sizeof(struct emx_rxbuf) * rdata->num_rx_desc, 2463 M_DEVBUF, M_WAITOK | M_ZERO); 2464 2465 /* 2466 * Create DMA tag for rx buffers 2467 */ 2468 error = bus_dma_tag_create(sc->parent_dtag, /* parent */ 2469 1, 0, /* alignment, bounds */ 2470 BUS_SPACE_MAXADDR, /* lowaddr */ 2471 BUS_SPACE_MAXADDR, /* highaddr */ 2472 NULL, NULL, /* filter, filterarg */ 2473 MCLBYTES, /* maxsize */ 2474 1, /* nsegments */ 2475 MCLBYTES, /* maxsegsize */ 2476 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, /* flags */ 2477 &rdata->rxtag); 2478 if (error) { 2479 device_printf(dev, "Unable to allocate RX DMA tag\n"); 2480 kfree(rdata->rx_buf, M_DEVBUF); 2481 rdata->rx_buf = NULL; 2482 return error; 2483 } 2484 2485 /* 2486 * Create spare DMA map for rx buffers 2487 */ 2488 error = bus_dmamap_create(rdata->rxtag, BUS_DMA_WAITOK, 2489 &rdata->rx_sparemap); 2490 if (error) { 2491 device_printf(dev, "Unable to create spare RX DMA map\n"); 2492 bus_dma_tag_destroy(rdata->rxtag); 2493 kfree(rdata->rx_buf, M_DEVBUF); 2494 rdata->rx_buf = NULL; 2495 return error; 2496 } 2497 2498 /* 2499 * Create DMA maps for rx buffers 2500 */ 2501 for (i = 0; i < rdata->num_rx_desc; i++) { 2502 rx_buffer = &rdata->rx_buf[i]; 2503 2504 error = bus_dmamap_create(rdata->rxtag, BUS_DMA_WAITOK, 2505 &rx_buffer->map); 2506 if (error) { 2507 device_printf(dev, "Unable to create RX DMA map\n"); 2508 emx_destroy_rx_ring(sc, rdata, i); 2509 return error; 2510 } 2511 } 2512 return (0); 2513 } 2514 2515 static void 2516 emx_free_rx_ring(struct emx_softc *sc, struct emx_rxdata *rdata) 2517 { 2518 int i; 2519 2520 for (i = 0; i < rdata->num_rx_desc; i++) { 2521 struct emx_rxbuf *rx_buffer = &rdata->rx_buf[i]; 2522 2523 if (rx_buffer->m_head != NULL) { 2524 bus_dmamap_unload(rdata->rxtag, rx_buffer->map); 2525 m_freem(rx_buffer->m_head); 2526 rx_buffer->m_head = NULL; 2527 } 2528 } 2529 2530 if (rdata->fmp != NULL) 2531 m_freem(rdata->fmp); 2532 rdata->fmp = NULL; 2533 rdata->lmp = NULL; 2534 } 2535 2536 static int 2537 emx_init_rx_ring(struct emx_softc *sc, struct emx_rxdata *rdata) 2538 { 2539 int i, error; 2540 2541 /* Reset descriptor ring */ 2542 bzero(rdata->rx_desc, sizeof(emx_rxdesc_t) * rdata->num_rx_desc); 2543 2544 /* Allocate new ones. 
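 * One mbuf cluster is attached per RX descriptor via emx_newbuf() with
 * init set, so an allocation failure is reported and aborts the ring
 * initialization.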
*/ 2545 for (i = 0; i < rdata->num_rx_desc; i++) { 2546 error = emx_newbuf(sc, rdata, i, 1); 2547 if (error) 2548 return (error); 2549 } 2550 2551 /* Setup our descriptor pointers */ 2552 rdata->next_rx_desc_to_check = 0; 2553 2554 return (0); 2555 } 2556 2557 static void 2558 emx_init_rx_unit(struct emx_softc *sc) 2559 { 2560 struct ifnet *ifp = &sc->arpcom.ac_if; 2561 uint64_t bus_addr; 2562 uint32_t rctl, rxcsum, rfctl, key, reta; 2563 int i; 2564 2565 /* 2566 * Make sure receives are disabled while setting 2567 * up the descriptor ring 2568 */ 2569 rctl = E1000_READ_REG(&sc->hw, E1000_RCTL); 2570 E1000_WRITE_REG(&sc->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); 2571 2572 /* 2573 * Set the interrupt throttling rate. Value is calculated 2574 * as ITR = 1 / (INT_THROTTLE_CEIL * 256ns) 2575 */ 2576 if (sc->int_throttle_ceil) { 2577 E1000_WRITE_REG(&sc->hw, E1000_ITR, 2578 1000000000 / 256 / sc->int_throttle_ceil); 2579 } else { 2580 E1000_WRITE_REG(&sc->hw, E1000_ITR, 0); 2581 } 2582 2583 /* Use extended RX descriptor */ 2584 rfctl = E1000_RFCTL_EXTEN; 2585 2586 /* Disable accelerated ackknowledge */ 2587 if (sc->hw.mac.type == e1000_82574) 2588 rfctl |= E1000_RFCTL_ACK_DIS; 2589 2590 E1000_WRITE_REG(&sc->hw, E1000_RFCTL, rfctl); 2591 2592 /* Setup the Base and Length of the Rx Descriptor Ring */ 2593 for (i = 0; i < sc->rx_ring_inuse; ++i) { 2594 struct emx_rxdata *rdata = &sc->rx_data[i]; 2595 2596 bus_addr = rdata->rx_desc_paddr; 2597 E1000_WRITE_REG(&sc->hw, E1000_RDLEN(i), 2598 rdata->num_rx_desc * sizeof(emx_rxdesc_t)); 2599 E1000_WRITE_REG(&sc->hw, E1000_RDBAH(i), 2600 (uint32_t)(bus_addr >> 32)); 2601 E1000_WRITE_REG(&sc->hw, E1000_RDBAL(i), 2602 (uint32_t)bus_addr); 2603 } 2604 2605 /* Setup the Receive Control Register */ 2606 rctl &= ~(3 << E1000_RCTL_MO_SHIFT); 2607 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO | 2608 E1000_RCTL_RDMTS_HALF | E1000_RCTL_SECRC | 2609 (sc->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); 2610 2611 /* Make sure VLAN Filters are off */ 2612 rctl &= ~E1000_RCTL_VFE; 2613 2614 /* Don't store bad paket */ 2615 rctl &= ~E1000_RCTL_SBP; 2616 2617 /* MCLBYTES */ 2618 rctl |= E1000_RCTL_SZ_2048; 2619 2620 if (ifp->if_mtu > ETHERMTU) 2621 rctl |= E1000_RCTL_LPE; 2622 else 2623 rctl &= ~E1000_RCTL_LPE; 2624 2625 /* 2626 * Receive Checksum Offload for TCP and UDP 2627 * 2628 * Checksum offloading is also enabled if multiple receive 2629 * queue is to be supported, since we need it to figure out 2630 * packet type. 2631 */ 2632 if (ifp->if_capenable & (IFCAP_RSS | IFCAP_RXCSUM)) { 2633 rxcsum = E1000_READ_REG(&sc->hw, E1000_RXCSUM); 2634 2635 /* 2636 * NOTE: 2637 * PCSD must be enabled to enable multiple 2638 * receive queues. 2639 */ 2640 rxcsum |= E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL | 2641 E1000_RXCSUM_PCSD; 2642 E1000_WRITE_REG(&sc->hw, E1000_RXCSUM, rxcsum); 2643 } 2644 2645 /* 2646 * Configure multiple receive queue (RSS) 2647 */ 2648 if (ifp->if_capenable & IFCAP_RSS) { 2649 /* 2650 * NOTE: 2651 * When we reach here, RSS has already been disabled 2652 * in emx_stop(), so we could safely configure RSS key 2653 * and redirect table. 2654 */ 2655 2656 /* 2657 * Configure RSS key 2658 */ 2659 key = 0x5a6d5a6d; /* XXX */ 2660 for (i = 0; i < EMX_NRSSRK; ++i) 2661 E1000_WRITE_REG(&sc->hw, E1000_RSSRK(i), key); 2662 2663 /* 2664 * Configure RSS redirect table 2665 */ 2666 reta = 0x80008000; 2667 for (i = 0; i < EMX_NRETA; ++i) 2668 E1000_WRITE_REG(&sc->hw, E1000_RETA(i), reta); 2669 2670 /* 2671 * Enable multiple receive queues. 
2672 * Enable IPv4 RSS standard hash functions. 2673 * Disable RSS interrupt. 2674 */ 2675 E1000_WRITE_REG(&sc->hw, E1000_MRQC, 2676 E1000_MRQC_ENABLE_RSS_2Q | 2677 E1000_MRQC_RSS_FIELD_IPV4_TCP | 2678 E1000_MRQC_RSS_FIELD_IPV4); 2679 } 2680 2681 /* 2682 * XXX TEMPORARY WORKAROUND: on some systems with 82573 2683 * long latencies are observed, like Lenovo X60. This 2684 * change eliminates the problem, but since having positive 2685 * values in RDTR is a known source of problems on other 2686 * platforms another solution is being sought. 2687 */ 2688 if (emx_82573_workaround && sc->hw.mac.type == e1000_82573) { 2689 E1000_WRITE_REG(&sc->hw, E1000_RADV, EMX_RADV_82573); 2690 E1000_WRITE_REG(&sc->hw, E1000_RDTR, EMX_RDTR_82573); 2691 } 2692 2693 /* 2694 * Setup the HW Rx Head and Tail Descriptor Pointers 2695 */ 2696 for (i = 0; i < sc->rx_ring_inuse; ++i) { 2697 E1000_WRITE_REG(&sc->hw, E1000_RDH(i), 0); 2698 E1000_WRITE_REG(&sc->hw, E1000_RDT(i), 2699 sc->rx_data[i].num_rx_desc - 1); 2700 } 2701 2702 /* Enable Receives */ 2703 E1000_WRITE_REG(&sc->hw, E1000_RCTL, rctl); 2704 } 2705 2706 static void 2707 emx_destroy_rx_ring(struct emx_softc *sc, struct emx_rxdata *rdata, int ndesc) 2708 { 2709 struct emx_rxbuf *rx_buffer; 2710 int i; 2711 2712 /* Free Receive Descriptor ring */ 2713 if (rdata->rx_desc) { 2714 bus_dmamap_unload(rdata->rx_desc_dtag, rdata->rx_desc_dmap); 2715 bus_dmamem_free(rdata->rx_desc_dtag, rdata->rx_desc, 2716 rdata->rx_desc_dmap); 2717 bus_dma_tag_destroy(rdata->rx_desc_dtag); 2718 2719 rdata->rx_desc = NULL; 2720 } 2721 2722 if (rdata->rx_buf == NULL) 2723 return; 2724 2725 for (i = 0; i < ndesc; i++) { 2726 rx_buffer = &rdata->rx_buf[i]; 2727 2728 KKASSERT(rx_buffer->m_head == NULL); 2729 bus_dmamap_destroy(rdata->rxtag, rx_buffer->map); 2730 } 2731 bus_dmamap_destroy(rdata->rxtag, rdata->rx_sparemap); 2732 bus_dma_tag_destroy(rdata->rxtag); 2733 2734 kfree(rdata->rx_buf, M_DEVBUF); 2735 rdata->rx_buf = NULL; 2736 } 2737 2738 static void 2739 emx_rxeof(struct emx_softc *sc, int ring_idx, int count) 2740 { 2741 struct emx_rxdata *rdata = &sc->rx_data[ring_idx]; 2742 struct ifnet *ifp = &sc->arpcom.ac_if; 2743 uint32_t staterr; 2744 emx_rxdesc_t *current_desc; 2745 struct mbuf *mp; 2746 int i; 2747 struct mbuf_chain chain[MAXCPU]; 2748 2749 i = rdata->next_rx_desc_to_check; 2750 current_desc = &rdata->rx_desc[i]; 2751 staterr = le32toh(current_desc->rxd_staterr); 2752 2753 if (!(staterr & E1000_RXD_STAT_DD)) 2754 return; 2755 2756 ether_input_chain_init(chain); 2757 2758 while ((staterr & E1000_RXD_STAT_DD) && count != 0) { 2759 struct emx_rxbuf *rx_buf = &rdata->rx_buf[i]; 2760 struct mbuf *m = NULL; 2761 int eop, len; 2762 2763 logif(pkt_receive); 2764 2765 mp = rx_buf->m_head; 2766 2767 /* 2768 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT 2769 * needs to access the last received byte in the mbuf. 2770 */ 2771 bus_dmamap_sync(rdata->rxtag, rx_buf->map, 2772 BUS_DMASYNC_POSTREAD); 2773 2774 len = le16toh(current_desc->rxd_length); 2775 if (staterr & E1000_RXD_STAT_EOP) { 2776 count--; 2777 eop = 1; 2778 } else { 2779 eop = 0; 2780 } 2781 2782 if (!(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) { 2783 uint16_t vlan = 0; 2784 uint32_t mrq, rss_hash; 2785 2786 /* 2787 * Save several necessary information, 2788 * before emx_newbuf() destroy it. 
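 * (That is, the VLAN tag, MRQ and RSS hash words are read out of the
 * descriptor here, because emx_newbuf() below rewrites the descriptor.)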
2789 */ 2790 if ((staterr & E1000_RXD_STAT_VP) && eop) 2791 vlan = le16toh(current_desc->rxd_vlan); 2792 2793 mrq = le32toh(current_desc->rxd_mrq); 2794 rss_hash = le32toh(current_desc->rxd_rss); 2795 2796 EMX_RSS_DPRINTF(sc, 10, 2797 "ring%d, mrq 0x%08x, rss_hash 0x%08x\n", 2798 ring_idx, mrq, rss_hash); 2799 2800 if (emx_newbuf(sc, rdata, i, 0) != 0) { 2801 ifp->if_iqdrops++; 2802 goto discard; 2803 } 2804 2805 /* Assign correct length to the current fragment */ 2806 mp->m_len = len; 2807 2808 if (rdata->fmp == NULL) { 2809 mp->m_pkthdr.len = len; 2810 rdata->fmp = mp; /* Store the first mbuf */ 2811 rdata->lmp = mp; 2812 } else { 2813 /* 2814 * Chain mbuf's together 2815 */ 2816 rdata->lmp->m_next = mp; 2817 rdata->lmp = rdata->lmp->m_next; 2818 rdata->fmp->m_pkthdr.len += len; 2819 } 2820 2821 if (eop) { 2822 rdata->fmp->m_pkthdr.rcvif = ifp; 2823 ifp->if_ipackets++; 2824 2825 if (ifp->if_capenable & IFCAP_RXCSUM) 2826 emx_rxcsum(staterr, rdata->fmp); 2827 2828 if (staterr & E1000_RXD_STAT_VP) { 2829 rdata->fmp->m_pkthdr.ether_vlantag = 2830 vlan; 2831 rdata->fmp->m_flags |= M_VLANTAG; 2832 } 2833 m = rdata->fmp; 2834 rdata->fmp = NULL; 2835 rdata->lmp = NULL; 2836 2837 #ifdef EMX_RSS_DEBUG 2838 rdata->rx_pkts++; 2839 #endif 2840 } 2841 } else { 2842 ifp->if_ierrors++; 2843 discard: 2844 emx_setup_rxdesc(current_desc, rx_buf); 2845 if (rdata->fmp != NULL) { 2846 m_freem(rdata->fmp); 2847 rdata->fmp = NULL; 2848 rdata->lmp = NULL; 2849 } 2850 m = NULL; 2851 } 2852 2853 if (m != NULL) 2854 ether_input_chain(ifp, m, chain); 2855 2856 /* Advance our pointers to the next descriptor. */ 2857 if (++i == rdata->num_rx_desc) 2858 i = 0; 2859 2860 current_desc = &rdata->rx_desc[i]; 2861 staterr = le32toh(current_desc->rxd_staterr); 2862 } 2863 rdata->next_rx_desc_to_check = i; 2864 2865 ether_input_dispatch(chain); 2866 2867 /* Advance the E1000's Receive Queue "Tail Pointer". */ 2868 if (--i < 0) 2869 i = rdata->num_rx_desc - 1; 2870 E1000_WRITE_REG(&sc->hw, E1000_RDT(ring_idx), i); 2871 } 2872 2873 static void 2874 emx_enable_intr(struct emx_softc *sc) 2875 { 2876 lwkt_serialize_handler_enable(sc->arpcom.ac_if.if_serializer); 2877 E1000_WRITE_REG(&sc->hw, E1000_IMS, IMS_ENABLE_MASK); 2878 } 2879 2880 static void 2881 emx_disable_intr(struct emx_softc *sc) 2882 { 2883 E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff); 2884 lwkt_serialize_handler_disable(sc->arpcom.ac_if.if_serializer); 2885 } 2886 2887 /* 2888 * Bit of a misnomer, what this really means is 2889 * to enable OS management of the system... aka 2890 * to disable special hardware management features 2891 */ 2892 static void 2893 emx_get_mgmt(struct emx_softc *sc) 2894 { 2895 /* A shared code workaround */ 2896 if (sc->has_manage) { 2897 int manc2h = E1000_READ_REG(&sc->hw, E1000_MANC2H); 2898 int manc = E1000_READ_REG(&sc->hw, E1000_MANC); 2899 2900 /* disable hardware interception of ARP */ 2901 manc &= ~(E1000_MANC_ARP_EN); 2902 2903 /* enable receiving management packets to the host */ 2904 manc |= E1000_MANC_EN_MNG2HOST; 2905 #define E1000_MNG2HOST_PORT_623 (1 << 5) 2906 #define E1000_MNG2HOST_PORT_664 (1 << 6) 2907 manc2h |= E1000_MNG2HOST_PORT_623; 2908 manc2h |= E1000_MNG2HOST_PORT_664; 2909 E1000_WRITE_REG(&sc->hw, E1000_MANC2H, manc2h); 2910 2911 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc); 2912 } 2913 } 2914 2915 /* 2916 * Give control back to hardware management 2917 * controller if there is one. 
2918 */ 2919 static void 2920 emx_rel_mgmt(struct emx_softc *sc) 2921 { 2922 if (sc->has_manage) { 2923 int manc = E1000_READ_REG(&sc->hw, E1000_MANC); 2924 2925 /* re-enable hardware interception of ARP */ 2926 manc |= E1000_MANC_ARP_EN; 2927 manc &= ~E1000_MANC_EN_MNG2HOST; 2928 2929 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc); 2930 } 2931 } 2932 2933 /* 2934 * emx_get_hw_control() sets {CTRL_EXT|FWSM}:DRV_LOAD bit. 2935 * For ASF and Pass Through versions of f/w this means that 2936 * the driver is loaded. For AMT version (only with 82573) 2937 * of the f/w this means that the network i/f is open. 2938 */ 2939 static void 2940 emx_get_hw_control(struct emx_softc *sc) 2941 { 2942 uint32_t ctrl_ext, swsm; 2943 2944 /* Let firmware know the driver has taken over */ 2945 switch (sc->hw.mac.type) { 2946 case e1000_82573: 2947 swsm = E1000_READ_REG(&sc->hw, E1000_SWSM); 2948 E1000_WRITE_REG(&sc->hw, E1000_SWSM, 2949 swsm | E1000_SWSM_DRV_LOAD); 2950 break; 2951 2952 case e1000_82571: 2953 case e1000_82572: 2954 case e1000_80003es2lan: 2955 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT); 2956 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, 2957 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); 2958 break; 2959 2960 default: 2961 break; 2962 } 2963 } 2964 2965 /* 2966 * emx_rel_hw_control() resets {CTRL_EXT|FWSM}:DRV_LOAD bit. 2967 * For ASF and Pass Through versions of f/w this means that the 2968 * driver is no longer loaded. For AMT version (only with 82573) 2969 * of the f/w this means that the network i/f is closed. 2970 */ 2971 static void 2972 emx_rel_hw_control(struct emx_softc *sc) 2973 { 2974 uint32_t ctrl_ext, swsm; 2975 2976 /* Let firmware taken over control of h/w */ 2977 switch (sc->hw.mac.type) { 2978 case e1000_82573: 2979 swsm = E1000_READ_REG(&sc->hw, E1000_SWSM); 2980 E1000_WRITE_REG(&sc->hw, E1000_SWSM, 2981 swsm & ~E1000_SWSM_DRV_LOAD); 2982 break; 2983 2984 case e1000_82571: 2985 case e1000_82572: 2986 case e1000_80003es2lan: 2987 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT); 2988 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, 2989 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); 2990 break; 2991 2992 default: 2993 break; 2994 } 2995 } 2996 2997 static int 2998 emx_is_valid_eaddr(const uint8_t *addr) 2999 { 3000 char zero_addr[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 }; 3001 3002 if ((addr[0] & 1) || !bcmp(addr, zero_addr, ETHER_ADDR_LEN)) 3003 return (FALSE); 3004 3005 return (TRUE); 3006 } 3007 3008 /* 3009 * Enable PCI Wake On Lan capability 3010 */ 3011 void 3012 emx_enable_wol(device_t dev) 3013 { 3014 uint16_t cap, status; 3015 uint8_t id; 3016 3017 /* First find the capabilities pointer*/ 3018 cap = pci_read_config(dev, PCIR_CAP_PTR, 2); 3019 3020 /* Read the PM Capabilities */ 3021 id = pci_read_config(dev, cap, 1); 3022 if (id != PCIY_PMG) /* Something wrong */ 3023 return; 3024 3025 /* 3026 * OK, we have the power capabilities, 3027 * so now get the status register 3028 */ 3029 cap += PCIR_POWER_STATUS; 3030 status = pci_read_config(dev, cap, 2); 3031 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 3032 pci_write_config(dev, cap, status, 2); 3033 } 3034 3035 static void 3036 emx_update_stats(struct emx_softc *sc) 3037 { 3038 struct ifnet *ifp = &sc->arpcom.ac_if; 3039 3040 if (sc->hw.phy.media_type == e1000_media_type_copper || 3041 (E1000_READ_REG(&sc->hw, E1000_STATUS) & E1000_STATUS_LU)) { 3042 sc->stats.symerrs += E1000_READ_REG(&sc->hw, E1000_SYMERRS); 3043 sc->stats.sec += E1000_READ_REG(&sc->hw, E1000_SEC); 3044 } 3045 sc->stats.crcerrs += E1000_READ_REG(&sc->hw, E1000_CRCERRS); 3046 
sc->stats.mpc += E1000_READ_REG(&sc->hw, E1000_MPC); 3047 sc->stats.scc += E1000_READ_REG(&sc->hw, E1000_SCC); 3048 sc->stats.ecol += E1000_READ_REG(&sc->hw, E1000_ECOL); 3049 3050 sc->stats.mcc += E1000_READ_REG(&sc->hw, E1000_MCC); 3051 sc->stats.latecol += E1000_READ_REG(&sc->hw, E1000_LATECOL); 3052 sc->stats.colc += E1000_READ_REG(&sc->hw, E1000_COLC); 3053 sc->stats.dc += E1000_READ_REG(&sc->hw, E1000_DC); 3054 sc->stats.rlec += E1000_READ_REG(&sc->hw, E1000_RLEC); 3055 sc->stats.xonrxc += E1000_READ_REG(&sc->hw, E1000_XONRXC); 3056 sc->stats.xontxc += E1000_READ_REG(&sc->hw, E1000_XONTXC); 3057 sc->stats.xoffrxc += E1000_READ_REG(&sc->hw, E1000_XOFFRXC); 3058 sc->stats.xofftxc += E1000_READ_REG(&sc->hw, E1000_XOFFTXC); 3059 sc->stats.fcruc += E1000_READ_REG(&sc->hw, E1000_FCRUC); 3060 sc->stats.prc64 += E1000_READ_REG(&sc->hw, E1000_PRC64); 3061 sc->stats.prc127 += E1000_READ_REG(&sc->hw, E1000_PRC127); 3062 sc->stats.prc255 += E1000_READ_REG(&sc->hw, E1000_PRC255); 3063 sc->stats.prc511 += E1000_READ_REG(&sc->hw, E1000_PRC511); 3064 sc->stats.prc1023 += E1000_READ_REG(&sc->hw, E1000_PRC1023); 3065 sc->stats.prc1522 += E1000_READ_REG(&sc->hw, E1000_PRC1522); 3066 sc->stats.gprc += E1000_READ_REG(&sc->hw, E1000_GPRC); 3067 sc->stats.bprc += E1000_READ_REG(&sc->hw, E1000_BPRC); 3068 sc->stats.mprc += E1000_READ_REG(&sc->hw, E1000_MPRC); 3069 sc->stats.gptc += E1000_READ_REG(&sc->hw, E1000_GPTC); 3070 3071 /* For the 64-bit byte counters the low dword must be read first. */ 3072 /* Both registers clear on the read of the high dword */ 3073 3074 sc->stats.gorc += E1000_READ_REG(&sc->hw, E1000_GORCH); 3075 sc->stats.gotc += E1000_READ_REG(&sc->hw, E1000_GOTCH); 3076 3077 sc->stats.rnbc += E1000_READ_REG(&sc->hw, E1000_RNBC); 3078 sc->stats.ruc += E1000_READ_REG(&sc->hw, E1000_RUC); 3079 sc->stats.rfc += E1000_READ_REG(&sc->hw, E1000_RFC); 3080 sc->stats.roc += E1000_READ_REG(&sc->hw, E1000_ROC); 3081 sc->stats.rjc += E1000_READ_REG(&sc->hw, E1000_RJC); 3082 3083 sc->stats.tor += E1000_READ_REG(&sc->hw, E1000_TORH); 3084 sc->stats.tot += E1000_READ_REG(&sc->hw, E1000_TOTH); 3085 3086 sc->stats.tpr += E1000_READ_REG(&sc->hw, E1000_TPR); 3087 sc->stats.tpt += E1000_READ_REG(&sc->hw, E1000_TPT); 3088 sc->stats.ptc64 += E1000_READ_REG(&sc->hw, E1000_PTC64); 3089 sc->stats.ptc127 += E1000_READ_REG(&sc->hw, E1000_PTC127); 3090 sc->stats.ptc255 += E1000_READ_REG(&sc->hw, E1000_PTC255); 3091 sc->stats.ptc511 += E1000_READ_REG(&sc->hw, E1000_PTC511); 3092 sc->stats.ptc1023 += E1000_READ_REG(&sc->hw, E1000_PTC1023); 3093 sc->stats.ptc1522 += E1000_READ_REG(&sc->hw, E1000_PTC1522); 3094 sc->stats.mptc += E1000_READ_REG(&sc->hw, E1000_MPTC); 3095 sc->stats.bptc += E1000_READ_REG(&sc->hw, E1000_BPTC); 3096 3097 sc->stats.algnerrc += E1000_READ_REG(&sc->hw, E1000_ALGNERRC); 3098 sc->stats.rxerrc += E1000_READ_REG(&sc->hw, E1000_RXERRC); 3099 sc->stats.tncrs += E1000_READ_REG(&sc->hw, E1000_TNCRS); 3100 sc->stats.cexterr += E1000_READ_REG(&sc->hw, E1000_CEXTERR); 3101 sc->stats.tsctc += E1000_READ_REG(&sc->hw, E1000_TSCTC); 3102 sc->stats.tsctfc += E1000_READ_REG(&sc->hw, E1000_TSCTFC); 3103 3104 ifp->if_collisions = sc->stats.colc; 3105 3106 /* Rx Errors */ 3107 ifp->if_ierrors = sc->dropped_pkts + sc->stats.rxerrc + 3108 sc->stats.crcerrs + sc->stats.algnerrc + 3109 sc->stats.ruc + sc->stats.roc + 3110 sc->stats.mpc + sc->stats.cexterr; 3111 3112 /* Tx Errors */ 3113 ifp->if_oerrors = sc->stats.ecol + sc->stats.latecol + 3114 sc->watchdog_events; 3115 } 3116 3117 static void 3118 
emx_print_debug_info(struct emx_softc *sc) 3119 { 3120 device_t dev = sc->dev; 3121 uint8_t *hw_addr = sc->hw.hw_addr; 3122 3123 device_printf(dev, "Adapter hardware address = %p \n", hw_addr); 3124 device_printf(dev, "CTRL = 0x%x RCTL = 0x%x \n", 3125 E1000_READ_REG(&sc->hw, E1000_CTRL), 3126 E1000_READ_REG(&sc->hw, E1000_RCTL)); 3127 device_printf(dev, "Packet buffer = Tx=%dk Rx=%dk \n", 3128 ((E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff0000) >> 16),\ 3129 (E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff) ); 3130 device_printf(dev, "Flow control watermarks high = %d low = %d\n", 3131 sc->hw.fc.high_water, sc->hw.fc.low_water); 3132 device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n", 3133 E1000_READ_REG(&sc->hw, E1000_TIDV), 3134 E1000_READ_REG(&sc->hw, E1000_TADV)); 3135 device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n", 3136 E1000_READ_REG(&sc->hw, E1000_RDTR), 3137 E1000_READ_REG(&sc->hw, E1000_RADV)); 3138 device_printf(dev, "hw tdh = %d, hw tdt = %d\n", 3139 E1000_READ_REG(&sc->hw, E1000_TDH(0)), 3140 E1000_READ_REG(&sc->hw, E1000_TDT(0))); 3141 device_printf(dev, "hw rdh = %d, hw rdt = %d\n", 3142 E1000_READ_REG(&sc->hw, E1000_RDH(0)), 3143 E1000_READ_REG(&sc->hw, E1000_RDT(0))); 3144 device_printf(dev, "Num Tx descriptors avail = %d\n", 3145 sc->num_tx_desc_avail); 3146 device_printf(dev, "Tx Descriptors not avail1 = %ld\n", 3147 sc->no_tx_desc_avail1); 3148 device_printf(dev, "Tx Descriptors not avail2 = %ld\n", 3149 sc->no_tx_desc_avail2); 3150 device_printf(dev, "Std mbuf failed = %ld\n", 3151 sc->mbuf_alloc_failed); 3152 device_printf(dev, "Std mbuf cluster failed = %ld\n", 3153 sc->rx_data[0].mbuf_cluster_failed); 3154 device_printf(dev, "Driver dropped packets = %ld\n", 3155 sc->dropped_pkts); 3156 device_printf(dev, "Driver tx dma failure in encap = %ld\n", 3157 sc->no_tx_dma_setup); 3158 3159 device_printf(dev, "TXCSUM try pullup = %lu\n", 3160 sc->tx_csum_try_pullup); 3161 device_printf(dev, "TXCSUM m_pullup(eh) called = %lu\n", 3162 sc->tx_csum_pullup1); 3163 device_printf(dev, "TXCSUM m_pullup(eh) failed = %lu\n", 3164 sc->tx_csum_pullup1_failed); 3165 device_printf(dev, "TXCSUM m_pullup(eh+ip) called = %lu\n", 3166 sc->tx_csum_pullup2); 3167 device_printf(dev, "TXCSUM m_pullup(eh+ip) failed = %lu\n", 3168 sc->tx_csum_pullup2_failed); 3169 device_printf(dev, "TXCSUM non-writable(eh) dropped = %lu\n", 3170 sc->tx_csum_drop1); 3171 device_printf(dev, "TXCSUM non-writable(eh+ip) dropped = %lu\n", 3172 sc->tx_csum_drop2); 3173 } 3174 3175 static void 3176 emx_print_hw_stats(struct emx_softc *sc) 3177 { 3178 device_t dev = sc->dev; 3179 3180 device_printf(dev, "Excessive collisions = %lld\n", 3181 (long long)sc->stats.ecol); 3182 #if (DEBUG_HW > 0) /* Don't output these errors normally */ 3183 device_printf(dev, "Symbol errors = %lld\n", 3184 (long long)sc->stats.symerrs); 3185 #endif 3186 device_printf(dev, "Sequence errors = %lld\n", 3187 (long long)sc->stats.sec); 3188 device_printf(dev, "Defer count = %lld\n", 3189 (long long)sc->stats.dc); 3190 device_printf(dev, "Missed Packets = %lld\n", 3191 (long long)sc->stats.mpc); 3192 device_printf(dev, "Receive No Buffers = %lld\n", 3193 (long long)sc->stats.rnbc); 3194 /* RLEC is inaccurate on some hardware, calculate our own.
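 * (Below it is computed as ROC + RUC, i.e. oversized plus undersized
 * frames.)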
*/ 3195 device_printf(dev, "Receive Length Errors = %lld\n", 3196 ((long long)sc->stats.roc + (long long)sc->stats.ruc)); 3197 device_printf(dev, "Receive errors = %lld\n", 3198 (long long)sc->stats.rxerrc); 3199 device_printf(dev, "Crc errors = %lld\n", 3200 (long long)sc->stats.crcerrs); 3201 device_printf(dev, "Alignment errors = %lld\n", 3202 (long long)sc->stats.algnerrc); 3203 device_printf(dev, "Collision/Carrier extension errors = %lld\n", 3204 (long long)sc->stats.cexterr); 3205 device_printf(dev, "RX overruns = %ld\n", sc->rx_overruns); 3206 device_printf(dev, "watchdog timeouts = %ld\n", 3207 sc->watchdog_events); 3208 device_printf(dev, "XON Rcvd = %lld\n", 3209 (long long)sc->stats.xonrxc); 3210 device_printf(dev, "XON Xmtd = %lld\n", 3211 (long long)sc->stats.xontxc); 3212 device_printf(dev, "XOFF Rcvd = %lld\n", 3213 (long long)sc->stats.xoffrxc); 3214 device_printf(dev, "XOFF Xmtd = %lld\n", 3215 (long long)sc->stats.xofftxc); 3216 device_printf(dev, "Good Packets Rcvd = %lld\n", 3217 (long long)sc->stats.gprc); 3218 device_printf(dev, "Good Packets Xmtd = %lld\n", 3219 (long long)sc->stats.gptc); 3220 } 3221 3222 static void 3223 emx_print_nvm_info(struct emx_softc *sc) 3224 { 3225 uint16_t eeprom_data; 3226 int i, j, row = 0; 3227 3228 /* Its a bit crude, but it gets the job done */ 3229 kprintf("\nInterface EEPROM Dump:\n"); 3230 kprintf("Offset\n0x0000 "); 3231 for (i = 0, j = 0; i < 32; i++, j++) { 3232 if (j == 8) { /* Make the offset block */ 3233 j = 0; ++row; 3234 kprintf("\n0x00%x0 ",row); 3235 } 3236 e1000_read_nvm(&sc->hw, i, 1, &eeprom_data); 3237 kprintf("%04x ", eeprom_data); 3238 } 3239 kprintf("\n"); 3240 } 3241 3242 static int 3243 emx_sysctl_debug_info(SYSCTL_HANDLER_ARGS) 3244 { 3245 struct emx_softc *sc; 3246 struct ifnet *ifp; 3247 int error, result; 3248 3249 result = -1; 3250 error = sysctl_handle_int(oidp, &result, 0, req); 3251 if (error || !req->newptr) 3252 return (error); 3253 3254 sc = (struct emx_softc *)arg1; 3255 ifp = &sc->arpcom.ac_if; 3256 3257 lwkt_serialize_enter(ifp->if_serializer); 3258 3259 if (result == 1) 3260 emx_print_debug_info(sc); 3261 3262 /* 3263 * This value will cause a hex dump of the 3264 * first 32 16-bit words of the EEPROM to 3265 * the screen. 
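 * Example usage (assuming the first adapter attaches as emx0):
 *	sysctl hw.emx0.debug=2	dumps the EEPROM words
 *	sysctl hw.emx0.debug=1	prints the debug info block instead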
3266 */ 3267 if (result == 2) 3268 emx_print_nvm_info(sc); 3269 3270 lwkt_serialize_exit(ifp->if_serializer); 3271 3272 return (error); 3273 } 3274 3275 static int 3276 emx_sysctl_stats(SYSCTL_HANDLER_ARGS) 3277 { 3278 int error, result; 3279 3280 result = -1; 3281 error = sysctl_handle_int(oidp, &result, 0, req); 3282 if (error || !req->newptr) 3283 return (error); 3284 3285 if (result == 1) { 3286 struct emx_softc *sc = (struct emx_softc *)arg1; 3287 struct ifnet *ifp = &sc->arpcom.ac_if; 3288 3289 lwkt_serialize_enter(ifp->if_serializer); 3290 emx_print_hw_stats(sc); 3291 lwkt_serialize_exit(ifp->if_serializer); 3292 } 3293 return (error); 3294 } 3295 3296 static void 3297 emx_add_sysctl(struct emx_softc *sc) 3298 { 3299 #ifdef PROFILE_SERIALIZER 3300 struct ifnet *ifp = &sc->arpcom.ac_if; 3301 #endif 3302 #ifdef EMX_RSS_DEBUG 3303 char rx_pkt[32]; 3304 int i; 3305 #endif 3306 3307 sysctl_ctx_init(&sc->sysctl_ctx); 3308 sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx, 3309 SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO, 3310 device_get_nameunit(sc->dev), 3311 CTLFLAG_RD, 0, ""); 3312 if (sc->sysctl_tree == NULL) { 3313 device_printf(sc->dev, "can't add sysctl node\n"); 3314 return; 3315 } 3316 3317 SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 3318 OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, sc, 0, 3319 emx_sysctl_debug_info, "I", "Debug Information"); 3320 3321 SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 3322 OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW, sc, 0, 3323 emx_sysctl_stats, "I", "Statistics"); 3324 3325 SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 3326 OID_AUTO, "rxd", CTLFLAG_RD, 3327 &sc->rx_data[0].num_rx_desc, 0, NULL); 3328 SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 3329 OID_AUTO, "txd", CTLFLAG_RD, &sc->num_tx_desc, 0, NULL); 3330 3331 #ifdef PROFILE_SERIALIZER 3332 SYSCTL_ADD_UINT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 3333 OID_AUTO, "serializer_sleep", CTLFLAG_RW, 3334 &ifp->if_serializer->sleep_cnt, 0, NULL); 3335 SYSCTL_ADD_UINT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 3336 OID_AUTO, "serializer_tryfail", CTLFLAG_RW, 3337 &ifp->if_serializer->tryfail_cnt, 0, NULL); 3338 SYSCTL_ADD_UINT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 3339 OID_AUTO, "serializer_enter", CTLFLAG_RW, 3340 &ifp->if_serializer->enter_cnt, 0, NULL); 3341 SYSCTL_ADD_UINT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 3342 OID_AUTO, "serializer_try", CTLFLAG_RW, 3343 &ifp->if_serializer->try_cnt, 0, NULL); 3344 #endif 3345 3346 SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 3347 OID_AUTO, "int_throttle_ceil", CTLTYPE_INT|CTLFLAG_RW, 3348 sc, 0, emx_sysctl_int_throttle, "I", 3349 "interrupt throttling rate"); 3350 SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 3351 OID_AUTO, "int_tx_nsegs", CTLTYPE_INT|CTLFLAG_RW, 3352 sc, 0, emx_sysctl_int_tx_nsegs, "I", 3353 "# segments per TX interrupt"); 3354 3355 SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 3356 OID_AUTO, "rx_ring_inuse", CTLFLAG_RD, 3357 &sc->rx_ring_inuse, 0, "RX ring in use"); 3358 3359 #ifdef EMX_RSS_DEBUG 3360 SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 3361 OID_AUTO, "rss_debug", CTLFLAG_RW, &sc->rss_debug, 3362 0, "RSS debug level"); 3363 for (i = 0; i < sc->rx_ring_cnt; ++i) { 3364 ksnprintf(rx_pkt, sizeof(rx_pkt), "rx%d_pkt", i); 3365 SYSCTL_ADD_UINT(&sc->sysctl_ctx, 3366 SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, 3367 rx_pkt, CTLFLAG_RD, 3368 
&sc->rx_data[i].rx_pkts, 0, "RXed packets"); 3369 } 3370 #endif 3371 } 3372 3373 static int 3374 emx_sysctl_int_throttle(SYSCTL_HANDLER_ARGS) 3375 { 3376 struct emx_softc *sc = (void *)arg1; 3377 struct ifnet *ifp = &sc->arpcom.ac_if; 3378 int error, throttle; 3379 3380 throttle = sc->int_throttle_ceil; 3381 error = sysctl_handle_int(oidp, &throttle, 0, req); 3382 if (error || req->newptr == NULL) 3383 return error; 3384 if (throttle < 0 || throttle > 1000000000 / 256) 3385 return EINVAL; 3386 3387 if (throttle) { 3388 /* 3389 * Set the interrupt throttling rate in 256ns increments, 3390 * recalculate sysctl value assignment to get exact frequency. 3391 */ 3392 throttle = 1000000000 / 256 / throttle; 3393 3394 /* Upper 16bits of ITR is reserved and should be zero */ 3395 if (throttle & 0xffff0000) 3396 return EINVAL; 3397 } 3398 3399 lwkt_serialize_enter(ifp->if_serializer); 3400 3401 if (throttle) 3402 sc->int_throttle_ceil = 1000000000 / 256 / throttle; 3403 else 3404 sc->int_throttle_ceil = 0; 3405 3406 if (ifp->if_flags & IFF_RUNNING) 3407 E1000_WRITE_REG(&sc->hw, E1000_ITR, throttle); 3408 3409 lwkt_serialize_exit(ifp->if_serializer); 3410 3411 if (bootverbose) { 3412 if_printf(ifp, "Interrupt moderation set to %d/sec\n", 3413 sc->int_throttle_ceil); 3414 } 3415 return 0; 3416 } 3417 3418 static int 3419 emx_sysctl_int_tx_nsegs(SYSCTL_HANDLER_ARGS) 3420 { 3421 struct emx_softc *sc = (void *)arg1; 3422 struct ifnet *ifp = &sc->arpcom.ac_if; 3423 int error, segs; 3424 3425 segs = sc->tx_int_nsegs; 3426 error = sysctl_handle_int(oidp, &segs, 0, req); 3427 if (error || req->newptr == NULL) 3428 return error; 3429 if (segs <= 0) 3430 return EINVAL; 3431 3432 lwkt_serialize_enter(ifp->if_serializer); 3433 3434 /* 3435 * Don't allow int_tx_nsegs to become: 3436 * o Less the oact_tx_desc 3437 * o Too large that no TX desc will cause TX interrupt to 3438 * be generated (OACTIVE will never recover) 3439 * o Too small that will cause tx_dd[] overflow 3440 */ 3441 if (segs < sc->oact_tx_desc || 3442 segs >= sc->num_tx_desc - sc->oact_tx_desc || 3443 segs < sc->num_tx_desc / EMX_TXDD_SAFE) { 3444 error = EINVAL; 3445 } else { 3446 error = 0; 3447 sc->tx_int_nsegs = segs; 3448 } 3449 3450 lwkt_serialize_exit(ifp->if_serializer); 3451 3452 return error; 3453 } 3454 3455 static int 3456 emx_dma_alloc(struct emx_softc *sc) 3457 { 3458 int error, i; 3459 3460 /* 3461 * Create top level busdma tag 3462 */ 3463 error = bus_dma_tag_create(NULL, 1, 0, 3464 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 3465 NULL, NULL, 3466 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 3467 0, &sc->parent_dtag); 3468 if (error) { 3469 device_printf(sc->dev, "could not create top level DMA tag\n"); 3470 return error; 3471 } 3472 3473 /* 3474 * Allocate transmit descriptors ring and buffers 3475 */ 3476 error = emx_create_tx_ring(sc); 3477 if (error) { 3478 device_printf(sc->dev, "Could not setup transmit structures\n"); 3479 return error; 3480 } 3481 3482 /* 3483 * Allocate receive descriptors ring and buffers 3484 */ 3485 for (i = 0; i < sc->rx_ring_cnt; ++i) { 3486 error = emx_create_rx_ring(sc, &sc->rx_data[i]); 3487 if (error) { 3488 device_printf(sc->dev, 3489 "Could not setup receive structures\n"); 3490 return error; 3491 } 3492 } 3493 return 0; 3494 } 3495 3496 static void 3497 emx_dma_free(struct emx_softc *sc) 3498 { 3499 int i; 3500 3501 emx_destroy_tx_ring(sc, sc->num_tx_desc); 3502 3503 for (i = 0; i < sc->rx_ring_cnt; ++i) { 3504 emx_destroy_rx_ring(sc, &sc->rx_data[i], 3505 sc->rx_data[i].num_rx_desc); 3506 } 
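/*
 * NOTE: the TX and RX rings (and their per-ring DMA tags) are torn down
 * above before the parent tag is destroyed below.
 */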
3507 3508 /* Free top level busdma tag */ 3509 if (sc->parent_dtag != NULL) 3510 bus_dma_tag_destroy(sc->parent_dtag); 3511 } 3512
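/*
 * Illustrative sketch, not part of the original driver (hence the #if 0):
 * how a requested interrupt ceiling maps to the ITR register value used in
 * emx_init_rx_unit() and emx_sysctl_int_throttle().  The register holds the
 * minimum gap between interrupts in 256ns units, so a ceiling of e.g.
 * 10000 interrupts/s becomes 1000000000 / 256 / 10000 = 390; the driver then
 * recomputes the ceiling from that register value (1000000000 / 256 / 390 is
 * roughly 10016/s), so the sysctl reports the rate actually programmed.
 */
#if 0
static uint32_t
emx_example_itr_from_ceil(int ints_per_sec)
{
	if (ints_per_sec == 0)		/* 0 disables throttling */
		return (0);
	return (1000000000 / 256 / ints_per_sec);
}
#endif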