/*
 * Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>.  All rights reserved.
 *
 * Copyright (c) 2001-2008, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * Copyright (c) 2005 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_polling.h"
#include "opt_serializer.h"
#include "opt_rss.h"
#include "opt_emx.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/serialize2.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/toeplitz.h>
#include <net/toeplitz2.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

#include <dev/netif/ig_hal/e1000_api.h>
#include <dev/netif/ig_hal/e1000_82571.h>
#include <dev/netif/emx/if_emx.h>

#ifdef EMX_RSS_DEBUG
#define EMX_RSS_DPRINTF(sc, lvl, fmt, ...) \
do { \
	if (sc->rss_debug >= lvl) \
		if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \
} while (0)
#else	/* !EMX_RSS_DEBUG */
#define EMX_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
#endif	/* EMX_RSS_DEBUG */

#define EMX_NAME	"Intel(R) PRO/1000 "

#define EMX_DEVICE(id)	\
	{ EMX_VENDOR_ID, E1000_DEV_ID_##id, EMX_NAME #id }
#define EMX_DEVICE_NULL	{ 0, 0, NULL }

static const struct emx_device {
	uint16_t	vid;
	uint16_t	did;
	const char	*desc;
} emx_devices[] = {
	EMX_DEVICE(82571EB_COPPER),
	EMX_DEVICE(82571EB_FIBER),
	EMX_DEVICE(82571EB_SERDES),
	EMX_DEVICE(82571EB_SERDES_DUAL),
	EMX_DEVICE(82571EB_SERDES_QUAD),
	EMX_DEVICE(82571EB_QUAD_COPPER),
	EMX_DEVICE(82571EB_QUAD_COPPER_BP),
	EMX_DEVICE(82571EB_QUAD_COPPER_LP),
	EMX_DEVICE(82571EB_QUAD_FIBER),
	EMX_DEVICE(82571PT_QUAD_COPPER),

	EMX_DEVICE(82572EI_COPPER),
	EMX_DEVICE(82572EI_FIBER),
	EMX_DEVICE(82572EI_SERDES),
	EMX_DEVICE(82572EI),

	EMX_DEVICE(82573E),
	EMX_DEVICE(82573E_IAMT),
	EMX_DEVICE(82573L),

	EMX_DEVICE(80003ES2LAN_COPPER_SPT),
	EMX_DEVICE(80003ES2LAN_SERDES_SPT),
	EMX_DEVICE(80003ES2LAN_COPPER_DPT),
	EMX_DEVICE(80003ES2LAN_SERDES_DPT),

	EMX_DEVICE(82574L),

	/* required last entry */
	EMX_DEVICE_NULL
};
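
/*
 * Each entry above is generated by the EMX_DEVICE() macro; for example
 * EMX_DEVICE(82571EB_COPPER) expands to
 * { EMX_VENDOR_ID, E1000_DEV_ID_82571EB_COPPER,
 *   "Intel(R) PRO/1000 82571EB_COPPER" }.
 */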

static int	emx_probe(device_t);
static int	emx_attach(device_t);
static int	emx_detach(device_t);
static int	emx_shutdown(device_t);
static int	emx_suspend(device_t);
static int	emx_resume(device_t);

static void	emx_init(void *);
static void	emx_stop(struct emx_softc *);
static int	emx_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	emx_start(struct ifnet *);
#ifdef DEVICE_POLLING
static void	emx_poll(struct ifnet *, enum poll_cmd, int);
#endif
static void	emx_watchdog(struct ifnet *);
static void	emx_media_status(struct ifnet *, struct ifmediareq *);
static int	emx_media_change(struct ifnet *);
static void	emx_timer(void *);
static void	emx_serialize(struct ifnet *, enum ifnet_serialize);
static void	emx_deserialize(struct ifnet *, enum ifnet_serialize);
static int	emx_tryserialize(struct ifnet *, enum ifnet_serialize);
#ifdef INVARIANTS
static void	emx_serialize_assert(struct ifnet *, enum ifnet_serialize,
		    boolean_t);
#endif

static void	emx_intr(void *);
static void	emx_rxeof(struct emx_softc *, int, int);
static void	emx_txeof(struct emx_softc *);
static void	emx_tx_collect(struct emx_softc *);
static void	emx_tx_purge(struct emx_softc *);
static void	emx_enable_intr(struct emx_softc *);
static void	emx_disable_intr(struct emx_softc *);

static int	emx_dma_alloc(struct emx_softc *);
static void	emx_dma_free(struct emx_softc *);
static void	emx_init_tx_ring(struct emx_softc *);
static int	emx_init_rx_ring(struct emx_softc *, struct emx_rxdata *);
static void	emx_free_rx_ring(struct emx_softc *, struct emx_rxdata *);
static int	emx_create_tx_ring(struct emx_softc *);
static int	emx_create_rx_ring(struct emx_softc *, struct emx_rxdata *);
static void	emx_destroy_tx_ring(struct emx_softc *, int);
static void	emx_destroy_rx_ring(struct emx_softc *,
		    struct emx_rxdata *, int);
static int	emx_newbuf(struct emx_softc *, struct emx_rxdata *, int, int);
static int	emx_encap(struct emx_softc *, struct mbuf **);
static int	emx_txcsum_pullup(struct emx_softc *, struct mbuf **);
static int	emx_txcsum(struct emx_softc *, struct mbuf *,
		    uint32_t *, uint32_t *);

static int	emx_is_valid_eaddr(const uint8_t *);
static int	emx_hw_init(struct emx_softc *);
static void	emx_setup_ifp(struct emx_softc *);
static void	emx_init_tx_unit(struct emx_softc *);
static void	emx_init_rx_unit(struct emx_softc *);
static void	emx_update_stats(struct emx_softc *);
static void	emx_set_promisc(struct emx_softc *);
static void	emx_disable_promisc(struct emx_softc *);
static void	emx_set_multi(struct emx_softc *);
static void	emx_update_link_status(struct emx_softc *);
static void	emx_smartspeed(struct emx_softc *);

static void	emx_print_debug_info(struct emx_softc *);
static void	emx_print_nvm_info(struct emx_softc *);
static void	emx_print_hw_stats(struct emx_softc *);

static int	emx_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_int_throttle(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_int_tx_nsegs(SYSCTL_HANDLER_ARGS);
static void	emx_add_sysctl(struct emx_softc *);

static void	emx_serialize_skipmain(struct emx_softc *);
static void	emx_deserialize_skipmain(struct emx_softc *);
static int	emx_tryserialize_skipmain(struct emx_softc *);

/* Management and WOL Support */
static void	emx_get_mgmt(struct emx_softc *);
static void	emx_rel_mgmt(struct emx_softc *);
static void	emx_get_hw_control(struct emx_softc *);
static void	emx_rel_hw_control(struct emx_softc *);
static void	emx_enable_wol(device_t);

static device_method_t emx_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		emx_probe),
	DEVMETHOD(device_attach,	emx_attach),
	DEVMETHOD(device_detach,	emx_detach),
	DEVMETHOD(device_shutdown,	emx_shutdown),
	DEVMETHOD(device_suspend,	emx_suspend),
	DEVMETHOD(device_resume,	emx_resume),
	{ 0, 0 }
};

static driver_t emx_driver = {
	"emx",
	emx_methods,
	sizeof(struct emx_softc),
};

static devclass_t emx_devclass;

DECLARE_DUMMY_MODULE(if_emx);
MODULE_DEPEND(emx, ig_hal, 1, 1, 1);
DRIVER_MODULE(if_emx, pci, emx_driver, emx_devclass, 0, 0);

/*
 * Tunables
 */
static int	emx_int_throttle_ceil = EMX_DEFAULT_ITR;
static int	emx_rxd = EMX_DEFAULT_RXD;
static int	emx_txd = EMX_DEFAULT_TXD;
static int	emx_smart_pwr_down = FALSE;

/* Controls whether promiscuous also shows bad packets */
static int	emx_debug_sbp = FALSE;

static int	emx_82573_workaround = TRUE;

TUNABLE_INT("hw.emx.int_throttle_ceil", &emx_int_throttle_ceil);
TUNABLE_INT("hw.emx.rxd", &emx_rxd);
TUNABLE_INT("hw.emx.txd", &emx_txd);
TUNABLE_INT("hw.emx.smart_pwr_down", &emx_smart_pwr_down);
TUNABLE_INT("hw.emx.sbp", &emx_debug_sbp);
TUNABLE_INT("hw.emx.82573_workaround", &emx_82573_workaround);

/* Global used in WOL setup with multiport cards */
static int	emx_global_quad_port_a = 0;

/* Set this to one to display debug statistics */
static int	emx_display_debug_stats = 0;

#if !defined(KTR_IF_EMX)
#define KTR_IF_EMX	KTR_ALL
#endif
KTR_INFO_MASTER(if_emx);
KTR_INFO(KTR_IF_EMX, if_emx, intr_beg, 0, "intr begin", 0);
KTR_INFO(KTR_IF_EMX, if_emx, intr_end, 1, "intr end", 0);
KTR_INFO(KTR_IF_EMX, if_emx, pkt_receive, 4, "rx packet", 0);
KTR_INFO(KTR_IF_EMX, if_emx, pkt_txqueue, 5, "tx packet", 0);
KTR_INFO(KTR_IF_EMX, if_emx, pkt_txclean, 6, "tx clean", 0);
#define logif(name)	KTR_LOG(if_emx_ ## name)

static __inline void
emx_setup_rxdesc(emx_rxdesc_t *rxd, const struct emx_rxbuf *rxbuf)
{
	rxd->rxd_bufaddr = htole64(rxbuf->paddr);
	/* DD bit must be cleared */
	rxd->rxd_staterr = 0;
}

static __inline void
emx_rxcsum(uint32_t staterr, struct mbuf *mp)
{
	/* Ignore Checksum bit is set */
	if (staterr & E1000_RXD_STAT_IXSM)
		return;

	if ((staterr & (E1000_RXD_STAT_IPCS | E1000_RXDEXT_STATERR_IPE)) ==
	    E1000_RXD_STAT_IPCS)
		mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;

	if ((staterr & (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) ==
	    E1000_RXD_STAT_TCPCS) {
		mp->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
					   CSUM_PSEUDO_HDR |
					   CSUM_FRAG_NOT_CHECKED;
		mp->m_pkthdr.csum_data = htons(0xffff);
	}
}

static __inline struct pktinfo *
emx_rssinfo(struct mbuf *m, struct pktinfo *pi,
	    uint32_t mrq, uint32_t hash, uint32_t staterr)
{
	switch (mrq & EMX_RXDMRQ_RSSTYPE_MASK) {
	case EMX_RXDMRQ_IPV4_TCP:
		pi->pi_netisr = NETISR_IP;
		pi->pi_flags = 0;
		pi->pi_l3proto = IPPROTO_TCP;
		break;

	case EMX_RXDMRQ_IPV6_TCP:
		pi->pi_netisr = NETISR_IPV6;
		pi->pi_flags = 0;
		pi->pi_l3proto = IPPROTO_TCP;
		break;

	case EMX_RXDMRQ_IPV4:
		if (staterr & E1000_RXD_STAT_IXSM)
			return NULL;

		if ((staterr &
		     (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) ==
		    E1000_RXD_STAT_TCPCS) {
			pi->pi_netisr = NETISR_IP;
			pi->pi_flags = 0;
			pi->pi_l3proto = IPPROTO_UDP;
			break;
		}
		/* FALL THROUGH */
	default:
		return NULL;
	}

	m->m_flags |= M_HASH;
	m->m_pkthdr.hash = toeplitz_hash(hash);
	return pi;
}

static int
emx_probe(device_t dev)
{
	const struct emx_device *d;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);

	for (d = emx_devices; d->desc != NULL; ++d) {
		if (vid == d->vid && did == d->did) {
			device_set_desc(dev, d->desc);
			device_set_async_attach(dev, TRUE);
			return 0;
		}
	}
	return ENXIO;
}
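
/*
 * Device attach: set up the serializers, map the register BAR, allocate
 * the interrupt, initialize the shared e1000 code, allocate the RX/TX
 * rings, read the MAC address from the NVM and register the ifnet.
 */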
static int
emx_attach(device_t dev)
{
	struct emx_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error = 0, i;
	uint16_t eeprom_data, device_id;

	lwkt_serialize_init(&sc->main_serialize);
	lwkt_serialize_init(&sc->tx_serialize);
	for (i = 0; i < EMX_NRX_RING; ++i)
		lwkt_serialize_init(&sc->rx_data[i].rx_serialize);

	i = 0;
	sc->serializes[i++] = &sc->main_serialize;
	sc->serializes[i++] = &sc->tx_serialize;
	sc->serializes[i++] = &sc->rx_data[0].rx_serialize;
	sc->serializes[i++] = &sc->rx_data[1].rx_serialize;
	KKASSERT(i == EMX_NSERIALIZE);

	callout_init(&sc->timer);

	sc->dev = sc->osdep.dev = dev;

	/*
	 * Determine hardware and mac type
	 */
	sc->hw.vendor_id = pci_get_vendor(dev);
	sc->hw.device_id = pci_get_device(dev);
	sc->hw.revision_id = pci_get_revid(dev);
	sc->hw.subsystem_vendor_id = pci_get_subvendor(dev);
	sc->hw.subsystem_device_id = pci_get_subdevice(dev);

	if (e1000_set_mac_type(&sc->hw))
		return ENXIO;

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate IO memory
	 */
	sc->memory_rid = EMX_BAR_MEM;
	sc->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
					    &sc->memory_rid, RF_ACTIVE);
	if (sc->memory == NULL) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		error = ENXIO;
		goto fail;
	}
	sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->memory);
	sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->memory);

	/* XXX This is quite goofy, it is not actually used */
	sc->hw.hw_addr = (uint8_t *)&sc->osdep.mem_bus_space_handle;

	/*
	 * Allocate interrupt
	 */
	sc->intr_rid = 0;
	sc->intr_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->intr_rid,
					      RF_SHAREABLE | RF_ACTIVE);
	if (sc->intr_res == NULL) {
		device_printf(dev, "Unable to allocate bus resource: "
		    "interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Save PCI command register for Shared Code */
	sc->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	sc->hw.back = &sc->osdep;

	/* Do Shared Code initialization */
	if (e1000_setup_init_funcs(&sc->hw, TRUE)) {
		device_printf(dev, "Setup of Shared code failed\n");
		error = ENXIO;
		goto fail;
	}
	e1000_get_bus_info(&sc->hw);

	sc->hw.mac.autoneg = EMX_DO_AUTO_NEG;
	sc->hw.phy.autoneg_wait_to_complete = FALSE;
	sc->hw.phy.autoneg_advertised = EMX_AUTONEG_ADV_DEFAULT;

	/*
	 * Interrupt throttle rate
	 */
	if (emx_int_throttle_ceil == 0) {
		sc->int_throttle_ceil = 0;
	} else {
		int throttle = emx_int_throttle_ceil;

		if (throttle < 0)
			throttle = EMX_DEFAULT_ITR;

		/* Recalculate the tunable value to get the exact frequency. */
		throttle = 1000000000 / 256 / throttle;

		/* Upper 16bits of ITR is reserved and should be zero */
		if (throttle & 0xffff0000)
			throttle = 1000000000 / 256 / EMX_DEFAULT_ITR;

		sc->int_throttle_ceil = 1000000000 / 256 / throttle;
	}
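
	/*
	 * Worked example (tunable value assumed for illustration): with
	 * hw.emx.int_throttle_ceil set to 10000 interrupts/s,
	 * throttle = 1000000000 / 256 / 10000 = 390 (in 256ns units,
	 * i.e. ~100us between interrupts), and the ceiling is recomputed
	 * as 1000000000 / 256 / 390 = 10016, the rate the hardware can
	 * actually be programmed for.
	 */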

	e1000_init_script_state_82541(&sc->hw, TRUE);
	e1000_set_tbi_compatibility_82543(&sc->hw, TRUE);

	/* Copper options */
	if (sc->hw.phy.media_type == e1000_media_type_copper) {
		sc->hw.phy.mdix = EMX_AUTO_ALL_MODES;
		sc->hw.phy.disable_polarity_correction = FALSE;
		sc->hw.phy.ms_type = EMX_MASTER_SLAVE;
	}

	/* Set the frame limits assuming standard ethernet sized frames. */
	sc->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
	sc->min_frame_size = ETHER_MIN_LEN;

	/* This controls when hardware reports transmit completion status. */
	sc->hw.mac.report_tx_early = 1;

#ifdef RSS
	/* Calculate # of RX rings */
	if (ncpus > 1)
		sc->rx_ring_cnt = EMX_NRX_RING;
	else
#endif
		sc->rx_ring_cnt = 1;
	sc->rx_ring_inuse = sc->rx_ring_cnt;

	/* Allocate RX/TX rings' busdma(9) stuffs */
	error = emx_dma_alloc(sc);
	if (error)
		goto fail;

	/* Make sure we have a good EEPROM before we read from it */
	if (e1000_validate_nvm_checksum(&sc->hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state; call it again.
		 * If it fails a second time it is a real issue.
		 */
		if (e1000_validate_nvm_checksum(&sc->hw) < 0) {
			device_printf(dev,
			    "The EEPROM Checksum Is Not Valid\n");
			error = EIO;
			goto fail;
		}
	}

	/* Initialize the hardware */
	error = emx_hw_init(sc);
	if (error) {
		device_printf(dev, "Unable to initialize the hardware\n");
		goto fail;
	}

	/* Copy the permanent MAC address out of the EEPROM */
	if (e1000_read_mac_addr(&sc->hw) < 0) {
		device_printf(dev, "EEPROM read error while reading MAC"
		    " address\n");
		error = EIO;
		goto fail;
	}
	if (!emx_is_valid_eaddr(sc->hw.mac.addr)) {
		device_printf(dev, "Invalid MAC address\n");
		error = EIO;
		goto fail;
	}

	/* Manually turn off all interrupts */
	E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff);

	/* Setup OS specific network interface */
	emx_setup_ifp(sc);

	/* Add sysctl tree; this must be done after emx_setup_ifp() */
	emx_add_sysctl(sc);

	/* Initialize statistics */
	emx_update_stats(sc);

	sc->hw.mac.get_link_status = 1;
	emx_update_link_status(sc);

	/* Indicate SOL/IDER usage */
	if (e1000_check_reset_block(&sc->hw)) {
		device_printf(dev,
		    "PHY reset is blocked due to SOL/IDER session.\n");
	}

	/* Determine if we have to control management hardware */
	sc->has_manage = e1000_enable_mng_pass_thru(&sc->hw);

	/*
	 * Setup Wake-on-Lan
	 */
	switch (sc->hw.mac.type) {
	case e1000_82571:
	case e1000_80003es2lan:
		if (sc->hw.bus.func == 1) {
			e1000_read_nvm(&sc->hw,
			    NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
		} else {
			e1000_read_nvm(&sc->hw,
			    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		}
		eeprom_data &= EMX_EEPROM_APME;
		break;

	default:
		/* APME bit in EEPROM is mapped to WUC.APME */
		eeprom_data =
		    E1000_READ_REG(&sc->hw, E1000_WUC) & E1000_WUC_APME;
		break;
	}
	if (eeprom_data)
		sc->wol = E1000_WUFC_MAG;

	/*
	 * We have the eeprom settings, now apply the special cases
	 * where the eeprom may be wrong or the board won't support
	 * wake on lan on a particular port
	 */
	device_id = pci_get_device(dev);
	switch (device_id) {
	case E1000_DEV_ID_82571EB_FIBER:
		/*
		 * Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting
		 */
		if (E1000_READ_REG(&sc->hw, E1000_STATUS) &
		    E1000_STATUS_FUNC_1)
			sc->wol = 0;
		break;

	case E1000_DEV_ID_82571EB_QUAD_COPPER:
	case E1000_DEV_ID_82571EB_QUAD_FIBER:
	case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
		/* if quad port sc, disable WoL on all but port A */
		if (emx_global_quad_port_a != 0)
			sc->wol = 0;
		/* Reset for multiple quad port adapters */
		if (++emx_global_quad_port_a == 4)
			emx_global_quad_port_a = 0;
		break;
	}

	/* XXX disable wol */
	sc->wol = 0;

	sc->spare_tx_desc = EMX_TX_SPARE;

	/*
	 * Keep following relationship between spare_tx_desc, oact_tx_desc
	 * and tx_int_nsegs:
	 * (spare_tx_desc + EMX_TX_RESERVED) <=
	 * oact_tx_desc <= EMX_TX_OACTIVE_MAX <= tx_int_nsegs
	 */
	sc->oact_tx_desc = sc->num_tx_desc / 8;
	if (sc->oact_tx_desc > EMX_TX_OACTIVE_MAX)
		sc->oact_tx_desc = EMX_TX_OACTIVE_MAX;
	if (sc->oact_tx_desc < sc->spare_tx_desc + EMX_TX_RESERVED)
		sc->oact_tx_desc = sc->spare_tx_desc + EMX_TX_RESERVED;

	sc->tx_int_nsegs = sc->num_tx_desc / 16;
	if (sc->tx_int_nsegs < sc->oact_tx_desc)
		sc->tx_int_nsegs = sc->oact_tx_desc;

	error = bus_setup_intr(dev, sc->intr_res, INTR_MPSAFE, emx_intr, sc,
	    &sc->intr_tag, &sc->main_serialize);
	if (error) {
		device_printf(dev, "Failed to register interrupt handler");
		ether_ifdetach(&sc->arpcom.ac_if);
		goto fail;
	}

	ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->intr_res));
	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
	return (0);
fail:
	emx_detach(dev);
	return (error);
}

static int
emx_detach(device_t dev)
{
	struct emx_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		ifnet_serialize_all(ifp);

		emx_stop(sc);

		e1000_phy_hw_reset(&sc->hw);

		emx_rel_mgmt(sc);

		if (sc->hw.mac.type == e1000_82573 &&
		    e1000_check_mng_mode(&sc->hw))
			emx_rel_hw_control(sc);

		if (sc->wol) {
			E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN);
			E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol);
			emx_enable_wol(dev);
		}

		bus_teardown_intr(dev, sc->intr_res, sc->intr_tag);

		ifnet_deserialize_all(ifp);

		ether_ifdetach(ifp);
	}
	bus_generic_detach(dev);

	if (sc->intr_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->intr_rid,
				     sc->intr_res);
	}

	if (sc->memory != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->memory_rid,
				     sc->memory);
	}

	emx_dma_free(sc);

	/* Free sysctl tree */
	if (sc->sysctl_tree != NULL)
		sysctl_ctx_free(&sc->sysctl_ctx);

	return (0);
}

static int
emx_shutdown(device_t dev)
{
	return emx_suspend(dev);
}

static int
emx_suspend(device_t dev)
{
	struct emx_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);

	emx_stop(sc);

	emx_rel_mgmt(sc);

	if (sc->hw.mac.type == e1000_82573 &&
	    e1000_check_mng_mode(&sc->hw))
		emx_rel_hw_control(sc);

	if (sc->wol) {
		E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN);
		E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol);
		emx_enable_wol(dev);
	}

	ifnet_deserialize_all(ifp);

	return bus_generic_suspend(dev);
}

static int
emx_resume(device_t dev)
{
	struct emx_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);

	emx_init(sc);
	emx_get_mgmt(sc);
	if_devstart(ifp);

	ifnet_deserialize_all(ifp);

	return bus_generic_resume(dev);
}

static void
emx_start(struct ifnet *ifp)
{
	struct emx_softc *sc = ifp->if_softc;
	struct mbuf *m_head;

	ASSERT_SERIALIZED(&sc->tx_serialize);

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	if (!sc->link_active) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	while (!ifq_is_empty(&ifp->if_snd)) {
		/* Do we at least have the minimal number of descriptors? */
		if (EMX_IS_OACTIVE(sc)) {
			emx_tx_collect(sc);
			if (EMX_IS_OACTIVE(sc)) {
				ifp->if_flags |= IFF_OACTIVE;
				sc->no_tx_desc_avail1++;
				break;
			}
		}

		logif(pkt_txqueue);
		m_head = ifq_dequeue(&ifp->if_snd, NULL);
		if (m_head == NULL)
			break;

		if (emx_encap(sc, &m_head)) {
			ifp->if_oerrors++;
			emx_tx_collect(sc);
			continue;
		}

		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);

		/* Set timeout in case hardware has problems transmitting. */
		ifp->if_timer = EMX_TX_TIMEOUT;
	}
}

static int
emx_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct emx_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	uint16_t eeprom_data = 0;
	int max_frame_size, mask, reinit;
	int error = 0;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	switch (command) {
	case SIOCSIFMTU:
		switch (sc->hw.mac.type) {
		case e1000_82573:
			/*
			 * 82573 only supports jumbo frames
			 * if ASPM is disabled.
			 */
			e1000_read_nvm(&sc->hw, NVM_INIT_3GIO_3, 1,
			    &eeprom_data);
			if (eeprom_data & NVM_WORD1A_ASPM_MASK) {
				max_frame_size = ETHER_MAX_LEN;
				break;
			}
			/* FALL THROUGH */

		/* Limit Jumbo Frame size */
		case e1000_82571:
		case e1000_82572:
		case e1000_82574:
		case e1000_80003es2lan:
			max_frame_size = 9234;
			break;

		default:
			max_frame_size = MAX_JUMBO_FRAME_SIZE;
			break;
		}
		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
		    ETHER_CRC_LEN) {
			error = EINVAL;
			break;
		}

		ifp->if_mtu = ifr->ifr_mtu;
		sc->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN +
				     ETHER_CRC_LEN;

		if (ifp->if_flags & IFF_RUNNING)
			emx_init(sc);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_flags & IFF_RUNNING)) {
				if ((ifp->if_flags ^ sc->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					emx_disable_promisc(sc);
					emx_set_promisc(sc);
				}
			} else {
				emx_init(sc);
			}
		} else if (ifp->if_flags & IFF_RUNNING) {
			emx_stop(sc);
		}
		sc->if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING) {
			emx_disable_intr(sc);
			emx_set_multi(sc);
#ifdef DEVICE_POLLING
			if (!(ifp->if_flags & IFF_POLLING))
#endif
				emx_enable_intr(sc);
		}
		break;

	case SIOCSIFMEDIA:
		/* Check SOL/IDER usage */
		if (e1000_check_reset_block(&sc->hw)) {
			device_printf(sc->dev, "Media change is"
			    " blocked due to SOL/IDER session.\n");
			break;
		}
		/* FALL THROUGH */

	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;

	case SIOCSIFCAP:
		reinit = 0;
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable ^= (mask & IFCAP_HWCSUM);
			reinit = 1;
		}
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		if (mask & IFCAP_RSS) {
			ifp->if_capenable ^= IFCAP_RSS;
			reinit = 1;
		}
		if (reinit && (ifp->if_flags & IFF_RUNNING))
			emx_init(sc);
		break;

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return (error);
}

static void
emx_watchdog(struct ifnet *ifp)
{
	struct emx_softc *sc = ifp->if_softc;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	/*
	 * The timer is set to 5 every time start queues a packet.
	 * Then txeof keeps resetting it as long as it cleans at
	 * least one descriptor.
	 * Finally, anytime all descriptors are clean the timer is
	 * set to 0.
	 */

	if (E1000_READ_REG(&sc->hw, E1000_TDT(0)) ==
	    E1000_READ_REG(&sc->hw, E1000_TDH(0))) {
		/*
		 * If we reach here, all TX jobs are completed and
		 * the TX engine should have been idled for some time.
		 * We don't need to call if_devstart() here.
		 */
		ifp->if_flags &= ~IFF_OACTIVE;
		ifp->if_timer = 0;
		return;
	}

	/*
	 * If we are in this routine because of pause frames, then
	 * don't reset the hardware.
	 */
	if (E1000_READ_REG(&sc->hw, E1000_STATUS) & E1000_STATUS_TXOFF) {
		ifp->if_timer = EMX_TX_TIMEOUT;
		return;
	}

	if (e1000_check_for_link(&sc->hw) == 0)
		if_printf(ifp, "watchdog timeout -- resetting\n");

	ifp->if_oerrors++;
	sc->watchdog_events++;

	emx_init(sc);

	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}

static void
emx_init(void *xsc)
{
	struct emx_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	device_t dev = sc->dev;
	uint32_t pba;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	emx_stop(sc);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer;
	 * the remainder is used for the transmit buffer.
	 */
	switch (sc->hw.mac.type) {
	/* Total Packet Buffer on these is 48K */
	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
		pba = E1000_PBA_32K;	/* 32K for Rx, 16K for Tx */
		break;

	case e1000_82573:		/* 82573: Total Packet Buffer is 32K */
		pba = E1000_PBA_12K;	/* 12K for Rx, 20K for Tx */
		break;

	case e1000_82574:
		pba = E1000_PBA_20K;	/* 20K for Rx, 20K for Tx */
		break;

	default:
		/* Devices before 82547 had a Packet Buffer of 64K. */
		if (sc->max_frame_size > 8192)
			pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
		else
			pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
	}
	E1000_WRITE_REG(&sc->hw, E1000_PBA, pba);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IF_LLADDR(ifp), sc->hw.mac.addr, ETHER_ADDR_LEN);

	/* Put the address into the Receive Address Array */
	e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0);

	/*
	 * With the 82571 sc, RAR[0] may be overwritten
	 * when the other port is reset, we make a duplicate
	 * in RAR[14] for that eventuality, this assures
	 * the interface continues to function.
	 */
	if (sc->hw.mac.type == e1000_82571) {
		e1000_set_laa_state_82571(&sc->hw, TRUE);
		e1000_rar_set(&sc->hw, sc->hw.mac.addr,
		    E1000_RAR_ENTRIES - 1);
	}

	/* Initialize the hardware */
	if (emx_hw_init(sc)) {
		device_printf(dev, "Unable to initialize the hardware\n");
		/* XXX emx_stop()? */
		return;
	}
	emx_update_link_status(sc);

	/* Setup VLAN support, basic and offload if available */
	E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);

	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
		uint32_t ctrl;

		ctrl = E1000_READ_REG(&sc->hw, E1000_CTRL);
		ctrl |= E1000_CTRL_VME;
		E1000_WRITE_REG(&sc->hw, E1000_CTRL, ctrl);
	}

	/* Set hardware offload abilities */
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist = EMX_CSUM_FEATURES;
	else
		ifp->if_hwassist = 0;

	/* Configure for OS presence */
	emx_get_mgmt(sc);

	/* Prepare transmit descriptors and buffers */
	emx_init_tx_ring(sc);
	emx_init_tx_unit(sc);

	/* Setup Multicast table */
	emx_set_multi(sc);

	/*
	 * Adjust # of RX ring to be used based on IFCAP_RSS
	 */
	if (ifp->if_capenable & IFCAP_RSS)
		sc->rx_ring_inuse = sc->rx_ring_cnt;
	else
		sc->rx_ring_inuse = 1;

	/* Prepare receive descriptors and buffers */
	for (i = 0; i < sc->rx_ring_inuse; ++i) {
		if (emx_init_rx_ring(sc, &sc->rx_data[i])) {
			device_printf(dev,
			    "Could not setup receive structures\n");
			emx_stop(sc);
			return;
		}
	}
	emx_init_rx_unit(sc);

	/* Don't lose promiscuous settings */
	emx_set_promisc(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	callout_reset(&sc->timer, hz, emx_timer, sc);
	e1000_clear_hw_cntrs_base_generic(&sc->hw);

	/* MSI/X configuration for 82574 */
	if (sc->hw.mac.type == e1000_82574) {
		int tmp;

		tmp = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
		tmp |= E1000_CTRL_EXT_PBA_CLR;
		E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, tmp);
		/*
		 * Set the IVAR - interrupt vector routing.
		 * Each nibble represents a vector, high bit
		 * is enable, other 3 bits are the MSIX table
		 * entry, we map RXQ0 to 0, TXQ0 to 1, and
		 * Link (other) to 2, hence the magic number.
		 */
		E1000_WRITE_REG(&sc->hw, E1000_IVAR, 0x800A0908);
	}

#ifdef DEVICE_POLLING
	/*
	 * Only enable interrupts if we are not polling, make sure
	 * they are off otherwise.
	 */
	if (ifp->if_flags & IFF_POLLING)
		emx_disable_intr(sc);
	else
#endif /* DEVICE_POLLING */
		emx_enable_intr(sc);

	/* Don't reset the phy next time init gets called */
	sc->hw.phy.reset_disable = TRUE;
}

#ifdef DEVICE_POLLING

static void
emx_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct emx_softc *sc = ifp->if_softc;
	uint32_t reg_icr;

	ASSERT_IFNET_SERIALIZED_MAIN(ifp);

	switch (cmd) {
	case POLL_REGISTER:
		emx_disable_intr(sc);
		break;

	case POLL_DEREGISTER:
		emx_enable_intr(sc);
		break;

	case POLL_AND_CHECK_STATUS:
		reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR);
		if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
			if (emx_tryserialize_skipmain(sc)) {
				callout_stop(&sc->timer);
				sc->hw.mac.get_link_status = 1;
				emx_update_link_status(sc);
				callout_reset(&sc->timer, hz, emx_timer, sc);
				emx_deserialize_skipmain(sc);
			}
		}
		/* FALL THROUGH */
	case POLL_ONLY:
		if (ifp->if_flags & IFF_RUNNING) {
			int i;

			for (i = 0; i < sc->rx_ring_inuse; ++i) {
				if (lwkt_serialize_try(
				    &sc->rx_data[i].rx_serialize)) {
					emx_rxeof(sc, i, count);
					lwkt_serialize_exit(
					    &sc->rx_data[i].rx_serialize);
				}
			}

			if (lwkt_serialize_try(&sc->tx_serialize)) {
				emx_txeof(sc);
				if (!ifq_is_empty(&ifp->if_snd))
					if_devstart(ifp);
				lwkt_serialize_exit(&sc->tx_serialize);
			}
		}
		break;
	}
}

#endif /* DEVICE_POLLING */
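
/*
 * Interrupt handler.  Entered with only the main serializer held; the
 * TX serializer and the per-ring RX serializers are taken individually
 * around the respective cleanup paths below.
 */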

static void
emx_intr(void *xsc)
{
	struct emx_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg_icr;

	logif(intr_beg);
	ASSERT_SERIALIZED(&sc->main_serialize);

	reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR);

	if ((reg_icr & E1000_ICR_INT_ASSERTED) == 0) {
		logif(intr_end);
		return;
	}

	/*
	 * XXX: some laptops trigger several spurious interrupts
	 * on emx(4) when in the resume cycle.  The ICR register
	 * reports all-ones value in this case.  Processing such
	 * interrupts would lead to a freeze.  I don't know why.
	 */
	if (reg_icr == 0xffffffff) {
		logif(intr_end);
		return;
	}

	if (ifp->if_flags & IFF_RUNNING) {
		if (reg_icr &
		    (E1000_ICR_RXT0 | E1000_ICR_RXDMT0 | E1000_ICR_RXO)) {
			int i;

			for (i = 0; i < sc->rx_ring_inuse; ++i) {
				lwkt_serialize_enter(
				    &sc->rx_data[i].rx_serialize);
				emx_rxeof(sc, i, -1);
				lwkt_serialize_exit(
				    &sc->rx_data[i].rx_serialize);
			}
		}
		if (reg_icr & E1000_ICR_TXDW) {
			lwkt_serialize_enter(&sc->tx_serialize);
			emx_txeof(sc);
			if (!ifq_is_empty(&ifp->if_snd))
				if_devstart(ifp);
			lwkt_serialize_exit(&sc->tx_serialize);
		}
	}

	/* Link status change */
	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		emx_serialize_skipmain(sc);

		callout_stop(&sc->timer);
		sc->hw.mac.get_link_status = 1;
		emx_update_link_status(sc);

		/* Deal with TX cruft when link lost */
		emx_tx_purge(sc);

		callout_reset(&sc->timer, hz, emx_timer, sc);

		emx_deserialize_skipmain(sc);
	}

	if (reg_icr & E1000_ICR_RXO)
		sc->rx_overruns++;

	logif(intr_end);
}

static void
emx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct emx_softc *sc = ifp->if_softc;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	emx_update_link_status(sc);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!sc->link_active)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	if (sc->hw.phy.media_type == e1000_media_type_fiber ||
	    sc->hw.phy.media_type == e1000_media_type_internal_serdes) {
		ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
	} else {
		switch (sc->link_speed) {
		case 10:
			ifmr->ifm_active |= IFM_10_T;
			break;
		case 100:
			ifmr->ifm_active |= IFM_100_TX;
			break;

		case 1000:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		}
		if (sc->link_duplex == FULL_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;
	}
}

static int
emx_media_change(struct ifnet *ifp)
{
	struct emx_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->media;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		sc->hw.mac.autoneg = EMX_DO_AUTO_NEG;
		sc->hw.phy.autoneg_advertised = EMX_AUTONEG_ADV_DEFAULT;
		break;

	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_T:
		sc->hw.mac.autoneg = EMX_DO_AUTO_NEG;
		sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;

	case IFM_100_TX:
		sc->hw.mac.autoneg = FALSE;
		sc->hw.phy.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			sc->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
		else
			sc->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
		break;

	case IFM_10_T:
		sc->hw.mac.autoneg = FALSE;
		sc->hw.phy.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			sc->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
		else
			sc->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
		break;

	default:
		if_printf(ifp, "Unsupported media type\n");
		break;
	}

	/*
	 * As the speed/duplex settings may have changed we need to
	 * reset the PHY.
	 */
	sc->hw.phy.reset_disable = FALSE;

	emx_init(sc);

	return (0);
}
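
/*
 * Map an outgoing mbuf chain onto the TX ring: set up an optional
 * checksum offload context, fill one data descriptor per DMA segment,
 * request a status writeback (RS) every tx_int_nsegs descriptors and
 * finally advance TDT(0) to hand the frame to the hardware.
 */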

static int
emx_encap(struct emx_softc *sc, struct mbuf **m_headp)
{
	bus_dma_segment_t segs[EMX_MAX_SCATTER];
	bus_dmamap_t map;
	struct emx_txbuf *tx_buffer, *tx_buffer_mapped;
	struct e1000_tx_desc *ctxd = NULL;
	struct mbuf *m_head = *m_headp;
	uint32_t txd_upper, txd_lower, cmd = 0;
	int maxsegs, nsegs, i, j, first, last = 0, error;

	if (m_head->m_len < EMX_TXCSUM_MINHL &&
	    (m_head->m_flags & EMX_CSUM_FEATURES)) {
		/*
		 * Make sure that ethernet header and ip.ip_hl are in
		 * contiguous memory, since if TXCSUM is enabled, later
		 * TX context descriptor's setup need to access ip.ip_hl.
		 */
		error = emx_txcsum_pullup(sc, m_headp);
		if (error) {
			KKASSERT(*m_headp == NULL);
			return error;
		}
		m_head = *m_headp;
	}

	txd_upper = txd_lower = 0;

	/*
	 * Capture the first descriptor index, this descriptor
	 * will have the index of the EOP which is the only one
	 * that now gets a DONE bit writeback.
	 */
	first = sc->next_avail_tx_desc;
	tx_buffer = &sc->tx_buf[first];
	tx_buffer_mapped = tx_buffer;
	map = tx_buffer->map;

	maxsegs = sc->num_tx_desc_avail - EMX_TX_RESERVED;
	KASSERT(maxsegs >= sc->spare_tx_desc, ("not enough spare TX desc\n"));
	if (maxsegs > EMX_MAX_SCATTER)
		maxsegs = EMX_MAX_SCATTER;

	error = bus_dmamap_load_mbuf_defrag(sc->txtag, map, m_headp,
	    segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		if (error == ENOBUFS)
			sc->mbuf_alloc_failed++;
		else
			sc->no_tx_dma_setup++;

		m_freem(*m_headp);
		*m_headp = NULL;
		return error;
	}
	bus_dmamap_sync(sc->txtag, map, BUS_DMASYNC_PREWRITE);

	m_head = *m_headp;
	sc->tx_nsegs += nsegs;

	if (m_head->m_pkthdr.csum_flags & EMX_CSUM_FEATURES) {
		/* TX csum offloading will consume one TX desc */
		sc->tx_nsegs += emx_txcsum(sc, m_head, &txd_upper, &txd_lower);
	}
	i = sc->next_avail_tx_desc;

	/* Set up our transmit descriptors */
	for (j = 0; j < nsegs; j++) {
		tx_buffer = &sc->tx_buf[i];
		ctxd = &sc->tx_desc_base[i];

		ctxd->buffer_addr = htole64(segs[j].ds_addr);
		ctxd->lower.data = htole32(E1000_TXD_CMD_IFCS |
					   txd_lower | segs[j].ds_len);
		ctxd->upper.data = htole32(txd_upper);

		last = i;
		if (++i == sc->num_tx_desc)
			i = 0;
	}

	sc->next_avail_tx_desc = i;

	KKASSERT(sc->num_tx_desc_avail > nsegs);
	sc->num_tx_desc_avail -= nsegs;

	/* Handle VLAN tag */
	if (m_head->m_flags & M_VLANTAG) {
		/* Set the vlan id. */
		ctxd->upper.fields.special =
		    htole16(m_head->m_pkthdr.ether_vlantag);

		/* Tell hardware to add tag */
		ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
	}

	tx_buffer->m_head = m_head;
	tx_buffer_mapped->map = tx_buffer->map;
	tx_buffer->map = map;

	if (sc->tx_nsegs >= sc->tx_int_nsegs) {
		sc->tx_nsegs = 0;

		/*
		 * Report Status (RS) is turned on
		 * every tx_int_nsegs descriptors.
		 */
		cmd = E1000_TXD_CMD_RS;

		/*
		 * Keep track of the descriptor, which will
		 * be written back by hardware.
		 */
		sc->tx_dd[sc->tx_dd_tail] = last;
		EMX_INC_TXDD_IDX(sc->tx_dd_tail);
		KKASSERT(sc->tx_dd_tail != sc->tx_dd_head);
	}

	/*
	 * Last Descriptor of Packet needs End Of Packet (EOP)
	 */
	ctxd->lower.data |= htole32(E1000_TXD_CMD_EOP | cmd);

	/*
	 * Advance the Transmit Descriptor Tail (TDT), this tells
	 * the E1000 that this frame is available to transmit.
	 */
	E1000_WRITE_REG(&sc->hw, E1000_TDT(0), i);

	return (0);
}

static void
emx_set_promisc(struct emx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg_rctl;

	reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);

	if (ifp->if_flags & IFF_PROMISC) {
		reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		/* Turn this on if you want to see bad packets */
		if (emx_debug_sbp)
			reg_rctl |= E1000_RCTL_SBP;
		E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		reg_rctl |= E1000_RCTL_MPE;
		reg_rctl &= ~E1000_RCTL_UPE;
		E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
	}
}

static void
emx_disable_promisc(struct emx_softc *sc)
{
	uint32_t reg_rctl;

	reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);

	reg_rctl &= ~E1000_RCTL_UPE;
	reg_rctl &= ~E1000_RCTL_MPE;
	reg_rctl &= ~E1000_RCTL_SBP;
	E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
}

static void
emx_set_multi(struct emx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint32_t reg_rctl = 0;
	uint8_t mta[512];	/* Largest MTS is 4096 bits */
	int mcnt = 0;

	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == EMX_MCAST_ADDR_MAX)
			break;

		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    &mta[mcnt * ETHER_ADDR_LEN], ETHER_ADDR_LEN);
		mcnt++;
	}

	if (mcnt >= EMX_MCAST_ADDR_MAX) {
		reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL);
		reg_rctl |= E1000_RCTL_MPE;
		E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl);
	} else {
		e1000_update_mc_addr_list(&sc->hw, mta,
		    mcnt, 1, sc->hw.mac.rar_entry_count);
	}
}

/*
 * This routine checks for link status and updates statistics.
 */
static void
emx_timer(void *xsc)
{
	struct emx_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);

	emx_update_link_status(sc);
	emx_update_stats(sc);

	/* Reset LAA into RAR[0] on 82571 */
	if (e1000_get_laa_state_82571(&sc->hw) == TRUE)
		e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0);

	if (emx_display_debug_stats && (ifp->if_flags & IFF_RUNNING))
		emx_print_hw_stats(sc);

	emx_smartspeed(sc);

	callout_reset(&sc->timer, hz, emx_timer, sc);

	ifnet_deserialize_all(ifp);
}

static void
emx_update_link_status(struct emx_softc *sc)
{
	struct e1000_hw *hw = &sc->hw;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	device_t dev = sc->dev;
	uint32_t link_check = 0;

	/* Get the cached link value or read phy for real */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			/* Do the work to read phy */
			e1000_check_for_link(hw);
			link_check = !hw->mac.get_link_status;
			if (link_check)	/* ESB2 fix */
				e1000_cfg_on_link_up(hw);
		} else {
			link_check = TRUE;
		}
		break;

	case e1000_media_type_fiber:
		e1000_check_for_link(hw);
		link_check = E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU;
		break;

	case e1000_media_type_internal_serdes:
		e1000_check_for_link(hw);
		link_check = sc->hw.mac.serdes_has_link;
		break;

	case e1000_media_type_unknown:
	default:
		break;
	}

	/* Now check for a transition */
	if (link_check && sc->link_active == 0) {
		e1000_get_speed_and_duplex(hw, &sc->link_speed,
		    &sc->link_duplex);

		/*
		 * Check if we should enable/disable SPEED_MODE bit on
		 * 82571EB/82572EI
		 */
		if (hw->mac.type == e1000_82571 ||
		    hw->mac.type == e1000_82572) {
			int tarc0;

			tarc0 = E1000_READ_REG(hw, E1000_TARC(0));
			if (sc->link_speed != SPEED_1000)
				tarc0 &= ~EMX_TARC_SPEED_MODE;
			else
				tarc0 |= EMX_TARC_SPEED_MODE;
			E1000_WRITE_REG(hw, E1000_TARC(0), tarc0);
		}
		if (bootverbose) {
			device_printf(dev, "Link is up %d Mbps %s\n",
			    sc->link_speed,
			    ((sc->link_duplex == FULL_DUPLEX) ?
			     "Full Duplex" : "Half Duplex"));
		}
		sc->link_active = 1;
		sc->smartspeed = 0;
		ifp->if_baudrate = sc->link_speed * 1000000;
		ifp->if_link_state = LINK_STATE_UP;
		if_link_state_change(ifp);
	} else if (!link_check && sc->link_active == 1) {
		ifp->if_baudrate = sc->link_speed = 0;
		sc->link_duplex = 0;
		if (bootverbose)
			device_printf(dev, "Link is Down\n");
		sc->link_active = 0;
#if 0
		/* Link down, disable watchdog */
		ifp->if_timer = 0;
#endif
		ifp->if_link_state = LINK_STATE_DOWN;
		if_link_state_change(ifp);
	}
}
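
/*
 * Stop the interface: disable interrupts, cancel the timer, reset the
 * hardware and release any mbufs still held by the TX and RX rings.
 */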

static void
emx_stop(struct emx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	emx_disable_intr(sc);

	callout_stop(&sc->timer);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	/*
	 * Disable multiple receive queues.
	 *
	 * NOTE:
	 * We should disable multiple receive queues before
	 * resetting the hardware.
	 */
	E1000_WRITE_REG(&sc->hw, E1000_MRQC, 0);

	e1000_reset_hw(&sc->hw);
	E1000_WRITE_REG(&sc->hw, E1000_WUC, 0);

	for (i = 0; i < sc->num_tx_desc; i++) {
		struct emx_txbuf *tx_buffer = &sc->tx_buf[i];

		if (tx_buffer->m_head != NULL) {
			bus_dmamap_unload(sc->txtag, tx_buffer->map);
			m_freem(tx_buffer->m_head);
			tx_buffer->m_head = NULL;
		}
	}

	for (i = 0; i < sc->rx_ring_inuse; ++i)
		emx_free_rx_ring(sc, &sc->rx_data[i]);

	sc->csum_flags = 0;
	sc->csum_ehlen = 0;
	sc->csum_iphlen = 0;

	sc->tx_dd_head = 0;
	sc->tx_dd_tail = 0;
	sc->tx_nsegs = 0;
}

static int
emx_hw_init(struct emx_softc *sc)
{
	device_t dev = sc->dev;
	uint16_t rx_buffer_size;

	/* Issue a global reset */
	e1000_reset_hw(&sc->hw);

	/* Get control from any management/hw control */
	if (sc->hw.mac.type == e1000_82573 &&
	    e1000_check_mng_mode(&sc->hw))
		emx_get_hw_control(sc);

	/* Set up smart power down as default off on newer adapters. */
	if (!emx_smart_pwr_down &&
	    (sc->hw.mac.type == e1000_82571 ||
	     sc->hw.mac.type == e1000_82572)) {
		uint16_t phy_tmp = 0;

		/* Speed up time to link by disabling smart power down. */
		e1000_read_phy_reg(&sc->hw,
		    IGP02E1000_PHY_POWER_MGMT, &phy_tmp);
		phy_tmp &= ~IGP02E1000_PM_SPD;
		e1000_write_phy_reg(&sc->hw,
		    IGP02E1000_PHY_POWER_MGMT, phy_tmp);
	}

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two frames to be
	 *   received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has
	 *   drained a bit.  Here we use an arbitrary value of 1500 which will
	 *   restart after one full frame is pulled from the buffer.  There
	 *   could be several smaller frames in the buffer and if so they will
	 *   not trigger the XON until their total number reduces the buffer
	 *   by 1500.
	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
	 */
	rx_buffer_size = (E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff) << 10;

	sc->hw.fc.high_water = rx_buffer_size -
			       roundup2(sc->max_frame_size, 1024);
	sc->hw.fc.low_water = sc->hw.fc.high_water - 1500;

	if (sc->hw.mac.type == e1000_80003es2lan)
		sc->hw.fc.pause_time = 0xFFFF;
	else
		sc->hw.fc.pause_time = EMX_FC_PAUSE_TIME;
	sc->hw.fc.send_xon = TRUE;
	sc->hw.fc.requested_mode = e1000_fc_full;
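
	/*
	 * Worked example (values assumed for illustration): on a part
	 * configured for 32KB of receive packet buffer with the default
	 * 1518 byte max_frame_size, rx_buffer_size = 32 << 10 = 32768,
	 * roundup2(1518, 1024) = 2048, so high_water = 30720 and
	 * low_water = 29220 bytes.
	 */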

	if (e1000_init_hw(&sc->hw) < 0) {
		device_printf(dev, "Hardware Initialization Failed\n");
		return (EIO);
	}

	e1000_check_for_link(&sc->hw);

	return (0);
}

static void
emx_setup_ifp(struct emx_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;

	if_initname(ifp, device_get_name(sc->dev),
		    device_get_unit(sc->dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = emx_init;
	ifp->if_ioctl = emx_ioctl;
	ifp->if_start = emx_start;
#ifdef DEVICE_POLLING
	ifp->if_poll = emx_poll;
#endif
	ifp->if_watchdog = emx_watchdog;
	ifp->if_serialize = emx_serialize;
	ifp->if_deserialize = emx_deserialize;
	ifp->if_tryserialize = emx_tryserialize;
#ifdef INVARIANTS
	ifp->if_serialize_assert = emx_serialize_assert;
#endif
	ifq_set_maxlen(&ifp->if_snd, sc->num_tx_desc - 1);
	ifq_set_ready(&ifp->if_snd);

	ether_ifattach(ifp, sc->hw.mac.addr, NULL);

	ifp->if_capabilities = IFCAP_HWCSUM |
			       IFCAP_VLAN_HWTAGGING |
			       IFCAP_VLAN_MTU;
	if (sc->rx_ring_cnt > 1)
		ifp->if_capabilities |= IFCAP_RSS;
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_hwassist = EMX_CSUM_FEATURES;

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/*
	 * Specify the media types supported by this sc and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&sc->media, IFM_IMASK,
	    emx_media_change, emx_media_status);
	if (sc->hw.phy.media_type == e1000_media_type_fiber ||
	    sc->hw.phy.media_type == e1000_media_type_internal_serdes) {
		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
			    0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
	} else {
		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX,
			    0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
			    0, NULL);
		if (sc->hw.phy.type != e1000_phy_ife) {
			ifmedia_add(&sc->media,
			    IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
			ifmedia_add(&sc->media,
			    IFM_ETHER | IFM_1000_T, 0, NULL);
		}
	}
	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
}

/*
 * Workaround for SmartSpeed on 82541 and 82547 controllers
 */
static void
emx_smartspeed(struct emx_softc *sc)
{
	uint16_t phy_tmp;

	if (sc->link_active || sc->hw.phy.type != e1000_phy_igp ||
	    sc->hw.mac.autoneg == 0 ||
	    (sc->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0)
		return;

	if (sc->smartspeed == 0) {
		/*
		 * If Master/Slave config fault is asserted twice,
		 * we assume back-to-back
		 */
		e1000_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp);
		if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT))
			return;
		e1000_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp);
		if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
			e1000_read_phy_reg(&sc->hw,
			    PHY_1000T_CTRL, &phy_tmp);
			if (phy_tmp & CR_1000T_MS_ENABLE) {
				phy_tmp &= ~CR_1000T_MS_ENABLE;
				e1000_write_phy_reg(&sc->hw,
				    PHY_1000T_CTRL, phy_tmp);
				sc->smartspeed++;
				if (sc->hw.mac.autoneg &&
				    !e1000_phy_setup_autoneg(&sc->hw) &&
				    !e1000_read_phy_reg(&sc->hw,
				      PHY_CONTROL, &phy_tmp)) {
					phy_tmp |= MII_CR_AUTO_NEG_EN |
						   MII_CR_RESTART_AUTO_NEG;
					e1000_write_phy_reg(&sc->hw,
					    PHY_CONTROL, phy_tmp);
				}
			}
		}
		return;
	} else if (sc->smartspeed == EMX_SMARTSPEED_DOWNSHIFT) {
		/* If still no link, perhaps using 2/3 pair cable */
		e1000_read_phy_reg(&sc->hw, PHY_1000T_CTRL, &phy_tmp);
		phy_tmp |= CR_1000T_MS_ENABLE;
		e1000_write_phy_reg(&sc->hw, PHY_1000T_CTRL, phy_tmp);
		if (sc->hw.mac.autoneg &&
		    !e1000_phy_setup_autoneg(&sc->hw) &&
		    !e1000_read_phy_reg(&sc->hw, PHY_CONTROL, &phy_tmp)) {
			phy_tmp |= MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG;
			e1000_write_phy_reg(&sc->hw, PHY_CONTROL, phy_tmp);
		}
	}

	/* Restart process after EMX_SMARTSPEED_MAX iterations */
	if (sc->smartspeed++ == EMX_SMARTSPEED_MAX)
		sc->smartspeed = 0;
}

static int
emx_create_tx_ring(struct emx_softc *sc)
{
	device_t dev = sc->dev;
	struct emx_txbuf *tx_buffer;
	int error, i, tsize;

	/*
	 * Validate number of transmit descriptors.  It must not exceed
	 * hardware maximum, and must be multiple of E1000_DBA_ALIGN.
	 */
	if ((emx_txd * sizeof(struct e1000_tx_desc)) % EMX_DBA_ALIGN != 0 ||
	    emx_txd > EMX_MAX_TXD || emx_txd < EMX_MIN_TXD) {
		device_printf(dev, "Using %d TX descriptors instead of %d!\n",
		    EMX_DEFAULT_TXD, emx_txd);
		sc->num_tx_desc = EMX_DEFAULT_TXD;
	} else {
		sc->num_tx_desc = emx_txd;
	}

	/*
	 * Allocate Transmit Descriptor ring
	 */
	tsize = roundup2(sc->num_tx_desc * sizeof(struct e1000_tx_desc),
			 EMX_DBA_ALIGN);
	sc->tx_desc_base = bus_dmamem_coherent_any(sc->parent_dtag,
				EMX_DBA_ALIGN, tsize, BUS_DMA_WAITOK,
				&sc->tx_desc_dtag, &sc->tx_desc_dmap,
				&sc->tx_desc_paddr);
	if (sc->tx_desc_base == NULL) {
		device_printf(dev, "Unable to allocate tx_desc memory\n");
		return ENOMEM;
	}

	sc->tx_buf = kmalloc(sizeof(struct emx_txbuf) * sc->num_tx_desc,
			     M_DEVBUF, M_WAITOK | M_ZERO);

	/*
	 * Create DMA tags for tx buffers
	 */
	error = bus_dma_tag_create(sc->parent_dtag, /* parent */
			1, 0,			/* alignment, bounds */
			BUS_SPACE_MAXADDR,	/* lowaddr */
			BUS_SPACE_MAXADDR,	/* highaddr */
			NULL, NULL,		/* filter, filterarg */
			EMX_TSO_SIZE,		/* maxsize */
			EMX_MAX_SCATTER,	/* nsegments */
			EMX_MAX_SEGSIZE,	/* maxsegsize */
			BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW |
			BUS_DMA_ONEBPAGE,	/* flags */
			&sc->txtag);
	if (error) {
		device_printf(dev, "Unable to allocate TX DMA tag\n");
		kfree(sc->tx_buf, M_DEVBUF);
		sc->tx_buf = NULL;
		return error;
	}

	/*
	 * Create DMA maps for tx buffers
	 */
	for (i = 0; i < sc->num_tx_desc; i++) {
		tx_buffer = &sc->tx_buf[i];

		error = bus_dmamap_create(sc->txtag,
					  BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE,
					  &tx_buffer->map);
		if (error) {
			device_printf(dev, "Unable to create TX DMA map\n");
			emx_destroy_tx_ring(sc, i);
			return error;
		}
	}
	return (0);
}

static void
emx_init_tx_ring(struct emx_softc *sc)
{
	/* Clear the old ring contents */
	bzero(sc->tx_desc_base,
	      sizeof(struct e1000_tx_desc) * sc->num_tx_desc);

	/* Reset state */
	sc->next_avail_tx_desc = 0;
	sc->next_tx_to_clean = 0;
	sc->num_tx_desc_avail = sc->num_tx_desc;
}

static void
emx_init_tx_unit(struct emx_softc *sc)
{
	uint32_t tctl, tarc, tipg = 0;
	uint64_t bus_addr;

	/* Setup the Base and Length of the Tx Descriptor Ring */
	bus_addr = sc->tx_desc_paddr;
	E1000_WRITE_REG(&sc->hw, E1000_TDLEN(0),
	    sc->num_tx_desc * sizeof(struct e1000_tx_desc));
	E1000_WRITE_REG(&sc->hw, E1000_TDBAH(0),
	    (uint32_t)(bus_addr >> 32));
	E1000_WRITE_REG(&sc->hw, E1000_TDBAL(0),
	    (uint32_t)bus_addr);
	/* Setup the HW Tx Head and Tail descriptor pointers */
	E1000_WRITE_REG(&sc->hw, E1000_TDT(0), 0);
	E1000_WRITE_REG(&sc->hw, E1000_TDH(0), 0);

	/* Set the default values for the Tx Inter Packet Gap timer */
	switch (sc->hw.mac.type) {
	case e1000_80003es2lan:
		tipg = DEFAULT_82543_TIPG_IPGR1;
		tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 <<
			E1000_TIPG_IPGR2_SHIFT;
		break;

	default:
		if (sc->hw.phy.media_type == e1000_media_type_fiber ||
		    sc->hw.phy.media_type == e1000_media_type_internal_serdes)
			tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
		else
			tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
		tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
		tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
E1000_TIPG_IPGR2_SHIFT; 2056 break; 2057 } 2058 2059 E1000_WRITE_REG(&sc->hw, E1000_TIPG, tipg); 2060 2061 /* NOTE: 0 is not allowed for TIDV */ 2062 E1000_WRITE_REG(&sc->hw, E1000_TIDV, 1); 2063 E1000_WRITE_REG(&sc->hw, E1000_TADV, 0); 2064 2065 if (sc->hw.mac.type == e1000_82571 || 2066 sc->hw.mac.type == e1000_82572) { 2067 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0)); 2068 tarc |= EMX_TARC_SPEED_MODE; 2069 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc); 2070 } else if (sc->hw.mac.type == e1000_80003es2lan) { 2071 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0)); 2072 tarc |= 1; 2073 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc); 2074 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1)); 2075 tarc |= 1; 2076 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc); 2077 } 2078 2079 /* Program the Transmit Control Register */ 2080 tctl = E1000_READ_REG(&sc->hw, E1000_TCTL); 2081 tctl &= ~E1000_TCTL_CT; 2082 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN | 2083 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); 2084 tctl |= E1000_TCTL_MULR; 2085 2086 /* This write will effectively turn on the transmit unit. */ 2087 E1000_WRITE_REG(&sc->hw, E1000_TCTL, tctl); 2088 } 2089 2090 static void 2091 emx_destroy_tx_ring(struct emx_softc *sc, int ndesc) 2092 { 2093 struct emx_txbuf *tx_buffer; 2094 int i; 2095 2096 /* Free Transmit Descriptor ring */ 2097 if (sc->tx_desc_base) { 2098 bus_dmamap_unload(sc->tx_desc_dtag, sc->tx_desc_dmap); 2099 bus_dmamem_free(sc->tx_desc_dtag, sc->tx_desc_base, 2100 sc->tx_desc_dmap); 2101 bus_dma_tag_destroy(sc->tx_desc_dtag); 2102 2103 sc->tx_desc_base = NULL; 2104 } 2105 2106 if (sc->tx_buf == NULL) 2107 return; 2108 2109 for (i = 0; i < ndesc; i++) { 2110 tx_buffer = &sc->tx_buf[i]; 2111 2112 KKASSERT(tx_buffer->m_head == NULL); 2113 bus_dmamap_destroy(sc->txtag, tx_buffer->map); 2114 } 2115 bus_dma_tag_destroy(sc->txtag); 2116 2117 kfree(sc->tx_buf, M_DEVBUF); 2118 sc->tx_buf = NULL; 2119 } 2120 2121 /* 2122 * The offload context needs to be set when we transfer the first 2123 * packet of a particular protocol (TCP/UDP). This routine has been 2124 * enhanced to deal with inserted VLAN headers. 2125 * 2126 * If the new packet's ether header length, ip header length and 2127 * csum offloading type are same as the previous packet, we should 2128 * avoid allocating a new csum context descriptor; mainly to take 2129 * advantage of the pipeline effect of the TX data read request. 2130 * 2131 * This function returns number of TX descrptors allocated for 2132 * csum context. 2133 */ 2134 static int 2135 emx_txcsum(struct emx_softc *sc, struct mbuf *mp, 2136 uint32_t *txd_upper, uint32_t *txd_lower) 2137 { 2138 struct e1000_context_desc *TXD; 2139 struct emx_txbuf *tx_buffer; 2140 struct ether_vlan_header *eh; 2141 struct ip *ip; 2142 int curr_txd, ehdrlen, csum_flags; 2143 uint32_t cmd, hdr_len, ip_hlen; 2144 uint16_t etype; 2145 2146 /* 2147 * Determine where frame payload starts. 2148 * Jump over vlan headers if already present, 2149 * helpful for QinQ too. 
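 * With an 802.1Q tag the header grows by EVL_ENCAPLEN bytes, so ehdrlen
 * ends up as either ETHER_HDR_LEN or ETHER_HDR_LEN + EVL_ENCAPLEN; the
 * caller is expected to have run emx_txcsum_pullup() so that these bytes
 * are contiguous in the first mbuf (see the KASSERTs below).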
2150 */ 2151 KASSERT(mp->m_len >= ETHER_HDR_LEN, 2152 ("emx_txcsum_pullup is not called (eh)?\n")); 2153 eh = mtod(mp, struct ether_vlan_header *); 2154 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 2155 KASSERT(mp->m_len >= ETHER_HDR_LEN + EVL_ENCAPLEN, 2156 ("emx_txcsum_pullup is not called (evh)?\n")); 2157 etype = ntohs(eh->evl_proto); 2158 ehdrlen = ETHER_HDR_LEN + EVL_ENCAPLEN; 2159 } else { 2160 etype = ntohs(eh->evl_encap_proto); 2161 ehdrlen = ETHER_HDR_LEN; 2162 } 2163 2164 /* 2165 * We only support TCP/UDP for IPv4 for the moment. 2166 * TODO: Support SCTP too when it hits the tree. 2167 */ 2168 if (etype != ETHERTYPE_IP) 2169 return 0; 2170 2171 KASSERT(mp->m_len >= ehdrlen + EMX_IPVHL_SIZE, 2172 ("emx_txcsum_pullup is not called (eh+ip_vhl)?\n")); 2173 2174 /* NOTE: We could only safely access ip.ip_vhl part */ 2175 ip = (struct ip *)(mp->m_data + ehdrlen); 2176 ip_hlen = ip->ip_hl << 2; 2177 2178 csum_flags = mp->m_pkthdr.csum_flags & EMX_CSUM_FEATURES; 2179 2180 if (sc->csum_ehlen == ehdrlen && sc->csum_iphlen == ip_hlen && 2181 sc->csum_flags == csum_flags) { 2182 /* 2183 * Same csum offload context as the previous packets; 2184 * just return. 2185 */ 2186 *txd_upper = sc->csum_txd_upper; 2187 *txd_lower = sc->csum_txd_lower; 2188 return 0; 2189 } 2190 2191 /* 2192 * Setup a new csum offload context. 2193 */ 2194 2195 curr_txd = sc->next_avail_tx_desc; 2196 tx_buffer = &sc->tx_buf[curr_txd]; 2197 TXD = (struct e1000_context_desc *)&sc->tx_desc_base[curr_txd]; 2198 2199 cmd = 0; 2200 2201 /* Setup of IP header checksum. */ 2202 if (csum_flags & CSUM_IP) { 2203 /* 2204 * Start offset for header checksum calculation. 2205 * End offset for header checksum calculation. 2206 * Offset of place to put the checksum. 2207 */ 2208 TXD->lower_setup.ip_fields.ipcss = ehdrlen; 2209 TXD->lower_setup.ip_fields.ipcse = 2210 htole16(ehdrlen + ip_hlen - 1); 2211 TXD->lower_setup.ip_fields.ipcso = 2212 ehdrlen + offsetof(struct ip, ip_sum); 2213 cmd |= E1000_TXD_CMD_IP; 2214 *txd_upper |= E1000_TXD_POPTS_IXSM << 8; 2215 } 2216 hdr_len = ehdrlen + ip_hlen; 2217 2218 if (csum_flags & CSUM_TCP) { 2219 /* 2220 * Start offset for payload checksum calculation. 2221 * End offset for payload checksum calculation. 2222 * Offset of place to put the checksum. 2223 */ 2224 TXD->upper_setup.tcp_fields.tucss = hdr_len; 2225 TXD->upper_setup.tcp_fields.tucse = htole16(0); 2226 TXD->upper_setup.tcp_fields.tucso = 2227 hdr_len + offsetof(struct tcphdr, th_sum); 2228 cmd |= E1000_TXD_CMD_TCP; 2229 *txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2230 } else if (csum_flags & CSUM_UDP) { 2231 /* 2232 * Start offset for header checksum calculation. 2233 * End offset for header checksum calculation. 2234 * Offset of place to put the checksum. 
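 * As in the TCP case above, tucse is left at 0, which tells the hardware
 * to checksum through the end of the packet.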
2235 */ 2236 TXD->upper_setup.tcp_fields.tucss = hdr_len; 2237 TXD->upper_setup.tcp_fields.tucse = htole16(0); 2238 TXD->upper_setup.tcp_fields.tucso = 2239 hdr_len + offsetof(struct udphdr, uh_sum); 2240 *txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2241 } 2242 2243 *txd_lower = E1000_TXD_CMD_DEXT | /* Extended descr type */ 2244 E1000_TXD_DTYP_D; /* Data descr */ 2245 2246 /* Save the information for this csum offloading context */ 2247 sc->csum_ehlen = ehdrlen; 2248 sc->csum_iphlen = ip_hlen; 2249 sc->csum_flags = csum_flags; 2250 sc->csum_txd_upper = *txd_upper; 2251 sc->csum_txd_lower = *txd_lower; 2252 2253 TXD->tcp_seg_setup.data = htole32(0); 2254 TXD->cmd_and_length = 2255 htole32(E1000_TXD_CMD_IFCS | E1000_TXD_CMD_DEXT | cmd); 2256 2257 if (++curr_txd == sc->num_tx_desc) 2258 curr_txd = 0; 2259 2260 KKASSERT(sc->num_tx_desc_avail > 0); 2261 sc->num_tx_desc_avail--; 2262 2263 sc->next_avail_tx_desc = curr_txd; 2264 return 1; 2265 } 2266 2267 static int 2268 emx_txcsum_pullup(struct emx_softc *sc, struct mbuf **m0) 2269 { 2270 struct mbuf *m = *m0; 2271 struct ether_header *eh; 2272 int len; 2273 2274 sc->tx_csum_try_pullup++; 2275 2276 len = ETHER_HDR_LEN + EMX_IPVHL_SIZE; 2277 2278 if (__predict_false(!M_WRITABLE(m))) { 2279 if (__predict_false(m->m_len < ETHER_HDR_LEN)) { 2280 sc->tx_csum_drop1++; 2281 m_freem(m); 2282 *m0 = NULL; 2283 return ENOBUFS; 2284 } 2285 eh = mtod(m, struct ether_header *); 2286 2287 if (eh->ether_type == htons(ETHERTYPE_VLAN)) 2288 len += EVL_ENCAPLEN; 2289 2290 if (m->m_len < len) { 2291 sc->tx_csum_drop2++; 2292 m_freem(m); 2293 *m0 = NULL; 2294 return ENOBUFS; 2295 } 2296 return 0; 2297 } 2298 2299 if (__predict_false(m->m_len < ETHER_HDR_LEN)) { 2300 sc->tx_csum_pullup1++; 2301 m = m_pullup(m, ETHER_HDR_LEN); 2302 if (m == NULL) { 2303 sc->tx_csum_pullup1_failed++; 2304 *m0 = NULL; 2305 return ENOBUFS; 2306 } 2307 *m0 = m; 2308 } 2309 eh = mtod(m, struct ether_header *); 2310 2311 if (eh->ether_type == htons(ETHERTYPE_VLAN)) 2312 len += EVL_ENCAPLEN; 2313 2314 if (m->m_len < len) { 2315 sc->tx_csum_pullup2++; 2316 m = m_pullup(m, len); 2317 if (m == NULL) { 2318 sc->tx_csum_pullup2_failed++; 2319 *m0 = NULL; 2320 return ENOBUFS; 2321 } 2322 *m0 = m; 2323 } 2324 return 0; 2325 } 2326 2327 static void 2328 emx_txeof(struct emx_softc *sc) 2329 { 2330 struct ifnet *ifp = &sc->arpcom.ac_if; 2331 struct emx_txbuf *tx_buffer; 2332 int first, num_avail; 2333 2334 if (sc->tx_dd_head == sc->tx_dd_tail) 2335 return; 2336 2337 if (sc->num_tx_desc_avail == sc->num_tx_desc) 2338 return; 2339 2340 num_avail = sc->num_tx_desc_avail; 2341 first = sc->next_tx_to_clean; 2342 2343 while (sc->tx_dd_head != sc->tx_dd_tail) { 2344 int dd_idx = sc->tx_dd[sc->tx_dd_head]; 2345 struct e1000_tx_desc *tx_desc; 2346 2347 tx_desc = &sc->tx_desc_base[dd_idx]; 2348 if (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) { 2349 EMX_INC_TXDD_IDX(sc->tx_dd_head); 2350 2351 if (++dd_idx == sc->num_tx_desc) 2352 dd_idx = 0; 2353 2354 while (first != dd_idx) { 2355 logif(pkt_txclean); 2356 2357 num_avail++; 2358 2359 tx_buffer = &sc->tx_buf[first]; 2360 if (tx_buffer->m_head) { 2361 ifp->if_opackets++; 2362 bus_dmamap_unload(sc->txtag, 2363 tx_buffer->map); 2364 m_freem(tx_buffer->m_head); 2365 tx_buffer->m_head = NULL; 2366 } 2367 2368 if (++first == sc->num_tx_desc) 2369 first = 0; 2370 } 2371 } else { 2372 break; 2373 } 2374 } 2375 sc->next_tx_to_clean = first; 2376 sc->num_tx_desc_avail = num_avail; 2377 2378 if (sc->tx_dd_head == sc->tx_dd_tail) { 2379 sc->tx_dd_head = 0; 2380 
sc->tx_dd_tail = 0; 2381 } 2382 2383 if (!EMX_IS_OACTIVE(sc)) { 2384 ifp->if_flags &= ~IFF_OACTIVE; 2385 2386 /* All clean, turn off the timer */ 2387 if (sc->num_tx_desc_avail == sc->num_tx_desc) 2388 ifp->if_timer = 0; 2389 } 2390 } 2391 2392 static void 2393 emx_tx_collect(struct emx_softc *sc) 2394 { 2395 struct ifnet *ifp = &sc->arpcom.ac_if; 2396 struct emx_txbuf *tx_buffer; 2397 int tdh, first, num_avail, dd_idx = -1; 2398 2399 if (sc->num_tx_desc_avail == sc->num_tx_desc) 2400 return; 2401 2402 tdh = E1000_READ_REG(&sc->hw, E1000_TDH(0)); 2403 if (tdh == sc->next_tx_to_clean) 2404 return; 2405 2406 if (sc->tx_dd_head != sc->tx_dd_tail) 2407 dd_idx = sc->tx_dd[sc->tx_dd_head]; 2408 2409 num_avail = sc->num_tx_desc_avail; 2410 first = sc->next_tx_to_clean; 2411 2412 while (first != tdh) { 2413 logif(pkt_txclean); 2414 2415 num_avail++; 2416 2417 tx_buffer = &sc->tx_buf[first]; 2418 if (tx_buffer->m_head) { 2419 ifp->if_opackets++; 2420 bus_dmamap_unload(sc->txtag, 2421 tx_buffer->map); 2422 m_freem(tx_buffer->m_head); 2423 tx_buffer->m_head = NULL; 2424 } 2425 2426 if (first == dd_idx) { 2427 EMX_INC_TXDD_IDX(sc->tx_dd_head); 2428 if (sc->tx_dd_head == sc->tx_dd_tail) { 2429 sc->tx_dd_head = 0; 2430 sc->tx_dd_tail = 0; 2431 dd_idx = -1; 2432 } else { 2433 dd_idx = sc->tx_dd[sc->tx_dd_head]; 2434 } 2435 } 2436 2437 if (++first == sc->num_tx_desc) 2438 first = 0; 2439 } 2440 sc->next_tx_to_clean = first; 2441 sc->num_tx_desc_avail = num_avail; 2442 2443 if (!EMX_IS_OACTIVE(sc)) { 2444 ifp->if_flags &= ~IFF_OACTIVE; 2445 2446 /* All clean, turn off the timer */ 2447 if (sc->num_tx_desc_avail == sc->num_tx_desc) 2448 ifp->if_timer = 0; 2449 } 2450 } 2451 2452 /* 2453 * When Link is lost sometimes there is work still in the TX ring 2454 * which will result in a watchdog, rather than allow that do an 2455 * attempted cleanup and then reinit here. Note that this has been 2456 * seens mostly with fiber adapters. 2457 */ 2458 static void 2459 emx_tx_purge(struct emx_softc *sc) 2460 { 2461 struct ifnet *ifp = &sc->arpcom.ac_if; 2462 2463 if (!sc->link_active && ifp->if_timer) { 2464 emx_tx_collect(sc); 2465 if (ifp->if_timer) { 2466 if_printf(ifp, "Link lost, TX pending, reinit\n"); 2467 ifp->if_timer = 0; 2468 emx_init(sc); 2469 } 2470 } 2471 } 2472 2473 static int 2474 emx_newbuf(struct emx_softc *sc, struct emx_rxdata *rdata, int i, int init) 2475 { 2476 struct mbuf *m; 2477 bus_dma_segment_t seg; 2478 bus_dmamap_t map; 2479 struct emx_rxbuf *rx_buffer; 2480 int error, nseg; 2481 2482 m = m_getcl(init ? 
MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR); 2483 if (m == NULL) { 2484 rdata->mbuf_cluster_failed++; 2485 if (init) { 2486 if_printf(&sc->arpcom.ac_if, 2487 "Unable to allocate RX mbuf\n"); 2488 } 2489 return (ENOBUFS); 2490 } 2491 m->m_len = m->m_pkthdr.len = MCLBYTES; 2492 2493 if (sc->max_frame_size <= MCLBYTES - ETHER_ALIGN) 2494 m_adj(m, ETHER_ALIGN); 2495 2496 error = bus_dmamap_load_mbuf_segment(rdata->rxtag, 2497 rdata->rx_sparemap, m, 2498 &seg, 1, &nseg, BUS_DMA_NOWAIT); 2499 if (error) { 2500 m_freem(m); 2501 if (init) { 2502 if_printf(&sc->arpcom.ac_if, 2503 "Unable to load RX mbuf\n"); 2504 } 2505 return (error); 2506 } 2507 2508 rx_buffer = &rdata->rx_buf[i]; 2509 if (rx_buffer->m_head != NULL) 2510 bus_dmamap_unload(rdata->rxtag, rx_buffer->map); 2511 2512 map = rx_buffer->map; 2513 rx_buffer->map = rdata->rx_sparemap; 2514 rdata->rx_sparemap = map; 2515 2516 rx_buffer->m_head = m; 2517 rx_buffer->paddr = seg.ds_addr; 2518 2519 emx_setup_rxdesc(&rdata->rx_desc[i], rx_buffer); 2520 return (0); 2521 } 2522 2523 static int 2524 emx_create_rx_ring(struct emx_softc *sc, struct emx_rxdata *rdata) 2525 { 2526 device_t dev = sc->dev; 2527 struct emx_rxbuf *rx_buffer; 2528 int i, error, rsize; 2529 2530 /* 2531 * Validate number of receive descriptors. It must not exceed 2532 * hardware maximum, and must be multiple of E1000_DBA_ALIGN. 2533 */ 2534 if ((emx_rxd * sizeof(emx_rxdesc_t)) % EMX_DBA_ALIGN != 0 || 2535 emx_rxd > EMX_MAX_RXD || emx_rxd < EMX_MIN_RXD) { 2536 device_printf(dev, "Using %d RX descriptors instead of %d!\n", 2537 EMX_DEFAULT_RXD, emx_rxd); 2538 rdata->num_rx_desc = EMX_DEFAULT_RXD; 2539 } else { 2540 rdata->num_rx_desc = emx_rxd; 2541 } 2542 2543 /* 2544 * Allocate Receive Descriptor ring 2545 */ 2546 rsize = roundup2(rdata->num_rx_desc * sizeof(emx_rxdesc_t), 2547 EMX_DBA_ALIGN); 2548 rdata->rx_desc = bus_dmamem_coherent_any(sc->parent_dtag, 2549 EMX_DBA_ALIGN, rsize, BUS_DMA_WAITOK, 2550 &rdata->rx_desc_dtag, &rdata->rx_desc_dmap, 2551 &rdata->rx_desc_paddr); 2552 if (rdata->rx_desc == NULL) { 2553 device_printf(dev, "Unable to allocate rx_desc memory\n"); 2554 return ENOMEM; 2555 } 2556 2557 rdata->rx_buf = kmalloc(sizeof(struct emx_rxbuf) * rdata->num_rx_desc, 2558 M_DEVBUF, M_WAITOK | M_ZERO); 2559 2560 /* 2561 * Create DMA tag for rx buffers 2562 */ 2563 error = bus_dma_tag_create(sc->parent_dtag, /* parent */ 2564 1, 0, /* alignment, bounds */ 2565 BUS_SPACE_MAXADDR, /* lowaddr */ 2566 BUS_SPACE_MAXADDR, /* highaddr */ 2567 NULL, NULL, /* filter, filterarg */ 2568 MCLBYTES, /* maxsize */ 2569 1, /* nsegments */ 2570 MCLBYTES, /* maxsegsize */ 2571 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, /* flags */ 2572 &rdata->rxtag); 2573 if (error) { 2574 device_printf(dev, "Unable to allocate RX DMA tag\n"); 2575 kfree(rdata->rx_buf, M_DEVBUF); 2576 rdata->rx_buf = NULL; 2577 return error; 2578 } 2579 2580 /* 2581 * Create spare DMA map for rx buffers 2582 */ 2583 error = bus_dmamap_create(rdata->rxtag, BUS_DMA_WAITOK, 2584 &rdata->rx_sparemap); 2585 if (error) { 2586 device_printf(dev, "Unable to create spare RX DMA map\n"); 2587 bus_dma_tag_destroy(rdata->rxtag); 2588 kfree(rdata->rx_buf, M_DEVBUF); 2589 rdata->rx_buf = NULL; 2590 return error; 2591 } 2592 2593 /* 2594 * Create DMA maps for rx buffers 2595 */ 2596 for (i = 0; i < rdata->num_rx_desc; i++) { 2597 rx_buffer = &rdata->rx_buf[i]; 2598 2599 error = bus_dmamap_create(rdata->rxtag, BUS_DMA_WAITOK, 2600 &rx_buffer->map); 2601 if (error) { 2602 device_printf(dev, "Unable to create RX DMA map\n"); 2603 
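/* Tear down the descriptor ring, the spare map and the i maps created so far */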
emx_destroy_rx_ring(sc, rdata, i); 2604 return error; 2605 } 2606 } 2607 return (0); 2608 } 2609 2610 static void 2611 emx_free_rx_ring(struct emx_softc *sc, struct emx_rxdata *rdata) 2612 { 2613 int i; 2614 2615 for (i = 0; i < rdata->num_rx_desc; i++) { 2616 struct emx_rxbuf *rx_buffer = &rdata->rx_buf[i]; 2617 2618 if (rx_buffer->m_head != NULL) { 2619 bus_dmamap_unload(rdata->rxtag, rx_buffer->map); 2620 m_freem(rx_buffer->m_head); 2621 rx_buffer->m_head = NULL; 2622 } 2623 } 2624 2625 if (rdata->fmp != NULL) 2626 m_freem(rdata->fmp); 2627 rdata->fmp = NULL; 2628 rdata->lmp = NULL; 2629 } 2630 2631 static int 2632 emx_init_rx_ring(struct emx_softc *sc, struct emx_rxdata *rdata) 2633 { 2634 int i, error; 2635 2636 /* Reset descriptor ring */ 2637 bzero(rdata->rx_desc, sizeof(emx_rxdesc_t) * rdata->num_rx_desc); 2638 2639 /* Allocate new ones. */ 2640 for (i = 0; i < rdata->num_rx_desc; i++) { 2641 error = emx_newbuf(sc, rdata, i, 1); 2642 if (error) 2643 return (error); 2644 } 2645 2646 /* Setup our descriptor pointers */ 2647 rdata->next_rx_desc_to_check = 0; 2648 2649 return (0); 2650 } 2651 2652 static void 2653 emx_init_rx_unit(struct emx_softc *sc) 2654 { 2655 struct ifnet *ifp = &sc->arpcom.ac_if; 2656 uint64_t bus_addr; 2657 uint32_t rctl, rxcsum, rfctl; 2658 int i; 2659 2660 /* 2661 * Make sure receives are disabled while setting 2662 * up the descriptor ring 2663 */ 2664 rctl = E1000_READ_REG(&sc->hw, E1000_RCTL); 2665 E1000_WRITE_REG(&sc->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); 2666 2667 /* 2668 * Set the interrupt throttling rate. Value is calculated 2669 * as ITR = 1 / (INT_THROTTLE_CEIL * 256ns) 2670 */ 2671 if (sc->int_throttle_ceil) { 2672 E1000_WRITE_REG(&sc->hw, E1000_ITR, 2673 1000000000 / 256 / sc->int_throttle_ceil); 2674 } else { 2675 E1000_WRITE_REG(&sc->hw, E1000_ITR, 0); 2676 } 2677 2678 /* Use extended RX descriptor */ 2679 rfctl = E1000_RFCTL_EXTEN; 2680 2681 /* Disable accelerated ackknowledge */ 2682 if (sc->hw.mac.type == e1000_82574) 2683 rfctl |= E1000_RFCTL_ACK_DIS; 2684 2685 E1000_WRITE_REG(&sc->hw, E1000_RFCTL, rfctl); 2686 2687 /* Setup the Base and Length of the Rx Descriptor Ring */ 2688 for (i = 0; i < sc->rx_ring_inuse; ++i) { 2689 struct emx_rxdata *rdata = &sc->rx_data[i]; 2690 2691 bus_addr = rdata->rx_desc_paddr; 2692 E1000_WRITE_REG(&sc->hw, E1000_RDLEN(i), 2693 rdata->num_rx_desc * sizeof(emx_rxdesc_t)); 2694 E1000_WRITE_REG(&sc->hw, E1000_RDBAH(i), 2695 (uint32_t)(bus_addr >> 32)); 2696 E1000_WRITE_REG(&sc->hw, E1000_RDBAL(i), 2697 (uint32_t)bus_addr); 2698 } 2699 2700 /* Setup the Receive Control Register */ 2701 rctl &= ~(3 << E1000_RCTL_MO_SHIFT); 2702 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO | 2703 E1000_RCTL_RDMTS_HALF | E1000_RCTL_SECRC | 2704 (sc->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); 2705 2706 /* Make sure VLAN Filters are off */ 2707 rctl &= ~E1000_RCTL_VFE; 2708 2709 /* Don't store bad paket */ 2710 rctl &= ~E1000_RCTL_SBP; 2711 2712 /* MCLBYTES */ 2713 rctl |= E1000_RCTL_SZ_2048; 2714 2715 if (ifp->if_mtu > ETHERMTU) 2716 rctl |= E1000_RCTL_LPE; 2717 else 2718 rctl &= ~E1000_RCTL_LPE; 2719 2720 /* 2721 * Receive Checksum Offload for TCP and UDP 2722 * 2723 * Checksum offloading is also enabled if multiple receive 2724 * queue is to be supported, since we need it to figure out 2725 * packet type. 
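 * That is why the test below also checks IFCAP_RSS: with RSS enabled the
 * checksum unit is turned on even if RXCSUM was disabled by the user, and
 * PCSD is set so the extended descriptor reports the RSS hash rather than
 * the raw packet checksum.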
2726 */ 2727 if (ifp->if_capenable & (IFCAP_RSS | IFCAP_RXCSUM)) { 2728 rxcsum = E1000_READ_REG(&sc->hw, E1000_RXCSUM); 2729 2730 /* 2731 * NOTE: 2732 * PCSD must be enabled to enable multiple 2733 * receive queues. 2734 */ 2735 rxcsum |= E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL | 2736 E1000_RXCSUM_PCSD; 2737 E1000_WRITE_REG(&sc->hw, E1000_RXCSUM, rxcsum); 2738 } 2739 2740 /* 2741 * Configure multiple receive queue (RSS) 2742 */ 2743 if (ifp->if_capenable & IFCAP_RSS) { 2744 uint8_t key[EMX_NRSSRK * EMX_RSSRK_SIZE]; 2745 uint32_t reta; 2746 2747 KASSERT(sc->rx_ring_inuse == EMX_NRX_RING, 2748 ("invalid number of RX ring (%d)", 2749 sc->rx_ring_inuse)); 2750 2751 /* 2752 * NOTE: 2753 * When we reach here, RSS has already been disabled 2754 * in emx_stop(), so we could safely configure RSS key 2755 * and redirect table. 2756 */ 2757 2758 /* 2759 * Configure RSS key 2760 */ 2761 toeplitz_get_key(key, sizeof(key)); 2762 for (i = 0; i < EMX_NRSSRK; ++i) { 2763 uint32_t rssrk; 2764 2765 rssrk = EMX_RSSRK_VAL(key, i); 2766 EMX_RSS_DPRINTF(sc, 1, "rssrk%d 0x%08x\n", i, rssrk); 2767 2768 E1000_WRITE_REG(&sc->hw, E1000_RSSRK(i), rssrk); 2769 } 2770 2771 /* 2772 * Configure RSS redirect table in following fashion: 2773 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)] 2774 */ 2775 reta = 0; 2776 for (i = 0; i < EMX_RETA_SIZE; ++i) { 2777 uint32_t q; 2778 2779 q = (i % sc->rx_ring_inuse) << EMX_RETA_RINGIDX_SHIFT; 2780 reta |= q << (8 * i); 2781 } 2782 EMX_RSS_DPRINTF(sc, 1, "reta 0x%08x\n", reta); 2783 2784 for (i = 0; i < EMX_NRETA; ++i) 2785 E1000_WRITE_REG(&sc->hw, E1000_RETA(i), reta); 2786 2787 /* 2788 * Enable multiple receive queues. 2789 * Enable IPv4 RSS standard hash functions. 2790 * Disable RSS interrupt. 2791 */ 2792 E1000_WRITE_REG(&sc->hw, E1000_MRQC, 2793 E1000_MRQC_ENABLE_RSS_2Q | 2794 E1000_MRQC_RSS_FIELD_IPV4_TCP | 2795 E1000_MRQC_RSS_FIELD_IPV4); 2796 } 2797 2798 /* 2799 * XXX TEMPORARY WORKAROUND: on some systems with 82573 2800 * long latencies are observed, like Lenovo X60. This 2801 * change eliminates the problem, but since having positive 2802 * values in RDTR is a known source of problems on other 2803 * platforms another solution is being sought. 
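 * The workaround below only runs when emx_82573_workaround is set and
 * simply programs non-zero receive interrupt delays (EMX_RADV_82573 and
 * EMX_RDTR_82573).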
2804 */ 2805 if (emx_82573_workaround && sc->hw.mac.type == e1000_82573) { 2806 E1000_WRITE_REG(&sc->hw, E1000_RADV, EMX_RADV_82573); 2807 E1000_WRITE_REG(&sc->hw, E1000_RDTR, EMX_RDTR_82573); 2808 } 2809 2810 /* 2811 * Setup the HW Rx Head and Tail Descriptor Pointers 2812 */ 2813 for (i = 0; i < sc->rx_ring_inuse; ++i) { 2814 E1000_WRITE_REG(&sc->hw, E1000_RDH(i), 0); 2815 E1000_WRITE_REG(&sc->hw, E1000_RDT(i), 2816 sc->rx_data[i].num_rx_desc - 1); 2817 } 2818 2819 /* Enable Receives */ 2820 E1000_WRITE_REG(&sc->hw, E1000_RCTL, rctl); 2821 } 2822 2823 static void 2824 emx_destroy_rx_ring(struct emx_softc *sc, struct emx_rxdata *rdata, int ndesc) 2825 { 2826 struct emx_rxbuf *rx_buffer; 2827 int i; 2828 2829 /* Free Receive Descriptor ring */ 2830 if (rdata->rx_desc) { 2831 bus_dmamap_unload(rdata->rx_desc_dtag, rdata->rx_desc_dmap); 2832 bus_dmamem_free(rdata->rx_desc_dtag, rdata->rx_desc, 2833 rdata->rx_desc_dmap); 2834 bus_dma_tag_destroy(rdata->rx_desc_dtag); 2835 2836 rdata->rx_desc = NULL; 2837 } 2838 2839 if (rdata->rx_buf == NULL) 2840 return; 2841 2842 for (i = 0; i < ndesc; i++) { 2843 rx_buffer = &rdata->rx_buf[i]; 2844 2845 KKASSERT(rx_buffer->m_head == NULL); 2846 bus_dmamap_destroy(rdata->rxtag, rx_buffer->map); 2847 } 2848 bus_dmamap_destroy(rdata->rxtag, rdata->rx_sparemap); 2849 bus_dma_tag_destroy(rdata->rxtag); 2850 2851 kfree(rdata->rx_buf, M_DEVBUF); 2852 rdata->rx_buf = NULL; 2853 } 2854 2855 static void 2856 emx_rxeof(struct emx_softc *sc, int ring_idx, int count) 2857 { 2858 struct emx_rxdata *rdata = &sc->rx_data[ring_idx]; 2859 struct ifnet *ifp = &sc->arpcom.ac_if; 2860 uint32_t staterr; 2861 emx_rxdesc_t *current_desc; 2862 struct mbuf *mp; 2863 int i; 2864 struct mbuf_chain chain[MAXCPU]; 2865 2866 i = rdata->next_rx_desc_to_check; 2867 current_desc = &rdata->rx_desc[i]; 2868 staterr = le32toh(current_desc->rxd_staterr); 2869 2870 if (!(staterr & E1000_RXD_STAT_DD)) 2871 return; 2872 2873 ether_input_chain_init(chain); 2874 2875 while ((staterr & E1000_RXD_STAT_DD) && count != 0) { 2876 struct pktinfo *pi = NULL, pi0; 2877 struct emx_rxbuf *rx_buf = &rdata->rx_buf[i]; 2878 struct mbuf *m = NULL; 2879 int eop, len; 2880 2881 logif(pkt_receive); 2882 2883 mp = rx_buf->m_head; 2884 2885 /* 2886 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT 2887 * needs to access the last received byte in the mbuf. 2888 */ 2889 bus_dmamap_sync(rdata->rxtag, rx_buf->map, 2890 BUS_DMASYNC_POSTREAD); 2891 2892 len = le16toh(current_desc->rxd_length); 2893 if (staterr & E1000_RXD_STAT_EOP) { 2894 count--; 2895 eop = 1; 2896 } else { 2897 eop = 0; 2898 } 2899 2900 if (!(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) { 2901 uint16_t vlan = 0; 2902 uint32_t mrq, rss_hash; 2903 2904 /* 2905 * Save several necessary information, 2906 * before emx_newbuf() destroy it. 
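 * (emx_newbuf() replaces rx_buf->m_head and rewrites this descriptor via
 * emx_setup_rxdesc(), so the VLAN tag, MRQ and RSS hash must be copied
 * out of the descriptor first.)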
2907 */ 2908 if ((staterr & E1000_RXD_STAT_VP) && eop) 2909 vlan = le16toh(current_desc->rxd_vlan); 2910 2911 mrq = le32toh(current_desc->rxd_mrq); 2912 rss_hash = le32toh(current_desc->rxd_rss); 2913 2914 EMX_RSS_DPRINTF(sc, 10, 2915 "ring%d, mrq 0x%08x, rss_hash 0x%08x\n", 2916 ring_idx, mrq, rss_hash); 2917 2918 if (emx_newbuf(sc, rdata, i, 0) != 0) { 2919 ifp->if_iqdrops++; 2920 goto discard; 2921 } 2922 2923 /* Assign correct length to the current fragment */ 2924 mp->m_len = len; 2925 2926 if (rdata->fmp == NULL) { 2927 mp->m_pkthdr.len = len; 2928 rdata->fmp = mp; /* Store the first mbuf */ 2929 rdata->lmp = mp; 2930 } else { 2931 /* 2932 * Chain mbuf's together 2933 */ 2934 rdata->lmp->m_next = mp; 2935 rdata->lmp = rdata->lmp->m_next; 2936 rdata->fmp->m_pkthdr.len += len; 2937 } 2938 2939 if (eop) { 2940 rdata->fmp->m_pkthdr.rcvif = ifp; 2941 ifp->if_ipackets++; 2942 2943 if (ifp->if_capenable & IFCAP_RXCSUM) 2944 emx_rxcsum(staterr, rdata->fmp); 2945 2946 if (staterr & E1000_RXD_STAT_VP) { 2947 rdata->fmp->m_pkthdr.ether_vlantag = 2948 vlan; 2949 rdata->fmp->m_flags |= M_VLANTAG; 2950 } 2951 m = rdata->fmp; 2952 rdata->fmp = NULL; 2953 rdata->lmp = NULL; 2954 2955 if (ifp->if_capenable & IFCAP_RSS) { 2956 pi = emx_rssinfo(m, &pi0, mrq, 2957 rss_hash, staterr); 2958 } 2959 #ifdef EMX_RSS_DEBUG 2960 rdata->rx_pkts++; 2961 #endif 2962 } 2963 } else { 2964 ifp->if_ierrors++; 2965 discard: 2966 emx_setup_rxdesc(current_desc, rx_buf); 2967 if (rdata->fmp != NULL) { 2968 m_freem(rdata->fmp); 2969 rdata->fmp = NULL; 2970 rdata->lmp = NULL; 2971 } 2972 m = NULL; 2973 } 2974 2975 if (m != NULL) 2976 ether_input_chain(ifp, m, pi, chain); 2977 2978 /* Advance our pointers to the next descriptor. */ 2979 if (++i == rdata->num_rx_desc) 2980 i = 0; 2981 2982 current_desc = &rdata->rx_desc[i]; 2983 staterr = le32toh(current_desc->rxd_staterr); 2984 } 2985 rdata->next_rx_desc_to_check = i; 2986 2987 ether_input_dispatch(chain); 2988 2989 /* Advance the E1000's Receive Queue "Tail Pointer". */ 2990 if (--i < 0) 2991 i = rdata->num_rx_desc - 1; 2992 E1000_WRITE_REG(&sc->hw, E1000_RDT(ring_idx), i); 2993 } 2994 2995 static void 2996 emx_enable_intr(struct emx_softc *sc) 2997 { 2998 lwkt_serialize_handler_enable(&sc->main_serialize); 2999 E1000_WRITE_REG(&sc->hw, E1000_IMS, IMS_ENABLE_MASK); 3000 } 3001 3002 static void 3003 emx_disable_intr(struct emx_softc *sc) 3004 { 3005 E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff); 3006 lwkt_serialize_handler_disable(&sc->main_serialize); 3007 } 3008 3009 /* 3010 * Bit of a misnomer, what this really means is 3011 * to enable OS management of the system... aka 3012 * to disable special hardware management features 3013 */ 3014 static void 3015 emx_get_mgmt(struct emx_softc *sc) 3016 { 3017 /* A shared code workaround */ 3018 if (sc->has_manage) { 3019 int manc2h = E1000_READ_REG(&sc->hw, E1000_MANC2H); 3020 int manc = E1000_READ_REG(&sc->hw, E1000_MANC); 3021 3022 /* disable hardware interception of ARP */ 3023 manc &= ~(E1000_MANC_ARP_EN); 3024 3025 /* enable receiving management packets to the host */ 3026 manc |= E1000_MANC_EN_MNG2HOST; 3027 #define E1000_MNG2HOST_PORT_623 (1 << 5) 3028 #define E1000_MNG2HOST_PORT_664 (1 << 6) 3029 manc2h |= E1000_MNG2HOST_PORT_623; 3030 manc2h |= E1000_MNG2HOST_PORT_664; 3031 E1000_WRITE_REG(&sc->hw, E1000_MANC2H, manc2h); 3032 3033 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc); 3034 } 3035 } 3036 3037 /* 3038 * Give control back to hardware management 3039 * controller if there is one. 
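 * This is the inverse of emx_get_mgmt(): ARP interception by the
 * management firmware is re-enabled and forwarding of management packets
 * to the host is turned off.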
3040 */ 3041 static void 3042 emx_rel_mgmt(struct emx_softc *sc) 3043 { 3044 if (sc->has_manage) { 3045 int manc = E1000_READ_REG(&sc->hw, E1000_MANC); 3046 3047 /* re-enable hardware interception of ARP */ 3048 manc |= E1000_MANC_ARP_EN; 3049 manc &= ~E1000_MANC_EN_MNG2HOST; 3050 3051 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc); 3052 } 3053 } 3054 3055 /* 3056 * emx_get_hw_control() sets {CTRL_EXT|FWSM}:DRV_LOAD bit. 3057 * For ASF and Pass Through versions of f/w this means that 3058 * the driver is loaded. For AMT version (only with 82573) 3059 * of the f/w this means that the network i/f is open. 3060 */ 3061 static void 3062 emx_get_hw_control(struct emx_softc *sc) 3063 { 3064 uint32_t ctrl_ext, swsm; 3065 3066 /* Let firmware know the driver has taken over */ 3067 switch (sc->hw.mac.type) { 3068 case e1000_82573: 3069 swsm = E1000_READ_REG(&sc->hw, E1000_SWSM); 3070 E1000_WRITE_REG(&sc->hw, E1000_SWSM, 3071 swsm | E1000_SWSM_DRV_LOAD); 3072 break; 3073 3074 case e1000_82571: 3075 case e1000_82572: 3076 case e1000_80003es2lan: 3077 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT); 3078 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, 3079 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); 3080 break; 3081 3082 default: 3083 break; 3084 } 3085 } 3086 3087 /* 3088 * emx_rel_hw_control() resets {CTRL_EXT|FWSM}:DRV_LOAD bit. 3089 * For ASF and Pass Through versions of f/w this means that the 3090 * driver is no longer loaded. For AMT version (only with 82573) 3091 * of the f/w this means that the network i/f is closed. 3092 */ 3093 static void 3094 emx_rel_hw_control(struct emx_softc *sc) 3095 { 3096 uint32_t ctrl_ext, swsm; 3097 3098 /* Let firmware taken over control of h/w */ 3099 switch (sc->hw.mac.type) { 3100 case e1000_82573: 3101 swsm = E1000_READ_REG(&sc->hw, E1000_SWSM); 3102 E1000_WRITE_REG(&sc->hw, E1000_SWSM, 3103 swsm & ~E1000_SWSM_DRV_LOAD); 3104 break; 3105 3106 case e1000_82571: 3107 case e1000_82572: 3108 case e1000_80003es2lan: 3109 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT); 3110 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, 3111 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); 3112 break; 3113 3114 default: 3115 break; 3116 } 3117 } 3118 3119 static int 3120 emx_is_valid_eaddr(const uint8_t *addr) 3121 { 3122 char zero_addr[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 }; 3123 3124 if ((addr[0] & 1) || !bcmp(addr, zero_addr, ETHER_ADDR_LEN)) 3125 return (FALSE); 3126 3127 return (TRUE); 3128 } 3129 3130 /* 3131 * Enable PCI Wake On Lan capability 3132 */ 3133 void 3134 emx_enable_wol(device_t dev) 3135 { 3136 uint16_t cap, status; 3137 uint8_t id; 3138 3139 /* First find the capabilities pointer*/ 3140 cap = pci_read_config(dev, PCIR_CAP_PTR, 2); 3141 3142 /* Read the PM Capabilities */ 3143 id = pci_read_config(dev, cap, 1); 3144 if (id != PCIY_PMG) /* Something wrong */ 3145 return; 3146 3147 /* 3148 * OK, we have the power capabilities, 3149 * so now get the status register 3150 */ 3151 cap += PCIR_POWER_STATUS; 3152 status = pci_read_config(dev, cap, 2); 3153 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 3154 pci_write_config(dev, cap, status, 2); 3155 } 3156 3157 static void 3158 emx_update_stats(struct emx_softc *sc) 3159 { 3160 struct ifnet *ifp = &sc->arpcom.ac_if; 3161 3162 if (sc->hw.phy.media_type == e1000_media_type_copper || 3163 (E1000_READ_REG(&sc->hw, E1000_STATUS) & E1000_STATUS_LU)) { 3164 sc->stats.symerrs += E1000_READ_REG(&sc->hw, E1000_SYMERRS); 3165 sc->stats.sec += E1000_READ_REG(&sc->hw, E1000_SEC); 3166 } 3167 sc->stats.crcerrs += E1000_READ_REG(&sc->hw, E1000_CRCERRS); 3168 
sc->stats.mpc += E1000_READ_REG(&sc->hw, E1000_MPC); 3169 sc->stats.scc += E1000_READ_REG(&sc->hw, E1000_SCC); 3170 sc->stats.ecol += E1000_READ_REG(&sc->hw, E1000_ECOL); 3171 3172 sc->stats.mcc += E1000_READ_REG(&sc->hw, E1000_MCC); 3173 sc->stats.latecol += E1000_READ_REG(&sc->hw, E1000_LATECOL); 3174 sc->stats.colc += E1000_READ_REG(&sc->hw, E1000_COLC); 3175 sc->stats.dc += E1000_READ_REG(&sc->hw, E1000_DC); 3176 sc->stats.rlec += E1000_READ_REG(&sc->hw, E1000_RLEC); 3177 sc->stats.xonrxc += E1000_READ_REG(&sc->hw, E1000_XONRXC); 3178 sc->stats.xontxc += E1000_READ_REG(&sc->hw, E1000_XONTXC); 3179 sc->stats.xoffrxc += E1000_READ_REG(&sc->hw, E1000_XOFFRXC); 3180 sc->stats.xofftxc += E1000_READ_REG(&sc->hw, E1000_XOFFTXC); 3181 sc->stats.fcruc += E1000_READ_REG(&sc->hw, E1000_FCRUC); 3182 sc->stats.prc64 += E1000_READ_REG(&sc->hw, E1000_PRC64); 3183 sc->stats.prc127 += E1000_READ_REG(&sc->hw, E1000_PRC127); 3184 sc->stats.prc255 += E1000_READ_REG(&sc->hw, E1000_PRC255); 3185 sc->stats.prc511 += E1000_READ_REG(&sc->hw, E1000_PRC511); 3186 sc->stats.prc1023 += E1000_READ_REG(&sc->hw, E1000_PRC1023); 3187 sc->stats.prc1522 += E1000_READ_REG(&sc->hw, E1000_PRC1522); 3188 sc->stats.gprc += E1000_READ_REG(&sc->hw, E1000_GPRC); 3189 sc->stats.bprc += E1000_READ_REG(&sc->hw, E1000_BPRC); 3190 sc->stats.mprc += E1000_READ_REG(&sc->hw, E1000_MPRC); 3191 sc->stats.gptc += E1000_READ_REG(&sc->hw, E1000_GPTC); 3192 3193 /* For the 64-bit byte counters the low dword must be read first. */ 3194 /* Both registers clear on the read of the high dword */ 3195 3196 sc->stats.gorc += E1000_READ_REG(&sc->hw, E1000_GORCH); 3197 sc->stats.gotc += E1000_READ_REG(&sc->hw, E1000_GOTCH); 3198 3199 sc->stats.rnbc += E1000_READ_REG(&sc->hw, E1000_RNBC); 3200 sc->stats.ruc += E1000_READ_REG(&sc->hw, E1000_RUC); 3201 sc->stats.rfc += E1000_READ_REG(&sc->hw, E1000_RFC); 3202 sc->stats.roc += E1000_READ_REG(&sc->hw, E1000_ROC); 3203 sc->stats.rjc += E1000_READ_REG(&sc->hw, E1000_RJC); 3204 3205 sc->stats.tor += E1000_READ_REG(&sc->hw, E1000_TORH); 3206 sc->stats.tot += E1000_READ_REG(&sc->hw, E1000_TOTH); 3207 3208 sc->stats.tpr += E1000_READ_REG(&sc->hw, E1000_TPR); 3209 sc->stats.tpt += E1000_READ_REG(&sc->hw, E1000_TPT); 3210 sc->stats.ptc64 += E1000_READ_REG(&sc->hw, E1000_PTC64); 3211 sc->stats.ptc127 += E1000_READ_REG(&sc->hw, E1000_PTC127); 3212 sc->stats.ptc255 += E1000_READ_REG(&sc->hw, E1000_PTC255); 3213 sc->stats.ptc511 += E1000_READ_REG(&sc->hw, E1000_PTC511); 3214 sc->stats.ptc1023 += E1000_READ_REG(&sc->hw, E1000_PTC1023); 3215 sc->stats.ptc1522 += E1000_READ_REG(&sc->hw, E1000_PTC1522); 3216 sc->stats.mptc += E1000_READ_REG(&sc->hw, E1000_MPTC); 3217 sc->stats.bptc += E1000_READ_REG(&sc->hw, E1000_BPTC); 3218 3219 sc->stats.algnerrc += E1000_READ_REG(&sc->hw, E1000_ALGNERRC); 3220 sc->stats.rxerrc += E1000_READ_REG(&sc->hw, E1000_RXERRC); 3221 sc->stats.tncrs += E1000_READ_REG(&sc->hw, E1000_TNCRS); 3222 sc->stats.cexterr += E1000_READ_REG(&sc->hw, E1000_CEXTERR); 3223 sc->stats.tsctc += E1000_READ_REG(&sc->hw, E1000_TSCTC); 3224 sc->stats.tsctfc += E1000_READ_REG(&sc->hw, E1000_TSCTFC); 3225 3226 ifp->if_collisions = sc->stats.colc; 3227 3228 /* Rx Errors */ 3229 ifp->if_ierrors = sc->dropped_pkts + sc->stats.rxerrc + 3230 sc->stats.crcerrs + sc->stats.algnerrc + 3231 sc->stats.ruc + sc->stats.roc + 3232 sc->stats.mpc + sc->stats.cexterr; 3233 3234 /* Tx Errors */ 3235 ifp->if_oerrors = sc->stats.ecol + sc->stats.latecol + 3236 sc->watchdog_events; 3237 } 3238 3239 static void 3240 
emx_print_debug_info(struct emx_softc *sc) 3241 { 3242 device_t dev = sc->dev; 3243 uint8_t *hw_addr = sc->hw.hw_addr; 3244 3245 device_printf(dev, "Adapter hardware address = %p \n", hw_addr); 3246 device_printf(dev, "CTRL = 0x%x RCTL = 0x%x \n", 3247 E1000_READ_REG(&sc->hw, E1000_CTRL), 3248 E1000_READ_REG(&sc->hw, E1000_RCTL)); 3249 device_printf(dev, "Packet buffer = Tx=%dk Rx=%dk \n", 3250 ((E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff0000) >> 16),\ 3251 (E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff) ); 3252 device_printf(dev, "Flow control watermarks high = %d low = %d\n", 3253 sc->hw.fc.high_water, sc->hw.fc.low_water); 3254 device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n", 3255 E1000_READ_REG(&sc->hw, E1000_TIDV), 3256 E1000_READ_REG(&sc->hw, E1000_TADV)); 3257 device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n", 3258 E1000_READ_REG(&sc->hw, E1000_RDTR), 3259 E1000_READ_REG(&sc->hw, E1000_RADV)); 3260 device_printf(dev, "hw tdh = %d, hw tdt = %d\n", 3261 E1000_READ_REG(&sc->hw, E1000_TDH(0)), 3262 E1000_READ_REG(&sc->hw, E1000_TDT(0))); 3263 device_printf(dev, "hw rdh = %d, hw rdt = %d\n", 3264 E1000_READ_REG(&sc->hw, E1000_RDH(0)), 3265 E1000_READ_REG(&sc->hw, E1000_RDT(0))); 3266 device_printf(dev, "Num Tx descriptors avail = %d\n", 3267 sc->num_tx_desc_avail); 3268 device_printf(dev, "Tx Descriptors not avail1 = %ld\n", 3269 sc->no_tx_desc_avail1); 3270 device_printf(dev, "Tx Descriptors not avail2 = %ld\n", 3271 sc->no_tx_desc_avail2); 3272 device_printf(dev, "Std mbuf failed = %ld\n", 3273 sc->mbuf_alloc_failed); 3274 device_printf(dev, "Std mbuf cluster failed = %ld\n", 3275 sc->rx_data[0].mbuf_cluster_failed); 3276 device_printf(dev, "Driver dropped packets = %ld\n", 3277 sc->dropped_pkts); 3278 device_printf(dev, "Driver tx dma failure in encap = %ld\n", 3279 sc->no_tx_dma_setup); 3280 3281 device_printf(dev, "TXCSUM try pullup = %lu\n", 3282 sc->tx_csum_try_pullup); 3283 device_printf(dev, "TXCSUM m_pullup(eh) called = %lu\n", 3284 sc->tx_csum_pullup1); 3285 device_printf(dev, "TXCSUM m_pullup(eh) failed = %lu\n", 3286 sc->tx_csum_pullup1_failed); 3287 device_printf(dev, "TXCSUM m_pullup(eh+ip) called = %lu\n", 3288 sc->tx_csum_pullup2); 3289 device_printf(dev, "TXCSUM m_pullup(eh+ip) failed = %lu\n", 3290 sc->tx_csum_pullup2_failed); 3291 device_printf(dev, "TXCSUM non-writable(eh) droped = %lu\n", 3292 sc->tx_csum_drop1); 3293 device_printf(dev, "TXCSUM non-writable(eh+ip) droped = %lu\n", 3294 sc->tx_csum_drop2); 3295 } 3296 3297 static void 3298 emx_print_hw_stats(struct emx_softc *sc) 3299 { 3300 device_t dev = sc->dev; 3301 3302 device_printf(dev, "Excessive collisions = %lld\n", 3303 (long long)sc->stats.ecol); 3304 #if (DEBUG_HW > 0) /* Dont output these errors normally */ 3305 device_printf(dev, "Symbol errors = %lld\n", 3306 (long long)sc->stats.symerrs); 3307 #endif 3308 device_printf(dev, "Sequence errors = %lld\n", 3309 (long long)sc->stats.sec); 3310 device_printf(dev, "Defer count = %lld\n", 3311 (long long)sc->stats.dc); 3312 device_printf(dev, "Missed Packets = %lld\n", 3313 (long long)sc->stats.mpc); 3314 device_printf(dev, "Receive No Buffers = %lld\n", 3315 (long long)sc->stats.rnbc); 3316 /* RLEC is inaccurate on some hardware, calculate our own. 
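 * Receive length errors are therefore reported below as ROC + RUC
 * (oversize + undersize) instead of RLEC.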
*/ 3317 device_printf(dev, "Receive Length Errors = %lld\n", 3318 ((long long)sc->stats.roc + (long long)sc->stats.ruc)); 3319 device_printf(dev, "Receive errors = %lld\n", 3320 (long long)sc->stats.rxerrc); 3321 device_printf(dev, "Crc errors = %lld\n", 3322 (long long)sc->stats.crcerrs); 3323 device_printf(dev, "Alignment errors = %lld\n", 3324 (long long)sc->stats.algnerrc); 3325 device_printf(dev, "Collision/Carrier extension errors = %lld\n", 3326 (long long)sc->stats.cexterr); 3327 device_printf(dev, "RX overruns = %ld\n", sc->rx_overruns); 3328 device_printf(dev, "watchdog timeouts = %ld\n", 3329 sc->watchdog_events); 3330 device_printf(dev, "XON Rcvd = %lld\n", 3331 (long long)sc->stats.xonrxc); 3332 device_printf(dev, "XON Xmtd = %lld\n", 3333 (long long)sc->stats.xontxc); 3334 device_printf(dev, "XOFF Rcvd = %lld\n", 3335 (long long)sc->stats.xoffrxc); 3336 device_printf(dev, "XOFF Xmtd = %lld\n", 3337 (long long)sc->stats.xofftxc); 3338 device_printf(dev, "Good Packets Rcvd = %lld\n", 3339 (long long)sc->stats.gprc); 3340 device_printf(dev, "Good Packets Xmtd = %lld\n", 3341 (long long)sc->stats.gptc); 3342 } 3343 3344 static void 3345 emx_print_nvm_info(struct emx_softc *sc) 3346 { 3347 uint16_t eeprom_data; 3348 int i, j, row = 0; 3349 3350 /* Its a bit crude, but it gets the job done */ 3351 kprintf("\nInterface EEPROM Dump:\n"); 3352 kprintf("Offset\n0x0000 "); 3353 for (i = 0, j = 0; i < 32; i++, j++) { 3354 if (j == 8) { /* Make the offset block */ 3355 j = 0; ++row; 3356 kprintf("\n0x00%x0 ",row); 3357 } 3358 e1000_read_nvm(&sc->hw, i, 1, &eeprom_data); 3359 kprintf("%04x ", eeprom_data); 3360 } 3361 kprintf("\n"); 3362 } 3363 3364 static int 3365 emx_sysctl_debug_info(SYSCTL_HANDLER_ARGS) 3366 { 3367 struct emx_softc *sc; 3368 struct ifnet *ifp; 3369 int error, result; 3370 3371 result = -1; 3372 error = sysctl_handle_int(oidp, &result, 0, req); 3373 if (error || !req->newptr) 3374 return (error); 3375 3376 sc = (struct emx_softc *)arg1; 3377 ifp = &sc->arpcom.ac_if; 3378 3379 ifnet_serialize_all(ifp); 3380 3381 if (result == 1) 3382 emx_print_debug_info(sc); 3383 3384 /* 3385 * This value will cause a hex dump of the 3386 * first 32 16-bit words of the EEPROM to 3387 * the screen. 
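 * (e.g. "sysctl hw.emx0.debug=2" for the first unit; a value of 1 prints
 * the general debug information handled above.)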
3388 */ 3389 if (result == 2) 3390 emx_print_nvm_info(sc); 3391 3392 ifnet_deserialize_all(ifp); 3393 3394 return (error); 3395 } 3396 3397 static int 3398 emx_sysctl_stats(SYSCTL_HANDLER_ARGS) 3399 { 3400 int error, result; 3401 3402 result = -1; 3403 error = sysctl_handle_int(oidp, &result, 0, req); 3404 if (error || !req->newptr) 3405 return (error); 3406 3407 if (result == 1) { 3408 struct emx_softc *sc = (struct emx_softc *)arg1; 3409 struct ifnet *ifp = &sc->arpcom.ac_if; 3410 3411 ifnet_serialize_all(ifp); 3412 emx_print_hw_stats(sc); 3413 ifnet_deserialize_all(ifp); 3414 } 3415 return (error); 3416 } 3417 3418 static void 3419 emx_add_sysctl(struct emx_softc *sc) 3420 { 3421 #ifdef PROFILE_SERIALIZER 3422 struct ifnet *ifp = &sc->arpcom.ac_if; 3423 #endif 3424 #ifdef EMX_RSS_DEBUG 3425 char rx_pkt[32]; 3426 int i; 3427 #endif 3428 3429 sysctl_ctx_init(&sc->sysctl_ctx); 3430 sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx, 3431 SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO, 3432 device_get_nameunit(sc->dev), 3433 CTLFLAG_RD, 0, ""); 3434 if (sc->sysctl_tree == NULL) { 3435 device_printf(sc->dev, "can't add sysctl node\n"); 3436 return; 3437 } 3438 3439 SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 3440 OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, sc, 0, 3441 emx_sysctl_debug_info, "I", "Debug Information"); 3442 3443 SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 3444 OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW, sc, 0, 3445 emx_sysctl_stats, "I", "Statistics"); 3446 3447 SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 3448 OID_AUTO, "rxd", CTLFLAG_RD, 3449 &sc->rx_data[0].num_rx_desc, 0, NULL); 3450 SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 3451 OID_AUTO, "txd", CTLFLAG_RD, &sc->num_tx_desc, 0, NULL); 3452 3453 #ifdef notyet 3454 #ifdef PROFILE_SERIALIZER 3455 SYSCTL_ADD_UINT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 3456 OID_AUTO, "serializer_sleep", CTLFLAG_RW, 3457 &ifp->if_serializer->sleep_cnt, 0, NULL); 3458 SYSCTL_ADD_UINT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 3459 OID_AUTO, "serializer_tryfail", CTLFLAG_RW, 3460 &ifp->if_serializer->tryfail_cnt, 0, NULL); 3461 SYSCTL_ADD_UINT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 3462 OID_AUTO, "serializer_enter", CTLFLAG_RW, 3463 &ifp->if_serializer->enter_cnt, 0, NULL); 3464 SYSCTL_ADD_UINT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 3465 OID_AUTO, "serializer_try", CTLFLAG_RW, 3466 &ifp->if_serializer->try_cnt, 0, NULL); 3467 #endif 3468 #endif 3469 3470 SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 3471 OID_AUTO, "int_throttle_ceil", CTLTYPE_INT|CTLFLAG_RW, 3472 sc, 0, emx_sysctl_int_throttle, "I", 3473 "interrupt throttling rate"); 3474 SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 3475 OID_AUTO, "int_tx_nsegs", CTLTYPE_INT|CTLFLAG_RW, 3476 sc, 0, emx_sysctl_int_tx_nsegs, "I", 3477 "# segments per TX interrupt"); 3478 3479 SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 3480 OID_AUTO, "rx_ring_inuse", CTLFLAG_RD, 3481 &sc->rx_ring_inuse, 0, "RX ring in use"); 3482 3483 #ifdef EMX_RSS_DEBUG 3484 SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 3485 OID_AUTO, "rss_debug", CTLFLAG_RW, &sc->rss_debug, 3486 0, "RSS debug level"); 3487 for (i = 0; i < sc->rx_ring_cnt; ++i) { 3488 ksnprintf(rx_pkt, sizeof(rx_pkt), "rx%d_pkt", i); 3489 SYSCTL_ADD_UINT(&sc->sysctl_ctx, 3490 SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, 3491 rx_pkt, CTLFLAG_RW, 3492 
&sc->rx_data[i].rx_pkts, 0, "RXed packets"); 3493 } 3494 #endif 3495 } 3496 3497 static int 3498 emx_sysctl_int_throttle(SYSCTL_HANDLER_ARGS) 3499 { 3500 struct emx_softc *sc = (void *)arg1; 3501 struct ifnet *ifp = &sc->arpcom.ac_if; 3502 int error, throttle; 3503 3504 throttle = sc->int_throttle_ceil; 3505 error = sysctl_handle_int(oidp, &throttle, 0, req); 3506 if (error || req->newptr == NULL) 3507 return error; 3508 if (throttle < 0 || throttle > 1000000000 / 256) 3509 return EINVAL; 3510 3511 if (throttle) { 3512 /* 3513 * Set the interrupt throttling rate in 256ns increments, 3514 * recalculate sysctl value assignment to get exact frequency. 3515 */ 3516 throttle = 1000000000 / 256 / throttle; 3517 3518 /* Upper 16bits of ITR is reserved and should be zero */ 3519 if (throttle & 0xffff0000) 3520 return EINVAL; 3521 } 3522 3523 ifnet_serialize_all(ifp); 3524 3525 if (throttle) 3526 sc->int_throttle_ceil = 1000000000 / 256 / throttle; 3527 else 3528 sc->int_throttle_ceil = 0; 3529 3530 if (ifp->if_flags & IFF_RUNNING) 3531 E1000_WRITE_REG(&sc->hw, E1000_ITR, throttle); 3532 3533 ifnet_deserialize_all(ifp); 3534 3535 if (bootverbose) { 3536 if_printf(ifp, "Interrupt moderation set to %d/sec\n", 3537 sc->int_throttle_ceil); 3538 } 3539 return 0; 3540 } 3541 3542 static int 3543 emx_sysctl_int_tx_nsegs(SYSCTL_HANDLER_ARGS) 3544 { 3545 struct emx_softc *sc = (void *)arg1; 3546 struct ifnet *ifp = &sc->arpcom.ac_if; 3547 int error, segs; 3548 3549 segs = sc->tx_int_nsegs; 3550 error = sysctl_handle_int(oidp, &segs, 0, req); 3551 if (error || req->newptr == NULL) 3552 return error; 3553 if (segs <= 0) 3554 return EINVAL; 3555 3556 ifnet_serialize_all(ifp); 3557 3558 /* 3559 * Don't allow int_tx_nsegs to become: 3560 * o Less the oact_tx_desc 3561 * o Too large that no TX desc will cause TX interrupt to 3562 * be generated (OACTIVE will never recover) 3563 * o Too small that will cause tx_dd[] overflow 3564 */ 3565 if (segs < sc->oact_tx_desc || 3566 segs >= sc->num_tx_desc - sc->oact_tx_desc || 3567 segs < sc->num_tx_desc / EMX_TXDD_SAFE) { 3568 error = EINVAL; 3569 } else { 3570 error = 0; 3571 sc->tx_int_nsegs = segs; 3572 } 3573 3574 ifnet_deserialize_all(ifp); 3575 3576 return error; 3577 } 3578 3579 static int 3580 emx_dma_alloc(struct emx_softc *sc) 3581 { 3582 int error, i; 3583 3584 /* 3585 * Create top level busdma tag 3586 */ 3587 error = bus_dma_tag_create(NULL, 1, 0, 3588 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 3589 NULL, NULL, 3590 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 3591 0, &sc->parent_dtag); 3592 if (error) { 3593 device_printf(sc->dev, "could not create top level DMA tag\n"); 3594 return error; 3595 } 3596 3597 /* 3598 * Allocate transmit descriptors ring and buffers 3599 */ 3600 error = emx_create_tx_ring(sc); 3601 if (error) { 3602 device_printf(sc->dev, "Could not setup transmit structures\n"); 3603 return error; 3604 } 3605 3606 /* 3607 * Allocate receive descriptors ring and buffers 3608 */ 3609 for (i = 0; i < sc->rx_ring_cnt; ++i) { 3610 error = emx_create_rx_ring(sc, &sc->rx_data[i]); 3611 if (error) { 3612 device_printf(sc->dev, 3613 "Could not setup receive structures\n"); 3614 return error; 3615 } 3616 } 3617 return 0; 3618 } 3619 3620 static void 3621 emx_dma_free(struct emx_softc *sc) 3622 { 3623 int i; 3624 3625 emx_destroy_tx_ring(sc, sc->num_tx_desc); 3626 3627 for (i = 0; i < sc->rx_ring_cnt; ++i) { 3628 emx_destroy_rx_ring(sc, &sc->rx_data[i], 3629 sc->rx_data[i].num_rx_desc); 3630 } 3631 3632 /* Free top level busdma tag */ 3633 if 
(sc->parent_dtag != NULL) 3634 bus_dma_tag_destroy(sc->parent_dtag); 3635 } 3636 3637 static void 3638 emx_serialize(struct ifnet *ifp, enum ifnet_serialize slz) 3639 { 3640 struct emx_softc *sc = ifp->if_softc; 3641 3642 switch (slz) { 3643 case IFNET_SERIALIZE_ALL: 3644 lwkt_serialize_array_enter(sc->serializes, EMX_NSERIALIZE, 0); 3645 break; 3646 3647 case IFNET_SERIALIZE_MAIN: 3648 lwkt_serialize_enter(&sc->main_serialize); 3649 break; 3650 3651 case IFNET_SERIALIZE_TX: 3652 lwkt_serialize_enter(&sc->tx_serialize); 3653 break; 3654 3655 case IFNET_SERIALIZE_RX(0): 3656 lwkt_serialize_enter(&sc->rx_data[0].rx_serialize); 3657 break; 3658 3659 case IFNET_SERIALIZE_RX(1): 3660 lwkt_serialize_enter(&sc->rx_data[1].rx_serialize); 3661 break; 3662 3663 default: 3664 panic("%s unsupported serialize type\n", ifp->if_xname); 3665 } 3666 } 3667 3668 static void 3669 emx_deserialize(struct ifnet *ifp, enum ifnet_serialize slz) 3670 { 3671 struct emx_softc *sc = ifp->if_softc; 3672 3673 switch (slz) { 3674 case IFNET_SERIALIZE_ALL: 3675 lwkt_serialize_array_exit(sc->serializes, EMX_NSERIALIZE, 0); 3676 break; 3677 3678 case IFNET_SERIALIZE_MAIN: 3679 lwkt_serialize_exit(&sc->main_serialize); 3680 break; 3681 3682 case IFNET_SERIALIZE_TX: 3683 lwkt_serialize_exit(&sc->tx_serialize); 3684 break; 3685 3686 case IFNET_SERIALIZE_RX(0): 3687 lwkt_serialize_exit(&sc->rx_data[0].rx_serialize); 3688 break; 3689 3690 case IFNET_SERIALIZE_RX(1): 3691 lwkt_serialize_exit(&sc->rx_data[1].rx_serialize); 3692 break; 3693 3694 default: 3695 panic("%s unsupported serialize type\n", ifp->if_xname); 3696 } 3697 } 3698 3699 static int 3700 emx_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz) 3701 { 3702 struct emx_softc *sc = ifp->if_softc; 3703 3704 switch (slz) { 3705 case IFNET_SERIALIZE_ALL: 3706 return lwkt_serialize_array_try(sc->serializes, 3707 EMX_NSERIALIZE, 0); 3708 3709 case IFNET_SERIALIZE_MAIN: 3710 return lwkt_serialize_try(&sc->main_serialize); 3711 3712 case IFNET_SERIALIZE_TX: 3713 return lwkt_serialize_try(&sc->tx_serialize); 3714 3715 case IFNET_SERIALIZE_RX(0): 3716 return lwkt_serialize_try(&sc->rx_data[0].rx_serialize); 3717 3718 case IFNET_SERIALIZE_RX(1): 3719 return lwkt_serialize_try(&sc->rx_data[1].rx_serialize); 3720 3721 default: 3722 panic("%s unsupported serialize type\n", ifp->if_xname); 3723 } 3724 } 3725 3726 static void 3727 emx_serialize_skipmain(struct emx_softc *sc) 3728 { 3729 lwkt_serialize_array_enter(sc->serializes, EMX_NSERIALIZE, 1); 3730 } 3731 3732 static int 3733 emx_tryserialize_skipmain(struct emx_softc *sc) 3734 { 3735 return lwkt_serialize_array_try(sc->serializes, EMX_NSERIALIZE, 1); 3736 } 3737 3738 static void 3739 emx_deserialize_skipmain(struct emx_softc *sc) 3740 { 3741 lwkt_serialize_array_exit(sc->serializes, EMX_NSERIALIZE, 1); 3742 } 3743 3744 #ifdef INVARIANTS 3745 3746 static void 3747 emx_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz, 3748 boolean_t serialized) 3749 { 3750 struct emx_softc *sc = ifp->if_softc; 3751 int i; 3752 3753 switch (slz) { 3754 case IFNET_SERIALIZE_ALL: 3755 if (serialized) { 3756 for (i = 0; i < EMX_NSERIALIZE; ++i) 3757 ASSERT_SERIALIZED(sc->serializes[i]); 3758 } else { 3759 for (i = 0; i < EMX_NSERIALIZE; ++i) 3760 ASSERT_NOT_SERIALIZED(sc->serializes[i]); 3761 } 3762 break; 3763 3764 case IFNET_SERIALIZE_MAIN: 3765 if (serialized) 3766 ASSERT_SERIALIZED(&sc->main_serialize); 3767 else 3768 ASSERT_NOT_SERIALIZED(&sc->main_serialize); 3769 break; 3770 3771 case IFNET_SERIALIZE_TX: 3772 if (serialized) 
3773 ASSERT_SERIALIZED(&sc->tx_serialize); 3774 else 3775 ASSERT_NOT_SERIALIZED(&sc->tx_serialize); 3776 break; 3777 3778 case IFNET_SERIALIZE_RX(0): 3779 if (serialized) 3780 ASSERT_SERIALIZED(&sc->rx_data[0].rx_serialize); 3781 else 3782 ASSERT_NOT_SERIALIZED(&sc->rx_data[0].rx_serialize); 3783 break; 3784 3785 case IFNET_SERIALIZE_RX(1): 3786 if (serialized) 3787 ASSERT_SERIALIZED(&sc->rx_data[1].rx_serialize); 3788 else 3789 ASSERT_NOT_SERIALIZED(&sc->rx_data[1].rx_serialize); 3790 break; 3791 3792 default: 3793 panic("%s unsupported serialize type\n", ifp->if_xname); 3794 } 3795 } 3796 3797 #endif /* INVARIANTS */ 3798