/*
 * Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>.  All rights reserved.
 *
 * Copyright (c) 2001-2008, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * Copyright (c) 2005 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_ifpoll.h"
#include "opt_emx.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/serialize2.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/toeplitz.h>
#include <net/toeplitz2.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>
#include <net/if_poll.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

#include <dev/netif/ig_hal/e1000_api.h>
#include <dev/netif/ig_hal/e1000_82571.h>
#include <dev/netif/emx/if_emx.h>

#ifdef EMX_RSS_DEBUG
#define EMX_RSS_DPRINTF(sc, lvl, fmt, ...) \
do { \
	if (sc->rss_debug >= lvl) \
		if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \
} while (0)
#else	/* !EMX_RSS_DEBUG */
#define EMX_RSS_DPRINTF(sc, lvl, fmt, ...)	((void)0)
#endif	/* EMX_RSS_DEBUG */

#define EMX_TX_SERIALIZE	1
#define EMX_RX_SERIALIZE	2

#define EMX_NAME	"Intel(R) PRO/1000 "

#define EMX_DEVICE(id)	\
	{ EMX_VENDOR_ID, E1000_DEV_ID_##id, EMX_NAME #id }
#define EMX_DEVICE_NULL	{ 0, 0, NULL }

static const struct emx_device {
	uint16_t	vid;
	uint16_t	did;
	const char	*desc;
} emx_devices[] = {
	EMX_DEVICE(82571EB_COPPER),
	EMX_DEVICE(82571EB_FIBER),
	EMX_DEVICE(82571EB_SERDES),
	EMX_DEVICE(82571EB_SERDES_DUAL),
	EMX_DEVICE(82571EB_SERDES_QUAD),
	EMX_DEVICE(82571EB_QUAD_COPPER),
	EMX_DEVICE(82571EB_QUAD_COPPER_BP),
	EMX_DEVICE(82571EB_QUAD_COPPER_LP),
	EMX_DEVICE(82571EB_QUAD_FIBER),
	EMX_DEVICE(82571PT_QUAD_COPPER),

	EMX_DEVICE(82572EI_COPPER),
	EMX_DEVICE(82572EI_FIBER),
	EMX_DEVICE(82572EI_SERDES),
	EMX_DEVICE(82572EI),

	EMX_DEVICE(82573E),
	EMX_DEVICE(82573E_IAMT),
	EMX_DEVICE(82573L),

	EMX_DEVICE(80003ES2LAN_COPPER_SPT),
	EMX_DEVICE(80003ES2LAN_SERDES_SPT),
	EMX_DEVICE(80003ES2LAN_COPPER_DPT),
	EMX_DEVICE(80003ES2LAN_SERDES_DPT),

	EMX_DEVICE(82574L),
	EMX_DEVICE(82574LA),

	/* required last entry */
	EMX_DEVICE_NULL
};

static int	emx_probe(device_t);
static int	emx_attach(device_t);
static int	emx_detach(device_t);
static int	emx_shutdown(device_t);
static int	emx_suspend(device_t);
static int	emx_resume(device_t);

static void	emx_init(void *);
static void	emx_stop(struct emx_softc *);
static int	emx_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	emx_start(struct ifnet *);
#ifdef IFPOLL_ENABLE
static void	emx_qpoll(struct ifnet *, struct ifpoll_info *);
#endif
static void	emx_watchdog(struct ifnet *);
static void	emx_media_status(struct ifnet *, struct ifmediareq *);
static int	emx_media_change(struct ifnet *);
static void	emx_timer(void *);
static void	emx_serialize(struct ifnet *, enum ifnet_serialize);
static void	emx_deserialize(struct ifnet *, enum ifnet_serialize);
static int	emx_tryserialize(struct ifnet *, enum ifnet_serialize);
#ifdef INVARIANTS
static void	emx_serialize_assert(struct ifnet *, enum ifnet_serialize,
		    boolean_t);
#endif

static void	emx_intr(void *);
static void	emx_intr_mask(void *);
static void	emx_intr_body(struct emx_softc *, boolean_t);
static void	emx_rxeof(struct emx_softc *, int, int);
static void	emx_txeof(struct emx_softc *);
static void	emx_tx_collect(struct emx_softc *);
static void	emx_tx_purge(struct emx_softc *);
static void	emx_enable_intr(struct emx_softc *);
static void	emx_disable_intr(struct emx_softc *);

static int	emx_dma_alloc(struct emx_softc *);
static void	emx_dma_free(struct emx_softc *);
static void	emx_init_tx_ring(struct emx_softc *);
static int	emx_init_rx_ring(struct emx_softc *, struct emx_rxdata *);
static void	emx_free_rx_ring(struct emx_softc *, struct emx_rxdata *);
static int	emx_create_tx_ring(struct emx_softc *);
static int	emx_create_rx_ring(struct emx_softc *, struct emx_rxdata *);
static void	emx_destroy_tx_ring(struct emx_softc *, int);
static void	emx_destroy_rx_ring(struct emx_softc *,
		    struct emx_rxdata *, int);
static int	emx_newbuf(struct emx_softc *, struct emx_rxdata *, int, int);
static int	emx_encap(struct emx_softc *, struct mbuf **);
static int	emx_txcsum(struct emx_softc *, struct mbuf *,
		    uint32_t *, uint32_t *);
static int	emx_tso_pullup(struct emx_softc *, struct mbuf **);
static int	emx_tso_setup(struct emx_softc *, struct mbuf *,
		    uint32_t *, uint32_t *);

static int	emx_is_valid_eaddr(const uint8_t *);
static int	emx_reset(struct emx_softc *);
static void	emx_setup_ifp(struct emx_softc *);
static void	emx_init_tx_unit(struct emx_softc *);
static void	emx_init_rx_unit(struct emx_softc *);
static void	emx_update_stats(struct emx_softc *);
static void	emx_set_promisc(struct emx_softc *);
static void	emx_disable_promisc(struct emx_softc *);
static void	emx_set_multi(struct emx_softc *);
static void	emx_update_link_status(struct emx_softc *);
static void	emx_smartspeed(struct emx_softc *);
static void	emx_set_itr(struct emx_softc *, uint32_t);
static void	emx_disable_aspm(struct emx_softc *);

static void	emx_print_debug_info(struct emx_softc *);
static void	emx_print_nvm_info(struct emx_softc *);
static void	emx_print_hw_stats(struct emx_softc *);

static int	emx_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_int_throttle(SYSCTL_HANDLER_ARGS);
static int	emx_sysctl_int_tx_nsegs(SYSCTL_HANDLER_ARGS);
static void	emx_add_sysctl(struct emx_softc *);

static void	emx_serialize_skipmain(struct emx_softc *);
static void	emx_deserialize_skipmain(struct emx_softc *);

/* Management and WOL Support */
static void	emx_get_mgmt(struct emx_softc *);
static void	emx_rel_mgmt(struct emx_softc *);
static void	emx_get_hw_control(struct emx_softc *);
static void	emx_rel_hw_control(struct emx_softc *);
static void	emx_enable_wol(device_t);

static device_method_t emx_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		emx_probe),
	DEVMETHOD(device_attach,	emx_attach),
	DEVMETHOD(device_detach,	emx_detach),
	DEVMETHOD(device_shutdown,	emx_shutdown),
	DEVMETHOD(device_suspend,	emx_suspend),
	DEVMETHOD(device_resume,	emx_resume),
	{ 0, 0 }
};

static driver_t emx_driver = {
	"emx",
	emx_methods,
	sizeof(struct emx_softc),
};

static devclass_t emx_devclass;

DECLARE_DUMMY_MODULE(if_emx);
MODULE_DEPEND(emx, ig_hal, 1, 1, 1);
DRIVER_MODULE(if_emx, pci, emx_driver, emx_devclass, NULL, NULL);

/*
 * Tunables
 */
static int	emx_int_throttle_ceil = EMX_DEFAULT_ITR;
static int	emx_rxd = EMX_DEFAULT_RXD;
static int	emx_txd = EMX_DEFAULT_TXD;
static int	emx_smart_pwr_down = 0;
static int	emx_rxr = 0;

/* Controls whether promiscuous also shows bad packets */
static int	emx_debug_sbp = 0;

static int	emx_82573_workaround = 1;
static int	emx_msi_enable = 1;

TUNABLE_INT("hw.emx.int_throttle_ceil", &emx_int_throttle_ceil);
TUNABLE_INT("hw.emx.rxd", &emx_rxd);
TUNABLE_INT("hw.emx.rxr", &emx_rxr);
TUNABLE_INT("hw.emx.txd", &emx_txd);
TUNABLE_INT("hw.emx.smart_pwr_down", &emx_smart_pwr_down);
TUNABLE_INT("hw.emx.sbp", &emx_debug_sbp);
TUNABLE_INT("hw.emx.82573_workaround", &emx_82573_workaround);
TUNABLE_INT("hw.emx.msi.enable", &emx_msi_enable);

/* Global used in WOL setup with multiport cards */
static int	emx_global_quad_port_a = 0;

/* Set this to one to display debug statistics */
static int	emx_display_debug_stats = 0;

#if !defined(KTR_IF_EMX)
#define KTR_IF_EMX	KTR_ALL
#endif
KTR_INFO_MASTER(if_emx);
KTR_INFO(KTR_IF_EMX, if_emx, intr_beg, 0, "intr begin");
KTR_INFO(KTR_IF_EMX, if_emx, intr_end, 1, "intr end");
KTR_INFO(KTR_IF_EMX, if_emx, pkt_receive, 4, "rx packet");
KTR_INFO(KTR_IF_EMX, if_emx, pkt_txqueue, 5, "tx packet");
KTR_INFO(KTR_IF_EMX, if_emx, pkt_txclean, 6, "tx clean");
#define logif(name)	KTR_LOG(if_emx_ ## name)

static __inline void
emx_setup_rxdesc(emx_rxdesc_t *rxd, const struct emx_rxbuf *rxbuf)
{
	rxd->rxd_bufaddr = htole64(rxbuf->paddr);
	/* DD bit must be cleared */
	rxd->rxd_staterr = 0;
}

static __inline void
emx_rxcsum(uint32_t staterr, struct mbuf *mp)
{
	/* Ignore Checksum bit is set */
	if (staterr & E1000_RXD_STAT_IXSM)
		return;

	if ((staterr & (E1000_RXD_STAT_IPCS | E1000_RXDEXT_STATERR_IPE)) ==
	    E1000_RXD_STAT_IPCS)
		mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;

	if ((staterr & (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) ==
	    E1000_RXD_STAT_TCPCS) {
		mp->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
					   CSUM_PSEUDO_HDR |
					   CSUM_FRAG_NOT_CHECKED;
		mp->m_pkthdr.csum_data = htons(0xffff);
	}
}

static __inline struct pktinfo *
emx_rssinfo(struct mbuf *m, struct pktinfo *pi,
	    uint32_t mrq, uint32_t hash, uint32_t staterr)
{
	switch (mrq & EMX_RXDMRQ_RSSTYPE_MASK) {
	case EMX_RXDMRQ_IPV4_TCP:
		pi->pi_netisr = NETISR_IP;
		pi->pi_flags = 0;
		pi->pi_l3proto = IPPROTO_TCP;
		break;

	case EMX_RXDMRQ_IPV6_TCP:
		pi->pi_netisr = NETISR_IPV6;
		pi->pi_flags = 0;
		pi->pi_l3proto = IPPROTO_TCP;
		break;

	case EMX_RXDMRQ_IPV4:
		if (staterr & E1000_RXD_STAT_IXSM)
			return NULL;

		if ((staterr &
		     (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) ==
		    E1000_RXD_STAT_TCPCS) {
			pi->pi_netisr = NETISR_IP;
			pi->pi_flags = 0;
			pi->pi_l3proto = IPPROTO_UDP;
			break;
		}
		/* FALL THROUGH */
	default:
		return NULL;
	}

	m->m_flags |= M_HASH;
	m->m_pkthdr.hash = toeplitz_hash(hash);
	return pi;
}

static int
emx_probe(device_t dev)
{
	const struct emx_device *d;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);

	for (d = emx_devices; d->desc != NULL; ++d) {
		if (vid == d->vid && did == d->did) {
			device_set_desc(dev, d->desc);
			device_set_async_attach(dev, TRUE);
			return 0;
		}
	}
	return ENXIO;
}

static int
emx_attach(device_t dev)
{
	struct emx_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int error = 0, i, throttle, msi_enable;
	u_int intr_flags;
	uint16_t eeprom_data, device_id, apme_mask;
	driver_intr_t *intr_func;

	lwkt_serialize_init(&sc->main_serialize);
	lwkt_serialize_init(&sc->tx_serialize);
	for (i = 0; i < EMX_NRX_RING; ++i)
		lwkt_serialize_init(&sc->rx_data[i].rx_serialize);

	i = 0;
	sc->serializes[i++] = &sc->main_serialize;
	sc->serializes[i++] = &sc->tx_serialize;
	sc->serializes[i++] = &sc->rx_data[0].rx_serialize;
	sc->serializes[i++] = &sc->rx_data[1].rx_serialize;
	KKASSERT(i == EMX_NSERIALIZE);

	callout_init_mp(&sc->timer);

	sc->dev = sc->osdep.dev = dev;

	/*
	 * Determine hardware and mac type
	 */
	sc->hw.vendor_id = pci_get_vendor(dev);
	sc->hw.device_id = pci_get_device(dev);
	sc->hw.revision_id = pci_get_revid(dev);
	sc->hw.subsystem_vendor_id = pci_get_subvendor(dev);
	sc->hw.subsystem_device_id = pci_get_subdevice(dev);

	if (e1000_set_mac_type(&sc->hw))
		return ENXIO;

	/*
	 * Pullup extra 4bytes into the first data segment, see:
	 *   82571/82572 specification update errata #7
	 *
	 * NOTE:
	 * 4bytes instead of 2bytes, which are mentioned in the errata,
	 * are pulled; mainly to keep rest of the data properly aligned.
	 */
	if (sc->hw.mac.type == e1000_82571 || sc->hw.mac.type == e1000_82572)
		sc->flags |= EMX_FLAG_TSO_PULLEX;

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/*
	 * Allocate IO memory
	 */
	sc->memory_rid = EMX_BAR_MEM;
	sc->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->memory_rid, RF_ACTIVE);
	if (sc->memory == NULL) {
		device_printf(dev, "Unable to allocate bus resource: memory\n");
		error = ENXIO;
		goto fail;
	}
	sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->memory);
	sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->memory);

	/* XXX This is quite goofy, it is not actually used */
	sc->hw.hw_addr = (uint8_t *)&sc->osdep.mem_bus_space_handle;

	/*
	 * Don't enable MSI-X on 82574, see:
	 *   82574 specification update errata #15
	 *
	 * Don't enable MSI on 82571/82572, see:
	 *   82571/82572 specification update errata #63
	 */
	msi_enable = emx_msi_enable;
	if (msi_enable &&
	    (sc->hw.mac.type == e1000_82571 ||
	     sc->hw.mac.type == e1000_82572))
		msi_enable = 0;

	/*
	 * Allocate interrupt
	 */
	sc->intr_type = pci_alloc_1intr(dev, msi_enable,
	    &sc->intr_rid, &intr_flags);

	if (sc->intr_type == PCI_INTR_TYPE_LEGACY) {
		int unshared;

		unshared = device_getenv_int(dev, "irq.unshared", 0);
		if (!unshared) {
			sc->flags |= EMX_FLAG_SHARED_INTR;
			if (bootverbose)
				device_printf(dev, "IRQ shared\n");
		} else {
			intr_flags &= ~RF_SHAREABLE;
			if (bootverbose)
				device_printf(dev, "IRQ unshared\n");
		}
	}

	sc->intr_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->intr_rid,
	    intr_flags);
	if (sc->intr_res == NULL) {
		device_printf(dev, "Unable to allocate bus resource: "
		    "interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Save PCI command register for Shared Code */
	sc->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	sc->hw.back = &sc->osdep;

	/* Do Shared Code initialization */
	if (e1000_setup_init_funcs(&sc->hw, TRUE)) {
		device_printf(dev, "Setup of Shared code failed\n");
		error = ENXIO;
		goto fail;
	}
	e1000_get_bus_info(&sc->hw);

	sc->hw.mac.autoneg = EMX_DO_AUTO_NEG;
	sc->hw.phy.autoneg_wait_to_complete = FALSE;
	sc->hw.phy.autoneg_advertised = EMX_AUTONEG_ADV_DEFAULT;

	/*
	 * Interrupt throttle rate
	 */
	throttle = device_getenv_int(dev, "int_throttle_ceil",
	    emx_int_throttle_ceil);
	if (throttle == 0) {
		sc->int_throttle_ceil = 0;
	} else {
		if (throttle < 0)
			throttle = EMX_DEFAULT_ITR;

		/* Recalculate the tunable value to get the exact frequency. */
		throttle = 1000000000 / 256 / throttle;

		/* Upper 16bits of ITR is reserved and should be zero */
		if (throttle & 0xffff0000)
			throttle = 1000000000 / 256 / EMX_DEFAULT_ITR;

		sc->int_throttle_ceil = 1000000000 / 256 / throttle;
	}

	e1000_init_script_state_82541(&sc->hw, TRUE);
	e1000_set_tbi_compatibility_82543(&sc->hw, TRUE);

	/* Copper options */
	if (sc->hw.phy.media_type == e1000_media_type_copper) {
		sc->hw.phy.mdix = EMX_AUTO_ALL_MODES;
		sc->hw.phy.disable_polarity_correction = FALSE;
		sc->hw.phy.ms_type = EMX_MASTER_SLAVE;
	}

	/* Set the frame limits assuming standard ethernet sized frames. */
	sc->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
	sc->min_frame_size = ETHER_MIN_LEN;

	/* This controls when hardware reports transmit completion status. */
	sc->hw.mac.report_tx_early = 1;

	/* Calculate # of RX rings */
	sc->rx_ring_cnt = device_getenv_int(dev, "rxr", emx_rxr);
	sc->rx_ring_cnt = if_ring_count2(sc->rx_ring_cnt, EMX_NRX_RING);

	/* Allocate RX/TX rings' busdma(9) stuffs */
	error = emx_dma_alloc(sc);
	if (error)
		goto fail;

	/* Allocate multicast array memory. */
	sc->mta = kmalloc(ETH_ADDR_LEN * EMX_MCAST_ADDR_MAX,
	    M_DEVBUF, M_WAITOK);

	/* Indicate SOL/IDER usage */
	if (e1000_check_reset_block(&sc->hw)) {
		device_printf(dev,
		    "PHY reset is blocked due to SOL/IDER session.\n");
	}

	/*
	 * Start from a known state, this is important in reading the
	 * nvm and mac from it.
	 */
	e1000_reset_hw(&sc->hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (e1000_validate_nvm_checksum(&sc->hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state, call it again,
		 * if it fails a second time it's a real issue.
		 */
		if (e1000_validate_nvm_checksum(&sc->hw) < 0) {
			device_printf(dev,
			    "The EEPROM Checksum Is Not Valid\n");
			error = EIO;
			goto fail;
		}
	}

	/* Copy the permanent MAC address out of the EEPROM */
	if (e1000_read_mac_addr(&sc->hw) < 0) {
		device_printf(dev, "EEPROM read error while reading MAC"
		    " address\n");
		error = EIO;
		goto fail;
	}
	if (!emx_is_valid_eaddr(sc->hw.mac.addr)) {
		device_printf(dev, "Invalid MAC address\n");
		error = EIO;
		goto fail;
	}

	/* Determine if we have to control management hardware */
	if (e1000_enable_mng_pass_thru(&sc->hw))
		sc->flags |= EMX_FLAG_HAS_MGMT;

	/*
	 * Setup Wake-on-Lan
	 */
	apme_mask = EMX_EEPROM_APME;
	eeprom_data = 0;
	switch (sc->hw.mac.type) {
	case e1000_82573:
		sc->flags |= EMX_FLAG_HAS_AMT;
		/* FALL THROUGH */

	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
		if (sc->hw.bus.func == 1) {
			e1000_read_nvm(&sc->hw,
			    NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
		} else {
			e1000_read_nvm(&sc->hw,
			    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		}
		break;

	default:
		e1000_read_nvm(&sc->hw,
		    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	}
	if (eeprom_data & apme_mask)
		sc->wol = E1000_WUFC_MAG | E1000_WUFC_MC;

	/*
	 * We have the eeprom settings, now apply the special cases
	 * where the eeprom may be wrong or the board won't support
	 * wake on lan on a particular port
	 */
	device_id = pci_get_device(dev);
	switch (device_id) {
	case E1000_DEV_ID_82571EB_FIBER:
		/*
		 * Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting
		 */
		if (E1000_READ_REG(&sc->hw, E1000_STATUS) &
		    E1000_STATUS_FUNC_1)
			sc->wol = 0;
		break;

	case E1000_DEV_ID_82571EB_QUAD_COPPER:
	case E1000_DEV_ID_82571EB_QUAD_FIBER:
	case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
		/* if quad port sc, disable WoL on all but port A */
		if (emx_global_quad_port_a != 0)
			sc->wol = 0;
		/* Reset for multiple quad port adapters */
		if (++emx_global_quad_port_a == 4)
			emx_global_quad_port_a = 0;
		break;
	}

	/* XXX disable wol */
	sc->wol = 0;

	/* Setup OS specific network interface */
	emx_setup_ifp(sc);

	/* Add sysctl tree, must be after emx_setup_ifp() */
	emx_add_sysctl(sc);

	/* Reset the hardware */
	error = emx_reset(sc);
	if (error) {
		device_printf(dev, "Unable to reset the hardware\n");
		goto fail;
	}

	/* Initialize statistics */
	emx_update_stats(sc);

	sc->hw.mac.get_link_status = 1;
	emx_update_link_status(sc);

	sc->spare_tx_desc = EMX_TX_SPARE;

	/*
	 * Keep following relationship between spare_tx_desc, oact_tx_desc
	 * and tx_int_nsegs:
	 * (spare_tx_desc + EMX_TX_RESERVED) <=
	 * oact_tx_desc <= EMX_TX_OACTIVE_MAX <= tx_int_nsegs
	 */
	sc->oact_tx_desc = sc->num_tx_desc / 8;
	if (sc->oact_tx_desc > EMX_TX_OACTIVE_MAX)
		sc->oact_tx_desc = EMX_TX_OACTIVE_MAX;
	if (sc->oact_tx_desc < sc->spare_tx_desc + EMX_TX_RESERVED)
		sc->oact_tx_desc = sc->spare_tx_desc + EMX_TX_RESERVED;

	sc->tx_int_nsegs = sc->num_tx_desc / 16;
	if (sc->tx_int_nsegs < sc->oact_tx_desc)
		sc->tx_int_nsegs = sc->oact_tx_desc;

	/* Non-AMT based hardware can now take control from firmware */
	if ((sc->flags & (EMX_FLAG_HAS_MGMT | EMX_FLAG_HAS_AMT)) ==
	    EMX_FLAG_HAS_MGMT)
		emx_get_hw_control(sc);

	/*
	 * Missing Interrupt Following ICR read:
	 *
	 * 82571/82572 specification update errata #76
	 * 82573 specification update errata #31
	 * 82574 specification update errata #12
	 */
	intr_func = emx_intr;
	if ((sc->flags & EMX_FLAG_SHARED_INTR) &&
	    (sc->hw.mac.type == e1000_82571 ||
	     sc->hw.mac.type == e1000_82572 ||
	     sc->hw.mac.type == e1000_82573 ||
	     sc->hw.mac.type == e1000_82574))
		intr_func = emx_intr_mask;

	error = bus_setup_intr(dev, sc->intr_res, INTR_MPSAFE, intr_func, sc,
	    &sc->intr_tag, &sc->main_serialize);
	if (error) {
		device_printf(dev, "Failed to register interrupt handler");
		ether_ifdetach(&sc->arpcom.ac_if);
		goto fail;
	}

	ifp->if_cpuid = rman_get_cpuid(sc->intr_res);
	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
	return (0);
fail:
	emx_detach(dev);
	return (error);
}

static int
emx_detach(device_t dev)
{
	struct emx_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		ifnet_serialize_all(ifp);

		emx_stop(sc);

		e1000_phy_hw_reset(&sc->hw);

		emx_rel_mgmt(sc);
		emx_rel_hw_control(sc);

		if (sc->wol) {
			E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN);
			E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol);
			emx_enable_wol(dev);
		}

		bus_teardown_intr(dev, sc->intr_res, sc->intr_tag);

		ifnet_deserialize_all(ifp);

		ether_ifdetach(ifp);
	} else if (sc->memory != NULL) {
		emx_rel_hw_control(sc);
	}
	bus_generic_detach(dev);

	if (sc->intr_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->intr_rid,
		    sc->intr_res);
	}

	if (sc->intr_type == PCI_INTR_TYPE_MSI)
		pci_release_msi(dev);

	if (sc->memory != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->memory_rid,
		    sc->memory);
	}

	emx_dma_free(sc);

	/* Free sysctl tree */
	if (sc->sysctl_tree != NULL)
		sysctl_ctx_free(&sc->sysctl_ctx);

	if (sc->mta != NULL)
		kfree(sc->mta, M_DEVBUF);

	return (0);
}

static int
emx_shutdown(device_t dev)
{
	return emx_suspend(dev);
}

static int
emx_suspend(device_t dev)
{
	struct emx_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);

	emx_stop(sc);

	emx_rel_mgmt(sc);
	emx_rel_hw_control(sc);

	if (sc->wol) {
		E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN);
		E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol);
		emx_enable_wol(dev);
	}

	ifnet_deserialize_all(ifp);

	return bus_generic_suspend(dev);
}

static int
emx_resume(device_t dev)
{
	struct emx_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);

	emx_init(sc);
	emx_get_mgmt(sc);
	if_devstart(ifp);

	ifnet_deserialize_all(ifp);

	return bus_generic_resume(dev);
}

static void
emx_start(struct ifnet *ifp)
{
	struct emx_softc *sc = ifp->if_softc;
	struct mbuf *m_head;

	ASSERT_SERIALIZED(&sc->tx_serialize);

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	if (!sc->link_active) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	while (!ifq_is_empty(&ifp->if_snd)) {
		/* Now do we at least have a minimal? */
		if (EMX_IS_OACTIVE(sc)) {
			emx_tx_collect(sc);
			if (EMX_IS_OACTIVE(sc)) {
				ifp->if_flags |= IFF_OACTIVE;
				sc->no_tx_desc_avail1++;
				break;
			}
		}

		logif(pkt_txqueue);
		m_head = ifq_dequeue(&ifp->if_snd, NULL);
		if (m_head == NULL)
			break;

		if (emx_encap(sc, &m_head)) {
			ifp->if_oerrors++;
			emx_tx_collect(sc);
			continue;
		}

		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);

		/* Set timeout in case hardware has problems transmitting. */
		ifp->if_timer = EMX_TX_TIMEOUT;
	}
}

static int
emx_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct emx_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	uint16_t eeprom_data = 0;
	int max_frame_size, mask, reinit;
	int error = 0;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	switch (command) {
	case SIOCSIFMTU:
		switch (sc->hw.mac.type) {
		case e1000_82573:
			/*
			 * 82573 only supports jumbo frames
			 * if ASPM is disabled.
934 */ 935 e1000_read_nvm(&sc->hw, NVM_INIT_3GIO_3, 1, 936 &eeprom_data); 937 if (eeprom_data & NVM_WORD1A_ASPM_MASK) { 938 max_frame_size = ETHER_MAX_LEN; 939 break; 940 } 941 /* FALL THROUGH */ 942 943 /* Limit Jumbo Frame size */ 944 case e1000_82571: 945 case e1000_82572: 946 case e1000_82574: 947 case e1000_80003es2lan: 948 max_frame_size = 9234; 949 break; 950 951 default: 952 max_frame_size = MAX_JUMBO_FRAME_SIZE; 953 break; 954 } 955 if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN - 956 ETHER_CRC_LEN) { 957 error = EINVAL; 958 break; 959 } 960 961 ifp->if_mtu = ifr->ifr_mtu; 962 sc->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + 963 ETHER_CRC_LEN; 964 965 if (ifp->if_flags & IFF_RUNNING) 966 emx_init(sc); 967 break; 968 969 case SIOCSIFFLAGS: 970 if (ifp->if_flags & IFF_UP) { 971 if ((ifp->if_flags & IFF_RUNNING)) { 972 if ((ifp->if_flags ^ sc->if_flags) & 973 (IFF_PROMISC | IFF_ALLMULTI)) { 974 emx_disable_promisc(sc); 975 emx_set_promisc(sc); 976 } 977 } else { 978 emx_init(sc); 979 } 980 } else if (ifp->if_flags & IFF_RUNNING) { 981 emx_stop(sc); 982 } 983 sc->if_flags = ifp->if_flags; 984 break; 985 986 case SIOCADDMULTI: 987 case SIOCDELMULTI: 988 if (ifp->if_flags & IFF_RUNNING) { 989 emx_disable_intr(sc); 990 emx_set_multi(sc); 991 #ifdef IFPOLL_ENABLE 992 if (!(ifp->if_flags & IFF_NPOLLING)) 993 #endif 994 emx_enable_intr(sc); 995 } 996 break; 997 998 case SIOCSIFMEDIA: 999 /* Check SOL/IDER usage */ 1000 if (e1000_check_reset_block(&sc->hw)) { 1001 device_printf(sc->dev, "Media change is" 1002 " blocked due to SOL/IDER session.\n"); 1003 break; 1004 } 1005 /* FALL THROUGH */ 1006 1007 case SIOCGIFMEDIA: 1008 error = ifmedia_ioctl(ifp, ifr, &sc->media, command); 1009 break; 1010 1011 case SIOCSIFCAP: 1012 reinit = 0; 1013 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 1014 if (mask & IFCAP_RXCSUM) { 1015 ifp->if_capenable ^= IFCAP_RXCSUM; 1016 reinit = 1; 1017 } 1018 if (mask & IFCAP_VLAN_HWTAGGING) { 1019 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 1020 reinit = 1; 1021 } 1022 if (mask & IFCAP_TXCSUM) { 1023 ifp->if_capenable ^= IFCAP_TXCSUM; 1024 if (ifp->if_capenable & IFCAP_TXCSUM) 1025 ifp->if_hwassist |= EMX_CSUM_FEATURES; 1026 else 1027 ifp->if_hwassist &= ~EMX_CSUM_FEATURES; 1028 } 1029 if (mask & IFCAP_TSO) { 1030 ifp->if_capenable ^= IFCAP_TSO; 1031 if (ifp->if_capenable & IFCAP_TSO) 1032 ifp->if_hwassist |= CSUM_TSO; 1033 else 1034 ifp->if_hwassist &= ~CSUM_TSO; 1035 } 1036 if (mask & IFCAP_RSS) 1037 ifp->if_capenable ^= IFCAP_RSS; 1038 if (reinit && (ifp->if_flags & IFF_RUNNING)) 1039 emx_init(sc); 1040 break; 1041 1042 default: 1043 error = ether_ioctl(ifp, command, data); 1044 break; 1045 } 1046 return (error); 1047 } 1048 1049 static void 1050 emx_watchdog(struct ifnet *ifp) 1051 { 1052 struct emx_softc *sc = ifp->if_softc; 1053 1054 ASSERT_IFNET_SERIALIZED_ALL(ifp); 1055 1056 /* 1057 * The timer is set to 5 every time start queues a packet. 1058 * Then txeof keeps resetting it as long as it cleans at 1059 * least one descriptor. 1060 * Finally, anytime all descriptors are clean the timer is 1061 * set to 0. 1062 */ 1063 1064 if (E1000_READ_REG(&sc->hw, E1000_TDT(0)) == 1065 E1000_READ_REG(&sc->hw, E1000_TDH(0))) { 1066 /* 1067 * If we reach here, all TX jobs are completed and 1068 * the TX engine should have been idled for some time. 1069 * We don't need to call if_devstart() here. 
1070 */ 1071 ifp->if_flags &= ~IFF_OACTIVE; 1072 ifp->if_timer = 0; 1073 return; 1074 } 1075 1076 /* 1077 * If we are in this routine because of pause frames, then 1078 * don't reset the hardware. 1079 */ 1080 if (E1000_READ_REG(&sc->hw, E1000_STATUS) & E1000_STATUS_TXOFF) { 1081 ifp->if_timer = EMX_TX_TIMEOUT; 1082 return; 1083 } 1084 1085 if (e1000_check_for_link(&sc->hw) == 0) 1086 if_printf(ifp, "watchdog timeout -- resetting\n"); 1087 1088 ifp->if_oerrors++; 1089 sc->watchdog_events++; 1090 1091 emx_init(sc); 1092 1093 if (!ifq_is_empty(&ifp->if_snd)) 1094 if_devstart(ifp); 1095 } 1096 1097 static void 1098 emx_init(void *xsc) 1099 { 1100 struct emx_softc *sc = xsc; 1101 struct ifnet *ifp = &sc->arpcom.ac_if; 1102 device_t dev = sc->dev; 1103 uint32_t pba; 1104 int i; 1105 1106 ASSERT_IFNET_SERIALIZED_ALL(ifp); 1107 1108 emx_stop(sc); 1109 1110 /* 1111 * Packet Buffer Allocation (PBA) 1112 * Writing PBA sets the receive portion of the buffer 1113 * the remainder is used for the transmit buffer. 1114 */ 1115 switch (sc->hw.mac.type) { 1116 /* Total Packet Buffer on these is 48K */ 1117 case e1000_82571: 1118 case e1000_82572: 1119 case e1000_80003es2lan: 1120 pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */ 1121 break; 1122 1123 case e1000_82573: /* 82573: Total Packet Buffer is 32K */ 1124 pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */ 1125 break; 1126 1127 case e1000_82574: 1128 pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */ 1129 break; 1130 1131 default: 1132 /* Devices before 82547 had a Packet Buffer of 64K. */ 1133 if (sc->max_frame_size > 8192) 1134 pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */ 1135 else 1136 pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */ 1137 } 1138 E1000_WRITE_REG(&sc->hw, E1000_PBA, pba); 1139 1140 /* Get the latest mac address, User can use a LAA */ 1141 bcopy(IF_LLADDR(ifp), sc->hw.mac.addr, ETHER_ADDR_LEN); 1142 1143 /* Put the address into the Receive Address Array */ 1144 e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0); 1145 1146 /* 1147 * With the 82571 sc, RAR[0] may be overwritten 1148 * when the other port is reset, we make a duplicate 1149 * in RAR[14] for that eventuality, this assures 1150 * the interface continues to function. 1151 */ 1152 if (sc->hw.mac.type == e1000_82571) { 1153 e1000_set_laa_state_82571(&sc->hw, TRUE); 1154 e1000_rar_set(&sc->hw, sc->hw.mac.addr, 1155 E1000_RAR_ENTRIES - 1); 1156 } 1157 1158 /* Initialize the hardware */ 1159 if (emx_reset(sc)) { 1160 device_printf(dev, "Unable to reset the hardware\n"); 1161 /* XXX emx_stop()? 
		return;
	}
	emx_update_link_status(sc);

	/* Setup VLAN support, basic and offload if available */
	E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN);

	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
		uint32_t ctrl;

		ctrl = E1000_READ_REG(&sc->hw, E1000_CTRL);
		ctrl |= E1000_CTRL_VME;
		E1000_WRITE_REG(&sc->hw, E1000_CTRL, ctrl);
	}

	/* Configure for OS presence */
	emx_get_mgmt(sc);

	/* Prepare transmit descriptors and buffers */
	emx_init_tx_ring(sc);
	emx_init_tx_unit(sc);

	/* Setup Multicast table */
	emx_set_multi(sc);

	/* Prepare receive descriptors and buffers */
	for (i = 0; i < sc->rx_ring_cnt; ++i) {
		if (emx_init_rx_ring(sc, &sc->rx_data[i])) {
			device_printf(dev,
			    "Could not setup receive structures\n");
			emx_stop(sc);
			return;
		}
	}
	emx_init_rx_unit(sc);

	/* Don't lose promiscuous settings */
	emx_set_promisc(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	callout_reset(&sc->timer, hz, emx_timer, sc);
	e1000_clear_hw_cntrs_base_generic(&sc->hw);

	/* MSI/X configuration for 82574 */
	if (sc->hw.mac.type == e1000_82574) {
		int tmp;

		tmp = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT);
		tmp |= E1000_CTRL_EXT_PBA_CLR;
		E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, tmp);
		/*
		 * XXX MSIX
		 * Set the IVAR - interrupt vector routing.
		 * Each nibble represents a vector, high bit
		 * is enable, other 3 bits are the MSIX table
		 * entry, we map RXQ0 to 0, TXQ0 to 1, and
		 * Link (other) to 2, hence the magic number.
		 */
		E1000_WRITE_REG(&sc->hw, E1000_IVAR, 0x800A0908);
	}

#ifdef IFPOLL_ENABLE
	/*
	 * Only enable interrupts if we are not polling, make sure
	 * they are off otherwise.
	 */
	if (ifp->if_flags & IFF_NPOLLING)
		emx_disable_intr(sc);
	else
#endif	/* IFPOLL_ENABLE */
		emx_enable_intr(sc);

	/* AMT based hardware can now take control from firmware */
	if ((sc->flags & (EMX_FLAG_HAS_MGMT | EMX_FLAG_HAS_AMT)) ==
	    (EMX_FLAG_HAS_MGMT | EMX_FLAG_HAS_AMT))
		emx_get_hw_control(sc);

	/* Don't reset the phy next time init gets called */
	sc->hw.phy.reset_disable = TRUE;
}

static void
emx_intr(void *xsc)
{
	emx_intr_body(xsc, TRUE);
}

static void
emx_intr_body(struct emx_softc *sc, boolean_t chk_asserted)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t reg_icr;

	logif(intr_beg);
	ASSERT_SERIALIZED(&sc->main_serialize);

	reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR);

	if (chk_asserted && (reg_icr & E1000_ICR_INT_ASSERTED) == 0) {
		logif(intr_end);
		return;
	}

	/*
	 * XXX: some laptops trigger several spurious interrupts
	 * on emx(4) when in the resume cycle. The ICR register
	 * reports all-ones value in this case. Processing such
	 * interrupts would lead to a freeze. I don't know why.
	 */
	if (reg_icr == 0xffffffff) {
		logif(intr_end);
		return;
	}

	if (ifp->if_flags & IFF_RUNNING) {
		if (reg_icr &
		    (E1000_ICR_RXT0 | E1000_ICR_RXDMT0 | E1000_ICR_RXO)) {
			int i;

			for (i = 0; i < sc->rx_ring_cnt; ++i) {
				lwkt_serialize_enter(
				    &sc->rx_data[i].rx_serialize);
				emx_rxeof(sc, i, -1);
				lwkt_serialize_exit(
				    &sc->rx_data[i].rx_serialize);
			}
		}
		if (reg_icr & E1000_ICR_TXDW) {
			lwkt_serialize_enter(&sc->tx_serialize);
			emx_txeof(sc);
			if (!ifq_is_empty(&ifp->if_snd))
				if_devstart(ifp);
			lwkt_serialize_exit(&sc->tx_serialize);
		}
	}

	/* Link status change */
	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		emx_serialize_skipmain(sc);

		callout_stop(&sc->timer);
		sc->hw.mac.get_link_status = 1;
		emx_update_link_status(sc);

		/* Deal with TX cruft when link lost */
		emx_tx_purge(sc);

		callout_reset(&sc->timer, hz, emx_timer, sc);

		emx_deserialize_skipmain(sc);
	}

	if (reg_icr & E1000_ICR_RXO)
		sc->rx_overruns++;

	logif(intr_end);
}

static void
emx_intr_mask(void *xsc)
{
	struct emx_softc *sc = xsc;

	E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff);
	/*
	 * NOTE:
	 * ICR.INT_ASSERTED bit will never be set if IMS is 0,
	 * so don't check it.
	 */
	emx_intr_body(sc, FALSE);
	E1000_WRITE_REG(&sc->hw, E1000_IMS, IMS_ENABLE_MASK);
}

static void
emx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct emx_softc *sc = ifp->if_softc;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	emx_update_link_status(sc);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!sc->link_active)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	if (sc->hw.phy.media_type == e1000_media_type_fiber ||
	    sc->hw.phy.media_type == e1000_media_type_internal_serdes) {
		ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
	} else {
		switch (sc->link_speed) {
		case 10:
			ifmr->ifm_active |= IFM_10_T;
			break;
		case 100:
			ifmr->ifm_active |= IFM_100_TX;
			break;

		case 1000:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		}
		if (sc->link_duplex == FULL_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;
	}
}

static int
emx_media_change(struct ifnet *ifp)
{
	struct emx_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->media;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		sc->hw.mac.autoneg = EMX_DO_AUTO_NEG;
		sc->hw.phy.autoneg_advertised = EMX_AUTONEG_ADV_DEFAULT;
		break;

	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_T:
		sc->hw.mac.autoneg = EMX_DO_AUTO_NEG;
		sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;

	case IFM_100_TX:
		sc->hw.mac.autoneg = FALSE;
		sc->hw.phy.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			sc->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
		else
			sc->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
		break;

	case IFM_10_T:
		sc->hw.mac.autoneg = FALSE;
		sc->hw.phy.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			sc->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
		else
			sc->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
		break;

	default:
		if_printf(ifp, "Unsupported media type\n");
		break;
	}

	/*
	 * As the speed/duplex settings may have changed we need to
	 * reset the PHY.
	 */
	sc->hw.phy.reset_disable = FALSE;

	emx_init(sc);

	return (0);
}

static int
emx_encap(struct emx_softc *sc, struct mbuf **m_headp)
{
	bus_dma_segment_t segs[EMX_MAX_SCATTER];
	bus_dmamap_t map;
	struct emx_txbuf *tx_buffer, *tx_buffer_mapped;
	struct e1000_tx_desc *ctxd = NULL;
	struct mbuf *m_head = *m_headp;
	uint32_t txd_upper, txd_lower, cmd = 0;
	int maxsegs, nsegs, i, j, first, last = 0, error;

	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		error = emx_tso_pullup(sc, m_headp);
		if (error)
			return error;
		m_head = *m_headp;
	}

	txd_upper = txd_lower = 0;

	/*
	 * Capture the first descriptor index, this descriptor
	 * will have the index of the EOP which is the only one
	 * that now gets a DONE bit writeback.
	 */
	first = sc->next_avail_tx_desc;
	tx_buffer = &sc->tx_buf[first];
	tx_buffer_mapped = tx_buffer;
	map = tx_buffer->map;

	maxsegs = sc->num_tx_desc_avail - EMX_TX_RESERVED;
	KASSERT(maxsegs >= sc->spare_tx_desc, ("not enough spare TX desc"));
	if (maxsegs > EMX_MAX_SCATTER)
		maxsegs = EMX_MAX_SCATTER;

	error = bus_dmamap_load_mbuf_defrag(sc->txtag, map, m_headp,
	    segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		if (error == ENOBUFS)
			sc->mbuf_alloc_failed++;
		else
			sc->no_tx_dma_setup++;

		m_freem(*m_headp);
		*m_headp = NULL;
		return error;
	}
	bus_dmamap_sync(sc->txtag, map, BUS_DMASYNC_PREWRITE);

	m_head = *m_headp;
	sc->tx_nsegs += nsegs;

	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		/* TSO will consume one TX desc */
		sc->tx_nsegs += emx_tso_setup(sc, m_head,
		    &txd_upper, &txd_lower);
	} else if (m_head->m_pkthdr.csum_flags & EMX_CSUM_FEATURES) {
		/* TX csum offloading will consume one TX desc */
		sc->tx_nsegs += emx_txcsum(sc, m_head, &txd_upper, &txd_lower);
	}
	i = sc->next_avail_tx_desc;

	/* Set up our transmit descriptors */
	for (j = 0; j < nsegs; j++) {
		tx_buffer = &sc->tx_buf[i];
		ctxd = &sc->tx_desc_base[i];

		ctxd->buffer_addr = htole64(segs[j].ds_addr);
		ctxd->lower.data = htole32(E1000_TXD_CMD_IFCS |
		    txd_lower | segs[j].ds_len);
		ctxd->upper.data = htole32(txd_upper);

		last = i;
		if (++i == sc->num_tx_desc)
			i = 0;
	}

	sc->next_avail_tx_desc = i;

	KKASSERT(sc->num_tx_desc_avail > nsegs);
	sc->num_tx_desc_avail -= nsegs;

	/* Handle VLAN tag */
	if (m_head->m_flags & M_VLANTAG) {
		/* Set the vlan id. */
		ctxd->upper.fields.special =
		    htole16(m_head->m_pkthdr.ether_vlantag);

		/* Tell hardware to add tag */
		ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
	}

	tx_buffer->m_head = m_head;
	tx_buffer_mapped->map = tx_buffer->map;
	tx_buffer->map = map;

	if (sc->tx_nsegs >= sc->tx_int_nsegs) {
		sc->tx_nsegs = 0;

		/*
		 * Report Status (RS) is turned on
		 * every tx_int_nsegs descriptors.
1537 */ 1538 cmd = E1000_TXD_CMD_RS; 1539 1540 /* 1541 * Keep track of the descriptor, which will 1542 * be written back by hardware. 1543 */ 1544 sc->tx_dd[sc->tx_dd_tail] = last; 1545 EMX_INC_TXDD_IDX(sc->tx_dd_tail); 1546 KKASSERT(sc->tx_dd_tail != sc->tx_dd_head); 1547 } 1548 1549 /* 1550 * Last Descriptor of Packet needs End Of Packet (EOP) 1551 */ 1552 ctxd->lower.data |= htole32(E1000_TXD_CMD_EOP | cmd); 1553 1554 /* 1555 * Advance the Transmit Descriptor Tail (TDT), this tells 1556 * the E1000 that this frame is available to transmit. 1557 */ 1558 E1000_WRITE_REG(&sc->hw, E1000_TDT(0), i); 1559 1560 return (0); 1561 } 1562 1563 static void 1564 emx_set_promisc(struct emx_softc *sc) 1565 { 1566 struct ifnet *ifp = &sc->arpcom.ac_if; 1567 uint32_t reg_rctl; 1568 1569 reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL); 1570 1571 if (ifp->if_flags & IFF_PROMISC) { 1572 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); 1573 /* Turn this on if you want to see bad packets */ 1574 if (emx_debug_sbp) 1575 reg_rctl |= E1000_RCTL_SBP; 1576 E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl); 1577 } else if (ifp->if_flags & IFF_ALLMULTI) { 1578 reg_rctl |= E1000_RCTL_MPE; 1579 reg_rctl &= ~E1000_RCTL_UPE; 1580 E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl); 1581 } 1582 } 1583 1584 static void 1585 emx_disable_promisc(struct emx_softc *sc) 1586 { 1587 uint32_t reg_rctl; 1588 1589 reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL); 1590 1591 reg_rctl &= ~E1000_RCTL_UPE; 1592 reg_rctl &= ~E1000_RCTL_MPE; 1593 reg_rctl &= ~E1000_RCTL_SBP; 1594 E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl); 1595 } 1596 1597 static void 1598 emx_set_multi(struct emx_softc *sc) 1599 { 1600 struct ifnet *ifp = &sc->arpcom.ac_if; 1601 struct ifmultiaddr *ifma; 1602 uint32_t reg_rctl = 0; 1603 uint8_t *mta; 1604 int mcnt = 0; 1605 1606 mta = sc->mta; 1607 bzero(mta, ETH_ADDR_LEN * EMX_MCAST_ADDR_MAX); 1608 1609 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1610 if (ifma->ifma_addr->sa_family != AF_LINK) 1611 continue; 1612 1613 if (mcnt == EMX_MCAST_ADDR_MAX) 1614 break; 1615 1616 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 1617 &mta[mcnt * ETHER_ADDR_LEN], ETHER_ADDR_LEN); 1618 mcnt++; 1619 } 1620 1621 if (mcnt >= EMX_MCAST_ADDR_MAX) { 1622 reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL); 1623 reg_rctl |= E1000_RCTL_MPE; 1624 E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl); 1625 } else { 1626 e1000_update_mc_addr_list(&sc->hw, mta, mcnt); 1627 } 1628 } 1629 1630 /* 1631 * This routine checks for link status and updates statistics. 
1632 */ 1633 static void 1634 emx_timer(void *xsc) 1635 { 1636 struct emx_softc *sc = xsc; 1637 struct ifnet *ifp = &sc->arpcom.ac_if; 1638 1639 lwkt_serialize_enter(&sc->main_serialize); 1640 1641 emx_update_link_status(sc); 1642 emx_update_stats(sc); 1643 1644 /* Reset LAA into RAR[0] on 82571 */ 1645 if (e1000_get_laa_state_82571(&sc->hw) == TRUE) 1646 e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0); 1647 1648 if (emx_display_debug_stats && (ifp->if_flags & IFF_RUNNING)) 1649 emx_print_hw_stats(sc); 1650 1651 emx_smartspeed(sc); 1652 1653 callout_reset(&sc->timer, hz, emx_timer, sc); 1654 1655 lwkt_serialize_exit(&sc->main_serialize); 1656 } 1657 1658 static void 1659 emx_update_link_status(struct emx_softc *sc) 1660 { 1661 struct e1000_hw *hw = &sc->hw; 1662 struct ifnet *ifp = &sc->arpcom.ac_if; 1663 device_t dev = sc->dev; 1664 uint32_t link_check = 0; 1665 1666 /* Get the cached link value or read phy for real */ 1667 switch (hw->phy.media_type) { 1668 case e1000_media_type_copper: 1669 if (hw->mac.get_link_status) { 1670 /* Do the work to read phy */ 1671 e1000_check_for_link(hw); 1672 link_check = !hw->mac.get_link_status; 1673 if (link_check) /* ESB2 fix */ 1674 e1000_cfg_on_link_up(hw); 1675 } else { 1676 link_check = TRUE; 1677 } 1678 break; 1679 1680 case e1000_media_type_fiber: 1681 e1000_check_for_link(hw); 1682 link_check = E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU; 1683 break; 1684 1685 case e1000_media_type_internal_serdes: 1686 e1000_check_for_link(hw); 1687 link_check = sc->hw.mac.serdes_has_link; 1688 break; 1689 1690 case e1000_media_type_unknown: 1691 default: 1692 break; 1693 } 1694 1695 /* Now check for a transition */ 1696 if (link_check && sc->link_active == 0) { 1697 e1000_get_speed_and_duplex(hw, &sc->link_speed, 1698 &sc->link_duplex); 1699 1700 /* 1701 * Check if we should enable/disable SPEED_MODE bit on 1702 * 82571EB/82572EI 1703 */ 1704 if (sc->link_speed != SPEED_1000 && 1705 (hw->mac.type == e1000_82571 || 1706 hw->mac.type == e1000_82572)) { 1707 int tarc0; 1708 1709 tarc0 = E1000_READ_REG(hw, E1000_TARC(0)); 1710 tarc0 &= ~EMX_TARC_SPEED_MODE; 1711 E1000_WRITE_REG(hw, E1000_TARC(0), tarc0); 1712 } 1713 if (bootverbose) { 1714 device_printf(dev, "Link is up %d Mbps %s\n", 1715 sc->link_speed, 1716 ((sc->link_duplex == FULL_DUPLEX) ? 1717 "Full Duplex" : "Half Duplex")); 1718 } 1719 sc->link_active = 1; 1720 sc->smartspeed = 0; 1721 ifp->if_baudrate = sc->link_speed * 1000000; 1722 ifp->if_link_state = LINK_STATE_UP; 1723 if_link_state_change(ifp); 1724 } else if (!link_check && sc->link_active == 1) { 1725 ifp->if_baudrate = sc->link_speed = 0; 1726 sc->link_duplex = 0; 1727 if (bootverbose) 1728 device_printf(dev, "Link is Down\n"); 1729 sc->link_active = 0; 1730 #if 0 1731 /* Link down, disable watchdog */ 1732 if->if_timer = 0; 1733 #endif 1734 ifp->if_link_state = LINK_STATE_DOWN; 1735 if_link_state_change(ifp); 1736 } 1737 } 1738 1739 static void 1740 emx_stop(struct emx_softc *sc) 1741 { 1742 struct ifnet *ifp = &sc->arpcom.ac_if; 1743 int i; 1744 1745 ASSERT_IFNET_SERIALIZED_ALL(ifp); 1746 1747 emx_disable_intr(sc); 1748 1749 callout_stop(&sc->timer); 1750 1751 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1752 ifp->if_timer = 0; 1753 1754 /* 1755 * Disable multiple receive queues. 1756 * 1757 * NOTE: 1758 * We should disable multiple receive queues before 1759 * resetting the hardware. 
1760 */ 1761 E1000_WRITE_REG(&sc->hw, E1000_MRQC, 0); 1762 1763 e1000_reset_hw(&sc->hw); 1764 E1000_WRITE_REG(&sc->hw, E1000_WUC, 0); 1765 1766 for (i = 0; i < sc->num_tx_desc; i++) { 1767 struct emx_txbuf *tx_buffer = &sc->tx_buf[i]; 1768 1769 if (tx_buffer->m_head != NULL) { 1770 bus_dmamap_unload(sc->txtag, tx_buffer->map); 1771 m_freem(tx_buffer->m_head); 1772 tx_buffer->m_head = NULL; 1773 } 1774 } 1775 1776 for (i = 0; i < sc->rx_ring_cnt; ++i) 1777 emx_free_rx_ring(sc, &sc->rx_data[i]); 1778 1779 sc->csum_flags = 0; 1780 sc->csum_lhlen = 0; 1781 sc->csum_iphlen = 0; 1782 sc->csum_thlen = 0; 1783 sc->csum_mss = 0; 1784 sc->csum_pktlen = 0; 1785 1786 sc->tx_dd_head = 0; 1787 sc->tx_dd_tail = 0; 1788 sc->tx_nsegs = 0; 1789 } 1790 1791 static int 1792 emx_reset(struct emx_softc *sc) 1793 { 1794 device_t dev = sc->dev; 1795 uint16_t rx_buffer_size; 1796 1797 /* Set up smart power down as default off on newer adapters. */ 1798 if (!emx_smart_pwr_down && 1799 (sc->hw.mac.type == e1000_82571 || 1800 sc->hw.mac.type == e1000_82572)) { 1801 uint16_t phy_tmp = 0; 1802 1803 /* Speed up time to link by disabling smart power down. */ 1804 e1000_read_phy_reg(&sc->hw, 1805 IGP02E1000_PHY_POWER_MGMT, &phy_tmp); 1806 phy_tmp &= ~IGP02E1000_PM_SPD; 1807 e1000_write_phy_reg(&sc->hw, 1808 IGP02E1000_PHY_POWER_MGMT, phy_tmp); 1809 } 1810 1811 /* 1812 * These parameters control the automatic generation (Tx) and 1813 * response (Rx) to Ethernet PAUSE frames. 1814 * - High water mark should allow for at least two frames to be 1815 * received after sending an XOFF. 1816 * - Low water mark works best when it is very near the high water mark. 1817 * This allows the receiver to restart by sending XON when it has 1818 * drained a bit. Here we use an arbitary value of 1500 which will 1819 * restart after one full frame is pulled from the buffer. There 1820 * could be several smaller frames in the buffer and if so they will 1821 * not trigger the XON until their total number reduces the buffer 1822 * by 1500. 1823 * - The pause time is fairly large at 1000 x 512ns = 512 usec. 
1824 */ 1825 rx_buffer_size = (E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff) << 10; 1826 1827 sc->hw.fc.high_water = rx_buffer_size - 1828 roundup2(sc->max_frame_size, 1024); 1829 sc->hw.fc.low_water = sc->hw.fc.high_water - 1500; 1830 1831 if (sc->hw.mac.type == e1000_80003es2lan) 1832 sc->hw.fc.pause_time = 0xFFFF; 1833 else 1834 sc->hw.fc.pause_time = EMX_FC_PAUSE_TIME; 1835 sc->hw.fc.send_xon = TRUE; 1836 sc->hw.fc.requested_mode = e1000_fc_full; 1837 1838 /* Issue a global reset */ 1839 e1000_reset_hw(&sc->hw); 1840 E1000_WRITE_REG(&sc->hw, E1000_WUC, 0); 1841 emx_disable_aspm(sc); 1842 1843 if (e1000_init_hw(&sc->hw) < 0) { 1844 device_printf(dev, "Hardware Initialization Failed\n"); 1845 return (EIO); 1846 } 1847 1848 E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN); 1849 e1000_get_phy_info(&sc->hw); 1850 e1000_check_for_link(&sc->hw); 1851 1852 return (0); 1853 } 1854 1855 static void 1856 emx_setup_ifp(struct emx_softc *sc) 1857 { 1858 struct ifnet *ifp = &sc->arpcom.ac_if; 1859 1860 if_initname(ifp, device_get_name(sc->dev), 1861 device_get_unit(sc->dev)); 1862 ifp->if_softc = sc; 1863 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1864 ifp->if_init = emx_init; 1865 ifp->if_ioctl = emx_ioctl; 1866 ifp->if_start = emx_start; 1867 #ifdef IFPOLL_ENABLE 1868 ifp->if_qpoll = emx_qpoll; 1869 #endif 1870 ifp->if_watchdog = emx_watchdog; 1871 ifp->if_serialize = emx_serialize; 1872 ifp->if_deserialize = emx_deserialize; 1873 ifp->if_tryserialize = emx_tryserialize; 1874 #ifdef INVARIANTS 1875 ifp->if_serialize_assert = emx_serialize_assert; 1876 #endif 1877 ifq_set_maxlen(&ifp->if_snd, sc->num_tx_desc - 1); 1878 ifq_set_ready(&ifp->if_snd); 1879 1880 ether_ifattach(ifp, sc->hw.mac.addr, NULL); 1881 1882 ifp->if_capabilities = IFCAP_HWCSUM | 1883 IFCAP_VLAN_HWTAGGING | 1884 IFCAP_VLAN_MTU | 1885 IFCAP_TSO; 1886 if (sc->rx_ring_cnt > 1) 1887 ifp->if_capabilities |= IFCAP_RSS; 1888 ifp->if_capenable = ifp->if_capabilities; 1889 ifp->if_hwassist = EMX_CSUM_FEATURES | CSUM_TSO; 1890 1891 /* 1892 * Tell the upper layer(s) we support long frames. 
1893 */ 1894 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 1895 1896 /* 1897 * Specify the media types supported by this sc and register 1898 * callbacks to update media and link information 1899 */ 1900 ifmedia_init(&sc->media, IFM_IMASK, 1901 emx_media_change, emx_media_status); 1902 if (sc->hw.phy.media_type == e1000_media_type_fiber || 1903 sc->hw.phy.media_type == e1000_media_type_internal_serdes) { 1904 ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX | IFM_FDX, 1905 0, NULL); 1906 ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL); 1907 } else { 1908 ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL); 1909 ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX, 1910 0, NULL); 1911 ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL); 1912 ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 1913 0, NULL); 1914 if (sc->hw.phy.type != e1000_phy_ife) { 1915 ifmedia_add(&sc->media, 1916 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); 1917 ifmedia_add(&sc->media, 1918 IFM_ETHER | IFM_1000_T, 0, NULL); 1919 } 1920 } 1921 ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL); 1922 ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO); 1923 } 1924 1925 /* 1926 * Workaround for SmartSpeed on 82541 and 82547 controllers 1927 */ 1928 static void 1929 emx_smartspeed(struct emx_softc *sc) 1930 { 1931 uint16_t phy_tmp; 1932 1933 if (sc->link_active || sc->hw.phy.type != e1000_phy_igp || 1934 sc->hw.mac.autoneg == 0 || 1935 (sc->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0) 1936 return; 1937 1938 if (sc->smartspeed == 0) { 1939 /* 1940 * If Master/Slave config fault is asserted twice, 1941 * we assume back-to-back 1942 */ 1943 e1000_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp); 1944 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT)) 1945 return; 1946 e1000_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp); 1947 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) { 1948 e1000_read_phy_reg(&sc->hw, 1949 PHY_1000T_CTRL, &phy_tmp); 1950 if (phy_tmp & CR_1000T_MS_ENABLE) { 1951 phy_tmp &= ~CR_1000T_MS_ENABLE; 1952 e1000_write_phy_reg(&sc->hw, 1953 PHY_1000T_CTRL, phy_tmp); 1954 sc->smartspeed++; 1955 if (sc->hw.mac.autoneg && 1956 !e1000_phy_setup_autoneg(&sc->hw) && 1957 !e1000_read_phy_reg(&sc->hw, 1958 PHY_CONTROL, &phy_tmp)) { 1959 phy_tmp |= MII_CR_AUTO_NEG_EN | 1960 MII_CR_RESTART_AUTO_NEG; 1961 e1000_write_phy_reg(&sc->hw, 1962 PHY_CONTROL, phy_tmp); 1963 } 1964 } 1965 } 1966 return; 1967 } else if (sc->smartspeed == EMX_SMARTSPEED_DOWNSHIFT) { 1968 /* If still no link, perhaps using 2/3 pair cable */ 1969 e1000_read_phy_reg(&sc->hw, PHY_1000T_CTRL, &phy_tmp); 1970 phy_tmp |= CR_1000T_MS_ENABLE; 1971 e1000_write_phy_reg(&sc->hw, PHY_1000T_CTRL, phy_tmp); 1972 if (sc->hw.mac.autoneg && 1973 !e1000_phy_setup_autoneg(&sc->hw) && 1974 !e1000_read_phy_reg(&sc->hw, PHY_CONTROL, &phy_tmp)) { 1975 phy_tmp |= MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG; 1976 e1000_write_phy_reg(&sc->hw, PHY_CONTROL, phy_tmp); 1977 } 1978 } 1979 1980 /* Restart process after EMX_SMARTSPEED_MAX iterations */ 1981 if (sc->smartspeed++ == EMX_SMARTSPEED_MAX) 1982 sc->smartspeed = 0; 1983 } 1984 1985 static int 1986 emx_create_tx_ring(struct emx_softc *sc) 1987 { 1988 device_t dev = sc->dev; 1989 struct emx_txbuf *tx_buffer; 1990 int error, i, tsize, ntxd; 1991 1992 /* 1993 * Validate number of transmit descriptors. It must not exceed 1994 * hardware maximum, and must be multiple of E1000_DBA_ALIGN. 
1995 */ 1996 ntxd = device_getenv_int(dev, "txd", emx_txd); 1997 if ((ntxd * sizeof(struct e1000_tx_desc)) % EMX_DBA_ALIGN != 0 || 1998 ntxd > EMX_MAX_TXD || ntxd < EMX_MIN_TXD) { 1999 device_printf(dev, "Using %d TX descriptors instead of %d!\n", 2000 EMX_DEFAULT_TXD, ntxd); 2001 sc->num_tx_desc = EMX_DEFAULT_TXD; 2002 } else { 2003 sc->num_tx_desc = ntxd; 2004 } 2005 2006 /* 2007 * Allocate Transmit Descriptor ring 2008 */ 2009 tsize = roundup2(sc->num_tx_desc * sizeof(struct e1000_tx_desc), 2010 EMX_DBA_ALIGN); 2011 sc->tx_desc_base = bus_dmamem_coherent_any(sc->parent_dtag, 2012 EMX_DBA_ALIGN, tsize, BUS_DMA_WAITOK, 2013 &sc->tx_desc_dtag, &sc->tx_desc_dmap, 2014 &sc->tx_desc_paddr); 2015 if (sc->tx_desc_base == NULL) { 2016 device_printf(dev, "Unable to allocate tx_desc memory\n"); 2017 return ENOMEM; 2018 } 2019 2020 sc->tx_buf = kmalloc(sizeof(struct emx_txbuf) * sc->num_tx_desc, 2021 M_DEVBUF, M_WAITOK | M_ZERO); 2022 2023 /* 2024 * Create DMA tags for tx buffers 2025 */ 2026 error = bus_dma_tag_create(sc->parent_dtag, /* parent */ 2027 1, 0, /* alignment, bounds */ 2028 BUS_SPACE_MAXADDR, /* lowaddr */ 2029 BUS_SPACE_MAXADDR, /* highaddr */ 2030 NULL, NULL, /* filter, filterarg */ 2031 EMX_TSO_SIZE, /* maxsize */ 2032 EMX_MAX_SCATTER, /* nsegments */ 2033 EMX_MAX_SEGSIZE, /* maxsegsize */ 2034 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | 2035 BUS_DMA_ONEBPAGE, /* flags */ 2036 &sc->txtag); 2037 if (error) { 2038 device_printf(dev, "Unable to allocate TX DMA tag\n"); 2039 kfree(sc->tx_buf, M_DEVBUF); 2040 sc->tx_buf = NULL; 2041 return error; 2042 } 2043 2044 /* 2045 * Create DMA maps for tx buffers 2046 */ 2047 for (i = 0; i < sc->num_tx_desc; i++) { 2048 tx_buffer = &sc->tx_buf[i]; 2049 2050 error = bus_dmamap_create(sc->txtag, 2051 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, 2052 &tx_buffer->map); 2053 if (error) { 2054 device_printf(dev, "Unable to create TX DMA map\n"); 2055 emx_destroy_tx_ring(sc, i); 2056 return error; 2057 } 2058 } 2059 return (0); 2060 } 2061 2062 static void 2063 emx_init_tx_ring(struct emx_softc *sc) 2064 { 2065 /* Clear the old ring contents */ 2066 bzero(sc->tx_desc_base, 2067 sizeof(struct e1000_tx_desc) * sc->num_tx_desc); 2068 2069 /* Reset state */ 2070 sc->next_avail_tx_desc = 0; 2071 sc->next_tx_to_clean = 0; 2072 sc->num_tx_desc_avail = sc->num_tx_desc; 2073 } 2074 2075 static void 2076 emx_init_tx_unit(struct emx_softc *sc) 2077 { 2078 uint32_t tctl, tarc, tipg = 0; 2079 uint64_t bus_addr; 2080 2081 /* Setup the Base and Length of the Tx Descriptor Ring */ 2082 bus_addr = sc->tx_desc_paddr; 2083 E1000_WRITE_REG(&sc->hw, E1000_TDLEN(0), 2084 sc->num_tx_desc * sizeof(struct e1000_tx_desc)); 2085 E1000_WRITE_REG(&sc->hw, E1000_TDBAH(0), 2086 (uint32_t)(bus_addr >> 32)); 2087 E1000_WRITE_REG(&sc->hw, E1000_TDBAL(0), 2088 (uint32_t)bus_addr); 2089 /* Setup the HW Tx Head and Tail descriptor pointers */ 2090 E1000_WRITE_REG(&sc->hw, E1000_TDT(0), 0); 2091 E1000_WRITE_REG(&sc->hw, E1000_TDH(0), 0); 2092 2093 /* Set the default values for the Tx Inter Packet Gap timer */ 2094 switch (sc->hw.mac.type) { 2095 case e1000_80003es2lan: 2096 tipg = DEFAULT_82543_TIPG_IPGR1; 2097 tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 << 2098 E1000_TIPG_IPGR2_SHIFT; 2099 break; 2100 2101 default: 2102 if (sc->hw.phy.media_type == e1000_media_type_fiber || 2103 sc->hw.phy.media_type == e1000_media_type_internal_serdes) 2104 tipg = DEFAULT_82543_TIPG_IPGT_FIBER; 2105 else 2106 tipg = DEFAULT_82543_TIPG_IPGT_COPPER; 2107 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT; 2108 tipg |= 
DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT; 2109 break; 2110 } 2111 2112 E1000_WRITE_REG(&sc->hw, E1000_TIPG, tipg); 2113 2114 /* NOTE: 0 is not allowed for TIDV */ 2115 E1000_WRITE_REG(&sc->hw, E1000_TIDV, 1); 2116 E1000_WRITE_REG(&sc->hw, E1000_TADV, 0); 2117 2118 if (sc->hw.mac.type == e1000_82571 || 2119 sc->hw.mac.type == e1000_82572) { 2120 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0)); 2121 tarc |= EMX_TARC_SPEED_MODE; 2122 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc); 2123 } else if (sc->hw.mac.type == e1000_80003es2lan) { 2124 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0)); 2125 tarc |= 1; 2126 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc); 2127 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1)); 2128 tarc |= 1; 2129 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc); 2130 } 2131 2132 /* Program the Transmit Control Register */ 2133 tctl = E1000_READ_REG(&sc->hw, E1000_TCTL); 2134 tctl &= ~E1000_TCTL_CT; 2135 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN | 2136 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); 2137 tctl |= E1000_TCTL_MULR; 2138 2139 /* This write will effectively turn on the transmit unit. */ 2140 E1000_WRITE_REG(&sc->hw, E1000_TCTL, tctl); 2141 } 2142 2143 static void 2144 emx_destroy_tx_ring(struct emx_softc *sc, int ndesc) 2145 { 2146 struct emx_txbuf *tx_buffer; 2147 int i; 2148 2149 /* Free Transmit Descriptor ring */ 2150 if (sc->tx_desc_base) { 2151 bus_dmamap_unload(sc->tx_desc_dtag, sc->tx_desc_dmap); 2152 bus_dmamem_free(sc->tx_desc_dtag, sc->tx_desc_base, 2153 sc->tx_desc_dmap); 2154 bus_dma_tag_destroy(sc->tx_desc_dtag); 2155 2156 sc->tx_desc_base = NULL; 2157 } 2158 2159 if (sc->tx_buf == NULL) 2160 return; 2161 2162 for (i = 0; i < ndesc; i++) { 2163 tx_buffer = &sc->tx_buf[i]; 2164 2165 KKASSERT(tx_buffer->m_head == NULL); 2166 bus_dmamap_destroy(sc->txtag, tx_buffer->map); 2167 } 2168 bus_dma_tag_destroy(sc->txtag); 2169 2170 kfree(sc->tx_buf, M_DEVBUF); 2171 sc->tx_buf = NULL; 2172 } 2173 2174 /* 2175 * The offload context needs to be set when we transfer the first 2176 * packet of a particular protocol (TCP/UDP). This routine has been 2177 * enhanced to deal with inserted VLAN headers. 2178 * 2179 * If the new packet's ether header length, ip header length and 2180 * csum offloading type are same as the previous packet, we should 2181 * avoid allocating a new csum context descriptor; mainly to take 2182 * advantage of the pipeline effect of the TX data read request. 2183 * 2184 * This function returns number of TX descrptors allocated for 2185 * csum context. 2186 */ 2187 static int 2188 emx_txcsum(struct emx_softc *sc, struct mbuf *mp, 2189 uint32_t *txd_upper, uint32_t *txd_lower) 2190 { 2191 struct e1000_context_desc *TXD; 2192 int curr_txd, ehdrlen, csum_flags; 2193 uint32_t cmd, hdr_len, ip_hlen; 2194 2195 csum_flags = mp->m_pkthdr.csum_flags & EMX_CSUM_FEATURES; 2196 ip_hlen = mp->m_pkthdr.csum_iphlen; 2197 ehdrlen = mp->m_pkthdr.csum_lhlen; 2198 2199 if (sc->csum_lhlen == ehdrlen && sc->csum_iphlen == ip_hlen && 2200 sc->csum_flags == csum_flags) { 2201 /* 2202 * Same csum offload context as the previous packets; 2203 * just return. 2204 */ 2205 *txd_upper = sc->csum_txd_upper; 2206 *txd_lower = sc->csum_txd_lower; 2207 return 0; 2208 } 2209 2210 /* 2211 * Setup a new csum offload context. 2212 */ 2213 2214 curr_txd = sc->next_avail_tx_desc; 2215 TXD = (struct e1000_context_desc *)&sc->tx_desc_base[curr_txd]; 2216 2217 cmd = 0; 2218 2219 /* Setup of IP header checksum. 
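	 *
	 * Illustrative values, assuming a plain (untagged) IPv4 frame with
	 * a 14 byte Ethernet header and a 20 byte IP header: ipcss = 14
	 * (first byte of the IP header), ipcse = 14 + 20 - 1 = 33 (last
	 * byte covered) and ipcso = 14 + 10 = 24 (the offset of ip_sum
	 * within the frame).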
*/ 2220 if (csum_flags & CSUM_IP) { 2221 /* 2222 * Start offset for header checksum calculation. 2223 * End offset for header checksum calculation. 2224 * Offset of place to put the checksum. 2225 */ 2226 TXD->lower_setup.ip_fields.ipcss = ehdrlen; 2227 TXD->lower_setup.ip_fields.ipcse = 2228 htole16(ehdrlen + ip_hlen - 1); 2229 TXD->lower_setup.ip_fields.ipcso = 2230 ehdrlen + offsetof(struct ip, ip_sum); 2231 cmd |= E1000_TXD_CMD_IP; 2232 *txd_upper |= E1000_TXD_POPTS_IXSM << 8; 2233 } 2234 hdr_len = ehdrlen + ip_hlen; 2235 2236 if (csum_flags & CSUM_TCP) { 2237 /* 2238 * Start offset for payload checksum calculation. 2239 * End offset for payload checksum calculation. 2240 * Offset of place to put the checksum. 2241 */ 2242 TXD->upper_setup.tcp_fields.tucss = hdr_len; 2243 TXD->upper_setup.tcp_fields.tucse = htole16(0); 2244 TXD->upper_setup.tcp_fields.tucso = 2245 hdr_len + offsetof(struct tcphdr, th_sum); 2246 cmd |= E1000_TXD_CMD_TCP; 2247 *txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2248 } else if (csum_flags & CSUM_UDP) { 2249 /* 2250 * Start offset for header checksum calculation. 2251 * End offset for header checksum calculation. 2252 * Offset of place to put the checksum. 2253 */ 2254 TXD->upper_setup.tcp_fields.tucss = hdr_len; 2255 TXD->upper_setup.tcp_fields.tucse = htole16(0); 2256 TXD->upper_setup.tcp_fields.tucso = 2257 hdr_len + offsetof(struct udphdr, uh_sum); 2258 *txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2259 } 2260 2261 *txd_lower = E1000_TXD_CMD_DEXT | /* Extended descr type */ 2262 E1000_TXD_DTYP_D; /* Data descr */ 2263 2264 /* Save the information for this csum offloading context */ 2265 sc->csum_lhlen = ehdrlen; 2266 sc->csum_iphlen = ip_hlen; 2267 sc->csum_flags = csum_flags; 2268 sc->csum_txd_upper = *txd_upper; 2269 sc->csum_txd_lower = *txd_lower; 2270 2271 TXD->tcp_seg_setup.data = htole32(0); 2272 TXD->cmd_and_length = 2273 htole32(E1000_TXD_CMD_IFCS | E1000_TXD_CMD_DEXT | cmd); 2274 2275 if (++curr_txd == sc->num_tx_desc) 2276 curr_txd = 0; 2277 2278 KKASSERT(sc->num_tx_desc_avail > 0); 2279 sc->num_tx_desc_avail--; 2280 2281 sc->next_avail_tx_desc = curr_txd; 2282 return 1; 2283 } 2284 2285 static void 2286 emx_txeof(struct emx_softc *sc) 2287 { 2288 struct ifnet *ifp = &sc->arpcom.ac_if; 2289 struct emx_txbuf *tx_buffer; 2290 int first, num_avail; 2291 2292 if (sc->tx_dd_head == sc->tx_dd_tail) 2293 return; 2294 2295 if (sc->num_tx_desc_avail == sc->num_tx_desc) 2296 return; 2297 2298 num_avail = sc->num_tx_desc_avail; 2299 first = sc->next_tx_to_clean; 2300 2301 while (sc->tx_dd_head != sc->tx_dd_tail) { 2302 int dd_idx = sc->tx_dd[sc->tx_dd_head]; 2303 struct e1000_tx_desc *tx_desc; 2304 2305 tx_desc = &sc->tx_desc_base[dd_idx]; 2306 if (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) { 2307 EMX_INC_TXDD_IDX(sc->tx_dd_head); 2308 2309 if (++dd_idx == sc->num_tx_desc) 2310 dd_idx = 0; 2311 2312 while (first != dd_idx) { 2313 logif(pkt_txclean); 2314 2315 num_avail++; 2316 2317 tx_buffer = &sc->tx_buf[first]; 2318 if (tx_buffer->m_head) { 2319 ifp->if_opackets++; 2320 bus_dmamap_unload(sc->txtag, 2321 tx_buffer->map); 2322 m_freem(tx_buffer->m_head); 2323 tx_buffer->m_head = NULL; 2324 } 2325 2326 if (++first == sc->num_tx_desc) 2327 first = 0; 2328 } 2329 } else { 2330 break; 2331 } 2332 } 2333 sc->next_tx_to_clean = first; 2334 sc->num_tx_desc_avail = num_avail; 2335 2336 if (sc->tx_dd_head == sc->tx_dd_tail) { 2337 sc->tx_dd_head = 0; 2338 sc->tx_dd_tail = 0; 2339 } 2340 2341 if (!EMX_IS_OACTIVE(sc)) { 2342 ifp->if_flags &= ~IFF_OACTIVE; 2343 2344 /* All 
clean, turn off the timer */ 2345 if (sc->num_tx_desc_avail == sc->num_tx_desc) 2346 ifp->if_timer = 0; 2347 } 2348 } 2349 2350 static void 2351 emx_tx_collect(struct emx_softc *sc) 2352 { 2353 struct ifnet *ifp = &sc->arpcom.ac_if; 2354 struct emx_txbuf *tx_buffer; 2355 int tdh, first, num_avail, dd_idx = -1; 2356 2357 if (sc->num_tx_desc_avail == sc->num_tx_desc) 2358 return; 2359 2360 tdh = E1000_READ_REG(&sc->hw, E1000_TDH(0)); 2361 if (tdh == sc->next_tx_to_clean) 2362 return; 2363 2364 if (sc->tx_dd_head != sc->tx_dd_tail) 2365 dd_idx = sc->tx_dd[sc->tx_dd_head]; 2366 2367 num_avail = sc->num_tx_desc_avail; 2368 first = sc->next_tx_to_clean; 2369 2370 while (first != tdh) { 2371 logif(pkt_txclean); 2372 2373 num_avail++; 2374 2375 tx_buffer = &sc->tx_buf[first]; 2376 if (tx_buffer->m_head) { 2377 ifp->if_opackets++; 2378 bus_dmamap_unload(sc->txtag, 2379 tx_buffer->map); 2380 m_freem(tx_buffer->m_head); 2381 tx_buffer->m_head = NULL; 2382 } 2383 2384 if (first == dd_idx) { 2385 EMX_INC_TXDD_IDX(sc->tx_dd_head); 2386 if (sc->tx_dd_head == sc->tx_dd_tail) { 2387 sc->tx_dd_head = 0; 2388 sc->tx_dd_tail = 0; 2389 dd_idx = -1; 2390 } else { 2391 dd_idx = sc->tx_dd[sc->tx_dd_head]; 2392 } 2393 } 2394 2395 if (++first == sc->num_tx_desc) 2396 first = 0; 2397 } 2398 sc->next_tx_to_clean = first; 2399 sc->num_tx_desc_avail = num_avail; 2400 2401 if (!EMX_IS_OACTIVE(sc)) { 2402 ifp->if_flags &= ~IFF_OACTIVE; 2403 2404 /* All clean, turn off the timer */ 2405 if (sc->num_tx_desc_avail == sc->num_tx_desc) 2406 ifp->if_timer = 0; 2407 } 2408 } 2409 2410 /* 2411 * When link is lost there is sometimes still work pending in the TX 2412 * ring, which would trigger a watchdog timeout; rather than allow that, 2413 * do an attempted cleanup here and then reinit. Note that this has 2414 * been seen mostly with fiber adapters. 2415 */ 2416 static void 2417 emx_tx_purge(struct emx_softc *sc) 2418 { 2419 struct ifnet *ifp = &sc->arpcom.ac_if; 2420 2421 if (!sc->link_active && ifp->if_timer) { 2422 emx_tx_collect(sc); 2423 if (ifp->if_timer) { 2424 if_printf(ifp, "Link lost, TX pending, reinit\n"); 2425 ifp->if_timer = 0; 2426 emx_init(sc); 2427 } 2428 } 2429 } 2430 2431 static int 2432 emx_newbuf(struct emx_softc *sc, struct emx_rxdata *rdata, int i, int init) 2433 { 2434 struct mbuf *m; 2435 bus_dma_segment_t seg; 2436 bus_dmamap_t map; 2437 struct emx_rxbuf *rx_buffer; 2438 int error, nseg; 2439 2440 m = m_getcl(init ?
MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR); 2441 if (m == NULL) { 2442 rdata->mbuf_cluster_failed++; 2443 if (init) { 2444 if_printf(&sc->arpcom.ac_if, 2445 "Unable to allocate RX mbuf\n"); 2446 } 2447 return (ENOBUFS); 2448 } 2449 m->m_len = m->m_pkthdr.len = MCLBYTES; 2450 2451 if (sc->max_frame_size <= MCLBYTES - ETHER_ALIGN) 2452 m_adj(m, ETHER_ALIGN); 2453 2454 error = bus_dmamap_load_mbuf_segment(rdata->rxtag, 2455 rdata->rx_sparemap, m, 2456 &seg, 1, &nseg, BUS_DMA_NOWAIT); 2457 if (error) { 2458 m_freem(m); 2459 if (init) { 2460 if_printf(&sc->arpcom.ac_if, 2461 "Unable to load RX mbuf\n"); 2462 } 2463 return (error); 2464 } 2465 2466 rx_buffer = &rdata->rx_buf[i]; 2467 if (rx_buffer->m_head != NULL) 2468 bus_dmamap_unload(rdata->rxtag, rx_buffer->map); 2469 2470 map = rx_buffer->map; 2471 rx_buffer->map = rdata->rx_sparemap; 2472 rdata->rx_sparemap = map; 2473 2474 rx_buffer->m_head = m; 2475 rx_buffer->paddr = seg.ds_addr; 2476 2477 emx_setup_rxdesc(&rdata->rx_desc[i], rx_buffer); 2478 return (0); 2479 } 2480 2481 static int 2482 emx_create_rx_ring(struct emx_softc *sc, struct emx_rxdata *rdata) 2483 { 2484 device_t dev = sc->dev; 2485 struct emx_rxbuf *rx_buffer; 2486 int i, error, rsize, nrxd; 2487 2488 /* 2489 * Validate number of receive descriptors. It must not exceed 2490 * hardware maximum, and must be multiple of E1000_DBA_ALIGN. 2491 */ 2492 nrxd = device_getenv_int(dev, "rxd", emx_rxd); 2493 if ((nrxd * sizeof(emx_rxdesc_t)) % EMX_DBA_ALIGN != 0 || 2494 nrxd > EMX_MAX_RXD || nrxd < EMX_MIN_RXD) { 2495 device_printf(dev, "Using %d RX descriptors instead of %d!\n", 2496 EMX_DEFAULT_RXD, nrxd); 2497 rdata->num_rx_desc = EMX_DEFAULT_RXD; 2498 } else { 2499 rdata->num_rx_desc = nrxd; 2500 } 2501 2502 /* 2503 * Allocate Receive Descriptor ring 2504 */ 2505 rsize = roundup2(rdata->num_rx_desc * sizeof(emx_rxdesc_t), 2506 EMX_DBA_ALIGN); 2507 rdata->rx_desc = bus_dmamem_coherent_any(sc->parent_dtag, 2508 EMX_DBA_ALIGN, rsize, BUS_DMA_WAITOK, 2509 &rdata->rx_desc_dtag, &rdata->rx_desc_dmap, 2510 &rdata->rx_desc_paddr); 2511 if (rdata->rx_desc == NULL) { 2512 device_printf(dev, "Unable to allocate rx_desc memory\n"); 2513 return ENOMEM; 2514 } 2515 2516 rdata->rx_buf = kmalloc(sizeof(struct emx_rxbuf) * rdata->num_rx_desc, 2517 M_DEVBUF, M_WAITOK | M_ZERO); 2518 2519 /* 2520 * Create DMA tag for rx buffers 2521 */ 2522 error = bus_dma_tag_create(sc->parent_dtag, /* parent */ 2523 1, 0, /* alignment, bounds */ 2524 BUS_SPACE_MAXADDR, /* lowaddr */ 2525 BUS_SPACE_MAXADDR, /* highaddr */ 2526 NULL, NULL, /* filter, filterarg */ 2527 MCLBYTES, /* maxsize */ 2528 1, /* nsegments */ 2529 MCLBYTES, /* maxsegsize */ 2530 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, /* flags */ 2531 &rdata->rxtag); 2532 if (error) { 2533 device_printf(dev, "Unable to allocate RX DMA tag\n"); 2534 kfree(rdata->rx_buf, M_DEVBUF); 2535 rdata->rx_buf = NULL; 2536 return error; 2537 } 2538 2539 /* 2540 * Create spare DMA map for rx buffers 2541 */ 2542 error = bus_dmamap_create(rdata->rxtag, BUS_DMA_WAITOK, 2543 &rdata->rx_sparemap); 2544 if (error) { 2545 device_printf(dev, "Unable to create spare RX DMA map\n"); 2546 bus_dma_tag_destroy(rdata->rxtag); 2547 kfree(rdata->rx_buf, M_DEVBUF); 2548 rdata->rx_buf = NULL; 2549 return error; 2550 } 2551 2552 /* 2553 * Create DMA maps for rx buffers 2554 */ 2555 for (i = 0; i < rdata->num_rx_desc; i++) { 2556 rx_buffer = &rdata->rx_buf[i]; 2557 2558 error = bus_dmamap_create(rdata->rxtag, BUS_DMA_WAITOK, 2559 &rx_buffer->map); 2560 if (error) { 2561 device_printf(dev, "Unable to 
create RX DMA map\n"); 2562 emx_destroy_rx_ring(sc, rdata, i); 2563 return error; 2564 } 2565 } 2566 return (0); 2567 } 2568 2569 static void 2570 emx_free_rx_ring(struct emx_softc *sc, struct emx_rxdata *rdata) 2571 { 2572 int i; 2573 2574 for (i = 0; i < rdata->num_rx_desc; i++) { 2575 struct emx_rxbuf *rx_buffer = &rdata->rx_buf[i]; 2576 2577 if (rx_buffer->m_head != NULL) { 2578 bus_dmamap_unload(rdata->rxtag, rx_buffer->map); 2579 m_freem(rx_buffer->m_head); 2580 rx_buffer->m_head = NULL; 2581 } 2582 } 2583 2584 if (rdata->fmp != NULL) 2585 m_freem(rdata->fmp); 2586 rdata->fmp = NULL; 2587 rdata->lmp = NULL; 2588 } 2589 2590 static int 2591 emx_init_rx_ring(struct emx_softc *sc, struct emx_rxdata *rdata) 2592 { 2593 int i, error; 2594 2595 /* Reset descriptor ring */ 2596 bzero(rdata->rx_desc, sizeof(emx_rxdesc_t) * rdata->num_rx_desc); 2597 2598 /* Allocate new ones. */ 2599 for (i = 0; i < rdata->num_rx_desc; i++) { 2600 error = emx_newbuf(sc, rdata, i, 1); 2601 if (error) 2602 return (error); 2603 } 2604 2605 /* Setup our descriptor pointers */ 2606 rdata->next_rx_desc_to_check = 0; 2607 2608 return (0); 2609 } 2610 2611 static void 2612 emx_init_rx_unit(struct emx_softc *sc) 2613 { 2614 struct ifnet *ifp = &sc->arpcom.ac_if; 2615 uint64_t bus_addr; 2616 uint32_t rctl, itr, rfctl; 2617 int i; 2618 2619 /* 2620 * Make sure receives are disabled while setting 2621 * up the descriptor ring 2622 */ 2623 rctl = E1000_READ_REG(&sc->hw, E1000_RCTL); 2624 E1000_WRITE_REG(&sc->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); 2625 2626 /* 2627 * Set the interrupt throttling rate. Value is calculated 2628 * as ITR = 1 / (INT_THROTTLE_CEIL * 256ns) 2629 */ 2630 if (sc->int_throttle_ceil) 2631 itr = 1000000000 / 256 / sc->int_throttle_ceil; 2632 else 2633 itr = 0; 2634 emx_set_itr(sc, itr); 2635 2636 /* Use extended RX descriptor */ 2637 rfctl = E1000_RFCTL_EXTEN; 2638 2639 /* Disable accelerated ackknowledge */ 2640 if (sc->hw.mac.type == e1000_82574) 2641 rfctl |= E1000_RFCTL_ACK_DIS; 2642 2643 E1000_WRITE_REG(&sc->hw, E1000_RFCTL, rfctl); 2644 2645 /* 2646 * Receive Checksum Offload for TCP and UDP 2647 * 2648 * Checksum offloading is also enabled if multiple receive 2649 * queue is to be supported, since we need it to figure out 2650 * packet type. 2651 */ 2652 if ((ifp->if_capenable & IFCAP_RXCSUM) || 2653 sc->rx_ring_cnt > 1) { 2654 uint32_t rxcsum; 2655 2656 rxcsum = E1000_READ_REG(&sc->hw, E1000_RXCSUM); 2657 2658 /* 2659 * NOTE: 2660 * PCSD must be enabled to enable multiple 2661 * receive queues. 2662 */ 2663 rxcsum |= E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL | 2664 E1000_RXCSUM_PCSD; 2665 E1000_WRITE_REG(&sc->hw, E1000_RXCSUM, rxcsum); 2666 } 2667 2668 /* 2669 * Configure multiple receive queue (RSS) 2670 */ 2671 if (sc->rx_ring_cnt > 1) { 2672 uint8_t key[EMX_NRSSRK * EMX_RSSRK_SIZE]; 2673 uint32_t reta; 2674 2675 KASSERT(sc->rx_ring_cnt == EMX_NRX_RING, 2676 ("invalid number of RX ring (%d)", sc->rx_ring_cnt)); 2677 2678 /* 2679 * NOTE: 2680 * When we reach here, RSS has already been disabled 2681 * in emx_stop(), so we could safely configure RSS key 2682 * and redirect table. 
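		 *
		 * As a sketch of the redirect table built below: each
		 * 32-bit RETA register packs four one-byte entries, so
		 * with rx_ring_cnt == 2 the entries simply alternate
		 * ring 0, ring 1, ring 0, ring 1; a flow whose Toeplitz
		 * hash has bit 0 set is then steered to the second ring.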
2683 */ 2684 2685 /* 2686 * Configure RSS key 2687 */ 2688 toeplitz_get_key(key, sizeof(key)); 2689 for (i = 0; i < EMX_NRSSRK; ++i) { 2690 uint32_t rssrk; 2691 2692 rssrk = EMX_RSSRK_VAL(key, i); 2693 EMX_RSS_DPRINTF(sc, 1, "rssrk%d 0x%08x\n", i, rssrk); 2694 2695 E1000_WRITE_REG(&sc->hw, E1000_RSSRK(i), rssrk); 2696 } 2697 2698 /* 2699 * Configure RSS redirect table in following fashion: 2700 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)] 2701 */ 2702 reta = 0; 2703 for (i = 0; i < EMX_RETA_SIZE; ++i) { 2704 uint32_t q; 2705 2706 q = (i % sc->rx_ring_cnt) << EMX_RETA_RINGIDX_SHIFT; 2707 reta |= q << (8 * i); 2708 } 2709 EMX_RSS_DPRINTF(sc, 1, "reta 0x%08x\n", reta); 2710 2711 for (i = 0; i < EMX_NRETA; ++i) 2712 E1000_WRITE_REG(&sc->hw, E1000_RETA(i), reta); 2713 2714 /* 2715 * Enable multiple receive queues. 2716 * Enable IPv4 RSS standard hash functions. 2717 * Disable RSS interrupt. 2718 */ 2719 E1000_WRITE_REG(&sc->hw, E1000_MRQC, 2720 E1000_MRQC_ENABLE_RSS_2Q | 2721 E1000_MRQC_RSS_FIELD_IPV4_TCP | 2722 E1000_MRQC_RSS_FIELD_IPV4); 2723 } 2724 2725 /* 2726 * XXX TEMPORARY WORKAROUND: on some systems with 82573 2727 * long latencies are observed, like Lenovo X60. This 2728 * change eliminates the problem, but since having positive 2729 * values in RDTR is a known source of problems on other 2730 * platforms another solution is being sought. 2731 */ 2732 if (emx_82573_workaround && sc->hw.mac.type == e1000_82573) { 2733 E1000_WRITE_REG(&sc->hw, E1000_RADV, EMX_RADV_82573); 2734 E1000_WRITE_REG(&sc->hw, E1000_RDTR, EMX_RDTR_82573); 2735 } 2736 2737 for (i = 0; i < sc->rx_ring_cnt; ++i) { 2738 struct emx_rxdata *rdata = &sc->rx_data[i]; 2739 2740 /* 2741 * Setup the Base and Length of the Rx Descriptor Ring 2742 */ 2743 bus_addr = rdata->rx_desc_paddr; 2744 E1000_WRITE_REG(&sc->hw, E1000_RDLEN(i), 2745 rdata->num_rx_desc * sizeof(emx_rxdesc_t)); 2746 E1000_WRITE_REG(&sc->hw, E1000_RDBAH(i), 2747 (uint32_t)(bus_addr >> 32)); 2748 E1000_WRITE_REG(&sc->hw, E1000_RDBAL(i), 2749 (uint32_t)bus_addr); 2750 2751 /* 2752 * Setup the HW Rx Head and Tail Descriptor Pointers 2753 */ 2754 E1000_WRITE_REG(&sc->hw, E1000_RDH(i), 0); 2755 E1000_WRITE_REG(&sc->hw, E1000_RDT(i), 2756 sc->rx_data[i].num_rx_desc - 1); 2757 } 2758 2759 /* Setup the Receive Control Register */ 2760 rctl &= ~(3 << E1000_RCTL_MO_SHIFT); 2761 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO | 2762 E1000_RCTL_RDMTS_HALF | E1000_RCTL_SECRC | 2763 (sc->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); 2764 2765 /* Make sure VLAN Filters are off */ 2766 rctl &= ~E1000_RCTL_VFE; 2767 2768 /* Don't store bad paket */ 2769 rctl &= ~E1000_RCTL_SBP; 2770 2771 /* MCLBYTES */ 2772 rctl |= E1000_RCTL_SZ_2048; 2773 2774 if (ifp->if_mtu > ETHERMTU) 2775 rctl |= E1000_RCTL_LPE; 2776 else 2777 rctl &= ~E1000_RCTL_LPE; 2778 2779 /* Enable Receives */ 2780 E1000_WRITE_REG(&sc->hw, E1000_RCTL, rctl); 2781 } 2782 2783 static void 2784 emx_destroy_rx_ring(struct emx_softc *sc, struct emx_rxdata *rdata, int ndesc) 2785 { 2786 struct emx_rxbuf *rx_buffer; 2787 int i; 2788 2789 /* Free Receive Descriptor ring */ 2790 if (rdata->rx_desc) { 2791 bus_dmamap_unload(rdata->rx_desc_dtag, rdata->rx_desc_dmap); 2792 bus_dmamem_free(rdata->rx_desc_dtag, rdata->rx_desc, 2793 rdata->rx_desc_dmap); 2794 bus_dma_tag_destroy(rdata->rx_desc_dtag); 2795 2796 rdata->rx_desc = NULL; 2797 } 2798 2799 if (rdata->rx_buf == NULL) 2800 return; 2801 2802 for (i = 0; i < ndesc; i++) { 2803 rx_buffer = &rdata->rx_buf[i]; 2804 2805 
KKASSERT(rx_buffer->m_head == NULL); 2806 bus_dmamap_destroy(rdata->rxtag, rx_buffer->map); 2807 } 2808 bus_dmamap_destroy(rdata->rxtag, rdata->rx_sparemap); 2809 bus_dma_tag_destroy(rdata->rxtag); 2810 2811 kfree(rdata->rx_buf, M_DEVBUF); 2812 rdata->rx_buf = NULL; 2813 } 2814 2815 static void 2816 emx_rxeof(struct emx_softc *sc, int ring_idx, int count) 2817 { 2818 struct emx_rxdata *rdata = &sc->rx_data[ring_idx]; 2819 struct ifnet *ifp = &sc->arpcom.ac_if; 2820 uint32_t staterr; 2821 emx_rxdesc_t *current_desc; 2822 struct mbuf *mp; 2823 int i; 2824 2825 i = rdata->next_rx_desc_to_check; 2826 current_desc = &rdata->rx_desc[i]; 2827 staterr = le32toh(current_desc->rxd_staterr); 2828 2829 if (!(staterr & E1000_RXD_STAT_DD)) 2830 return; 2831 2832 while ((staterr & E1000_RXD_STAT_DD) && count != 0) { 2833 struct pktinfo *pi = NULL, pi0; 2834 struct emx_rxbuf *rx_buf = &rdata->rx_buf[i]; 2835 struct mbuf *m = NULL; 2836 int eop, len; 2837 2838 logif(pkt_receive); 2839 2840 mp = rx_buf->m_head; 2841 2842 /* 2843 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT 2844 * needs to access the last received byte in the mbuf. 2845 */ 2846 bus_dmamap_sync(rdata->rxtag, rx_buf->map, 2847 BUS_DMASYNC_POSTREAD); 2848 2849 len = le16toh(current_desc->rxd_length); 2850 if (staterr & E1000_RXD_STAT_EOP) { 2851 count--; 2852 eop = 1; 2853 } else { 2854 eop = 0; 2855 } 2856 2857 if (!(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) { 2858 uint16_t vlan = 0; 2859 uint32_t mrq, rss_hash; 2860 2861 /* 2862 * Save several necessary information, 2863 * before emx_newbuf() destroy it. 2864 */ 2865 if ((staterr & E1000_RXD_STAT_VP) && eop) 2866 vlan = le16toh(current_desc->rxd_vlan); 2867 2868 mrq = le32toh(current_desc->rxd_mrq); 2869 rss_hash = le32toh(current_desc->rxd_rss); 2870 2871 EMX_RSS_DPRINTF(sc, 10, 2872 "ring%d, mrq 0x%08x, rss_hash 0x%08x\n", 2873 ring_idx, mrq, rss_hash); 2874 2875 if (emx_newbuf(sc, rdata, i, 0) != 0) { 2876 ifp->if_iqdrops++; 2877 goto discard; 2878 } 2879 2880 /* Assign correct length to the current fragment */ 2881 mp->m_len = len; 2882 2883 if (rdata->fmp == NULL) { 2884 mp->m_pkthdr.len = len; 2885 rdata->fmp = mp; /* Store the first mbuf */ 2886 rdata->lmp = mp; 2887 } else { 2888 /* 2889 * Chain mbuf's together 2890 */ 2891 rdata->lmp->m_next = mp; 2892 rdata->lmp = rdata->lmp->m_next; 2893 rdata->fmp->m_pkthdr.len += len; 2894 } 2895 2896 if (eop) { 2897 rdata->fmp->m_pkthdr.rcvif = ifp; 2898 ifp->if_ipackets++; 2899 2900 if (ifp->if_capenable & IFCAP_RXCSUM) 2901 emx_rxcsum(staterr, rdata->fmp); 2902 2903 if (staterr & E1000_RXD_STAT_VP) { 2904 rdata->fmp->m_pkthdr.ether_vlantag = 2905 vlan; 2906 rdata->fmp->m_flags |= M_VLANTAG; 2907 } 2908 m = rdata->fmp; 2909 rdata->fmp = NULL; 2910 rdata->lmp = NULL; 2911 2912 if (ifp->if_capenable & IFCAP_RSS) { 2913 pi = emx_rssinfo(m, &pi0, mrq, 2914 rss_hash, staterr); 2915 } 2916 #ifdef EMX_RSS_DEBUG 2917 rdata->rx_pkts++; 2918 #endif 2919 } 2920 } else { 2921 ifp->if_ierrors++; 2922 discard: 2923 emx_setup_rxdesc(current_desc, rx_buf); 2924 if (rdata->fmp != NULL) { 2925 m_freem(rdata->fmp); 2926 rdata->fmp = NULL; 2927 rdata->lmp = NULL; 2928 } 2929 m = NULL; 2930 } 2931 2932 if (m != NULL) 2933 ether_input_pkt(ifp, m, pi); 2934 2935 /* Advance our pointers to the next descriptor. 
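		 *
		 * (Both this software index and the hardware tail written
		 * after the loop wrap modulo num_rx_desc; e.g. with a
		 * 512 entry ring, stopping at descriptor 0 means RDT is
		 * written with 511.)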
*/ 2936 if (++i == rdata->num_rx_desc) 2937 i = 0; 2938 2939 current_desc = &rdata->rx_desc[i]; 2940 staterr = le32toh(current_desc->rxd_staterr); 2941 } 2942 rdata->next_rx_desc_to_check = i; 2943 2944 /* Advance the E1000's Receive Queue "Tail Pointer". */ 2945 if (--i < 0) 2946 i = rdata->num_rx_desc - 1; 2947 E1000_WRITE_REG(&sc->hw, E1000_RDT(ring_idx), i); 2948 } 2949 2950 static void 2951 emx_enable_intr(struct emx_softc *sc) 2952 { 2953 uint32_t ims_mask = IMS_ENABLE_MASK; 2954 2955 lwkt_serialize_handler_enable(&sc->main_serialize); 2956 2957 #if 0 2958 if (sc->hw.mac.type == e1000_82574) { 2959 E1000_WRITE_REG(hw, EMX_EIAC, EM_MSIX_MASK); 2960 ims_mask |= EM_MSIX_MASK; 2961 } 2962 #endif 2963 E1000_WRITE_REG(&sc->hw, E1000_IMS, ims_mask); 2964 } 2965 2966 static void 2967 emx_disable_intr(struct emx_softc *sc) 2968 { 2969 if (sc->hw.mac.type == e1000_82574) 2970 E1000_WRITE_REG(&sc->hw, EMX_EIAC, 0); 2971 E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff); 2972 2973 lwkt_serialize_handler_disable(&sc->main_serialize); 2974 } 2975 2976 /* 2977 * Bit of a misnomer, what this really means is 2978 * to enable OS management of the system... aka 2979 * to disable special hardware management features 2980 */ 2981 static void 2982 emx_get_mgmt(struct emx_softc *sc) 2983 { 2984 /* A shared code workaround */ 2985 if (sc->flags & EMX_FLAG_HAS_MGMT) { 2986 int manc2h = E1000_READ_REG(&sc->hw, E1000_MANC2H); 2987 int manc = E1000_READ_REG(&sc->hw, E1000_MANC); 2988 2989 /* disable hardware interception of ARP */ 2990 manc &= ~(E1000_MANC_ARP_EN); 2991 2992 /* enable receiving management packets to the host */ 2993 manc |= E1000_MANC_EN_MNG2HOST; 2994 #define E1000_MNG2HOST_PORT_623 (1 << 5) 2995 #define E1000_MNG2HOST_PORT_664 (1 << 6) 2996 manc2h |= E1000_MNG2HOST_PORT_623; 2997 manc2h |= E1000_MNG2HOST_PORT_664; 2998 E1000_WRITE_REG(&sc->hw, E1000_MANC2H, manc2h); 2999 3000 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc); 3001 } 3002 } 3003 3004 /* 3005 * Give control back to hardware management 3006 * controller if there is one. 3007 */ 3008 static void 3009 emx_rel_mgmt(struct emx_softc *sc) 3010 { 3011 if (sc->flags & EMX_FLAG_HAS_MGMT) { 3012 int manc = E1000_READ_REG(&sc->hw, E1000_MANC); 3013 3014 /* re-enable hardware interception of ARP */ 3015 manc |= E1000_MANC_ARP_EN; 3016 manc &= ~E1000_MANC_EN_MNG2HOST; 3017 3018 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc); 3019 } 3020 } 3021 3022 /* 3023 * emx_get_hw_control() sets {CTRL_EXT|FWSM}:DRV_LOAD bit. 3024 * For ASF and Pass Through versions of f/w this means that 3025 * the driver is loaded. For AMT version (only with 82573) 3026 * of the f/w this means that the network i/f is open. 3027 */ 3028 static void 3029 emx_get_hw_control(struct emx_softc *sc) 3030 { 3031 /* Let firmware know the driver has taken over */ 3032 if (sc->hw.mac.type == e1000_82573) { 3033 uint32_t swsm; 3034 3035 swsm = E1000_READ_REG(&sc->hw, E1000_SWSM); 3036 E1000_WRITE_REG(&sc->hw, E1000_SWSM, 3037 swsm | E1000_SWSM_DRV_LOAD); 3038 } else { 3039 uint32_t ctrl_ext; 3040 3041 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT); 3042 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, 3043 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); 3044 } 3045 sc->flags |= EMX_FLAG_HW_CTRL; 3046 } 3047 3048 /* 3049 * emx_rel_hw_control() resets {CTRL_EXT|FWSM}:DRV_LOAD bit. 3050 * For ASF and Pass Through versions of f/w this means that the 3051 * driver is no longer loaded. For AMT version (only with 82573) 3052 * of the f/w this means that the network i/f is closed. 
3053 */ 3054 static void 3055 emx_rel_hw_control(struct emx_softc *sc) 3056 { 3057 if ((sc->flags & EMX_FLAG_HW_CTRL) == 0) 3058 return; 3059 sc->flags &= ~EMX_FLAG_HW_CTRL; 3060 3061 /* Let firmware taken over control of h/w */ 3062 if (sc->hw.mac.type == e1000_82573) { 3063 uint32_t swsm; 3064 3065 swsm = E1000_READ_REG(&sc->hw, E1000_SWSM); 3066 E1000_WRITE_REG(&sc->hw, E1000_SWSM, 3067 swsm & ~E1000_SWSM_DRV_LOAD); 3068 } else { 3069 uint32_t ctrl_ext; 3070 3071 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT); 3072 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, 3073 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); 3074 } 3075 } 3076 3077 static int 3078 emx_is_valid_eaddr(const uint8_t *addr) 3079 { 3080 char zero_addr[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 }; 3081 3082 if ((addr[0] & 1) || !bcmp(addr, zero_addr, ETHER_ADDR_LEN)) 3083 return (FALSE); 3084 3085 return (TRUE); 3086 } 3087 3088 /* 3089 * Enable PCI Wake On Lan capability 3090 */ 3091 void 3092 emx_enable_wol(device_t dev) 3093 { 3094 uint16_t cap, status; 3095 uint8_t id; 3096 3097 /* First find the capabilities pointer*/ 3098 cap = pci_read_config(dev, PCIR_CAP_PTR, 2); 3099 3100 /* Read the PM Capabilities */ 3101 id = pci_read_config(dev, cap, 1); 3102 if (id != PCIY_PMG) /* Something wrong */ 3103 return; 3104 3105 /* 3106 * OK, we have the power capabilities, 3107 * so now get the status register 3108 */ 3109 cap += PCIR_POWER_STATUS; 3110 status = pci_read_config(dev, cap, 2); 3111 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 3112 pci_write_config(dev, cap, status, 2); 3113 } 3114 3115 static void 3116 emx_update_stats(struct emx_softc *sc) 3117 { 3118 struct ifnet *ifp = &sc->arpcom.ac_if; 3119 3120 if (sc->hw.phy.media_type == e1000_media_type_copper || 3121 (E1000_READ_REG(&sc->hw, E1000_STATUS) & E1000_STATUS_LU)) { 3122 sc->stats.symerrs += E1000_READ_REG(&sc->hw, E1000_SYMERRS); 3123 sc->stats.sec += E1000_READ_REG(&sc->hw, E1000_SEC); 3124 } 3125 sc->stats.crcerrs += E1000_READ_REG(&sc->hw, E1000_CRCERRS); 3126 sc->stats.mpc += E1000_READ_REG(&sc->hw, E1000_MPC); 3127 sc->stats.scc += E1000_READ_REG(&sc->hw, E1000_SCC); 3128 sc->stats.ecol += E1000_READ_REG(&sc->hw, E1000_ECOL); 3129 3130 sc->stats.mcc += E1000_READ_REG(&sc->hw, E1000_MCC); 3131 sc->stats.latecol += E1000_READ_REG(&sc->hw, E1000_LATECOL); 3132 sc->stats.colc += E1000_READ_REG(&sc->hw, E1000_COLC); 3133 sc->stats.dc += E1000_READ_REG(&sc->hw, E1000_DC); 3134 sc->stats.rlec += E1000_READ_REG(&sc->hw, E1000_RLEC); 3135 sc->stats.xonrxc += E1000_READ_REG(&sc->hw, E1000_XONRXC); 3136 sc->stats.xontxc += E1000_READ_REG(&sc->hw, E1000_XONTXC); 3137 sc->stats.xoffrxc += E1000_READ_REG(&sc->hw, E1000_XOFFRXC); 3138 sc->stats.xofftxc += E1000_READ_REG(&sc->hw, E1000_XOFFTXC); 3139 sc->stats.fcruc += E1000_READ_REG(&sc->hw, E1000_FCRUC); 3140 sc->stats.prc64 += E1000_READ_REG(&sc->hw, E1000_PRC64); 3141 sc->stats.prc127 += E1000_READ_REG(&sc->hw, E1000_PRC127); 3142 sc->stats.prc255 += E1000_READ_REG(&sc->hw, E1000_PRC255); 3143 sc->stats.prc511 += E1000_READ_REG(&sc->hw, E1000_PRC511); 3144 sc->stats.prc1023 += E1000_READ_REG(&sc->hw, E1000_PRC1023); 3145 sc->stats.prc1522 += E1000_READ_REG(&sc->hw, E1000_PRC1522); 3146 sc->stats.gprc += E1000_READ_REG(&sc->hw, E1000_GPRC); 3147 sc->stats.bprc += E1000_READ_REG(&sc->hw, E1000_BPRC); 3148 sc->stats.mprc += E1000_READ_REG(&sc->hw, E1000_MPRC); 3149 sc->stats.gptc += E1000_READ_REG(&sc->hw, E1000_GPTC); 3150 3151 /* For the 64-bit byte counters the low dword must be read first. 
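	 *
	 * A minimal sketch of that read order (illustrative only, not the
	 * counters actually accumulated below):
	 *
	 *	lo = E1000_READ_REG(&sc->hw, E1000_GORCL);
	 *	hi = E1000_READ_REG(&sc->hw, E1000_GORCH);	(clears both)
	 *	octets += ((uint64_t)hi << 32) | lo;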
*/ 3152 /* Both registers clear on the read of the high dword */ 3153 3154 sc->stats.gorc += E1000_READ_REG(&sc->hw, E1000_GORCH); 3155 sc->stats.gotc += E1000_READ_REG(&sc->hw, E1000_GOTCH); 3156 3157 sc->stats.rnbc += E1000_READ_REG(&sc->hw, E1000_RNBC); 3158 sc->stats.ruc += E1000_READ_REG(&sc->hw, E1000_RUC); 3159 sc->stats.rfc += E1000_READ_REG(&sc->hw, E1000_RFC); 3160 sc->stats.roc += E1000_READ_REG(&sc->hw, E1000_ROC); 3161 sc->stats.rjc += E1000_READ_REG(&sc->hw, E1000_RJC); 3162 3163 sc->stats.tor += E1000_READ_REG(&sc->hw, E1000_TORH); 3164 sc->stats.tot += E1000_READ_REG(&sc->hw, E1000_TOTH); 3165 3166 sc->stats.tpr += E1000_READ_REG(&sc->hw, E1000_TPR); 3167 sc->stats.tpt += E1000_READ_REG(&sc->hw, E1000_TPT); 3168 sc->stats.ptc64 += E1000_READ_REG(&sc->hw, E1000_PTC64); 3169 sc->stats.ptc127 += E1000_READ_REG(&sc->hw, E1000_PTC127); 3170 sc->stats.ptc255 += E1000_READ_REG(&sc->hw, E1000_PTC255); 3171 sc->stats.ptc511 += E1000_READ_REG(&sc->hw, E1000_PTC511); 3172 sc->stats.ptc1023 += E1000_READ_REG(&sc->hw, E1000_PTC1023); 3173 sc->stats.ptc1522 += E1000_READ_REG(&sc->hw, E1000_PTC1522); 3174 sc->stats.mptc += E1000_READ_REG(&sc->hw, E1000_MPTC); 3175 sc->stats.bptc += E1000_READ_REG(&sc->hw, E1000_BPTC); 3176 3177 sc->stats.algnerrc += E1000_READ_REG(&sc->hw, E1000_ALGNERRC); 3178 sc->stats.rxerrc += E1000_READ_REG(&sc->hw, E1000_RXERRC); 3179 sc->stats.tncrs += E1000_READ_REG(&sc->hw, E1000_TNCRS); 3180 sc->stats.cexterr += E1000_READ_REG(&sc->hw, E1000_CEXTERR); 3181 sc->stats.tsctc += E1000_READ_REG(&sc->hw, E1000_TSCTC); 3182 sc->stats.tsctfc += E1000_READ_REG(&sc->hw, E1000_TSCTFC); 3183 3184 ifp->if_collisions = sc->stats.colc; 3185 3186 /* Rx Errors */ 3187 ifp->if_ierrors = sc->dropped_pkts + sc->stats.rxerrc + 3188 sc->stats.crcerrs + sc->stats.algnerrc + 3189 sc->stats.ruc + sc->stats.roc + 3190 sc->stats.mpc + sc->stats.cexterr; 3191 3192 /* Tx Errors */ 3193 ifp->if_oerrors = sc->stats.ecol + sc->stats.latecol + 3194 sc->watchdog_events; 3195 } 3196 3197 static void 3198 emx_print_debug_info(struct emx_softc *sc) 3199 { 3200 device_t dev = sc->dev; 3201 uint8_t *hw_addr = sc->hw.hw_addr; 3202 3203 device_printf(dev, "Adapter hardware address = %p \n", hw_addr); 3204 device_printf(dev, "CTRL = 0x%x RCTL = 0x%x \n", 3205 E1000_READ_REG(&sc->hw, E1000_CTRL), 3206 E1000_READ_REG(&sc->hw, E1000_RCTL)); 3207 device_printf(dev, "Packet buffer = Tx=%dk Rx=%dk \n", 3208 ((E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff0000) >> 16),\ 3209 (E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff) ); 3210 device_printf(dev, "Flow control watermarks high = %d low = %d\n", 3211 sc->hw.fc.high_water, sc->hw.fc.low_water); 3212 device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n", 3213 E1000_READ_REG(&sc->hw, E1000_TIDV), 3214 E1000_READ_REG(&sc->hw, E1000_TADV)); 3215 device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n", 3216 E1000_READ_REG(&sc->hw, E1000_RDTR), 3217 E1000_READ_REG(&sc->hw, E1000_RADV)); 3218 device_printf(dev, "hw tdh = %d, hw tdt = %d\n", 3219 E1000_READ_REG(&sc->hw, E1000_TDH(0)), 3220 E1000_READ_REG(&sc->hw, E1000_TDT(0))); 3221 device_printf(dev, "hw rdh = %d, hw rdt = %d\n", 3222 E1000_READ_REG(&sc->hw, E1000_RDH(0)), 3223 E1000_READ_REG(&sc->hw, E1000_RDT(0))); 3224 device_printf(dev, "Num Tx descriptors avail = %d\n", 3225 sc->num_tx_desc_avail); 3226 device_printf(dev, "Tx Descriptors not avail1 = %ld\n", 3227 sc->no_tx_desc_avail1); 3228 device_printf(dev, "Tx Descriptors not avail2 = %ld\n", 3229 sc->no_tx_desc_avail2); 3230 device_printf(dev, 
"Std mbuf failed = %ld\n", 3231 sc->mbuf_alloc_failed); 3232 device_printf(dev, "Std mbuf cluster failed = %ld\n", 3233 sc->rx_data[0].mbuf_cluster_failed); 3234 device_printf(dev, "Driver dropped packets = %ld\n", 3235 sc->dropped_pkts); 3236 device_printf(dev, "Driver tx dma failure in encap = %ld\n", 3237 sc->no_tx_dma_setup); 3238 3239 device_printf(dev, "TSO segments %lu\n", sc->tso_segments); 3240 device_printf(dev, "TSO ctx reused %lu\n", sc->tso_ctx_reused); 3241 } 3242 3243 static void 3244 emx_print_hw_stats(struct emx_softc *sc) 3245 { 3246 device_t dev = sc->dev; 3247 3248 device_printf(dev, "Excessive collisions = %lld\n", 3249 (long long)sc->stats.ecol); 3250 #if (DEBUG_HW > 0) /* Dont output these errors normally */ 3251 device_printf(dev, "Symbol errors = %lld\n", 3252 (long long)sc->stats.symerrs); 3253 #endif 3254 device_printf(dev, "Sequence errors = %lld\n", 3255 (long long)sc->stats.sec); 3256 device_printf(dev, "Defer count = %lld\n", 3257 (long long)sc->stats.dc); 3258 device_printf(dev, "Missed Packets = %lld\n", 3259 (long long)sc->stats.mpc); 3260 device_printf(dev, "Receive No Buffers = %lld\n", 3261 (long long)sc->stats.rnbc); 3262 /* RLEC is inaccurate on some hardware, calculate our own. */ 3263 device_printf(dev, "Receive Length Errors = %lld\n", 3264 ((long long)sc->stats.roc + (long long)sc->stats.ruc)); 3265 device_printf(dev, "Receive errors = %lld\n", 3266 (long long)sc->stats.rxerrc); 3267 device_printf(dev, "Crc errors = %lld\n", 3268 (long long)sc->stats.crcerrs); 3269 device_printf(dev, "Alignment errors = %lld\n", 3270 (long long)sc->stats.algnerrc); 3271 device_printf(dev, "Collision/Carrier extension errors = %lld\n", 3272 (long long)sc->stats.cexterr); 3273 device_printf(dev, "RX overruns = %ld\n", sc->rx_overruns); 3274 device_printf(dev, "watchdog timeouts = %ld\n", 3275 sc->watchdog_events); 3276 device_printf(dev, "XON Rcvd = %lld\n", 3277 (long long)sc->stats.xonrxc); 3278 device_printf(dev, "XON Xmtd = %lld\n", 3279 (long long)sc->stats.xontxc); 3280 device_printf(dev, "XOFF Rcvd = %lld\n", 3281 (long long)sc->stats.xoffrxc); 3282 device_printf(dev, "XOFF Xmtd = %lld\n", 3283 (long long)sc->stats.xofftxc); 3284 device_printf(dev, "Good Packets Rcvd = %lld\n", 3285 (long long)sc->stats.gprc); 3286 device_printf(dev, "Good Packets Xmtd = %lld\n", 3287 (long long)sc->stats.gptc); 3288 } 3289 3290 static void 3291 emx_print_nvm_info(struct emx_softc *sc) 3292 { 3293 uint16_t eeprom_data; 3294 int i, j, row = 0; 3295 3296 /* Its a bit crude, but it gets the job done */ 3297 kprintf("\nInterface EEPROM Dump:\n"); 3298 kprintf("Offset\n0x0000 "); 3299 for (i = 0, j = 0; i < 32; i++, j++) { 3300 if (j == 8) { /* Make the offset block */ 3301 j = 0; ++row; 3302 kprintf("\n0x00%x0 ",row); 3303 } 3304 e1000_read_nvm(&sc->hw, i, 1, &eeprom_data); 3305 kprintf("%04x ", eeprom_data); 3306 } 3307 kprintf("\n"); 3308 } 3309 3310 static int 3311 emx_sysctl_debug_info(SYSCTL_HANDLER_ARGS) 3312 { 3313 struct emx_softc *sc; 3314 struct ifnet *ifp; 3315 int error, result; 3316 3317 result = -1; 3318 error = sysctl_handle_int(oidp, &result, 0, req); 3319 if (error || !req->newptr) 3320 return (error); 3321 3322 sc = (struct emx_softc *)arg1; 3323 ifp = &sc->arpcom.ac_if; 3324 3325 ifnet_serialize_all(ifp); 3326 3327 if (result == 1) 3328 emx_print_debug_info(sc); 3329 3330 /* 3331 * This value will cause a hex dump of the 3332 * first 32 16-bit words of the EEPROM to 3333 * the screen. 
3334 */ 3335 if (result == 2) 3336 emx_print_nvm_info(sc); 3337 3338 ifnet_deserialize_all(ifp); 3339 3340 return (error); 3341 } 3342 3343 static int 3344 emx_sysctl_stats(SYSCTL_HANDLER_ARGS) 3345 { 3346 int error, result; 3347 3348 result = -1; 3349 error = sysctl_handle_int(oidp, &result, 0, req); 3350 if (error || !req->newptr) 3351 return (error); 3352 3353 if (result == 1) { 3354 struct emx_softc *sc = (struct emx_softc *)arg1; 3355 struct ifnet *ifp = &sc->arpcom.ac_if; 3356 3357 ifnet_serialize_all(ifp); 3358 emx_print_hw_stats(sc); 3359 ifnet_deserialize_all(ifp); 3360 } 3361 return (error); 3362 } 3363 3364 static void 3365 emx_add_sysctl(struct emx_softc *sc) 3366 { 3367 #ifdef EMX_RSS_DEBUG 3368 char rx_pkt[32]; 3369 int i; 3370 #endif 3371 3372 sysctl_ctx_init(&sc->sysctl_ctx); 3373 sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx, 3374 SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO, 3375 device_get_nameunit(sc->dev), 3376 CTLFLAG_RD, 0, ""); 3377 if (sc->sysctl_tree == NULL) { 3378 device_printf(sc->dev, "can't add sysctl node\n"); 3379 return; 3380 } 3381 3382 SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 3383 OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, sc, 0, 3384 emx_sysctl_debug_info, "I", "Debug Information"); 3385 3386 SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 3387 OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW, sc, 0, 3388 emx_sysctl_stats, "I", "Statistics"); 3389 3390 SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 3391 OID_AUTO, "rxd", CTLFLAG_RD, 3392 &sc->rx_data[0].num_rx_desc, 0, NULL); 3393 SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 3394 OID_AUTO, "txd", CTLFLAG_RD, &sc->num_tx_desc, 0, NULL); 3395 3396 SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 3397 OID_AUTO, "int_throttle_ceil", CTLTYPE_INT|CTLFLAG_RW, 3398 sc, 0, emx_sysctl_int_throttle, "I", 3399 "interrupt throttling rate"); 3400 SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 3401 OID_AUTO, "int_tx_nsegs", CTLTYPE_INT|CTLFLAG_RW, 3402 sc, 0, emx_sysctl_int_tx_nsegs, "I", 3403 "# segments per TX interrupt"); 3404 3405 SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 3406 OID_AUTO, "rx_ring_cnt", CTLFLAG_RD, 3407 &sc->rx_ring_cnt, 0, "RX ring count"); 3408 3409 #ifdef EMX_RSS_DEBUG 3410 SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 3411 OID_AUTO, "rss_debug", CTLFLAG_RW, &sc->rss_debug, 3412 0, "RSS debug level"); 3413 for (i = 0; i < sc->rx_ring_cnt; ++i) { 3414 ksnprintf(rx_pkt, sizeof(rx_pkt), "rx%d_pkt", i); 3415 SYSCTL_ADD_UINT(&sc->sysctl_ctx, 3416 SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, 3417 rx_pkt, CTLFLAG_RW, 3418 &sc->rx_data[i].rx_pkts, 0, "RXed packets"); 3419 } 3420 #endif 3421 } 3422 3423 static int 3424 emx_sysctl_int_throttle(SYSCTL_HANDLER_ARGS) 3425 { 3426 struct emx_softc *sc = (void *)arg1; 3427 struct ifnet *ifp = &sc->arpcom.ac_if; 3428 int error, throttle; 3429 3430 throttle = sc->int_throttle_ceil; 3431 error = sysctl_handle_int(oidp, &throttle, 0, req); 3432 if (error || req->newptr == NULL) 3433 return error; 3434 if (throttle < 0 || throttle > 1000000000 / 256) 3435 return EINVAL; 3436 3437 if (throttle) { 3438 /* 3439 * Set the interrupt throttling rate in 256ns increments, 3440 * recalculate sysctl value assignment to get exact frequency. 
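	 *
	 * Worked example: requesting 10000 interrupts/sec gives an ITR
	 * value of 1000000000 / 256 / 10000 = 390 (integer division);
	 * mapping 390 back gives 1000000000 / 256 / 390 = 10016, which is
	 * the effective ceiling stored in int_throttle_ceil below.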
3441 */ 3442 throttle = 1000000000 / 256 / throttle; 3443 3444 /* Upper 16bits of ITR is reserved and should be zero */ 3445 if (throttle & 0xffff0000) 3446 return EINVAL; 3447 } 3448 3449 ifnet_serialize_all(ifp); 3450 3451 if (throttle) 3452 sc->int_throttle_ceil = 1000000000 / 256 / throttle; 3453 else 3454 sc->int_throttle_ceil = 0; 3455 3456 if (ifp->if_flags & IFF_RUNNING) 3457 emx_set_itr(sc, throttle); 3458 3459 ifnet_deserialize_all(ifp); 3460 3461 if (bootverbose) { 3462 if_printf(ifp, "Interrupt moderation set to %d/sec\n", 3463 sc->int_throttle_ceil); 3464 } 3465 return 0; 3466 } 3467 3468 static int 3469 emx_sysctl_int_tx_nsegs(SYSCTL_HANDLER_ARGS) 3470 { 3471 struct emx_softc *sc = (void *)arg1; 3472 struct ifnet *ifp = &sc->arpcom.ac_if; 3473 int error, segs; 3474 3475 segs = sc->tx_int_nsegs; 3476 error = sysctl_handle_int(oidp, &segs, 0, req); 3477 if (error || req->newptr == NULL) 3478 return error; 3479 if (segs <= 0) 3480 return EINVAL; 3481 3482 ifnet_serialize_all(ifp); 3483 3484 /* 3485 * Don't allow int_tx_nsegs to become: 3486 * o Less the oact_tx_desc 3487 * o Too large that no TX desc will cause TX interrupt to 3488 * be generated (OACTIVE will never recover) 3489 * o Too small that will cause tx_dd[] overflow 3490 */ 3491 if (segs < sc->oact_tx_desc || 3492 segs >= sc->num_tx_desc - sc->oact_tx_desc || 3493 segs < sc->num_tx_desc / EMX_TXDD_SAFE) { 3494 error = EINVAL; 3495 } else { 3496 error = 0; 3497 sc->tx_int_nsegs = segs; 3498 } 3499 3500 ifnet_deserialize_all(ifp); 3501 3502 return error; 3503 } 3504 3505 static int 3506 emx_dma_alloc(struct emx_softc *sc) 3507 { 3508 int error, i; 3509 3510 /* 3511 * Create top level busdma tag 3512 */ 3513 error = bus_dma_tag_create(NULL, 1, 0, 3514 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 3515 NULL, NULL, 3516 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 3517 0, &sc->parent_dtag); 3518 if (error) { 3519 device_printf(sc->dev, "could not create top level DMA tag\n"); 3520 return error; 3521 } 3522 3523 /* 3524 * Allocate transmit descriptors ring and buffers 3525 */ 3526 error = emx_create_tx_ring(sc); 3527 if (error) { 3528 device_printf(sc->dev, "Could not setup transmit structures\n"); 3529 return error; 3530 } 3531 3532 /* 3533 * Allocate receive descriptors ring and buffers 3534 */ 3535 for (i = 0; i < sc->rx_ring_cnt; ++i) { 3536 error = emx_create_rx_ring(sc, &sc->rx_data[i]); 3537 if (error) { 3538 device_printf(sc->dev, 3539 "Could not setup receive structures\n"); 3540 return error; 3541 } 3542 } 3543 return 0; 3544 } 3545 3546 static void 3547 emx_dma_free(struct emx_softc *sc) 3548 { 3549 int i; 3550 3551 emx_destroy_tx_ring(sc, sc->num_tx_desc); 3552 3553 for (i = 0; i < sc->rx_ring_cnt; ++i) { 3554 emx_destroy_rx_ring(sc, &sc->rx_data[i], 3555 sc->rx_data[i].num_rx_desc); 3556 } 3557 3558 /* Free top level busdma tag */ 3559 if (sc->parent_dtag != NULL) 3560 bus_dma_tag_destroy(sc->parent_dtag); 3561 } 3562 3563 static void 3564 emx_serialize(struct ifnet *ifp, enum ifnet_serialize slz) 3565 { 3566 struct emx_softc *sc = ifp->if_softc; 3567 3568 ifnet_serialize_array_enter(sc->serializes, EMX_NSERIALIZE, 3569 EMX_TX_SERIALIZE, EMX_RX_SERIALIZE, slz); 3570 } 3571 3572 static void 3573 emx_deserialize(struct ifnet *ifp, enum ifnet_serialize slz) 3574 { 3575 struct emx_softc *sc = ifp->if_softc; 3576 3577 ifnet_serialize_array_exit(sc->serializes, EMX_NSERIALIZE, 3578 EMX_TX_SERIALIZE, EMX_RX_SERIALIZE, slz); 3579 } 3580 3581 static int 3582 emx_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz) 
3583 { 3584 struct emx_softc *sc = ifp->if_softc; 3585 3586 return ifnet_serialize_array_try(sc->serializes, EMX_NSERIALIZE, 3587 EMX_TX_SERIALIZE, EMX_RX_SERIALIZE, slz); 3588 } 3589 3590 static void 3591 emx_serialize_skipmain(struct emx_softc *sc) 3592 { 3593 lwkt_serialize_array_enter(sc->serializes, EMX_NSERIALIZE, 1); 3594 } 3595 3596 static void 3597 emx_deserialize_skipmain(struct emx_softc *sc) 3598 { 3599 lwkt_serialize_array_exit(sc->serializes, EMX_NSERIALIZE, 1); 3600 } 3601 3602 #ifdef INVARIANTS 3603 3604 static void 3605 emx_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz, 3606 boolean_t serialized) 3607 { 3608 struct emx_softc *sc = ifp->if_softc; 3609 3610 ifnet_serialize_array_assert(sc->serializes, EMX_NSERIALIZE, 3611 EMX_TX_SERIALIZE, EMX_RX_SERIALIZE, slz, serialized); 3612 } 3613 3614 #endif /* INVARIANTS */ 3615 3616 #ifdef IFPOLL_ENABLE 3617 3618 static void 3619 emx_qpoll_status(struct ifnet *ifp, int pollhz __unused) 3620 { 3621 struct emx_softc *sc = ifp->if_softc; 3622 uint32_t reg_icr; 3623 3624 ASSERT_SERIALIZED(&sc->main_serialize); 3625 3626 reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR); 3627 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { 3628 callout_stop(&sc->timer); 3629 sc->hw.mac.get_link_status = 1; 3630 emx_update_link_status(sc); 3631 callout_reset(&sc->timer, hz, emx_timer, sc); 3632 } 3633 } 3634 3635 static void 3636 emx_qpoll_tx(struct ifnet *ifp, void *arg __unused, int cycle __unused) 3637 { 3638 struct emx_softc *sc = ifp->if_softc; 3639 3640 ASSERT_SERIALIZED(&sc->tx_serialize); 3641 3642 emx_txeof(sc); 3643 if (!ifq_is_empty(&ifp->if_snd)) 3644 if_devstart(ifp); 3645 } 3646 3647 static void 3648 emx_qpoll_rx(struct ifnet *ifp, void *arg, int cycle) 3649 { 3650 struct emx_softc *sc = ifp->if_softc; 3651 struct emx_rxdata *rdata = arg; 3652 3653 ASSERT_SERIALIZED(&rdata->rx_serialize); 3654 3655 emx_rxeof(sc, rdata - sc->rx_data, cycle); 3656 } 3657 3658 static void 3659 emx_qpoll(struct ifnet *ifp, struct ifpoll_info *info) 3660 { 3661 struct emx_softc *sc = ifp->if_softc; 3662 3663 ASSERT_IFNET_SERIALIZED_ALL(ifp); 3664 3665 if (info) { 3666 int i; 3667 3668 info->ifpi_status.status_func = emx_qpoll_status; 3669 info->ifpi_status.serializer = &sc->main_serialize; 3670 3671 info->ifpi_tx[0].poll_func = emx_qpoll_tx; 3672 info->ifpi_tx[0].arg = NULL; 3673 info->ifpi_tx[0].serializer = &sc->tx_serialize; 3674 3675 for (i = 0; i < sc->rx_ring_cnt; ++i) { 3676 info->ifpi_rx[i].poll_func = emx_qpoll_rx; 3677 info->ifpi_rx[i].arg = &sc->rx_data[i]; 3678 info->ifpi_rx[i].serializer = 3679 &sc->rx_data[i].rx_serialize; 3680 } 3681 3682 if (ifp->if_flags & IFF_RUNNING) 3683 emx_disable_intr(sc); 3684 } else if (ifp->if_flags & IFF_RUNNING) { 3685 emx_enable_intr(sc); 3686 } 3687 } 3688 3689 #endif /* IFPOLL_ENABLE */ 3690 3691 static void 3692 emx_set_itr(struct emx_softc *sc, uint32_t itr) 3693 { 3694 E1000_WRITE_REG(&sc->hw, E1000_ITR, itr); 3695 if (sc->hw.mac.type == e1000_82574) { 3696 int i; 3697 3698 /* 3699 * When using MSIX interrupts we need to 3700 * throttle using the EITR register 3701 */ 3702 for (i = 0; i < 4; ++i) 3703 E1000_WRITE_REG(&sc->hw, E1000_EITR_82574(i), itr); 3704 } 3705 } 3706 3707 /* 3708 * Disable the L0s, 82574L Errata #20 3709 */ 3710 static void 3711 emx_disable_aspm(struct emx_softc *sc) 3712 { 3713 uint16_t link_cap, link_ctrl, disable; 3714 uint8_t pcie_ptr, reg; 3715 device_t dev = sc->dev; 3716 3717 switch (sc->hw.mac.type) { 3718 case e1000_82571: 3719 case e1000_82572: 3720 case e1000_82573: 
3721 /* 3722 * 82573 specification update 3723 * errata #8 disable L0s 3724 * errata #41 disable L1 3725 * 3726 * 82571/82572 specification update 3727 # errata #13 disable L1 3728 * errata #68 disable L0s 3729 */ 3730 disable = PCIEM_LNKCTL_ASPM_L0S | PCIEM_LNKCTL_ASPM_L1; 3731 break; 3732 3733 case e1000_82574: 3734 /* 3735 * 82574 specification update errata #20 3736 * 3737 * There is no need to disable L1 3738 */ 3739 disable = PCIEM_LNKCTL_ASPM_L0S; 3740 break; 3741 3742 default: 3743 return; 3744 } 3745 3746 pcie_ptr = pci_get_pciecap_ptr(dev); 3747 if (pcie_ptr == 0) 3748 return; 3749 3750 link_cap = pci_read_config(dev, pcie_ptr + PCIER_LINKCAP, 2); 3751 if ((link_cap & PCIEM_LNKCAP_ASPM_MASK) == 0) 3752 return; 3753 3754 if (bootverbose) 3755 if_printf(&sc->arpcom.ac_if, "disable ASPM %#02x\n", disable); 3756 3757 reg = pcie_ptr + PCIER_LINKCTRL; 3758 link_ctrl = pci_read_config(dev, reg, 2); 3759 link_ctrl &= ~disable; 3760 pci_write_config(dev, reg, link_ctrl, 2); 3761 } 3762 3763 static int 3764 emx_tso_pullup(struct emx_softc *sc, struct mbuf **mp) 3765 { 3766 int iphlen, hoff, thoff, ex = 0; 3767 struct mbuf *m; 3768 struct ip *ip; 3769 3770 m = *mp; 3771 KASSERT(M_WRITABLE(m), ("TSO mbuf not writable")); 3772 3773 iphlen = m->m_pkthdr.csum_iphlen; 3774 thoff = m->m_pkthdr.csum_thlen; 3775 hoff = m->m_pkthdr.csum_lhlen; 3776 3777 KASSERT(iphlen > 0, ("invalid ip hlen")); 3778 KASSERT(thoff > 0, ("invalid tcp hlen")); 3779 KASSERT(hoff > 0, ("invalid ether hlen")); 3780 3781 if (sc->flags & EMX_FLAG_TSO_PULLEX) 3782 ex = 4; 3783 3784 if (m->m_len < hoff + iphlen + thoff + ex) { 3785 m = m_pullup(m, hoff + iphlen + thoff + ex); 3786 if (m == NULL) { 3787 *mp = NULL; 3788 return ENOBUFS; 3789 } 3790 *mp = m; 3791 } 3792 ip = mtodoff(m, struct ip *, hoff); 3793 ip->ip_len = 0; 3794 3795 return 0; 3796 } 3797 3798 static int 3799 emx_tso_setup(struct emx_softc *sc, struct mbuf *mp, 3800 uint32_t *txd_upper, uint32_t *txd_lower) 3801 { 3802 struct e1000_context_desc *TXD; 3803 int hoff, iphlen, thoff, hlen; 3804 int mss, pktlen, curr_txd; 3805 3806 #ifdef EMX_TSO_DEBUG 3807 sc->tso_segments++; 3808 #endif 3809 3810 iphlen = mp->m_pkthdr.csum_iphlen; 3811 thoff = mp->m_pkthdr.csum_thlen; 3812 hoff = mp->m_pkthdr.csum_lhlen; 3813 mss = mp->m_pkthdr.tso_segsz; 3814 pktlen = mp->m_pkthdr.len; 3815 3816 if (sc->csum_flags == CSUM_TSO && 3817 sc->csum_iphlen == iphlen && 3818 sc->csum_lhlen == hoff && 3819 sc->csum_thlen == thoff && 3820 sc->csum_mss == mss && 3821 sc->csum_pktlen == pktlen) { 3822 *txd_upper = sc->csum_txd_upper; 3823 *txd_lower = sc->csum_txd_lower; 3824 #ifdef EMX_TSO_DEBUG 3825 sc->tso_ctx_reused++; 3826 #endif 3827 return 0; 3828 } 3829 hlen = hoff + iphlen + thoff; 3830 3831 /* 3832 * Setup a new TSO context. 3833 */ 3834 3835 curr_txd = sc->next_avail_tx_desc; 3836 TXD = (struct e1000_context_desc *)&sc->tx_desc_base[curr_txd]; 3837 3838 *txd_lower = E1000_TXD_CMD_DEXT | /* Extended descr type */ 3839 E1000_TXD_DTYP_D | /* Data descr type */ 3840 E1000_TXD_CMD_TSE; /* Do TSE on this packet */ 3841 3842 /* IP and/or TCP header checksum calculation and insertion. */ 3843 *txd_upper = (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8; 3844 3845 /* 3846 * Start offset for header checksum calculation. 3847 * End offset for header checksum calculation. 3848 * Offset of place put the checksum. 
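	 *
	 * (Worked example, assuming a standard TCP/IPv4 frame with a
	 * 14 byte Ethernet header, 20 byte IP header and 20 byte TCP
	 * header: hlen comes to 14 + 20 + 20 = 54, so the context set up
	 * below advertises hdr_len = 54 and a payload length of
	 * pktlen - 54, while the checksum offsets follow the same layout
	 * as in emx_txcsum() above.)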
3849 */ 3850 TXD->lower_setup.ip_fields.ipcss = hoff; 3851 TXD->lower_setup.ip_fields.ipcse = htole16(hoff + iphlen - 1); 3852 TXD->lower_setup.ip_fields.ipcso = hoff + offsetof(struct ip, ip_sum); 3853 3854 /* 3855 * Start offset for payload checksum calculation. 3856 * End offset for payload checksum calculation. 3857 * Offset of place to put the checksum. 3858 */ 3859 TXD->upper_setup.tcp_fields.tucss = hoff + iphlen; 3860 TXD->upper_setup.tcp_fields.tucse = 0; 3861 TXD->upper_setup.tcp_fields.tucso = 3862 hoff + iphlen + offsetof(struct tcphdr, th_sum); 3863 3864 /* 3865 * Payload size per packet w/o any headers. 3866 * Length of all headers up to payload. 3867 */ 3868 TXD->tcp_seg_setup.fields.mss = htole16(mss); 3869 TXD->tcp_seg_setup.fields.hdr_len = hlen; 3870 TXD->cmd_and_length = htole32(E1000_TXD_CMD_IFCS | 3871 E1000_TXD_CMD_DEXT | /* Extended descr */ 3872 E1000_TXD_CMD_TSE | /* TSE context */ 3873 E1000_TXD_CMD_IP | /* Do IP csum */ 3874 E1000_TXD_CMD_TCP | /* Do TCP checksum */ 3875 (pktlen - hlen)); /* Total len */ 3876 3877 /* Save the information for this TSO context */ 3878 sc->csum_flags = CSUM_TSO; 3879 sc->csum_lhlen = hoff; 3880 sc->csum_iphlen = iphlen; 3881 sc->csum_thlen = thoff; 3882 sc->csum_mss = mss; 3883 sc->csum_pktlen = pktlen; 3884 sc->csum_txd_upper = *txd_upper; 3885 sc->csum_txd_lower = *txd_lower; 3886 3887 if (++curr_txd == sc->num_tx_desc) 3888 curr_txd = 0; 3889 3890 KKASSERT(sc->num_tx_desc_avail > 0); 3891 sc->num_tx_desc_avail--; 3892 3893 sc->next_avail_tx_desc = curr_txd; 3894 return 1; 3895 } 3896
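
/*
 * The block below is a minimal, self-contained sketch of the ring-index
 * bookkeeping used by emx_txeof()/emx_tx_collect() above: walk forward
 * from the first unclean slot, count each slot as freed and wrap modulo
 * the ring size.  It is illustrative only and is never compiled (the
 * names are made up); the real routines additionally unload DMA maps,
 * free mbufs and maintain the tx_dd[] descriptor-done bookkeeping.
 */
#if 0
static int
example_ring_reclaim(int first, int stop, int ring_size)
{
	int freed = 0;

	while (first != stop) {
		++freed;			/* slot "first" is reusable again */
		if (++first == ring_size)	/* wrap around the ring */
			first = 0;
	}
	/* Caller adds "freed" to num_tx_desc_avail and stores "stop" */
	return freed;
}
#endif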