/*
 * Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>.  All rights reserved.
 *
 * Copyright (c) 2001-2008, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * Copyright (c) 2005 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_ifpoll.h"
#include "opt_emx.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/serialize2.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/toeplitz.h>
#include <net/toeplitz2.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>
#include <net/if_poll.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

#include <dev/netif/ig_hal/e1000_api.h>
#include <dev/netif/ig_hal/e1000_82571.h>
#include <dev/netif/ig_hal/e1000_dragonfly.h>
#include <dev/netif/emx/if_emx.h>

#define DEBUG_HW 0

#ifdef EMX_RSS_DEBUG
#define EMX_RSS_DPRINTF(sc, lvl, fmt, ...) \
do { \
    if (sc->rss_debug >= lvl) \
        if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \
} while (0)
#else   /* !EMX_RSS_DEBUG */
#define EMX_RSS_DPRINTF(sc, lvl, fmt, ...) ((void)0)
#endif  /* EMX_RSS_DEBUG */

#define EMX_NAME "Intel(R) PRO/1000 "

#define EMX_DEVICE(id) \
    { EMX_VENDOR_ID, E1000_DEV_ID_##id, EMX_NAME #id }
#define EMX_DEVICE_NULL { 0, 0, NULL }

static const struct emx_device {
    uint16_t vid;
    uint16_t did;
    const char *desc;
} emx_devices[] = {
    EMX_DEVICE(82571EB_COPPER),
    EMX_DEVICE(82571EB_FIBER),
    EMX_DEVICE(82571EB_SERDES),
    EMX_DEVICE(82571EB_SERDES_DUAL),
    EMX_DEVICE(82571EB_SERDES_QUAD),
    EMX_DEVICE(82571EB_QUAD_COPPER),
    EMX_DEVICE(82571EB_QUAD_COPPER_BP),
    EMX_DEVICE(82571EB_QUAD_COPPER_LP),
    EMX_DEVICE(82571EB_QUAD_FIBER),
    EMX_DEVICE(82571PT_QUAD_COPPER),

    EMX_DEVICE(82572EI_COPPER),
    EMX_DEVICE(82572EI_FIBER),
    EMX_DEVICE(82572EI_SERDES),
    EMX_DEVICE(82572EI),

    EMX_DEVICE(82573E),
    EMX_DEVICE(82573E_IAMT),
    EMX_DEVICE(82573L),

    EMX_DEVICE(80003ES2LAN_COPPER_SPT),
    EMX_DEVICE(80003ES2LAN_SERDES_SPT),
    EMX_DEVICE(80003ES2LAN_COPPER_DPT),
    EMX_DEVICE(80003ES2LAN_SERDES_DPT),

    EMX_DEVICE(82574L),
    EMX_DEVICE(82574LA),

    EMX_DEVICE(PCH_LPT_I217_LM),
    EMX_DEVICE(PCH_LPT_I217_V),
    EMX_DEVICE(PCH_LPTLP_I218_LM),
    EMX_DEVICE(PCH_LPTLP_I218_V),
    EMX_DEVICE(PCH_I218_LM2),
    EMX_DEVICE(PCH_I218_V2),
    EMX_DEVICE(PCH_I218_LM3),
    EMX_DEVICE(PCH_I218_V3),
    EMX_DEVICE(PCH_SPT_I219_LM),
    EMX_DEVICE(PCH_SPT_I219_V),
    EMX_DEVICE(PCH_SPT_I219_LM2),
    EMX_DEVICE(PCH_SPT_I219_V2),

    /* required last entry */
    EMX_DEVICE_NULL
};
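
/*
 * Example (illustrative only, not part of the original sources): with the
 * EMX_NAME and EMX_DEVICE() macros above, an entry such as
 * EMX_DEVICE(82571EB_COPPER) expands to
 *
 *    { EMX_VENDOR_ID, E1000_DEV_ID_82571EB_COPPER,
 *      "Intel(R) PRO/1000 82571EB_COPPER" }
 *
 * and the description string is what emx_probe() hands to device_set_desc().
 */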

static int  emx_probe(device_t);
static int  emx_attach(device_t);
static int  emx_detach(device_t);
static int  emx_shutdown(device_t);
static int  emx_suspend(device_t);
static int  emx_resume(device_t);

static void emx_init(void *);
static void emx_stop(struct emx_softc *);
static int  emx_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void emx_start(struct ifnet *, struct ifaltq_subque *);
#ifdef IFPOLL_ENABLE
static void emx_npoll(struct ifnet *, struct ifpoll_info *);
static void emx_npoll_status(struct ifnet *);
static void emx_npoll_tx(struct ifnet *, void *, int);
static void emx_npoll_rx(struct ifnet *, void *, int);
#endif
static void emx_watchdog(struct ifaltq_subque *);
static void emx_media_status(struct ifnet *, struct ifmediareq *);
static int  emx_media_change(struct ifnet *);
static void emx_timer(void *);
static void emx_serialize(struct ifnet *, enum ifnet_serialize);
static void emx_deserialize(struct ifnet *, enum ifnet_serialize);
static int  emx_tryserialize(struct ifnet *, enum ifnet_serialize);
#ifdef INVARIANTS
static void emx_serialize_assert(struct ifnet *, enum ifnet_serialize,
                boolean_t);
#endif

static void emx_intr(void *);
static void emx_intr_mask(void *);
static void emx_intr_body(struct emx_softc *, boolean_t);
static void emx_rxeof(struct emx_rxdata *, int);
static void emx_txeof(struct emx_txdata *);
static void emx_tx_collect(struct emx_txdata *);
static void emx_tx_purge(struct emx_softc *);
static void emx_enable_intr(struct emx_softc *);
static void emx_disable_intr(struct emx_softc *);

static int  emx_dma_alloc(struct emx_softc *);
static void emx_dma_free(struct emx_softc *);
static void emx_init_tx_ring(struct emx_txdata *);
static int  emx_init_rx_ring(struct emx_rxdata *);
static void emx_free_tx_ring(struct emx_txdata *);
static void emx_free_rx_ring(struct emx_rxdata *);
static int  emx_create_tx_ring(struct emx_txdata *);
static int  emx_create_rx_ring(struct emx_rxdata *);
static void emx_destroy_tx_ring(struct emx_txdata *, int);
static void emx_destroy_rx_ring(struct emx_rxdata *, int);
static int  emx_newbuf(struct emx_rxdata *, int, int);
static int  emx_encap(struct emx_txdata *, struct mbuf **, int *, int *);
static int  emx_txcsum(struct emx_txdata *, struct mbuf *,
                uint32_t *, uint32_t *);
static int  emx_tso_pullup(struct emx_txdata *, struct mbuf **);
static int  emx_tso_setup(struct emx_txdata *, struct mbuf *,
                uint32_t *, uint32_t *);
static int  emx_get_txring_inuse(const struct emx_softc *, boolean_t);

static int  emx_is_valid_eaddr(const uint8_t *);
static int  emx_reset(struct emx_softc *);
static void emx_setup_ifp(struct emx_softc *);
static void emx_init_tx_unit(struct emx_softc *);
static void emx_init_rx_unit(struct emx_softc *);
static void emx_update_stats(struct emx_softc *);
static void emx_set_promisc(struct emx_softc *);
static void emx_disable_promisc(struct emx_softc *);
static void emx_set_multi(struct emx_softc *);
static void emx_update_link_status(struct emx_softc *);
static void emx_smartspeed(struct emx_softc *);
static void emx_set_itr(struct emx_softc *, uint32_t);
static void emx_disable_aspm(struct emx_softc *);

static void emx_print_debug_info(struct emx_softc *);
static void emx_print_nvm_info(struct emx_softc *);
static void emx_print_hw_stats(struct emx_softc *);

static int  emx_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int  emx_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
static int  emx_sysctl_int_throttle(SYSCTL_HANDLER_ARGS);
static int  emx_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS);
static int  emx_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS);
#ifdef IFPOLL_ENABLE
static int  emx_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS);
static int  emx_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS);
#endif
static void emx_add_sysctl(struct emx_softc *);

static void emx_serialize_skipmain(struct emx_softc *);
static void emx_deserialize_skipmain(struct emx_softc *);

/* Management and WOL Support */
static void emx_get_mgmt(struct emx_softc *);
static void emx_rel_mgmt(struct emx_softc *);
static void emx_get_hw_control(struct emx_softc *);
static void emx_rel_hw_control(struct emx_softc *);
static void emx_enable_wol(device_t);

static device_method_t emx_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe,     emx_probe),
    DEVMETHOD(device_attach,    emx_attach),
    DEVMETHOD(device_detach,    emx_detach),
    DEVMETHOD(device_shutdown,  emx_shutdown),
    DEVMETHOD(device_suspend,   emx_suspend),
    DEVMETHOD(device_resume,    emx_resume),
    DEVMETHOD_END
};

static driver_t emx_driver = {
    "emx",
    emx_methods,
    sizeof(struct emx_softc),
};

static devclass_t emx_devclass;

DECLARE_DUMMY_MODULE(if_emx);
MODULE_DEPEND(emx, ig_hal, 1, 1, 1);
DRIVER_MODULE(if_emx, pci, emx_driver, emx_devclass, NULL, NULL);

/*
 * Tunables
 */
static int  emx_int_throttle_ceil = EMX_DEFAULT_ITR;
static int  emx_rxd = EMX_DEFAULT_RXD;
static int  emx_txd = EMX_DEFAULT_TXD;
static int  emx_smart_pwr_down = 0;
static int  emx_rxr = 0;
static int  emx_txr = 1;

/* Controls whether promiscuous also shows bad packets */
static int  emx_debug_sbp = 0;

static int  emx_82573_workaround = 1;
static int  emx_msi_enable = 1;

static char emx_flowctrl[IFM_ETH_FC_STRLEN] = IFM_ETH_FC_RXPAUSE;

TUNABLE_INT("hw.emx.int_throttle_ceil", &emx_int_throttle_ceil);
TUNABLE_INT("hw.emx.rxd", &emx_rxd);
TUNABLE_INT("hw.emx.rxr", &emx_rxr);
TUNABLE_INT("hw.emx.txd", &emx_txd);
TUNABLE_INT("hw.emx.txr", &emx_txr);
TUNABLE_INT("hw.emx.smart_pwr_down", &emx_smart_pwr_down);
TUNABLE_INT("hw.emx.sbp", &emx_debug_sbp);
TUNABLE_INT("hw.emx.82573_workaround", &emx_82573_workaround);
TUNABLE_INT("hw.emx.msi.enable", &emx_msi_enable);
TUNABLE_STR("hw.emx.flow_ctrl", emx_flowctrl, sizeof(emx_flowctrl));

/* Global used in WOL setup with multiport cards */
static int  emx_global_quad_port_a = 0;

/* Set this to one to display debug statistics */
static int  emx_display_debug_stats = 0;

#if !defined(KTR_IF_EMX)
#define KTR_IF_EMX  KTR_ALL
#endif
KTR_INFO_MASTER(if_emx);
KTR_INFO(KTR_IF_EMX, if_emx, intr_beg, 0, "intr begin");
KTR_INFO(KTR_IF_EMX, if_emx, intr_end, 1, "intr end");
KTR_INFO(KTR_IF_EMX, if_emx, pkt_receive, 4, "rx packet");
KTR_INFO(KTR_IF_EMX, if_emx, pkt_txqueue, 5, "tx packet");
KTR_INFO(KTR_IF_EMX, if_emx, pkt_txclean, 6, "tx clean");
#define logif(name) KTR_LOG(if_emx_ ## name)

static __inline void
emx_setup_rxdesc(emx_rxdesc_t *rxd, const struct emx_rxbuf *rxbuf)
{
    rxd->rxd_bufaddr = htole64(rxbuf->paddr);
    /* DD bit must be cleared */
    rxd->rxd_staterr = 0;
}

static __inline void
emx_rxcsum(uint32_t staterr, struct mbuf *mp)
{
    /* Ignore Checksum bit is set */
    if (staterr & E1000_RXD_STAT_IXSM)
        return;

    if ((staterr & (E1000_RXD_STAT_IPCS | E1000_RXDEXT_STATERR_IPE)) ==
        E1000_RXD_STAT_IPCS)
        mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;

    if ((staterr & (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) ==
        E1000_RXD_STAT_TCPCS) {
        mp->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
                                   CSUM_PSEUDO_HDR |
                                   CSUM_FRAG_NOT_CHECKED;
        mp->m_pkthdr.csum_data = htons(0xffff);
    }
}

static __inline struct pktinfo *
emx_rssinfo(struct mbuf *m, struct pktinfo *pi,
            uint32_t mrq, uint32_t hash, uint32_t staterr)
{
    switch (mrq & EMX_RXDMRQ_RSSTYPE_MASK) {
    case EMX_RXDMRQ_IPV4_TCP:
        pi->pi_netisr = NETISR_IP;
        pi->pi_flags = 0;
        pi->pi_l3proto = IPPROTO_TCP;
        break;

    case EMX_RXDMRQ_IPV6_TCP:
        pi->pi_netisr = NETISR_IPV6;
        pi->pi_flags = 0;
        pi->pi_l3proto = IPPROTO_TCP;
        break;

    case EMX_RXDMRQ_IPV4:
        if (staterr & E1000_RXD_STAT_IXSM)
            return NULL;

        if ((staterr &
             (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) ==
            E1000_RXD_STAT_TCPCS) {
            pi->pi_netisr = NETISR_IP;
            pi->pi_flags = 0;
            pi->pi_l3proto = IPPROTO_UDP;
            break;
        }
        /* FALL THROUGH */
    default:
        return NULL;
    }

    m_sethash(m, toeplitz_hash(hash));
    return pi;
}

static int
emx_probe(device_t dev)
{
    const struct emx_device *d;
    uint16_t vid, did;

    vid = pci_get_vendor(dev);
    did = pci_get_device(dev);

    for (d = emx_devices; d->desc != NULL; ++d) {
        if (vid == d->vid && did == d->did) {
            device_set_desc(dev, d->desc);
            device_set_async_attach(dev, TRUE);
            return 0;
        }
    }
    return ENXIO;
}

static int
emx_attach(device_t dev)
{
    struct emx_softc *sc = device_get_softc(dev);
    int error = 0, i, throttle, msi_enable, tx_ring_max;
    u_int intr_flags;
    uint16_t eeprom_data, device_id, apme_mask;
    driver_intr_t *intr_func;
    char flowctrl[IFM_ETH_FC_STRLEN];
#ifdef IFPOLL_ENABLE
    int offset, offset_def;
#endif

    /*
     * Setup RX rings
     */
    for (i = 0; i < EMX_NRX_RING; ++i) {
        sc->rx_data[i].sc = sc;
        sc->rx_data[i].idx = i;
    }

    /*
     * Setup TX ring
     */
    for (i = 0; i < EMX_NTX_RING; ++i) {
        sc->tx_data[i].sc = sc;
        sc->tx_data[i].idx = i;
    }

    /*
     * Initialize serializers
     */
    lwkt_serialize_init(&sc->main_serialize);
    for (i = 0; i < EMX_NTX_RING; ++i)
        lwkt_serialize_init(&sc->tx_data[i].tx_serialize);
    for (i = 0; i < EMX_NRX_RING; ++i)
        lwkt_serialize_init(&sc->rx_data[i].rx_serialize);

    /*
     * Initialize serializer array
     */
    i = 0;

    KKASSERT(i < EMX_NSERIALIZE);
    sc->serializes[i++] = &sc->main_serialize;

    KKASSERT(i < EMX_NSERIALIZE);
    sc->serializes[i++] = &sc->tx_data[0].tx_serialize;
    KKASSERT(i < EMX_NSERIALIZE);
    sc->serializes[i++] = &sc->tx_data[1].tx_serialize;

    KKASSERT(i < EMX_NSERIALIZE);
    sc->serializes[i++] = &sc->rx_data[0].rx_serialize;
    KKASSERT(i < EMX_NSERIALIZE);
    sc->serializes[i++] = &sc->rx_data[1].rx_serialize;

    KKASSERT(i == EMX_NSERIALIZE);

    ifmedia_init(&sc->media, IFM_IMASK | IFM_ETH_FCMASK,
        emx_media_change, emx_media_status);
    callout_init_mp(&sc->timer);

    sc->dev = sc->osdep.dev = dev;

    /*
     * Determine hardware and mac type
     */
    sc->hw.vendor_id = pci_get_vendor(dev);
    sc->hw.device_id = pci_get_device(dev);
    sc->hw.revision_id = pci_get_revid(dev);
    sc->hw.subsystem_vendor_id = pci_get_subvendor(dev);
    sc->hw.subsystem_device_id = pci_get_subdevice(dev);

    if (e1000_set_mac_type(&sc->hw))
        return ENXIO;

    /* Enable bus mastering */
    pci_enable_busmaster(dev);

    /*
     * Allocate IO memory
     */
    sc->memory_rid = EMX_BAR_MEM;
    sc->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
        &sc->memory_rid, RF_ACTIVE);
    if (sc->memory == NULL) {
        device_printf(dev, "Unable to allocate bus resource: memory\n");
        error = ENXIO;
        goto fail;
    }
    sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->memory);
    sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->memory);

    /* XXX This is quite goofy, it is not actually used */
    sc->hw.hw_addr = (uint8_t *)&sc->osdep.mem_bus_space_handle;

    /*
     * Don't enable MSI-X on 82574, see:
     * 82574 specification update errata #15
     *
     * Don't enable MSI on 82571/82572, see:
     * 82571/82572 specification update errata #63
     */
    msi_enable = emx_msi_enable;
    if (msi_enable &&
        (sc->hw.mac.type == e1000_82571 ||
         sc->hw.mac.type == e1000_82572))
        msi_enable = 0;
again:
    /*
     * Allocate interrupt
     */
    sc->intr_type = pci_alloc_1intr(dev, msi_enable,
        &sc->intr_rid, &intr_flags);

    if (sc->intr_type == PCI_INTR_TYPE_LEGACY) {
        int unshared;

        unshared = device_getenv_int(dev, "irq.unshared", 0);
        if (!unshared) {
            sc->flags |= EMX_FLAG_SHARED_INTR;
            if (bootverbose)
                device_printf(dev, "IRQ shared\n");
        } else {
            intr_flags &= ~RF_SHAREABLE;
            if (bootverbose)
                device_printf(dev, "IRQ unshared\n");
        }
    }

    sc->intr_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->intr_rid,
        intr_flags);
    if (sc->intr_res == NULL) {
        device_printf(dev, "Unable to allocate bus resource: %s\n",
            sc->intr_type == PCI_INTR_TYPE_MSI ? "MSI" : "legacy intr");
        if (!msi_enable) {
            /* Retry with MSI. */
            msi_enable = 1;
            sc->flags &= ~EMX_FLAG_SHARED_INTR;
            goto again;
        }
        error = ENXIO;
        goto fail;
    }

    /* Save PCI command register for Shared Code */
    sc->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
    sc->hw.back = &sc->osdep;

    /*
     * For I217/I218, we need to map the flash memory and this
     * must happen after the MAC is identified.
     */
    if (sc->hw.mac.type == e1000_pch_lpt) {
        sc->flash_rid = EMX_BAR_FLASH;

        sc->flash = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
            &sc->flash_rid, RF_ACTIVE);
        if (sc->flash == NULL) {
            device_printf(dev, "Mapping of Flash failed\n");
            error = ENXIO;
            goto fail;
        }
        sc->osdep.flash_bus_space_tag = rman_get_bustag(sc->flash);
        sc->osdep.flash_bus_space_handle =
            rman_get_bushandle(sc->flash);

        /*
         * This is used in the shared code
         * XXX this goof is actually not used.
         */
        sc->hw.flash_address = (uint8_t *)sc->flash;
    }

    /* Do Shared Code initialization */
    if (e1000_setup_init_funcs(&sc->hw, TRUE)) {
        device_printf(dev, "Setup of Shared code failed\n");
        error = ENXIO;
        goto fail;
    }
    e1000_get_bus_info(&sc->hw);

    sc->hw.mac.autoneg = EMX_DO_AUTO_NEG;
    sc->hw.phy.autoneg_wait_to_complete = FALSE;
    sc->hw.phy.autoneg_advertised = EMX_AUTONEG_ADV_DEFAULT;

    /*
     * Interrupt throttle rate
     */
    throttle = device_getenv_int(dev, "int_throttle_ceil",
        emx_int_throttle_ceil);
    if (throttle == 0) {
        sc->int_throttle_ceil = 0;
    } else {
        if (throttle < 0)
            throttle = EMX_DEFAULT_ITR;

        /* Recalculate the tunable value to get the exact frequency. */
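        /*
         * Worked example (illustrative only, not part of the original
         * sources): a requested ceiling of 8000 interrupts/s gives
         * throttle = 1000000000 / 256 / 8000 = 488, and int_throttle_ceil
         * below is then recomputed as 1000000000 / 256 / 488, i.e.
         * roughly 8004 interrupts/s.
         */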
        throttle = 1000000000 / 256 / throttle;

        /* Upper 16bits of ITR is reserved and should be zero */
        if (throttle & 0xffff0000)
            throttle = 1000000000 / 256 / EMX_DEFAULT_ITR;

        sc->int_throttle_ceil = 1000000000 / 256 / throttle;
    }

    e1000_init_script_state_82541(&sc->hw, TRUE);
    e1000_set_tbi_compatibility_82543(&sc->hw, TRUE);

    /* Copper options */
    if (sc->hw.phy.media_type == e1000_media_type_copper) {
        sc->hw.phy.mdix = EMX_AUTO_ALL_MODES;
        sc->hw.phy.disable_polarity_correction = FALSE;
        sc->hw.phy.ms_type = EMX_MASTER_SLAVE;
    }

    /* Set the frame limits assuming standard ethernet sized frames. */
    sc->hw.mac.max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;

    /* This controls when hardware reports transmit completion status. */
    sc->hw.mac.report_tx_early = 1;

    /* Calculate # of RX rings */
    sc->rx_ring_cnt = device_getenv_int(dev, "rxr", emx_rxr);
    sc->rx_ring_cnt = if_ring_count2(sc->rx_ring_cnt, EMX_NRX_RING);

    /*
     * Calculate # of TX rings
     *
     * XXX
     * I217/I218 claims to have 2 TX queues
     *
     * NOTE:
     * Don't enable multiple TX queues on 82574; it always gives
     * watchdog timeout on TX queue0, when multiple TCP streams are
     * received.  It was originally suspected that the hardware TX
     * checksum offloading caused this watchdog timeout, since only
     * TCP ACKs are sent during TCP receiving tests.  However, even
     * if the hardware TX checksum offloading is disabled, TX queue0
     * still will give watchdog.
     */
    tx_ring_max = 1;
    if (sc->hw.mac.type == e1000_82571 ||
        sc->hw.mac.type == e1000_82572 ||
        sc->hw.mac.type == e1000_80003es2lan ||
        sc->hw.mac.type == e1000_pch_lpt ||
        sc->hw.mac.type == e1000_pch_spt ||
        sc->hw.mac.type == e1000_82574)
        tx_ring_max = EMX_NTX_RING;
    sc->tx_ring_cnt = device_getenv_int(dev, "txr", emx_txr);
    sc->tx_ring_cnt = if_ring_count2(sc->tx_ring_cnt, tx_ring_max);

    /* Allocate RX/TX rings' busdma(9) stuffs */
    error = emx_dma_alloc(sc);
    if (error)
        goto fail;

    /* Allocate multicast array memory. */
    sc->mta = kmalloc(ETH_ADDR_LEN * EMX_MCAST_ADDR_MAX,
        M_DEVBUF, M_WAITOK);

    /* Indicate SOL/IDER usage */
    if (e1000_check_reset_block(&sc->hw)) {
        device_printf(dev,
            "PHY reset is blocked due to SOL/IDER session.\n");
    }

    /* Disable EEE on I217/I218 */
    sc->hw.dev_spec.ich8lan.eee_disable = 1;

    /*
     * Start from a known state, this is important in reading the
     * nvm and mac from that.
     */
    e1000_reset_hw(&sc->hw);

    /* Make sure we have a good EEPROM before we read from it */
    if (e1000_validate_nvm_checksum(&sc->hw) < 0) {
        /*
         * Some PCI-E parts fail the first check due to
         * the link being in sleep state, call it again,
         * if it fails a second time it's a real issue.
716 */ 717 if (e1000_validate_nvm_checksum(&sc->hw) < 0) { 718 device_printf(dev, 719 "The EEPROM Checksum Is Not Valid\n"); 720 error = EIO; 721 goto fail; 722 } 723 } 724 725 /* Copy the permanent MAC address out of the EEPROM */ 726 if (e1000_read_mac_addr(&sc->hw) < 0) { 727 device_printf(dev, "EEPROM read error while reading MAC" 728 " address\n"); 729 error = EIO; 730 goto fail; 731 } 732 if (!emx_is_valid_eaddr(sc->hw.mac.addr)) { 733 device_printf(dev, "Invalid MAC address\n"); 734 error = EIO; 735 goto fail; 736 } 737 738 /* Disable ULP support */ 739 e1000_disable_ulp_lpt_lp(&sc->hw, TRUE); 740 741 /* Determine if we have to control management hardware */ 742 if (e1000_enable_mng_pass_thru(&sc->hw)) 743 sc->flags |= EMX_FLAG_HAS_MGMT; 744 745 /* 746 * Setup Wake-on-Lan 747 */ 748 apme_mask = EMX_EEPROM_APME; 749 eeprom_data = 0; 750 switch (sc->hw.mac.type) { 751 case e1000_82573: 752 sc->flags |= EMX_FLAG_HAS_AMT; 753 /* FALL THROUGH */ 754 755 case e1000_82571: 756 case e1000_82572: 757 case e1000_80003es2lan: 758 if (sc->hw.bus.func == 1) { 759 e1000_read_nvm(&sc->hw, 760 NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); 761 } else { 762 e1000_read_nvm(&sc->hw, 763 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); 764 } 765 break; 766 767 default: 768 e1000_read_nvm(&sc->hw, 769 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); 770 break; 771 } 772 if (eeprom_data & apme_mask) 773 sc->wol = E1000_WUFC_MAG | E1000_WUFC_MC; 774 775 /* 776 * We have the eeprom settings, now apply the special cases 777 * where the eeprom may be wrong or the board won't support 778 * wake on lan on a particular port 779 */ 780 device_id = pci_get_device(dev); 781 switch (device_id) { 782 case E1000_DEV_ID_82571EB_FIBER: 783 /* 784 * Wake events only supported on port A for dual fiber 785 * regardless of eeprom setting 786 */ 787 if (E1000_READ_REG(&sc->hw, E1000_STATUS) & 788 E1000_STATUS_FUNC_1) 789 sc->wol = 0; 790 break; 791 792 case E1000_DEV_ID_82571EB_QUAD_COPPER: 793 case E1000_DEV_ID_82571EB_QUAD_FIBER: 794 case E1000_DEV_ID_82571EB_QUAD_COPPER_LP: 795 /* if quad port sc, disable WoL on all but port A */ 796 if (emx_global_quad_port_a != 0) 797 sc->wol = 0; 798 /* Reset for multiple quad port adapters */ 799 if (++emx_global_quad_port_a == 4) 800 emx_global_quad_port_a = 0; 801 break; 802 } 803 804 /* XXX disable wol */ 805 sc->wol = 0; 806 807 #ifdef IFPOLL_ENABLE 808 /* 809 * NPOLLING RX CPU offset 810 */ 811 if (sc->rx_ring_cnt == ncpus2) { 812 offset = 0; 813 } else { 814 offset_def = (sc->rx_ring_cnt * device_get_unit(dev)) % ncpus2; 815 offset = device_getenv_int(dev, "npoll.rxoff", offset_def); 816 if (offset >= ncpus2 || 817 offset % sc->rx_ring_cnt != 0) { 818 device_printf(dev, "invalid npoll.rxoff %d, use %d\n", 819 offset, offset_def); 820 offset = offset_def; 821 } 822 } 823 sc->rx_npoll_off = offset; 824 825 /* 826 * NPOLLING TX CPU offset 827 */ 828 if (sc->tx_ring_cnt == ncpus2) { 829 offset = 0; 830 } else { 831 offset_def = (sc->tx_ring_cnt * device_get_unit(dev)) % ncpus2; 832 offset = device_getenv_int(dev, "npoll.txoff", offset_def); 833 if (offset >= ncpus2 || 834 offset % sc->tx_ring_cnt != 0) { 835 device_printf(dev, "invalid npoll.txoff %d, use %d\n", 836 offset, offset_def); 837 offset = offset_def; 838 } 839 } 840 sc->tx_npoll_off = offset; 841 #endif 842 sc->tx_ring_inuse = emx_get_txring_inuse(sc, FALSE); 843 844 /* Setup flow control. 
*/ 845 device_getenv_string(dev, "flow_ctrl", flowctrl, sizeof(flowctrl), 846 emx_flowctrl); 847 sc->ifm_flowctrl = ifmedia_str2ethfc(flowctrl); 848 849 /* Setup OS specific network interface */ 850 emx_setup_ifp(sc); 851 852 /* Add sysctl tree, must after em_setup_ifp() */ 853 emx_add_sysctl(sc); 854 855 /* Reset the hardware */ 856 error = emx_reset(sc); 857 if (error) { 858 /* 859 * Some 82573 parts fail the first reset, call it again, 860 * if it fails a second time its a real issue. 861 */ 862 error = emx_reset(sc); 863 if (error) { 864 device_printf(dev, "Unable to reset the hardware\n"); 865 ether_ifdetach(&sc->arpcom.ac_if); 866 goto fail; 867 } 868 } 869 870 /* Initialize statistics */ 871 emx_update_stats(sc); 872 873 sc->hw.mac.get_link_status = 1; 874 emx_update_link_status(sc); 875 876 /* Non-AMT based hardware can now take control from firmware */ 877 if ((sc->flags & (EMX_FLAG_HAS_MGMT | EMX_FLAG_HAS_AMT)) == 878 EMX_FLAG_HAS_MGMT) 879 emx_get_hw_control(sc); 880 881 /* 882 * Missing Interrupt Following ICR read: 883 * 884 * 82571/82572 specification update errata #76 885 * 82573 specification update errata #31 886 * 82574 specification update errata #12 887 */ 888 intr_func = emx_intr; 889 if ((sc->flags & EMX_FLAG_SHARED_INTR) && 890 (sc->hw.mac.type == e1000_82571 || 891 sc->hw.mac.type == e1000_82572 || 892 sc->hw.mac.type == e1000_82573 || 893 sc->hw.mac.type == e1000_82574)) 894 intr_func = emx_intr_mask; 895 896 error = bus_setup_intr(dev, sc->intr_res, INTR_MPSAFE, intr_func, sc, 897 &sc->intr_tag, &sc->main_serialize); 898 if (error) { 899 device_printf(dev, "Failed to register interrupt handler"); 900 ether_ifdetach(&sc->arpcom.ac_if); 901 goto fail; 902 } 903 return (0); 904 fail: 905 emx_detach(dev); 906 return (error); 907 } 908 909 static int 910 emx_detach(device_t dev) 911 { 912 struct emx_softc *sc = device_get_softc(dev); 913 914 if (device_is_attached(dev)) { 915 struct ifnet *ifp = &sc->arpcom.ac_if; 916 917 ifnet_serialize_all(ifp); 918 919 emx_stop(sc); 920 921 e1000_phy_hw_reset(&sc->hw); 922 923 emx_rel_mgmt(sc); 924 emx_rel_hw_control(sc); 925 926 if (sc->wol) { 927 E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN); 928 E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol); 929 emx_enable_wol(dev); 930 } 931 932 bus_teardown_intr(dev, sc->intr_res, sc->intr_tag); 933 934 ifnet_deserialize_all(ifp); 935 936 ether_ifdetach(ifp); 937 } else if (sc->memory != NULL) { 938 emx_rel_hw_control(sc); 939 } 940 941 ifmedia_removeall(&sc->media); 942 bus_generic_detach(dev); 943 944 if (sc->intr_res != NULL) { 945 bus_release_resource(dev, SYS_RES_IRQ, sc->intr_rid, 946 sc->intr_res); 947 } 948 949 if (sc->intr_type == PCI_INTR_TYPE_MSI) 950 pci_release_msi(dev); 951 952 if (sc->memory != NULL) { 953 bus_release_resource(dev, SYS_RES_MEMORY, sc->memory_rid, 954 sc->memory); 955 } 956 957 if (sc->flash != NULL) { 958 bus_release_resource(dev, SYS_RES_MEMORY, sc->flash_rid, 959 sc->flash); 960 } 961 962 emx_dma_free(sc); 963 964 if (sc->mta != NULL) 965 kfree(sc->mta, M_DEVBUF); 966 967 return (0); 968 } 969 970 static int 971 emx_shutdown(device_t dev) 972 { 973 return emx_suspend(dev); 974 } 975 976 static int 977 emx_suspend(device_t dev) 978 { 979 struct emx_softc *sc = device_get_softc(dev); 980 struct ifnet *ifp = &sc->arpcom.ac_if; 981 982 ifnet_serialize_all(ifp); 983 984 emx_stop(sc); 985 986 emx_rel_mgmt(sc); 987 emx_rel_hw_control(sc); 988 989 if (sc->wol) { 990 E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN); 991 E1000_WRITE_REG(&sc->hw, E1000_WUFC, 
sc->wol); 992 emx_enable_wol(dev); 993 } 994 995 ifnet_deserialize_all(ifp); 996 997 return bus_generic_suspend(dev); 998 } 999 1000 static int 1001 emx_resume(device_t dev) 1002 { 1003 struct emx_softc *sc = device_get_softc(dev); 1004 struct ifnet *ifp = &sc->arpcom.ac_if; 1005 int i; 1006 1007 ifnet_serialize_all(ifp); 1008 1009 emx_init(sc); 1010 emx_get_mgmt(sc); 1011 for (i = 0; i < sc->tx_ring_inuse; ++i) 1012 ifsq_devstart_sched(sc->tx_data[i].ifsq); 1013 1014 ifnet_deserialize_all(ifp); 1015 1016 return bus_generic_resume(dev); 1017 } 1018 1019 static void 1020 emx_start(struct ifnet *ifp, struct ifaltq_subque *ifsq) 1021 { 1022 struct emx_softc *sc = ifp->if_softc; 1023 struct emx_txdata *tdata = ifsq_get_priv(ifsq); 1024 struct mbuf *m_head; 1025 int idx = -1, nsegs = 0; 1026 1027 KKASSERT(tdata->ifsq == ifsq); 1028 ASSERT_SERIALIZED(&tdata->tx_serialize); 1029 1030 if ((ifp->if_flags & IFF_RUNNING) == 0 || ifsq_is_oactive(ifsq)) 1031 return; 1032 1033 if (!sc->link_active || (tdata->tx_flags & EMX_TXFLAG_ENABLED) == 0) { 1034 ifsq_purge(ifsq); 1035 return; 1036 } 1037 1038 while (!ifsq_is_empty(ifsq)) { 1039 /* Now do we at least have a minimal? */ 1040 if (EMX_IS_OACTIVE(tdata)) { 1041 emx_tx_collect(tdata); 1042 if (EMX_IS_OACTIVE(tdata)) { 1043 ifsq_set_oactive(ifsq); 1044 break; 1045 } 1046 } 1047 1048 logif(pkt_txqueue); 1049 m_head = ifsq_dequeue(ifsq); 1050 if (m_head == NULL) 1051 break; 1052 1053 if (emx_encap(tdata, &m_head, &nsegs, &idx)) { 1054 IFNET_STAT_INC(ifp, oerrors, 1); 1055 emx_tx_collect(tdata); 1056 continue; 1057 } 1058 1059 /* 1060 * TX interrupt are aggressively aggregated, so increasing 1061 * opackets at TX interrupt time will make the opackets 1062 * statistics vastly inaccurate; we do the opackets increment 1063 * now. 1064 */ 1065 IFNET_STAT_INC(ifp, opackets, 1); 1066 1067 if (nsegs >= tdata->tx_wreg_nsegs) { 1068 E1000_WRITE_REG(&sc->hw, E1000_TDT(tdata->idx), idx); 1069 nsegs = 0; 1070 idx = -1; 1071 } 1072 1073 /* Send a copy of the frame to the BPF listener */ 1074 ETHER_BPF_MTAP(ifp, m_head); 1075 1076 /* Set timeout in case hardware has problems transmitting. */ 1077 tdata->tx_watchdog.wd_timer = EMX_TX_TIMEOUT; 1078 } 1079 if (idx >= 0) 1080 E1000_WRITE_REG(&sc->hw, E1000_TDT(tdata->idx), idx); 1081 } 1082 1083 static int 1084 emx_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr) 1085 { 1086 struct emx_softc *sc = ifp->if_softc; 1087 struct ifreq *ifr = (struct ifreq *)data; 1088 uint16_t eeprom_data = 0; 1089 int max_frame_size, mask, reinit; 1090 int error = 0; 1091 1092 ASSERT_IFNET_SERIALIZED_ALL(ifp); 1093 1094 switch (command) { 1095 case SIOCSIFMTU: 1096 switch (sc->hw.mac.type) { 1097 case e1000_82573: 1098 /* 1099 * 82573 only supports jumbo frames 1100 * if ASPM is disabled. 
1101 */ 1102 e1000_read_nvm(&sc->hw, NVM_INIT_3GIO_3, 1, 1103 &eeprom_data); 1104 if (eeprom_data & NVM_WORD1A_ASPM_MASK) { 1105 max_frame_size = ETHER_MAX_LEN; 1106 break; 1107 } 1108 /* FALL THROUGH */ 1109 1110 /* Limit Jumbo Frame size */ 1111 case e1000_82571: 1112 case e1000_82572: 1113 case e1000_82574: 1114 case e1000_pch_lpt: 1115 case e1000_pch_spt: 1116 case e1000_80003es2lan: 1117 max_frame_size = 9234; 1118 break; 1119 1120 default: 1121 max_frame_size = MAX_JUMBO_FRAME_SIZE; 1122 break; 1123 } 1124 if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN - 1125 ETHER_CRC_LEN) { 1126 error = EINVAL; 1127 break; 1128 } 1129 1130 ifp->if_mtu = ifr->ifr_mtu; 1131 sc->hw.mac.max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + 1132 ETHER_CRC_LEN; 1133 1134 if (ifp->if_flags & IFF_RUNNING) 1135 emx_init(sc); 1136 break; 1137 1138 case SIOCSIFFLAGS: 1139 if (ifp->if_flags & IFF_UP) { 1140 if ((ifp->if_flags & IFF_RUNNING)) { 1141 if ((ifp->if_flags ^ sc->if_flags) & 1142 (IFF_PROMISC | IFF_ALLMULTI)) { 1143 emx_disable_promisc(sc); 1144 emx_set_promisc(sc); 1145 } 1146 } else { 1147 emx_init(sc); 1148 } 1149 } else if (ifp->if_flags & IFF_RUNNING) { 1150 emx_stop(sc); 1151 } 1152 sc->if_flags = ifp->if_flags; 1153 break; 1154 1155 case SIOCADDMULTI: 1156 case SIOCDELMULTI: 1157 if (ifp->if_flags & IFF_RUNNING) { 1158 emx_disable_intr(sc); 1159 emx_set_multi(sc); 1160 #ifdef IFPOLL_ENABLE 1161 if (!(ifp->if_flags & IFF_NPOLLING)) 1162 #endif 1163 emx_enable_intr(sc); 1164 } 1165 break; 1166 1167 case SIOCSIFMEDIA: 1168 /* Check SOL/IDER usage */ 1169 if (e1000_check_reset_block(&sc->hw)) { 1170 device_printf(sc->dev, "Media change is" 1171 " blocked due to SOL/IDER session.\n"); 1172 break; 1173 } 1174 /* FALL THROUGH */ 1175 1176 case SIOCGIFMEDIA: 1177 error = ifmedia_ioctl(ifp, ifr, &sc->media, command); 1178 break; 1179 1180 case SIOCSIFCAP: 1181 reinit = 0; 1182 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 1183 if (mask & IFCAP_RXCSUM) { 1184 ifp->if_capenable ^= IFCAP_RXCSUM; 1185 reinit = 1; 1186 } 1187 if (mask & IFCAP_VLAN_HWTAGGING) { 1188 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 1189 reinit = 1; 1190 } 1191 if (mask & IFCAP_TXCSUM) { 1192 ifp->if_capenable ^= IFCAP_TXCSUM; 1193 if (ifp->if_capenable & IFCAP_TXCSUM) 1194 ifp->if_hwassist |= EMX_CSUM_FEATURES; 1195 else 1196 ifp->if_hwassist &= ~EMX_CSUM_FEATURES; 1197 } 1198 if (mask & IFCAP_TSO) { 1199 ifp->if_capenable ^= IFCAP_TSO; 1200 if (ifp->if_capenable & IFCAP_TSO) 1201 ifp->if_hwassist |= CSUM_TSO; 1202 else 1203 ifp->if_hwassist &= ~CSUM_TSO; 1204 } 1205 if (mask & IFCAP_RSS) 1206 ifp->if_capenable ^= IFCAP_RSS; 1207 if (reinit && (ifp->if_flags & IFF_RUNNING)) 1208 emx_init(sc); 1209 break; 1210 1211 default: 1212 error = ether_ioctl(ifp, command, data); 1213 break; 1214 } 1215 return (error); 1216 } 1217 1218 static void 1219 emx_watchdog(struct ifaltq_subque *ifsq) 1220 { 1221 struct emx_txdata *tdata = ifsq_get_priv(ifsq); 1222 struct ifnet *ifp = ifsq_get_ifp(ifsq); 1223 struct emx_softc *sc = ifp->if_softc; 1224 int i; 1225 1226 ASSERT_IFNET_SERIALIZED_ALL(ifp); 1227 1228 /* 1229 * The timer is set to 5 every time start queues a packet. 1230 * Then txeof keeps resetting it as long as it cleans at 1231 * least one descriptor. 1232 * Finally, anytime all descriptors are clean the timer is 1233 * set to 0. 
1234 */ 1235 1236 if (E1000_READ_REG(&sc->hw, E1000_TDT(tdata->idx)) == 1237 E1000_READ_REG(&sc->hw, E1000_TDH(tdata->idx))) { 1238 /* 1239 * If we reach here, all TX jobs are completed and 1240 * the TX engine should have been idled for some time. 1241 * We don't need to call ifsq_devstart_sched() here. 1242 */ 1243 ifsq_clr_oactive(ifsq); 1244 tdata->tx_watchdog.wd_timer = 0; 1245 return; 1246 } 1247 1248 /* 1249 * If we are in this routine because of pause frames, then 1250 * don't reset the hardware. 1251 */ 1252 if (E1000_READ_REG(&sc->hw, E1000_STATUS) & E1000_STATUS_TXOFF) { 1253 tdata->tx_watchdog.wd_timer = EMX_TX_TIMEOUT; 1254 return; 1255 } 1256 1257 if_printf(ifp, "TX %d watchdog timeout -- resetting\n", tdata->idx); 1258 1259 IFNET_STAT_INC(ifp, oerrors, 1); 1260 1261 emx_init(sc); 1262 for (i = 0; i < sc->tx_ring_inuse; ++i) 1263 ifsq_devstart_sched(sc->tx_data[i].ifsq); 1264 } 1265 1266 static void 1267 emx_init(void *xsc) 1268 { 1269 struct emx_softc *sc = xsc; 1270 struct ifnet *ifp = &sc->arpcom.ac_if; 1271 device_t dev = sc->dev; 1272 boolean_t polling; 1273 int i; 1274 1275 ASSERT_IFNET_SERIALIZED_ALL(ifp); 1276 1277 emx_stop(sc); 1278 1279 /* Get the latest mac address, User can use a LAA */ 1280 bcopy(IF_LLADDR(ifp), sc->hw.mac.addr, ETHER_ADDR_LEN); 1281 1282 /* Put the address into the Receive Address Array */ 1283 e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0); 1284 1285 /* 1286 * With the 82571 sc, RAR[0] may be overwritten 1287 * when the other port is reset, we make a duplicate 1288 * in RAR[14] for that eventuality, this assures 1289 * the interface continues to function. 1290 */ 1291 if (sc->hw.mac.type == e1000_82571) { 1292 e1000_set_laa_state_82571(&sc->hw, TRUE); 1293 e1000_rar_set(&sc->hw, sc->hw.mac.addr, 1294 E1000_RAR_ENTRIES - 1); 1295 } 1296 1297 /* Initialize the hardware */ 1298 if (emx_reset(sc)) { 1299 device_printf(dev, "Unable to reset the hardware\n"); 1300 /* XXX emx_stop()? 
*/ 1301 return; 1302 } 1303 emx_update_link_status(sc); 1304 1305 /* Setup VLAN support, basic and offload if available */ 1306 E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN); 1307 1308 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) { 1309 uint32_t ctrl; 1310 1311 ctrl = E1000_READ_REG(&sc->hw, E1000_CTRL); 1312 ctrl |= E1000_CTRL_VME; 1313 E1000_WRITE_REG(&sc->hw, E1000_CTRL, ctrl); 1314 } 1315 1316 /* Configure for OS presence */ 1317 emx_get_mgmt(sc); 1318 1319 polling = FALSE; 1320 #ifdef IFPOLL_ENABLE 1321 if (ifp->if_flags & IFF_NPOLLING) 1322 polling = TRUE; 1323 #endif 1324 sc->tx_ring_inuse = emx_get_txring_inuse(sc, polling); 1325 ifq_set_subq_mask(&ifp->if_snd, sc->tx_ring_inuse - 1); 1326 1327 /* Prepare transmit descriptors and buffers */ 1328 for (i = 0; i < sc->tx_ring_inuse; ++i) 1329 emx_init_tx_ring(&sc->tx_data[i]); 1330 emx_init_tx_unit(sc); 1331 1332 /* Setup Multicast table */ 1333 emx_set_multi(sc); 1334 1335 /* Prepare receive descriptors and buffers */ 1336 for (i = 0; i < sc->rx_ring_cnt; ++i) { 1337 if (emx_init_rx_ring(&sc->rx_data[i])) { 1338 device_printf(dev, 1339 "Could not setup receive structures\n"); 1340 emx_stop(sc); 1341 return; 1342 } 1343 } 1344 emx_init_rx_unit(sc); 1345 1346 /* Don't lose promiscuous settings */ 1347 emx_set_promisc(sc); 1348 1349 ifp->if_flags |= IFF_RUNNING; 1350 for (i = 0; i < sc->tx_ring_inuse; ++i) { 1351 ifsq_clr_oactive(sc->tx_data[i].ifsq); 1352 ifsq_watchdog_start(&sc->tx_data[i].tx_watchdog); 1353 } 1354 1355 callout_reset(&sc->timer, hz, emx_timer, sc); 1356 e1000_clear_hw_cntrs_base_generic(&sc->hw); 1357 1358 /* MSI/X configuration for 82574 */ 1359 if (sc->hw.mac.type == e1000_82574) { 1360 int tmp; 1361 1362 tmp = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT); 1363 tmp |= E1000_CTRL_EXT_PBA_CLR; 1364 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, tmp); 1365 /* 1366 * XXX MSIX 1367 * Set the IVAR - interrupt vector routing. 1368 * Each nibble represents a vector, high bit 1369 * is enable, other 3 bits are the MSIX table 1370 * entry, we map RXQ0 to 0, TXQ0 to 1, and 1371 * Link (other) to 2, hence the magic number. 1372 */ 1373 E1000_WRITE_REG(&sc->hw, E1000_IVAR, 0x800A0908); 1374 } 1375 1376 /* 1377 * Only enable interrupts if we are not polling, make sure 1378 * they are off otherwise. 1379 */ 1380 if (polling) 1381 emx_disable_intr(sc); 1382 else 1383 emx_enable_intr(sc); 1384 1385 /* AMT based hardware can now take control from firmware */ 1386 if ((sc->flags & (EMX_FLAG_HAS_MGMT | EMX_FLAG_HAS_AMT)) == 1387 (EMX_FLAG_HAS_MGMT | EMX_FLAG_HAS_AMT)) 1388 emx_get_hw_control(sc); 1389 } 1390 1391 static void 1392 emx_intr(void *xsc) 1393 { 1394 emx_intr_body(xsc, TRUE); 1395 } 1396 1397 static void 1398 emx_intr_body(struct emx_softc *sc, boolean_t chk_asserted) 1399 { 1400 struct ifnet *ifp = &sc->arpcom.ac_if; 1401 uint32_t reg_icr; 1402 1403 logif(intr_beg); 1404 ASSERT_SERIALIZED(&sc->main_serialize); 1405 1406 reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR); 1407 1408 if (chk_asserted && (reg_icr & E1000_ICR_INT_ASSERTED) == 0) { 1409 logif(intr_end); 1410 return; 1411 } 1412 1413 /* 1414 * XXX: some laptops trigger several spurious interrupts 1415 * on emx(4) when in the resume cycle. The ICR register 1416 * reports all-ones value in this case. Processing such 1417 * interrupts would lead to a freeze. I don't know why. 
1418 */ 1419 if (reg_icr == 0xffffffff) { 1420 logif(intr_end); 1421 return; 1422 } 1423 1424 if (ifp->if_flags & IFF_RUNNING) { 1425 if (reg_icr & 1426 (E1000_ICR_RXT0 | E1000_ICR_RXDMT0 | E1000_ICR_RXO)) { 1427 int i; 1428 1429 for (i = 0; i < sc->rx_ring_cnt; ++i) { 1430 lwkt_serialize_enter( 1431 &sc->rx_data[i].rx_serialize); 1432 emx_rxeof(&sc->rx_data[i], -1); 1433 lwkt_serialize_exit( 1434 &sc->rx_data[i].rx_serialize); 1435 } 1436 } 1437 if (reg_icr & E1000_ICR_TXDW) { 1438 struct emx_txdata *tdata = &sc->tx_data[0]; 1439 1440 lwkt_serialize_enter(&tdata->tx_serialize); 1441 emx_txeof(tdata); 1442 if (!ifsq_is_empty(tdata->ifsq)) 1443 ifsq_devstart(tdata->ifsq); 1444 lwkt_serialize_exit(&tdata->tx_serialize); 1445 } 1446 } 1447 1448 /* Link status change */ 1449 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { 1450 emx_serialize_skipmain(sc); 1451 1452 callout_stop(&sc->timer); 1453 sc->hw.mac.get_link_status = 1; 1454 emx_update_link_status(sc); 1455 1456 /* Deal with TX cruft when link lost */ 1457 emx_tx_purge(sc); 1458 1459 callout_reset(&sc->timer, hz, emx_timer, sc); 1460 1461 emx_deserialize_skipmain(sc); 1462 } 1463 1464 if (reg_icr & E1000_ICR_RXO) 1465 sc->rx_overruns++; 1466 1467 logif(intr_end); 1468 } 1469 1470 static void 1471 emx_intr_mask(void *xsc) 1472 { 1473 struct emx_softc *sc = xsc; 1474 1475 E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff); 1476 /* 1477 * NOTE: 1478 * ICR.INT_ASSERTED bit will never be set if IMS is 0, 1479 * so don't check it. 1480 */ 1481 emx_intr_body(sc, FALSE); 1482 E1000_WRITE_REG(&sc->hw, E1000_IMS, IMS_ENABLE_MASK); 1483 } 1484 1485 static void 1486 emx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 1487 { 1488 struct emx_softc *sc = ifp->if_softc; 1489 1490 ASSERT_IFNET_SERIALIZED_ALL(ifp); 1491 1492 emx_update_link_status(sc); 1493 1494 ifmr->ifm_status = IFM_AVALID; 1495 ifmr->ifm_active = IFM_ETHER; 1496 1497 if (!sc->link_active) { 1498 if (sc->hw.mac.autoneg) 1499 ifmr->ifm_active |= IFM_NONE; 1500 else 1501 ifmr->ifm_active |= sc->media.ifm_media; 1502 return; 1503 } 1504 1505 ifmr->ifm_status |= IFM_ACTIVE; 1506 if (sc->ifm_flowctrl & IFM_ETH_FORCEPAUSE) 1507 ifmr->ifm_active |= sc->ifm_flowctrl; 1508 1509 if (sc->hw.phy.media_type == e1000_media_type_fiber || 1510 sc->hw.phy.media_type == e1000_media_type_internal_serdes) { 1511 ifmr->ifm_active |= IFM_1000_SX | IFM_FDX; 1512 } else { 1513 switch (sc->link_speed) { 1514 case 10: 1515 ifmr->ifm_active |= IFM_10_T; 1516 break; 1517 case 100: 1518 ifmr->ifm_active |= IFM_100_TX; 1519 break; 1520 1521 case 1000: 1522 ifmr->ifm_active |= IFM_1000_T; 1523 break; 1524 } 1525 if (sc->link_duplex == FULL_DUPLEX) 1526 ifmr->ifm_active |= IFM_FDX; 1527 else 1528 ifmr->ifm_active |= IFM_HDX; 1529 } 1530 if (ifmr->ifm_active & IFM_FDX) 1531 ifmr->ifm_active |= e1000_fc2ifmedia(sc->hw.fc.current_mode); 1532 } 1533 1534 static int 1535 emx_media_change(struct ifnet *ifp) 1536 { 1537 struct emx_softc *sc = ifp->if_softc; 1538 struct ifmedia *ifm = &sc->media; 1539 1540 ASSERT_IFNET_SERIALIZED_ALL(ifp); 1541 1542 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 1543 return (EINVAL); 1544 1545 switch (IFM_SUBTYPE(ifm->ifm_media)) { 1546 case IFM_AUTO: 1547 sc->hw.mac.autoneg = EMX_DO_AUTO_NEG; 1548 sc->hw.phy.autoneg_advertised = EMX_AUTONEG_ADV_DEFAULT; 1549 break; 1550 1551 case IFM_1000_SX: 1552 case IFM_1000_T: 1553 sc->hw.mac.autoneg = EMX_DO_AUTO_NEG; 1554 sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; 1555 break; 1556 1557 case IFM_100_TX: 1558 if 
(IFM_OPTIONS(ifm->ifm_media) & IFM_FDX) { 1559 sc->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL; 1560 } else { 1561 if (IFM_OPTIONS(ifm->ifm_media) & 1562 (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) { 1563 if (bootverbose) { 1564 if_printf(ifp, "Flow control is not " 1565 "allowed for half-duplex\n"); 1566 } 1567 return EINVAL; 1568 } 1569 sc->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF; 1570 } 1571 sc->hw.mac.autoneg = FALSE; 1572 sc->hw.phy.autoneg_advertised = 0; 1573 break; 1574 1575 case IFM_10_T: 1576 if (IFM_OPTIONS(ifm->ifm_media) & IFM_FDX) { 1577 sc->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL; 1578 } else { 1579 if (IFM_OPTIONS(ifm->ifm_media) & 1580 (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) { 1581 if (bootverbose) { 1582 if_printf(ifp, "Flow control is not " 1583 "allowed for half-duplex\n"); 1584 } 1585 return EINVAL; 1586 } 1587 sc->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF; 1588 } 1589 sc->hw.mac.autoneg = FALSE; 1590 sc->hw.phy.autoneg_advertised = 0; 1591 break; 1592 1593 default: 1594 if (bootverbose) { 1595 if_printf(ifp, "Unsupported media type %d\n", 1596 IFM_SUBTYPE(ifm->ifm_media)); 1597 } 1598 return EINVAL; 1599 } 1600 sc->ifm_flowctrl = ifm->ifm_media & IFM_ETH_FCMASK; 1601 1602 if (ifp->if_flags & IFF_RUNNING) 1603 emx_init(sc); 1604 1605 return (0); 1606 } 1607 1608 static int 1609 emx_encap(struct emx_txdata *tdata, struct mbuf **m_headp, 1610 int *segs_used, int *idx) 1611 { 1612 bus_dma_segment_t segs[EMX_MAX_SCATTER]; 1613 bus_dmamap_t map; 1614 struct emx_txbuf *tx_buffer, *tx_buffer_mapped; 1615 struct e1000_tx_desc *ctxd = NULL; 1616 struct mbuf *m_head = *m_headp; 1617 uint32_t txd_upper, txd_lower, cmd = 0; 1618 int maxsegs, nsegs, i, j, first, last = 0, error; 1619 1620 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 1621 error = emx_tso_pullup(tdata, m_headp); 1622 if (error) 1623 return error; 1624 m_head = *m_headp; 1625 } 1626 1627 txd_upper = txd_lower = 0; 1628 1629 /* 1630 * Capture the first descriptor index, this descriptor 1631 * will have the index of the EOP which is the only one 1632 * that now gets a DONE bit writeback. 1633 */ 1634 first = tdata->next_avail_tx_desc; 1635 tx_buffer = &tdata->tx_buf[first]; 1636 tx_buffer_mapped = tx_buffer; 1637 map = tx_buffer->map; 1638 1639 maxsegs = tdata->num_tx_desc_avail - EMX_TX_RESERVED; 1640 KASSERT(maxsegs >= tdata->spare_tx_desc, ("not enough spare TX desc")); 1641 if (maxsegs > EMX_MAX_SCATTER) 1642 maxsegs = EMX_MAX_SCATTER; 1643 1644 error = bus_dmamap_load_mbuf_defrag(tdata->txtag, map, m_headp, 1645 segs, maxsegs, &nsegs, BUS_DMA_NOWAIT); 1646 if (error) { 1647 m_freem(*m_headp); 1648 *m_headp = NULL; 1649 return error; 1650 } 1651 bus_dmamap_sync(tdata->txtag, map, BUS_DMASYNC_PREWRITE); 1652 1653 m_head = *m_headp; 1654 tdata->tx_nsegs += nsegs; 1655 *segs_used += nsegs; 1656 1657 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 1658 /* TSO will consume one TX desc */ 1659 i = emx_tso_setup(tdata, m_head, &txd_upper, &txd_lower); 1660 tdata->tx_nsegs += i; 1661 *segs_used += i; 1662 } else if (m_head->m_pkthdr.csum_flags & EMX_CSUM_FEATURES) { 1663 /* TX csum offloading will consume one TX desc */ 1664 i = emx_txcsum(tdata, m_head, &txd_upper, &txd_lower); 1665 tdata->tx_nsegs += i; 1666 *segs_used += i; 1667 } 1668 1669 /* Handle VLAN tag */ 1670 if (m_head->m_flags & M_VLANTAG) { 1671 /* Set the vlan id. 
*/ 1672 txd_upper |= (htole16(m_head->m_pkthdr.ether_vlantag) << 16); 1673 /* Tell hardware to add tag */ 1674 txd_lower |= htole32(E1000_TXD_CMD_VLE); 1675 } 1676 1677 i = tdata->next_avail_tx_desc; 1678 1679 /* Set up our transmit descriptors */ 1680 for (j = 0; j < nsegs; j++) { 1681 tx_buffer = &tdata->tx_buf[i]; 1682 ctxd = &tdata->tx_desc_base[i]; 1683 1684 ctxd->buffer_addr = htole64(segs[j].ds_addr); 1685 ctxd->lower.data = htole32(E1000_TXD_CMD_IFCS | 1686 txd_lower | segs[j].ds_len); 1687 ctxd->upper.data = htole32(txd_upper); 1688 1689 last = i; 1690 if (++i == tdata->num_tx_desc) 1691 i = 0; 1692 } 1693 1694 tdata->next_avail_tx_desc = i; 1695 1696 KKASSERT(tdata->num_tx_desc_avail > nsegs); 1697 tdata->num_tx_desc_avail -= nsegs; 1698 1699 tx_buffer->m_head = m_head; 1700 tx_buffer_mapped->map = tx_buffer->map; 1701 tx_buffer->map = map; 1702 1703 if (tdata->tx_nsegs >= tdata->tx_intr_nsegs) { 1704 tdata->tx_nsegs = 0; 1705 1706 /* 1707 * Report Status (RS) is turned on 1708 * every tx_intr_nsegs descriptors. 1709 */ 1710 cmd = E1000_TXD_CMD_RS; 1711 1712 /* 1713 * Keep track of the descriptor, which will 1714 * be written back by hardware. 1715 */ 1716 tdata->tx_dd[tdata->tx_dd_tail] = last; 1717 EMX_INC_TXDD_IDX(tdata->tx_dd_tail); 1718 KKASSERT(tdata->tx_dd_tail != tdata->tx_dd_head); 1719 } 1720 1721 /* 1722 * Last Descriptor of Packet needs End Of Packet (EOP) 1723 */ 1724 ctxd->lower.data |= htole32(E1000_TXD_CMD_EOP | cmd); 1725 1726 /* 1727 * Defer TDT updating, until enough descriptors are setup 1728 */ 1729 *idx = i; 1730 1731 #ifdef EMX_TSS_DEBUG 1732 tdata->tx_pkts++; 1733 #endif 1734 1735 return (0); 1736 } 1737 1738 static void 1739 emx_set_promisc(struct emx_softc *sc) 1740 { 1741 struct ifnet *ifp = &sc->arpcom.ac_if; 1742 uint32_t reg_rctl; 1743 1744 reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL); 1745 1746 if (ifp->if_flags & IFF_PROMISC) { 1747 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); 1748 /* Turn this on if you want to see bad packets */ 1749 if (emx_debug_sbp) 1750 reg_rctl |= E1000_RCTL_SBP; 1751 E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl); 1752 } else if (ifp->if_flags & IFF_ALLMULTI) { 1753 reg_rctl |= E1000_RCTL_MPE; 1754 reg_rctl &= ~E1000_RCTL_UPE; 1755 E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl); 1756 } 1757 } 1758 1759 static void 1760 emx_disable_promisc(struct emx_softc *sc) 1761 { 1762 uint32_t reg_rctl; 1763 1764 reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL); 1765 1766 reg_rctl &= ~E1000_RCTL_UPE; 1767 reg_rctl &= ~E1000_RCTL_MPE; 1768 reg_rctl &= ~E1000_RCTL_SBP; 1769 E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl); 1770 } 1771 1772 static void 1773 emx_set_multi(struct emx_softc *sc) 1774 { 1775 struct ifnet *ifp = &sc->arpcom.ac_if; 1776 struct ifmultiaddr *ifma; 1777 uint32_t reg_rctl = 0; 1778 uint8_t *mta; 1779 int mcnt = 0; 1780 1781 mta = sc->mta; 1782 bzero(mta, ETH_ADDR_LEN * EMX_MCAST_ADDR_MAX); 1783 1784 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1785 if (ifma->ifma_addr->sa_family != AF_LINK) 1786 continue; 1787 1788 if (mcnt == EMX_MCAST_ADDR_MAX) 1789 break; 1790 1791 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 1792 &mta[mcnt * ETHER_ADDR_LEN], ETHER_ADDR_LEN); 1793 mcnt++; 1794 } 1795 1796 if (mcnt >= EMX_MCAST_ADDR_MAX) { 1797 reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL); 1798 reg_rctl |= E1000_RCTL_MPE; 1799 E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl); 1800 } else { 1801 e1000_update_mc_addr_list(&sc->hw, mta, mcnt); 1802 } 1803 } 1804 1805 /* 1806 * This routine checks for link status and 
updates statistics. 1807 */ 1808 static void 1809 emx_timer(void *xsc) 1810 { 1811 struct emx_softc *sc = xsc; 1812 struct ifnet *ifp = &sc->arpcom.ac_if; 1813 1814 lwkt_serialize_enter(&sc->main_serialize); 1815 1816 emx_update_link_status(sc); 1817 emx_update_stats(sc); 1818 1819 /* Reset LAA into RAR[0] on 82571 */ 1820 if (e1000_get_laa_state_82571(&sc->hw) == TRUE) 1821 e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0); 1822 1823 if (emx_display_debug_stats && (ifp->if_flags & IFF_RUNNING)) 1824 emx_print_hw_stats(sc); 1825 1826 emx_smartspeed(sc); 1827 1828 callout_reset(&sc->timer, hz, emx_timer, sc); 1829 1830 lwkt_serialize_exit(&sc->main_serialize); 1831 } 1832 1833 static void 1834 emx_update_link_status(struct emx_softc *sc) 1835 { 1836 struct e1000_hw *hw = &sc->hw; 1837 struct ifnet *ifp = &sc->arpcom.ac_if; 1838 device_t dev = sc->dev; 1839 uint32_t link_check = 0; 1840 1841 /* Get the cached link value or read phy for real */ 1842 switch (hw->phy.media_type) { 1843 case e1000_media_type_copper: 1844 if (hw->mac.get_link_status) { 1845 /* Do the work to read phy */ 1846 e1000_check_for_link(hw); 1847 link_check = !hw->mac.get_link_status; 1848 if (link_check) /* ESB2 fix */ 1849 e1000_cfg_on_link_up(hw); 1850 } else { 1851 link_check = TRUE; 1852 } 1853 break; 1854 1855 case e1000_media_type_fiber: 1856 e1000_check_for_link(hw); 1857 link_check = E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU; 1858 break; 1859 1860 case e1000_media_type_internal_serdes: 1861 e1000_check_for_link(hw); 1862 link_check = sc->hw.mac.serdes_has_link; 1863 break; 1864 1865 case e1000_media_type_unknown: 1866 default: 1867 break; 1868 } 1869 1870 /* Now check for a transition */ 1871 if (link_check && sc->link_active == 0) { 1872 e1000_get_speed_and_duplex(hw, &sc->link_speed, 1873 &sc->link_duplex); 1874 1875 /* 1876 * Check if we should enable/disable SPEED_MODE bit on 1877 * 82571EB/82572EI 1878 */ 1879 if (sc->link_speed != SPEED_1000 && 1880 (hw->mac.type == e1000_82571 || 1881 hw->mac.type == e1000_82572)) { 1882 int tarc0; 1883 1884 tarc0 = E1000_READ_REG(hw, E1000_TARC(0)); 1885 tarc0 &= ~EMX_TARC_SPEED_MODE; 1886 E1000_WRITE_REG(hw, E1000_TARC(0), tarc0); 1887 } 1888 if (bootverbose) { 1889 char flowctrl[IFM_ETH_FC_STRLEN]; 1890 1891 e1000_fc2str(hw->fc.current_mode, flowctrl, 1892 sizeof(flowctrl)); 1893 device_printf(dev, "Link is up %d Mbps %s, " 1894 "Flow control: %s\n", 1895 sc->link_speed, 1896 (sc->link_duplex == FULL_DUPLEX) ? 
1897 "Full Duplex" : "Half Duplex", 1898 flowctrl); 1899 } 1900 if (sc->ifm_flowctrl & IFM_ETH_FORCEPAUSE) 1901 e1000_force_flowctrl(hw, sc->ifm_flowctrl); 1902 sc->link_active = 1; 1903 sc->smartspeed = 0; 1904 ifp->if_baudrate = sc->link_speed * 1000000; 1905 ifp->if_link_state = LINK_STATE_UP; 1906 if_link_state_change(ifp); 1907 } else if (!link_check && sc->link_active == 1) { 1908 ifp->if_baudrate = sc->link_speed = 0; 1909 sc->link_duplex = 0; 1910 if (bootverbose) 1911 device_printf(dev, "Link is Down\n"); 1912 sc->link_active = 0; 1913 ifp->if_link_state = LINK_STATE_DOWN; 1914 if_link_state_change(ifp); 1915 } 1916 } 1917 1918 static void 1919 emx_stop(struct emx_softc *sc) 1920 { 1921 struct ifnet *ifp = &sc->arpcom.ac_if; 1922 int i; 1923 1924 ASSERT_IFNET_SERIALIZED_ALL(ifp); 1925 1926 emx_disable_intr(sc); 1927 1928 callout_stop(&sc->timer); 1929 1930 ifp->if_flags &= ~IFF_RUNNING; 1931 for (i = 0; i < sc->tx_ring_cnt; ++i) { 1932 struct emx_txdata *tdata = &sc->tx_data[i]; 1933 1934 ifsq_clr_oactive(tdata->ifsq); 1935 ifsq_watchdog_stop(&tdata->tx_watchdog); 1936 tdata->tx_flags &= ~EMX_TXFLAG_ENABLED; 1937 } 1938 1939 /* 1940 * Disable multiple receive queues. 1941 * 1942 * NOTE: 1943 * We should disable multiple receive queues before 1944 * resetting the hardware. 1945 */ 1946 E1000_WRITE_REG(&sc->hw, E1000_MRQC, 0); 1947 1948 e1000_reset_hw(&sc->hw); 1949 E1000_WRITE_REG(&sc->hw, E1000_WUC, 0); 1950 1951 for (i = 0; i < sc->tx_ring_cnt; ++i) 1952 emx_free_tx_ring(&sc->tx_data[i]); 1953 for (i = 0; i < sc->rx_ring_cnt; ++i) 1954 emx_free_rx_ring(&sc->rx_data[i]); 1955 } 1956 1957 static int 1958 emx_reset(struct emx_softc *sc) 1959 { 1960 device_t dev = sc->dev; 1961 uint16_t rx_buffer_size; 1962 uint32_t pba; 1963 1964 /* Set up smart power down as default off on newer adapters. */ 1965 if (!emx_smart_pwr_down && 1966 (sc->hw.mac.type == e1000_82571 || 1967 sc->hw.mac.type == e1000_82572)) { 1968 uint16_t phy_tmp = 0; 1969 1970 /* Speed up time to link by disabling smart power down. */ 1971 e1000_read_phy_reg(&sc->hw, 1972 IGP02E1000_PHY_POWER_MGMT, &phy_tmp); 1973 phy_tmp &= ~IGP02E1000_PM_SPD; 1974 e1000_write_phy_reg(&sc->hw, 1975 IGP02E1000_PHY_POWER_MGMT, phy_tmp); 1976 } 1977 1978 /* 1979 * Packet Buffer Allocation (PBA) 1980 * Writing PBA sets the receive portion of the buffer 1981 * the remainder is used for the transmit buffer. 1982 */ 1983 switch (sc->hw.mac.type) { 1984 /* Total Packet Buffer on these is 48K */ 1985 case e1000_82571: 1986 case e1000_82572: 1987 case e1000_80003es2lan: 1988 pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */ 1989 break; 1990 1991 case e1000_82573: /* 82573: Total Packet Buffer is 32K */ 1992 pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */ 1993 break; 1994 1995 case e1000_82574: 1996 pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */ 1997 break; 1998 1999 case e1000_pch_lpt: 2000 case e1000_pch_spt: 2001 pba = E1000_PBA_26K; 2002 break; 2003 2004 default: 2005 /* Devices before 82547 had a Packet Buffer of 64K. */ 2006 if (sc->hw.mac.max_frame_size > 8192) 2007 pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */ 2008 else 2009 pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */ 2010 } 2011 E1000_WRITE_REG(&sc->hw, E1000_PBA, pba); 2012 2013 /* 2014 * These parameters control the automatic generation (Tx) and 2015 * response (Rx) to Ethernet PAUSE frames. 2016 * - High water mark should allow for at least two frames to be 2017 * received after sending an XOFF. 2018 * - Low water mark works best when it is very near the high water mark. 
2019 * This allows the receiver to restart by sending XON when it has 2020 * drained a bit. Here we use an arbitary value of 1500 which will 2021 * restart after one full frame is pulled from the buffer. There 2022 * could be several smaller frames in the buffer and if so they will 2023 * not trigger the XON until their total number reduces the buffer 2024 * by 1500. 2025 * - The pause time is fairly large at 1000 x 512ns = 512 usec. 2026 */ 2027 rx_buffer_size = (E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff) << 10; 2028 2029 sc->hw.fc.high_water = rx_buffer_size - 2030 roundup2(sc->hw.mac.max_frame_size, 1024); 2031 sc->hw.fc.low_water = sc->hw.fc.high_water - 1500; 2032 2033 sc->hw.fc.pause_time = EMX_FC_PAUSE_TIME; 2034 sc->hw.fc.send_xon = TRUE; 2035 sc->hw.fc.requested_mode = e1000_ifmedia2fc(sc->ifm_flowctrl); 2036 2037 /* 2038 * Device specific overrides/settings 2039 */ 2040 if (sc->hw.mac.type == e1000_pch_lpt || 2041 sc->hw.mac.type == e1000_pch_spt) { 2042 sc->hw.fc.high_water = 0x5C20; 2043 sc->hw.fc.low_water = 0x5048; 2044 sc->hw.fc.pause_time = 0x0650; 2045 sc->hw.fc.refresh_time = 0x0400; 2046 /* Jumbos need adjusted PBA */ 2047 if (sc->arpcom.ac_if.if_mtu > ETHERMTU) 2048 E1000_WRITE_REG(&sc->hw, E1000_PBA, 12); 2049 else 2050 E1000_WRITE_REG(&sc->hw, E1000_PBA, 26); 2051 } else if (sc->hw.mac.type == e1000_80003es2lan) { 2052 sc->hw.fc.pause_time = 0xFFFF; 2053 } 2054 2055 /* Issue a global reset */ 2056 e1000_reset_hw(&sc->hw); 2057 E1000_WRITE_REG(&sc->hw, E1000_WUC, 0); 2058 emx_disable_aspm(sc); 2059 2060 if (e1000_init_hw(&sc->hw) < 0) { 2061 device_printf(dev, "Hardware Initialization Failed\n"); 2062 return (EIO); 2063 } 2064 2065 E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN); 2066 e1000_get_phy_info(&sc->hw); 2067 e1000_check_for_link(&sc->hw); 2068 2069 return (0); 2070 } 2071 2072 static void 2073 emx_setup_ifp(struct emx_softc *sc) 2074 { 2075 struct ifnet *ifp = &sc->arpcom.ac_if; 2076 int i; 2077 2078 if_initname(ifp, device_get_name(sc->dev), 2079 device_get_unit(sc->dev)); 2080 ifp->if_softc = sc; 2081 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2082 ifp->if_init = emx_init; 2083 ifp->if_ioctl = emx_ioctl; 2084 ifp->if_start = emx_start; 2085 #ifdef IFPOLL_ENABLE 2086 ifp->if_npoll = emx_npoll; 2087 #endif 2088 ifp->if_serialize = emx_serialize; 2089 ifp->if_deserialize = emx_deserialize; 2090 ifp->if_tryserialize = emx_tryserialize; 2091 #ifdef INVARIANTS 2092 ifp->if_serialize_assert = emx_serialize_assert; 2093 #endif 2094 2095 ifp->if_nmbclusters = sc->rx_ring_cnt * sc->rx_data[0].num_rx_desc; 2096 2097 ifq_set_maxlen(&ifp->if_snd, sc->tx_data[0].num_tx_desc - 1); 2098 ifq_set_ready(&ifp->if_snd); 2099 ifq_set_subq_cnt(&ifp->if_snd, sc->tx_ring_cnt); 2100 2101 ifp->if_mapsubq = ifq_mapsubq_mask; 2102 ifq_set_subq_mask(&ifp->if_snd, 0); 2103 2104 ether_ifattach(ifp, sc->hw.mac.addr, NULL); 2105 2106 ifp->if_capabilities = IFCAP_HWCSUM | 2107 IFCAP_VLAN_HWTAGGING | 2108 IFCAP_VLAN_MTU | 2109 IFCAP_TSO; 2110 if (sc->rx_ring_cnt > 1) 2111 ifp->if_capabilities |= IFCAP_RSS; 2112 ifp->if_capenable = ifp->if_capabilities; 2113 ifp->if_hwassist = EMX_CSUM_FEATURES | CSUM_TSO; 2114 2115 /* 2116 * Tell the upper layer(s) we support long frames. 
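 *
 * Setting if_data.ifi_hdrlen to sizeof(struct ether_vlan_header) below
 * (the 14 byte Ethernet header plus the 4 byte 802.1Q tag, 18 bytes in
 * total) is what lets the stack account for VLAN-tagged frames when it
 * computes the maximum frame size for this interface.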
2117 */ 2118 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 2119 2120 for (i = 0; i < sc->tx_ring_cnt; ++i) { 2121 struct ifaltq_subque *ifsq = ifq_get_subq(&ifp->if_snd, i); 2122 struct emx_txdata *tdata = &sc->tx_data[i]; 2123 2124 ifsq_set_cpuid(ifsq, rman_get_cpuid(sc->intr_res)); 2125 ifsq_set_priv(ifsq, tdata); 2126 ifsq_set_hw_serialize(ifsq, &tdata->tx_serialize); 2127 tdata->ifsq = ifsq; 2128 2129 ifsq_watchdog_init(&tdata->tx_watchdog, ifsq, emx_watchdog); 2130 } 2131 2132 /* 2133 * Specify the media types supported by this sc and register 2134 * callbacks to update media and link information 2135 */ 2136 if (sc->hw.phy.media_type == e1000_media_type_fiber || 2137 sc->hw.phy.media_type == e1000_media_type_internal_serdes) { 2138 ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX | IFM_FDX, 2139 0, NULL); 2140 } else { 2141 ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL); 2142 ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX, 2143 0, NULL); 2144 ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL); 2145 ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 2146 0, NULL); 2147 if (sc->hw.phy.type != e1000_phy_ife) { 2148 ifmedia_add(&sc->media, 2149 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); 2150 } 2151 } 2152 ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL); 2153 ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO | sc->ifm_flowctrl); 2154 } 2155 2156 /* 2157 * Workaround for SmartSpeed on 82541 and 82547 controllers 2158 */ 2159 static void 2160 emx_smartspeed(struct emx_softc *sc) 2161 { 2162 uint16_t phy_tmp; 2163 2164 if (sc->link_active || sc->hw.phy.type != e1000_phy_igp || 2165 sc->hw.mac.autoneg == 0 || 2166 (sc->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0) 2167 return; 2168 2169 if (sc->smartspeed == 0) { 2170 /* 2171 * If Master/Slave config fault is asserted twice, 2172 * we assume back-to-back 2173 */ 2174 e1000_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp); 2175 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT)) 2176 return; 2177 e1000_read_phy_reg(&sc->hw, PHY_1000T_STATUS, &phy_tmp); 2178 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) { 2179 e1000_read_phy_reg(&sc->hw, 2180 PHY_1000T_CTRL, &phy_tmp); 2181 if (phy_tmp & CR_1000T_MS_ENABLE) { 2182 phy_tmp &= ~CR_1000T_MS_ENABLE; 2183 e1000_write_phy_reg(&sc->hw, 2184 PHY_1000T_CTRL, phy_tmp); 2185 sc->smartspeed++; 2186 if (sc->hw.mac.autoneg && 2187 !e1000_phy_setup_autoneg(&sc->hw) && 2188 !e1000_read_phy_reg(&sc->hw, 2189 PHY_CONTROL, &phy_tmp)) { 2190 phy_tmp |= MII_CR_AUTO_NEG_EN | 2191 MII_CR_RESTART_AUTO_NEG; 2192 e1000_write_phy_reg(&sc->hw, 2193 PHY_CONTROL, phy_tmp); 2194 } 2195 } 2196 } 2197 return; 2198 } else if (sc->smartspeed == EMX_SMARTSPEED_DOWNSHIFT) { 2199 /* If still no link, perhaps using 2/3 pair cable */ 2200 e1000_read_phy_reg(&sc->hw, PHY_1000T_CTRL, &phy_tmp); 2201 phy_tmp |= CR_1000T_MS_ENABLE; 2202 e1000_write_phy_reg(&sc->hw, PHY_1000T_CTRL, phy_tmp); 2203 if (sc->hw.mac.autoneg && 2204 !e1000_phy_setup_autoneg(&sc->hw) && 2205 !e1000_read_phy_reg(&sc->hw, PHY_CONTROL, &phy_tmp)) { 2206 phy_tmp |= MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG; 2207 e1000_write_phy_reg(&sc->hw, PHY_CONTROL, phy_tmp); 2208 } 2209 } 2210 2211 /* Restart process after EMX_SMARTSPEED_MAX iterations */ 2212 if (sc->smartspeed++ == EMX_SMARTSPEED_MAX) 2213 sc->smartspeed = 0; 2214 } 2215 2216 static int 2217 emx_create_tx_ring(struct emx_txdata *tdata) 2218 { 2219 device_t dev = tdata->sc->dev; 2220 struct emx_txbuf *tx_buffer; 2221 int error, i, tsize, ntxd; 2222 2223 /* 2224 * 
Validate number of transmit descriptors. It must not exceed 2225 * hardware maximum, and must be multiple of E1000_DBA_ALIGN. 2226 */ 2227 ntxd = device_getenv_int(dev, "txd", emx_txd); 2228 if ((ntxd * sizeof(struct e1000_tx_desc)) % EMX_DBA_ALIGN != 0 || 2229 ntxd > EMX_MAX_TXD || ntxd < EMX_MIN_TXD) { 2230 device_printf(dev, "Using %d TX descriptors instead of %d!\n", 2231 EMX_DEFAULT_TXD, ntxd); 2232 tdata->num_tx_desc = EMX_DEFAULT_TXD; 2233 } else { 2234 tdata->num_tx_desc = ntxd; 2235 } 2236 2237 /* 2238 * Allocate Transmit Descriptor ring 2239 */ 2240 tsize = roundup2(tdata->num_tx_desc * sizeof(struct e1000_tx_desc), 2241 EMX_DBA_ALIGN); 2242 tdata->tx_desc_base = bus_dmamem_coherent_any(tdata->sc->parent_dtag, 2243 EMX_DBA_ALIGN, tsize, BUS_DMA_WAITOK, 2244 &tdata->tx_desc_dtag, &tdata->tx_desc_dmap, 2245 &tdata->tx_desc_paddr); 2246 if (tdata->tx_desc_base == NULL) { 2247 device_printf(dev, "Unable to allocate tx_desc memory\n"); 2248 return ENOMEM; 2249 } 2250 2251 tsize = __VM_CACHELINE_ALIGN( 2252 sizeof(struct emx_txbuf) * tdata->num_tx_desc); 2253 tdata->tx_buf = kmalloc_cachealign(tsize, M_DEVBUF, M_WAITOK | M_ZERO); 2254 2255 /* 2256 * Create DMA tags for tx buffers 2257 */ 2258 error = bus_dma_tag_create(tdata->sc->parent_dtag, /* parent */ 2259 1, 0, /* alignment, bounds */ 2260 BUS_SPACE_MAXADDR, /* lowaddr */ 2261 BUS_SPACE_MAXADDR, /* highaddr */ 2262 NULL, NULL, /* filter, filterarg */ 2263 EMX_TSO_SIZE, /* maxsize */ 2264 EMX_MAX_SCATTER, /* nsegments */ 2265 EMX_MAX_SEGSIZE, /* maxsegsize */ 2266 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | 2267 BUS_DMA_ONEBPAGE, /* flags */ 2268 &tdata->txtag); 2269 if (error) { 2270 device_printf(dev, "Unable to allocate TX DMA tag\n"); 2271 kfree(tdata->tx_buf, M_DEVBUF); 2272 tdata->tx_buf = NULL; 2273 return error; 2274 } 2275 2276 /* 2277 * Create DMA maps for tx buffers 2278 */ 2279 for (i = 0; i < tdata->num_tx_desc; i++) { 2280 tx_buffer = &tdata->tx_buf[i]; 2281 2282 error = bus_dmamap_create(tdata->txtag, 2283 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, 2284 &tx_buffer->map); 2285 if (error) { 2286 device_printf(dev, "Unable to create TX DMA map\n"); 2287 emx_destroy_tx_ring(tdata, i); 2288 return error; 2289 } 2290 } 2291 2292 /* 2293 * Setup TX parameters 2294 */ 2295 tdata->spare_tx_desc = EMX_TX_SPARE; 2296 tdata->tx_wreg_nsegs = EMX_DEFAULT_TXWREG; 2297 2298 /* 2299 * Keep following relationship between spare_tx_desc, oact_tx_desc 2300 * and tx_intr_nsegs: 2301 * (spare_tx_desc + EMX_TX_RESERVED) <= 2302 * oact_tx_desc <= EMX_TX_OACTIVE_MAX <= tx_intr_nsegs 2303 */ 2304 tdata->oact_tx_desc = tdata->num_tx_desc / 8; 2305 if (tdata->oact_tx_desc > EMX_TX_OACTIVE_MAX) 2306 tdata->oact_tx_desc = EMX_TX_OACTIVE_MAX; 2307 if (tdata->oact_tx_desc < tdata->spare_tx_desc + EMX_TX_RESERVED) 2308 tdata->oact_tx_desc = tdata->spare_tx_desc + EMX_TX_RESERVED; 2309 2310 tdata->tx_intr_nsegs = tdata->num_tx_desc / 16; 2311 if (tdata->tx_intr_nsegs < tdata->oact_tx_desc) 2312 tdata->tx_intr_nsegs = tdata->oact_tx_desc; 2313 2314 /* 2315 * Pullup extra 4bytes into the first data segment for TSO, see: 2316 * 82571/82572 specification update errata #7 2317 * 2318 * Same applies to I217 (and maybe I218 and I219). 2319 * 2320 * NOTE: 2321 * 4bytes instead of 2bytes, which are mentioned in the errata, 2322 * are pulled; mainly to keep rest of the data properly aligned. 
2323 */ 2324 if (tdata->sc->hw.mac.type == e1000_82571 || 2325 tdata->sc->hw.mac.type == e1000_82572 || 2326 tdata->sc->hw.mac.type == e1000_pch_lpt || 2327 tdata->sc->hw.mac.type == e1000_pch_spt) 2328 tdata->tx_flags |= EMX_TXFLAG_TSO_PULLEX; 2329 2330 return (0); 2331 } 2332 2333 static void 2334 emx_init_tx_ring(struct emx_txdata *tdata) 2335 { 2336 /* Clear the old ring contents */ 2337 bzero(tdata->tx_desc_base, 2338 sizeof(struct e1000_tx_desc) * tdata->num_tx_desc); 2339 2340 /* Reset state */ 2341 tdata->next_avail_tx_desc = 0; 2342 tdata->next_tx_to_clean = 0; 2343 tdata->num_tx_desc_avail = tdata->num_tx_desc; 2344 2345 tdata->tx_flags |= EMX_TXFLAG_ENABLED; 2346 if (tdata->sc->tx_ring_inuse > 1) { 2347 tdata->tx_flags |= EMX_TXFLAG_FORCECTX; 2348 if (bootverbose) { 2349 if_printf(&tdata->sc->arpcom.ac_if, 2350 "TX %d force ctx setup\n", tdata->idx); 2351 } 2352 } 2353 } 2354 2355 static void 2356 emx_init_tx_unit(struct emx_softc *sc) 2357 { 2358 uint32_t tctl, tarc, tipg = 0, txdctl; 2359 int i; 2360 2361 for (i = 0; i < sc->tx_ring_inuse; ++i) { 2362 struct emx_txdata *tdata = &sc->tx_data[i]; 2363 uint64_t bus_addr; 2364 2365 /* Setup the Base and Length of the Tx Descriptor Ring */ 2366 bus_addr = tdata->tx_desc_paddr; 2367 E1000_WRITE_REG(&sc->hw, E1000_TDLEN(i), 2368 tdata->num_tx_desc * sizeof(struct e1000_tx_desc)); 2369 E1000_WRITE_REG(&sc->hw, E1000_TDBAH(i), 2370 (uint32_t)(bus_addr >> 32)); 2371 E1000_WRITE_REG(&sc->hw, E1000_TDBAL(i), 2372 (uint32_t)bus_addr); 2373 /* Setup the HW Tx Head and Tail descriptor pointers */ 2374 E1000_WRITE_REG(&sc->hw, E1000_TDT(i), 0); 2375 E1000_WRITE_REG(&sc->hw, E1000_TDH(i), 0); 2376 } 2377 2378 /* Set the default values for the Tx Inter Packet Gap timer */ 2379 switch (sc->hw.mac.type) { 2380 case e1000_80003es2lan: 2381 tipg = DEFAULT_82543_TIPG_IPGR1; 2382 tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 << 2383 E1000_TIPG_IPGR2_SHIFT; 2384 break; 2385 2386 default: 2387 if (sc->hw.phy.media_type == e1000_media_type_fiber || 2388 sc->hw.phy.media_type == e1000_media_type_internal_serdes) 2389 tipg = DEFAULT_82543_TIPG_IPGT_FIBER; 2390 else 2391 tipg = DEFAULT_82543_TIPG_IPGT_COPPER; 2392 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT; 2393 tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT; 2394 break; 2395 } 2396 2397 E1000_WRITE_REG(&sc->hw, E1000_TIPG, tipg); 2398 2399 /* NOTE: 0 is not allowed for TIDV */ 2400 E1000_WRITE_REG(&sc->hw, E1000_TIDV, 1); 2401 E1000_WRITE_REG(&sc->hw, E1000_TADV, 0); 2402 2403 /* 2404 * Errata workaround (obtained from Linux). This is necessary 2405 * to make multiple TX queues work on 82574. 2406 * XXX can't find it in any published errata though. 
2407 */ 2408 txdctl = E1000_READ_REG(&sc->hw, E1000_TXDCTL(0)); 2409 E1000_WRITE_REG(&sc->hw, E1000_TXDCTL(1), txdctl); 2410 2411 if (sc->hw.mac.type == e1000_82571 || 2412 sc->hw.mac.type == e1000_82572) { 2413 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0)); 2414 tarc |= EMX_TARC_SPEED_MODE; 2415 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc); 2416 } else if (sc->hw.mac.type == e1000_80003es2lan) { 2417 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0)); 2418 tarc |= 1; 2419 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc); 2420 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1)); 2421 tarc |= 1; 2422 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc); 2423 } 2424 2425 /* Program the Transmit Control Register */ 2426 tctl = E1000_READ_REG(&sc->hw, E1000_TCTL); 2427 tctl &= ~E1000_TCTL_CT; 2428 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN | 2429 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); 2430 tctl |= E1000_TCTL_MULR; 2431 2432 /* This write will effectively turn on the transmit unit. */ 2433 E1000_WRITE_REG(&sc->hw, E1000_TCTL, tctl); 2434 2435 if (sc->hw.mac.type == e1000_82571 || 2436 sc->hw.mac.type == e1000_82572 || 2437 sc->hw.mac.type == e1000_80003es2lan) { 2438 /* Bit 28 of TARC1 must be cleared when MULR is enabled */ 2439 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1)); 2440 tarc &= ~(1 << 28); 2441 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc); 2442 } 2443 2444 if (sc->tx_ring_inuse > 1) { 2445 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(0)); 2446 tarc &= ~EMX_TARC_COUNT_MASK; 2447 tarc |= 1; 2448 E1000_WRITE_REG(&sc->hw, E1000_TARC(0), tarc); 2449 2450 tarc = E1000_READ_REG(&sc->hw, E1000_TARC(1)); 2451 tarc &= ~EMX_TARC_COUNT_MASK; 2452 tarc |= 1; 2453 E1000_WRITE_REG(&sc->hw, E1000_TARC(1), tarc); 2454 } 2455 } 2456 2457 static void 2458 emx_destroy_tx_ring(struct emx_txdata *tdata, int ndesc) 2459 { 2460 struct emx_txbuf *tx_buffer; 2461 int i; 2462 2463 /* Free Transmit Descriptor ring */ 2464 if (tdata->tx_desc_base) { 2465 bus_dmamap_unload(tdata->tx_desc_dtag, tdata->tx_desc_dmap); 2466 bus_dmamem_free(tdata->tx_desc_dtag, tdata->tx_desc_base, 2467 tdata->tx_desc_dmap); 2468 bus_dma_tag_destroy(tdata->tx_desc_dtag); 2469 2470 tdata->tx_desc_base = NULL; 2471 } 2472 2473 if (tdata->tx_buf == NULL) 2474 return; 2475 2476 for (i = 0; i < ndesc; i++) { 2477 tx_buffer = &tdata->tx_buf[i]; 2478 2479 KKASSERT(tx_buffer->m_head == NULL); 2480 bus_dmamap_destroy(tdata->txtag, tx_buffer->map); 2481 } 2482 bus_dma_tag_destroy(tdata->txtag); 2483 2484 kfree(tdata->tx_buf, M_DEVBUF); 2485 tdata->tx_buf = NULL; 2486 } 2487 2488 /* 2489 * The offload context needs to be set when we transfer the first 2490 * packet of a particular protocol (TCP/UDP). This routine has been 2491 * enhanced to deal with inserted VLAN headers. 2492 * 2493 * If the new packet's ether header length, ip header length and 2494 * csum offloading type are same as the previous packet, we should 2495 * avoid allocating a new csum context descriptor; mainly to take 2496 * advantage of the pipeline effect of the TX data read request. 2497 * 2498 * This function returns number of TX descrptors allocated for 2499 * csum context. 
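 *
 * For example, a burst of TCP segments that all share the same Ethernet
 * and IP header lengths consumes only one context descriptor: the first
 * segment sets the context up, while the following segments reuse the
 * cached csum_txd_upper/csum_txd_lower values and return 0 here.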
2500 */ 2501 static int 2502 emx_txcsum(struct emx_txdata *tdata, struct mbuf *mp, 2503 uint32_t *txd_upper, uint32_t *txd_lower) 2504 { 2505 struct e1000_context_desc *TXD; 2506 int curr_txd, ehdrlen, csum_flags; 2507 uint32_t cmd, hdr_len, ip_hlen; 2508 2509 csum_flags = mp->m_pkthdr.csum_flags & EMX_CSUM_FEATURES; 2510 ip_hlen = mp->m_pkthdr.csum_iphlen; 2511 ehdrlen = mp->m_pkthdr.csum_lhlen; 2512 2513 if ((tdata->tx_flags & EMX_TXFLAG_FORCECTX) == 0 && 2514 tdata->csum_lhlen == ehdrlen && tdata->csum_iphlen == ip_hlen && 2515 tdata->csum_flags == csum_flags) { 2516 /* 2517 * Same csum offload context as the previous packets; 2518 * just return. 2519 */ 2520 *txd_upper = tdata->csum_txd_upper; 2521 *txd_lower = tdata->csum_txd_lower; 2522 return 0; 2523 } 2524 2525 /* 2526 * Setup a new csum offload context. 2527 */ 2528 2529 curr_txd = tdata->next_avail_tx_desc; 2530 TXD = (struct e1000_context_desc *)&tdata->tx_desc_base[curr_txd]; 2531 2532 cmd = 0; 2533 2534 /* Setup of IP header checksum. */ 2535 if (csum_flags & CSUM_IP) { 2536 /* 2537 * Start offset for header checksum calculation. 2538 * End offset for header checksum calculation. 2539 * Offset of place to put the checksum. 2540 */ 2541 TXD->lower_setup.ip_fields.ipcss = ehdrlen; 2542 TXD->lower_setup.ip_fields.ipcse = 2543 htole16(ehdrlen + ip_hlen - 1); 2544 TXD->lower_setup.ip_fields.ipcso = 2545 ehdrlen + offsetof(struct ip, ip_sum); 2546 cmd |= E1000_TXD_CMD_IP; 2547 *txd_upper |= E1000_TXD_POPTS_IXSM << 8; 2548 } 2549 hdr_len = ehdrlen + ip_hlen; 2550 2551 if (csum_flags & CSUM_TCP) { 2552 /* 2553 * Start offset for payload checksum calculation. 2554 * End offset for payload checksum calculation. 2555 * Offset of place to put the checksum. 2556 */ 2557 TXD->upper_setup.tcp_fields.tucss = hdr_len; 2558 TXD->upper_setup.tcp_fields.tucse = htole16(0); 2559 TXD->upper_setup.tcp_fields.tucso = 2560 hdr_len + offsetof(struct tcphdr, th_sum); 2561 cmd |= E1000_TXD_CMD_TCP; 2562 *txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2563 } else if (csum_flags & CSUM_UDP) { 2564 /* 2565 * Start offset for header checksum calculation. 2566 * End offset for header checksum calculation. 2567 * Offset of place to put the checksum. 
2568 */ 2569 TXD->upper_setup.tcp_fields.tucss = hdr_len; 2570 TXD->upper_setup.tcp_fields.tucse = htole16(0); 2571 TXD->upper_setup.tcp_fields.tucso = 2572 hdr_len + offsetof(struct udphdr, uh_sum); 2573 *txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2574 } 2575 2576 *txd_lower = E1000_TXD_CMD_DEXT | /* Extended descr type */ 2577 E1000_TXD_DTYP_D; /* Data descr */ 2578 2579 /* Save the information for this csum offloading context */ 2580 tdata->csum_lhlen = ehdrlen; 2581 tdata->csum_iphlen = ip_hlen; 2582 tdata->csum_flags = csum_flags; 2583 tdata->csum_txd_upper = *txd_upper; 2584 tdata->csum_txd_lower = *txd_lower; 2585 2586 TXD->tcp_seg_setup.data = htole32(0); 2587 TXD->cmd_and_length = 2588 htole32(E1000_TXD_CMD_IFCS | E1000_TXD_CMD_DEXT | cmd); 2589 2590 if (++curr_txd == tdata->num_tx_desc) 2591 curr_txd = 0; 2592 2593 KKASSERT(tdata->num_tx_desc_avail > 0); 2594 tdata->num_tx_desc_avail--; 2595 2596 tdata->next_avail_tx_desc = curr_txd; 2597 return 1; 2598 } 2599 2600 static void 2601 emx_txeof(struct emx_txdata *tdata) 2602 { 2603 struct emx_txbuf *tx_buffer; 2604 int first, num_avail; 2605 2606 if (tdata->tx_dd_head == tdata->tx_dd_tail) 2607 return; 2608 2609 if (tdata->num_tx_desc_avail == tdata->num_tx_desc) 2610 return; 2611 2612 num_avail = tdata->num_tx_desc_avail; 2613 first = tdata->next_tx_to_clean; 2614 2615 while (tdata->tx_dd_head != tdata->tx_dd_tail) { 2616 int dd_idx = tdata->tx_dd[tdata->tx_dd_head]; 2617 struct e1000_tx_desc *tx_desc; 2618 2619 tx_desc = &tdata->tx_desc_base[dd_idx]; 2620 if (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) { 2621 EMX_INC_TXDD_IDX(tdata->tx_dd_head); 2622 2623 if (++dd_idx == tdata->num_tx_desc) 2624 dd_idx = 0; 2625 2626 while (first != dd_idx) { 2627 logif(pkt_txclean); 2628 2629 num_avail++; 2630 2631 tx_buffer = &tdata->tx_buf[first]; 2632 if (tx_buffer->m_head) { 2633 bus_dmamap_unload(tdata->txtag, 2634 tx_buffer->map); 2635 m_freem(tx_buffer->m_head); 2636 tx_buffer->m_head = NULL; 2637 } 2638 2639 if (++first == tdata->num_tx_desc) 2640 first = 0; 2641 } 2642 } else { 2643 break; 2644 } 2645 } 2646 tdata->next_tx_to_clean = first; 2647 tdata->num_tx_desc_avail = num_avail; 2648 2649 if (tdata->tx_dd_head == tdata->tx_dd_tail) { 2650 tdata->tx_dd_head = 0; 2651 tdata->tx_dd_tail = 0; 2652 } 2653 2654 if (!EMX_IS_OACTIVE(tdata)) { 2655 ifsq_clr_oactive(tdata->ifsq); 2656 2657 /* All clean, turn off the timer */ 2658 if (tdata->num_tx_desc_avail == tdata->num_tx_desc) 2659 tdata->tx_watchdog.wd_timer = 0; 2660 } 2661 } 2662 2663 static void 2664 emx_tx_collect(struct emx_txdata *tdata) 2665 { 2666 struct emx_txbuf *tx_buffer; 2667 int tdh, first, num_avail, dd_idx = -1; 2668 2669 if (tdata->num_tx_desc_avail == tdata->num_tx_desc) 2670 return; 2671 2672 tdh = E1000_READ_REG(&tdata->sc->hw, E1000_TDH(tdata->idx)); 2673 if (tdh == tdata->next_tx_to_clean) 2674 return; 2675 2676 if (tdata->tx_dd_head != tdata->tx_dd_tail) 2677 dd_idx = tdata->tx_dd[tdata->tx_dd_head]; 2678 2679 num_avail = tdata->num_tx_desc_avail; 2680 first = tdata->next_tx_to_clean; 2681 2682 while (first != tdh) { 2683 logif(pkt_txclean); 2684 2685 num_avail++; 2686 2687 tx_buffer = &tdata->tx_buf[first]; 2688 if (tx_buffer->m_head) { 2689 bus_dmamap_unload(tdata->txtag, 2690 tx_buffer->map); 2691 m_freem(tx_buffer->m_head); 2692 tx_buffer->m_head = NULL; 2693 } 2694 2695 if (first == dd_idx) { 2696 EMX_INC_TXDD_IDX(tdata->tx_dd_head); 2697 if (tdata->tx_dd_head == tdata->tx_dd_tail) { 2698 tdata->tx_dd_head = 0; 2699 tdata->tx_dd_tail = 0; 2700 dd_idx = -1; 
2701 } else { 2702 dd_idx = tdata->tx_dd[tdata->tx_dd_head]; 2703 } 2704 } 2705 2706 if (++first == tdata->num_tx_desc) 2707 first = 0; 2708 } 2709 tdata->next_tx_to_clean = first; 2710 tdata->num_tx_desc_avail = num_avail; 2711 2712 if (!EMX_IS_OACTIVE(tdata)) { 2713 ifsq_clr_oactive(tdata->ifsq); 2714 2715 /* All clean, turn off the timer */ 2716 if (tdata->num_tx_desc_avail == tdata->num_tx_desc) 2717 tdata->tx_watchdog.wd_timer = 0; 2718 } 2719 } 2720 2721 /* 2722 * When the link is lost there may still be work in the TX ring 2723 * which would trigger a watchdog; rather than allow that, do an 2724 * attempted cleanup and then reinit here. Note that this has been 2725 * seen mostly with fiber adapters. 2726 */ 2727 static void 2728 emx_tx_purge(struct emx_softc *sc) 2729 { 2730 int i; 2731 2732 if (sc->link_active) 2733 return; 2734 2735 for (i = 0; i < sc->tx_ring_inuse; ++i) { 2736 struct emx_txdata *tdata = &sc->tx_data[i]; 2737 2738 if (tdata->tx_watchdog.wd_timer) { 2739 emx_tx_collect(tdata); 2740 if (tdata->tx_watchdog.wd_timer) { 2741 if_printf(&sc->arpcom.ac_if, 2742 "Link lost, TX pending, reinit\n"); 2743 emx_init(sc); 2744 return; 2745 } 2746 } 2747 } 2748 } 2749 2750 static int 2751 emx_newbuf(struct emx_rxdata *rdata, int i, int init) 2752 { 2753 struct mbuf *m; 2754 bus_dma_segment_t seg; 2755 bus_dmamap_t map; 2756 struct emx_rxbuf *rx_buffer; 2757 int error, nseg; 2758 2759 m = m_getcl(init ? M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR); 2760 if (m == NULL) { 2761 if (init) { 2762 if_printf(&rdata->sc->arpcom.ac_if, 2763 "Unable to allocate RX mbuf\n"); 2764 } 2765 return (ENOBUFS); 2766 } 2767 m->m_len = m->m_pkthdr.len = MCLBYTES; 2768 2769 if (rdata->sc->hw.mac.max_frame_size <= MCLBYTES - ETHER_ALIGN) 2770 m_adj(m, ETHER_ALIGN); 2771 2772 error = bus_dmamap_load_mbuf_segment(rdata->rxtag, 2773 rdata->rx_sparemap, m, 2774 &seg, 1, &nseg, BUS_DMA_NOWAIT); 2775 if (error) { 2776 m_freem(m); 2777 if (init) { 2778 if_printf(&rdata->sc->arpcom.ac_if, 2779 "Unable to load RX mbuf\n"); 2780 } 2781 return (error); 2782 } 2783 2784 rx_buffer = &rdata->rx_buf[i]; 2785 if (rx_buffer->m_head != NULL) 2786 bus_dmamap_unload(rdata->rxtag, rx_buffer->map); 2787 2788 map = rx_buffer->map; 2789 rx_buffer->map = rdata->rx_sparemap; 2790 rdata->rx_sparemap = map; 2791 2792 rx_buffer->m_head = m; 2793 rx_buffer->paddr = seg.ds_addr; 2794 2795 emx_setup_rxdesc(&rdata->rx_desc[i], rx_buffer); 2796 return (0); 2797 } 2798 2799 static int 2800 emx_create_rx_ring(struct emx_rxdata *rdata) 2801 { 2802 device_t dev = rdata->sc->dev; 2803 struct emx_rxbuf *rx_buffer; 2804 int i, error, rsize, nrxd; 2805 2806 /* 2807 * Validate number of receive descriptors. It must not exceed 2808 * hardware maximum, and must be a multiple of E1000_DBA_ALIGN.
2809 */ 2810 nrxd = device_getenv_int(dev, "rxd", emx_rxd); 2811 if ((nrxd * sizeof(emx_rxdesc_t)) % EMX_DBA_ALIGN != 0 || 2812 nrxd > EMX_MAX_RXD || nrxd < EMX_MIN_RXD) { 2813 device_printf(dev, "Using %d RX descriptors instead of %d!\n", 2814 EMX_DEFAULT_RXD, nrxd); 2815 rdata->num_rx_desc = EMX_DEFAULT_RXD; 2816 } else { 2817 rdata->num_rx_desc = nrxd; 2818 } 2819 2820 /* 2821 * Allocate Receive Descriptor ring 2822 */ 2823 rsize = roundup2(rdata->num_rx_desc * sizeof(emx_rxdesc_t), 2824 EMX_DBA_ALIGN); 2825 rdata->rx_desc = bus_dmamem_coherent_any(rdata->sc->parent_dtag, 2826 EMX_DBA_ALIGN, rsize, BUS_DMA_WAITOK, 2827 &rdata->rx_desc_dtag, &rdata->rx_desc_dmap, 2828 &rdata->rx_desc_paddr); 2829 if (rdata->rx_desc == NULL) { 2830 device_printf(dev, "Unable to allocate rx_desc memory\n"); 2831 return ENOMEM; 2832 } 2833 2834 rsize = __VM_CACHELINE_ALIGN( 2835 sizeof(struct emx_rxbuf) * rdata->num_rx_desc); 2836 rdata->rx_buf = kmalloc_cachealign(rsize, M_DEVBUF, M_WAITOK | M_ZERO); 2837 2838 /* 2839 * Create DMA tag for rx buffers 2840 */ 2841 error = bus_dma_tag_create(rdata->sc->parent_dtag, /* parent */ 2842 1, 0, /* alignment, bounds */ 2843 BUS_SPACE_MAXADDR, /* lowaddr */ 2844 BUS_SPACE_MAXADDR, /* highaddr */ 2845 NULL, NULL, /* filter, filterarg */ 2846 MCLBYTES, /* maxsize */ 2847 1, /* nsegments */ 2848 MCLBYTES, /* maxsegsize */ 2849 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, /* flags */ 2850 &rdata->rxtag); 2851 if (error) { 2852 device_printf(dev, "Unable to allocate RX DMA tag\n"); 2853 kfree(rdata->rx_buf, M_DEVBUF); 2854 rdata->rx_buf = NULL; 2855 return error; 2856 } 2857 2858 /* 2859 * Create spare DMA map for rx buffers 2860 */ 2861 error = bus_dmamap_create(rdata->rxtag, BUS_DMA_WAITOK, 2862 &rdata->rx_sparemap); 2863 if (error) { 2864 device_printf(dev, "Unable to create spare RX DMA map\n"); 2865 bus_dma_tag_destroy(rdata->rxtag); 2866 kfree(rdata->rx_buf, M_DEVBUF); 2867 rdata->rx_buf = NULL; 2868 return error; 2869 } 2870 2871 /* 2872 * Create DMA maps for rx buffers 2873 */ 2874 for (i = 0; i < rdata->num_rx_desc; i++) { 2875 rx_buffer = &rdata->rx_buf[i]; 2876 2877 error = bus_dmamap_create(rdata->rxtag, BUS_DMA_WAITOK, 2878 &rx_buffer->map); 2879 if (error) { 2880 device_printf(dev, "Unable to create RX DMA map\n"); 2881 emx_destroy_rx_ring(rdata, i); 2882 return error; 2883 } 2884 } 2885 return (0); 2886 } 2887 2888 static void 2889 emx_free_rx_ring(struct emx_rxdata *rdata) 2890 { 2891 int i; 2892 2893 for (i = 0; i < rdata->num_rx_desc; i++) { 2894 struct emx_rxbuf *rx_buffer = &rdata->rx_buf[i]; 2895 2896 if (rx_buffer->m_head != NULL) { 2897 bus_dmamap_unload(rdata->rxtag, rx_buffer->map); 2898 m_freem(rx_buffer->m_head); 2899 rx_buffer->m_head = NULL; 2900 } 2901 } 2902 2903 if (rdata->fmp != NULL) 2904 m_freem(rdata->fmp); 2905 rdata->fmp = NULL; 2906 rdata->lmp = NULL; 2907 } 2908 2909 static void 2910 emx_free_tx_ring(struct emx_txdata *tdata) 2911 { 2912 int i; 2913 2914 for (i = 0; i < tdata->num_tx_desc; i++) { 2915 struct emx_txbuf *tx_buffer = &tdata->tx_buf[i]; 2916 2917 if (tx_buffer->m_head != NULL) { 2918 bus_dmamap_unload(tdata->txtag, tx_buffer->map); 2919 m_freem(tx_buffer->m_head); 2920 tx_buffer->m_head = NULL; 2921 } 2922 } 2923 2924 tdata->tx_flags &= ~EMX_TXFLAG_FORCECTX; 2925 2926 tdata->csum_flags = 0; 2927 tdata->csum_lhlen = 0; 2928 tdata->csum_iphlen = 0; 2929 tdata->csum_thlen = 0; 2930 tdata->csum_mss = 0; 2931 tdata->csum_pktlen = 0; 2932 2933 tdata->tx_dd_head = 0; 2934 tdata->tx_dd_tail = 0; 2935 tdata->tx_nsegs = 0; 2936 } 2937 2938 
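/*
 * NOTE: emx_free_rx_ring()/emx_free_tx_ring() above only release the
 * mbufs and reset the per-ring software state; the DMA maps, tags and
 * descriptor memory stay allocated, so a stopped ring can simply be
 * repopulated again.  Roughly:
 *
 *	emx_free_rx_ring(rdata);		(drop mbufs, keep DMA resources)
 *	error = emx_init_rx_ring(rdata);	(refill every slot via emx_newbuf())
 *
 * The emx_destroy_*_ring() functions are the ones that actually tear
 * the DMA resources down, on detach or on an attach failure.
 */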
static int 2939 emx_init_rx_ring(struct emx_rxdata *rdata) 2940 { 2941 int i, error; 2942 2943 /* Reset descriptor ring */ 2944 bzero(rdata->rx_desc, sizeof(emx_rxdesc_t) * rdata->num_rx_desc); 2945 2946 /* Allocate new ones. */ 2947 for (i = 0; i < rdata->num_rx_desc; i++) { 2948 error = emx_newbuf(rdata, i, 1); 2949 if (error) 2950 return (error); 2951 } 2952 2953 /* Setup our descriptor pointers */ 2954 rdata->next_rx_desc_to_check = 0; 2955 2956 return (0); 2957 } 2958 2959 static void 2960 emx_init_rx_unit(struct emx_softc *sc) 2961 { 2962 struct ifnet *ifp = &sc->arpcom.ac_if; 2963 uint64_t bus_addr; 2964 uint32_t rctl, itr, rfctl; 2965 int i; 2966 2967 /* 2968 * Make sure receives are disabled while setting 2969 * up the descriptor ring 2970 */ 2971 rctl = E1000_READ_REG(&sc->hw, E1000_RCTL); 2972 E1000_WRITE_REG(&sc->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); 2973 2974 /* 2975 * Set the interrupt throttling rate. Value is calculated 2976 * as ITR = 1 / (INT_THROTTLE_CEIL * 256ns) 2977 */ 2978 if (sc->int_throttle_ceil) 2979 itr = 1000000000 / 256 / sc->int_throttle_ceil; 2980 else 2981 itr = 0; 2982 emx_set_itr(sc, itr); 2983 2984 /* Use extended RX descriptor */ 2985 rfctl = E1000_RFCTL_EXTEN; 2986 2987 /* Disable accelerated ackknowledge */ 2988 if (sc->hw.mac.type == e1000_82574) 2989 rfctl |= E1000_RFCTL_ACK_DIS; 2990 2991 E1000_WRITE_REG(&sc->hw, E1000_RFCTL, rfctl); 2992 2993 /* 2994 * Receive Checksum Offload for TCP and UDP 2995 * 2996 * Checksum offloading is also enabled if multiple receive 2997 * queue is to be supported, since we need it to figure out 2998 * packet type. 2999 */ 3000 if ((ifp->if_capenable & IFCAP_RXCSUM) || 3001 sc->rx_ring_cnt > 1) { 3002 uint32_t rxcsum; 3003 3004 rxcsum = E1000_READ_REG(&sc->hw, E1000_RXCSUM); 3005 3006 /* 3007 * NOTE: 3008 * PCSD must be enabled to enable multiple 3009 * receive queues. 3010 */ 3011 rxcsum |= E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL | 3012 E1000_RXCSUM_PCSD; 3013 E1000_WRITE_REG(&sc->hw, E1000_RXCSUM, rxcsum); 3014 } 3015 3016 /* 3017 * Configure multiple receive queue (RSS) 3018 */ 3019 if (sc->rx_ring_cnt > 1) { 3020 uint8_t key[EMX_NRSSRK * EMX_RSSRK_SIZE]; 3021 uint32_t reta; 3022 3023 KASSERT(sc->rx_ring_cnt == EMX_NRX_RING, 3024 ("invalid number of RX ring (%d)", sc->rx_ring_cnt)); 3025 3026 /* 3027 * NOTE: 3028 * When we reach here, RSS has already been disabled 3029 * in emx_stop(), so we could safely configure RSS key 3030 * and redirect table. 3031 */ 3032 3033 /* 3034 * Configure RSS key 3035 */ 3036 toeplitz_get_key(key, sizeof(key)); 3037 for (i = 0; i < EMX_NRSSRK; ++i) { 3038 uint32_t rssrk; 3039 3040 rssrk = EMX_RSSRK_VAL(key, i); 3041 EMX_RSS_DPRINTF(sc, 1, "rssrk%d 0x%08x\n", i, rssrk); 3042 3043 E1000_WRITE_REG(&sc->hw, E1000_RSSRK(i), rssrk); 3044 } 3045 3046 /* 3047 * Configure RSS redirect table in following fashion: 3048 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)] 3049 */ 3050 reta = 0; 3051 for (i = 0; i < EMX_RETA_SIZE; ++i) { 3052 uint32_t q; 3053 3054 q = (i % sc->rx_ring_cnt) << EMX_RETA_RINGIDX_SHIFT; 3055 reta |= q << (8 * i); 3056 } 3057 EMX_RSS_DPRINTF(sc, 1, "reta 0x%08x\n", reta); 3058 3059 for (i = 0; i < EMX_NRETA; ++i) 3060 E1000_WRITE_REG(&sc->hw, E1000_RETA(i), reta); 3061 3062 /* 3063 * Enable multiple receive queues. 3064 * Enable IPv4 RSS standard hash functions. 3065 * Disable RSS interrupt. 
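 *
 * With rx_ring_cnt == 2 the redirect table built above alternates
 * between ring 0 and ring 1 for consecutive entries (conceptually
 * rdr_table[] = { 0, 1, 0, 1, ... }), so consecutive hash values are
 * spread evenly across both RX rings.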
3066 */ 3067 E1000_WRITE_REG(&sc->hw, E1000_MRQC, 3068 E1000_MRQC_ENABLE_RSS_2Q | 3069 E1000_MRQC_RSS_FIELD_IPV4_TCP | 3070 E1000_MRQC_RSS_FIELD_IPV4); 3071 } 3072 3073 /* 3074 * XXX TEMPORARY WORKAROUND: on some systems with 82573 3075 * long latencies are observed, like Lenovo X60. This 3076 * change eliminates the problem, but since having positive 3077 * values in RDTR is a known source of problems on other 3078 * platforms another solution is being sought. 3079 */ 3080 if (emx_82573_workaround && sc->hw.mac.type == e1000_82573) { 3081 E1000_WRITE_REG(&sc->hw, E1000_RADV, EMX_RADV_82573); 3082 E1000_WRITE_REG(&sc->hw, E1000_RDTR, EMX_RDTR_82573); 3083 } 3084 3085 for (i = 0; i < sc->rx_ring_cnt; ++i) { 3086 struct emx_rxdata *rdata = &sc->rx_data[i]; 3087 3088 /* 3089 * Setup the Base and Length of the Rx Descriptor Ring 3090 */ 3091 bus_addr = rdata->rx_desc_paddr; 3092 E1000_WRITE_REG(&sc->hw, E1000_RDLEN(i), 3093 rdata->num_rx_desc * sizeof(emx_rxdesc_t)); 3094 E1000_WRITE_REG(&sc->hw, E1000_RDBAH(i), 3095 (uint32_t)(bus_addr >> 32)); 3096 E1000_WRITE_REG(&sc->hw, E1000_RDBAL(i), 3097 (uint32_t)bus_addr); 3098 3099 /* 3100 * Setup the HW Rx Head and Tail Descriptor Pointers 3101 */ 3102 E1000_WRITE_REG(&sc->hw, E1000_RDH(i), 0); 3103 E1000_WRITE_REG(&sc->hw, E1000_RDT(i), 3104 sc->rx_data[i].num_rx_desc - 1); 3105 } 3106 3107 if (sc->hw.mac.type >= e1000_pch2lan) { 3108 if (ifp->if_mtu > ETHERMTU) 3109 e1000_lv_jumbo_workaround_ich8lan(&sc->hw, TRUE); 3110 else 3111 e1000_lv_jumbo_workaround_ich8lan(&sc->hw, FALSE); 3112 } 3113 3114 /* Setup the Receive Control Register */ 3115 rctl &= ~(3 << E1000_RCTL_MO_SHIFT); 3116 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO | 3117 E1000_RCTL_RDMTS_HALF | E1000_RCTL_SECRC | 3118 (sc->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); 3119 3120 /* Make sure VLAN Filters are off */ 3121 rctl &= ~E1000_RCTL_VFE; 3122 3123 /* Don't store bad paket */ 3124 rctl &= ~E1000_RCTL_SBP; 3125 3126 /* MCLBYTES */ 3127 rctl |= E1000_RCTL_SZ_2048; 3128 3129 if (ifp->if_mtu > ETHERMTU) 3130 rctl |= E1000_RCTL_LPE; 3131 else 3132 rctl &= ~E1000_RCTL_LPE; 3133 3134 /* Enable Receives */ 3135 E1000_WRITE_REG(&sc->hw, E1000_RCTL, rctl); 3136 } 3137 3138 static void 3139 emx_destroy_rx_ring(struct emx_rxdata *rdata, int ndesc) 3140 { 3141 struct emx_rxbuf *rx_buffer; 3142 int i; 3143 3144 /* Free Receive Descriptor ring */ 3145 if (rdata->rx_desc) { 3146 bus_dmamap_unload(rdata->rx_desc_dtag, rdata->rx_desc_dmap); 3147 bus_dmamem_free(rdata->rx_desc_dtag, rdata->rx_desc, 3148 rdata->rx_desc_dmap); 3149 bus_dma_tag_destroy(rdata->rx_desc_dtag); 3150 3151 rdata->rx_desc = NULL; 3152 } 3153 3154 if (rdata->rx_buf == NULL) 3155 return; 3156 3157 for (i = 0; i < ndesc; i++) { 3158 rx_buffer = &rdata->rx_buf[i]; 3159 3160 KKASSERT(rx_buffer->m_head == NULL); 3161 bus_dmamap_destroy(rdata->rxtag, rx_buffer->map); 3162 } 3163 bus_dmamap_destroy(rdata->rxtag, rdata->rx_sparemap); 3164 bus_dma_tag_destroy(rdata->rxtag); 3165 3166 kfree(rdata->rx_buf, M_DEVBUF); 3167 rdata->rx_buf = NULL; 3168 } 3169 3170 static void 3171 emx_rxeof(struct emx_rxdata *rdata, int count) 3172 { 3173 struct ifnet *ifp = &rdata->sc->arpcom.ac_if; 3174 uint32_t staterr; 3175 emx_rxdesc_t *current_desc; 3176 struct mbuf *mp; 3177 int i, cpuid = mycpuid; 3178 3179 i = rdata->next_rx_desc_to_check; 3180 current_desc = &rdata->rx_desc[i]; 3181 staterr = le32toh(current_desc->rxd_staterr); 3182 3183 if (!(staterr & E1000_RXD_STAT_DD)) 3184 return; 3185 3186 while ((staterr & E1000_RXD_STAT_DD) 
&& count != 0) { 3187 struct pktinfo *pi = NULL, pi0; 3188 struct emx_rxbuf *rx_buf = &rdata->rx_buf[i]; 3189 struct mbuf *m = NULL; 3190 int eop, len; 3191 3192 logif(pkt_receive); 3193 3194 mp = rx_buf->m_head; 3195 3196 /* 3197 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT 3198 * needs to access the last received byte in the mbuf. 3199 */ 3200 bus_dmamap_sync(rdata->rxtag, rx_buf->map, 3201 BUS_DMASYNC_POSTREAD); 3202 3203 len = le16toh(current_desc->rxd_length); 3204 if (staterr & E1000_RXD_STAT_EOP) { 3205 count--; 3206 eop = 1; 3207 } else { 3208 eop = 0; 3209 } 3210 3211 if (!(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) { 3212 uint16_t vlan = 0; 3213 uint32_t mrq, rss_hash; 3214 3215 /* 3216 * Save the necessary information 3217 * before emx_newbuf() destroys it. 3218 */ 3219 if ((staterr & E1000_RXD_STAT_VP) && eop) 3220 vlan = le16toh(current_desc->rxd_vlan); 3221 3222 mrq = le32toh(current_desc->rxd_mrq); 3223 rss_hash = le32toh(current_desc->rxd_rss); 3224 3225 EMX_RSS_DPRINTF(rdata->sc, 10, 3226 "ring%d, mrq 0x%08x, rss_hash 0x%08x\n", 3227 rdata->idx, mrq, rss_hash); 3228 3229 if (emx_newbuf(rdata, i, 0) != 0) { 3230 IFNET_STAT_INC(ifp, iqdrops, 1); 3231 goto discard; 3232 } 3233 3234 /* Assign correct length to the current fragment */ 3235 mp->m_len = len; 3236 3237 if (rdata->fmp == NULL) { 3238 mp->m_pkthdr.len = len; 3239 rdata->fmp = mp; /* Store the first mbuf */ 3240 rdata->lmp = mp; 3241 } else { 3242 /* 3243 * Chain mbufs together 3244 */ 3245 rdata->lmp->m_next = mp; 3246 rdata->lmp = rdata->lmp->m_next; 3247 rdata->fmp->m_pkthdr.len += len; 3248 } 3249 3250 if (eop) { 3251 rdata->fmp->m_pkthdr.rcvif = ifp; 3252 IFNET_STAT_INC(ifp, ipackets, 1); 3253 3254 if (ifp->if_capenable & IFCAP_RXCSUM) 3255 emx_rxcsum(staterr, rdata->fmp); 3256 3257 if (staterr & E1000_RXD_STAT_VP) { 3258 rdata->fmp->m_pkthdr.ether_vlantag = 3259 vlan; 3260 rdata->fmp->m_flags |= M_VLANTAG; 3261 } 3262 m = rdata->fmp; 3263 rdata->fmp = NULL; 3264 rdata->lmp = NULL; 3265 3266 if (ifp->if_capenable & IFCAP_RSS) { 3267 pi = emx_rssinfo(m, &pi0, mrq, 3268 rss_hash, staterr); 3269 } 3270 #ifdef EMX_RSS_DEBUG 3271 rdata->rx_pkts++; 3272 #endif 3273 } 3274 } else { 3275 IFNET_STAT_INC(ifp, ierrors, 1); 3276 discard: 3277 emx_setup_rxdesc(current_desc, rx_buf); 3278 if (rdata->fmp != NULL) { 3279 m_freem(rdata->fmp); 3280 rdata->fmp = NULL; 3281 rdata->lmp = NULL; 3282 } 3283 m = NULL; 3284 } 3285 3286 if (m != NULL) 3287 ifp->if_input(ifp, m, pi, cpuid); 3288 3289 /* Advance our pointers to the next descriptor. */ 3290 if (++i == rdata->num_rx_desc) 3291 i = 0; 3292 3293 current_desc = &rdata->rx_desc[i]; 3294 staterr = le32toh(current_desc->rxd_staterr); 3295 } 3296 rdata->next_rx_desc_to_check = i; 3297 3298 /* Advance the E1000's Receive Queue "Tail Pointer".
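 * The tail is left pointing at the last descriptor that was handed back
 * to the hardware, one entry behind next_rx_desc_to_check; since the
 * hardware never writes the descriptor RDT points at, it cannot wrap
 * around onto a descriptor the driver has not processed yet.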
*/ 3299 if (--i < 0) 3300 i = rdata->num_rx_desc - 1; 3301 E1000_WRITE_REG(&rdata->sc->hw, E1000_RDT(rdata->idx), i); 3302 } 3303 3304 static void 3305 emx_enable_intr(struct emx_softc *sc) 3306 { 3307 uint32_t ims_mask = IMS_ENABLE_MASK; 3308 3309 lwkt_serialize_handler_enable(&sc->main_serialize); 3310 3311 #if 0 3312 if (sc->hw.mac.type == e1000_82574) { 3313 E1000_WRITE_REG(hw, EMX_EIAC, EM_MSIX_MASK); 3314 ims_mask |= EM_MSIX_MASK; 3315 } 3316 #endif 3317 E1000_WRITE_REG(&sc->hw, E1000_IMS, ims_mask); 3318 } 3319 3320 static void 3321 emx_disable_intr(struct emx_softc *sc) 3322 { 3323 if (sc->hw.mac.type == e1000_82574) 3324 E1000_WRITE_REG(&sc->hw, EMX_EIAC, 0); 3325 E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff); 3326 3327 lwkt_serialize_handler_disable(&sc->main_serialize); 3328 } 3329 3330 /* 3331 * Bit of a misnomer, what this really means is 3332 * to enable OS management of the system... aka 3333 * to disable special hardware management features 3334 */ 3335 static void 3336 emx_get_mgmt(struct emx_softc *sc) 3337 { 3338 /* A shared code workaround */ 3339 if (sc->flags & EMX_FLAG_HAS_MGMT) { 3340 int manc2h = E1000_READ_REG(&sc->hw, E1000_MANC2H); 3341 int manc = E1000_READ_REG(&sc->hw, E1000_MANC); 3342 3343 /* disable hardware interception of ARP */ 3344 manc &= ~(E1000_MANC_ARP_EN); 3345 3346 /* enable receiving management packets to the host */ 3347 manc |= E1000_MANC_EN_MNG2HOST; 3348 #define E1000_MNG2HOST_PORT_623 (1 << 5) 3349 #define E1000_MNG2HOST_PORT_664 (1 << 6) 3350 manc2h |= E1000_MNG2HOST_PORT_623; 3351 manc2h |= E1000_MNG2HOST_PORT_664; 3352 E1000_WRITE_REG(&sc->hw, E1000_MANC2H, manc2h); 3353 3354 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc); 3355 } 3356 } 3357 3358 /* 3359 * Give control back to hardware management 3360 * controller if there is one. 3361 */ 3362 static void 3363 emx_rel_mgmt(struct emx_softc *sc) 3364 { 3365 if (sc->flags & EMX_FLAG_HAS_MGMT) { 3366 int manc = E1000_READ_REG(&sc->hw, E1000_MANC); 3367 3368 /* re-enable hardware interception of ARP */ 3369 manc |= E1000_MANC_ARP_EN; 3370 manc &= ~E1000_MANC_EN_MNG2HOST; 3371 3372 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc); 3373 } 3374 } 3375 3376 /* 3377 * emx_get_hw_control() sets {CTRL_EXT|FWSM}:DRV_LOAD bit. 3378 * For ASF and Pass Through versions of f/w this means that 3379 * the driver is loaded. For AMT version (only with 82573) 3380 * of the f/w this means that the network i/f is open. 3381 */ 3382 static void 3383 emx_get_hw_control(struct emx_softc *sc) 3384 { 3385 /* Let firmware know the driver has taken over */ 3386 if (sc->hw.mac.type == e1000_82573) { 3387 uint32_t swsm; 3388 3389 swsm = E1000_READ_REG(&sc->hw, E1000_SWSM); 3390 E1000_WRITE_REG(&sc->hw, E1000_SWSM, 3391 swsm | E1000_SWSM_DRV_LOAD); 3392 } else { 3393 uint32_t ctrl_ext; 3394 3395 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT); 3396 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, 3397 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); 3398 } 3399 sc->flags |= EMX_FLAG_HW_CTRL; 3400 } 3401 3402 /* 3403 * emx_rel_hw_control() resets {CTRL_EXT|FWSM}:DRV_LOAD bit. 3404 * For ASF and Pass Through versions of f/w this means that the 3405 * driver is no longer loaded. For AMT version (only with 82573) 3406 * of the f/w this means that the network i/f is closed. 
3407 */ 3408 static void 3409 emx_rel_hw_control(struct emx_softc *sc) 3410 { 3411 if ((sc->flags & EMX_FLAG_HW_CTRL) == 0) 3412 return; 3413 sc->flags &= ~EMX_FLAG_HW_CTRL; 3414 3415 /* Let firmware take over control of h/w */ 3416 if (sc->hw.mac.type == e1000_82573) { 3417 uint32_t swsm; 3418 3419 swsm = E1000_READ_REG(&sc->hw, E1000_SWSM); 3420 E1000_WRITE_REG(&sc->hw, E1000_SWSM, 3421 swsm & ~E1000_SWSM_DRV_LOAD); 3422 } else { 3423 uint32_t ctrl_ext; 3424 3425 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT); 3426 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, 3427 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); 3428 } 3429 } 3430 3431 static int 3432 emx_is_valid_eaddr(const uint8_t *addr) 3433 { 3434 char zero_addr[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 }; 3435 3436 if ((addr[0] & 1) || !bcmp(addr, zero_addr, ETHER_ADDR_LEN)) 3437 return (FALSE); 3438 3439 return (TRUE); 3440 } 3441 3442 /* 3443 * Enable PCI Wake On LAN capability 3444 */ 3445 void 3446 emx_enable_wol(device_t dev) 3447 { 3448 uint16_t cap, status; 3449 uint8_t id; 3450 3451 /* First find the capabilities pointer */ 3452 cap = pci_read_config(dev, PCIR_CAP_PTR, 2); 3453 3454 /* Read the PM Capabilities */ 3455 id = pci_read_config(dev, cap, 1); 3456 if (id != PCIY_PMG) /* Something wrong */ 3457 return; 3458 3459 /* 3460 * OK, we have the power capabilities, 3461 * so now get the status register 3462 */ 3463 cap += PCIR_POWER_STATUS; 3464 status = pci_read_config(dev, cap, 2); 3465 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 3466 pci_write_config(dev, cap, status, 2); 3467 } 3468 3469 static void 3470 emx_update_stats(struct emx_softc *sc) 3471 { 3472 struct ifnet *ifp = &sc->arpcom.ac_if; 3473 3474 if (sc->hw.phy.media_type == e1000_media_type_copper || 3475 (E1000_READ_REG(&sc->hw, E1000_STATUS) & E1000_STATUS_LU)) { 3476 sc->stats.symerrs += E1000_READ_REG(&sc->hw, E1000_SYMERRS); 3477 sc->stats.sec += E1000_READ_REG(&sc->hw, E1000_SEC); 3478 } 3479 sc->stats.crcerrs += E1000_READ_REG(&sc->hw, E1000_CRCERRS); 3480 sc->stats.mpc += E1000_READ_REG(&sc->hw, E1000_MPC); 3481 sc->stats.scc += E1000_READ_REG(&sc->hw, E1000_SCC); 3482 sc->stats.ecol += E1000_READ_REG(&sc->hw, E1000_ECOL); 3483 3484 sc->stats.mcc += E1000_READ_REG(&sc->hw, E1000_MCC); 3485 sc->stats.latecol += E1000_READ_REG(&sc->hw, E1000_LATECOL); 3486 sc->stats.colc += E1000_READ_REG(&sc->hw, E1000_COLC); 3487 sc->stats.dc += E1000_READ_REG(&sc->hw, E1000_DC); 3488 sc->stats.rlec += E1000_READ_REG(&sc->hw, E1000_RLEC); 3489 sc->stats.xonrxc += E1000_READ_REG(&sc->hw, E1000_XONRXC); 3490 sc->stats.xontxc += E1000_READ_REG(&sc->hw, E1000_XONTXC); 3491 sc->stats.xoffrxc += E1000_READ_REG(&sc->hw, E1000_XOFFRXC); 3492 sc->stats.xofftxc += E1000_READ_REG(&sc->hw, E1000_XOFFTXC); 3493 sc->stats.fcruc += E1000_READ_REG(&sc->hw, E1000_FCRUC); 3494 sc->stats.prc64 += E1000_READ_REG(&sc->hw, E1000_PRC64); 3495 sc->stats.prc127 += E1000_READ_REG(&sc->hw, E1000_PRC127); 3496 sc->stats.prc255 += E1000_READ_REG(&sc->hw, E1000_PRC255); 3497 sc->stats.prc511 += E1000_READ_REG(&sc->hw, E1000_PRC511); 3498 sc->stats.prc1023 += E1000_READ_REG(&sc->hw, E1000_PRC1023); 3499 sc->stats.prc1522 += E1000_READ_REG(&sc->hw, E1000_PRC1522); 3500 sc->stats.gprc += E1000_READ_REG(&sc->hw, E1000_GPRC); 3501 sc->stats.bprc += E1000_READ_REG(&sc->hw, E1000_BPRC); 3502 sc->stats.mprc += E1000_READ_REG(&sc->hw, E1000_MPRC); 3503 sc->stats.gptc += E1000_READ_REG(&sc->hw, E1000_GPTC); 3504 3505 /* For the 64-bit byte counters the low dword must be read first.
*/ 3506 /* Both registers clear on the read of the high dword */ 3507 3508 sc->stats.gorc += E1000_READ_REG(&sc->hw, E1000_GORCH); 3509 sc->stats.gotc += E1000_READ_REG(&sc->hw, E1000_GOTCH); 3510 3511 sc->stats.rnbc += E1000_READ_REG(&sc->hw, E1000_RNBC); 3512 sc->stats.ruc += E1000_READ_REG(&sc->hw, E1000_RUC); 3513 sc->stats.rfc += E1000_READ_REG(&sc->hw, E1000_RFC); 3514 sc->stats.roc += E1000_READ_REG(&sc->hw, E1000_ROC); 3515 sc->stats.rjc += E1000_READ_REG(&sc->hw, E1000_RJC); 3516 3517 sc->stats.tor += E1000_READ_REG(&sc->hw, E1000_TORH); 3518 sc->stats.tot += E1000_READ_REG(&sc->hw, E1000_TOTH); 3519 3520 sc->stats.tpr += E1000_READ_REG(&sc->hw, E1000_TPR); 3521 sc->stats.tpt += E1000_READ_REG(&sc->hw, E1000_TPT); 3522 sc->stats.ptc64 += E1000_READ_REG(&sc->hw, E1000_PTC64); 3523 sc->stats.ptc127 += E1000_READ_REG(&sc->hw, E1000_PTC127); 3524 sc->stats.ptc255 += E1000_READ_REG(&sc->hw, E1000_PTC255); 3525 sc->stats.ptc511 += E1000_READ_REG(&sc->hw, E1000_PTC511); 3526 sc->stats.ptc1023 += E1000_READ_REG(&sc->hw, E1000_PTC1023); 3527 sc->stats.ptc1522 += E1000_READ_REG(&sc->hw, E1000_PTC1522); 3528 sc->stats.mptc += E1000_READ_REG(&sc->hw, E1000_MPTC); 3529 sc->stats.bptc += E1000_READ_REG(&sc->hw, E1000_BPTC); 3530 3531 sc->stats.algnerrc += E1000_READ_REG(&sc->hw, E1000_ALGNERRC); 3532 sc->stats.rxerrc += E1000_READ_REG(&sc->hw, E1000_RXERRC); 3533 sc->stats.tncrs += E1000_READ_REG(&sc->hw, E1000_TNCRS); 3534 sc->stats.cexterr += E1000_READ_REG(&sc->hw, E1000_CEXTERR); 3535 sc->stats.tsctc += E1000_READ_REG(&sc->hw, E1000_TSCTC); 3536 sc->stats.tsctfc += E1000_READ_REG(&sc->hw, E1000_TSCTFC); 3537 3538 IFNET_STAT_SET(ifp, collisions, sc->stats.colc); 3539 3540 /* Rx Errors */ 3541 IFNET_STAT_SET(ifp, ierrors, 3542 sc->stats.rxerrc + sc->stats.crcerrs + sc->stats.algnerrc + 3543 sc->stats.ruc + sc->stats.roc + sc->stats.mpc + sc->stats.cexterr); 3544 3545 /* Tx Errors */ 3546 IFNET_STAT_SET(ifp, oerrors, sc->stats.ecol + sc->stats.latecol); 3547 } 3548 3549 static void 3550 emx_print_debug_info(struct emx_softc *sc) 3551 { 3552 device_t dev = sc->dev; 3553 uint8_t *hw_addr = sc->hw.hw_addr; 3554 int i; 3555 3556 device_printf(dev, "Adapter hardware address = %p \n", hw_addr); 3557 device_printf(dev, "CTRL = 0x%x RCTL = 0x%x \n", 3558 E1000_READ_REG(&sc->hw, E1000_CTRL), 3559 E1000_READ_REG(&sc->hw, E1000_RCTL)); 3560 device_printf(dev, "Packet buffer = Tx=%dk Rx=%dk \n", 3561 ((E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff0000) >> 16),\ 3562 (E1000_READ_REG(&sc->hw, E1000_PBA) & 0xffff) ); 3563 device_printf(dev, "Flow control watermarks high = %d low = %d\n", 3564 sc->hw.fc.high_water, sc->hw.fc.low_water); 3565 device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n", 3566 E1000_READ_REG(&sc->hw, E1000_TIDV), 3567 E1000_READ_REG(&sc->hw, E1000_TADV)); 3568 device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n", 3569 E1000_READ_REG(&sc->hw, E1000_RDTR), 3570 E1000_READ_REG(&sc->hw, E1000_RADV)); 3571 3572 for (i = 0; i < sc->tx_ring_cnt; ++i) { 3573 device_printf(dev, "hw %d tdh = %d, hw tdt = %d\n", i, 3574 E1000_READ_REG(&sc->hw, E1000_TDH(i)), 3575 E1000_READ_REG(&sc->hw, E1000_TDT(i))); 3576 } 3577 for (i = 0; i < sc->rx_ring_cnt; ++i) { 3578 device_printf(dev, "hw %d rdh = %d, hw rdt = %d\n", i, 3579 E1000_READ_REG(&sc->hw, E1000_RDH(i)), 3580 E1000_READ_REG(&sc->hw, E1000_RDT(i))); 3581 } 3582 3583 for (i = 0; i < sc->tx_ring_cnt; ++i) { 3584 device_printf(dev, "TX %d Tx descriptors avail = %d\n", i, 3585 sc->tx_data[i].num_tx_desc_avail); 3586 
device_printf(dev, "TX %d TSO segments = %lu\n", i, 3587 sc->tx_data[i].tso_segments); 3588 device_printf(dev, "TX %d TSO ctx reused = %lu\n", i, 3589 sc->tx_data[i].tso_ctx_reused); 3590 } 3591 } 3592 3593 static void 3594 emx_print_hw_stats(struct emx_softc *sc) 3595 { 3596 device_t dev = sc->dev; 3597 3598 device_printf(dev, "Excessive collisions = %lld\n", 3599 (long long)sc->stats.ecol); 3600 #if (DEBUG_HW > 0) /* Don't output these errors normally */ 3601 device_printf(dev, "Symbol errors = %lld\n", 3602 (long long)sc->stats.symerrs); 3603 #endif 3604 device_printf(dev, "Sequence errors = %lld\n", 3605 (long long)sc->stats.sec); 3606 device_printf(dev, "Defer count = %lld\n", 3607 (long long)sc->stats.dc); 3608 device_printf(dev, "Missed Packets = %lld\n", 3609 (long long)sc->stats.mpc); 3610 device_printf(dev, "Receive No Buffers = %lld\n", 3611 (long long)sc->stats.rnbc); 3612 /* RLEC is inaccurate on some hardware, so calculate our own. */ 3613 device_printf(dev, "Receive Length Errors = %lld\n", 3614 ((long long)sc->stats.roc + (long long)sc->stats.ruc)); 3615 device_printf(dev, "Receive errors = %lld\n", 3616 (long long)sc->stats.rxerrc); 3617 device_printf(dev, "Crc errors = %lld\n", 3618 (long long)sc->stats.crcerrs); 3619 device_printf(dev, "Alignment errors = %lld\n", 3620 (long long)sc->stats.algnerrc); 3621 device_printf(dev, "Collision/Carrier extension errors = %lld\n", 3622 (long long)sc->stats.cexterr); 3623 device_printf(dev, "RX overruns = %ld\n", sc->rx_overruns); 3624 device_printf(dev, "XON Rcvd = %lld\n", 3625 (long long)sc->stats.xonrxc); 3626 device_printf(dev, "XON Xmtd = %lld\n", 3627 (long long)sc->stats.xontxc); 3628 device_printf(dev, "XOFF Rcvd = %lld\n", 3629 (long long)sc->stats.xoffrxc); 3630 device_printf(dev, "XOFF Xmtd = %lld\n", 3631 (long long)sc->stats.xofftxc); 3632 device_printf(dev, "Good Packets Rcvd = %lld\n", 3633 (long long)sc->stats.gprc); 3634 device_printf(dev, "Good Packets Xmtd = %lld\n", 3635 (long long)sc->stats.gptc); 3636 } 3637 3638 static void 3639 emx_print_nvm_info(struct emx_softc *sc) 3640 { 3641 uint16_t eeprom_data; 3642 int i, j, row = 0; 3643 3644 /* It's a bit crude, but it gets the job done */ 3645 kprintf("\nInterface EEPROM Dump:\n"); 3646 kprintf("Offset\n0x0000 "); 3647 for (i = 0, j = 0; i < 32; i++, j++) { 3648 if (j == 8) { /* Make the offset block */ 3649 j = 0; ++row; 3650 kprintf("\n0x00%x0 ",row); 3651 } 3652 e1000_read_nvm(&sc->hw, i, 1, &eeprom_data); 3653 kprintf("%04x ", eeprom_data); 3654 } 3655 kprintf("\n"); 3656 } 3657 3658 static int 3659 emx_sysctl_debug_info(SYSCTL_HANDLER_ARGS) 3660 { 3661 struct emx_softc *sc; 3662 struct ifnet *ifp; 3663 int error, result; 3664 3665 result = -1; 3666 error = sysctl_handle_int(oidp, &result, 0, req); 3667 if (error || !req->newptr) 3668 return (error); 3669 3670 sc = (struct emx_softc *)arg1; 3671 ifp = &sc->arpcom.ac_if; 3672 3673 ifnet_serialize_all(ifp); 3674 3675 if (result == 1) 3676 emx_print_debug_info(sc); 3677 3678 /* 3679 * This value will cause a hex dump of the 3680 * first 32 16-bit words of the EEPROM to 3681 * the screen.
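 * For example, something along the lines of
 *
 *	sysctl hw.emx0.debug=2
 *
 * (the exact node depends on where the per-device sysctl tree created
 * in emx_add_sysctl() is rooted) triggers this dump, while a value of
 * 1 prints the debug information above.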
3682 */ 3683 if (result == 2) 3684 emx_print_nvm_info(sc); 3685 3686 ifnet_deserialize_all(ifp); 3687 3688 return (error); 3689 } 3690 3691 static int 3692 emx_sysctl_stats(SYSCTL_HANDLER_ARGS) 3693 { 3694 int error, result; 3695 3696 result = -1; 3697 error = sysctl_handle_int(oidp, &result, 0, req); 3698 if (error || !req->newptr) 3699 return (error); 3700 3701 if (result == 1) { 3702 struct emx_softc *sc = (struct emx_softc *)arg1; 3703 struct ifnet *ifp = &sc->arpcom.ac_if; 3704 3705 ifnet_serialize_all(ifp); 3706 emx_print_hw_stats(sc); 3707 ifnet_deserialize_all(ifp); 3708 } 3709 return (error); 3710 } 3711 3712 static void 3713 emx_add_sysctl(struct emx_softc *sc) 3714 { 3715 struct sysctl_ctx_list *ctx; 3716 struct sysctl_oid *tree; 3717 #if defined(EMX_RSS_DEBUG) || defined(EMX_TSS_DEBUG) 3718 char pkt_desc[32]; 3719 int i; 3720 #endif 3721 3722 ctx = device_get_sysctl_ctx(sc->dev); 3723 tree = device_get_sysctl_tree(sc->dev); 3724 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3725 OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, sc, 0, 3726 emx_sysctl_debug_info, "I", "Debug Information"); 3727 3728 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3729 OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW, sc, 0, 3730 emx_sysctl_stats, "I", "Statistics"); 3731 3732 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 3733 OID_AUTO, "rxd", CTLFLAG_RD, &sc->rx_data[0].num_rx_desc, 0, 3734 "# of RX descs"); 3735 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 3736 OID_AUTO, "txd", CTLFLAG_RD, &sc->tx_data[0].num_tx_desc, 0, 3737 "# of TX descs"); 3738 3739 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3740 OID_AUTO, "int_throttle_ceil", CTLTYPE_INT|CTLFLAG_RW, sc, 0, 3741 emx_sysctl_int_throttle, "I", "interrupt throttling rate"); 3742 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3743 OID_AUTO, "tx_intr_nsegs", CTLTYPE_INT|CTLFLAG_RW, sc, 0, 3744 emx_sysctl_tx_intr_nsegs, "I", "# segments per TX interrupt"); 3745 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3746 OID_AUTO, "tx_wreg_nsegs", CTLTYPE_INT|CTLFLAG_RW, sc, 0, 3747 emx_sysctl_tx_wreg_nsegs, "I", 3748 "# segments sent before write to hardware register"); 3749 3750 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 3751 OID_AUTO, "rx_ring_cnt", CTLFLAG_RD, &sc->rx_ring_cnt, 0, 3752 "# of RX rings"); 3753 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 3754 OID_AUTO, "tx_ring_cnt", CTLFLAG_RD, &sc->tx_ring_cnt, 0, 3755 "# of TX rings"); 3756 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 3757 OID_AUTO, "tx_ring_inuse", CTLFLAG_RD, &sc->tx_ring_inuse, 0, 3758 "# of TX rings used"); 3759 3760 #ifdef IFPOLL_ENABLE 3761 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3762 OID_AUTO, "npoll_rxoff", CTLTYPE_INT|CTLFLAG_RW, 3763 sc, 0, emx_sysctl_npoll_rxoff, "I", 3764 "NPOLLING RX cpu offset"); 3765 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 3766 OID_AUTO, "npoll_txoff", CTLTYPE_INT|CTLFLAG_RW, 3767 sc, 0, emx_sysctl_npoll_txoff, "I", 3768 "NPOLLING TX cpu offset"); 3769 #endif 3770 3771 #ifdef EMX_RSS_DEBUG 3772 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 3773 OID_AUTO, "rss_debug", CTLFLAG_RW, &sc->rss_debug, 3774 0, "RSS debug level"); 3775 for (i = 0; i < sc->rx_ring_cnt; ++i) { 3776 ksnprintf(pkt_desc, sizeof(pkt_desc), "rx%d_pkt", i); 3777 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 3778 pkt_desc, CTLFLAG_RW, &sc->rx_data[i].rx_pkts, 3779 "RXed packets"); 3780 } 3781 #endif 3782 #ifdef EMX_TSS_DEBUG 3783 for (i = 0; i < sc->tx_ring_cnt; ++i) { 3784 ksnprintf(pkt_desc, sizeof(pkt_desc), "tx%d_pkt", i); 3785 SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 3786 pkt_desc, 
CTLFLAG_RW, &sc->tx_data[i].tx_pkts, 3787 "TXed packets"); 3788 } 3789 #endif 3790 } 3791 3792 static int 3793 emx_sysctl_int_throttle(SYSCTL_HANDLER_ARGS) 3794 { 3795 struct emx_softc *sc = (void *)arg1; 3796 struct ifnet *ifp = &sc->arpcom.ac_if; 3797 int error, throttle; 3798 3799 throttle = sc->int_throttle_ceil; 3800 error = sysctl_handle_int(oidp, &throttle, 0, req); 3801 if (error || req->newptr == NULL) 3802 return error; 3803 if (throttle < 0 || throttle > 1000000000 / 256) 3804 return EINVAL; 3805 3806 if (throttle) { 3807 /* 3808 * Set the interrupt throttling rate in 256ns increments; 3809 * recalculate the value so the sysctl reports the exact frequency. 3810 */ 3811 throttle = 1000000000 / 256 / throttle; 3812 3813 /* The upper 16 bits of ITR are reserved and should be zero */ 3814 if (throttle & 0xffff0000) 3815 return EINVAL; 3816 } 3817 3818 ifnet_serialize_all(ifp); 3819 3820 if (throttle) 3821 sc->int_throttle_ceil = 1000000000 / 256 / throttle; 3822 else 3823 sc->int_throttle_ceil = 0; 3824 3825 if (ifp->if_flags & IFF_RUNNING) 3826 emx_set_itr(sc, throttle); 3827 3828 ifnet_deserialize_all(ifp); 3829 3830 if (bootverbose) { 3831 if_printf(ifp, "Interrupt moderation set to %d/sec\n", 3832 sc->int_throttle_ceil); 3833 } 3834 return 0; 3835 } 3836 3837 static int 3838 emx_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS) 3839 { 3840 struct emx_softc *sc = (void *)arg1; 3841 struct ifnet *ifp = &sc->arpcom.ac_if; 3842 struct emx_txdata *tdata = &sc->tx_data[0]; 3843 int error, segs; 3844 3845 segs = tdata->tx_intr_nsegs; 3846 error = sysctl_handle_int(oidp, &segs, 0, req); 3847 if (error || req->newptr == NULL) 3848 return error; 3849 if (segs <= 0) 3850 return EINVAL; 3851 3852 ifnet_serialize_all(ifp); 3853 3854 /* 3855 * Don't allow tx_intr_nsegs to become: 3856 * o Less than oact_tx_desc 3857 * o So large that no TX desc would cause a TX interrupt to 3858 * be generated (OACTIVE would never recover) 3859 * o So small that it would cause tx_dd[] overflow 3860 */ 3861 if (segs < tdata->oact_tx_desc || 3862 segs >= tdata->num_tx_desc - tdata->oact_tx_desc || 3863 segs < tdata->num_tx_desc / EMX_TXDD_SAFE) { 3864 error = EINVAL; 3865 } else { 3866 int i; 3867 3868 error = 0; 3869 for (i = 0; i < sc->tx_ring_cnt; ++i) 3870 sc->tx_data[i].tx_intr_nsegs = segs; 3871 } 3872 3873 ifnet_deserialize_all(ifp); 3874 3875 return error; 3876 } 3877 3878 static int 3879 emx_sysctl_tx_wreg_nsegs(SYSCTL_HANDLER_ARGS) 3880 { 3881 struct emx_softc *sc = (void *)arg1; 3882 struct ifnet *ifp = &sc->arpcom.ac_if; 3883 int error, nsegs, i; 3884 3885 nsegs = sc->tx_data[0].tx_wreg_nsegs; 3886 error = sysctl_handle_int(oidp, &nsegs, 0, req); 3887 if (error || req->newptr == NULL) 3888 return error; 3889 3890 ifnet_serialize_all(ifp); 3891 for (i = 0; i < sc->tx_ring_cnt; ++i) 3892 sc->tx_data[i].tx_wreg_nsegs = nsegs; 3893 ifnet_deserialize_all(ifp); 3894 3895 return 0; 3896 } 3897 3898 #ifdef IFPOLL_ENABLE 3899 3900 static int 3901 emx_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS) 3902 { 3903 struct emx_softc *sc = (void *)arg1; 3904 struct ifnet *ifp = &sc->arpcom.ac_if; 3905 int error, off; 3906 3907 off = sc->rx_npoll_off; 3908 error = sysctl_handle_int(oidp, &off, 0, req); 3909 if (error || req->newptr == NULL) 3910 return error; 3911 if (off < 0) 3912 return EINVAL; 3913 3914 ifnet_serialize_all(ifp); 3915 if (off >= ncpus2 || off % sc->rx_ring_cnt != 0) { 3916 error = EINVAL; 3917 } else { 3918 error = 0; 3919 sc->rx_npoll_off = off; 3920 } 3921 ifnet_deserialize_all(ifp); 3922 3923 return error; 3924 } 3925 3926 static
static int
emx_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS)
{
        struct emx_softc *sc = (void *)arg1;
        struct ifnet *ifp = &sc->arpcom.ac_if;
        int error, off;

        off = sc->tx_npoll_off;
        error = sysctl_handle_int(oidp, &off, 0, req);
        if (error || req->newptr == NULL)
                return error;
        if (off < 0)
                return EINVAL;

        ifnet_serialize_all(ifp);
        if (off >= ncpus2 || off % sc->tx_ring_cnt != 0) {
                error = EINVAL;
        } else {
                error = 0;
                sc->tx_npoll_off = off;
        }
        ifnet_deserialize_all(ifp);

        return error;
}

#endif /* IFPOLL_ENABLE */

static int
emx_dma_alloc(struct emx_softc *sc)
{
        int error, i;

        /*
         * Create the top level busdma tag
         */
        error = bus_dma_tag_create(NULL, 1, 0,
            BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
            NULL, NULL,
            BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
            0, &sc->parent_dtag);
        if (error) {
                device_printf(sc->dev, "could not create top level DMA tag\n");
                return error;
        }

        /*
         * Allocate the transmit descriptor rings and buffers
         */
        for (i = 0; i < sc->tx_ring_cnt; ++i) {
                error = emx_create_tx_ring(&sc->tx_data[i]);
                if (error) {
                        device_printf(sc->dev,
                            "Could not setup transmit structures\n");
                        return error;
                }
        }

        /*
         * Allocate the receive descriptor rings and buffers
         */
        for (i = 0; i < sc->rx_ring_cnt; ++i) {
                error = emx_create_rx_ring(&sc->rx_data[i]);
                if (error) {
                        device_printf(sc->dev,
                            "Could not setup receive structures\n");
                        return error;
                }
        }
        return 0;
}

static void
emx_dma_free(struct emx_softc *sc)
{
        int i;

        for (i = 0; i < sc->tx_ring_cnt; ++i) {
                emx_destroy_tx_ring(&sc->tx_data[i],
                    sc->tx_data[i].num_tx_desc);
        }

        for (i = 0; i < sc->rx_ring_cnt; ++i) {
                emx_destroy_rx_ring(&sc->rx_data[i],
                    sc->rx_data[i].num_rx_desc);
        }

        /* Free the top level busdma tag */
        if (sc->parent_dtag != NULL)
                bus_dma_tag_destroy(sc->parent_dtag);
}

static void
emx_serialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
        struct emx_softc *sc = ifp->if_softc;

        ifnet_serialize_array_enter(sc->serializes, EMX_NSERIALIZE, slz);
}

static void
emx_deserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
        struct emx_softc *sc = ifp->if_softc;

        ifnet_serialize_array_exit(sc->serializes, EMX_NSERIALIZE, slz);
}

static int
emx_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz)
{
        struct emx_softc *sc = ifp->if_softc;

        return ifnet_serialize_array_try(sc->serializes, EMX_NSERIALIZE, slz);
}

static void
emx_serialize_skipmain(struct emx_softc *sc)
{
        lwkt_serialize_array_enter(sc->serializes, EMX_NSERIALIZE, 1);
}

static void
emx_deserialize_skipmain(struct emx_softc *sc)
{
        lwkt_serialize_array_exit(sc->serializes, EMX_NSERIALIZE, 1);
}

#ifdef INVARIANTS

static void
emx_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz,
    boolean_t serialized)
{
        struct emx_softc *sc = ifp->if_softc;

        ifnet_serialize_array_assert(sc->serializes, EMX_NSERIALIZE,
            slz, serialized);
}

#endif /* INVARIANTS */
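
/*
 * Illustrative note (an assumption about the layout, not stated in this
 * part of the file): sc->serializes[] is expected to hold the main
 * serializer at index 0, followed by the per-TX-ring and per-RX-ring
 * serializers, EMX_NSERIALIZE entries in total.  That is why the
 * "skipmain" helpers above enter/exit the array starting at index 1:
 * they take every ring serializer while leaving sc->main_serialize
 * alone.
 */
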
#ifdef IFPOLL_ENABLE

static void
emx_npoll_status(struct ifnet *ifp)
{
        struct emx_softc *sc = ifp->if_softc;
        uint32_t reg_icr;

        ASSERT_SERIALIZED(&sc->main_serialize);

        reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR);
        if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
                callout_stop(&sc->timer);
                sc->hw.mac.get_link_status = 1;
                emx_update_link_status(sc);
                callout_reset(&sc->timer, hz, emx_timer, sc);
        }
}

static void
emx_npoll_tx(struct ifnet *ifp, void *arg, int cycle __unused)
{
        struct emx_txdata *tdata = arg;

        ASSERT_SERIALIZED(&tdata->tx_serialize);

        emx_txeof(tdata);
        if (!ifsq_is_empty(tdata->ifsq))
                ifsq_devstart(tdata->ifsq);
}

static void
emx_npoll_rx(struct ifnet *ifp __unused, void *arg, int cycle)
{
        struct emx_rxdata *rdata = arg;

        ASSERT_SERIALIZED(&rdata->rx_serialize);

        emx_rxeof(rdata, cycle);
}

static void
emx_npoll(struct ifnet *ifp, struct ifpoll_info *info)
{
        struct emx_softc *sc = ifp->if_softc;
        int i, txr_cnt;

        ASSERT_IFNET_SERIALIZED_ALL(ifp);

        if (info) {
                int off;

                info->ifpi_status.status_func = emx_npoll_status;
                info->ifpi_status.serializer = &sc->main_serialize;

                txr_cnt = emx_get_txring_inuse(sc, TRUE);
                off = sc->tx_npoll_off;
                for (i = 0; i < txr_cnt; ++i) {
                        struct emx_txdata *tdata = &sc->tx_data[i];
                        int idx = i + off;

                        KKASSERT(idx < ncpus2);
                        info->ifpi_tx[idx].poll_func = emx_npoll_tx;
                        info->ifpi_tx[idx].arg = tdata;
                        info->ifpi_tx[idx].serializer = &tdata->tx_serialize;
                        ifsq_set_cpuid(tdata->ifsq, idx);
                }

                off = sc->rx_npoll_off;
                for (i = 0; i < sc->rx_ring_cnt; ++i) {
                        struct emx_rxdata *rdata = &sc->rx_data[i];
                        int idx = i + off;

                        KKASSERT(idx < ncpus2);
                        info->ifpi_rx[idx].poll_func = emx_npoll_rx;
                        info->ifpi_rx[idx].arg = rdata;
                        info->ifpi_rx[idx].serializer = &rdata->rx_serialize;
                }

                if (ifp->if_flags & IFF_RUNNING) {
                        if (txr_cnt == sc->tx_ring_inuse)
                                emx_disable_intr(sc);
                        else
                                emx_init(sc);
                }
        } else {
                for (i = 0; i < sc->tx_ring_cnt; ++i) {
                        struct emx_txdata *tdata = &sc->tx_data[i];

                        ifsq_set_cpuid(tdata->ifsq,
                            rman_get_cpuid(sc->intr_res));
                }

                if (ifp->if_flags & IFF_RUNNING) {
                        txr_cnt = emx_get_txring_inuse(sc, FALSE);
                        if (txr_cnt == sc->tx_ring_inuse)
                                emx_enable_intr(sc);
                        else
                                emx_init(sc);
                }
        }
}

#endif /* IFPOLL_ENABLE */

static void
emx_set_itr(struct emx_softc *sc, uint32_t itr)
{
        E1000_WRITE_REG(&sc->hw, E1000_ITR, itr);
        if (sc->hw.mac.type == e1000_82574) {
                int i;

                /*
                 * When MSI-X interrupts are used, throttling must also
                 * be applied through the EITR registers.
                 */
                for (i = 0; i < 4; ++i)
                        E1000_WRITE_REG(&sc->hw, E1000_EITR_82574(i), itr);
        }
}
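
/*
 * Illustrative sketch (not part of the original driver): the ITR value
 * written above counts in 256ns units, so a requested interrupt rate is
 * converted as 1000000000 / 256 / rate.  Kept under "#if 0" so it does
 * not affect the build; emx_itr_example() is a hypothetical helper, not
 * a driver function.
 */
#if 0
static uint32_t
emx_itr_example(int rate)
{
        if (rate <= 0)
                return 0;               /* 0 disables throttling */
        /* e.g. a 6000/s request yields 1000000000 / 256 / 6000 = 651 */
        return 1000000000 / 256 / rate;
}
#endif
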
/*
 * Disable L0s, 82574L Errata #20
 */
static void
emx_disable_aspm(struct emx_softc *sc)
{
        uint16_t link_cap, link_ctrl, disable;
        uint8_t pcie_ptr, reg;
        device_t dev = sc->dev;

        switch (sc->hw.mac.type) {
        case e1000_82571:
        case e1000_82572:
        case e1000_82573:
                /*
                 * 82573 specification update
                 * errata #8  disable L0s
                 * errata #41 disable L1
                 *
                 * 82571/82572 specification update
                 * errata #13 disable L1
                 * errata #68 disable L0s
                 */
                disable = PCIEM_LNKCTL_ASPM_L0S | PCIEM_LNKCTL_ASPM_L1;
                break;

        case e1000_82574:
                /*
                 * 82574 specification update errata #20
                 *
                 * There is no need to disable L1.
                 */
                disable = PCIEM_LNKCTL_ASPM_L0S;
                break;

        default:
                return;
        }

        pcie_ptr = pci_get_pciecap_ptr(dev);
        if (pcie_ptr == 0)
                return;

        link_cap = pci_read_config(dev, pcie_ptr + PCIER_LINKCAP, 2);
        if ((link_cap & PCIEM_LNKCAP_ASPM_MASK) == 0)
                return;

        if (bootverbose)
                if_printf(&sc->arpcom.ac_if, "disable ASPM %#02x\n", disable);

        reg = pcie_ptr + PCIER_LINKCTRL;
        link_ctrl = pci_read_config(dev, reg, 2);
        link_ctrl &= ~disable;
        pci_write_config(dev, reg, link_ctrl, 2);
}

static int
emx_tso_pullup(struct emx_txdata *tdata, struct mbuf **mp)
{
        int iphlen, hoff, thoff, ex = 0;
        struct mbuf *m;
        struct ip *ip;

        m = *mp;
        KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));

        iphlen = m->m_pkthdr.csum_iphlen;
        thoff = m->m_pkthdr.csum_thlen;
        hoff = m->m_pkthdr.csum_lhlen;

        KASSERT(iphlen > 0, ("invalid ip hlen"));
        KASSERT(thoff > 0, ("invalid tcp hlen"));
        KASSERT(hoff > 0, ("invalid ether hlen"));

        if (tdata->tx_flags & EMX_TXFLAG_TSO_PULLEX)
                ex = 4;

        if (m->m_len < hoff + iphlen + thoff + ex) {
                m = m_pullup(m, hoff + iphlen + thoff + ex);
                if (m == NULL) {
                        *mp = NULL;
                        return ENOBUFS;
                }
                *mp = m;
        }
        ip = mtodoff(m, struct ip *, hoff);
        ip->ip_len = 0;

        return 0;
}
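
/*
 * Illustrative note (not from the original source): for a common TSO
 * frame with a 14-byte Ethernet header, a 20-byte IPv4 header and a
 * 20-byte TCP header, the pullup above requires at least 54 contiguous
 * bytes in the first mbuf, or 58 bytes when EMX_TXFLAG_TSO_PULLEX adds
 * the extra 4 bytes.  ip_len is zeroed so the hardware can insert the
 * per-segment IP total length during segmentation.
 */
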
static int
emx_tso_setup(struct emx_txdata *tdata, struct mbuf *mp,
    uint32_t *txd_upper, uint32_t *txd_lower)
{
        struct e1000_context_desc *TXD;
        int hoff, iphlen, thoff, hlen;
        int mss, pktlen, curr_txd;

#ifdef EMX_TSO_DEBUG
        tdata->tso_segments++;
#endif

        iphlen = mp->m_pkthdr.csum_iphlen;
        thoff = mp->m_pkthdr.csum_thlen;
        hoff = mp->m_pkthdr.csum_lhlen;
        mss = mp->m_pkthdr.tso_segsz;
        pktlen = mp->m_pkthdr.len;

        if ((tdata->tx_flags & EMX_TXFLAG_FORCECTX) == 0 &&
            tdata->csum_flags == CSUM_TSO &&
            tdata->csum_iphlen == iphlen &&
            tdata->csum_lhlen == hoff &&
            tdata->csum_thlen == thoff &&
            tdata->csum_mss == mss &&
            tdata->csum_pktlen == pktlen) {
                *txd_upper = tdata->csum_txd_upper;
                *txd_lower = tdata->csum_txd_lower;
#ifdef EMX_TSO_DEBUG
                tdata->tso_ctx_reused++;
#endif
                return 0;
        }
        hlen = hoff + iphlen + thoff;

        /*
         * Setup a new TSO context.
         */

        curr_txd = tdata->next_avail_tx_desc;
        TXD = (struct e1000_context_desc *)&tdata->tx_desc_base[curr_txd];

        *txd_lower = E1000_TXD_CMD_DEXT |       /* Extended descr type */
            E1000_TXD_DTYP_D |                  /* Data descr type */
            E1000_TXD_CMD_TSE;                  /* Do TSE on this packet */

        /* IP and/or TCP header checksum calculation and insertion. */
        *txd_upper = (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8;

        /*
         * Start offset for header checksum calculation.
         * End offset for header checksum calculation.
         * Offset of the place to put the checksum.
         */
        TXD->lower_setup.ip_fields.ipcss = hoff;
        TXD->lower_setup.ip_fields.ipcse = htole16(hoff + iphlen - 1);
        TXD->lower_setup.ip_fields.ipcso = hoff + offsetof(struct ip, ip_sum);

        /*
         * Start offset for payload checksum calculation.
         * End offset for payload checksum calculation.
         * Offset of the place to put the checksum.
         */
        TXD->upper_setup.tcp_fields.tucss = hoff + iphlen;
        TXD->upper_setup.tcp_fields.tucse = 0;
        TXD->upper_setup.tcp_fields.tucso =
            hoff + iphlen + offsetof(struct tcphdr, th_sum);

        /*
         * Payload size per packet w/o any headers.
         * Length of all headers up to payload.
         */
        TXD->tcp_seg_setup.fields.mss = htole16(mss);
        TXD->tcp_seg_setup.fields.hdr_len = hlen;
        TXD->cmd_and_length = htole32(E1000_TXD_CMD_IFCS |
            E1000_TXD_CMD_DEXT |        /* Extended descr */
            E1000_TXD_CMD_TSE |         /* TSE context */
            E1000_TXD_CMD_IP |          /* Do IP csum */
            E1000_TXD_CMD_TCP |         /* Do TCP checksum */
            (pktlen - hlen));           /* Total len */

        /* Save the information for this TSO context */
        tdata->csum_flags = CSUM_TSO;
        tdata->csum_lhlen = hoff;
        tdata->csum_iphlen = iphlen;
        tdata->csum_thlen = thoff;
        tdata->csum_mss = mss;
        tdata->csum_pktlen = pktlen;
        tdata->csum_txd_upper = *txd_upper;
        tdata->csum_txd_lower = *txd_lower;

        if (++curr_txd == tdata->num_tx_desc)
                curr_txd = 0;

        KKASSERT(tdata->num_tx_desc_avail > 0);
        tdata->num_tx_desc_avail--;

        tdata->next_avail_tx_desc = curr_txd;
        return 1;
}

static int
emx_get_txring_inuse(const struct emx_softc *sc, boolean_t polling)
{
        if (polling)
                return sc->tx_ring_cnt;
        else
                return 1;
}
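
/*
 * Illustrative note (an observation plus an assumption, not from the
 * original source): in polling mode every TX ring is reported in use,
 * which lets emx_npoll() register one TX handler per polling CPU; in
 * interrupt mode only a single TX ring is reported, presumably because
 * all TX completion work is funneled through the one interrupt path.
 */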