1 /* 2 * Copyright (c) 2001-2011, Intel Corporation 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are met: 7 * 8 * 1. Redistributions of source code must retain the above copyright notice, 9 * this list of conditions and the following disclaimer. 10 * 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * 3. Neither the name of the Intel Corporation nor the names of its 16 * contributors may be used to endorse or promote products derived from 17 * this software without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 */ 31 32 #include "opt_ifpoll.h" 33 #include "opt_igb.h" 34 35 #include <sys/param.h> 36 #include <sys/bus.h> 37 #include <sys/endian.h> 38 #include <sys/interrupt.h> 39 #include <sys/kernel.h> 40 #include <sys/malloc.h> 41 #include <sys/mbuf.h> 42 #include <sys/proc.h> 43 #include <sys/rman.h> 44 #include <sys/serialize.h> 45 #include <sys/serialize2.h> 46 #include <sys/socket.h> 47 #include <sys/sockio.h> 48 #include <sys/sysctl.h> 49 #include <sys/systm.h> 50 51 #include <net/bpf.h> 52 #include <net/ethernet.h> 53 #include <net/if.h> 54 #include <net/if_arp.h> 55 #include <net/if_dl.h> 56 #include <net/if_media.h> 57 #include <net/ifq_var.h> 58 #include <net/toeplitz.h> 59 #include <net/toeplitz2.h> 60 #include <net/vlan/if_vlan_var.h> 61 #include <net/vlan/if_vlan_ether.h> 62 #include <net/if_poll.h> 63 64 #include <netinet/in_systm.h> 65 #include <netinet/in.h> 66 #include <netinet/ip.h> 67 #include <netinet/tcp.h> 68 #include <netinet/udp.h> 69 70 #include <bus/pci/pcivar.h> 71 #include <bus/pci/pcireg.h> 72 73 #include <dev/netif/ig_hal/e1000_api.h> 74 #include <dev/netif/ig_hal/e1000_82575.h> 75 #include <dev/netif/igb/if_igb.h> 76 77 #ifdef IGB_RSS_DEBUG 78 #define IGB_RSS_DPRINTF(sc, lvl, fmt, ...) \ 79 do { \ 80 if (sc->rss_debug >= lvl) \ 81 if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \ 82 } while (0) 83 #else /* !IGB_RSS_DEBUG */ 84 #define IGB_RSS_DPRINTF(sc, lvl, fmt, ...) 
((void)0) 85 #endif /* IGB_RSS_DEBUG */ 86 87 #define IGB_NAME "Intel(R) PRO/1000 " 88 #define IGB_DEVICE(id) \ 89 { IGB_VENDOR_ID, E1000_DEV_ID_##id, IGB_NAME #id } 90 #define IGB_DEVICE_NULL { 0, 0, NULL } 91 92 static struct igb_device { 93 uint16_t vid; 94 uint16_t did; 95 const char *desc; 96 } igb_devices[] = { 97 IGB_DEVICE(82575EB_COPPER), 98 IGB_DEVICE(82575EB_FIBER_SERDES), 99 IGB_DEVICE(82575GB_QUAD_COPPER), 100 IGB_DEVICE(82576), 101 IGB_DEVICE(82576_NS), 102 IGB_DEVICE(82576_NS_SERDES), 103 IGB_DEVICE(82576_FIBER), 104 IGB_DEVICE(82576_SERDES), 105 IGB_DEVICE(82576_SERDES_QUAD), 106 IGB_DEVICE(82576_QUAD_COPPER), 107 IGB_DEVICE(82576_QUAD_COPPER_ET2), 108 IGB_DEVICE(82576_VF), 109 IGB_DEVICE(82580_COPPER), 110 IGB_DEVICE(82580_FIBER), 111 IGB_DEVICE(82580_SERDES), 112 IGB_DEVICE(82580_SGMII), 113 IGB_DEVICE(82580_COPPER_DUAL), 114 IGB_DEVICE(82580_QUAD_FIBER), 115 IGB_DEVICE(DH89XXCC_SERDES), 116 IGB_DEVICE(DH89XXCC_SGMII), 117 IGB_DEVICE(DH89XXCC_SFP), 118 IGB_DEVICE(DH89XXCC_BACKPLANE), 119 IGB_DEVICE(I350_COPPER), 120 IGB_DEVICE(I350_FIBER), 121 IGB_DEVICE(I350_SERDES), 122 IGB_DEVICE(I350_SGMII), 123 IGB_DEVICE(I350_VF), 124 125 /* required last entry */ 126 IGB_DEVICE_NULL 127 }; 128 129 static int igb_probe(device_t); 130 static int igb_attach(device_t); 131 static int igb_detach(device_t); 132 static int igb_shutdown(device_t); 133 static int igb_suspend(device_t); 134 static int igb_resume(device_t); 135 136 static boolean_t igb_is_valid_ether_addr(const uint8_t *); 137 static void igb_setup_ifp(struct igb_softc *); 138 static boolean_t igb_txcsum_ctx(struct igb_tx_ring *, struct mbuf *); 139 static int igb_tso_pullup(struct igb_tx_ring *, struct mbuf **); 140 static void igb_tso_ctx(struct igb_tx_ring *, struct mbuf *, uint32_t *); 141 static void igb_add_sysctl(struct igb_softc *); 142 static int igb_sysctl_intr_rate(SYSCTL_HANDLER_ARGS); 143 static int igb_sysctl_msix_rate(SYSCTL_HANDLER_ARGS); 144 static int igb_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS); 145 static void igb_set_ring_inuse(struct igb_softc *, boolean_t); 146 #ifdef IFPOLL_ENABLE 147 static int igb_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS); 148 static int igb_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS); 149 #endif 150 151 static void igb_vf_init_stats(struct igb_softc *); 152 static void igb_reset(struct igb_softc *); 153 static void igb_update_stats_counters(struct igb_softc *); 154 static void igb_update_vf_stats_counters(struct igb_softc *); 155 static void igb_update_link_status(struct igb_softc *); 156 static void igb_init_tx_unit(struct igb_softc *); 157 static void igb_init_rx_unit(struct igb_softc *); 158 159 static void igb_set_vlan(struct igb_softc *); 160 static void igb_set_multi(struct igb_softc *); 161 static void igb_set_promisc(struct igb_softc *); 162 static void igb_disable_promisc(struct igb_softc *); 163 164 static int igb_alloc_rings(struct igb_softc *); 165 static void igb_free_rings(struct igb_softc *); 166 static int igb_create_tx_ring(struct igb_tx_ring *); 167 static int igb_create_rx_ring(struct igb_rx_ring *); 168 static void igb_free_tx_ring(struct igb_tx_ring *); 169 static void igb_free_rx_ring(struct igb_rx_ring *); 170 static void igb_destroy_tx_ring(struct igb_tx_ring *, int); 171 static void igb_destroy_rx_ring(struct igb_rx_ring *, int); 172 static void igb_init_tx_ring(struct igb_tx_ring *); 173 static int igb_init_rx_ring(struct igb_rx_ring *); 174 static int igb_newbuf(struct igb_rx_ring *, int, boolean_t); 175 static int igb_encap(struct igb_tx_ring *, struct mbuf 
**, int *, int *); 176 static void igb_rx_refresh(struct igb_rx_ring *, int); 177 178 static void igb_stop(struct igb_softc *); 179 static void igb_init(void *); 180 static int igb_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 181 static void igb_media_status(struct ifnet *, struct ifmediareq *); 182 static int igb_media_change(struct ifnet *); 183 static void igb_timer(void *); 184 static void igb_watchdog(struct ifnet *); 185 static void igb_start(struct ifnet *, struct ifaltq_subque *); 186 #ifdef IFPOLL_ENABLE 187 static void igb_npoll(struct ifnet *, struct ifpoll_info *); 188 static void igb_npoll_rx(struct ifnet *, void *, int); 189 static void igb_npoll_tx(struct ifnet *, void *, int); 190 static void igb_npoll_status(struct ifnet *); 191 #endif 192 static void igb_serialize(struct ifnet *, enum ifnet_serialize); 193 static void igb_deserialize(struct ifnet *, enum ifnet_serialize); 194 static int igb_tryserialize(struct ifnet *, enum ifnet_serialize); 195 #ifdef INVARIANTS 196 static void igb_serialize_assert(struct ifnet *, enum ifnet_serialize, 197 boolean_t); 198 #endif 199 200 static void igb_intr(void *); 201 static void igb_intr_shared(void *); 202 static void igb_rxeof(struct igb_rx_ring *, int); 203 static void igb_txeof(struct igb_tx_ring *); 204 static void igb_set_eitr(struct igb_softc *, int, int); 205 static void igb_enable_intr(struct igb_softc *); 206 static void igb_disable_intr(struct igb_softc *); 207 static void igb_init_unshared_intr(struct igb_softc *); 208 static void igb_init_intr(struct igb_softc *); 209 static int igb_setup_intr(struct igb_softc *); 210 static void igb_set_txintr_mask(struct igb_tx_ring *, int *, int); 211 static void igb_set_rxintr_mask(struct igb_rx_ring *, int *, int); 212 static void igb_set_intr_mask(struct igb_softc *); 213 static int igb_alloc_intr(struct igb_softc *); 214 static void igb_free_intr(struct igb_softc *); 215 static void igb_teardown_intr(struct igb_softc *); 216 static void igb_msix_try_alloc(struct igb_softc *); 217 static void igb_msix_free(struct igb_softc *, boolean_t); 218 static int igb_msix_setup(struct igb_softc *); 219 static void igb_msix_teardown(struct igb_softc *, int); 220 static void igb_msix_rx(void *); 221 static void igb_msix_tx(void *); 222 static void igb_msix_status(void *); 223 224 /* Management and WOL Support */ 225 static void igb_get_mgmt(struct igb_softc *); 226 static void igb_rel_mgmt(struct igb_softc *); 227 static void igb_get_hw_control(struct igb_softc *); 228 static void igb_rel_hw_control(struct igb_softc *); 229 static void igb_enable_wol(device_t); 230 231 static device_method_t igb_methods[] = { 232 /* Device interface */ 233 DEVMETHOD(device_probe, igb_probe), 234 DEVMETHOD(device_attach, igb_attach), 235 DEVMETHOD(device_detach, igb_detach), 236 DEVMETHOD(device_shutdown, igb_shutdown), 237 DEVMETHOD(device_suspend, igb_suspend), 238 DEVMETHOD(device_resume, igb_resume), 239 { 0, 0 } 240 }; 241 242 static driver_t igb_driver = { 243 "igb", 244 igb_methods, 245 sizeof(struct igb_softc), 246 }; 247 248 static devclass_t igb_devclass; 249 250 DECLARE_DUMMY_MODULE(if_igb); 251 MODULE_DEPEND(igb, ig_hal, 1, 1, 1); 252 DRIVER_MODULE(if_igb, pci, igb_driver, igb_devclass, NULL, NULL); 253 254 static int igb_rxd = IGB_DEFAULT_RXD; 255 static int igb_txd = IGB_DEFAULT_TXD; 256 static int igb_rxr = 0; 257 static int igb_msi_enable = 1; 258 static int igb_msix_enable = 1; 259 static int igb_eee_disabled = 1; /* Energy Efficient Ethernet */ 260 static int igb_fc_setting = 
e1000_fc_full; 261 262 /* 263 * DMA Coalescing, only for i350 - default to off, 264 * this feature is for power savings 265 */ 266 static int igb_dma_coalesce = 0; 267 268 TUNABLE_INT("hw.igb.rxd", &igb_rxd); 269 TUNABLE_INT("hw.igb.txd", &igb_txd); 270 TUNABLE_INT("hw.igb.rxr", &igb_rxr); 271 TUNABLE_INT("hw.igb.msi.enable", &igb_msi_enable); 272 TUNABLE_INT("hw.igb.msix.enable", &igb_msix_enable); 273 TUNABLE_INT("hw.igb.fc_setting", &igb_fc_setting); 274 275 /* i350 specific */ 276 TUNABLE_INT("hw.igb.eee_disabled", &igb_eee_disabled); 277 TUNABLE_INT("hw.igb.dma_coalesce", &igb_dma_coalesce); 278 279 static __inline void 280 igb_rxcsum(uint32_t staterr, struct mbuf *mp) 281 { 282 /* Ignore Checksum bit is set */ 283 if (staterr & E1000_RXD_STAT_IXSM) 284 return; 285 286 if ((staterr & (E1000_RXD_STAT_IPCS | E1000_RXDEXT_STATERR_IPE)) == 287 E1000_RXD_STAT_IPCS) 288 mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID; 289 290 if (staterr & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) { 291 if ((staterr & E1000_RXDEXT_STATERR_TCPE) == 0) { 292 mp->m_pkthdr.csum_flags |= CSUM_DATA_VALID | 293 CSUM_PSEUDO_HDR | CSUM_FRAG_NOT_CHECKED; 294 mp->m_pkthdr.csum_data = htons(0xffff); 295 } 296 } 297 } 298 299 static __inline struct pktinfo * 300 igb_rssinfo(struct mbuf *m, struct pktinfo *pi, 301 uint32_t hash, uint32_t hashtype, uint32_t staterr) 302 { 303 switch (hashtype) { 304 case E1000_RXDADV_RSSTYPE_IPV4_TCP: 305 pi->pi_netisr = NETISR_IP; 306 pi->pi_flags = 0; 307 pi->pi_l3proto = IPPROTO_TCP; 308 break; 309 310 case E1000_RXDADV_RSSTYPE_IPV4: 311 if (staterr & E1000_RXD_STAT_IXSM) 312 return NULL; 313 314 if ((staterr & 315 (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) == 316 E1000_RXD_STAT_TCPCS) { 317 pi->pi_netisr = NETISR_IP; 318 pi->pi_flags = 0; 319 pi->pi_l3proto = IPPROTO_UDP; 320 break; 321 } 322 /* FALL THROUGH */ 323 default: 324 return NULL; 325 } 326 327 m->m_flags |= M_HASH; 328 m->m_pkthdr.hash = toeplitz_hash(hash); 329 return pi; 330 } 331 332 static int 333 igb_probe(device_t dev) 334 { 335 const struct igb_device *d; 336 uint16_t vid, did; 337 338 vid = pci_get_vendor(dev); 339 did = pci_get_device(dev); 340 341 for (d = igb_devices; d->desc != NULL; ++d) { 342 if (vid == d->vid && did == d->did) { 343 device_set_desc(dev, d->desc); 344 return 0; 345 } 346 } 347 return ENXIO; 348 } 349 350 static int 351 igb_attach(device_t dev) 352 { 353 struct igb_softc *sc = device_get_softc(dev); 354 uint16_t eeprom_data; 355 int error = 0, i, j, ring_max; 356 #ifdef IFPOLL_ENABLE 357 int offset, offset_def; 358 #endif 359 360 #ifdef notyet 361 /* SYSCTL stuff */ 362 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 363 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 364 OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0, 365 igb_sysctl_nvm_info, "I", "NVM Information"); 366 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 367 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 368 OID_AUTO, "flow_control", CTLTYPE_INT|CTLFLAG_RW, 369 adapter, 0, igb_set_flowcntl, "I", "Flow Control"); 370 #endif 371 372 callout_init_mp(&sc->timer); 373 lwkt_serialize_init(&sc->main_serialize); 374 375 if_initname(&sc->arpcom.ac_if, device_get_name(dev), 376 device_get_unit(dev)); 377 sc->dev = sc->osdep.dev = dev; 378 379 /* 380 * Determine hardware and mac type 381 */ 382 sc->hw.vendor_id = pci_get_vendor(dev); 383 sc->hw.device_id = pci_get_device(dev); 384 sc->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1); 385 sc->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2); 386 
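	/*
	 * Rough illustration (not functional code): the vendor/device IDs
	 * copied into sc->hw here are the same values igb_probe() matched
	 * against igb_devices[].  Each table entry is produced by the
	 * IGB_DEVICE() macro, e.g.
	 *
	 *	IGB_DEVICE(82576)
	 *
	 * expands to approximately
	 *
	 *	{ IGB_VENDOR_ID, E1000_DEV_ID_82576, "Intel(R) PRO/1000 82576" }
	 *
	 * e1000_set_mac_type() below then derives hw.mac.type from
	 * hw.device_id, which drives the ring-count and feature selection
	 * that follows.
	 */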
sc->hw.subsystem_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2); 387 388 if (e1000_set_mac_type(&sc->hw)) 389 return ENXIO; 390 391 /* Are we a VF device? */ 392 if (sc->hw.mac.type == e1000_vfadapt || 393 sc->hw.mac.type == e1000_vfadapt_i350) 394 sc->vf_ifp = 1; 395 else 396 sc->vf_ifp = 0; 397 398 /* 399 * Configure total supported RX/TX ring count 400 */ 401 switch (sc->hw.mac.type) { 402 case e1000_82575: 403 ring_max = IGB_MAX_RING_82575; 404 break; 405 case e1000_82580: 406 ring_max = IGB_MAX_RING_82580; 407 break; 408 case e1000_i350: 409 ring_max = IGB_MAX_RING_I350; 410 break; 411 case e1000_82576: 412 ring_max = IGB_MAX_RING_82576; 413 break; 414 default: 415 ring_max = IGB_MIN_RING; 416 break; 417 } 418 sc->rx_ring_cnt = device_getenv_int(dev, "rxr", igb_rxr); 419 sc->rx_ring_cnt = if_ring_count2(sc->rx_ring_cnt, ring_max); 420 #ifdef IGB_RSS_DEBUG 421 sc->rx_ring_cnt = device_getenv_int(dev, "rxr_debug", sc->rx_ring_cnt); 422 #endif 423 sc->rx_ring_inuse = sc->rx_ring_cnt; 424 sc->tx_ring_cnt = 1; /* XXX */ 425 426 if (sc->hw.mac.type == e1000_82575) 427 sc->flags |= IGB_FLAG_TSO_IPLEN0; 428 429 /* Enable bus mastering */ 430 pci_enable_busmaster(dev); 431 432 /* 433 * Allocate IO memory 434 */ 435 sc->mem_rid = PCIR_BAR(0); 436 sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid, 437 RF_ACTIVE); 438 if (sc->mem_res == NULL) { 439 device_printf(dev, "Unable to allocate bus resource: memory\n"); 440 error = ENXIO; 441 goto failed; 442 } 443 sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->mem_res); 444 sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->mem_res); 445 446 sc->hw.hw_addr = (uint8_t *)&sc->osdep.mem_bus_space_handle; 447 448 /* Save PCI command register for Shared Code */ 449 sc->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2); 450 sc->hw.back = &sc->osdep; 451 452 /* Do Shared Code initialization */ 453 if (e1000_setup_init_funcs(&sc->hw, TRUE)) { 454 device_printf(dev, "Setup of Shared code failed\n"); 455 error = ENXIO; 456 goto failed; 457 } 458 459 e1000_get_bus_info(&sc->hw); 460 461 sc->hw.mac.autoneg = DO_AUTO_NEG; 462 sc->hw.phy.autoneg_wait_to_complete = FALSE; 463 sc->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT; 464 465 /* Copper options */ 466 if (sc->hw.phy.media_type == e1000_media_type_copper) { 467 sc->hw.phy.mdix = AUTO_ALL_MODES; 468 sc->hw.phy.disable_polarity_correction = FALSE; 469 sc->hw.phy.ms_type = IGB_MASTER_SLAVE; 470 } 471 472 /* Set the frame limits assuming standard ethernet sized frames. 
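	 * With the standard MTU this works out to
	 * 1500 + 14 + 4 = 1518 bytes (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN).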
*/ 473 sc->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN; 474 475 /* Allocate RX/TX rings */ 476 error = igb_alloc_rings(sc); 477 if (error) 478 goto failed; 479 480 #ifdef IFPOLL_ENABLE 481 /* 482 * NPOLLING RX CPU offset 483 */ 484 if (sc->rx_ring_cnt == ncpus2) { 485 offset = 0; 486 } else { 487 offset_def = (sc->rx_ring_cnt * device_get_unit(dev)) % ncpus2; 488 offset = device_getenv_int(dev, "npoll.rxoff", offset_def); 489 if (offset >= ncpus2 || 490 offset % sc->rx_ring_cnt != 0) { 491 device_printf(dev, "invalid npoll.rxoff %d, use %d\n", 492 offset, offset_def); 493 offset = offset_def; 494 } 495 } 496 sc->rx_npoll_off = offset; 497 498 /* 499 * NPOLLING TX CPU offset 500 */ 501 offset_def = sc->rx_npoll_off; 502 offset = device_getenv_int(dev, "npoll.txoff", offset_def); 503 if (offset >= ncpus2) { 504 device_printf(dev, "invalid npoll.txoff %d, use %d\n", 505 offset, offset_def); 506 offset = offset_def; 507 } 508 sc->tx_npoll_off = offset; 509 #endif 510 511 /* Allocate interrupt */ 512 error = igb_alloc_intr(sc); 513 if (error) 514 goto failed; 515 516 /* 517 * Setup serializers 518 */ 519 i = 0; 520 sc->serializes[i++] = &sc->main_serialize; 521 522 sc->tx_serialize = i; 523 for (j = 0; j < sc->tx_ring_cnt; ++j) 524 sc->serializes[i++] = &sc->tx_rings[j].tx_serialize; 525 526 sc->rx_serialize = i; 527 for (j = 0; j < sc->rx_ring_cnt; ++j) 528 sc->serializes[i++] = &sc->rx_rings[j].rx_serialize; 529 530 sc->serialize_cnt = i; 531 KKASSERT(sc->serialize_cnt <= IGB_NSERIALIZE); 532 533 /* Allocate the appropriate stats memory */ 534 if (sc->vf_ifp) { 535 sc->stats = kmalloc(sizeof(struct e1000_vf_stats), M_DEVBUF, 536 M_WAITOK | M_ZERO); 537 igb_vf_init_stats(sc); 538 } else { 539 sc->stats = kmalloc(sizeof(struct e1000_hw_stats), M_DEVBUF, 540 M_WAITOK | M_ZERO); 541 } 542 543 /* Allocate multicast array memory. */ 544 sc->mta = kmalloc(ETHER_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES, 545 M_DEVBUF, M_WAITOK); 546 547 /* Some adapter-specific advanced features */ 548 if (sc->hw.mac.type >= e1000_i350) { 549 #ifdef notyet 550 igb_set_sysctl_value(adapter, "dma_coalesce", 551 "configure dma coalesce", 552 &adapter->dma_coalesce, igb_dma_coalesce); 553 igb_set_sysctl_value(adapter, "eee_disabled", 554 "enable Energy Efficient Ethernet", 555 &adapter->hw.dev_spec._82575.eee_disable, 556 igb_eee_disabled); 557 #else 558 sc->dma_coalesce = igb_dma_coalesce; 559 sc->hw.dev_spec._82575.eee_disable = igb_eee_disabled; 560 #endif 561 e1000_set_eee_i350(&sc->hw); 562 } 563 564 /* 565 * Start from a known state, this is important in reading the nvm and 566 * mac from that. 567 */ 568 e1000_reset_hw(&sc->hw); 569 570 /* Make sure we have a good EEPROM before we read from it */ 571 if (e1000_validate_nvm_checksum(&sc->hw) < 0) { 572 /* 573 * Some PCI-E parts fail the first check due to 574 * the link being in sleep state, call it again, 575 * if it fails a second time its a real issue. 
576 */ 577 if (e1000_validate_nvm_checksum(&sc->hw) < 0) { 578 device_printf(dev, 579 "The EEPROM Checksum Is Not Valid\n"); 580 error = EIO; 581 goto failed; 582 } 583 } 584 585 /* Copy the permanent MAC address out of the EEPROM */ 586 if (e1000_read_mac_addr(&sc->hw) < 0) { 587 device_printf(dev, "EEPROM read error while reading MAC" 588 " address\n"); 589 error = EIO; 590 goto failed; 591 } 592 if (!igb_is_valid_ether_addr(sc->hw.mac.addr)) { 593 device_printf(dev, "Invalid MAC address\n"); 594 error = EIO; 595 goto failed; 596 } 597 598 /* Setup OS specific network interface */ 599 igb_setup_ifp(sc); 600 601 /* Add sysctl tree, must after igb_setup_ifp() */ 602 igb_add_sysctl(sc); 603 604 /* Now get a good starting state */ 605 igb_reset(sc); 606 607 /* Initialize statistics */ 608 igb_update_stats_counters(sc); 609 610 sc->hw.mac.get_link_status = 1; 611 igb_update_link_status(sc); 612 613 /* Indicate SOL/IDER usage */ 614 if (e1000_check_reset_block(&sc->hw)) { 615 device_printf(dev, 616 "PHY reset is blocked due to SOL/IDER session.\n"); 617 } 618 619 /* Determine if we have to control management hardware */ 620 if (e1000_enable_mng_pass_thru(&sc->hw)) 621 sc->flags |= IGB_FLAG_HAS_MGMT; 622 623 /* 624 * Setup Wake-on-Lan 625 */ 626 /* APME bit in EEPROM is mapped to WUC.APME */ 627 eeprom_data = E1000_READ_REG(&sc->hw, E1000_WUC) & E1000_WUC_APME; 628 if (eeprom_data) 629 sc->wol = E1000_WUFC_MAG; 630 /* XXX disable WOL */ 631 sc->wol = 0; 632 633 #ifdef notyet 634 /* Register for VLAN events */ 635 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, 636 igb_register_vlan, adapter, EVENTHANDLER_PRI_FIRST); 637 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, 638 igb_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST); 639 #endif 640 641 #ifdef notyet 642 igb_add_hw_stats(adapter); 643 #endif 644 645 error = igb_setup_intr(sc); 646 if (error) { 647 ether_ifdetach(&sc->arpcom.ac_if); 648 goto failed; 649 } 650 651 for (i = 0; i < sc->tx_ring_cnt; ++i) { 652 struct ifaltq_subque *ifsq = 653 ifq_get_subq(&sc->arpcom.ac_if.if_snd, i); 654 struct igb_tx_ring *txr = &sc->tx_rings[i]; 655 656 ifsq_set_cpuid(ifsq, txr->tx_intr_cpuid); 657 ifsq_set_priv(ifsq, txr); 658 txr->ifsq = ifsq; 659 } 660 661 return 0; 662 663 failed: 664 igb_detach(dev); 665 return error; 666 } 667 668 static int 669 igb_detach(device_t dev) 670 { 671 struct igb_softc *sc = device_get_softc(dev); 672 673 if (device_is_attached(dev)) { 674 struct ifnet *ifp = &sc->arpcom.ac_if; 675 676 ifnet_serialize_all(ifp); 677 678 igb_stop(sc); 679 680 e1000_phy_hw_reset(&sc->hw); 681 682 /* Give control back to firmware */ 683 igb_rel_mgmt(sc); 684 igb_rel_hw_control(sc); 685 686 if (sc->wol) { 687 E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN); 688 E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol); 689 igb_enable_wol(dev); 690 } 691 692 igb_teardown_intr(sc); 693 694 ifnet_deserialize_all(ifp); 695 696 ether_ifdetach(ifp); 697 } else if (sc->mem_res != NULL) { 698 igb_rel_hw_control(sc); 699 } 700 bus_generic_detach(dev); 701 702 if (sc->sysctl_tree != NULL) 703 sysctl_ctx_free(&sc->sysctl_ctx); 704 705 igb_free_intr(sc); 706 707 if (sc->msix_mem_res != NULL) { 708 bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_mem_rid, 709 sc->msix_mem_res); 710 } 711 if (sc->mem_res != NULL) { 712 bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, 713 sc->mem_res); 714 } 715 716 igb_free_rings(sc); 717 718 if (sc->mta != NULL) 719 kfree(sc->mta, M_DEVBUF); 720 if (sc->stats != NULL) 721 kfree(sc->stats, M_DEVBUF); 
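
	/*
	 * Note: igb_attach() calls igb_detach() on its failure path, so the
	 * teardown above must tolerate a partially initialized softc; that
	 * is why each resource is checked against NULL before it is
	 * released.
	 */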
	return 0;
}

static int
igb_shutdown(device_t dev)
{
	return igb_suspend(dev);
}

static int
igb_suspend(device_t dev)
{
	struct igb_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	ifnet_serialize_all(ifp);

	igb_stop(sc);

	igb_rel_mgmt(sc);
	igb_rel_hw_control(sc);

	if (sc->wol) {
		E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN);
		E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol);
		igb_enable_wol(dev);
	}

	ifnet_deserialize_all(ifp);

	return bus_generic_suspend(dev);
}

static int
igb_resume(device_t dev)
{
	struct igb_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ifnet_serialize_all(ifp);

	igb_init(sc);
	igb_get_mgmt(sc);

	for (i = 0; i < sc->tx_ring_cnt; ++i)
		ifsq_devstart(sc->tx_rings[i].ifsq);

	ifnet_deserialize_all(ifp);

	return bus_generic_resume(dev);
}

static int
igb_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct igb_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int max_frame_size, mask, reinit;
	int error = 0;

	ASSERT_IFNET_SERIALIZED_ALL(ifp);

	switch (command) {
	case SIOCSIFMTU:
		max_frame_size = 9234;
		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
		    ETHER_CRC_LEN) {
			error = EINVAL;
			break;
		}

		ifp->if_mtu = ifr->ifr_mtu;
		sc->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN +
		    ETHER_CRC_LEN;

		if (ifp->if_flags & IFF_RUNNING)
			igb_init(sc);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				if ((ifp->if_flags ^ sc->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					igb_disable_promisc(sc);
					igb_set_promisc(sc);
				}
			} else {
				igb_init(sc);
			}
		} else if (ifp->if_flags & IFF_RUNNING) {
			igb_stop(sc);
		}
		sc->if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING) {
			igb_disable_intr(sc);
			igb_set_multi(sc);
#ifdef IFPOLL_ENABLE
			if (!(ifp->if_flags & IFF_NPOLLING))
#endif
				igb_enable_intr(sc);
		}
		break;

	case SIOCSIFMEDIA:
		/*
		 * As the speed/duplex settings are being
		 * changed, we need to reset the PHY.
836 */ 837 sc->hw.phy.reset_disable = FALSE; 838 839 /* Check SOL/IDER usage */ 840 if (e1000_check_reset_block(&sc->hw)) { 841 if_printf(ifp, "Media change is " 842 "blocked due to SOL/IDER session.\n"); 843 break; 844 } 845 /* FALL THROUGH */ 846 847 case SIOCGIFMEDIA: 848 error = ifmedia_ioctl(ifp, ifr, &sc->media, command); 849 break; 850 851 case SIOCSIFCAP: 852 reinit = 0; 853 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 854 if (mask & IFCAP_RXCSUM) { 855 ifp->if_capenable ^= IFCAP_RXCSUM; 856 reinit = 1; 857 } 858 if (mask & IFCAP_VLAN_HWTAGGING) { 859 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 860 reinit = 1; 861 } 862 if (mask & IFCAP_TXCSUM) { 863 ifp->if_capenable ^= IFCAP_TXCSUM; 864 if (ifp->if_capenable & IFCAP_TXCSUM) 865 ifp->if_hwassist |= IGB_CSUM_FEATURES; 866 else 867 ifp->if_hwassist &= ~IGB_CSUM_FEATURES; 868 } 869 if (mask & IFCAP_TSO) { 870 ifp->if_capenable ^= IFCAP_TSO; 871 if (ifp->if_capenable & IFCAP_TSO) 872 ifp->if_hwassist |= CSUM_TSO; 873 else 874 ifp->if_hwassist &= ~CSUM_TSO; 875 } 876 if (mask & IFCAP_RSS) 877 ifp->if_capenable ^= IFCAP_RSS; 878 if (reinit && (ifp->if_flags & IFF_RUNNING)) 879 igb_init(sc); 880 break; 881 882 default: 883 error = ether_ioctl(ifp, command, data); 884 break; 885 } 886 return error; 887 } 888 889 static void 890 igb_init(void *xsc) 891 { 892 struct igb_softc *sc = xsc; 893 struct ifnet *ifp = &sc->arpcom.ac_if; 894 boolean_t polling; 895 int i; 896 897 ASSERT_IFNET_SERIALIZED_ALL(ifp); 898 899 igb_stop(sc); 900 901 /* Get the latest mac address, User can use a LAA */ 902 bcopy(IF_LLADDR(ifp), sc->hw.mac.addr, ETHER_ADDR_LEN); 903 904 /* Put the address into the Receive Address Array */ 905 e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0); 906 907 igb_reset(sc); 908 igb_update_link_status(sc); 909 910 E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN); 911 912 /* Configure for OS presence */ 913 igb_get_mgmt(sc); 914 915 polling = FALSE; 916 #ifdef IFPOLL_ENABLE 917 if (ifp->if_flags & IFF_NPOLLING) 918 polling = TRUE; 919 #endif 920 921 /* Configured used RX/TX rings */ 922 igb_set_ring_inuse(sc, polling); 923 924 /* Initialize interrupt */ 925 igb_init_intr(sc); 926 927 /* Prepare transmit descriptors and buffers */ 928 for (i = 0; i < sc->tx_ring_cnt; ++i) 929 igb_init_tx_ring(&sc->tx_rings[i]); 930 igb_init_tx_unit(sc); 931 932 /* Setup Multicast table */ 933 igb_set_multi(sc); 934 935 #if 0 936 /* 937 * Figure out the desired mbuf pool 938 * for doing jumbo/packetsplit 939 */ 940 if (adapter->max_frame_size <= 2048) 941 adapter->rx_mbuf_sz = MCLBYTES; 942 else if (adapter->max_frame_size <= 4096) 943 adapter->rx_mbuf_sz = MJUMPAGESIZE; 944 else 945 adapter->rx_mbuf_sz = MJUM9BYTES; 946 #endif 947 948 /* Prepare receive descriptors and buffers */ 949 for (i = 0; i < sc->rx_ring_inuse; ++i) { 950 int error; 951 952 error = igb_init_rx_ring(&sc->rx_rings[i]); 953 if (error) { 954 if_printf(ifp, "Could not setup receive structures\n"); 955 igb_stop(sc); 956 return; 957 } 958 } 959 igb_init_rx_unit(sc); 960 961 /* Enable VLAN support */ 962 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) 963 igb_set_vlan(sc); 964 965 /* Don't lose promiscuous settings */ 966 igb_set_promisc(sc); 967 968 ifp->if_flags |= IFF_RUNNING; 969 for (i = 0; i < sc->tx_ring_cnt; ++i) 970 ifsq_clr_oactive(sc->tx_rings[i].ifsq); 971 972 if (polling || sc->intr_type == PCI_INTR_TYPE_MSIX) 973 sc->timer_cpuid = 0; /* XXX fixed */ 974 else 975 sc->timer_cpuid = rman_get_cpuid(sc->intr_res); 976 callout_reset_bycpu(&sc->timer, hz, igb_timer, sc, sc->timer_cpuid); 
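	/*
	 * The callout armed above fires every hz ticks on timer_cpuid;
	 * igb_timer() refreshes link state and statistics and re-arms
	 * itself, so it keeps running until igb_stop() cancels it with
	 * callout_stop().
	 */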
977 e1000_clear_hw_cntrs_base_generic(&sc->hw); 978 979 /* This clears any pending interrupts */ 980 E1000_READ_REG(&sc->hw, E1000_ICR); 981 982 /* 983 * Only enable interrupts if we are not polling, make sure 984 * they are off otherwise. 985 */ 986 if (polling) { 987 igb_disable_intr(sc); 988 } else { 989 igb_enable_intr(sc); 990 E1000_WRITE_REG(&sc->hw, E1000_ICS, E1000_ICS_LSC); 991 } 992 993 /* Set Energy Efficient Ethernet */ 994 e1000_set_eee_i350(&sc->hw); 995 996 /* Don't reset the phy next time init gets called */ 997 sc->hw.phy.reset_disable = TRUE; 998 } 999 1000 static void 1001 igb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 1002 { 1003 struct igb_softc *sc = ifp->if_softc; 1004 u_char fiber_type = IFM_1000_SX; 1005 1006 ASSERT_IFNET_SERIALIZED_ALL(ifp); 1007 1008 igb_update_link_status(sc); 1009 1010 ifmr->ifm_status = IFM_AVALID; 1011 ifmr->ifm_active = IFM_ETHER; 1012 1013 if (!sc->link_active) 1014 return; 1015 1016 ifmr->ifm_status |= IFM_ACTIVE; 1017 1018 if (sc->hw.phy.media_type == e1000_media_type_fiber || 1019 sc->hw.phy.media_type == e1000_media_type_internal_serdes) { 1020 ifmr->ifm_active |= fiber_type | IFM_FDX; 1021 } else { 1022 switch (sc->link_speed) { 1023 case 10: 1024 ifmr->ifm_active |= IFM_10_T; 1025 break; 1026 1027 case 100: 1028 ifmr->ifm_active |= IFM_100_TX; 1029 break; 1030 1031 case 1000: 1032 ifmr->ifm_active |= IFM_1000_T; 1033 break; 1034 } 1035 if (sc->link_duplex == FULL_DUPLEX) 1036 ifmr->ifm_active |= IFM_FDX; 1037 else 1038 ifmr->ifm_active |= IFM_HDX; 1039 } 1040 } 1041 1042 static int 1043 igb_media_change(struct ifnet *ifp) 1044 { 1045 struct igb_softc *sc = ifp->if_softc; 1046 struct ifmedia *ifm = &sc->media; 1047 1048 ASSERT_IFNET_SERIALIZED_ALL(ifp); 1049 1050 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 1051 return EINVAL; 1052 1053 switch (IFM_SUBTYPE(ifm->ifm_media)) { 1054 case IFM_AUTO: 1055 sc->hw.mac.autoneg = DO_AUTO_NEG; 1056 sc->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT; 1057 break; 1058 1059 case IFM_1000_LX: 1060 case IFM_1000_SX: 1061 case IFM_1000_T: 1062 sc->hw.mac.autoneg = DO_AUTO_NEG; 1063 sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; 1064 break; 1065 1066 case IFM_100_TX: 1067 sc->hw.mac.autoneg = FALSE; 1068 sc->hw.phy.autoneg_advertised = 0; 1069 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) 1070 sc->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL; 1071 else 1072 sc->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF; 1073 break; 1074 1075 case IFM_10_T: 1076 sc->hw.mac.autoneg = FALSE; 1077 sc->hw.phy.autoneg_advertised = 0; 1078 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) 1079 sc->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL; 1080 else 1081 sc->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF; 1082 break; 1083 1084 default: 1085 if_printf(ifp, "Unsupported media type\n"); 1086 break; 1087 } 1088 1089 igb_init(sc); 1090 1091 return 0; 1092 } 1093 1094 static void 1095 igb_set_promisc(struct igb_softc *sc) 1096 { 1097 struct ifnet *ifp = &sc->arpcom.ac_if; 1098 struct e1000_hw *hw = &sc->hw; 1099 uint32_t reg; 1100 1101 if (sc->vf_ifp) { 1102 e1000_promisc_set_vf(hw, e1000_promisc_enabled); 1103 return; 1104 } 1105 1106 reg = E1000_READ_REG(hw, E1000_RCTL); 1107 if (ifp->if_flags & IFF_PROMISC) { 1108 reg |= (E1000_RCTL_UPE | E1000_RCTL_MPE); 1109 E1000_WRITE_REG(hw, E1000_RCTL, reg); 1110 } else if (ifp->if_flags & IFF_ALLMULTI) { 1111 reg |= E1000_RCTL_MPE; 1112 reg &= ~E1000_RCTL_UPE; 1113 E1000_WRITE_REG(hw, E1000_RCTL, reg); 1114 } 1115 } 1116 1117 static void 1118 
igb_disable_promisc(struct igb_softc *sc) 1119 { 1120 struct e1000_hw *hw = &sc->hw; 1121 uint32_t reg; 1122 1123 if (sc->vf_ifp) { 1124 e1000_promisc_set_vf(hw, e1000_promisc_disabled); 1125 return; 1126 } 1127 reg = E1000_READ_REG(hw, E1000_RCTL); 1128 reg &= ~E1000_RCTL_UPE; 1129 reg &= ~E1000_RCTL_MPE; 1130 E1000_WRITE_REG(hw, E1000_RCTL, reg); 1131 } 1132 1133 static void 1134 igb_set_multi(struct igb_softc *sc) 1135 { 1136 struct ifnet *ifp = &sc->arpcom.ac_if; 1137 struct ifmultiaddr *ifma; 1138 uint32_t reg_rctl = 0; 1139 uint8_t *mta; 1140 int mcnt = 0; 1141 1142 mta = sc->mta; 1143 bzero(mta, ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES); 1144 1145 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1146 if (ifma->ifma_addr->sa_family != AF_LINK) 1147 continue; 1148 1149 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES) 1150 break; 1151 1152 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 1153 &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN); 1154 mcnt++; 1155 } 1156 1157 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) { 1158 reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL); 1159 reg_rctl |= E1000_RCTL_MPE; 1160 E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl); 1161 } else { 1162 e1000_update_mc_addr_list(&sc->hw, mta, mcnt); 1163 } 1164 } 1165 1166 static void 1167 igb_timer(void *xsc) 1168 { 1169 struct igb_softc *sc = xsc; 1170 1171 lwkt_serialize_enter(&sc->main_serialize); 1172 1173 igb_update_link_status(sc); 1174 igb_update_stats_counters(sc); 1175 1176 callout_reset_bycpu(&sc->timer, hz, igb_timer, sc, sc->timer_cpuid); 1177 1178 lwkt_serialize_exit(&sc->main_serialize); 1179 } 1180 1181 static void 1182 igb_update_link_status(struct igb_softc *sc) 1183 { 1184 struct ifnet *ifp = &sc->arpcom.ac_if; 1185 struct e1000_hw *hw = &sc->hw; 1186 uint32_t link_check, thstat, ctrl; 1187 1188 link_check = thstat = ctrl = 0; 1189 1190 /* Get the cached link value or read for real */ 1191 switch (hw->phy.media_type) { 1192 case e1000_media_type_copper: 1193 if (hw->mac.get_link_status) { 1194 /* Do the work to read phy */ 1195 e1000_check_for_link(hw); 1196 link_check = !hw->mac.get_link_status; 1197 } else { 1198 link_check = TRUE; 1199 } 1200 break; 1201 1202 case e1000_media_type_fiber: 1203 e1000_check_for_link(hw); 1204 link_check = E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU; 1205 break; 1206 1207 case e1000_media_type_internal_serdes: 1208 e1000_check_for_link(hw); 1209 link_check = hw->mac.serdes_has_link; 1210 break; 1211 1212 /* VF device is type_unknown */ 1213 case e1000_media_type_unknown: 1214 e1000_check_for_link(hw); 1215 link_check = !hw->mac.get_link_status; 1216 /* Fall thru */ 1217 default: 1218 break; 1219 } 1220 1221 /* Check for thermal downshift or shutdown */ 1222 if (hw->mac.type == e1000_i350) { 1223 thstat = E1000_READ_REG(hw, E1000_THSTAT); 1224 ctrl = E1000_READ_REG(hw, E1000_CTRL_EXT); 1225 } 1226 1227 /* Now we check if a transition has happened */ 1228 if (link_check && sc->link_active == 0) { 1229 e1000_get_speed_and_duplex(hw, 1230 &sc->link_speed, &sc->link_duplex); 1231 if (bootverbose) { 1232 if_printf(ifp, "Link is up %d Mbps %s\n", 1233 sc->link_speed, 1234 sc->link_duplex == FULL_DUPLEX ? 
1235 "Full Duplex" : "Half Duplex"); 1236 } 1237 sc->link_active = 1; 1238 1239 ifp->if_baudrate = sc->link_speed * 1000000; 1240 if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) && 1241 (thstat & E1000_THSTAT_LINK_THROTTLE)) 1242 if_printf(ifp, "Link: thermal downshift\n"); 1243 /* This can sleep */ 1244 ifp->if_link_state = LINK_STATE_UP; 1245 if_link_state_change(ifp); 1246 } else if (!link_check && sc->link_active == 1) { 1247 ifp->if_baudrate = sc->link_speed = 0; 1248 sc->link_duplex = 0; 1249 if (bootverbose) 1250 if_printf(ifp, "Link is Down\n"); 1251 if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) && 1252 (thstat & E1000_THSTAT_PWR_DOWN)) 1253 if_printf(ifp, "Link: thermal shutdown\n"); 1254 sc->link_active = 0; 1255 /* This can sleep */ 1256 ifp->if_link_state = LINK_STATE_DOWN; 1257 if_link_state_change(ifp); 1258 } 1259 } 1260 1261 static void 1262 igb_stop(struct igb_softc *sc) 1263 { 1264 struct ifnet *ifp = &sc->arpcom.ac_if; 1265 int i; 1266 1267 ASSERT_IFNET_SERIALIZED_ALL(ifp); 1268 1269 igb_disable_intr(sc); 1270 1271 callout_stop(&sc->timer); 1272 1273 ifp->if_flags &= ~IFF_RUNNING; 1274 for (i = 0; i < sc->tx_ring_cnt; ++i) 1275 ifsq_clr_oactive(sc->tx_rings[i].ifsq); 1276 ifp->if_timer = 0; 1277 1278 e1000_reset_hw(&sc->hw); 1279 E1000_WRITE_REG(&sc->hw, E1000_WUC, 0); 1280 1281 e1000_led_off(&sc->hw); 1282 e1000_cleanup_led(&sc->hw); 1283 1284 for (i = 0; i < sc->tx_ring_cnt; ++i) 1285 igb_free_tx_ring(&sc->tx_rings[i]); 1286 for (i = 0; i < sc->rx_ring_cnt; ++i) 1287 igb_free_rx_ring(&sc->rx_rings[i]); 1288 } 1289 1290 static void 1291 igb_reset(struct igb_softc *sc) 1292 { 1293 struct ifnet *ifp = &sc->arpcom.ac_if; 1294 struct e1000_hw *hw = &sc->hw; 1295 struct e1000_fc_info *fc = &hw->fc; 1296 uint32_t pba = 0; 1297 uint16_t hwm; 1298 1299 /* Let the firmware know the OS is in control */ 1300 igb_get_hw_control(sc); 1301 1302 /* 1303 * Packet Buffer Allocation (PBA) 1304 * Writing PBA sets the receive portion of the buffer 1305 * the remainder is used for the transmit buffer. 1306 */ 1307 switch (hw->mac.type) { 1308 case e1000_82575: 1309 pba = E1000_PBA_32K; 1310 break; 1311 1312 case e1000_82576: 1313 case e1000_vfadapt: 1314 pba = E1000_READ_REG(hw, E1000_RXPBS); 1315 pba &= E1000_RXPBS_SIZE_MASK_82576; 1316 break; 1317 1318 case e1000_82580: 1319 case e1000_i350: 1320 case e1000_vfadapt_i350: 1321 pba = E1000_READ_REG(hw, E1000_RXPBS); 1322 pba = e1000_rxpbs_adjust_82580(pba); 1323 break; 1324 /* XXX pba = E1000_PBA_35K; */ 1325 1326 default: 1327 break; 1328 } 1329 1330 /* Special needs in case of Jumbo frames */ 1331 if (hw->mac.type == e1000_82575 && ifp->if_mtu > ETHERMTU) { 1332 uint32_t tx_space, min_tx, min_rx; 1333 1334 pba = E1000_READ_REG(hw, E1000_PBA); 1335 tx_space = pba >> 16; 1336 pba &= 0xffff; 1337 1338 min_tx = (sc->max_frame_size + 1339 sizeof(struct e1000_tx_desc) - ETHER_CRC_LEN) * 2; 1340 min_tx = roundup2(min_tx, 1024); 1341 min_tx >>= 10; 1342 min_rx = sc->max_frame_size; 1343 min_rx = roundup2(min_rx, 1024); 1344 min_rx >>= 10; 1345 if (tx_space < min_tx && (min_tx - tx_space) < pba) { 1346 pba = pba - (min_tx - tx_space); 1347 /* 1348 * if short on rx space, rx wins 1349 * and must trump tx adjustment 1350 */ 1351 if (pba < min_rx) 1352 pba = min_rx; 1353 } 1354 E1000_WRITE_REG(hw, E1000_PBA, pba); 1355 } 1356 1357 /* 1358 * These parameters control the automatic generation (Tx) and 1359 * response (Rx) to Ethernet PAUSE frames. 1360 * - High water mark should allow for at least two frames to be 1361 * received after sending an XOFF. 
1362 * - Low water mark works best when it is very near the high water mark. 1363 * This allows the receiver to restart by sending XON when it has 1364 * drained a bit. 1365 */ 1366 hwm = min(((pba << 10) * 9 / 10), 1367 ((pba << 10) - 2 * sc->max_frame_size)); 1368 1369 if (hw->mac.type < e1000_82576) { 1370 fc->high_water = hwm & 0xFFF8; /* 8-byte granularity */ 1371 fc->low_water = fc->high_water - 8; 1372 } else { 1373 fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */ 1374 fc->low_water = fc->high_water - 16; 1375 } 1376 fc->pause_time = IGB_FC_PAUSE_TIME; 1377 fc->send_xon = TRUE; 1378 1379 /* Issue a global reset */ 1380 e1000_reset_hw(hw); 1381 E1000_WRITE_REG(hw, E1000_WUC, 0); 1382 1383 if (e1000_init_hw(hw) < 0) 1384 if_printf(ifp, "Hardware Initialization Failed\n"); 1385 1386 /* Setup DMA Coalescing */ 1387 if (hw->mac.type == e1000_i350 && sc->dma_coalesce) { 1388 uint32_t reg; 1389 1390 hwm = (pba - 4) << 10; 1391 reg = ((pba - 6) << E1000_DMACR_DMACTHR_SHIFT) 1392 & E1000_DMACR_DMACTHR_MASK; 1393 1394 /* transition to L0x or L1 if available..*/ 1395 reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK); 1396 1397 /* timer = +-1000 usec in 32usec intervals */ 1398 reg |= (1000 >> 5); 1399 E1000_WRITE_REG(hw, E1000_DMACR, reg); 1400 1401 /* No lower threshold */ 1402 E1000_WRITE_REG(hw, E1000_DMCRTRH, 0); 1403 1404 /* set hwm to PBA - 2 * max frame size */ 1405 E1000_WRITE_REG(hw, E1000_FCRTC, hwm); 1406 1407 /* Set the interval before transition */ 1408 reg = E1000_READ_REG(hw, E1000_DMCTLX); 1409 reg |= 0x800000FF; /* 255 usec */ 1410 E1000_WRITE_REG(hw, E1000_DMCTLX, reg); 1411 1412 /* free space in tx packet buffer to wake from DMA coal */ 1413 E1000_WRITE_REG(hw, E1000_DMCTXTH, 1414 (20480 - (2 * sc->max_frame_size)) >> 6); 1415 1416 /* make low power state decision controlled by DMA coal */ 1417 reg = E1000_READ_REG(hw, E1000_PCIEMISC); 1418 E1000_WRITE_REG(hw, E1000_PCIEMISC, 1419 reg | E1000_PCIEMISC_LX_DECISION); 1420 if_printf(ifp, "DMA Coalescing enabled\n"); 1421 } 1422 1423 E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN); 1424 e1000_get_phy_info(hw); 1425 e1000_check_for_link(hw); 1426 } 1427 1428 static void 1429 igb_setup_ifp(struct igb_softc *sc) 1430 { 1431 struct ifnet *ifp = &sc->arpcom.ac_if; 1432 1433 ifp->if_softc = sc; 1434 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1435 ifp->if_init = igb_init; 1436 ifp->if_ioctl = igb_ioctl; 1437 ifp->if_start = igb_start; 1438 ifp->if_serialize = igb_serialize; 1439 ifp->if_deserialize = igb_deserialize; 1440 ifp->if_tryserialize = igb_tryserialize; 1441 #ifdef INVARIANTS 1442 ifp->if_serialize_assert = igb_serialize_assert; 1443 #endif 1444 #ifdef IFPOLL_ENABLE 1445 ifp->if_npoll = igb_npoll; 1446 #endif 1447 ifp->if_watchdog = igb_watchdog; 1448 1449 ifq_set_maxlen(&ifp->if_snd, sc->tx_rings[0].num_tx_desc - 1); 1450 ifq_set_ready(&ifp->if_snd); 1451 1452 ether_ifattach(ifp, sc->hw.mac.addr, NULL); 1453 1454 ifp->if_capabilities = 1455 IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_TSO; 1456 if (IGB_ENABLE_HWRSS(sc)) 1457 ifp->if_capabilities |= IFCAP_RSS; 1458 ifp->if_capenable = ifp->if_capabilities; 1459 ifp->if_hwassist = IGB_CSUM_FEATURES | CSUM_TSO; 1460 1461 /* 1462 * Tell the upper layer(s) we support long frames 1463 */ 1464 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 1465 1466 /* 1467 * Specify the media types supported by this adapter and register 1468 * callbacks to update media and link information 1469 */ 1470 ifmedia_init(&sc->media, 
	    IFM_IMASK, igb_media_change, igb_media_status);
	if (sc->hw.phy.media_type == e1000_media_type_fiber ||
	    sc->hw.phy.media_type == e1000_media_type_internal_serdes) {
		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
		    0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
	} else {
		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX,
		    0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
		    0, NULL);
		if (sc->hw.phy.type != e1000_phy_ife) {
			ifmedia_add(&sc->media,
			    IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
			ifmedia_add(&sc->media,
			    IFM_ETHER | IFM_1000_T, 0, NULL);
		}
	}
	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
}

static void
igb_add_sysctl(struct igb_softc *sc)
{
	char node[32];
	int i;

	sysctl_ctx_init(&sc->sysctl_ctx);
	sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
	    device_get_nameunit(sc->dev), CTLFLAG_RD, 0, "");
	if (sc->sysctl_tree == NULL) {
		device_printf(sc->dev, "can't add sysctl node\n");
		return;
	}

	SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "rxr", CTLFLAG_RD, &sc->rx_ring_cnt, 0, "# of RX rings");
	SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "rxr_inuse", CTLFLAG_RD, &sc->rx_ring_inuse, 0,
	    "# of RX rings used");
	SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "rxd", CTLFLAG_RD, &sc->rx_rings[0].num_rx_desc, 0,
	    "# of RX descs");
	SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "txd", CTLFLAG_RD, &sc->tx_rings[0].num_tx_desc, 0,
	    "# of TX descs");

	if (sc->intr_type != PCI_INTR_TYPE_MSIX) {
		SYSCTL_ADD_PROC(&sc->sysctl_ctx,
		    SYSCTL_CHILDREN(sc->sysctl_tree),
		    OID_AUTO, "intr_rate", CTLTYPE_INT | CTLFLAG_RW,
		    sc, 0, igb_sysctl_intr_rate, "I", "interrupt rate");
	} else {
		for (i = 0; i < sc->msix_cnt; ++i) {
			struct igb_msix_data *msix = &sc->msix_data[i];

			ksnprintf(node, sizeof(node), "msix%d_rate", i);
			SYSCTL_ADD_PROC(&sc->sysctl_ctx,
			    SYSCTL_CHILDREN(sc->sysctl_tree),
			    OID_AUTO, node, CTLTYPE_INT | CTLFLAG_RW,
			    msix, 0, igb_sysctl_msix_rate, "I",
			    msix->msix_rate_desc);
		}
	}

	SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "tx_intr_nsegs", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, igb_sysctl_tx_intr_nsegs, "I",
	    "# of segments per TX interrupt");

	SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "tx_wreg_nsegs", CTLFLAG_RW,
	    &sc->tx_rings[0].wreg_nsegs, 0,
	    "# of segments before write to hardware register");

#ifdef IFPOLL_ENABLE
	SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "npoll_rxoff", CTLTYPE_INT|CTLFLAG_RW,
	    sc, 0, igb_sysctl_npoll_rxoff, "I", "NPOLLING RX cpu offset");
	SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
	    OID_AUTO, "npoll_txoff", CTLTYPE_INT|CTLFLAG_RW,
	    sc, 0, igb_sysctl_npoll_txoff, "I", "NPOLLING TX cpu offset");
#endif

#ifdef IGB_RSS_DEBUG
	SYSCTL_ADD_INT(&sc->sysctl_ctx,
SYSCTL_CHILDREN(sc->sysctl_tree), 1560 OID_AUTO, "rss_debug", CTLFLAG_RW, &sc->rss_debug, 0, 1561 "RSS debug level"); 1562 #endif 1563 for (i = 0; i < sc->rx_ring_cnt; ++i) { 1564 #ifdef IGB_RSS_DEBUG 1565 ksnprintf(node, sizeof(node), "rx%d_pkt", i); 1566 SYSCTL_ADD_ULONG(&sc->sysctl_ctx, 1567 SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, node, 1568 CTLFLAG_RW, &sc->rx_rings[i].rx_packets, "RXed packets"); 1569 #endif 1570 ksnprintf(node, sizeof(node), "rx%d_wreg", i); 1571 SYSCTL_ADD_INT(&sc->sysctl_ctx, 1572 SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, node, 1573 CTLFLAG_RW, &sc->rx_rings[i].rx_wreg, 0, 1574 "# of segments before write to hardare register"); 1575 } 1576 } 1577 1578 static int 1579 igb_alloc_rings(struct igb_softc *sc) 1580 { 1581 int error, i; 1582 1583 /* 1584 * Create top level busdma tag 1585 */ 1586 error = bus_dma_tag_create(NULL, 1, 0, 1587 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 1588 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, 1589 &sc->parent_tag); 1590 if (error) { 1591 device_printf(sc->dev, "could not create top level DMA tag\n"); 1592 return error; 1593 } 1594 1595 /* 1596 * Allocate TX descriptor rings and buffers 1597 */ 1598 sc->tx_rings = kmalloc_cachealign( 1599 sizeof(struct igb_tx_ring) * sc->tx_ring_cnt, 1600 M_DEVBUF, M_WAITOK | M_ZERO); 1601 for (i = 0; i < sc->tx_ring_cnt; ++i) { 1602 struct igb_tx_ring *txr = &sc->tx_rings[i]; 1603 1604 /* Set up some basics */ 1605 txr->sc = sc; 1606 txr->me = i; 1607 lwkt_serialize_init(&txr->tx_serialize); 1608 1609 error = igb_create_tx_ring(txr); 1610 if (error) 1611 return error; 1612 } 1613 1614 /* 1615 * Allocate RX descriptor rings and buffers 1616 */ 1617 sc->rx_rings = kmalloc_cachealign( 1618 sizeof(struct igb_rx_ring) * sc->rx_ring_cnt, 1619 M_DEVBUF, M_WAITOK | M_ZERO); 1620 for (i = 0; i < sc->rx_ring_cnt; ++i) { 1621 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 1622 1623 /* Set up some basics */ 1624 rxr->sc = sc; 1625 rxr->me = i; 1626 lwkt_serialize_init(&rxr->rx_serialize); 1627 1628 error = igb_create_rx_ring(rxr); 1629 if (error) 1630 return error; 1631 } 1632 1633 return 0; 1634 } 1635 1636 static void 1637 igb_free_rings(struct igb_softc *sc) 1638 { 1639 int i; 1640 1641 if (sc->tx_rings != NULL) { 1642 for (i = 0; i < sc->tx_ring_cnt; ++i) { 1643 struct igb_tx_ring *txr = &sc->tx_rings[i]; 1644 1645 igb_destroy_tx_ring(txr, txr->num_tx_desc); 1646 } 1647 kfree(sc->tx_rings, M_DEVBUF); 1648 } 1649 1650 if (sc->rx_rings != NULL) { 1651 for (i = 0; i < sc->rx_ring_cnt; ++i) { 1652 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 1653 1654 igb_destroy_rx_ring(rxr, rxr->num_rx_desc); 1655 } 1656 kfree(sc->rx_rings, M_DEVBUF); 1657 } 1658 } 1659 1660 static int 1661 igb_create_tx_ring(struct igb_tx_ring *txr) 1662 { 1663 int tsize, error, i, ntxd; 1664 1665 /* 1666 * Validate number of transmit descriptors. It must not exceed 1667 * hardware maximum, and must be multiple of IGB_DBA_ALIGN. 
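	 * (sizeof(struct e1000_tx_desc) is 16 bytes, so the alignment check
	 * boils down to requiring a descriptor count that is a multiple of
	 * IGB_DBA_ALIGN / 16 -- e.g. a multiple of 8 if IGB_DBA_ALIGN is 128.)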
1668 */ 1669 ntxd = device_getenv_int(txr->sc->dev, "txd", igb_txd); 1670 if ((ntxd * sizeof(struct e1000_tx_desc)) % IGB_DBA_ALIGN != 0 || 1671 ntxd > IGB_MAX_TXD || ntxd < IGB_MIN_TXD) { 1672 device_printf(txr->sc->dev, 1673 "Using %d TX descriptors instead of %d!\n", 1674 IGB_DEFAULT_TXD, ntxd); 1675 txr->num_tx_desc = IGB_DEFAULT_TXD; 1676 } else { 1677 txr->num_tx_desc = ntxd; 1678 } 1679 1680 /* 1681 * Allocate TX descriptor ring 1682 */ 1683 tsize = roundup2(txr->num_tx_desc * sizeof(union e1000_adv_tx_desc), 1684 IGB_DBA_ALIGN); 1685 txr->txdma.dma_vaddr = bus_dmamem_coherent_any(txr->sc->parent_tag, 1686 IGB_DBA_ALIGN, tsize, BUS_DMA_WAITOK, 1687 &txr->txdma.dma_tag, &txr->txdma.dma_map, &txr->txdma.dma_paddr); 1688 if (txr->txdma.dma_vaddr == NULL) { 1689 device_printf(txr->sc->dev, 1690 "Unable to allocate TX Descriptor memory\n"); 1691 return ENOMEM; 1692 } 1693 txr->tx_base = txr->txdma.dma_vaddr; 1694 bzero(txr->tx_base, tsize); 1695 1696 tsize = __VM_CACHELINE_ALIGN( 1697 sizeof(struct igb_tx_buf) * txr->num_tx_desc); 1698 txr->tx_buf = kmalloc_cachealign(tsize, M_DEVBUF, M_WAITOK | M_ZERO); 1699 1700 /* 1701 * Allocate TX head write-back buffer 1702 */ 1703 txr->tx_hdr = bus_dmamem_coherent_any(txr->sc->parent_tag, 1704 __VM_CACHELINE_SIZE, __VM_CACHELINE_SIZE, BUS_DMA_WAITOK, 1705 &txr->tx_hdr_dtag, &txr->tx_hdr_dmap, &txr->tx_hdr_paddr); 1706 if (txr->tx_hdr == NULL) { 1707 device_printf(txr->sc->dev, 1708 "Unable to allocate TX head write-back buffer\n"); 1709 return ENOMEM; 1710 } 1711 1712 /* 1713 * Create DMA tag for TX buffers 1714 */ 1715 error = bus_dma_tag_create(txr->sc->parent_tag, 1716 1, 0, /* alignment, bounds */ 1717 BUS_SPACE_MAXADDR, /* lowaddr */ 1718 BUS_SPACE_MAXADDR, /* highaddr */ 1719 NULL, NULL, /* filter, filterarg */ 1720 IGB_TSO_SIZE, /* maxsize */ 1721 IGB_MAX_SCATTER, /* nsegments */ 1722 PAGE_SIZE, /* maxsegsize */ 1723 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | 1724 BUS_DMA_ONEBPAGE, /* flags */ 1725 &txr->tx_tag); 1726 if (error) { 1727 device_printf(txr->sc->dev, "Unable to allocate TX DMA tag\n"); 1728 kfree(txr->tx_buf, M_DEVBUF); 1729 txr->tx_buf = NULL; 1730 return error; 1731 } 1732 1733 /* 1734 * Create DMA maps for TX buffers 1735 */ 1736 for (i = 0; i < txr->num_tx_desc; ++i) { 1737 struct igb_tx_buf *txbuf = &txr->tx_buf[i]; 1738 1739 error = bus_dmamap_create(txr->tx_tag, 1740 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, &txbuf->map); 1741 if (error) { 1742 device_printf(txr->sc->dev, 1743 "Unable to create TX DMA map\n"); 1744 igb_destroy_tx_ring(txr, i); 1745 return error; 1746 } 1747 } 1748 1749 /* 1750 * Initialize various watermark 1751 */ 1752 txr->spare_desc = IGB_TX_SPARE; 1753 txr->intr_nsegs = txr->num_tx_desc / 16; 1754 txr->wreg_nsegs = 8; 1755 txr->oact_hi_desc = txr->num_tx_desc / 2; 1756 txr->oact_lo_desc = txr->num_tx_desc / 8; 1757 if (txr->oact_lo_desc > IGB_TX_OACTIVE_MAX) 1758 txr->oact_lo_desc = IGB_TX_OACTIVE_MAX; 1759 if (txr->oact_lo_desc < txr->spare_desc + IGB_TX_RESERVED) 1760 txr->oact_lo_desc = txr->spare_desc + IGB_TX_RESERVED; 1761 1762 return 0; 1763 } 1764 1765 static void 1766 igb_free_tx_ring(struct igb_tx_ring *txr) 1767 { 1768 int i; 1769 1770 for (i = 0; i < txr->num_tx_desc; ++i) { 1771 struct igb_tx_buf *txbuf = &txr->tx_buf[i]; 1772 1773 if (txbuf->m_head != NULL) { 1774 bus_dmamap_unload(txr->tx_tag, txbuf->map); 1775 m_freem(txbuf->m_head); 1776 txbuf->m_head = NULL; 1777 } 1778 } 1779 } 1780 1781 static void 1782 igb_destroy_tx_ring(struct igb_tx_ring *txr, int ndesc) 1783 { 1784 int i; 1785 1786 if 
(txr->txdma.dma_vaddr != NULL) { 1787 bus_dmamap_unload(txr->txdma.dma_tag, txr->txdma.dma_map); 1788 bus_dmamem_free(txr->txdma.dma_tag, txr->txdma.dma_vaddr, 1789 txr->txdma.dma_map); 1790 bus_dma_tag_destroy(txr->txdma.dma_tag); 1791 txr->txdma.dma_vaddr = NULL; 1792 } 1793 1794 if (txr->tx_hdr != NULL) { 1795 bus_dmamap_unload(txr->tx_hdr_dtag, txr->tx_hdr_dmap); 1796 bus_dmamem_free(txr->tx_hdr_dtag, txr->tx_hdr, 1797 txr->tx_hdr_dmap); 1798 bus_dma_tag_destroy(txr->tx_hdr_dtag); 1799 txr->tx_hdr = NULL; 1800 } 1801 1802 if (txr->tx_buf == NULL) 1803 return; 1804 1805 for (i = 0; i < ndesc; ++i) { 1806 struct igb_tx_buf *txbuf = &txr->tx_buf[i]; 1807 1808 KKASSERT(txbuf->m_head == NULL); 1809 bus_dmamap_destroy(txr->tx_tag, txbuf->map); 1810 } 1811 bus_dma_tag_destroy(txr->tx_tag); 1812 1813 kfree(txr->tx_buf, M_DEVBUF); 1814 txr->tx_buf = NULL; 1815 } 1816 1817 static void 1818 igb_init_tx_ring(struct igb_tx_ring *txr) 1819 { 1820 /* Clear the old descriptor contents */ 1821 bzero(txr->tx_base, 1822 sizeof(union e1000_adv_tx_desc) * txr->num_tx_desc); 1823 1824 /* Clear TX head write-back buffer */ 1825 *(txr->tx_hdr) = 0; 1826 1827 /* Reset indices */ 1828 txr->next_avail_desc = 0; 1829 txr->next_to_clean = 0; 1830 txr->tx_nsegs = 0; 1831 1832 /* Set number of descriptors available */ 1833 txr->tx_avail = txr->num_tx_desc; 1834 } 1835 1836 static void 1837 igb_init_tx_unit(struct igb_softc *sc) 1838 { 1839 struct e1000_hw *hw = &sc->hw; 1840 uint32_t tctl; 1841 int i; 1842 1843 /* Setup the Tx Descriptor Rings */ 1844 for (i = 0; i < sc->tx_ring_cnt; ++i) { 1845 struct igb_tx_ring *txr = &sc->tx_rings[i]; 1846 uint64_t bus_addr = txr->txdma.dma_paddr; 1847 uint64_t hdr_paddr = txr->tx_hdr_paddr; 1848 uint32_t txdctl = 0; 1849 uint32_t dca_txctrl; 1850 1851 E1000_WRITE_REG(hw, E1000_TDLEN(i), 1852 txr->num_tx_desc * sizeof(struct e1000_tx_desc)); 1853 E1000_WRITE_REG(hw, E1000_TDBAH(i), 1854 (uint32_t)(bus_addr >> 32)); 1855 E1000_WRITE_REG(hw, E1000_TDBAL(i), 1856 (uint32_t)bus_addr); 1857 1858 /* Setup the HW Tx Head and Tail descriptor pointers */ 1859 E1000_WRITE_REG(hw, E1000_TDT(i), 0); 1860 E1000_WRITE_REG(hw, E1000_TDH(i), 0); 1861 1862 dca_txctrl = E1000_READ_REG(hw, E1000_DCA_TXCTRL(i)); 1863 dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN; 1864 E1000_WRITE_REG(hw, E1000_DCA_TXCTRL(i), dca_txctrl); 1865 1866 /* 1867 * Don't set WB_on_EITR: 1868 * - 82575 does not have it 1869 * - It almost has no effect on 82576, see: 1870 * 82576 specification update errata #26 1871 * - It causes unnecessary bus traffic 1872 */ 1873 E1000_WRITE_REG(hw, E1000_TDWBAH(i), 1874 (uint32_t)(hdr_paddr >> 32)); 1875 E1000_WRITE_REG(hw, E1000_TDWBAL(i), 1876 ((uint32_t)hdr_paddr) | E1000_TX_HEAD_WB_ENABLE); 1877 1878 /* 1879 * WTHRESH is ignored by the hardware, since header 1880 * write back mode is used. 1881 */ 1882 txdctl |= IGB_TX_PTHRESH; 1883 txdctl |= IGB_TX_HTHRESH << 8; 1884 txdctl |= IGB_TX_WTHRESH << 16; 1885 txdctl |= E1000_TXDCTL_QUEUE_ENABLE; 1886 E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl); 1887 } 1888 1889 if (sc->vf_ifp) 1890 return; 1891 1892 e1000_config_collision_dist(hw); 1893 1894 /* Program the Transmit Control Register */ 1895 tctl = E1000_READ_REG(hw, E1000_TCTL); 1896 tctl &= ~E1000_TCTL_CT; 1897 tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN | 1898 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT)); 1899 1900 /* This write will effectively turn on the transmit unit. 
*/ 1901 E1000_WRITE_REG(hw, E1000_TCTL, tctl); 1902 } 1903 1904 static boolean_t 1905 igb_txcsum_ctx(struct igb_tx_ring *txr, struct mbuf *mp) 1906 { 1907 struct e1000_adv_tx_context_desc *TXD; 1908 uint32_t vlan_macip_lens, type_tucmd_mlhl, mss_l4len_idx; 1909 int ehdrlen, ctxd, ip_hlen = 0; 1910 boolean_t offload = TRUE; 1911 1912 if ((mp->m_pkthdr.csum_flags & IGB_CSUM_FEATURES) == 0) 1913 offload = FALSE; 1914 1915 vlan_macip_lens = type_tucmd_mlhl = mss_l4len_idx = 0; 1916 1917 ctxd = txr->next_avail_desc; 1918 TXD = (struct e1000_adv_tx_context_desc *)&txr->tx_base[ctxd]; 1919 1920 /* 1921 * In advanced descriptors the vlan tag must 1922 * be placed into the context descriptor, thus 1923 * we need to be here just for that setup. 1924 */ 1925 if (mp->m_flags & M_VLANTAG) { 1926 uint16_t vlantag; 1927 1928 vlantag = htole16(mp->m_pkthdr.ether_vlantag); 1929 vlan_macip_lens |= (vlantag << E1000_ADVTXD_VLAN_SHIFT); 1930 } else if (!offload) { 1931 return FALSE; 1932 } 1933 1934 ehdrlen = mp->m_pkthdr.csum_lhlen; 1935 KASSERT(ehdrlen > 0, ("invalid ether hlen")); 1936 1937 /* Set the ether header length */ 1938 vlan_macip_lens |= ehdrlen << E1000_ADVTXD_MACLEN_SHIFT; 1939 if (mp->m_pkthdr.csum_flags & CSUM_IP) { 1940 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4; 1941 ip_hlen = mp->m_pkthdr.csum_iphlen; 1942 KASSERT(ip_hlen > 0, ("invalid ip hlen")); 1943 } 1944 vlan_macip_lens |= ip_hlen; 1945 1946 type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT; 1947 if (mp->m_pkthdr.csum_flags & CSUM_TCP) 1948 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP; 1949 else if (mp->m_pkthdr.csum_flags & CSUM_UDP) 1950 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP; 1951 1952 /* 82575 needs the queue index added */ 1953 if (txr->sc->hw.mac.type == e1000_82575) 1954 mss_l4len_idx = txr->me << 4; 1955 1956 /* Now copy bits into descriptor */ 1957 TXD->vlan_macip_lens = htole32(vlan_macip_lens); 1958 TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl); 1959 TXD->seqnum_seed = htole32(0); 1960 TXD->mss_l4len_idx = htole32(mss_l4len_idx); 1961 1962 /* We've consumed the first desc, adjust counters */ 1963 if (++ctxd == txr->num_tx_desc) 1964 ctxd = 0; 1965 txr->next_avail_desc = ctxd; 1966 --txr->tx_avail; 1967 1968 return offload; 1969 } 1970 1971 static void 1972 igb_txeof(struct igb_tx_ring *txr) 1973 { 1974 struct ifnet *ifp = &txr->sc->arpcom.ac_if; 1975 int first, hdr, avail; 1976 1977 if (txr->tx_avail == txr->num_tx_desc) 1978 return; 1979 1980 first = txr->next_to_clean; 1981 hdr = *(txr->tx_hdr); 1982 1983 if (first == hdr) 1984 return; 1985 1986 avail = txr->tx_avail; 1987 while (first != hdr) { 1988 struct igb_tx_buf *txbuf = &txr->tx_buf[first]; 1989 1990 ++avail; 1991 if (txbuf->m_head) { 1992 bus_dmamap_unload(txr->tx_tag, txbuf->map); 1993 m_freem(txbuf->m_head); 1994 txbuf->m_head = NULL; 1995 ++ifp->if_opackets; 1996 } 1997 if (++first == txr->num_tx_desc) 1998 first = 0; 1999 } 2000 txr->next_to_clean = first; 2001 txr->tx_avail = avail; 2002 2003 /* 2004 * If we have a minimum free, clear OACTIVE 2005 * to tell the stack that it is OK to send packets. 2006 */ 2007 if (IGB_IS_NOT_OACTIVE(txr)) { 2008 ifsq_clr_oactive(txr->ifsq); 2009 2010 /* 2011 * We have enough TX descriptors, turn off 2012 * the watchdog. We allow small amount of 2013 * packets (roughly intr_nsegs) pending on 2014 * the transmit ring. 
2015 */ 2016 ifp->if_timer = 0; 2017 } 2018 } 2019 2020 static int 2021 igb_create_rx_ring(struct igb_rx_ring *rxr) 2022 { 2023 int rsize, i, error, nrxd; 2024 2025 /* 2026 * Validate number of receive descriptors. It must not exceed 2027 * hardware maximum, and must be multiple of IGB_DBA_ALIGN. 2028 */ 2029 nrxd = device_getenv_int(rxr->sc->dev, "rxd", igb_rxd); 2030 if ((nrxd * sizeof(struct e1000_rx_desc)) % IGB_DBA_ALIGN != 0 || 2031 nrxd > IGB_MAX_RXD || nrxd < IGB_MIN_RXD) { 2032 device_printf(rxr->sc->dev, 2033 "Using %d RX descriptors instead of %d!\n", 2034 IGB_DEFAULT_RXD, nrxd); 2035 rxr->num_rx_desc = IGB_DEFAULT_RXD; 2036 } else { 2037 rxr->num_rx_desc = nrxd; 2038 } 2039 2040 /* 2041 * Allocate RX descriptor ring 2042 */ 2043 rsize = roundup2(rxr->num_rx_desc * sizeof(union e1000_adv_rx_desc), 2044 IGB_DBA_ALIGN); 2045 rxr->rxdma.dma_vaddr = bus_dmamem_coherent_any(rxr->sc->parent_tag, 2046 IGB_DBA_ALIGN, rsize, BUS_DMA_WAITOK, 2047 &rxr->rxdma.dma_tag, &rxr->rxdma.dma_map, 2048 &rxr->rxdma.dma_paddr); 2049 if (rxr->rxdma.dma_vaddr == NULL) { 2050 device_printf(rxr->sc->dev, 2051 "Unable to allocate RxDescriptor memory\n"); 2052 return ENOMEM; 2053 } 2054 rxr->rx_base = rxr->rxdma.dma_vaddr; 2055 bzero(rxr->rx_base, rsize); 2056 2057 rsize = __VM_CACHELINE_ALIGN( 2058 sizeof(struct igb_rx_buf) * rxr->num_rx_desc); 2059 rxr->rx_buf = kmalloc_cachealign(rsize, M_DEVBUF, M_WAITOK | M_ZERO); 2060 2061 /* 2062 * Create DMA tag for RX buffers 2063 */ 2064 error = bus_dma_tag_create(rxr->sc->parent_tag, 2065 1, 0, /* alignment, bounds */ 2066 BUS_SPACE_MAXADDR, /* lowaddr */ 2067 BUS_SPACE_MAXADDR, /* highaddr */ 2068 NULL, NULL, /* filter, filterarg */ 2069 MCLBYTES, /* maxsize */ 2070 1, /* nsegments */ 2071 MCLBYTES, /* maxsegsize */ 2072 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, /* flags */ 2073 &rxr->rx_tag); 2074 if (error) { 2075 device_printf(rxr->sc->dev, 2076 "Unable to create RX payload DMA tag\n"); 2077 kfree(rxr->rx_buf, M_DEVBUF); 2078 rxr->rx_buf = NULL; 2079 return error; 2080 } 2081 2082 /* 2083 * Create spare DMA map for RX buffers 2084 */ 2085 error = bus_dmamap_create(rxr->rx_tag, BUS_DMA_WAITOK, 2086 &rxr->rx_sparemap); 2087 if (error) { 2088 device_printf(rxr->sc->dev, 2089 "Unable to create spare RX DMA maps\n"); 2090 bus_dma_tag_destroy(rxr->rx_tag); 2091 kfree(rxr->rx_buf, M_DEVBUF); 2092 rxr->rx_buf = NULL; 2093 return error; 2094 } 2095 2096 /* 2097 * Create DMA maps for RX buffers 2098 */ 2099 for (i = 0; i < rxr->num_rx_desc; i++) { 2100 struct igb_rx_buf *rxbuf = &rxr->rx_buf[i]; 2101 2102 error = bus_dmamap_create(rxr->rx_tag, 2103 BUS_DMA_WAITOK, &rxbuf->map); 2104 if (error) { 2105 device_printf(rxr->sc->dev, 2106 "Unable to create RX DMA maps\n"); 2107 igb_destroy_rx_ring(rxr, i); 2108 return error; 2109 } 2110 } 2111 2112 /* 2113 * Initialize various watermark 2114 */ 2115 rxr->rx_wreg = 32; 2116 2117 return 0; 2118 } 2119 2120 static void 2121 igb_free_rx_ring(struct igb_rx_ring *rxr) 2122 { 2123 int i; 2124 2125 for (i = 0; i < rxr->num_rx_desc; ++i) { 2126 struct igb_rx_buf *rxbuf = &rxr->rx_buf[i]; 2127 2128 if (rxbuf->m_head != NULL) { 2129 bus_dmamap_unload(rxr->rx_tag, rxbuf->map); 2130 m_freem(rxbuf->m_head); 2131 rxbuf->m_head = NULL; 2132 } 2133 } 2134 2135 if (rxr->fmp != NULL) 2136 m_freem(rxr->fmp); 2137 rxr->fmp = NULL; 2138 rxr->lmp = NULL; 2139 } 2140 2141 static void 2142 igb_destroy_rx_ring(struct igb_rx_ring *rxr, int ndesc) 2143 { 2144 int i; 2145 2146 if (rxr->rxdma.dma_vaddr != NULL) { 2147 bus_dmamap_unload(rxr->rxdma.dma_tag, 
rxr->rxdma.dma_map); 2148 bus_dmamem_free(rxr->rxdma.dma_tag, rxr->rxdma.dma_vaddr, 2149 rxr->rxdma.dma_map); 2150 bus_dma_tag_destroy(rxr->rxdma.dma_tag); 2151 rxr->rxdma.dma_vaddr = NULL; 2152 } 2153 2154 if (rxr->rx_buf == NULL) 2155 return; 2156 2157 for (i = 0; i < ndesc; ++i) { 2158 struct igb_rx_buf *rxbuf = &rxr->rx_buf[i]; 2159 2160 KKASSERT(rxbuf->m_head == NULL); 2161 bus_dmamap_destroy(rxr->rx_tag, rxbuf->map); 2162 } 2163 bus_dmamap_destroy(rxr->rx_tag, rxr->rx_sparemap); 2164 bus_dma_tag_destroy(rxr->rx_tag); 2165 2166 kfree(rxr->rx_buf, M_DEVBUF); 2167 rxr->rx_buf = NULL; 2168 } 2169 2170 static void 2171 igb_setup_rxdesc(union e1000_adv_rx_desc *rxd, const struct igb_rx_buf *rxbuf) 2172 { 2173 rxd->read.pkt_addr = htole64(rxbuf->paddr); 2174 rxd->wb.upper.status_error = 0; 2175 } 2176 2177 static int 2178 igb_newbuf(struct igb_rx_ring *rxr, int i, boolean_t wait) 2179 { 2180 struct mbuf *m; 2181 bus_dma_segment_t seg; 2182 bus_dmamap_t map; 2183 struct igb_rx_buf *rxbuf; 2184 int error, nseg; 2185 2186 m = m_getcl(wait ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR); 2187 if (m == NULL) { 2188 if (wait) { 2189 if_printf(&rxr->sc->arpcom.ac_if, 2190 "Unable to allocate RX mbuf\n"); 2191 } 2192 return ENOBUFS; 2193 } 2194 m->m_len = m->m_pkthdr.len = MCLBYTES; 2195 2196 if (rxr->sc->max_frame_size <= MCLBYTES - ETHER_ALIGN) 2197 m_adj(m, ETHER_ALIGN); 2198 2199 error = bus_dmamap_load_mbuf_segment(rxr->rx_tag, 2200 rxr->rx_sparemap, m, &seg, 1, &nseg, BUS_DMA_NOWAIT); 2201 if (error) { 2202 m_freem(m); 2203 if (wait) { 2204 if_printf(&rxr->sc->arpcom.ac_if, 2205 "Unable to load RX mbuf\n"); 2206 } 2207 return error; 2208 } 2209 2210 rxbuf = &rxr->rx_buf[i]; 2211 if (rxbuf->m_head != NULL) 2212 bus_dmamap_unload(rxr->rx_tag, rxbuf->map); 2213 2214 map = rxbuf->map; 2215 rxbuf->map = rxr->rx_sparemap; 2216 rxr->rx_sparemap = map; 2217 2218 rxbuf->m_head = m; 2219 rxbuf->paddr = seg.ds_addr; 2220 2221 igb_setup_rxdesc(&rxr->rx_base[i], rxbuf); 2222 return 0; 2223 } 2224 2225 static int 2226 igb_init_rx_ring(struct igb_rx_ring *rxr) 2227 { 2228 int i; 2229 2230 /* Clear the ring contents */ 2231 bzero(rxr->rx_base, 2232 rxr->num_rx_desc * sizeof(union e1000_adv_rx_desc)); 2233 2234 /* Now replenish the ring mbufs */ 2235 for (i = 0; i < rxr->num_rx_desc; ++i) { 2236 int error; 2237 2238 error = igb_newbuf(rxr, i, TRUE); 2239 if (error) 2240 return error; 2241 } 2242 2243 /* Setup our descriptor indices */ 2244 rxr->next_to_check = 0; 2245 2246 rxr->fmp = NULL; 2247 rxr->lmp = NULL; 2248 rxr->discard = FALSE; 2249 2250 return 0; 2251 } 2252 2253 static void 2254 igb_init_rx_unit(struct igb_softc *sc) 2255 { 2256 struct ifnet *ifp = &sc->arpcom.ac_if; 2257 struct e1000_hw *hw = &sc->hw; 2258 uint32_t rctl, rxcsum, srrctl = 0; 2259 int i; 2260 2261 /* 2262 * Make sure receives are disabled while setting 2263 * up the descriptor ring 2264 */ 2265 rctl = E1000_READ_REG(hw, E1000_RCTL); 2266 E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); 2267 2268 #if 0 2269 /* 2270 ** Set up for header split 2271 */ 2272 if (igb_header_split) { 2273 /* Use a standard mbuf for the header */ 2274 srrctl |= IGB_HDR_BUF << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; 2275 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; 2276 } else 2277 #endif 2278 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; 2279 2280 /* 2281 ** Set up for jumbo frames 2282 */ 2283 if (ifp->if_mtu > ETHERMTU) { 2284 rctl |= E1000_RCTL_LPE; 2285 #if 0 2286 if (adapter->rx_mbuf_sz == MJUMPAGESIZE) { 2287 srrctl |= 4096 >> 
E1000_SRRCTL_BSIZEPKT_SHIFT; 2288 rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX; 2289 } else if (adapter->rx_mbuf_sz > MJUMPAGESIZE) { 2290 srrctl |= 8192 >> E1000_SRRCTL_BSIZEPKT_SHIFT; 2291 rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX; 2292 } 2293 /* Set maximum packet len */ 2294 psize = adapter->max_frame_size; 2295 /* are we on a vlan? */ 2296 if (adapter->ifp->if_vlantrunk != NULL) 2297 psize += VLAN_TAG_SIZE; 2298 E1000_WRITE_REG(&adapter->hw, E1000_RLPML, psize); 2299 #else 2300 srrctl |= 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT; 2301 rctl |= E1000_RCTL_SZ_2048; 2302 #endif 2303 } else { 2304 rctl &= ~E1000_RCTL_LPE; 2305 srrctl |= 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT; 2306 rctl |= E1000_RCTL_SZ_2048; 2307 } 2308 2309 /* Setup the Base and Length of the Rx Descriptor Rings */ 2310 for (i = 0; i < sc->rx_ring_inuse; ++i) { 2311 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 2312 uint64_t bus_addr = rxr->rxdma.dma_paddr; 2313 uint32_t rxdctl; 2314 2315 E1000_WRITE_REG(hw, E1000_RDLEN(i), 2316 rxr->num_rx_desc * sizeof(struct e1000_rx_desc)); 2317 E1000_WRITE_REG(hw, E1000_RDBAH(i), 2318 (uint32_t)(bus_addr >> 32)); 2319 E1000_WRITE_REG(hw, E1000_RDBAL(i), 2320 (uint32_t)bus_addr); 2321 E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl); 2322 /* Enable this Queue */ 2323 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i)); 2324 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE; 2325 rxdctl &= 0xFFF00000; 2326 rxdctl |= IGB_RX_PTHRESH; 2327 rxdctl |= IGB_RX_HTHRESH << 8; 2328 /* 2329 * Don't set WTHRESH to a value above 1 on 82576, see: 2330 * 82576 specification update errata #26 2331 */ 2332 rxdctl |= IGB_RX_WTHRESH << 16; 2333 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl); 2334 } 2335 2336 rxcsum = E1000_READ_REG(&sc->hw, E1000_RXCSUM); 2337 rxcsum &= ~(E1000_RXCSUM_PCSS_MASK | E1000_RXCSUM_IPPCSE); 2338 2339 /* 2340 * Receive Checksum Offload for TCP and UDP 2341 * 2342 * Checksum offloading is also enabled if multiple receive 2343 * queue is to be supported, since we need it to figure out 2344 * fragments. 2345 */ 2346 if ((ifp->if_capenable & IFCAP_RXCSUM) || IGB_ENABLE_HWRSS(sc)) { 2347 /* 2348 * NOTE: 2349 * PCSD must be enabled to enable multiple 2350 * receive queues. 2351 */ 2352 rxcsum |= E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL | 2353 E1000_RXCSUM_PCSD; 2354 } else { 2355 rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL | 2356 E1000_RXCSUM_PCSD); 2357 } 2358 E1000_WRITE_REG(&sc->hw, E1000_RXCSUM, rxcsum); 2359 2360 if (IGB_ENABLE_HWRSS(sc)) { 2361 uint8_t key[IGB_NRSSRK * IGB_RSSRK_SIZE]; 2362 uint32_t reta_shift; 2363 int j, r; 2364 2365 /* 2366 * NOTE: 2367 * When we reach here, RSS has already been disabled 2368 * in igb_stop(), so we could safely configure RSS key 2369 * and redirect table. 
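	 *
	 * The redirect table below is filled round-robin, so with e.g.
	 * two RX rings in use its one-byte entries read 0,1,0,1,...
	 * The hardware indexes the table with the low bits of the RSS
	 * hash, and the queue number stored in the selected entry
	 * (positioned within each byte by reta_shift) picks the RX ring.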
2370 */ 2371 2372 /* 2373 * Configure RSS key 2374 */ 2375 toeplitz_get_key(key, sizeof(key)); 2376 for (i = 0; i < IGB_NRSSRK; ++i) { 2377 uint32_t rssrk; 2378 2379 rssrk = IGB_RSSRK_VAL(key, i); 2380 IGB_RSS_DPRINTF(sc, 1, "rssrk%d 0x%08x\n", i, rssrk); 2381 2382 E1000_WRITE_REG(hw, E1000_RSSRK(i), rssrk); 2383 } 2384 2385 /* 2386 * Configure RSS redirect table in following fashion: 2387 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)] 2388 */ 2389 reta_shift = IGB_RETA_SHIFT; 2390 if (hw->mac.type == e1000_82575) 2391 reta_shift = IGB_RETA_SHIFT_82575; 2392 2393 r = 0; 2394 for (j = 0; j < IGB_NRETA; ++j) { 2395 uint32_t reta = 0; 2396 2397 for (i = 0; i < IGB_RETA_SIZE; ++i) { 2398 uint32_t q; 2399 2400 q = (r % sc->rx_ring_inuse) << reta_shift; 2401 reta |= q << (8 * i); 2402 ++r; 2403 } 2404 IGB_RSS_DPRINTF(sc, 1, "reta 0x%08x\n", reta); 2405 E1000_WRITE_REG(hw, E1000_RETA(j), reta); 2406 } 2407 2408 /* 2409 * Enable multiple receive queues. 2410 * Enable IPv4 RSS standard hash functions. 2411 * Disable RSS interrupt on 82575 2412 */ 2413 E1000_WRITE_REG(&sc->hw, E1000_MRQC, 2414 E1000_MRQC_ENABLE_RSS_4Q | 2415 E1000_MRQC_RSS_FIELD_IPV4_TCP | 2416 E1000_MRQC_RSS_FIELD_IPV4); 2417 } 2418 2419 /* Setup the Receive Control Register */ 2420 rctl &= ~(3 << E1000_RCTL_MO_SHIFT); 2421 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO | 2422 E1000_RCTL_RDMTS_HALF | 2423 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT); 2424 /* Strip CRC bytes. */ 2425 rctl |= E1000_RCTL_SECRC; 2426 /* Make sure VLAN Filters are off */ 2427 rctl &= ~E1000_RCTL_VFE; 2428 /* Don't store bad packets */ 2429 rctl &= ~E1000_RCTL_SBP; 2430 2431 /* Enable Receives */ 2432 E1000_WRITE_REG(hw, E1000_RCTL, rctl); 2433 2434 /* 2435 * Setup the HW Rx Head and Tail Descriptor Pointers 2436 * - needs to be after enable 2437 */ 2438 for (i = 0; i < sc->rx_ring_inuse; ++i) { 2439 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 2440 2441 E1000_WRITE_REG(hw, E1000_RDH(i), rxr->next_to_check); 2442 E1000_WRITE_REG(hw, E1000_RDT(i), rxr->num_rx_desc - 1); 2443 } 2444 } 2445 2446 static void 2447 igb_rx_refresh(struct igb_rx_ring *rxr, int i) 2448 { 2449 if (--i < 0) 2450 i = rxr->num_rx_desc - 1; 2451 E1000_WRITE_REG(&rxr->sc->hw, E1000_RDT(rxr->me), i); 2452 } 2453 2454 static void 2455 igb_rxeof(struct igb_rx_ring *rxr, int count) 2456 { 2457 struct ifnet *ifp = &rxr->sc->arpcom.ac_if; 2458 union e1000_adv_rx_desc *cur; 2459 uint32_t staterr; 2460 int i, ncoll = 0; 2461 2462 i = rxr->next_to_check; 2463 cur = &rxr->rx_base[i]; 2464 staterr = le32toh(cur->wb.upper.status_error); 2465 2466 if ((staterr & E1000_RXD_STAT_DD) == 0) 2467 return; 2468 2469 while ((staterr & E1000_RXD_STAT_DD) && count != 0) { 2470 struct pktinfo *pi = NULL, pi0; 2471 struct igb_rx_buf *rxbuf = &rxr->rx_buf[i]; 2472 struct mbuf *m = NULL; 2473 boolean_t eop; 2474 2475 eop = (staterr & E1000_RXD_STAT_EOP) ? 
TRUE : FALSE; 2476 if (eop) 2477 --count; 2478 2479 ++ncoll; 2480 if ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) == 0 && 2481 !rxr->discard) { 2482 struct mbuf *mp = rxbuf->m_head; 2483 uint32_t hash, hashtype; 2484 uint16_t vlan; 2485 int len; 2486 2487 len = le16toh(cur->wb.upper.length); 2488 if (rxr->sc->hw.mac.type == e1000_i350 && 2489 (staterr & E1000_RXDEXT_STATERR_LB)) 2490 vlan = be16toh(cur->wb.upper.vlan); 2491 else 2492 vlan = le16toh(cur->wb.upper.vlan); 2493 2494 hash = le32toh(cur->wb.lower.hi_dword.rss); 2495 hashtype = le32toh(cur->wb.lower.lo_dword.data) & 2496 E1000_RXDADV_RSSTYPE_MASK; 2497 2498 IGB_RSS_DPRINTF(rxr->sc, 10, 2499 "ring%d, hash 0x%08x, hashtype %u\n", 2500 rxr->me, hash, hashtype); 2501 2502 bus_dmamap_sync(rxr->rx_tag, rxbuf->map, 2503 BUS_DMASYNC_POSTREAD); 2504 2505 if (igb_newbuf(rxr, i, FALSE) != 0) { 2506 ifp->if_iqdrops++; 2507 goto discard; 2508 } 2509 2510 mp->m_len = len; 2511 if (rxr->fmp == NULL) { 2512 mp->m_pkthdr.len = len; 2513 rxr->fmp = mp; 2514 rxr->lmp = mp; 2515 } else { 2516 rxr->lmp->m_next = mp; 2517 rxr->lmp = rxr->lmp->m_next; 2518 rxr->fmp->m_pkthdr.len += len; 2519 } 2520 2521 if (eop) { 2522 m = rxr->fmp; 2523 rxr->fmp = NULL; 2524 rxr->lmp = NULL; 2525 2526 m->m_pkthdr.rcvif = ifp; 2527 ifp->if_ipackets++; 2528 2529 if (ifp->if_capenable & IFCAP_RXCSUM) 2530 igb_rxcsum(staterr, m); 2531 2532 if (staterr & E1000_RXD_STAT_VP) { 2533 m->m_pkthdr.ether_vlantag = vlan; 2534 m->m_flags |= M_VLANTAG; 2535 } 2536 2537 if (ifp->if_capenable & IFCAP_RSS) { 2538 pi = igb_rssinfo(m, &pi0, 2539 hash, hashtype, staterr); 2540 } 2541 #ifdef IGB_RSS_DEBUG 2542 rxr->rx_packets++; 2543 #endif 2544 } 2545 } else { 2546 ifp->if_ierrors++; 2547 discard: 2548 igb_setup_rxdesc(cur, rxbuf); 2549 if (!eop) 2550 rxr->discard = TRUE; 2551 else 2552 rxr->discard = FALSE; 2553 if (rxr->fmp != NULL) { 2554 m_freem(rxr->fmp); 2555 rxr->fmp = NULL; 2556 rxr->lmp = NULL; 2557 } 2558 m = NULL; 2559 } 2560 2561 if (m != NULL) 2562 ether_input_pkt(ifp, m, pi); 2563 2564 /* Advance our pointers to the next descriptor. */ 2565 if (++i == rxr->num_rx_desc) 2566 i = 0; 2567 2568 if (ncoll >= rxr->rx_wreg) { 2569 igb_rx_refresh(rxr, i); 2570 ncoll = 0; 2571 } 2572 2573 cur = &rxr->rx_base[i]; 2574 staterr = le32toh(cur->wb.upper.status_error); 2575 } 2576 rxr->next_to_check = i; 2577 2578 if (ncoll > 0) 2579 igb_rx_refresh(rxr, i); 2580 } 2581 2582 2583 static void 2584 igb_set_vlan(struct igb_softc *sc) 2585 { 2586 struct e1000_hw *hw = &sc->hw; 2587 uint32_t reg; 2588 #if 0 2589 struct ifnet *ifp = sc->arpcom.ac_if; 2590 #endif 2591 2592 if (sc->vf_ifp) { 2593 e1000_rlpml_set_vf(hw, sc->max_frame_size + VLAN_TAG_SIZE); 2594 return; 2595 } 2596 2597 reg = E1000_READ_REG(hw, E1000_CTRL); 2598 reg |= E1000_CTRL_VME; 2599 E1000_WRITE_REG(hw, E1000_CTRL, reg); 2600 2601 #if 0 2602 /* Enable the Filter Table */ 2603 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) { 2604 reg = E1000_READ_REG(hw, E1000_RCTL); 2605 reg &= ~E1000_RCTL_CFIEN; 2606 reg |= E1000_RCTL_VFE; 2607 E1000_WRITE_REG(hw, E1000_RCTL, reg); 2608 } 2609 #endif 2610 2611 /* Update the frame size */ 2612 E1000_WRITE_REG(&sc->hw, E1000_RLPML, 2613 sc->max_frame_size + VLAN_TAG_SIZE); 2614 2615 #if 0 2616 /* Don't bother with table if no vlans */ 2617 if ((adapter->num_vlans == 0) || 2618 ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)) 2619 return; 2620 /* 2621 ** A soft reset zero's out the VFTA, so 2622 ** we need to repopulate it now. 
2623 */ 2624 for (int i = 0; i < IGB_VFTA_SIZE; i++) 2625 if (adapter->shadow_vfta[i] != 0) { 2626 if (adapter->vf_ifp) 2627 e1000_vfta_set_vf(hw, 2628 adapter->shadow_vfta[i], TRUE); 2629 else 2630 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, 2631 i, adapter->shadow_vfta[i]); 2632 } 2633 #endif 2634 } 2635 2636 static void 2637 igb_enable_intr(struct igb_softc *sc) 2638 { 2639 if (sc->intr_type != PCI_INTR_TYPE_MSIX) { 2640 lwkt_serialize_handler_enable(&sc->main_serialize); 2641 } else { 2642 int i; 2643 2644 for (i = 0; i < sc->msix_cnt; ++i) { 2645 lwkt_serialize_handler_enable( 2646 sc->msix_data[i].msix_serialize); 2647 } 2648 } 2649 2650 if ((sc->flags & IGB_FLAG_SHARED_INTR) == 0) { 2651 if (sc->intr_type == PCI_INTR_TYPE_MSIX) 2652 E1000_WRITE_REG(&sc->hw, E1000_EIAC, sc->intr_mask); 2653 else 2654 E1000_WRITE_REG(&sc->hw, E1000_EIAC, 0); 2655 E1000_WRITE_REG(&sc->hw, E1000_EIAM, sc->intr_mask); 2656 E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->intr_mask); 2657 E1000_WRITE_REG(&sc->hw, E1000_IMS, E1000_IMS_LSC); 2658 } else { 2659 E1000_WRITE_REG(&sc->hw, E1000_IMS, IMS_ENABLE_MASK); 2660 } 2661 E1000_WRITE_FLUSH(&sc->hw); 2662 } 2663 2664 static void 2665 igb_disable_intr(struct igb_softc *sc) 2666 { 2667 if ((sc->flags & IGB_FLAG_SHARED_INTR) == 0) { 2668 E1000_WRITE_REG(&sc->hw, E1000_EIMC, 0xffffffff); 2669 E1000_WRITE_REG(&sc->hw, E1000_EIAC, 0); 2670 } 2671 E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff); 2672 E1000_WRITE_FLUSH(&sc->hw); 2673 2674 if (sc->intr_type != PCI_INTR_TYPE_MSIX) { 2675 lwkt_serialize_handler_disable(&sc->main_serialize); 2676 } else { 2677 int i; 2678 2679 for (i = 0; i < sc->msix_cnt; ++i) { 2680 lwkt_serialize_handler_disable( 2681 sc->msix_data[i].msix_serialize); 2682 } 2683 } 2684 } 2685 2686 /* 2687 * Bit of a misnomer, what this really means is 2688 * to enable OS management of the system... aka 2689 * to disable special hardware management features 2690 */ 2691 static void 2692 igb_get_mgmt(struct igb_softc *sc) 2693 { 2694 if (sc->flags & IGB_FLAG_HAS_MGMT) { 2695 int manc2h = E1000_READ_REG(&sc->hw, E1000_MANC2H); 2696 int manc = E1000_READ_REG(&sc->hw, E1000_MANC); 2697 2698 /* disable hardware interception of ARP */ 2699 manc &= ~E1000_MANC_ARP_EN; 2700 2701 /* enable receiving management packets to the host */ 2702 manc |= E1000_MANC_EN_MNG2HOST; 2703 manc2h |= 1 << 5; /* Mng Port 623 */ 2704 manc2h |= 1 << 6; /* Mng Port 664 */ 2705 E1000_WRITE_REG(&sc->hw, E1000_MANC2H, manc2h); 2706 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc); 2707 } 2708 } 2709 2710 /* 2711 * Give control back to hardware management controller 2712 * if there is one. 2713 */ 2714 static void 2715 igb_rel_mgmt(struct igb_softc *sc) 2716 { 2717 if (sc->flags & IGB_FLAG_HAS_MGMT) { 2718 int manc = E1000_READ_REG(&sc->hw, E1000_MANC); 2719 2720 /* Re-enable hardware interception of ARP */ 2721 manc |= E1000_MANC_ARP_EN; 2722 manc &= ~E1000_MANC_EN_MNG2HOST; 2723 2724 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc); 2725 } 2726 } 2727 2728 /* 2729 * Sets CTRL_EXT:DRV_LOAD bit. 2730 * 2731 * For ASF and Pass Through versions of f/w this means that 2732 * the driver is loaded. 2733 */ 2734 static void 2735 igb_get_hw_control(struct igb_softc *sc) 2736 { 2737 uint32_t ctrl_ext; 2738 2739 if (sc->vf_ifp) 2740 return; 2741 2742 /* Let firmware know the driver has taken over */ 2743 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT); 2744 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, 2745 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); 2746 } 2747 2748 /* 2749 * Resets CTRL_EXT:DRV_LOAD bit. 
2750 * 2751 * For ASF and Pass Through versions of f/w this means that the 2752 * driver is no longer loaded. 2753 */ 2754 static void 2755 igb_rel_hw_control(struct igb_softc *sc) 2756 { 2757 uint32_t ctrl_ext; 2758 2759 if (sc->vf_ifp) 2760 return; 2761 2762 /* Let firmware taken over control of h/w */ 2763 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT); 2764 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, 2765 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); 2766 } 2767 2768 static int 2769 igb_is_valid_ether_addr(const uint8_t *addr) 2770 { 2771 uint8_t zero_addr[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 }; 2772 2773 if ((addr[0] & 1) || !bcmp(addr, zero_addr, ETHER_ADDR_LEN)) 2774 return FALSE; 2775 return TRUE; 2776 } 2777 2778 /* 2779 * Enable PCI Wake On Lan capability 2780 */ 2781 static void 2782 igb_enable_wol(device_t dev) 2783 { 2784 uint16_t cap, status; 2785 uint8_t id; 2786 2787 /* First find the capabilities pointer*/ 2788 cap = pci_read_config(dev, PCIR_CAP_PTR, 2); 2789 2790 /* Read the PM Capabilities */ 2791 id = pci_read_config(dev, cap, 1); 2792 if (id != PCIY_PMG) /* Something wrong */ 2793 return; 2794 2795 /* 2796 * OK, we have the power capabilities, 2797 * so now get the status register 2798 */ 2799 cap += PCIR_POWER_STATUS; 2800 status = pci_read_config(dev, cap, 2); 2801 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 2802 pci_write_config(dev, cap, status, 2); 2803 } 2804 2805 static void 2806 igb_update_stats_counters(struct igb_softc *sc) 2807 { 2808 struct e1000_hw *hw = &sc->hw; 2809 struct e1000_hw_stats *stats; 2810 struct ifnet *ifp = &sc->arpcom.ac_if; 2811 2812 /* 2813 * The virtual function adapter has only a 2814 * small controlled set of stats, do only 2815 * those and return. 2816 */ 2817 if (sc->vf_ifp) { 2818 igb_update_vf_stats_counters(sc); 2819 return; 2820 } 2821 stats = sc->stats; 2822 2823 if (sc->hw.phy.media_type == e1000_media_type_copper || 2824 (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) { 2825 stats->symerrs += 2826 E1000_READ_REG(hw,E1000_SYMERRS); 2827 stats->sec += E1000_READ_REG(hw, E1000_SEC); 2828 } 2829 2830 stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS); 2831 stats->mpc += E1000_READ_REG(hw, E1000_MPC); 2832 stats->scc += E1000_READ_REG(hw, E1000_SCC); 2833 stats->ecol += E1000_READ_REG(hw, E1000_ECOL); 2834 2835 stats->mcc += E1000_READ_REG(hw, E1000_MCC); 2836 stats->latecol += E1000_READ_REG(hw, E1000_LATECOL); 2837 stats->colc += E1000_READ_REG(hw, E1000_COLC); 2838 stats->dc += E1000_READ_REG(hw, E1000_DC); 2839 stats->rlec += E1000_READ_REG(hw, E1000_RLEC); 2840 stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC); 2841 stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC); 2842 2843 /* 2844 * For watchdog management we need to know if we have been 2845 * paused during the last interval, so capture that here. 
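	 * igb_watchdog() checks sc->pause_frames and skips the timeout
	 * handling while the link partner has been sending XOFF frames,
	 * since a flow-controlled transmitter is not a hung one.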
2846 */ 2847 sc->pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC); 2848 stats->xoffrxc += sc->pause_frames; 2849 stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC); 2850 stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC); 2851 stats->prc64 += E1000_READ_REG(hw, E1000_PRC64); 2852 stats->prc127 += E1000_READ_REG(hw, E1000_PRC127); 2853 stats->prc255 += E1000_READ_REG(hw, E1000_PRC255); 2854 stats->prc511 += E1000_READ_REG(hw, E1000_PRC511); 2855 stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023); 2856 stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522); 2857 stats->gprc += E1000_READ_REG(hw, E1000_GPRC); 2858 stats->bprc += E1000_READ_REG(hw, E1000_BPRC); 2859 stats->mprc += E1000_READ_REG(hw, E1000_MPRC); 2860 stats->gptc += E1000_READ_REG(hw, E1000_GPTC); 2861 2862 /* For the 64-bit byte counters the low dword must be read first. */ 2863 /* Both registers clear on the read of the high dword */ 2864 2865 stats->gorc += E1000_READ_REG(hw, E1000_GORCL) + 2866 ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32); 2867 stats->gotc += E1000_READ_REG(hw, E1000_GOTCL) + 2868 ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32); 2869 2870 stats->rnbc += E1000_READ_REG(hw, E1000_RNBC); 2871 stats->ruc += E1000_READ_REG(hw, E1000_RUC); 2872 stats->rfc += E1000_READ_REG(hw, E1000_RFC); 2873 stats->roc += E1000_READ_REG(hw, E1000_ROC); 2874 stats->rjc += E1000_READ_REG(hw, E1000_RJC); 2875 2876 stats->tor += E1000_READ_REG(hw, E1000_TORH); 2877 stats->tot += E1000_READ_REG(hw, E1000_TOTH); 2878 2879 stats->tpr += E1000_READ_REG(hw, E1000_TPR); 2880 stats->tpt += E1000_READ_REG(hw, E1000_TPT); 2881 stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64); 2882 stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127); 2883 stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255); 2884 stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511); 2885 stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023); 2886 stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522); 2887 stats->mptc += E1000_READ_REG(hw, E1000_MPTC); 2888 stats->bptc += E1000_READ_REG(hw, E1000_BPTC); 2889 2890 /* Interrupt Counts */ 2891 2892 stats->iac += E1000_READ_REG(hw, E1000_IAC); 2893 stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC); 2894 stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC); 2895 stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC); 2896 stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC); 2897 stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC); 2898 stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC); 2899 stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC); 2900 stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC); 2901 2902 /* Host to Card Statistics */ 2903 2904 stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC); 2905 stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC); 2906 stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC); 2907 stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC); 2908 stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC); 2909 stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC); 2910 stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC); 2911 stats->hgorc += (E1000_READ_REG(hw, E1000_HGORCL) + 2912 ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32)); 2913 stats->hgotc += (E1000_READ_REG(hw, E1000_HGOTCL) + 2914 ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32)); 2915 stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS); 2916 stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC); 2917 stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC); 2918 2919 stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC); 2920 stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC); 2921 
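	/*
	 * NOTE: like the 64-bit counters noted above, these statistics
	 * registers are generally clear-on-read, so each update pass
	 * accumulates only the delta since the previous poll.
	 */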
stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS); 2922 stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR); 2923 stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC); 2924 stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC); 2925 2926 ifp->if_collisions = stats->colc; 2927 2928 /* Rx Errors */ 2929 ifp->if_ierrors = stats->rxerrc + stats->crcerrs + stats->algnerrc + 2930 stats->ruc + stats->roc + stats->mpc + stats->cexterr; 2931 2932 /* Tx Errors */ 2933 ifp->if_oerrors = stats->ecol + stats->latecol + sc->watchdog_events; 2934 2935 /* Driver specific counters */ 2936 sc->device_control = E1000_READ_REG(hw, E1000_CTRL); 2937 sc->rx_control = E1000_READ_REG(hw, E1000_RCTL); 2938 sc->int_mask = E1000_READ_REG(hw, E1000_IMS); 2939 sc->eint_mask = E1000_READ_REG(hw, E1000_EIMS); 2940 sc->packet_buf_alloc_tx = 2941 ((E1000_READ_REG(hw, E1000_PBA) & 0xffff0000) >> 16); 2942 sc->packet_buf_alloc_rx = 2943 (E1000_READ_REG(hw, E1000_PBA) & 0xffff); 2944 } 2945 2946 static void 2947 igb_vf_init_stats(struct igb_softc *sc) 2948 { 2949 struct e1000_hw *hw = &sc->hw; 2950 struct e1000_vf_stats *stats; 2951 2952 stats = sc->stats; 2953 stats->last_gprc = E1000_READ_REG(hw, E1000_VFGPRC); 2954 stats->last_gorc = E1000_READ_REG(hw, E1000_VFGORC); 2955 stats->last_gptc = E1000_READ_REG(hw, E1000_VFGPTC); 2956 stats->last_gotc = E1000_READ_REG(hw, E1000_VFGOTC); 2957 stats->last_mprc = E1000_READ_REG(hw, E1000_VFMPRC); 2958 } 2959 2960 static void 2961 igb_update_vf_stats_counters(struct igb_softc *sc) 2962 { 2963 struct e1000_hw *hw = &sc->hw; 2964 struct e1000_vf_stats *stats; 2965 2966 if (sc->link_speed == 0) 2967 return; 2968 2969 stats = sc->stats; 2970 UPDATE_VF_REG(E1000_VFGPRC, stats->last_gprc, stats->gprc); 2971 UPDATE_VF_REG(E1000_VFGORC, stats->last_gorc, stats->gorc); 2972 UPDATE_VF_REG(E1000_VFGPTC, stats->last_gptc, stats->gptc); 2973 UPDATE_VF_REG(E1000_VFGOTC, stats->last_gotc, stats->gotc); 2974 UPDATE_VF_REG(E1000_VFMPRC, stats->last_mprc, stats->mprc); 2975 } 2976 2977 #ifdef IFPOLL_ENABLE 2978 2979 static void 2980 igb_npoll_status(struct ifnet *ifp) 2981 { 2982 struct igb_softc *sc = ifp->if_softc; 2983 uint32_t reg_icr; 2984 2985 ASSERT_SERIALIZED(&sc->main_serialize); 2986 2987 reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR); 2988 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { 2989 sc->hw.mac.get_link_status = 1; 2990 igb_update_link_status(sc); 2991 } 2992 } 2993 2994 static void 2995 igb_npoll_tx(struct ifnet *ifp, void *arg, int cycle __unused) 2996 { 2997 struct igb_tx_ring *txr = arg; 2998 2999 ASSERT_SERIALIZED(&txr->tx_serialize); 3000 3001 igb_txeof(txr); 3002 if (!ifsq_is_empty(txr->ifsq)) 3003 ifsq_devstart(txr->ifsq); 3004 } 3005 3006 static void 3007 igb_npoll_rx(struct ifnet *ifp __unused, void *arg, int cycle) 3008 { 3009 struct igb_rx_ring *rxr = arg; 3010 3011 ASSERT_SERIALIZED(&rxr->rx_serialize); 3012 3013 igb_rxeof(rxr, cycle); 3014 } 3015 3016 static void 3017 igb_npoll(struct ifnet *ifp, struct ifpoll_info *info) 3018 { 3019 struct igb_softc *sc = ifp->if_softc; 3020 int i; 3021 3022 ASSERT_IFNET_SERIALIZED_ALL(ifp); 3023 3024 if (info) { 3025 int off; 3026 3027 info->ifpi_status.status_func = igb_npoll_status; 3028 info->ifpi_status.serializer = &sc->main_serialize; 3029 3030 off = sc->tx_npoll_off; 3031 for (i = 0; i < sc->tx_ring_cnt; ++i) { 3032 struct igb_tx_ring *txr = &sc->tx_rings[i]; 3033 int idx = i + off; 3034 3035 KKASSERT(idx < ncpus2); 3036 info->ifpi_tx[idx].poll_func = igb_npoll_tx; 3037 info->ifpi_tx[idx].arg = txr; 3038 info->ifpi_tx[idx].serializer 
= &txr->tx_serialize; 3039 ifsq_set_cpuid(txr->ifsq, idx); 3040 } 3041 3042 off = sc->rx_npoll_off; 3043 for (i = 0; i < sc->rx_ring_cnt; ++i) { 3044 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 3045 int idx = i + off; 3046 3047 KKASSERT(idx < ncpus2); 3048 info->ifpi_rx[idx].poll_func = igb_npoll_rx; 3049 info->ifpi_rx[idx].arg = rxr; 3050 info->ifpi_rx[idx].serializer = &rxr->rx_serialize; 3051 } 3052 3053 if (ifp->if_flags & IFF_RUNNING) { 3054 if (sc->rx_ring_inuse == sc->rx_ring_cnt) 3055 igb_disable_intr(sc); 3056 else 3057 igb_init(sc); 3058 } 3059 } else { 3060 if (ifp->if_flags & IFF_RUNNING) { 3061 if (sc->rx_ring_inuse == sc->rx_ring_cnt) 3062 igb_enable_intr(sc); 3063 else 3064 igb_init(sc); 3065 } 3066 3067 for (i = 0; i < sc->tx_ring_cnt; ++i) { 3068 struct igb_tx_ring *txr = &sc->tx_rings[i]; 3069 3070 ifsq_set_cpuid(txr->ifsq, txr->tx_intr_cpuid); 3071 } 3072 } 3073 } 3074 3075 #endif /* IFPOLL_ENABLE */ 3076 3077 static void 3078 igb_intr(void *xsc) 3079 { 3080 struct igb_softc *sc = xsc; 3081 struct ifnet *ifp = &sc->arpcom.ac_if; 3082 uint32_t eicr; 3083 3084 ASSERT_SERIALIZED(&sc->main_serialize); 3085 3086 eicr = E1000_READ_REG(&sc->hw, E1000_EICR); 3087 3088 if (eicr == 0) 3089 return; 3090 3091 if (ifp->if_flags & IFF_RUNNING) { 3092 struct igb_tx_ring *txr = &sc->tx_rings[0]; 3093 int i; 3094 3095 for (i = 0; i < sc->rx_ring_inuse; ++i) { 3096 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 3097 3098 if (eicr & rxr->rx_intr_mask) { 3099 lwkt_serialize_enter(&rxr->rx_serialize); 3100 igb_rxeof(rxr, -1); 3101 lwkt_serialize_exit(&rxr->rx_serialize); 3102 } 3103 } 3104 3105 if (eicr & txr->tx_intr_mask) { 3106 lwkt_serialize_enter(&txr->tx_serialize); 3107 igb_txeof(txr); 3108 if (!ifsq_is_empty(txr->ifsq)) 3109 ifsq_devstart(txr->ifsq); 3110 lwkt_serialize_exit(&txr->tx_serialize); 3111 } 3112 } 3113 3114 if (eicr & E1000_EICR_OTHER) { 3115 uint32_t icr = E1000_READ_REG(&sc->hw, E1000_ICR); 3116 3117 /* Link status change */ 3118 if (icr & E1000_ICR_LSC) { 3119 sc->hw.mac.get_link_status = 1; 3120 igb_update_link_status(sc); 3121 } 3122 } 3123 3124 /* 3125 * Reading EICR has the side effect to clear interrupt mask, 3126 * so all interrupts need to be enabled here. 3127 */ 3128 E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->intr_mask); 3129 } 3130 3131 static void 3132 igb_intr_shared(void *xsc) 3133 { 3134 struct igb_softc *sc = xsc; 3135 struct ifnet *ifp = &sc->arpcom.ac_if; 3136 uint32_t reg_icr; 3137 3138 ASSERT_SERIALIZED(&sc->main_serialize); 3139 3140 reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR); 3141 3142 /* Hot eject? */ 3143 if (reg_icr == 0xffffffff) 3144 return; 3145 3146 /* Definitely not our interrupt. 
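	 * On a shared interrupt line a zero ICR just means another
	 * device asserted the line; bail out and let its handler
	 * claim the interrupt.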
*/ 3147 if (reg_icr == 0x0) 3148 return; 3149 3150 if ((reg_icr & E1000_ICR_INT_ASSERTED) == 0) 3151 return; 3152 3153 if (ifp->if_flags & IFF_RUNNING) { 3154 if (reg_icr & 3155 (E1000_ICR_RXT0 | E1000_ICR_RXDMT0 | E1000_ICR_RXO)) { 3156 int i; 3157 3158 for (i = 0; i < sc->rx_ring_inuse; ++i) { 3159 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 3160 3161 lwkt_serialize_enter(&rxr->rx_serialize); 3162 igb_rxeof(rxr, -1); 3163 lwkt_serialize_exit(&rxr->rx_serialize); 3164 } 3165 } 3166 3167 if (reg_icr & E1000_ICR_TXDW) { 3168 struct igb_tx_ring *txr = &sc->tx_rings[0]; 3169 3170 lwkt_serialize_enter(&txr->tx_serialize); 3171 igb_txeof(txr); 3172 if (!ifsq_is_empty(txr->ifsq)) 3173 ifsq_devstart(txr->ifsq); 3174 lwkt_serialize_exit(&txr->tx_serialize); 3175 } 3176 } 3177 3178 /* Link status change */ 3179 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { 3180 sc->hw.mac.get_link_status = 1; 3181 igb_update_link_status(sc); 3182 } 3183 3184 if (reg_icr & E1000_ICR_RXO) 3185 sc->rx_overruns++; 3186 } 3187 3188 static int 3189 igb_encap(struct igb_tx_ring *txr, struct mbuf **m_headp, 3190 int *segs_used, int *idx) 3191 { 3192 bus_dma_segment_t segs[IGB_MAX_SCATTER]; 3193 bus_dmamap_t map; 3194 struct igb_tx_buf *tx_buf, *tx_buf_mapped; 3195 union e1000_adv_tx_desc *txd = NULL; 3196 struct mbuf *m_head = *m_headp; 3197 uint32_t olinfo_status = 0, cmd_type_len = 0, cmd_rs = 0; 3198 int maxsegs, nsegs, i, j, error; 3199 uint32_t hdrlen = 0; 3200 3201 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 3202 error = igb_tso_pullup(txr, m_headp); 3203 if (error) 3204 return error; 3205 m_head = *m_headp; 3206 } 3207 3208 /* Set basic descriptor constants */ 3209 cmd_type_len |= E1000_ADVTXD_DTYP_DATA; 3210 cmd_type_len |= E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT; 3211 if (m_head->m_flags & M_VLANTAG) 3212 cmd_type_len |= E1000_ADVTXD_DCMD_VLE; 3213 3214 /* 3215 * Map the packet for DMA. 3216 */ 3217 tx_buf = &txr->tx_buf[txr->next_avail_desc]; 3218 tx_buf_mapped = tx_buf; 3219 map = tx_buf->map; 3220 3221 maxsegs = txr->tx_avail - IGB_TX_RESERVED; 3222 KASSERT(maxsegs >= txr->spare_desc, ("not enough spare TX desc\n")); 3223 if (maxsegs > IGB_MAX_SCATTER) 3224 maxsegs = IGB_MAX_SCATTER; 3225 3226 error = bus_dmamap_load_mbuf_defrag(txr->tx_tag, map, m_headp, 3227 segs, maxsegs, &nsegs, BUS_DMA_NOWAIT); 3228 if (error) { 3229 if (error == ENOBUFS) 3230 txr->sc->mbuf_defrag_failed++; 3231 else 3232 txr->sc->no_tx_dma_setup++; 3233 3234 m_freem(*m_headp); 3235 *m_headp = NULL; 3236 return error; 3237 } 3238 bus_dmamap_sync(txr->tx_tag, map, BUS_DMASYNC_PREWRITE); 3239 3240 m_head = *m_headp; 3241 3242 /* 3243 * Set up the TX context descriptor, if any hardware offloading is 3244 * needed. This includes CSUM, VLAN, and TSO. It will consume one 3245 * TX descriptor. 3246 * 3247 * Unlike these chips' predecessors (em/emx), TX context descriptor 3248 * will _not_ interfere TX data fetching pipelining. 
3249 */ 3250 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 3251 igb_tso_ctx(txr, m_head, &hdrlen); 3252 cmd_type_len |= E1000_ADVTXD_DCMD_TSE; 3253 olinfo_status |= E1000_TXD_POPTS_IXSM << 8; 3254 olinfo_status |= E1000_TXD_POPTS_TXSM << 8; 3255 txr->tx_nsegs++; 3256 (*segs_used)++; 3257 } else if (igb_txcsum_ctx(txr, m_head)) { 3258 if (m_head->m_pkthdr.csum_flags & CSUM_IP) 3259 olinfo_status |= (E1000_TXD_POPTS_IXSM << 8); 3260 if (m_head->m_pkthdr.csum_flags & (CSUM_UDP | CSUM_TCP)) 3261 olinfo_status |= (E1000_TXD_POPTS_TXSM << 8); 3262 txr->tx_nsegs++; 3263 (*segs_used)++; 3264 } 3265 3266 *segs_used += nsegs; 3267 txr->tx_nsegs += nsegs; 3268 if (txr->tx_nsegs >= txr->intr_nsegs) { 3269 /* 3270 * Report Status (RS) is turned on every intr_nsegs 3271 * descriptors (roughly). 3272 */ 3273 txr->tx_nsegs = 0; 3274 cmd_rs = E1000_ADVTXD_DCMD_RS; 3275 } 3276 3277 /* Calculate payload length */ 3278 olinfo_status |= ((m_head->m_pkthdr.len - hdrlen) 3279 << E1000_ADVTXD_PAYLEN_SHIFT); 3280 3281 /* 82575 needs the queue index added */ 3282 if (txr->sc->hw.mac.type == e1000_82575) 3283 olinfo_status |= txr->me << 4; 3284 3285 /* Set up our transmit descriptors */ 3286 i = txr->next_avail_desc; 3287 for (j = 0; j < nsegs; j++) { 3288 bus_size_t seg_len; 3289 bus_addr_t seg_addr; 3290 3291 tx_buf = &txr->tx_buf[i]; 3292 txd = (union e1000_adv_tx_desc *)&txr->tx_base[i]; 3293 seg_addr = segs[j].ds_addr; 3294 seg_len = segs[j].ds_len; 3295 3296 txd->read.buffer_addr = htole64(seg_addr); 3297 txd->read.cmd_type_len = htole32(cmd_type_len | seg_len); 3298 txd->read.olinfo_status = htole32(olinfo_status); 3299 if (++i == txr->num_tx_desc) 3300 i = 0; 3301 tx_buf->m_head = NULL; 3302 } 3303 3304 KASSERT(txr->tx_avail > nsegs, ("invalid avail TX desc\n")); 3305 txr->next_avail_desc = i; 3306 txr->tx_avail -= nsegs; 3307 3308 tx_buf->m_head = m_head; 3309 tx_buf_mapped->map = tx_buf->map; 3310 tx_buf->map = map; 3311 3312 /* 3313 * Last Descriptor of Packet needs End Of Packet (EOP) 3314 */ 3315 txd->read.cmd_type_len |= htole32(E1000_ADVTXD_DCMD_EOP | cmd_rs); 3316 3317 /* 3318 * Defer TDT updating, until enough descrptors are setup 3319 */ 3320 *idx = i; 3321 ++txr->tx_packets; 3322 3323 return 0; 3324 } 3325 3326 static void 3327 igb_start(struct ifnet *ifp, struct ifaltq_subque *ifsq) 3328 { 3329 struct igb_softc *sc = ifp->if_softc; 3330 struct igb_tx_ring *txr = ifsq_get_priv(ifsq); 3331 struct mbuf *m_head; 3332 int idx = -1, nsegs = 0; 3333 3334 KKASSERT(txr->ifsq == ifsq); 3335 ASSERT_SERIALIZED(&txr->tx_serialize); 3336 3337 if ((ifp->if_flags & IFF_RUNNING) == 0 || ifsq_is_oactive(ifsq)) 3338 return; 3339 3340 if (!sc->link_active) { 3341 ifsq_purge(ifsq); 3342 return; 3343 } 3344 3345 if (!IGB_IS_NOT_OACTIVE(txr)) 3346 igb_txeof(txr); 3347 3348 while (!ifsq_is_empty(ifsq)) { 3349 if (IGB_IS_OACTIVE(txr)) { 3350 ifsq_set_oactive(ifsq); 3351 /* Set watchdog on */ 3352 ifp->if_timer = 5; 3353 break; 3354 } 3355 3356 m_head = ifsq_dequeue(ifsq, NULL); 3357 if (m_head == NULL) 3358 break; 3359 3360 if (igb_encap(txr, &m_head, &nsegs, &idx)) { 3361 ifp->if_oerrors++; 3362 continue; 3363 } 3364 3365 if (nsegs >= txr->wreg_nsegs) { 3366 E1000_WRITE_REG(&txr->sc->hw, E1000_TDT(txr->me), idx); 3367 idx = -1; 3368 nsegs = 0; 3369 } 3370 3371 /* Send a copy of the frame to the BPF listener */ 3372 ETHER_BPF_MTAP(ifp, m_head); 3373 } 3374 if (idx >= 0) 3375 E1000_WRITE_REG(&txr->sc->hw, E1000_TDT(txr->me), idx); 3376 } 3377 3378 static void 3379 igb_watchdog(struct ifnet *ifp) 3380 { 3381 struct 
igb_softc *sc = ifp->if_softc; 3382 struct igb_tx_ring *txr = &sc->tx_rings[0]; 3383 3384 ASSERT_IFNET_SERIALIZED_ALL(ifp); 3385 3386 /* 3387 * If flow control has paused us since last checking 3388 * it invalidates the watchdog timing, so dont run it. 3389 */ 3390 if (sc->pause_frames) { 3391 sc->pause_frames = 0; 3392 ifp->if_timer = 5; 3393 return; 3394 } 3395 3396 if_printf(ifp, "Watchdog timeout -- resetting\n"); 3397 if_printf(ifp, "Queue(%d) tdh = %d, hw tdt = %d\n", txr->me, 3398 E1000_READ_REG(&sc->hw, E1000_TDH(txr->me)), 3399 E1000_READ_REG(&sc->hw, E1000_TDT(txr->me))); 3400 if_printf(ifp, "TX(%d) desc avail = %d, " 3401 "Next TX to Clean = %d\n", 3402 txr->me, txr->tx_avail, txr->next_to_clean); 3403 3404 ifp->if_oerrors++; 3405 sc->watchdog_events++; 3406 3407 igb_init(sc); 3408 if (!ifsq_is_empty(txr->ifsq)) 3409 ifsq_devstart(txr->ifsq); 3410 } 3411 3412 static void 3413 igb_set_eitr(struct igb_softc *sc, int idx, int rate) 3414 { 3415 uint32_t eitr = 0; 3416 3417 if (rate > 0) { 3418 if (sc->hw.mac.type == e1000_82575) { 3419 eitr = 1000000000 / 256 / rate; 3420 /* 3421 * NOTE: 3422 * Document is wrong on the 2 bits left shift 3423 */ 3424 } else { 3425 eitr = 1000000 / rate; 3426 eitr <<= IGB_EITR_INTVL_SHIFT; 3427 } 3428 3429 if (eitr == 0) { 3430 /* Don't disable it */ 3431 eitr = 1 << IGB_EITR_INTVL_SHIFT; 3432 } else if (eitr > IGB_EITR_INTVL_MASK) { 3433 /* Don't allow it to be too large */ 3434 eitr = IGB_EITR_INTVL_MASK; 3435 } 3436 } 3437 if (sc->hw.mac.type == e1000_82575) 3438 eitr |= eitr << 16; 3439 else 3440 eitr |= E1000_EITR_CNT_IGNR; 3441 E1000_WRITE_REG(&sc->hw, E1000_EITR(idx), eitr); 3442 } 3443 3444 static int 3445 igb_sysctl_intr_rate(SYSCTL_HANDLER_ARGS) 3446 { 3447 struct igb_softc *sc = (void *)arg1; 3448 struct ifnet *ifp = &sc->arpcom.ac_if; 3449 int error, intr_rate; 3450 3451 intr_rate = sc->intr_rate; 3452 error = sysctl_handle_int(oidp, &intr_rate, 0, req); 3453 if (error || req->newptr == NULL) 3454 return error; 3455 if (intr_rate < 0) 3456 return EINVAL; 3457 3458 ifnet_serialize_all(ifp); 3459 3460 sc->intr_rate = intr_rate; 3461 if (ifp->if_flags & IFF_RUNNING) 3462 igb_set_eitr(sc, 0, sc->intr_rate); 3463 3464 if (bootverbose) 3465 if_printf(ifp, "interrupt rate set to %d/sec\n", sc->intr_rate); 3466 3467 ifnet_deserialize_all(ifp); 3468 3469 return 0; 3470 } 3471 3472 static int 3473 igb_sysctl_msix_rate(SYSCTL_HANDLER_ARGS) 3474 { 3475 struct igb_msix_data *msix = (void *)arg1; 3476 struct igb_softc *sc = msix->msix_sc; 3477 struct ifnet *ifp = &sc->arpcom.ac_if; 3478 int error, msix_rate; 3479 3480 msix_rate = msix->msix_rate; 3481 error = sysctl_handle_int(oidp, &msix_rate, 0, req); 3482 if (error || req->newptr == NULL) 3483 return error; 3484 if (msix_rate < 0) 3485 return EINVAL; 3486 3487 lwkt_serialize_enter(msix->msix_serialize); 3488 3489 msix->msix_rate = msix_rate; 3490 if (ifp->if_flags & IFF_RUNNING) 3491 igb_set_eitr(sc, msix->msix_vector, msix->msix_rate); 3492 3493 if (bootverbose) { 3494 if_printf(ifp, "%s set to %d/sec\n", msix->msix_rate_desc, 3495 msix->msix_rate); 3496 } 3497 3498 lwkt_serialize_exit(msix->msix_serialize); 3499 3500 return 0; 3501 } 3502 3503 static int 3504 igb_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS) 3505 { 3506 struct igb_softc *sc = (void *)arg1; 3507 struct ifnet *ifp = &sc->arpcom.ac_if; 3508 struct igb_tx_ring *txr = &sc->tx_rings[0]; 3509 int error, nsegs; 3510 3511 nsegs = txr->intr_nsegs; 3512 error = sysctl_handle_int(oidp, &nsegs, 0, req); 3513 if (error || req->newptr == NULL) 3514 
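		/* No new value supplied (plain read) or error; leave intr_nsegs as is. */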
return error; 3515 if (nsegs <= 0) 3516 return EINVAL; 3517 3518 ifnet_serialize_all(ifp); 3519 3520 if (nsegs >= txr->num_tx_desc - txr->oact_lo_desc || 3521 nsegs >= txr->oact_hi_desc - IGB_MAX_SCATTER) { 3522 error = EINVAL; 3523 } else { 3524 error = 0; 3525 txr->intr_nsegs = nsegs; 3526 } 3527 3528 ifnet_deserialize_all(ifp); 3529 3530 return error; 3531 } 3532 3533 #ifdef IFPOLL_ENABLE 3534 3535 static int 3536 igb_sysctl_npoll_rxoff(SYSCTL_HANDLER_ARGS) 3537 { 3538 struct igb_softc *sc = (void *)arg1; 3539 struct ifnet *ifp = &sc->arpcom.ac_if; 3540 int error, off; 3541 3542 off = sc->rx_npoll_off; 3543 error = sysctl_handle_int(oidp, &off, 0, req); 3544 if (error || req->newptr == NULL) 3545 return error; 3546 if (off < 0) 3547 return EINVAL; 3548 3549 ifnet_serialize_all(ifp); 3550 if (off >= ncpus2 || off % sc->rx_ring_cnt != 0) { 3551 error = EINVAL; 3552 } else { 3553 error = 0; 3554 sc->rx_npoll_off = off; 3555 } 3556 ifnet_deserialize_all(ifp); 3557 3558 return error; 3559 } 3560 3561 static int 3562 igb_sysctl_npoll_txoff(SYSCTL_HANDLER_ARGS) 3563 { 3564 struct igb_softc *sc = (void *)arg1; 3565 struct ifnet *ifp = &sc->arpcom.ac_if; 3566 int error, off; 3567 3568 off = sc->tx_npoll_off; 3569 error = sysctl_handle_int(oidp, &off, 0, req); 3570 if (error || req->newptr == NULL) 3571 return error; 3572 if (off < 0) 3573 return EINVAL; 3574 3575 ifnet_serialize_all(ifp); 3576 if (off >= ncpus2) { 3577 error = EINVAL; 3578 } else { 3579 error = 0; 3580 sc->tx_npoll_off = off; 3581 } 3582 ifnet_deserialize_all(ifp); 3583 3584 return error; 3585 } 3586 3587 #endif /* IFPOLL_ENABLE */ 3588 3589 static void 3590 igb_init_intr(struct igb_softc *sc) 3591 { 3592 igb_set_intr_mask(sc); 3593 3594 if ((sc->flags & IGB_FLAG_SHARED_INTR) == 0) 3595 igb_init_unshared_intr(sc); 3596 3597 if (sc->intr_type != PCI_INTR_TYPE_MSIX) { 3598 igb_set_eitr(sc, 0, sc->intr_rate); 3599 } else { 3600 int i; 3601 3602 for (i = 0; i < sc->msix_cnt; ++i) 3603 igb_set_eitr(sc, i, sc->msix_data[i].msix_rate); 3604 } 3605 } 3606 3607 static void 3608 igb_init_unshared_intr(struct igb_softc *sc) 3609 { 3610 struct e1000_hw *hw = &sc->hw; 3611 const struct igb_rx_ring *rxr; 3612 const struct igb_tx_ring *txr; 3613 uint32_t ivar, index; 3614 int i; 3615 3616 /* 3617 * Enable extended mode 3618 */ 3619 if (sc->hw.mac.type != e1000_82575) { 3620 uint32_t gpie; 3621 int ivar_max; 3622 3623 gpie = E1000_GPIE_NSICR; 3624 if (sc->intr_type == PCI_INTR_TYPE_MSIX) { 3625 gpie |= E1000_GPIE_MSIX_MODE | 3626 E1000_GPIE_EIAME | 3627 E1000_GPIE_PBA; 3628 } 3629 E1000_WRITE_REG(hw, E1000_GPIE, gpie); 3630 3631 /* 3632 * Clear IVARs 3633 */ 3634 switch (sc->hw.mac.type) { 3635 case e1000_82580: 3636 ivar_max = IGB_MAX_IVAR_82580; 3637 break; 3638 3639 case e1000_i350: 3640 ivar_max = IGB_MAX_IVAR_I350; 3641 break; 3642 3643 case e1000_vfadapt: 3644 case e1000_vfadapt_i350: 3645 ivar_max = IGB_MAX_IVAR_VF; 3646 break; 3647 3648 case e1000_82576: 3649 ivar_max = IGB_MAX_IVAR_82576; 3650 break; 3651 3652 default: 3653 panic("unknown mac type %d\n", sc->hw.mac.type); 3654 } 3655 for (i = 0; i < ivar_max; ++i) 3656 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, i, 0); 3657 E1000_WRITE_REG(hw, E1000_IVAR_MISC, 0); 3658 } else { 3659 uint32_t tmp; 3660 3661 KASSERT(sc->intr_type != PCI_INTR_TYPE_MSIX, 3662 ("82575 w/ MSI-X")); 3663 tmp = E1000_READ_REG(hw, E1000_CTRL_EXT); 3664 tmp |= E1000_CTRL_EXT_IRCA; 3665 E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmp); 3666 } 3667 3668 /* 3669 * Map TX/RX interrupts to EICR 3670 */ 3671 switch 
(sc->hw.mac.type) { 3672 case e1000_82580: 3673 case e1000_i350: 3674 case e1000_vfadapt: 3675 case e1000_vfadapt_i350: 3676 /* RX entries */ 3677 for (i = 0; i < sc->rx_ring_inuse; ++i) { 3678 rxr = &sc->rx_rings[i]; 3679 3680 index = i >> 1; 3681 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); 3682 3683 if (i & 1) { 3684 ivar &= 0xff00ffff; 3685 ivar |= 3686 (rxr->rx_intr_bit | E1000_IVAR_VALID) << 16; 3687 } else { 3688 ivar &= 0xffffff00; 3689 ivar |= 3690 (rxr->rx_intr_bit | E1000_IVAR_VALID); 3691 } 3692 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); 3693 } 3694 /* TX entries */ 3695 for (i = 0; i < sc->tx_ring_cnt; ++i) { 3696 txr = &sc->tx_rings[i]; 3697 3698 index = i >> 1; 3699 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); 3700 3701 if (i & 1) { 3702 ivar &= 0x00ffffff; 3703 ivar |= 3704 (txr->tx_intr_bit | E1000_IVAR_VALID) << 24; 3705 } else { 3706 ivar &= 0xffff00ff; 3707 ivar |= 3708 (txr->tx_intr_bit | E1000_IVAR_VALID) << 8; 3709 } 3710 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); 3711 } 3712 if (sc->intr_type == PCI_INTR_TYPE_MSIX) { 3713 ivar = (sc->sts_intr_bit | E1000_IVAR_VALID) << 8; 3714 E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar); 3715 } 3716 break; 3717 3718 case e1000_82576: 3719 /* RX entries */ 3720 for (i = 0; i < sc->rx_ring_inuse; ++i) { 3721 rxr = &sc->rx_rings[i]; 3722 3723 index = i & 0x7; /* Each IVAR has two entries */ 3724 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); 3725 3726 if (i < 8) { 3727 ivar &= 0xffffff00; 3728 ivar |= 3729 (rxr->rx_intr_bit | E1000_IVAR_VALID); 3730 } else { 3731 ivar &= 0xff00ffff; 3732 ivar |= 3733 (rxr->rx_intr_bit | E1000_IVAR_VALID) << 16; 3734 } 3735 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); 3736 } 3737 /* TX entries */ 3738 for (i = 0; i < sc->tx_ring_cnt; ++i) { 3739 txr = &sc->tx_rings[i]; 3740 3741 index = i & 0x7; /* Each IVAR has two entries */ 3742 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); 3743 3744 if (i < 8) { 3745 ivar &= 0xffff00ff; 3746 ivar |= 3747 (txr->tx_intr_bit | E1000_IVAR_VALID) << 8; 3748 } else { 3749 ivar &= 0x00ffffff; 3750 ivar |= 3751 (txr->tx_intr_bit | E1000_IVAR_VALID) << 24; 3752 } 3753 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); 3754 } 3755 if (sc->intr_type == PCI_INTR_TYPE_MSIX) { 3756 ivar = (sc->sts_intr_bit | E1000_IVAR_VALID) << 8; 3757 E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar); 3758 } 3759 break; 3760 3761 case e1000_82575: 3762 /* 3763 * Enable necessary interrupt bits. 3764 * 3765 * The name of the register is confusing; in addition to 3766 * configuring the first vector of MSI-X, it also configures 3767 * which bits of EICR could be set by the hardware even when 3768 * MSI or line interrupt is used; it thus controls interrupt 3769 * generation. It MUST be configured explicitly; the default 3770 * value mentioned in the datasheet is wrong: RX queue0 and 3771 * TX queue0 are NOT enabled by default. 3772 */ 3773 E1000_WRITE_REG(&sc->hw, E1000_MSIXBM(0), sc->intr_mask); 3774 break; 3775 3776 default: 3777 panic("unknown mac type %d\n", sc->hw.mac.type); 3778 } 3779 } 3780 3781 static int 3782 igb_setup_intr(struct igb_softc *sc) 3783 { 3784 int error; 3785 3786 if (sc->intr_type == PCI_INTR_TYPE_MSIX) 3787 return igb_msix_setup(sc); 3788 3789 error = bus_setup_intr(sc->dev, sc->intr_res, INTR_MPSAFE, 3790 (sc->flags & IGB_FLAG_SHARED_INTR) ? 
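	    /* The shared-IRQ handler reads ICR; the normal path uses EICR. */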
igb_intr_shared : igb_intr, 3791 sc, &sc->intr_tag, &sc->main_serialize); 3792 if (error) { 3793 device_printf(sc->dev, "Failed to register interrupt handler"); 3794 return error; 3795 } 3796 sc->tx_rings[0].tx_intr_cpuid = rman_get_cpuid(sc->intr_res); 3797 3798 return 0; 3799 } 3800 3801 static void 3802 igb_set_txintr_mask(struct igb_tx_ring *txr, int *intr_bit0, int intr_bitmax) 3803 { 3804 if (txr->sc->hw.mac.type == e1000_82575) { 3805 txr->tx_intr_bit = 0; /* unused */ 3806 switch (txr->me) { 3807 case 0: 3808 txr->tx_intr_mask = E1000_EICR_TX_QUEUE0; 3809 break; 3810 case 1: 3811 txr->tx_intr_mask = E1000_EICR_TX_QUEUE1; 3812 break; 3813 case 2: 3814 txr->tx_intr_mask = E1000_EICR_TX_QUEUE2; 3815 break; 3816 case 3: 3817 txr->tx_intr_mask = E1000_EICR_TX_QUEUE3; 3818 break; 3819 default: 3820 panic("unsupported # of TX ring, %d\n", txr->me); 3821 } 3822 } else { 3823 int intr_bit = *intr_bit0; 3824 3825 txr->tx_intr_bit = intr_bit % intr_bitmax; 3826 txr->tx_intr_mask = 1 << txr->tx_intr_bit; 3827 3828 *intr_bit0 = intr_bit + 1; 3829 } 3830 } 3831 3832 static void 3833 igb_set_rxintr_mask(struct igb_rx_ring *rxr, int *intr_bit0, int intr_bitmax) 3834 { 3835 if (rxr->sc->hw.mac.type == e1000_82575) { 3836 rxr->rx_intr_bit = 0; /* unused */ 3837 switch (rxr->me) { 3838 case 0: 3839 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE0; 3840 break; 3841 case 1: 3842 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE1; 3843 break; 3844 case 2: 3845 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE2; 3846 break; 3847 case 3: 3848 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE3; 3849 break; 3850 default: 3851 panic("unsupported # of RX ring, %d\n", rxr->me); 3852 } 3853 } else { 3854 int intr_bit = *intr_bit0; 3855 3856 rxr->rx_intr_bit = intr_bit % intr_bitmax; 3857 rxr->rx_intr_mask = 1 << rxr->rx_intr_bit; 3858 3859 *intr_bit0 = intr_bit + 1; 3860 } 3861 } 3862 3863 static void 3864 igb_serialize(struct ifnet *ifp, enum ifnet_serialize slz) 3865 { 3866 struct igb_softc *sc = ifp->if_softc; 3867 3868 ifnet_serialize_array_enter(sc->serializes, sc->serialize_cnt, 3869 sc->tx_serialize, sc->rx_serialize, slz); 3870 } 3871 3872 static void 3873 igb_deserialize(struct ifnet *ifp, enum ifnet_serialize slz) 3874 { 3875 struct igb_softc *sc = ifp->if_softc; 3876 3877 ifnet_serialize_array_exit(sc->serializes, sc->serialize_cnt, 3878 sc->tx_serialize, sc->rx_serialize, slz); 3879 } 3880 3881 static int 3882 igb_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz) 3883 { 3884 struct igb_softc *sc = ifp->if_softc; 3885 3886 return ifnet_serialize_array_try(sc->serializes, sc->serialize_cnt, 3887 sc->tx_serialize, sc->rx_serialize, slz); 3888 } 3889 3890 #ifdef INVARIANTS 3891 3892 static void 3893 igb_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz, 3894 boolean_t serialized) 3895 { 3896 struct igb_softc *sc = ifp->if_softc; 3897 3898 ifnet_serialize_array_assert(sc->serializes, sc->serialize_cnt, 3899 sc->tx_serialize, sc->rx_serialize, slz, serialized); 3900 } 3901 3902 #endif /* INVARIANTS */ 3903 3904 static void 3905 igb_set_intr_mask(struct igb_softc *sc) 3906 { 3907 int i; 3908 3909 sc->intr_mask = sc->sts_intr_mask; 3910 for (i = 0; i < sc->rx_ring_inuse; ++i) 3911 sc->intr_mask |= sc->rx_rings[i].rx_intr_mask; 3912 for (i = 0; i < sc->tx_ring_cnt; ++i) 3913 sc->intr_mask |= sc->tx_rings[i].tx_intr_mask; 3914 if (bootverbose) { 3915 if_printf(&sc->arpcom.ac_if, "intr mask 0x%08x\n", 3916 sc->intr_mask); 3917 } 3918 } 3919 3920 static int 3921 igb_alloc_intr(struct igb_softc *sc) 3922 { 3923 int i, intr_bit, 
intr_bitmax; 3924 u_int intr_flags; 3925 3926 igb_msix_try_alloc(sc); 3927 if (sc->intr_type == PCI_INTR_TYPE_MSIX) 3928 goto done; 3929 3930 /* 3931 * Allocate MSI/legacy interrupt resource 3932 */ 3933 sc->intr_type = pci_alloc_1intr(sc->dev, igb_msi_enable, 3934 &sc->intr_rid, &intr_flags); 3935 3936 if (sc->intr_type == PCI_INTR_TYPE_LEGACY) { 3937 int unshared; 3938 3939 unshared = device_getenv_int(sc->dev, "irq.unshared", 0); 3940 if (!unshared) { 3941 sc->flags |= IGB_FLAG_SHARED_INTR; 3942 if (bootverbose) 3943 device_printf(sc->dev, "IRQ shared\n"); 3944 } else { 3945 intr_flags &= ~RF_SHAREABLE; 3946 if (bootverbose) 3947 device_printf(sc->dev, "IRQ unshared\n"); 3948 } 3949 } 3950 3951 sc->intr_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, 3952 &sc->intr_rid, intr_flags); 3953 if (sc->intr_res == NULL) { 3954 device_printf(sc->dev, "Unable to allocate bus resource: " 3955 "interrupt\n"); 3956 return ENXIO; 3957 } 3958 3959 /* 3960 * Setup MSI/legacy interrupt mask 3961 */ 3962 switch (sc->hw.mac.type) { 3963 case e1000_82575: 3964 intr_bitmax = IGB_MAX_TXRXINT_82575; 3965 break; 3966 case e1000_82580: 3967 intr_bitmax = IGB_MAX_TXRXINT_82580; 3968 break; 3969 case e1000_i350: 3970 intr_bitmax = IGB_MAX_TXRXINT_I350; 3971 break; 3972 case e1000_82576: 3973 intr_bitmax = IGB_MAX_TXRXINT_82576; 3974 break; 3975 default: 3976 intr_bitmax = IGB_MIN_TXRXINT; 3977 break; 3978 } 3979 intr_bit = 0; 3980 for (i = 0; i < sc->tx_ring_cnt; ++i) 3981 igb_set_txintr_mask(&sc->tx_rings[i], &intr_bit, intr_bitmax); 3982 for (i = 0; i < sc->rx_ring_cnt; ++i) 3983 igb_set_rxintr_mask(&sc->rx_rings[i], &intr_bit, intr_bitmax); 3984 sc->sts_intr_bit = 0; 3985 sc->sts_intr_mask = E1000_EICR_OTHER; 3986 3987 /* Initialize interrupt rate */ 3988 sc->intr_rate = IGB_INTR_RATE; 3989 done: 3990 igb_set_ring_inuse(sc, FALSE); 3991 igb_set_intr_mask(sc); 3992 return 0; 3993 } 3994 3995 static void 3996 igb_free_intr(struct igb_softc *sc) 3997 { 3998 if (sc->intr_type != PCI_INTR_TYPE_MSIX) { 3999 if (sc->intr_res != NULL) { 4000 bus_release_resource(sc->dev, SYS_RES_IRQ, sc->intr_rid, 4001 sc->intr_res); 4002 } 4003 if (sc->intr_type == PCI_INTR_TYPE_MSI) 4004 pci_release_msi(sc->dev); 4005 } else { 4006 igb_msix_free(sc, TRUE); 4007 } 4008 } 4009 4010 static void 4011 igb_teardown_intr(struct igb_softc *sc) 4012 { 4013 if (sc->intr_type != PCI_INTR_TYPE_MSIX) 4014 bus_teardown_intr(sc->dev, sc->intr_res, sc->intr_tag); 4015 else 4016 igb_msix_teardown(sc, sc->msix_cnt); 4017 } 4018 4019 static void 4020 igb_msix_try_alloc(struct igb_softc *sc) 4021 { 4022 int msix_enable, msix_cnt, msix_cnt2, alloc_cnt; 4023 int i, x, error; 4024 struct igb_msix_data *msix; 4025 boolean_t aggregate, setup = FALSE; 4026 4027 /* 4028 * Don't enable MSI-X on 82575, see: 4029 * 82575 specification update errata #25 4030 */ 4031 if (sc->hw.mac.type == e1000_82575) 4032 return; 4033 4034 /* Don't enable MSI-X on VF */ 4035 if (sc->vf_ifp) 4036 return; 4037 4038 msix_enable = device_getenv_int(sc->dev, "msix.enable", 4039 igb_msix_enable); 4040 if (!msix_enable) 4041 return; 4042 4043 msix_cnt = pci_msix_count(sc->dev); 4044 #ifdef IGB_MSIX_DEBUG 4045 msix_cnt = device_getenv_int(sc->dev, "msix.count", msix_cnt); 4046 #endif 4047 if (msix_cnt <= 1) { 4048 /* One MSI-X model does not make sense */ 4049 return; 4050 } 4051 4052 i = 0; 4053 while ((1 << (i + 1)) <= msix_cnt) 4054 ++i; 4055 msix_cnt2 = 1 << i; 4056 4057 if (bootverbose) { 4058 device_printf(sc->dev, "MSI-X count %d/%d\n", 4059 msix_cnt2, msix_cnt); 4060 } 4061 
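	/*
	 * msix_cnt2 is the largest power of two that does not exceed
	 * the number of vectors the device exposes.  One vector beyond
	 * the TX/RX set is always kept for link status, so e.g. with 8
	 * available vectors msix_cnt2 starts at 8 and is halved to 4
	 * below.
	 */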
4062 KKASSERT(msix_cnt2 <= msix_cnt); 4063 if (msix_cnt == msix_cnt2) { 4064 /* We need at least one MSI-X for link status */ 4065 msix_cnt2 >>= 1; 4066 if (msix_cnt2 <= 1) { 4067 /* One MSI-X for RX/TX does not make sense */ 4068 device_printf(sc->dev, "not enough MSI-X for TX/RX, " 4069 "MSI-X count %d/%d\n", msix_cnt2, msix_cnt); 4070 return; 4071 } 4072 KKASSERT(msix_cnt > msix_cnt2); 4073 4074 if (bootverbose) { 4075 device_printf(sc->dev, "MSI-X count fixup %d/%d\n", 4076 msix_cnt2, msix_cnt); 4077 } 4078 } 4079 4080 sc->rx_ring_msix = sc->rx_ring_cnt; 4081 if (sc->rx_ring_msix > msix_cnt2) 4082 sc->rx_ring_msix = msix_cnt2; 4083 4084 if (msix_cnt >= sc->tx_ring_cnt + sc->rx_ring_msix + 1) { 4085 /* 4086 * Independent TX/RX MSI-X 4087 */ 4088 aggregate = FALSE; 4089 if (bootverbose) 4090 device_printf(sc->dev, "independent TX/RX MSI-X\n"); 4091 alloc_cnt = sc->tx_ring_cnt + sc->rx_ring_msix; 4092 } else { 4093 /* 4094 * Aggregate TX/RX MSI-X 4095 */ 4096 aggregate = TRUE; 4097 if (bootverbose) 4098 device_printf(sc->dev, "aggregate TX/RX MSI-X\n"); 4099 alloc_cnt = msix_cnt2; 4100 if (alloc_cnt > ncpus2) 4101 alloc_cnt = ncpus2; 4102 if (sc->rx_ring_msix > alloc_cnt) 4103 sc->rx_ring_msix = alloc_cnt; 4104 } 4105 ++alloc_cnt; /* For link status */ 4106 4107 if (bootverbose) { 4108 device_printf(sc->dev, "MSI-X alloc %d, RX ring %d\n", 4109 alloc_cnt, sc->rx_ring_msix); 4110 } 4111 4112 sc->msix_mem_rid = PCIR_BAR(IGB_MSIX_BAR); 4113 sc->msix_mem_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 4114 &sc->msix_mem_rid, RF_ACTIVE); 4115 if (sc->msix_mem_res == NULL) { 4116 device_printf(sc->dev, "Unable to map MSI-X table\n"); 4117 return; 4118 } 4119 4120 sc->msix_cnt = alloc_cnt; 4121 sc->msix_data = kmalloc_cachealign( 4122 sizeof(struct igb_msix_data) * sc->msix_cnt, 4123 M_DEVBUF, M_WAITOK | M_ZERO); 4124 for (x = 0; x < sc->msix_cnt; ++x) { 4125 msix = &sc->msix_data[x]; 4126 4127 lwkt_serialize_init(&msix->msix_serialize0); 4128 msix->msix_sc = sc; 4129 msix->msix_rid = -1; 4130 msix->msix_vector = x; 4131 msix->msix_mask = 1 << msix->msix_vector; 4132 msix->msix_rate = IGB_INTR_RATE; 4133 } 4134 4135 x = 0; 4136 if (!aggregate) { 4137 int offset, offset_def; 4138 4139 if (sc->rx_ring_msix == ncpus2) { 4140 offset = 0; 4141 } else { 4142 offset_def = (sc->rx_ring_msix * 4143 device_get_unit(sc->dev)) % ncpus2; 4144 4145 offset = device_getenv_int(sc->dev, 4146 "msix.rxoff", offset_def); 4147 if (offset >= ncpus2 || 4148 offset % sc->rx_ring_msix != 0) { 4149 device_printf(sc->dev, 4150 "invalid msix.rxoff %d, use %d\n", 4151 offset, offset_def); 4152 offset = offset_def; 4153 } 4154 } 4155 4156 /* RX rings */ 4157 for (i = 0; i < sc->rx_ring_msix; ++i) { 4158 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 4159 4160 KKASSERT(x < sc->msix_cnt); 4161 msix = &sc->msix_data[x++]; 4162 rxr->rx_intr_bit = msix->msix_vector; 4163 rxr->rx_intr_mask = msix->msix_mask; 4164 4165 msix->msix_serialize = &rxr->rx_serialize; 4166 msix->msix_func = igb_msix_rx; 4167 msix->msix_arg = rxr; 4168 msix->msix_cpuid = i + offset; 4169 KKASSERT(msix->msix_cpuid < ncpus2); 4170 ksnprintf(msix->msix_desc, sizeof(msix->msix_desc), 4171 "%s rx%d", device_get_nameunit(sc->dev), i); 4172 msix->msix_rate = IGB_MSIX_RX_RATE; 4173 ksnprintf(msix->msix_rate_desc, 4174 sizeof(msix->msix_rate_desc), 4175 "RX%d interrupt rate", i); 4176 } 4177 4178 offset_def = device_get_unit(sc->dev) % ncpus2; 4179 offset = device_getenv_int(sc->dev, "msix.txoff", offset_def); 4180 if (offset >= ncpus2) { 4181 
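			/* The tunable would place TX vectors past the usable CPUs. */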
		offset_def = device_get_unit(sc->dev) % ncpus2;
		offset = device_getenv_int(sc->dev, "msix.txoff", offset_def);
		if (offset >= ncpus2) {
			device_printf(sc->dev, "invalid msix.txoff %d, "
			    "use %d\n", offset, offset_def);
			offset = offset_def;
		}

		/* TX rings */
		for (i = 0; i < sc->tx_ring_cnt; ++i) {
			struct igb_tx_ring *txr = &sc->tx_rings[i];

			KKASSERT(x < sc->msix_cnt);
			msix = &sc->msix_data[x++];
			txr->tx_intr_bit = msix->msix_vector;
			txr->tx_intr_mask = msix->msix_mask;

			msix->msix_serialize = &txr->tx_serialize;
			msix->msix_func = igb_msix_tx;
			msix->msix_arg = txr;
			msix->msix_cpuid = i + offset;
			txr->tx_intr_cpuid = msix->msix_cpuid;
			KKASSERT(msix->msix_cpuid < ncpus2);
			ksnprintf(msix->msix_desc, sizeof(msix->msix_desc),
			    "%s tx%d", device_get_nameunit(sc->dev), i);
			msix->msix_rate = IGB_MSIX_TX_RATE;
			ksnprintf(msix->msix_rate_desc,
			    sizeof(msix->msix_rate_desc),
			    "TX%d interrupt rate", i);
		}
	} else {
		/* TODO */
		error = EOPNOTSUPP;
		goto back;
	}

	/*
	 * Link status
	 */
	KKASSERT(x < sc->msix_cnt);
	msix = &sc->msix_data[x++];
	sc->sts_intr_bit = msix->msix_vector;
	sc->sts_intr_mask = msix->msix_mask;

	msix->msix_serialize = &sc->main_serialize;
	msix->msix_func = igb_msix_status;
	msix->msix_arg = sc;
	msix->msix_cpuid = 0;	/* TODO tunable */
	ksnprintf(msix->msix_desc, sizeof(msix->msix_desc), "%s sts",
	    device_get_nameunit(sc->dev));
	ksnprintf(msix->msix_rate_desc, sizeof(msix->msix_rate_desc),
	    "status interrupt rate");

	KKASSERT(x == sc->msix_cnt);

	error = pci_setup_msix(sc->dev);
	if (error) {
		device_printf(sc->dev, "Setup MSI-X failed\n");
		goto back;
	}
	setup = TRUE;

	for (i = 0; i < sc->msix_cnt; ++i) {
		msix = &sc->msix_data[i];

		error = pci_alloc_msix_vector(sc->dev, msix->msix_vector,
		    &msix->msix_rid, msix->msix_cpuid);
		if (error) {
			device_printf(sc->dev,
			    "Unable to allocate MSI-X %d on cpu%d\n",
			    msix->msix_vector, msix->msix_cpuid);
			goto back;
		}

		msix->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
		    &msix->msix_rid, RF_ACTIVE);
		if (msix->msix_res == NULL) {
			device_printf(sc->dev,
			    "Unable to allocate MSI-X %d resource\n",
			    msix->msix_vector);
			error = ENOMEM;
			goto back;
		}
	}

	pci_enable_msix(sc->dev);
	sc->intr_type = PCI_INTR_TYPE_MSIX;
back:
	if (error)
		igb_msix_free(sc, setup);
}

static void
igb_msix_free(struct igb_softc *sc, boolean_t setup)
{
	int i;

	KKASSERT(sc->msix_cnt > 1);

	for (i = 0; i < sc->msix_cnt; ++i) {
		struct igb_msix_data *msix = &sc->msix_data[i];

		if (msix->msix_res != NULL) {
			bus_release_resource(sc->dev, SYS_RES_IRQ,
			    msix->msix_rid, msix->msix_res);
		}
		if (msix->msix_rid >= 0)
			pci_release_msix_vector(sc->dev, msix->msix_rid);
	}
	if (setup)
		pci_teardown_msix(sc->dev);

	sc->msix_cnt = 0;
	kfree(sc->msix_data, M_DEVBUF);
	sc->msix_data = NULL;
}

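/*
 * igb_msix_setup: install one interrupt handler per allocated MSI-X
 * vector.  If any handler fails to install, the handlers already set
 * up are torn down and the error is returned.
 */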
static int
igb_msix_setup(struct igb_softc *sc)
{
	int i;

	for (i = 0; i < sc->msix_cnt; ++i) {
		struct igb_msix_data *msix = &sc->msix_data[i];
		int error;

		error = bus_setup_intr_descr(sc->dev, msix->msix_res,
		    INTR_MPSAFE, msix->msix_func, msix->msix_arg,
		    &msix->msix_handle, msix->msix_serialize,
		    msix->msix_desc);
		if (error) {
			device_printf(sc->dev, "could not set up %s "
			    "interrupt handler.\n", msix->msix_desc);
			igb_msix_teardown(sc, i);
			return error;
		}
	}
	return 0;
}

static void
igb_msix_teardown(struct igb_softc *sc, int msix_cnt)
{
	int i;

	for (i = 0; i < msix_cnt; ++i) {
		struct igb_msix_data *msix = &sc->msix_data[i];

		bus_teardown_intr(sc->dev, msix->msix_res, msix->msix_handle);
	}
}

static void
igb_msix_rx(void *arg)
{
	struct igb_rx_ring *rxr = arg;

	ASSERT_SERIALIZED(&rxr->rx_serialize);
	igb_rxeof(rxr, -1);

	E1000_WRITE_REG(&rxr->sc->hw, E1000_EIMS, rxr->rx_intr_mask);
}

static void
igb_msix_tx(void *arg)
{
	struct igb_tx_ring *txr = arg;

	ASSERT_SERIALIZED(&txr->tx_serialize);

	igb_txeof(txr);
	if (!ifsq_is_empty(txr->ifsq))
		ifsq_devstart(txr->ifsq);

	E1000_WRITE_REG(&txr->sc->hw, E1000_EIMS, txr->tx_intr_mask);
}

static void
igb_msix_status(void *arg)
{
	struct igb_softc *sc = arg;
	uint32_t icr;

	ASSERT_SERIALIZED(&sc->main_serialize);

	icr = E1000_READ_REG(&sc->hw, E1000_ICR);
	if (icr & E1000_ICR_LSC) {
		sc->hw.mac.get_link_status = 1;
		igb_update_link_status(sc);
	}

	E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->sts_intr_mask);
}

static void
igb_set_ring_inuse(struct igb_softc *sc, boolean_t polling)
{
	if (!IGB_ENABLE_HWRSS(sc))
		return;

	if (polling)
		sc->rx_ring_inuse = sc->rx_ring_cnt;
	else if (sc->intr_type != PCI_INTR_TYPE_MSIX)
		sc->rx_ring_inuse = IGB_MIN_RING_RSS;
	else
		sc->rx_ring_inuse = sc->rx_ring_msix;
	if (bootverbose) {
		if_printf(&sc->arpcom.ac_if, "RX rings %d/%d\n",
		    sc->rx_ring_inuse, sc->rx_ring_cnt);
	}
}

static int
igb_tso_pullup(struct igb_tx_ring *txr, struct mbuf **mp)
{
	int hoff, iphlen, thoff;
	struct mbuf *m;

	m = *mp;
	KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));

	iphlen = m->m_pkthdr.csum_iphlen;
	thoff = m->m_pkthdr.csum_thlen;
	hoff = m->m_pkthdr.csum_lhlen;

	KASSERT(iphlen > 0, ("invalid ip hlen"));
	KASSERT(thoff > 0, ("invalid tcp hlen"));
	KASSERT(hoff > 0, ("invalid ether hlen"));

	if (__predict_false(m->m_len < hoff + iphlen + thoff)) {
		m = m_pullup(m, hoff + iphlen + thoff);
		if (m == NULL) {
			*mp = NULL;
			return ENOBUFS;
		}
		*mp = m;
	}
	if (txr->sc->flags & IGB_FLAG_TSO_IPLEN0) {
		struct ip *ip;

		ip = mtodoff(m, struct ip *, hoff);
		ip->ip_len = 0;
	}

	return 0;
}

static void
igb_tso_ctx(struct igb_tx_ring *txr, struct mbuf *m, uint32_t *hlen)
{
	struct e1000_adv_tx_context_desc *TXD;
	uint32_t vlan_macip_lens, type_tucmd_mlhl, mss_l4len_idx;
	int hoff, ctxd, iphlen, thoff;

	iphlen = m->m_pkthdr.csum_iphlen;
	thoff = m->m_pkthdr.csum_thlen;
	hoff = m->m_pkthdr.csum_lhlen;

	vlan_macip_lens = type_tucmd_mlhl = mss_l4len_idx = 0;

	ctxd = txr->next_avail_desc;
	TXD = (struct e1000_adv_tx_context_desc *)&txr->tx_base[ctxd];

	if (m->m_flags & M_VLANTAG) {
		uint16_t vlantag;

		vlantag = htole16(m->m_pkthdr.ether_vlantag);
		vlan_macip_lens |= (vlantag << E1000_ADVTXD_VLAN_SHIFT);
	}

	vlan_macip_lens |= (hoff << E1000_ADVTXD_MACLEN_SHIFT);
	vlan_macip_lens |= iphlen;

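	/*
	 * Extended TX context descriptor: TCP segmentation over IPv4,
	 * carrying the MSS and TCP header length for the hardware.
	 */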
	type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
	type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
	type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;

	mss_l4len_idx |= (m->m_pkthdr.tso_segsz << E1000_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (thoff << E1000_ADVTXD_L4LEN_SHIFT);
	/* 82575 needs the queue index added */
	if (txr->sc->hw.mac.type == e1000_82575)
		mss_l4len_idx |= txr->me << 4;

	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
	TXD->seqnum_seed = htole32(0);
	TXD->mss_l4len_idx = htole32(mss_l4len_idx);

	/* We've consumed the first desc, adjust counters */
	if (++ctxd == txr->num_tx_desc)
		ctxd = 0;
	txr->next_avail_desc = ctxd;
	--txr->tx_avail;

	*hlen = hoff + iphlen + thoff;
}
