1 /* 2 * Copyright (c) 2001-2011, Intel Corporation 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions are met: 7 * 8 * 1. Redistributions of source code must retain the above copyright notice, 9 * this list of conditions and the following disclaimer. 10 * 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * 3. Neither the name of the Intel Corporation nor the names of its 16 * contributors may be used to endorse or promote products derived from 17 * this software without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 */ 31 32 #include "opt_polling.h" 33 #include "opt_igb.h" 34 35 #include <sys/param.h> 36 #include <sys/bus.h> 37 #include <sys/endian.h> 38 #include <sys/interrupt.h> 39 #include <sys/kernel.h> 40 #include <sys/malloc.h> 41 #include <sys/mbuf.h> 42 #include <sys/proc.h> 43 #include <sys/rman.h> 44 #include <sys/serialize.h> 45 #include <sys/serialize2.h> 46 #include <sys/socket.h> 47 #include <sys/sockio.h> 48 #include <sys/sysctl.h> 49 #include <sys/systm.h> 50 51 #include <net/bpf.h> 52 #include <net/ethernet.h> 53 #include <net/if.h> 54 #include <net/if_arp.h> 55 #include <net/if_dl.h> 56 #include <net/if_media.h> 57 #include <net/ifq_var.h> 58 #include <net/toeplitz.h> 59 #include <net/toeplitz2.h> 60 #include <net/vlan/if_vlan_var.h> 61 #include <net/vlan/if_vlan_ether.h> 62 #include <net/if_poll.h> 63 64 #include <netinet/in_systm.h> 65 #include <netinet/in.h> 66 #include <netinet/ip.h> 67 #include <netinet/tcp.h> 68 #include <netinet/udp.h> 69 70 #include <bus/pci/pcivar.h> 71 #include <bus/pci/pcireg.h> 72 73 #include <dev/netif/ig_hal/e1000_api.h> 74 #include <dev/netif/ig_hal/e1000_82575.h> 75 #include <dev/netif/igb/if_igb.h> 76 77 #ifdef IGB_RSS_DEBUG 78 #define IGB_RSS_DPRINTF(sc, lvl, fmt, ...) \ 79 do { \ 80 if (sc->rss_debug >= lvl) \ 81 if_printf(&sc->arpcom.ac_if, fmt, __VA_ARGS__); \ 82 } while (0) 83 #else /* !IGB_RSS_DEBUG */ 84 #define IGB_RSS_DPRINTF(sc, lvl, fmt, ...) 
((void)0) 85 #endif /* IGB_RSS_DEBUG */ 86 87 #define IGB_NAME "Intel(R) PRO/1000 " 88 #define IGB_DEVICE(id) \ 89 { IGB_VENDOR_ID, E1000_DEV_ID_##id, IGB_NAME #id } 90 #define IGB_DEVICE_NULL { 0, 0, NULL } 91 92 static struct igb_device { 93 uint16_t vid; 94 uint16_t did; 95 const char *desc; 96 } igb_devices[] = { 97 IGB_DEVICE(82575EB_COPPER), 98 IGB_DEVICE(82575EB_FIBER_SERDES), 99 IGB_DEVICE(82575GB_QUAD_COPPER), 100 IGB_DEVICE(82576), 101 IGB_DEVICE(82576_NS), 102 IGB_DEVICE(82576_NS_SERDES), 103 IGB_DEVICE(82576_FIBER), 104 IGB_DEVICE(82576_SERDES), 105 IGB_DEVICE(82576_SERDES_QUAD), 106 IGB_DEVICE(82576_QUAD_COPPER), 107 IGB_DEVICE(82576_QUAD_COPPER_ET2), 108 IGB_DEVICE(82576_VF), 109 IGB_DEVICE(82580_COPPER), 110 IGB_DEVICE(82580_FIBER), 111 IGB_DEVICE(82580_SERDES), 112 IGB_DEVICE(82580_SGMII), 113 IGB_DEVICE(82580_COPPER_DUAL), 114 IGB_DEVICE(82580_QUAD_FIBER), 115 IGB_DEVICE(DH89XXCC_SERDES), 116 IGB_DEVICE(DH89XXCC_SGMII), 117 IGB_DEVICE(DH89XXCC_SFP), 118 IGB_DEVICE(DH89XXCC_BACKPLANE), 119 IGB_DEVICE(I350_COPPER), 120 IGB_DEVICE(I350_FIBER), 121 IGB_DEVICE(I350_SERDES), 122 IGB_DEVICE(I350_SGMII), 123 IGB_DEVICE(I350_VF), 124 125 /* required last entry */ 126 IGB_DEVICE_NULL 127 }; 128 129 static int igb_probe(device_t); 130 static int igb_attach(device_t); 131 static int igb_detach(device_t); 132 static int igb_shutdown(device_t); 133 static int igb_suspend(device_t); 134 static int igb_resume(device_t); 135 136 static boolean_t igb_is_valid_ether_addr(const uint8_t *); 137 static void igb_setup_ifp(struct igb_softc *); 138 static int igb_txctx_pullup(struct igb_tx_ring *, struct mbuf **); 139 static boolean_t igb_txctx(struct igb_tx_ring *, struct mbuf *); 140 static void igb_add_sysctl(struct igb_softc *); 141 static int igb_sysctl_intr_rate(SYSCTL_HANDLER_ARGS); 142 static int igb_sysctl_msix_rate(SYSCTL_HANDLER_ARGS); 143 static int igb_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS); 144 static void igb_set_ring_inuse(struct igb_softc *, boolean_t); 145 146 static void igb_vf_init_stats(struct igb_softc *); 147 static void igb_reset(struct igb_softc *); 148 static void igb_update_stats_counters(struct igb_softc *); 149 static void igb_update_vf_stats_counters(struct igb_softc *); 150 static void igb_update_link_status(struct igb_softc *); 151 static void igb_init_tx_unit(struct igb_softc *); 152 static void igb_init_rx_unit(struct igb_softc *); 153 154 static void igb_set_vlan(struct igb_softc *); 155 static void igb_set_multi(struct igb_softc *); 156 static void igb_set_promisc(struct igb_softc *); 157 static void igb_disable_promisc(struct igb_softc *); 158 159 static int igb_alloc_rings(struct igb_softc *); 160 static void igb_free_rings(struct igb_softc *); 161 static int igb_create_tx_ring(struct igb_tx_ring *); 162 static int igb_create_rx_ring(struct igb_rx_ring *); 163 static void igb_free_tx_ring(struct igb_tx_ring *); 164 static void igb_free_rx_ring(struct igb_rx_ring *); 165 static void igb_destroy_tx_ring(struct igb_tx_ring *, int); 166 static void igb_destroy_rx_ring(struct igb_rx_ring *, int); 167 static void igb_init_tx_ring(struct igb_tx_ring *); 168 static int igb_init_rx_ring(struct igb_rx_ring *); 169 static int igb_newbuf(struct igb_rx_ring *, int, boolean_t); 170 static int igb_encap(struct igb_tx_ring *, struct mbuf **); 171 172 static void igb_stop(struct igb_softc *); 173 static void igb_init(void *); 174 static int igb_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 175 static void igb_media_status(struct ifnet *, struct ifmediareq *); 176 
static int igb_media_change(struct ifnet *); 177 static void igb_timer(void *); 178 static void igb_watchdog(struct ifnet *); 179 static void igb_start(struct ifnet *); 180 #ifdef DEVICE_POLLING 181 static void igb_poll(struct ifnet *, enum poll_cmd, int); 182 #endif 183 static void igb_serialize(struct ifnet *, enum ifnet_serialize); 184 static void igb_deserialize(struct ifnet *, enum ifnet_serialize); 185 static int igb_tryserialize(struct ifnet *, enum ifnet_serialize); 186 #ifdef INVARIANTS 187 static void igb_serialize_assert(struct ifnet *, enum ifnet_serialize, 188 boolean_t); 189 #endif 190 191 static void igb_intr(void *); 192 static void igb_intr_shared(void *); 193 static void igb_rxeof(struct igb_rx_ring *, int); 194 static void igb_txeof(struct igb_tx_ring *); 195 static void igb_set_eitr(struct igb_softc *, int, int); 196 static void igb_enable_intr(struct igb_softc *); 197 static void igb_disable_intr(struct igb_softc *); 198 static void igb_init_unshared_intr(struct igb_softc *); 199 static void igb_init_intr(struct igb_softc *); 200 static int igb_setup_intr(struct igb_softc *); 201 static void igb_set_txintr_mask(struct igb_tx_ring *, int *, int); 202 static void igb_set_rxintr_mask(struct igb_rx_ring *, int *, int); 203 static void igb_set_intr_mask(struct igb_softc *); 204 static int igb_alloc_intr(struct igb_softc *); 205 static void igb_free_intr(struct igb_softc *); 206 static void igb_teardown_intr(struct igb_softc *); 207 static void igb_msix_try_alloc(struct igb_softc *); 208 static void igb_msix_free(struct igb_softc *, boolean_t); 209 static int igb_msix_setup(struct igb_softc *); 210 static void igb_msix_teardown(struct igb_softc *, int); 211 static void igb_msix_rx(void *); 212 static void igb_msix_tx(void *); 213 static void igb_msix_status(void *); 214 215 /* Management and WOL Support */ 216 static void igb_get_mgmt(struct igb_softc *); 217 static void igb_rel_mgmt(struct igb_softc *); 218 static void igb_get_hw_control(struct igb_softc *); 219 static void igb_rel_hw_control(struct igb_softc *); 220 static void igb_enable_wol(device_t); 221 222 static device_method_t igb_methods[] = { 223 /* Device interface */ 224 DEVMETHOD(device_probe, igb_probe), 225 DEVMETHOD(device_attach, igb_attach), 226 DEVMETHOD(device_detach, igb_detach), 227 DEVMETHOD(device_shutdown, igb_shutdown), 228 DEVMETHOD(device_suspend, igb_suspend), 229 DEVMETHOD(device_resume, igb_resume), 230 { 0, 0 } 231 }; 232 233 static driver_t igb_driver = { 234 "igb", 235 igb_methods, 236 sizeof(struct igb_softc), 237 }; 238 239 static devclass_t igb_devclass; 240 241 DECLARE_DUMMY_MODULE(if_igb); 242 MODULE_DEPEND(igb, ig_hal, 1, 1, 1); 243 DRIVER_MODULE(if_igb, pci, igb_driver, igb_devclass, NULL, NULL); 244 245 static int igb_rxd = IGB_DEFAULT_RXD; 246 static int igb_txd = IGB_DEFAULT_TXD; 247 static int igb_rxr = 0; 248 static int igb_msi_enable = 1; 249 static int igb_msix_enable = 1; 250 static int igb_eee_disabled = 1; /* Energy Efficient Ethernet */ 251 static int igb_fc_setting = e1000_fc_full; 252 253 /* 254 * DMA Coalescing, only for i350 - default to off, 255 * this feature is for power savings 256 */ 257 static int igb_dma_coalesce = 0; 258 259 TUNABLE_INT("hw.igb.rxd", &igb_rxd); 260 TUNABLE_INT("hw.igb.txd", &igb_txd); 261 TUNABLE_INT("hw.igb.rxr", &igb_rxr); 262 TUNABLE_INT("hw.igb.msi.enable", &igb_msi_enable); 263 TUNABLE_INT("hw.igb.msix.enable", &igb_msix_enable); 264 TUNABLE_INT("hw.igb.fc_setting", &igb_fc_setting); 265 266 /* i350 specific */ 267 
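/*
 * All of these TUNABLE_INT knobs (including the two i350-specific ones
 * immediately below) are read from the kernel environment at boot, so
 * they can be preset from the loader, e.g. in /boot/loader.conf
 * (illustrative values, not defaults or recommendations):
 *   hw.igb.rxd="2048"
 *   hw.igb.msix.enable="0"
 */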
TUNABLE_INT("hw.igb.eee_disabled", &igb_eee_disabled); 268 TUNABLE_INT("hw.igb.dma_coalesce", &igb_dma_coalesce); 269 270 static __inline void 271 igb_rxcsum(uint32_t staterr, struct mbuf *mp) 272 { 273 /* Ignore Checksum bit is set */ 274 if (staterr & E1000_RXD_STAT_IXSM) 275 return; 276 277 if ((staterr & (E1000_RXD_STAT_IPCS | E1000_RXDEXT_STATERR_IPE)) == 278 E1000_RXD_STAT_IPCS) 279 mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID; 280 281 if (staterr & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) { 282 if ((staterr & E1000_RXDEXT_STATERR_TCPE) == 0) { 283 mp->m_pkthdr.csum_flags |= CSUM_DATA_VALID | 284 CSUM_PSEUDO_HDR | CSUM_FRAG_NOT_CHECKED; 285 mp->m_pkthdr.csum_data = htons(0xffff); 286 } 287 } 288 } 289 290 static __inline struct pktinfo * 291 igb_rssinfo(struct mbuf *m, struct pktinfo *pi, 292 uint32_t hash, uint32_t hashtype, uint32_t staterr) 293 { 294 switch (hashtype) { 295 case E1000_RXDADV_RSSTYPE_IPV4_TCP: 296 pi->pi_netisr = NETISR_IP; 297 pi->pi_flags = 0; 298 pi->pi_l3proto = IPPROTO_TCP; 299 break; 300 301 case E1000_RXDADV_RSSTYPE_IPV4: 302 if (staterr & E1000_RXD_STAT_IXSM) 303 return NULL; 304 305 if ((staterr & 306 (E1000_RXD_STAT_TCPCS | E1000_RXDEXT_STATERR_TCPE)) == 307 E1000_RXD_STAT_TCPCS) { 308 pi->pi_netisr = NETISR_IP; 309 pi->pi_flags = 0; 310 pi->pi_l3proto = IPPROTO_UDP; 311 break; 312 } 313 /* FALL THROUGH */ 314 default: 315 return NULL; 316 } 317 318 m->m_flags |= M_HASH; 319 m->m_pkthdr.hash = toeplitz_hash(hash); 320 return pi; 321 } 322 323 static int 324 igb_probe(device_t dev) 325 { 326 const struct igb_device *d; 327 uint16_t vid, did; 328 329 vid = pci_get_vendor(dev); 330 did = pci_get_device(dev); 331 332 for (d = igb_devices; d->desc != NULL; ++d) { 333 if (vid == d->vid && did == d->did) { 334 device_set_desc(dev, d->desc); 335 return 0; 336 } 337 } 338 return ENXIO; 339 } 340 341 static int 342 igb_attach(device_t dev) 343 { 344 struct igb_softc *sc = device_get_softc(dev); 345 uint16_t eeprom_data; 346 int error = 0, i, j, ring_max; 347 348 #ifdef notyet 349 /* SYSCTL stuff */ 350 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 351 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 352 OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0, 353 igb_sysctl_nvm_info, "I", "NVM Information"); 354 355 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), 356 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 357 OID_AUTO, "enable_aim", CTLTYPE_INT|CTLFLAG_RW, 358 &igb_enable_aim, 1, "Interrupt Moderation"); 359 360 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 361 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 362 OID_AUTO, "flow_control", CTLTYPE_INT|CTLFLAG_RW, 363 adapter, 0, igb_set_flowcntl, "I", "Flow Control"); 364 #endif 365 366 callout_init_mp(&sc->timer); 367 lwkt_serialize_init(&sc->main_serialize); 368 369 sc->dev = sc->osdep.dev = dev; 370 371 /* 372 * Determine hardware and mac type 373 */ 374 sc->hw.vendor_id = pci_get_vendor(dev); 375 sc->hw.device_id = pci_get_device(dev); 376 sc->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1); 377 sc->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2); 378 sc->hw.subsystem_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2); 379 380 if (e1000_set_mac_type(&sc->hw)) 381 return ENXIO; 382 383 /* Are we a VF device? 
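 * The e1000_vfadapt and e1000_vfadapt_i350 mac types identify SR-IOV
 * virtual functions; vf_ifp later selects the VF statistics block and
 * the VF-specific promiscuous/multicast helpers.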
*/ 384 if (sc->hw.mac.type == e1000_vfadapt || 385 sc->hw.mac.type == e1000_vfadapt_i350) 386 sc->vf_ifp = 1; 387 else 388 sc->vf_ifp = 0; 389 390 /* 391 * Configure total supported RX/TX ring count 392 */ 393 switch (sc->hw.mac.type) { 394 case e1000_82575: 395 ring_max = IGB_MAX_RING_82575; 396 break; 397 case e1000_82580: 398 ring_max = IGB_MAX_RING_82580; 399 break; 400 case e1000_i350: 401 ring_max = IGB_MAX_RING_I350; 402 break; 403 case e1000_82576: 404 ring_max = IGB_MAX_RING_82576; 405 break; 406 default: 407 ring_max = IGB_MIN_RING; 408 break; 409 } 410 sc->rx_ring_cnt = device_getenv_int(dev, "rxr", igb_rxr); 411 sc->rx_ring_cnt = if_ring_count2(sc->rx_ring_cnt, ring_max); 412 #ifdef IGB_RSS_DEBUG 413 sc->rx_ring_cnt = device_getenv_int(dev, "rxr_debug", sc->rx_ring_cnt); 414 #endif 415 sc->rx_ring_inuse = sc->rx_ring_cnt; 416 sc->tx_ring_cnt = 1; /* XXX */ 417 418 /* Enable bus mastering */ 419 pci_enable_busmaster(dev); 420 421 /* 422 * Allocate IO memory 423 */ 424 sc->mem_rid = PCIR_BAR(0); 425 sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid, 426 RF_ACTIVE); 427 if (sc->mem_res == NULL) { 428 device_printf(dev, "Unable to allocate bus resource: memory\n"); 429 error = ENXIO; 430 goto failed; 431 } 432 sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->mem_res); 433 sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->mem_res); 434 435 sc->hw.hw_addr = (uint8_t *)&sc->osdep.mem_bus_space_handle; 436 437 /* Save PCI command register for Shared Code */ 438 sc->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2); 439 sc->hw.back = &sc->osdep; 440 441 /* Do Shared Code initialization */ 442 if (e1000_setup_init_funcs(&sc->hw, TRUE)) { 443 device_printf(dev, "Setup of Shared code failed\n"); 444 error = ENXIO; 445 goto failed; 446 } 447 448 e1000_get_bus_info(&sc->hw); 449 450 sc->hw.mac.autoneg = DO_AUTO_NEG; 451 sc->hw.phy.autoneg_wait_to_complete = FALSE; 452 sc->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT; 453 454 /* Copper options */ 455 if (sc->hw.phy.media_type == e1000_media_type_copper) { 456 sc->hw.phy.mdix = AUTO_ALL_MODES; 457 sc->hw.phy.disable_polarity_correction = FALSE; 458 sc->hw.phy.ms_type = IGB_MASTER_SLAVE; 459 } 460 461 /* Set the frame limits assuming standard ethernet sized frames. */ 462 sc->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN; 463 464 /* Allocate RX/TX rings */ 465 error = igb_alloc_rings(sc); 466 if (error) 467 goto failed; 468 469 /* Allocate interrupt */ 470 error = igb_alloc_intr(sc); 471 if (error) 472 goto failed; 473 474 /* 475 * Setup serializers 476 */ 477 i = 0; 478 sc->serializes[i++] = &sc->main_serialize; 479 480 sc->tx_serialize = i; 481 for (j = 0; j < sc->tx_ring_cnt; ++j) 482 sc->serializes[i++] = &sc->tx_rings[j].tx_serialize; 483 484 sc->rx_serialize = i; 485 for (j = 0; j < sc->rx_ring_cnt; ++j) 486 sc->serializes[i++] = &sc->rx_rings[j].rx_serialize; 487 488 sc->serialize_cnt = i; 489 KKASSERT(sc->serialize_cnt <= IGB_NSERIALIZE); 490 491 /* Allocate the appropriate stats memory */ 492 if (sc->vf_ifp) { 493 sc->stats = kmalloc(sizeof(struct e1000_vf_stats), M_DEVBUF, 494 M_WAITOK | M_ZERO); 495 igb_vf_init_stats(sc); 496 } else { 497 sc->stats = kmalloc(sizeof(struct e1000_hw_stats), M_DEVBUF, 498 M_WAITOK | M_ZERO); 499 } 500 501 /* Allocate multicast array memory. 
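 * One flat array of MAX_NUM_MULTICAST_ADDRESSES entries of ETHER_ADDR_LEN
 * bytes each; igb_set_multi() rebuilds its contents from the interface
 * multicast list.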
*/ 502 sc->mta = kmalloc(ETHER_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES, 503 M_DEVBUF, M_WAITOK); 504 505 /* Some adapter-specific advanced features */ 506 if (sc->hw.mac.type >= e1000_i350) { 507 #ifdef notyet 508 igb_set_sysctl_value(adapter, "dma_coalesce", 509 "configure dma coalesce", 510 &adapter->dma_coalesce, igb_dma_coalesce); 511 igb_set_sysctl_value(adapter, "eee_disabled", 512 "enable Energy Efficient Ethernet", 513 &adapter->hw.dev_spec._82575.eee_disable, 514 igb_eee_disabled); 515 #else 516 sc->dma_coalesce = igb_dma_coalesce; 517 sc->hw.dev_spec._82575.eee_disable = igb_eee_disabled; 518 #endif 519 e1000_set_eee_i350(&sc->hw); 520 } 521 522 /* 523 * Start from a known state, this is important in reading the nvm and 524 * mac from that. 525 */ 526 e1000_reset_hw(&sc->hw); 527 528 /* Make sure we have a good EEPROM before we read from it */ 529 if (e1000_validate_nvm_checksum(&sc->hw) < 0) { 530 /* 531 * Some PCI-E parts fail the first check due to 532 * the link being in sleep state, call it again, 533 * if it fails a second time its a real issue. 534 */ 535 if (e1000_validate_nvm_checksum(&sc->hw) < 0) { 536 device_printf(dev, 537 "The EEPROM Checksum Is Not Valid\n"); 538 error = EIO; 539 goto failed; 540 } 541 } 542 543 /* Copy the permanent MAC address out of the EEPROM */ 544 if (e1000_read_mac_addr(&sc->hw) < 0) { 545 device_printf(dev, "EEPROM read error while reading MAC" 546 " address\n"); 547 error = EIO; 548 goto failed; 549 } 550 if (!igb_is_valid_ether_addr(sc->hw.mac.addr)) { 551 device_printf(dev, "Invalid MAC address\n"); 552 error = EIO; 553 goto failed; 554 } 555 556 #ifdef notyet 557 /* 558 ** Configure Interrupts 559 */ 560 if ((adapter->msix > 1) && (igb_enable_msix)) 561 error = igb_allocate_msix(adapter); 562 else /* MSI or Legacy */ 563 error = igb_allocate_legacy(adapter); 564 if (error) 565 goto err_late; 566 #endif 567 568 /* Setup OS specific network interface */ 569 igb_setup_ifp(sc); 570 571 /* Add sysctl tree, must after igb_setup_ifp() */ 572 igb_add_sysctl(sc); 573 574 /* Now get a good starting state */ 575 igb_reset(sc); 576 577 /* Initialize statistics */ 578 igb_update_stats_counters(sc); 579 580 sc->hw.mac.get_link_status = 1; 581 igb_update_link_status(sc); 582 583 /* Indicate SOL/IDER usage */ 584 if (e1000_check_reset_block(&sc->hw)) { 585 device_printf(dev, 586 "PHY reset is blocked due to SOL/IDER session.\n"); 587 } 588 589 /* Determine if we have to control management hardware */ 590 if (e1000_enable_mng_pass_thru(&sc->hw)) 591 sc->flags |= IGB_FLAG_HAS_MGMT; 592 593 /* 594 * Setup Wake-on-Lan 595 */ 596 /* APME bit in EEPROM is mapped to WUC.APME */ 597 eeprom_data = E1000_READ_REG(&sc->hw, E1000_WUC) & E1000_WUC_APME; 598 if (eeprom_data) 599 sc->wol = E1000_WUFC_MAG; 600 /* XXX disable WOL */ 601 sc->wol = 0; 602 603 #ifdef notyet 604 /* Register for VLAN events */ 605 adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, 606 igb_register_vlan, adapter, EVENTHANDLER_PRI_FIRST); 607 adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, 608 igb_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST); 609 #endif 610 611 #ifdef notyet 612 igb_add_hw_stats(adapter); 613 #endif 614 615 error = igb_setup_intr(sc); 616 if (error) { 617 ether_ifdetach(&sc->arpcom.ac_if); 618 goto failed; 619 } 620 return 0; 621 622 failed: 623 igb_detach(dev); 624 return error; 625 } 626 627 static int 628 igb_detach(device_t dev) 629 { 630 struct igb_softc *sc = device_get_softc(dev); 631 632 if (device_is_attached(dev)) { 633 struct ifnet *ifp = 
&sc->arpcom.ac_if; 634 635 ifnet_serialize_all(ifp); 636 637 igb_stop(sc); 638 639 e1000_phy_hw_reset(&sc->hw); 640 641 /* Give control back to firmware */ 642 igb_rel_mgmt(sc); 643 igb_rel_hw_control(sc); 644 645 if (sc->wol) { 646 E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN); 647 E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol); 648 igb_enable_wol(dev); 649 } 650 651 igb_teardown_intr(sc); 652 653 ifnet_deserialize_all(ifp); 654 655 ether_ifdetach(ifp); 656 } else if (sc->mem_res != NULL) { 657 igb_rel_hw_control(sc); 658 } 659 bus_generic_detach(dev); 660 661 if (sc->sysctl_tree != NULL) 662 sysctl_ctx_free(&sc->sysctl_ctx); 663 664 igb_free_intr(sc); 665 666 if (sc->msix_mem_res != NULL) { 667 bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_mem_rid, 668 sc->msix_mem_res); 669 } 670 if (sc->mem_res != NULL) { 671 bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, 672 sc->mem_res); 673 } 674 675 igb_free_rings(sc); 676 677 if (sc->mta != NULL) 678 kfree(sc->mta, M_DEVBUF); 679 if (sc->stats != NULL) 680 kfree(sc->stats, M_DEVBUF); 681 682 return 0; 683 } 684 685 static int 686 igb_shutdown(device_t dev) 687 { 688 return igb_suspend(dev); 689 } 690 691 static int 692 igb_suspend(device_t dev) 693 { 694 struct igb_softc *sc = device_get_softc(dev); 695 struct ifnet *ifp = &sc->arpcom.ac_if; 696 697 ifnet_serialize_all(ifp); 698 699 igb_stop(sc); 700 701 igb_rel_mgmt(sc); 702 igb_rel_hw_control(sc); 703 704 if (sc->wol) { 705 E1000_WRITE_REG(&sc->hw, E1000_WUC, E1000_WUC_PME_EN); 706 E1000_WRITE_REG(&sc->hw, E1000_WUFC, sc->wol); 707 igb_enable_wol(dev); 708 } 709 710 ifnet_deserialize_all(ifp); 711 712 return bus_generic_suspend(dev); 713 } 714 715 static int 716 igb_resume(device_t dev) 717 { 718 struct igb_softc *sc = device_get_softc(dev); 719 struct ifnet *ifp = &sc->arpcom.ac_if; 720 721 ifnet_serialize_all(ifp); 722 723 igb_init(sc); 724 igb_get_mgmt(sc); 725 726 if_devstart(ifp); 727 728 ifnet_deserialize_all(ifp); 729 730 return bus_generic_resume(dev); 731 } 732 733 static int 734 igb_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr) 735 { 736 struct igb_softc *sc = ifp->if_softc; 737 struct ifreq *ifr = (struct ifreq *)data; 738 int max_frame_size, mask, reinit; 739 int error = 0; 740 741 ASSERT_IFNET_SERIALIZED_ALL(ifp); 742 743 switch (command) { 744 case SIOCSIFMTU: 745 max_frame_size = 9234; 746 if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN - 747 ETHER_CRC_LEN) { 748 error = EINVAL; 749 break; 750 } 751 752 ifp->if_mtu = ifr->ifr_mtu; 753 sc->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + 754 ETHER_CRC_LEN; 755 756 if (ifp->if_flags & IFF_RUNNING) 757 igb_init(sc); 758 break; 759 760 case SIOCSIFFLAGS: 761 if (ifp->if_flags & IFF_UP) { 762 if (ifp->if_flags & IFF_RUNNING) { 763 if ((ifp->if_flags ^ sc->if_flags) & 764 (IFF_PROMISC | IFF_ALLMULTI)) { 765 igb_disable_promisc(sc); 766 igb_set_promisc(sc); 767 } 768 } else { 769 igb_init(sc); 770 } 771 } else if (ifp->if_flags & IFF_RUNNING) { 772 igb_stop(sc); 773 } 774 sc->if_flags = ifp->if_flags; 775 break; 776 777 case SIOCADDMULTI: 778 case SIOCDELMULTI: 779 if (ifp->if_flags & IFF_RUNNING) { 780 igb_disable_intr(sc); 781 igb_set_multi(sc); 782 #ifdef DEVICE_POLLING 783 if (!(ifp->if_flags & IFF_POLLING)) 784 #endif 785 igb_enable_intr(sc); 786 } 787 break; 788 789 case SIOCSIFMEDIA: 790 /* 791 * As the speed/duplex settings are being 792 * changed, we need toreset the PHY. 
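 * Clearing reset_disable lets the re-init triggered by the media change
 * actually reset the PHY; igb_init() sets it back to TRUE when it
 * finishes ("Don't reset the phy next time init gets called").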
793 */ 794 sc->hw.phy.reset_disable = FALSE; 795 796 /* Check SOL/IDER usage */ 797 if (e1000_check_reset_block(&sc->hw)) { 798 if_printf(ifp, "Media change is " 799 "blocked due to SOL/IDER session.\n"); 800 break; 801 } 802 /* FALL THROUGH */ 803 804 case SIOCGIFMEDIA: 805 error = ifmedia_ioctl(ifp, ifr, &sc->media, command); 806 break; 807 808 case SIOCSIFCAP: 809 reinit = 0; 810 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 811 if (mask & IFCAP_HWCSUM) { 812 ifp->if_capenable ^= (mask & IFCAP_HWCSUM); 813 reinit = 1; 814 } 815 if (mask & IFCAP_VLAN_HWTAGGING) { 816 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 817 reinit = 1; 818 } 819 if (mask & IFCAP_RSS) 820 ifp->if_capenable ^= IFCAP_RSS; 821 if (reinit && (ifp->if_flags & IFF_RUNNING)) 822 igb_init(sc); 823 break; 824 825 default: 826 error = ether_ioctl(ifp, command, data); 827 break; 828 } 829 return error; 830 } 831 832 static void 833 igb_init(void *xsc) 834 { 835 struct igb_softc *sc = xsc; 836 struct ifnet *ifp = &sc->arpcom.ac_if; 837 boolean_t polling; 838 int i; 839 840 ASSERT_IFNET_SERIALIZED_ALL(ifp); 841 842 igb_stop(sc); 843 844 /* Get the latest mac address, User can use a LAA */ 845 bcopy(IF_LLADDR(ifp), sc->hw.mac.addr, ETHER_ADDR_LEN); 846 847 /* Put the address into the Receive Address Array */ 848 e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0); 849 850 igb_reset(sc); 851 igb_update_link_status(sc); 852 853 E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN); 854 855 /* Set hardware offload abilities */ 856 if (ifp->if_capenable & IFCAP_TXCSUM) 857 ifp->if_hwassist = IGB_CSUM_FEATURES; 858 else 859 ifp->if_hwassist = 0; 860 861 /* Configure for OS presence */ 862 igb_get_mgmt(sc); 863 864 polling = FALSE; 865 #ifdef DEVICE_POLLING 866 if (ifp->if_flags & IFF_POLLING) 867 polling = TRUE; 868 #endif 869 870 /* Configured used RX/TX rings */ 871 igb_set_ring_inuse(sc, polling); 872 873 /* Initialize interrupt */ 874 igb_init_intr(sc); 875 876 /* Prepare transmit descriptors and buffers */ 877 for (i = 0; i < sc->tx_ring_cnt; ++i) 878 igb_init_tx_ring(&sc->tx_rings[i]); 879 igb_init_tx_unit(sc); 880 881 /* Setup Multicast table */ 882 igb_set_multi(sc); 883 884 #if 0 885 /* 886 * Figure out the desired mbuf pool 887 * for doing jumbo/packetsplit 888 */ 889 if (adapter->max_frame_size <= 2048) 890 adapter->rx_mbuf_sz = MCLBYTES; 891 else if (adapter->max_frame_size <= 4096) 892 adapter->rx_mbuf_sz = MJUMPAGESIZE; 893 else 894 adapter->rx_mbuf_sz = MJUM9BYTES; 895 #endif 896 897 /* Prepare receive descriptors and buffers */ 898 for (i = 0; i < sc->rx_ring_inuse; ++i) { 899 int error; 900 901 error = igb_init_rx_ring(&sc->rx_rings[i]); 902 if (error) { 903 if_printf(ifp, "Could not setup receive structures\n"); 904 igb_stop(sc); 905 return; 906 } 907 } 908 igb_init_rx_unit(sc); 909 910 /* Enable VLAN support */ 911 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) 912 igb_set_vlan(sc); 913 914 /* Don't lose promiscuous settings */ 915 igb_set_promisc(sc); 916 917 ifp->if_flags |= IFF_RUNNING; 918 ifp->if_flags &= ~IFF_OACTIVE; 919 920 callout_reset(&sc->timer, hz, igb_timer, sc); 921 e1000_clear_hw_cntrs_base_generic(&sc->hw); 922 923 #if 0 924 if (adapter->msix > 1) /* Set up queue routing */ 925 igb_configure_queues(adapter); 926 #endif 927 928 /* This clears any pending interrupts */ 929 E1000_READ_REG(&sc->hw, E1000_ICR); 930 931 /* 932 * Only enable interrupts if we are not polling, make sure 933 * they are off otherwise. 
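 * With IFF_POLLING set, the if_poll handler performs the RX/TX
 * processing, so the hardware interrupt sources are left masked here.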
934 */ 935 if (polling) { 936 igb_disable_intr(sc); 937 } else { 938 igb_enable_intr(sc); 939 E1000_WRITE_REG(&sc->hw, E1000_ICS, E1000_ICS_LSC); 940 } 941 942 /* Set Energy Efficient Ethernet */ 943 e1000_set_eee_i350(&sc->hw); 944 945 /* Don't reset the phy next time init gets called */ 946 sc->hw.phy.reset_disable = TRUE; 947 } 948 949 static void 950 igb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 951 { 952 struct igb_softc *sc = ifp->if_softc; 953 u_char fiber_type = IFM_1000_SX; 954 955 ASSERT_IFNET_SERIALIZED_ALL(ifp); 956 957 igb_update_link_status(sc); 958 959 ifmr->ifm_status = IFM_AVALID; 960 ifmr->ifm_active = IFM_ETHER; 961 962 if (!sc->link_active) 963 return; 964 965 ifmr->ifm_status |= IFM_ACTIVE; 966 967 if (sc->hw.phy.media_type == e1000_media_type_fiber || 968 sc->hw.phy.media_type == e1000_media_type_internal_serdes) { 969 ifmr->ifm_active |= fiber_type | IFM_FDX; 970 } else { 971 switch (sc->link_speed) { 972 case 10: 973 ifmr->ifm_active |= IFM_10_T; 974 break; 975 976 case 100: 977 ifmr->ifm_active |= IFM_100_TX; 978 break; 979 980 case 1000: 981 ifmr->ifm_active |= IFM_1000_T; 982 break; 983 } 984 if (sc->link_duplex == FULL_DUPLEX) 985 ifmr->ifm_active |= IFM_FDX; 986 else 987 ifmr->ifm_active |= IFM_HDX; 988 } 989 } 990 991 static int 992 igb_media_change(struct ifnet *ifp) 993 { 994 struct igb_softc *sc = ifp->if_softc; 995 struct ifmedia *ifm = &sc->media; 996 997 ASSERT_IFNET_SERIALIZED_ALL(ifp); 998 999 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 1000 return EINVAL; 1001 1002 switch (IFM_SUBTYPE(ifm->ifm_media)) { 1003 case IFM_AUTO: 1004 sc->hw.mac.autoneg = DO_AUTO_NEG; 1005 sc->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT; 1006 break; 1007 1008 case IFM_1000_LX: 1009 case IFM_1000_SX: 1010 case IFM_1000_T: 1011 sc->hw.mac.autoneg = DO_AUTO_NEG; 1012 sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; 1013 break; 1014 1015 case IFM_100_TX: 1016 sc->hw.mac.autoneg = FALSE; 1017 sc->hw.phy.autoneg_advertised = 0; 1018 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) 1019 sc->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL; 1020 else 1021 sc->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF; 1022 break; 1023 1024 case IFM_10_T: 1025 sc->hw.mac.autoneg = FALSE; 1026 sc->hw.phy.autoneg_advertised = 0; 1027 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) 1028 sc->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL; 1029 else 1030 sc->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF; 1031 break; 1032 1033 default: 1034 if_printf(ifp, "Unsupported media type\n"); 1035 break; 1036 } 1037 1038 igb_init(sc); 1039 1040 return 0; 1041 } 1042 1043 static void 1044 igb_set_promisc(struct igb_softc *sc) 1045 { 1046 struct ifnet *ifp = &sc->arpcom.ac_if; 1047 struct e1000_hw *hw = &sc->hw; 1048 uint32_t reg; 1049 1050 if (sc->vf_ifp) { 1051 e1000_promisc_set_vf(hw, e1000_promisc_enabled); 1052 return; 1053 } 1054 1055 reg = E1000_READ_REG(hw, E1000_RCTL); 1056 if (ifp->if_flags & IFF_PROMISC) { 1057 reg |= (E1000_RCTL_UPE | E1000_RCTL_MPE); 1058 E1000_WRITE_REG(hw, E1000_RCTL, reg); 1059 } else if (ifp->if_flags & IFF_ALLMULTI) { 1060 reg |= E1000_RCTL_MPE; 1061 reg &= ~E1000_RCTL_UPE; 1062 E1000_WRITE_REG(hw, E1000_RCTL, reg); 1063 } 1064 } 1065 1066 static void 1067 igb_disable_promisc(struct igb_softc *sc) 1068 { 1069 struct e1000_hw *hw = &sc->hw; 1070 uint32_t reg; 1071 1072 if (sc->vf_ifp) { 1073 e1000_promisc_set_vf(hw, e1000_promisc_disabled); 1074 return; 1075 } 1076 reg = E1000_READ_REG(hw, E1000_RCTL); 1077 reg &= ~E1000_RCTL_UPE; 1078 reg &= ~E1000_RCTL_MPE; 
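	/*
	 * Clear both the unicast (UPE) and multicast (MPE) promiscuous
	 * bits above, then write the result back to RCTL below.
	 */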
1079 E1000_WRITE_REG(hw, E1000_RCTL, reg); 1080 } 1081 1082 static void 1083 igb_set_multi(struct igb_softc *sc) 1084 { 1085 struct ifnet *ifp = &sc->arpcom.ac_if; 1086 struct ifmultiaddr *ifma; 1087 uint32_t reg_rctl = 0; 1088 uint8_t *mta; 1089 int mcnt = 0; 1090 1091 mta = sc->mta; 1092 bzero(mta, ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES); 1093 1094 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1095 if (ifma->ifma_addr->sa_family != AF_LINK) 1096 continue; 1097 1098 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES) 1099 break; 1100 1101 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 1102 &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN); 1103 mcnt++; 1104 } 1105 1106 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) { 1107 reg_rctl = E1000_READ_REG(&sc->hw, E1000_RCTL); 1108 reg_rctl |= E1000_RCTL_MPE; 1109 E1000_WRITE_REG(&sc->hw, E1000_RCTL, reg_rctl); 1110 } else { 1111 e1000_update_mc_addr_list(&sc->hw, mta, mcnt); 1112 } 1113 } 1114 1115 static void 1116 igb_timer(void *xsc) 1117 { 1118 struct igb_softc *sc = xsc; 1119 1120 lwkt_serialize_enter(&sc->main_serialize); 1121 1122 igb_update_link_status(sc); 1123 igb_update_stats_counters(sc); 1124 1125 callout_reset(&sc->timer, hz, igb_timer, sc); 1126 1127 lwkt_serialize_exit(&sc->main_serialize); 1128 } 1129 1130 static void 1131 igb_update_link_status(struct igb_softc *sc) 1132 { 1133 struct ifnet *ifp = &sc->arpcom.ac_if; 1134 struct e1000_hw *hw = &sc->hw; 1135 uint32_t link_check, thstat, ctrl; 1136 1137 link_check = thstat = ctrl = 0; 1138 1139 /* Get the cached link value or read for real */ 1140 switch (hw->phy.media_type) { 1141 case e1000_media_type_copper: 1142 if (hw->mac.get_link_status) { 1143 /* Do the work to read phy */ 1144 e1000_check_for_link(hw); 1145 link_check = !hw->mac.get_link_status; 1146 } else { 1147 link_check = TRUE; 1148 } 1149 break; 1150 1151 case e1000_media_type_fiber: 1152 e1000_check_for_link(hw); 1153 link_check = E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU; 1154 break; 1155 1156 case e1000_media_type_internal_serdes: 1157 e1000_check_for_link(hw); 1158 link_check = hw->mac.serdes_has_link; 1159 break; 1160 1161 /* VF device is type_unknown */ 1162 case e1000_media_type_unknown: 1163 e1000_check_for_link(hw); 1164 link_check = !hw->mac.get_link_status; 1165 /* Fall thru */ 1166 default: 1167 break; 1168 } 1169 1170 /* Check for thermal downshift or shutdown */ 1171 if (hw->mac.type == e1000_i350) { 1172 thstat = E1000_READ_REG(hw, E1000_THSTAT); 1173 ctrl = E1000_READ_REG(hw, E1000_CTRL_EXT); 1174 } 1175 1176 /* Now we check if a transition has happened */ 1177 if (link_check && sc->link_active == 0) { 1178 e1000_get_speed_and_duplex(hw, 1179 &sc->link_speed, &sc->link_duplex); 1180 if (bootverbose) { 1181 if_printf(ifp, "Link is up %d Mbps %s\n", 1182 sc->link_speed, 1183 sc->link_duplex == FULL_DUPLEX ? 
1184 "Full Duplex" : "Half Duplex"); 1185 } 1186 sc->link_active = 1; 1187 1188 ifp->if_baudrate = sc->link_speed * 1000000; 1189 if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) && 1190 (thstat & E1000_THSTAT_LINK_THROTTLE)) 1191 if_printf(ifp, "Link: thermal downshift\n"); 1192 /* This can sleep */ 1193 ifp->if_link_state = LINK_STATE_UP; 1194 if_link_state_change(ifp); 1195 } else if (!link_check && sc->link_active == 1) { 1196 ifp->if_baudrate = sc->link_speed = 0; 1197 sc->link_duplex = 0; 1198 if (bootverbose) 1199 if_printf(ifp, "Link is Down\n"); 1200 if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) && 1201 (thstat & E1000_THSTAT_PWR_DOWN)) 1202 if_printf(ifp, "Link: thermal shutdown\n"); 1203 sc->link_active = 0; 1204 /* This can sleep */ 1205 ifp->if_link_state = LINK_STATE_DOWN; 1206 if_link_state_change(ifp); 1207 } 1208 } 1209 1210 static void 1211 igb_stop(struct igb_softc *sc) 1212 { 1213 struct ifnet *ifp = &sc->arpcom.ac_if; 1214 int i; 1215 1216 ASSERT_IFNET_SERIALIZED_ALL(ifp); 1217 1218 igb_disable_intr(sc); 1219 1220 callout_stop(&sc->timer); 1221 1222 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1223 ifp->if_timer = 0; 1224 1225 e1000_reset_hw(&sc->hw); 1226 E1000_WRITE_REG(&sc->hw, E1000_WUC, 0); 1227 1228 e1000_led_off(&sc->hw); 1229 e1000_cleanup_led(&sc->hw); 1230 1231 for (i = 0; i < sc->tx_ring_cnt; ++i) 1232 igb_free_tx_ring(&sc->tx_rings[i]); 1233 for (i = 0; i < sc->rx_ring_cnt; ++i) 1234 igb_free_rx_ring(&sc->rx_rings[i]); 1235 } 1236 1237 static void 1238 igb_reset(struct igb_softc *sc) 1239 { 1240 struct ifnet *ifp = &sc->arpcom.ac_if; 1241 struct e1000_hw *hw = &sc->hw; 1242 struct e1000_fc_info *fc = &hw->fc; 1243 uint32_t pba = 0; 1244 uint16_t hwm; 1245 1246 /* Let the firmware know the OS is in control */ 1247 igb_get_hw_control(sc); 1248 1249 /* 1250 * Packet Buffer Allocation (PBA) 1251 * Writing PBA sets the receive portion of the buffer 1252 * the remainder is used for the transmit buffer. 1253 */ 1254 switch (hw->mac.type) { 1255 case e1000_82575: 1256 pba = E1000_PBA_32K; 1257 break; 1258 1259 case e1000_82576: 1260 case e1000_vfadapt: 1261 pba = E1000_READ_REG(hw, E1000_RXPBS); 1262 pba &= E1000_RXPBS_SIZE_MASK_82576; 1263 break; 1264 1265 case e1000_82580: 1266 case e1000_i350: 1267 case e1000_vfadapt_i350: 1268 pba = E1000_READ_REG(hw, E1000_RXPBS); 1269 pba = e1000_rxpbs_adjust_82580(pba); 1270 break; 1271 /* XXX pba = E1000_PBA_35K; */ 1272 1273 default: 1274 break; 1275 } 1276 1277 /* Special needs in case of Jumbo frames */ 1278 if (hw->mac.type == e1000_82575 && ifp->if_mtu > ETHERMTU) { 1279 uint32_t tx_space, min_tx, min_rx; 1280 1281 pba = E1000_READ_REG(hw, E1000_PBA); 1282 tx_space = pba >> 16; 1283 pba &= 0xffff; 1284 1285 min_tx = (sc->max_frame_size + 1286 sizeof(struct e1000_tx_desc) - ETHER_CRC_LEN) * 2; 1287 min_tx = roundup2(min_tx, 1024); 1288 min_tx >>= 10; 1289 min_rx = sc->max_frame_size; 1290 min_rx = roundup2(min_rx, 1024); 1291 min_rx >>= 10; 1292 if (tx_space < min_tx && (min_tx - tx_space) < pba) { 1293 pba = pba - (min_tx - tx_space); 1294 /* 1295 * if short on rx space, rx wins 1296 * and must trump tx adjustment 1297 */ 1298 if (pba < min_rx) 1299 pba = min_rx; 1300 } 1301 E1000_WRITE_REG(hw, E1000_PBA, pba); 1302 } 1303 1304 /* 1305 * These parameters control the automatic generation (Tx) and 1306 * response (Rx) to Ethernet PAUSE frames. 1307 * - High water mark should allow for at least two frames to be 1308 * received after sending an XOFF. 1309 * - Low water mark works best when it is very near the high water mark. 
1310 * This allows the receiver to restart by sending XON when it has 1311 * drained a bit. 1312 */ 1313 hwm = min(((pba << 10) * 9 / 10), 1314 ((pba << 10) - 2 * sc->max_frame_size)); 1315 1316 if (hw->mac.type < e1000_82576) { 1317 fc->high_water = hwm & 0xFFF8; /* 8-byte granularity */ 1318 fc->low_water = fc->high_water - 8; 1319 } else { 1320 fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */ 1321 fc->low_water = fc->high_water - 16; 1322 } 1323 fc->pause_time = IGB_FC_PAUSE_TIME; 1324 fc->send_xon = TRUE; 1325 1326 /* Issue a global reset */ 1327 e1000_reset_hw(hw); 1328 E1000_WRITE_REG(hw, E1000_WUC, 0); 1329 1330 if (e1000_init_hw(hw) < 0) 1331 if_printf(ifp, "Hardware Initialization Failed\n"); 1332 1333 /* Setup DMA Coalescing */ 1334 if (hw->mac.type == e1000_i350 && sc->dma_coalesce) { 1335 uint32_t reg; 1336 1337 hwm = (pba - 4) << 10; 1338 reg = ((pba - 6) << E1000_DMACR_DMACTHR_SHIFT) 1339 & E1000_DMACR_DMACTHR_MASK; 1340 1341 /* transition to L0x or L1 if available..*/ 1342 reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK); 1343 1344 /* timer = +-1000 usec in 32usec intervals */ 1345 reg |= (1000 >> 5); 1346 E1000_WRITE_REG(hw, E1000_DMACR, reg); 1347 1348 /* No lower threshold */ 1349 E1000_WRITE_REG(hw, E1000_DMCRTRH, 0); 1350 1351 /* set hwm to PBA - 2 * max frame size */ 1352 E1000_WRITE_REG(hw, E1000_FCRTC, hwm); 1353 1354 /* Set the interval before transition */ 1355 reg = E1000_READ_REG(hw, E1000_DMCTLX); 1356 reg |= 0x800000FF; /* 255 usec */ 1357 E1000_WRITE_REG(hw, E1000_DMCTLX, reg); 1358 1359 /* free space in tx packet buffer to wake from DMA coal */ 1360 E1000_WRITE_REG(hw, E1000_DMCTXTH, 1361 (20480 - (2 * sc->max_frame_size)) >> 6); 1362 1363 /* make low power state decision controlled by DMA coal */ 1364 reg = E1000_READ_REG(hw, E1000_PCIEMISC); 1365 E1000_WRITE_REG(hw, E1000_PCIEMISC, 1366 reg | E1000_PCIEMISC_LX_DECISION); 1367 if_printf(ifp, "DMA Coalescing enabled\n"); 1368 } 1369 1370 E1000_WRITE_REG(&sc->hw, E1000_VET, ETHERTYPE_VLAN); 1371 e1000_get_phy_info(hw); 1372 e1000_check_for_link(hw); 1373 } 1374 1375 static void 1376 igb_setup_ifp(struct igb_softc *sc) 1377 { 1378 struct ifnet *ifp = &sc->arpcom.ac_if; 1379 1380 if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev)); 1381 ifp->if_softc = sc; 1382 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1383 ifp->if_init = igb_init; 1384 ifp->if_ioctl = igb_ioctl; 1385 ifp->if_start = igb_start; 1386 ifp->if_serialize = igb_serialize; 1387 ifp->if_deserialize = igb_deserialize; 1388 ifp->if_tryserialize = igb_tryserialize; 1389 #ifdef INVARIANTS 1390 ifp->if_serialize_assert = igb_serialize_assert; 1391 #endif 1392 #ifdef DEVICE_POLLING 1393 ifp->if_poll = igb_poll; 1394 #endif 1395 ifp->if_watchdog = igb_watchdog; 1396 1397 ifq_set_maxlen(&ifp->if_snd, sc->tx_rings[0].num_tx_desc - 1); 1398 ifq_set_ready(&ifp->if_snd); 1399 1400 ether_ifattach(ifp, sc->hw.mac.addr, NULL); 1401 1402 ifp->if_capabilities = 1403 IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU; 1404 if (IGB_ENABLE_HWRSS(sc)) 1405 ifp->if_capabilities |= IFCAP_RSS; 1406 ifp->if_capenable = ifp->if_capabilities; 1407 ifp->if_hwassist = IGB_CSUM_FEATURES; 1408 1409 /* 1410 * Tell the upper layer(s) we support long frames 1411 */ 1412 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 1413 1414 /* 1415 * Specify the media types supported by this adapter and register 1416 * callbacks to update media and link information 1417 */ 1418 ifmedia_init(&sc->media, IFM_IMASK, igb_media_change, 
igb_media_status); 1419 if (sc->hw.phy.media_type == e1000_media_type_fiber || 1420 sc->hw.phy.media_type == e1000_media_type_internal_serdes) { 1421 ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX | IFM_FDX, 1422 0, NULL); 1423 ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL); 1424 } else { 1425 ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL); 1426 ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX, 1427 0, NULL); 1428 ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL); 1429 ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 1430 0, NULL); 1431 if (sc->hw.phy.type != e1000_phy_ife) { 1432 ifmedia_add(&sc->media, 1433 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); 1434 ifmedia_add(&sc->media, 1435 IFM_ETHER | IFM_1000_T, 0, NULL); 1436 } 1437 } 1438 ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL); 1439 ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO); 1440 } 1441 1442 static void 1443 igb_add_sysctl(struct igb_softc *sc) 1444 { 1445 char node[32]; 1446 int i; 1447 1448 sysctl_ctx_init(&sc->sysctl_ctx); 1449 sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx, 1450 SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO, 1451 device_get_nameunit(sc->dev), CTLFLAG_RD, 0, ""); 1452 if (sc->sysctl_tree == NULL) { 1453 device_printf(sc->dev, "can't add sysctl node\n"); 1454 return; 1455 } 1456 1457 SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 1458 OID_AUTO, "rxr", CTLFLAG_RD, &sc->rx_ring_cnt, 0, "# of RX rings"); 1459 SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 1460 OID_AUTO, "rxr_inuse", CTLFLAG_RD, &sc->rx_ring_inuse, 0, 1461 "# of RX rings used"); 1462 SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 1463 OID_AUTO, "rxd", CTLFLAG_RD, &sc->rx_rings[0].num_rx_desc, 0, 1464 "# of RX descs"); 1465 SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 1466 OID_AUTO, "txd", CTLFLAG_RD, &sc->tx_rings[0].num_tx_desc, 0, 1467 "# of TX descs"); 1468 1469 if (sc->intr_type != PCI_INTR_TYPE_MSIX) { 1470 SYSCTL_ADD_PROC(&sc->sysctl_ctx, 1471 SYSCTL_CHILDREN(sc->sysctl_tree), 1472 OID_AUTO, "intr_rate", CTLTYPE_INT | CTLFLAG_RW, 1473 sc, 0, igb_sysctl_intr_rate, "I", "interrupt rate"); 1474 } else { 1475 for (i = 0; i < sc->msix_cnt; ++i) { 1476 struct igb_msix_data *msix = &sc->msix_data[i]; 1477 1478 ksnprintf(node, sizeof(node), "msix%d_rate", i); 1479 SYSCTL_ADD_PROC(&sc->sysctl_ctx, 1480 SYSCTL_CHILDREN(sc->sysctl_tree), 1481 OID_AUTO, node, CTLTYPE_INT | CTLFLAG_RW, 1482 msix, 0, igb_sysctl_msix_rate, "I", 1483 msix->msix_rate_desc); 1484 } 1485 } 1486 1487 SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 1488 OID_AUTO, "tx_intr_nsegs", CTLTYPE_INT | CTLFLAG_RW, 1489 sc, 0, igb_sysctl_tx_intr_nsegs, "I", 1490 "# of segments per TX interrupt"); 1491 1492 #ifdef IGB_RSS_DEBUG 1493 SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), 1494 OID_AUTO, "rss_debug", CTLFLAG_RW, &sc->rss_debug, 0, 1495 "RSS debug level"); 1496 for (i = 0; i < sc->rx_ring_cnt; ++i) { 1497 ksnprintf(node, sizeof(node), "rx%d_pkt", i); 1498 SYSCTL_ADD_ULONG(&sc->sysctl_ctx, 1499 SYSCTL_CHILDREN(sc->sysctl_tree), OID_AUTO, node, 1500 CTLFLAG_RW, &sc->rx_rings[i].rx_packets, "RXed packets"); 1501 } 1502 #endif 1503 } 1504 1505 static int 1506 igb_alloc_rings(struct igb_softc *sc) 1507 { 1508 int error, i; 1509 1510 /* 1511 * Create top level busdma tag 1512 */ 1513 error = bus_dma_tag_create(NULL, 1, 0, 1514 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 1515 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 
0, 1516 &sc->parent_tag); 1517 if (error) { 1518 device_printf(sc->dev, "could not create top level DMA tag\n"); 1519 return error; 1520 } 1521 1522 /* 1523 * Allocate TX descriptor rings and buffers 1524 */ 1525 sc->tx_rings = kmalloc(sizeof(struct igb_tx_ring) * sc->tx_ring_cnt, 1526 M_DEVBUF, M_WAITOK | M_ZERO); 1527 for (i = 0; i < sc->tx_ring_cnt; ++i) { 1528 struct igb_tx_ring *txr = &sc->tx_rings[i]; 1529 1530 /* Set up some basics */ 1531 txr->sc = sc; 1532 txr->me = i; 1533 lwkt_serialize_init(&txr->tx_serialize); 1534 1535 error = igb_create_tx_ring(txr); 1536 if (error) 1537 return error; 1538 } 1539 1540 /* 1541 * Allocate RX descriptor rings and buffers 1542 */ 1543 sc->rx_rings = kmalloc(sizeof(struct igb_rx_ring) * sc->rx_ring_cnt, 1544 M_DEVBUF, M_WAITOK | M_ZERO); 1545 for (i = 0; i < sc->rx_ring_cnt; ++i) { 1546 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 1547 1548 /* Set up some basics */ 1549 rxr->sc = sc; 1550 rxr->me = i; 1551 lwkt_serialize_init(&rxr->rx_serialize); 1552 1553 error = igb_create_rx_ring(rxr); 1554 if (error) 1555 return error; 1556 } 1557 1558 return 0; 1559 } 1560 1561 static void 1562 igb_free_rings(struct igb_softc *sc) 1563 { 1564 int i; 1565 1566 if (sc->tx_rings != NULL) { 1567 for (i = 0; i < sc->tx_ring_cnt; ++i) { 1568 struct igb_tx_ring *txr = &sc->tx_rings[i]; 1569 1570 igb_destroy_tx_ring(txr, txr->num_tx_desc); 1571 } 1572 kfree(sc->tx_rings, M_DEVBUF); 1573 } 1574 1575 if (sc->rx_rings != NULL) { 1576 for (i = 0; i < sc->rx_ring_cnt; ++i) { 1577 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 1578 1579 igb_destroy_rx_ring(rxr, rxr->num_rx_desc); 1580 } 1581 kfree(sc->rx_rings, M_DEVBUF); 1582 } 1583 } 1584 1585 static int 1586 igb_create_tx_ring(struct igb_tx_ring *txr) 1587 { 1588 int tsize, error, i; 1589 1590 /* 1591 * Validate number of transmit descriptors. It must not exceed 1592 * hardware maximum, and must be multiple of IGB_DBA_ALIGN. 
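 * If the igb_txd tunable fails either check, fall back to IGB_DEFAULT_TXD
 * and log a diagnostic, as done just below.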
1593 */ 1594 if (((igb_txd * sizeof(struct e1000_tx_desc)) % IGB_DBA_ALIGN) != 0 || 1595 (igb_txd > IGB_MAX_TXD) || (igb_txd < IGB_MIN_TXD)) { 1596 device_printf(txr->sc->dev, 1597 "Using %d TX descriptors instead of %d!\n", 1598 IGB_DEFAULT_TXD, igb_txd); 1599 txr->num_tx_desc = IGB_DEFAULT_TXD; 1600 } else { 1601 txr->num_tx_desc = igb_txd; 1602 } 1603 1604 /* 1605 * Allocate TX descriptor ring 1606 */ 1607 tsize = roundup2(txr->num_tx_desc * sizeof(union e1000_adv_tx_desc), 1608 IGB_DBA_ALIGN); 1609 txr->txdma.dma_vaddr = bus_dmamem_coherent_any(txr->sc->parent_tag, 1610 IGB_DBA_ALIGN, tsize, BUS_DMA_WAITOK, 1611 &txr->txdma.dma_tag, &txr->txdma.dma_map, &txr->txdma.dma_paddr); 1612 if (txr->txdma.dma_vaddr == NULL) { 1613 device_printf(txr->sc->dev, 1614 "Unable to allocate TX Descriptor memory\n"); 1615 return ENOMEM; 1616 } 1617 txr->tx_base = txr->txdma.dma_vaddr; 1618 bzero(txr->tx_base, tsize); 1619 1620 txr->tx_buf = kmalloc(sizeof(struct igb_tx_buf) * txr->num_tx_desc, 1621 M_DEVBUF, M_WAITOK | M_ZERO); 1622 1623 /* 1624 * Allocate TX head write-back buffer 1625 */ 1626 txr->tx_hdr = bus_dmamem_coherent_any(txr->sc->parent_tag, 1627 __VM_CACHELINE_SIZE, __VM_CACHELINE_SIZE, BUS_DMA_WAITOK, 1628 &txr->tx_hdr_dtag, &txr->tx_hdr_dmap, &txr->tx_hdr_paddr); 1629 if (txr->tx_hdr == NULL) { 1630 device_printf(txr->sc->dev, 1631 "Unable to allocate TX head write-back buffer\n"); 1632 return ENOMEM; 1633 } 1634 1635 /* 1636 * Create DMA tag for TX buffers 1637 */ 1638 error = bus_dma_tag_create(txr->sc->parent_tag, 1639 1, 0, /* alignment, bounds */ 1640 BUS_SPACE_MAXADDR, /* lowaddr */ 1641 BUS_SPACE_MAXADDR, /* highaddr */ 1642 NULL, NULL, /* filter, filterarg */ 1643 IGB_TSO_SIZE, /* maxsize */ 1644 IGB_MAX_SCATTER, /* nsegments */ 1645 PAGE_SIZE, /* maxsegsize */ 1646 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | 1647 BUS_DMA_ONEBPAGE, /* flags */ 1648 &txr->tx_tag); 1649 if (error) { 1650 device_printf(txr->sc->dev, "Unable to allocate TX DMA tag\n"); 1651 kfree(txr->tx_buf, M_DEVBUF); 1652 txr->tx_buf = NULL; 1653 return error; 1654 } 1655 1656 /* 1657 * Create DMA maps for TX buffers 1658 */ 1659 for (i = 0; i < txr->num_tx_desc; ++i) { 1660 struct igb_tx_buf *txbuf = &txr->tx_buf[i]; 1661 1662 error = bus_dmamap_create(txr->tx_tag, 1663 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, &txbuf->map); 1664 if (error) { 1665 device_printf(txr->sc->dev, 1666 "Unable to create TX DMA map\n"); 1667 igb_destroy_tx_ring(txr, i); 1668 return error; 1669 } 1670 } 1671 1672 /* 1673 * Initialize various watermark 1674 */ 1675 txr->spare_desc = IGB_TX_SPARE; 1676 txr->intr_nsegs = txr->num_tx_desc / 16; 1677 txr->oact_hi_desc = txr->num_tx_desc / 2; 1678 txr->oact_lo_desc = txr->num_tx_desc / 8; 1679 if (txr->oact_lo_desc > IGB_TX_OACTIVE_MAX) 1680 txr->oact_lo_desc = IGB_TX_OACTIVE_MAX; 1681 if (txr->oact_lo_desc < txr->spare_desc + IGB_TX_RESERVED) 1682 txr->oact_lo_desc = txr->spare_desc + IGB_TX_RESERVED; 1683 1684 return 0; 1685 } 1686 1687 static void 1688 igb_free_tx_ring(struct igb_tx_ring *txr) 1689 { 1690 int i; 1691 1692 for (i = 0; i < txr->num_tx_desc; ++i) { 1693 struct igb_tx_buf *txbuf = &txr->tx_buf[i]; 1694 1695 if (txbuf->m_head != NULL) { 1696 bus_dmamap_unload(txr->tx_tag, txbuf->map); 1697 m_freem(txbuf->m_head); 1698 txbuf->m_head = NULL; 1699 } 1700 } 1701 } 1702 1703 static void 1704 igb_destroy_tx_ring(struct igb_tx_ring *txr, int ndesc) 1705 { 1706 int i; 1707 1708 if (txr->txdma.dma_vaddr != NULL) { 1709 bus_dmamap_unload(txr->txdma.dma_tag, txr->txdma.dma_map); 1710 
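		/*
		 * Free the descriptor memory using the tag and map it was
		 * allocated with, then destroy the tag itself.
		 */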
bus_dmamem_free(txr->txdma.dma_tag, txr->txdma.dma_vaddr, 1711 txr->txdma.dma_map); 1712 bus_dma_tag_destroy(txr->txdma.dma_tag); 1713 txr->txdma.dma_vaddr = NULL; 1714 } 1715 1716 if (txr->tx_hdr != NULL) { 1717 bus_dmamap_unload(txr->tx_hdr_dtag, txr->tx_hdr_dmap); 1718 bus_dmamem_free(txr->tx_hdr_dtag, txr->tx_hdr, 1719 txr->tx_hdr_dmap); 1720 bus_dma_tag_destroy(txr->tx_hdr_dtag); 1721 txr->tx_hdr = NULL; 1722 } 1723 1724 if (txr->tx_buf == NULL) 1725 return; 1726 1727 for (i = 0; i < ndesc; ++i) { 1728 struct igb_tx_buf *txbuf = &txr->tx_buf[i]; 1729 1730 KKASSERT(txbuf->m_head == NULL); 1731 bus_dmamap_destroy(txr->tx_tag, txbuf->map); 1732 } 1733 bus_dma_tag_destroy(txr->tx_tag); 1734 1735 kfree(txr->tx_buf, M_DEVBUF); 1736 txr->tx_buf = NULL; 1737 } 1738 1739 static void 1740 igb_init_tx_ring(struct igb_tx_ring *txr) 1741 { 1742 /* Clear the old descriptor contents */ 1743 bzero(txr->tx_base, 1744 sizeof(union e1000_adv_tx_desc) * txr->num_tx_desc); 1745 1746 /* Clear TX head write-back buffer */ 1747 *(txr->tx_hdr) = 0; 1748 1749 /* Reset indices */ 1750 txr->next_avail_desc = 0; 1751 txr->next_to_clean = 0; 1752 txr->tx_nsegs = 0; 1753 1754 /* Set number of descriptors available */ 1755 txr->tx_avail = txr->num_tx_desc; 1756 } 1757 1758 static void 1759 igb_init_tx_unit(struct igb_softc *sc) 1760 { 1761 struct e1000_hw *hw = &sc->hw; 1762 uint32_t tctl; 1763 int i; 1764 1765 /* Setup the Tx Descriptor Rings */ 1766 for (i = 0; i < sc->tx_ring_cnt; ++i) { 1767 struct igb_tx_ring *txr = &sc->tx_rings[i]; 1768 uint64_t bus_addr = txr->txdma.dma_paddr; 1769 uint64_t hdr_paddr = txr->tx_hdr_paddr; 1770 uint32_t txdctl = 0; 1771 uint32_t dca_txctrl; 1772 1773 E1000_WRITE_REG(hw, E1000_TDLEN(i), 1774 txr->num_tx_desc * sizeof(struct e1000_tx_desc)); 1775 E1000_WRITE_REG(hw, E1000_TDBAH(i), 1776 (uint32_t)(bus_addr >> 32)); 1777 E1000_WRITE_REG(hw, E1000_TDBAL(i), 1778 (uint32_t)bus_addr); 1779 1780 /* Setup the HW Tx Head and Tail descriptor pointers */ 1781 E1000_WRITE_REG(hw, E1000_TDT(i), 0); 1782 E1000_WRITE_REG(hw, E1000_TDH(i), 0); 1783 1784 /* 1785 * WTHRESH is ignored by the hardware, since header 1786 * write back mode is used. 1787 */ 1788 txdctl |= IGB_TX_PTHRESH; 1789 txdctl |= IGB_TX_HTHRESH << 8; 1790 txdctl |= IGB_TX_WTHRESH << 16; 1791 txdctl |= E1000_TXDCTL_QUEUE_ENABLE; 1792 E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl); 1793 1794 dca_txctrl = E1000_READ_REG(hw, E1000_DCA_TXCTRL(i)); 1795 dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN; 1796 E1000_WRITE_REG(hw, E1000_DCA_TXCTRL(i), dca_txctrl); 1797 1798 /* 1799 * Don't set WB_on_EITR: 1800 * - 82575 does not have it 1801 * - It almost has no effect on 82576, see: 1802 * 82576 specification update errata #26 1803 * - It causes unnecessary bus traffic 1804 */ 1805 E1000_WRITE_REG(hw, E1000_TDWBAH(i), 1806 (uint32_t)(hdr_paddr >> 32)); 1807 E1000_WRITE_REG(hw, E1000_TDWBAL(i), 1808 ((uint32_t)hdr_paddr) | E1000_TX_HEAD_WB_ENABLE); 1809 } 1810 1811 if (sc->vf_ifp) 1812 return; 1813 1814 e1000_config_collision_dist(hw); 1815 1816 /* Program the Transmit Control Register */ 1817 tctl = E1000_READ_REG(hw, E1000_TCTL); 1818 tctl &= ~E1000_TCTL_CT; 1819 tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN | 1820 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT)); 1821 1822 /* This write will effectively turn on the transmit unit. 
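	 * TCTL.EN is set together with PSP, RTLC and the collision
	 * threshold programmed above.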
*/ 1823 E1000_WRITE_REG(hw, E1000_TCTL, tctl); 1824 } 1825 1826 static boolean_t 1827 igb_txctx(struct igb_tx_ring *txr, struct mbuf *mp) 1828 { 1829 struct e1000_adv_tx_context_desc *TXD; 1830 struct igb_tx_buf *txbuf; 1831 uint32_t vlan_macip_lens, type_tucmd_mlhl, mss_l4len_idx; 1832 struct ether_vlan_header *eh; 1833 struct ip *ip = NULL; 1834 int ehdrlen, ctxd, ip_hlen = 0; 1835 uint16_t etype, vlantag = 0; 1836 boolean_t offload = TRUE; 1837 1838 if ((mp->m_pkthdr.csum_flags & IGB_CSUM_FEATURES) == 0) 1839 offload = FALSE; 1840 1841 vlan_macip_lens = type_tucmd_mlhl = mss_l4len_idx = 0; 1842 ctxd = txr->next_avail_desc; 1843 txbuf = &txr->tx_buf[ctxd]; 1844 TXD = (struct e1000_adv_tx_context_desc *)&txr->tx_base[ctxd]; 1845 1846 /* 1847 * In advanced descriptors the vlan tag must 1848 * be placed into the context descriptor, thus 1849 * we need to be here just for that setup. 1850 */ 1851 if (mp->m_flags & M_VLANTAG) { 1852 vlantag = htole16(mp->m_pkthdr.ether_vlantag); 1853 vlan_macip_lens |= (vlantag << E1000_ADVTXD_VLAN_SHIFT); 1854 } else if (!offload) { 1855 return FALSE; 1856 } 1857 1858 /* 1859 * Determine where frame payload starts. 1860 * Jump over vlan headers if already present, 1861 * helpful for QinQ too. 1862 */ 1863 KASSERT(mp->m_len >= ETHER_HDR_LEN, 1864 ("igb_txctx_pullup is not called (eh)?\n")); 1865 eh = mtod(mp, struct ether_vlan_header *); 1866 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 1867 KASSERT(mp->m_len >= ETHER_HDR_LEN + EVL_ENCAPLEN, 1868 ("igb_txctx_pullup is not called (evh)?\n")); 1869 etype = ntohs(eh->evl_proto); 1870 ehdrlen = ETHER_HDR_LEN + EVL_ENCAPLEN; 1871 } else { 1872 etype = ntohs(eh->evl_encap_proto); 1873 ehdrlen = ETHER_HDR_LEN; 1874 } 1875 1876 /* Set the ether header length */ 1877 vlan_macip_lens |= ehdrlen << E1000_ADVTXD_MACLEN_SHIFT; 1878 1879 switch (etype) { 1880 case ETHERTYPE_IP: 1881 KASSERT(mp->m_len >= ehdrlen + IGB_IPVHL_SIZE, 1882 ("igb_txctx_pullup is not called (eh+ip_vhl)?\n")); 1883 1884 /* NOTE: We could only safely access ip.ip_vhl part */ 1885 ip = (struct ip *)(mp->m_data + ehdrlen); 1886 ip_hlen = ip->ip_hl << 2; 1887 1888 if (mp->m_pkthdr.csum_flags & CSUM_IP) 1889 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4; 1890 break; 1891 1892 #ifdef notyet 1893 case ETHERTYPE_IPV6: 1894 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); 1895 ip_hlen = sizeof(struct ip6_hdr); 1896 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV6; 1897 break; 1898 #endif 1899 1900 default: 1901 offload = FALSE; 1902 break; 1903 } 1904 1905 vlan_macip_lens |= ip_hlen; 1906 type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT; 1907 1908 if (mp->m_pkthdr.csum_flags & CSUM_TCP) 1909 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP; 1910 else if (mp->m_pkthdr.csum_flags & CSUM_UDP) 1911 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP; 1912 1913 /* 82575 needs the queue index added */ 1914 if (txr->sc->hw.mac.type == e1000_82575) 1915 mss_l4len_idx = txr->me << 4; 1916 1917 /* Now copy bits into descriptor */ 1918 TXD->vlan_macip_lens = htole32(vlan_macip_lens); 1919 TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl); 1920 TXD->seqnum_seed = htole32(0); 1921 TXD->mss_l4len_idx = htole32(mss_l4len_idx); 1922 1923 txbuf->m_head = NULL; 1924 1925 /* We've consumed the first desc, adjust counters */ 1926 if (++ctxd == txr->num_tx_desc) 1927 ctxd = 0; 1928 txr->next_avail_desc = ctxd; 1929 --txr->tx_avail; 1930 1931 return offload; 1932 } 1933 1934 static void 1935 igb_txeof(struct igb_tx_ring *txr) 1936 { 1937 struct ifnet *ifp = 
&txr->sc->arpcom.ac_if; 1938 int first, hdr, avail; 1939 1940 if (txr->tx_avail == txr->num_tx_desc) 1941 return; 1942 1943 first = txr->next_to_clean; 1944 hdr = *(txr->tx_hdr); 1945 1946 if (first == hdr) 1947 return; 1948 1949 avail = txr->tx_avail; 1950 while (first != hdr) { 1951 struct igb_tx_buf *txbuf = &txr->tx_buf[first]; 1952 1953 ++avail; 1954 if (txbuf->m_head) { 1955 bus_dmamap_unload(txr->tx_tag, txbuf->map); 1956 m_freem(txbuf->m_head); 1957 txbuf->m_head = NULL; 1958 ++ifp->if_opackets; 1959 } 1960 if (++first == txr->num_tx_desc) 1961 first = 0; 1962 } 1963 txr->next_to_clean = first; 1964 txr->tx_avail = avail; 1965 1966 /* 1967 * If we have a minimum free, clear IFF_OACTIVE 1968 * to tell the stack that it is OK to send packets. 1969 */ 1970 if (IGB_IS_NOT_OACTIVE(txr)) { 1971 ifp->if_flags &= ~IFF_OACTIVE; 1972 1973 /* 1974 * We have enough TX descriptors, turn off 1975 * the watchdog. We allow small amount of 1976 * packets (roughly intr_nsegs) pending on 1977 * the transmit ring. 1978 */ 1979 ifp->if_timer = 0; 1980 } 1981 } 1982 1983 static int 1984 igb_create_rx_ring(struct igb_rx_ring *rxr) 1985 { 1986 int rsize, i, error; 1987 1988 /* 1989 * Validate number of receive descriptors. It must not exceed 1990 * hardware maximum, and must be multiple of IGB_DBA_ALIGN. 1991 */ 1992 if (((igb_rxd * sizeof(struct e1000_rx_desc)) % IGB_DBA_ALIGN) != 0 || 1993 (igb_rxd > IGB_MAX_RXD) || (igb_rxd < IGB_MIN_RXD)) { 1994 device_printf(rxr->sc->dev, 1995 "Using %d RX descriptors instead of %d!\n", 1996 IGB_DEFAULT_RXD, igb_rxd); 1997 rxr->num_rx_desc = IGB_DEFAULT_RXD; 1998 } else { 1999 rxr->num_rx_desc = igb_rxd; 2000 } 2001 2002 /* 2003 * Allocate RX descriptor ring 2004 */ 2005 rsize = roundup2(rxr->num_rx_desc * sizeof(union e1000_adv_rx_desc), 2006 IGB_DBA_ALIGN); 2007 rxr->rxdma.dma_vaddr = bus_dmamem_coherent_any(rxr->sc->parent_tag, 2008 IGB_DBA_ALIGN, rsize, BUS_DMA_WAITOK, 2009 &rxr->rxdma.dma_tag, &rxr->rxdma.dma_map, 2010 &rxr->rxdma.dma_paddr); 2011 if (rxr->rxdma.dma_vaddr == NULL) { 2012 device_printf(rxr->sc->dev, 2013 "Unable to allocate RxDescriptor memory\n"); 2014 return ENOMEM; 2015 } 2016 rxr->rx_base = rxr->rxdma.dma_vaddr; 2017 bzero(rxr->rx_base, rsize); 2018 2019 rxr->rx_buf = kmalloc(sizeof(struct igb_rx_buf) * rxr->num_rx_desc, 2020 M_DEVBUF, M_WAITOK | M_ZERO); 2021 2022 /* 2023 * Create DMA tag for RX buffers 2024 */ 2025 error = bus_dma_tag_create(rxr->sc->parent_tag, 2026 1, 0, /* alignment, bounds */ 2027 BUS_SPACE_MAXADDR, /* lowaddr */ 2028 BUS_SPACE_MAXADDR, /* highaddr */ 2029 NULL, NULL, /* filter, filterarg */ 2030 MCLBYTES, /* maxsize */ 2031 1, /* nsegments */ 2032 MCLBYTES, /* maxsegsize */ 2033 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, /* flags */ 2034 &rxr->rx_tag); 2035 if (error) { 2036 device_printf(rxr->sc->dev, 2037 "Unable to create RX payload DMA tag\n"); 2038 kfree(rxr->rx_buf, M_DEVBUF); 2039 rxr->rx_buf = NULL; 2040 return error; 2041 } 2042 2043 /* 2044 * Create spare DMA map for RX buffers 2045 */ 2046 error = bus_dmamap_create(rxr->rx_tag, BUS_DMA_WAITOK, 2047 &rxr->rx_sparemap); 2048 if (error) { 2049 device_printf(rxr->sc->dev, 2050 "Unable to create spare RX DMA maps\n"); 2051 bus_dma_tag_destroy(rxr->rx_tag); 2052 kfree(rxr->rx_buf, M_DEVBUF); 2053 rxr->rx_buf = NULL; 2054 return error; 2055 } 2056 2057 /* 2058 * Create DMA maps for RX buffers 2059 */ 2060 for (i = 0; i < rxr->num_rx_desc; i++) { 2061 struct igb_rx_buf *rxbuf = &rxr->rx_buf[i]; 2062 2063 error = bus_dmamap_create(rxr->rx_tag, 2064 BUS_DMA_WAITOK, 
&rxbuf->map); 2065 if (error) { 2066 device_printf(rxr->sc->dev, 2067 "Unable to create RX DMA maps\n"); 2068 igb_destroy_rx_ring(rxr, i); 2069 return error; 2070 } 2071 } 2072 return 0; 2073 } 2074 2075 static void 2076 igb_free_rx_ring(struct igb_rx_ring *rxr) 2077 { 2078 int i; 2079 2080 for (i = 0; i < rxr->num_rx_desc; ++i) { 2081 struct igb_rx_buf *rxbuf = &rxr->rx_buf[i]; 2082 2083 if (rxbuf->m_head != NULL) { 2084 bus_dmamap_unload(rxr->rx_tag, rxbuf->map); 2085 m_freem(rxbuf->m_head); 2086 rxbuf->m_head = NULL; 2087 } 2088 } 2089 2090 if (rxr->fmp != NULL) 2091 m_freem(rxr->fmp); 2092 rxr->fmp = NULL; 2093 rxr->lmp = NULL; 2094 } 2095 2096 static void 2097 igb_destroy_rx_ring(struct igb_rx_ring *rxr, int ndesc) 2098 { 2099 int i; 2100 2101 if (rxr->rxdma.dma_vaddr != NULL) { 2102 bus_dmamap_unload(rxr->rxdma.dma_tag, rxr->rxdma.dma_map); 2103 bus_dmamem_free(rxr->rxdma.dma_tag, rxr->rxdma.dma_vaddr, 2104 rxr->rxdma.dma_map); 2105 bus_dma_tag_destroy(rxr->rxdma.dma_tag); 2106 rxr->rxdma.dma_vaddr = NULL; 2107 } 2108 2109 if (rxr->rx_buf == NULL) 2110 return; 2111 2112 for (i = 0; i < ndesc; ++i) { 2113 struct igb_rx_buf *rxbuf = &rxr->rx_buf[i]; 2114 2115 KKASSERT(rxbuf->m_head == NULL); 2116 bus_dmamap_destroy(rxr->rx_tag, rxbuf->map); 2117 } 2118 bus_dmamap_destroy(rxr->rx_tag, rxr->rx_sparemap); 2119 bus_dma_tag_destroy(rxr->rx_tag); 2120 2121 kfree(rxr->rx_buf, M_DEVBUF); 2122 rxr->rx_buf = NULL; 2123 } 2124 2125 static void 2126 igb_setup_rxdesc(union e1000_adv_rx_desc *rxd, const struct igb_rx_buf *rxbuf) 2127 { 2128 rxd->read.pkt_addr = htole64(rxbuf->paddr); 2129 rxd->wb.upper.status_error = 0; 2130 } 2131 2132 static int 2133 igb_newbuf(struct igb_rx_ring *rxr, int i, boolean_t wait) 2134 { 2135 struct mbuf *m; 2136 bus_dma_segment_t seg; 2137 bus_dmamap_t map; 2138 struct igb_rx_buf *rxbuf; 2139 int error, nseg; 2140 2141 m = m_getcl(wait ? 
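/*
 * Illustrative sketch, not part of the original driver: why
 * igb_setup_rxdesc() above clears wb.upper.status_error when a descriptor
 * is handed back to the hardware.  igb_rxeof() keys off the DD bit in that
 * field, so a recycled slot must read back as "not done" until the
 * controller fills it again.  The example_* helper below is hypothetical.
 */
#if 0	/* example only */
static void
example_recycle_rxdesc(union e1000_adv_rx_desc *rxd, uint64_t paddr)
{
	rxd->read.pkt_addr = htole64(paddr);	/* fresh buffer for the NIC */
	rxd->wb.upper.status_error = 0;		/* hide the stale DD/EOP bits */
}
#endif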
MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR); 2142 if (m == NULL) { 2143 if (wait) { 2144 if_printf(&rxr->sc->arpcom.ac_if, 2145 "Unable to allocate RX mbuf\n"); 2146 } 2147 return ENOBUFS; 2148 } 2149 m->m_len = m->m_pkthdr.len = MCLBYTES; 2150 2151 if (rxr->sc->max_frame_size <= MCLBYTES - ETHER_ALIGN) 2152 m_adj(m, ETHER_ALIGN); 2153 2154 error = bus_dmamap_load_mbuf_segment(rxr->rx_tag, 2155 rxr->rx_sparemap, m, &seg, 1, &nseg, BUS_DMA_NOWAIT); 2156 if (error) { 2157 m_freem(m); 2158 if (wait) { 2159 if_printf(&rxr->sc->arpcom.ac_if, 2160 "Unable to load RX mbuf\n"); 2161 } 2162 return error; 2163 } 2164 2165 rxbuf = &rxr->rx_buf[i]; 2166 if (rxbuf->m_head != NULL) 2167 bus_dmamap_unload(rxr->rx_tag, rxbuf->map); 2168 2169 map = rxbuf->map; 2170 rxbuf->map = rxr->rx_sparemap; 2171 rxr->rx_sparemap = map; 2172 2173 rxbuf->m_head = m; 2174 rxbuf->paddr = seg.ds_addr; 2175 2176 igb_setup_rxdesc(&rxr->rx_base[i], rxbuf); 2177 return 0; 2178 } 2179 2180 static int 2181 igb_init_rx_ring(struct igb_rx_ring *rxr) 2182 { 2183 int i; 2184 2185 /* Clear the ring contents */ 2186 bzero(rxr->rx_base, 2187 rxr->num_rx_desc * sizeof(union e1000_adv_rx_desc)); 2188 2189 /* Now replenish the ring mbufs */ 2190 for (i = 0; i < rxr->num_rx_desc; ++i) { 2191 int error; 2192 2193 error = igb_newbuf(rxr, i, TRUE); 2194 if (error) 2195 return error; 2196 } 2197 2198 /* Setup our descriptor indices */ 2199 rxr->next_to_check = 0; 2200 2201 rxr->fmp = NULL; 2202 rxr->lmp = NULL; 2203 rxr->discard = FALSE; 2204 2205 return 0; 2206 } 2207 2208 static void 2209 igb_init_rx_unit(struct igb_softc *sc) 2210 { 2211 struct ifnet *ifp = &sc->arpcom.ac_if; 2212 struct e1000_hw *hw = &sc->hw; 2213 uint32_t rctl, rxcsum, srrctl = 0; 2214 int i; 2215 2216 /* 2217 * Make sure receives are disabled while setting 2218 * up the descriptor ring 2219 */ 2220 rctl = E1000_READ_REG(hw, E1000_RCTL); 2221 E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); 2222 2223 #if 0 2224 /* 2225 ** Set up for header split 2226 */ 2227 if (igb_header_split) { 2228 /* Use a standard mbuf for the header */ 2229 srrctl |= IGB_HDR_BUF << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; 2230 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; 2231 } else 2232 #endif 2233 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; 2234 2235 /* 2236 ** Set up for jumbo frames 2237 */ 2238 if (ifp->if_mtu > ETHERMTU) { 2239 rctl |= E1000_RCTL_LPE; 2240 #if 0 2241 if (adapter->rx_mbuf_sz == MJUMPAGESIZE) { 2242 srrctl |= 4096 >> E1000_SRRCTL_BSIZEPKT_SHIFT; 2243 rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX; 2244 } else if (adapter->rx_mbuf_sz > MJUMPAGESIZE) { 2245 srrctl |= 8192 >> E1000_SRRCTL_BSIZEPKT_SHIFT; 2246 rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX; 2247 } 2248 /* Set maximum packet len */ 2249 psize = adapter->max_frame_size; 2250 /* are we on a vlan? 
*/ 2251 if (adapter->ifp->if_vlantrunk != NULL) 2252 psize += VLAN_TAG_SIZE; 2253 E1000_WRITE_REG(&adapter->hw, E1000_RLPML, psize); 2254 #else 2255 srrctl |= 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT; 2256 rctl |= E1000_RCTL_SZ_2048; 2257 #endif 2258 } else { 2259 rctl &= ~E1000_RCTL_LPE; 2260 srrctl |= 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT; 2261 rctl |= E1000_RCTL_SZ_2048; 2262 } 2263 2264 /* Setup the Base and Length of the Rx Descriptor Rings */ 2265 for (i = 0; i < sc->rx_ring_inuse; ++i) { 2266 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 2267 uint64_t bus_addr = rxr->rxdma.dma_paddr; 2268 uint32_t rxdctl; 2269 2270 E1000_WRITE_REG(hw, E1000_RDLEN(i), 2271 rxr->num_rx_desc * sizeof(struct e1000_rx_desc)); 2272 E1000_WRITE_REG(hw, E1000_RDBAH(i), 2273 (uint32_t)(bus_addr >> 32)); 2274 E1000_WRITE_REG(hw, E1000_RDBAL(i), 2275 (uint32_t)bus_addr); 2276 E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl); 2277 /* Enable this Queue */ 2278 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i)); 2279 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE; 2280 rxdctl &= 0xFFF00000; 2281 rxdctl |= IGB_RX_PTHRESH; 2282 rxdctl |= IGB_RX_HTHRESH << 8; 2283 /* 2284 * Don't set WTHRESH to a value above 1 on 82576, see: 2285 * 82576 specification update errata #26 2286 */ 2287 rxdctl |= IGB_RX_WTHRESH << 16; 2288 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl); 2289 } 2290 2291 rxcsum = E1000_READ_REG(&sc->hw, E1000_RXCSUM); 2292 rxcsum &= ~(E1000_RXCSUM_PCSS_MASK | E1000_RXCSUM_IPPCSE); 2293 2294 /* 2295 * Receive Checksum Offload for TCP and UDP 2296 * 2297 * Checksum offloading is also enabled if multiple receive 2298 * queue is to be supported, since we need it to figure out 2299 * fragments. 2300 */ 2301 if ((ifp->if_capenable & IFCAP_RXCSUM) || IGB_ENABLE_HWRSS(sc)) { 2302 /* 2303 * NOTE: 2304 * PCSD must be enabled to enable multiple 2305 * receive queues. 2306 */ 2307 rxcsum |= E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL | 2308 E1000_RXCSUM_PCSD; 2309 } else { 2310 rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL | 2311 E1000_RXCSUM_PCSD); 2312 } 2313 E1000_WRITE_REG(&sc->hw, E1000_RXCSUM, rxcsum); 2314 2315 if (IGB_ENABLE_HWRSS(sc)) { 2316 uint8_t key[IGB_NRSSRK * IGB_RSSRK_SIZE]; 2317 uint32_t reta_shift; 2318 int j, r; 2319 2320 /* 2321 * NOTE: 2322 * When we reach here, RSS has already been disabled 2323 * in igb_stop(), so we could safely configure RSS key 2324 * and redirect table. 2325 */ 2326 2327 /* 2328 * Configure RSS key 2329 */ 2330 toeplitz_get_key(key, sizeof(key)); 2331 for (i = 0; i < IGB_NRSSRK; ++i) { 2332 uint32_t rssrk; 2333 2334 rssrk = IGB_RSSRK_VAL(key, i); 2335 IGB_RSS_DPRINTF(sc, 1, "rssrk%d 0x%08x\n", i, rssrk); 2336 2337 E1000_WRITE_REG(hw, E1000_RSSRK(i), rssrk); 2338 } 2339 2340 /* 2341 * Configure RSS redirect table in following fashion: 2342 * (hash & ring_cnt_mask) == rdr_table[(hash & rdr_table_mask)] 2343 */ 2344 reta_shift = IGB_RETA_SHIFT; 2345 if (hw->mac.type == e1000_82575) 2346 reta_shift = IGB_RETA_SHIFT_82575; 2347 2348 r = 0; 2349 for (j = 0; j < IGB_NRETA; ++j) { 2350 uint32_t reta = 0; 2351 2352 for (i = 0; i < IGB_RETA_SIZE; ++i) { 2353 uint32_t q; 2354 2355 q = (r % sc->rx_ring_inuse) << reta_shift; 2356 reta |= q << (8 * i); 2357 ++r; 2358 } 2359 IGB_RSS_DPRINTF(sc, 1, "reta 0x%08x\n", reta); 2360 E1000_WRITE_REG(hw, E1000_RETA(j), reta); 2361 } 2362 2363 /* 2364 * Enable multiple receive queues. 2365 * Enable IPv4 RSS standard hash functions. 
2366 * Disable RSS interrupt on 82575 2367 */ 2368 E1000_WRITE_REG(&sc->hw, E1000_MRQC, 2369 E1000_MRQC_ENABLE_RSS_4Q | 2370 E1000_MRQC_RSS_FIELD_IPV4_TCP | 2371 E1000_MRQC_RSS_FIELD_IPV4); 2372 } 2373 2374 /* Setup the Receive Control Register */ 2375 rctl &= ~(3 << E1000_RCTL_MO_SHIFT); 2376 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO | 2377 E1000_RCTL_RDMTS_HALF | 2378 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT); 2379 /* Strip CRC bytes. */ 2380 rctl |= E1000_RCTL_SECRC; 2381 /* Make sure VLAN Filters are off */ 2382 rctl &= ~E1000_RCTL_VFE; 2383 /* Don't store bad packets */ 2384 rctl &= ~E1000_RCTL_SBP; 2385 2386 /* Enable Receives */ 2387 E1000_WRITE_REG(hw, E1000_RCTL, rctl); 2388 2389 /* 2390 * Setup the HW Rx Head and Tail Descriptor Pointers 2391 * - needs to be after enable 2392 */ 2393 for (i = 0; i < sc->rx_ring_inuse; ++i) { 2394 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 2395 2396 E1000_WRITE_REG(hw, E1000_RDH(i), rxr->next_to_check); 2397 E1000_WRITE_REG(hw, E1000_RDT(i), rxr->num_rx_desc - 1); 2398 } 2399 } 2400 2401 static void 2402 igb_rxeof(struct igb_rx_ring *rxr, int count) 2403 { 2404 struct ifnet *ifp = &rxr->sc->arpcom.ac_if; 2405 union e1000_adv_rx_desc *cur; 2406 uint32_t staterr; 2407 int i; 2408 2409 i = rxr->next_to_check; 2410 cur = &rxr->rx_base[i]; 2411 staterr = le32toh(cur->wb.upper.status_error); 2412 2413 if ((staterr & E1000_RXD_STAT_DD) == 0) 2414 return; 2415 2416 while ((staterr & E1000_RXD_STAT_DD) && count != 0) { 2417 struct pktinfo *pi = NULL, pi0; 2418 struct igb_rx_buf *rxbuf = &rxr->rx_buf[i]; 2419 struct mbuf *m = NULL; 2420 boolean_t eop; 2421 2422 eop = (staterr & E1000_RXD_STAT_EOP) ? TRUE : FALSE; 2423 if (eop) 2424 --count; 2425 2426 if ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) == 0 && 2427 !rxr->discard) { 2428 struct mbuf *mp = rxbuf->m_head; 2429 uint32_t hash, hashtype; 2430 uint16_t vlan; 2431 int len; 2432 2433 len = le16toh(cur->wb.upper.length); 2434 if (rxr->sc->hw.mac.type == e1000_i350 && 2435 (staterr & E1000_RXDEXT_STATERR_LB)) 2436 vlan = be16toh(cur->wb.upper.vlan); 2437 else 2438 vlan = le16toh(cur->wb.upper.vlan); 2439 2440 hash = le32toh(cur->wb.lower.hi_dword.rss); 2441 hashtype = le32toh(cur->wb.lower.lo_dword.data) & 2442 E1000_RXDADV_RSSTYPE_MASK; 2443 2444 IGB_RSS_DPRINTF(rxr->sc, 10, 2445 "ring%d, hash 0x%08x, hashtype %u\n", 2446 rxr->me, hash, hashtype); 2447 2448 bus_dmamap_sync(rxr->rx_tag, rxbuf->map, 2449 BUS_DMASYNC_POSTREAD); 2450 2451 if (igb_newbuf(rxr, i, FALSE) != 0) { 2452 ifp->if_iqdrops++; 2453 goto discard; 2454 } 2455 2456 mp->m_len = len; 2457 if (rxr->fmp == NULL) { 2458 mp->m_pkthdr.len = len; 2459 rxr->fmp = mp; 2460 rxr->lmp = mp; 2461 } else { 2462 rxr->lmp->m_next = mp; 2463 rxr->lmp = rxr->lmp->m_next; 2464 rxr->fmp->m_pkthdr.len += len; 2465 } 2466 2467 if (eop) { 2468 m = rxr->fmp; 2469 rxr->fmp = NULL; 2470 rxr->lmp = NULL; 2471 2472 m->m_pkthdr.rcvif = ifp; 2473 ifp->if_ipackets++; 2474 2475 if (ifp->if_capenable & IFCAP_RXCSUM) 2476 igb_rxcsum(staterr, m); 2477 2478 if (staterr & E1000_RXD_STAT_VP) { 2479 m->m_pkthdr.ether_vlantag = vlan; 2480 m->m_flags |= M_VLANTAG; 2481 } 2482 2483 if (ifp->if_capenable & IFCAP_RSS) { 2484 pi = igb_rssinfo(m, &pi0, 2485 hash, hashtype, staterr); 2486 } 2487 #ifdef IGB_RSS_DEBUG 2488 rxr->rx_packets++; 2489 #endif 2490 } 2491 } else { 2492 ifp->if_ierrors++; 2493 discard: 2494 igb_setup_rxdesc(cur, rxbuf); 2495 if (!eop) 2496 rxr->discard = TRUE; 2497 else 2498 rxr->discard = FALSE; 2499 if (rxr->fmp != NULL) { 2500 
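/*
 * Illustrative sketch, not part of the original driver: how the RSS
 * redirect-table setup in igb_init_rx_unit() above packs four one-byte
 * entries into each 32-bit RETA register.  Hash bucket r is mapped to RX
 * ring (r % nrings), with the ring number placed at the queue-index bit
 * position (reta_shift) inside its byte lane.  The example_* helper below
 * is hypothetical.
 */
#if 0	/* example only */
static uint32_t
example_reta_word(int first_bucket, int nrings, int reta_shift)
{
	uint32_t reta = 0;
	int i;

	for (i = 0; i < 4; ++i) {
		uint32_t q = ((first_bucket + i) % nrings) << reta_shift;

		reta |= q << (8 * i);	/* one byte lane per table entry */
	}
	return reta;
}
#endif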
m_freem(rxr->fmp); 2501 rxr->fmp = NULL; 2502 rxr->lmp = NULL; 2503 } 2504 m = NULL; 2505 } 2506 2507 if (m != NULL) 2508 ether_input_pkt(ifp, m, pi); 2509 2510 /* Advance our pointers to the next descriptor. */ 2511 if (++i == rxr->num_rx_desc) 2512 i = 0; 2513 2514 cur = &rxr->rx_base[i]; 2515 staterr = le32toh(cur->wb.upper.status_error); 2516 } 2517 rxr->next_to_check = i; 2518 2519 if (--i < 0) 2520 i = rxr->num_rx_desc - 1; 2521 E1000_WRITE_REG(&rxr->sc->hw, E1000_RDT(rxr->me), i); 2522 } 2523 2524 2525 static void 2526 igb_set_vlan(struct igb_softc *sc) 2527 { 2528 struct e1000_hw *hw = &sc->hw; 2529 uint32_t reg; 2530 #if 0 2531 struct ifnet *ifp = sc->arpcom.ac_if; 2532 #endif 2533 2534 if (sc->vf_ifp) { 2535 e1000_rlpml_set_vf(hw, sc->max_frame_size + VLAN_TAG_SIZE); 2536 return; 2537 } 2538 2539 reg = E1000_READ_REG(hw, E1000_CTRL); 2540 reg |= E1000_CTRL_VME; 2541 E1000_WRITE_REG(hw, E1000_CTRL, reg); 2542 2543 #if 0 2544 /* Enable the Filter Table */ 2545 if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) { 2546 reg = E1000_READ_REG(hw, E1000_RCTL); 2547 reg &= ~E1000_RCTL_CFIEN; 2548 reg |= E1000_RCTL_VFE; 2549 E1000_WRITE_REG(hw, E1000_RCTL, reg); 2550 } 2551 #endif 2552 2553 /* Update the frame size */ 2554 E1000_WRITE_REG(&sc->hw, E1000_RLPML, 2555 sc->max_frame_size + VLAN_TAG_SIZE); 2556 2557 #if 0 2558 /* Don't bother with table if no vlans */ 2559 if ((adapter->num_vlans == 0) || 2560 ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)) 2561 return; 2562 /* 2563 ** A soft reset zero's out the VFTA, so 2564 ** we need to repopulate it now. 2565 */ 2566 for (int i = 0; i < IGB_VFTA_SIZE; i++) 2567 if (adapter->shadow_vfta[i] != 0) { 2568 if (adapter->vf_ifp) 2569 e1000_vfta_set_vf(hw, 2570 adapter->shadow_vfta[i], TRUE); 2571 else 2572 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, 2573 i, adapter->shadow_vfta[i]); 2574 } 2575 #endif 2576 } 2577 2578 static void 2579 igb_enable_intr(struct igb_softc *sc) 2580 { 2581 if (sc->intr_type != PCI_INTR_TYPE_MSIX) { 2582 lwkt_serialize_handler_enable(&sc->main_serialize); 2583 } else { 2584 int i; 2585 2586 for (i = 0; i < sc->msix_cnt; ++i) { 2587 lwkt_serialize_handler_enable( 2588 sc->msix_data[i].msix_serialize); 2589 } 2590 } 2591 2592 if ((sc->flags & IGB_FLAG_SHARED_INTR) == 0) { 2593 if (sc->intr_type == PCI_INTR_TYPE_MSIX) 2594 E1000_WRITE_REG(&sc->hw, E1000_EIAC, sc->intr_mask); 2595 else 2596 E1000_WRITE_REG(&sc->hw, E1000_EIAC, 0); 2597 E1000_WRITE_REG(&sc->hw, E1000_EIAM, sc->intr_mask); 2598 E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->intr_mask); 2599 E1000_WRITE_REG(&sc->hw, E1000_IMS, E1000_IMS_LSC); 2600 } else { 2601 E1000_WRITE_REG(&sc->hw, E1000_IMS, IMS_ENABLE_MASK); 2602 } 2603 E1000_WRITE_FLUSH(&sc->hw); 2604 } 2605 2606 static void 2607 igb_disable_intr(struct igb_softc *sc) 2608 { 2609 if ((sc->flags & IGB_FLAG_SHARED_INTR) == 0) { 2610 E1000_WRITE_REG(&sc->hw, E1000_EIMC, 0xffffffff); 2611 E1000_WRITE_REG(&sc->hw, E1000_EIAC, 0); 2612 } 2613 E1000_WRITE_REG(&sc->hw, E1000_IMC, 0xffffffff); 2614 E1000_WRITE_FLUSH(&sc->hw); 2615 2616 if (sc->intr_type != PCI_INTR_TYPE_MSIX) { 2617 lwkt_serialize_handler_disable(&sc->main_serialize); 2618 } else { 2619 int i; 2620 2621 for (i = 0; i < sc->msix_cnt; ++i) { 2622 lwkt_serialize_handler_disable( 2623 sc->msix_data[i].msix_serialize); 2624 } 2625 } 2626 } 2627 2628 /* 2629 * Bit of a misnomer, what this really means is 2630 * to enable OS management of the system... 
aka 2631 * to disable special hardware management features 2632 */ 2633 static void 2634 igb_get_mgmt(struct igb_softc *sc) 2635 { 2636 if (sc->flags & IGB_FLAG_HAS_MGMT) { 2637 int manc2h = E1000_READ_REG(&sc->hw, E1000_MANC2H); 2638 int manc = E1000_READ_REG(&sc->hw, E1000_MANC); 2639 2640 /* disable hardware interception of ARP */ 2641 manc &= ~E1000_MANC_ARP_EN; 2642 2643 /* enable receiving management packets to the host */ 2644 manc |= E1000_MANC_EN_MNG2HOST; 2645 manc2h |= 1 << 5; /* Mng Port 623 */ 2646 manc2h |= 1 << 6; /* Mng Port 664 */ 2647 E1000_WRITE_REG(&sc->hw, E1000_MANC2H, manc2h); 2648 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc); 2649 } 2650 } 2651 2652 /* 2653 * Give control back to hardware management controller 2654 * if there is one. 2655 */ 2656 static void 2657 igb_rel_mgmt(struct igb_softc *sc) 2658 { 2659 if (sc->flags & IGB_FLAG_HAS_MGMT) { 2660 int manc = E1000_READ_REG(&sc->hw, E1000_MANC); 2661 2662 /* Re-enable hardware interception of ARP */ 2663 manc |= E1000_MANC_ARP_EN; 2664 manc &= ~E1000_MANC_EN_MNG2HOST; 2665 2666 E1000_WRITE_REG(&sc->hw, E1000_MANC, manc); 2667 } 2668 } 2669 2670 /* 2671 * Sets CTRL_EXT:DRV_LOAD bit. 2672 * 2673 * For ASF and Pass Through versions of f/w this means that 2674 * the driver is loaded. 2675 */ 2676 static void 2677 igb_get_hw_control(struct igb_softc *sc) 2678 { 2679 uint32_t ctrl_ext; 2680 2681 if (sc->vf_ifp) 2682 return; 2683 2684 /* Let firmware know the driver has taken over */ 2685 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT); 2686 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, 2687 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); 2688 } 2689 2690 /* 2691 * Resets CTRL_EXT:DRV_LOAD bit. 2692 * 2693 * For ASF and Pass Through versions of f/w this means that the 2694 * driver is no longer loaded. 2695 */ 2696 static void 2697 igb_rel_hw_control(struct igb_softc *sc) 2698 { 2699 uint32_t ctrl_ext; 2700 2701 if (sc->vf_ifp) 2702 return; 2703 2704 /* Let firmware taken over control of h/w */ 2705 ctrl_ext = E1000_READ_REG(&sc->hw, E1000_CTRL_EXT); 2706 E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, 2707 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); 2708 } 2709 2710 static int 2711 igb_is_valid_ether_addr(const uint8_t *addr) 2712 { 2713 uint8_t zero_addr[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 }; 2714 2715 if ((addr[0] & 1) || !bcmp(addr, zero_addr, ETHER_ADDR_LEN)) 2716 return FALSE; 2717 return TRUE; 2718 } 2719 2720 /* 2721 * Enable PCI Wake On Lan capability 2722 */ 2723 static void 2724 igb_enable_wol(device_t dev) 2725 { 2726 uint16_t cap, status; 2727 uint8_t id; 2728 2729 /* First find the capabilities pointer*/ 2730 cap = pci_read_config(dev, PCIR_CAP_PTR, 2); 2731 2732 /* Read the PM Capabilities */ 2733 id = pci_read_config(dev, cap, 1); 2734 if (id != PCIY_PMG) /* Something wrong */ 2735 return; 2736 2737 /* 2738 * OK, we have the power capabilities, 2739 * so now get the status register 2740 */ 2741 cap += PCIR_POWER_STATUS; 2742 status = pci_read_config(dev, cap, 2); 2743 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 2744 pci_write_config(dev, cap, status, 2); 2745 } 2746 2747 static void 2748 igb_update_stats_counters(struct igb_softc *sc) 2749 { 2750 struct e1000_hw *hw = &sc->hw; 2751 struct e1000_hw_stats *stats; 2752 struct ifnet *ifp = &sc->arpcom.ac_if; 2753 2754 /* 2755 * The virtual function adapter has only a 2756 * small controlled set of stats, do only 2757 * those and return. 
2758 */ 2759 if (sc->vf_ifp) { 2760 igb_update_vf_stats_counters(sc); 2761 return; 2762 } 2763 stats = sc->stats; 2764 2765 if (sc->hw.phy.media_type == e1000_media_type_copper || 2766 (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) { 2767 stats->symerrs += 2768 E1000_READ_REG(hw,E1000_SYMERRS); 2769 stats->sec += E1000_READ_REG(hw, E1000_SEC); 2770 } 2771 2772 stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS); 2773 stats->mpc += E1000_READ_REG(hw, E1000_MPC); 2774 stats->scc += E1000_READ_REG(hw, E1000_SCC); 2775 stats->ecol += E1000_READ_REG(hw, E1000_ECOL); 2776 2777 stats->mcc += E1000_READ_REG(hw, E1000_MCC); 2778 stats->latecol += E1000_READ_REG(hw, E1000_LATECOL); 2779 stats->colc += E1000_READ_REG(hw, E1000_COLC); 2780 stats->dc += E1000_READ_REG(hw, E1000_DC); 2781 stats->rlec += E1000_READ_REG(hw, E1000_RLEC); 2782 stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC); 2783 stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC); 2784 2785 /* 2786 * For watchdog management we need to know if we have been 2787 * paused during the last interval, so capture that here. 2788 */ 2789 sc->pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC); 2790 stats->xoffrxc += sc->pause_frames; 2791 stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC); 2792 stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC); 2793 stats->prc64 += E1000_READ_REG(hw, E1000_PRC64); 2794 stats->prc127 += E1000_READ_REG(hw, E1000_PRC127); 2795 stats->prc255 += E1000_READ_REG(hw, E1000_PRC255); 2796 stats->prc511 += E1000_READ_REG(hw, E1000_PRC511); 2797 stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023); 2798 stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522); 2799 stats->gprc += E1000_READ_REG(hw, E1000_GPRC); 2800 stats->bprc += E1000_READ_REG(hw, E1000_BPRC); 2801 stats->mprc += E1000_READ_REG(hw, E1000_MPRC); 2802 stats->gptc += E1000_READ_REG(hw, E1000_GPTC); 2803 2804 /* For the 64-bit byte counters the low dword must be read first. 
*/ 2805 /* Both registers clear on the read of the high dword */ 2806 2807 stats->gorc += E1000_READ_REG(hw, E1000_GORCL) + 2808 ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32); 2809 stats->gotc += E1000_READ_REG(hw, E1000_GOTCL) + 2810 ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32); 2811 2812 stats->rnbc += E1000_READ_REG(hw, E1000_RNBC); 2813 stats->ruc += E1000_READ_REG(hw, E1000_RUC); 2814 stats->rfc += E1000_READ_REG(hw, E1000_RFC); 2815 stats->roc += E1000_READ_REG(hw, E1000_ROC); 2816 stats->rjc += E1000_READ_REG(hw, E1000_RJC); 2817 2818 stats->tor += E1000_READ_REG(hw, E1000_TORH); 2819 stats->tot += E1000_READ_REG(hw, E1000_TOTH); 2820 2821 stats->tpr += E1000_READ_REG(hw, E1000_TPR); 2822 stats->tpt += E1000_READ_REG(hw, E1000_TPT); 2823 stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64); 2824 stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127); 2825 stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255); 2826 stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511); 2827 stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023); 2828 stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522); 2829 stats->mptc += E1000_READ_REG(hw, E1000_MPTC); 2830 stats->bptc += E1000_READ_REG(hw, E1000_BPTC); 2831 2832 /* Interrupt Counts */ 2833 2834 stats->iac += E1000_READ_REG(hw, E1000_IAC); 2835 stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC); 2836 stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC); 2837 stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC); 2838 stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC); 2839 stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC); 2840 stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC); 2841 stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC); 2842 stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC); 2843 2844 /* Host to Card Statistics */ 2845 2846 stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC); 2847 stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC); 2848 stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC); 2849 stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC); 2850 stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC); 2851 stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC); 2852 stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC); 2853 stats->hgorc += (E1000_READ_REG(hw, E1000_HGORCL) + 2854 ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32)); 2855 stats->hgotc += (E1000_READ_REG(hw, E1000_HGOTCL) + 2856 ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32)); 2857 stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS); 2858 stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC); 2859 stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC); 2860 2861 stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC); 2862 stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC); 2863 stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS); 2864 stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR); 2865 stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC); 2866 stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC); 2867 2868 ifp->if_collisions = stats->colc; 2869 2870 /* Rx Errors */ 2871 ifp->if_ierrors = stats->rxerrc + stats->crcerrs + stats->algnerrc + 2872 stats->ruc + stats->roc + stats->mpc + stats->cexterr; 2873 2874 /* Tx Errors */ 2875 ifp->if_oerrors = stats->ecol + stats->latecol + sc->watchdog_events; 2876 2877 /* Driver specific counters */ 2878 sc->device_control = E1000_READ_REG(hw, E1000_CTRL); 2879 sc->rx_control = E1000_READ_REG(hw, E1000_RCTL); 2880 sc->int_mask = E1000_READ_REG(hw, E1000_IMS); 2881 sc->eint_mask = E1000_READ_REG(hw, E1000_EIMS); 2882 sc->packet_buf_alloc_tx = 2883 ((E1000_READ_REG(hw, 
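/*
 * Illustrative sketch, not part of the original driver: the read ordering
 * described by the comment above for the 64-bit byte counters (GORC/GOTC
 * and friends).  The low dword is latched first; reading the high dword
 * then clears the pair, so the two reads form one consistent 64-bit value.
 * The example_* helper below is hypothetical.
 */
#if 0	/* example only */
static uint64_t
example_read_64bit_counter(struct e1000_hw *hw, int lo_reg, int hi_reg)
{
	uint64_t lo, hi;

	lo = E1000_READ_REG(hw, lo_reg);	/* latch the low dword first */
	hi = E1000_READ_REG(hw, hi_reg);	/* clears the counter pair */
	return (hi << 32) | lo;
}
#endif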
E1000_PBA) & 0xffff0000) >> 16); 2884 sc->packet_buf_alloc_rx = 2885 (E1000_READ_REG(hw, E1000_PBA) & 0xffff); 2886 } 2887 2888 static void 2889 igb_vf_init_stats(struct igb_softc *sc) 2890 { 2891 struct e1000_hw *hw = &sc->hw; 2892 struct e1000_vf_stats *stats; 2893 2894 stats = sc->stats; 2895 stats->last_gprc = E1000_READ_REG(hw, E1000_VFGPRC); 2896 stats->last_gorc = E1000_READ_REG(hw, E1000_VFGORC); 2897 stats->last_gptc = E1000_READ_REG(hw, E1000_VFGPTC); 2898 stats->last_gotc = E1000_READ_REG(hw, E1000_VFGOTC); 2899 stats->last_mprc = E1000_READ_REG(hw, E1000_VFMPRC); 2900 } 2901 2902 static void 2903 igb_update_vf_stats_counters(struct igb_softc *sc) 2904 { 2905 struct e1000_hw *hw = &sc->hw; 2906 struct e1000_vf_stats *stats; 2907 2908 if (sc->link_speed == 0) 2909 return; 2910 2911 stats = sc->stats; 2912 UPDATE_VF_REG(E1000_VFGPRC, stats->last_gprc, stats->gprc); 2913 UPDATE_VF_REG(E1000_VFGORC, stats->last_gorc, stats->gorc); 2914 UPDATE_VF_REG(E1000_VFGPTC, stats->last_gptc, stats->gptc); 2915 UPDATE_VF_REG(E1000_VFGOTC, stats->last_gotc, stats->gotc); 2916 UPDATE_VF_REG(E1000_VFMPRC, stats->last_mprc, stats->mprc); 2917 } 2918 2919 #ifdef DEVICE_POLLING 2920 2921 static void 2922 igb_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 2923 { 2924 struct igb_softc *sc = ifp->if_softc; 2925 uint32_t reg_icr; 2926 2927 switch (cmd) { 2928 case POLL_REGISTER: 2929 case POLL_DEREGISTER: 2930 ASSERT_IFNET_SERIALIZED_ALL(ifp); 2931 igb_init(sc); 2932 break; 2933 2934 case POLL_AND_CHECK_STATUS: 2935 ASSERT_SERIALIZED(&sc->main_serialize); 2936 reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR); 2937 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { 2938 sc->hw.mac.get_link_status = 1; 2939 igb_update_link_status(sc); 2940 } 2941 /* FALL THROUGH */ 2942 case POLL_ONLY: 2943 ASSERT_SERIALIZED(&sc->main_serialize); 2944 if (ifp->if_flags & IFF_RUNNING) { 2945 struct igb_tx_ring *txr; 2946 int i; 2947 2948 for (i = 0; i < sc->rx_ring_inuse; ++i) { 2949 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 2950 2951 lwkt_serialize_enter(&rxr->rx_serialize); 2952 igb_rxeof(rxr, count); 2953 lwkt_serialize_exit(&rxr->rx_serialize); 2954 } 2955 2956 txr = &sc->tx_rings[0]; 2957 lwkt_serialize_enter(&txr->tx_serialize); 2958 igb_txeof(txr); 2959 if (!ifq_is_empty(&ifp->if_snd)) 2960 if_devstart(ifp); 2961 lwkt_serialize_exit(&txr->tx_serialize); 2962 } 2963 break; 2964 } 2965 } 2966 2967 #endif /* DEVICE_POLLING */ 2968 2969 static void 2970 igb_intr(void *xsc) 2971 { 2972 struct igb_softc *sc = xsc; 2973 struct ifnet *ifp = &sc->arpcom.ac_if; 2974 uint32_t eicr; 2975 2976 ASSERT_SERIALIZED(&sc->main_serialize); 2977 2978 eicr = E1000_READ_REG(&sc->hw, E1000_EICR); 2979 2980 if (eicr == 0) 2981 return; 2982 2983 if (ifp->if_flags & IFF_RUNNING) { 2984 struct igb_tx_ring *txr; 2985 int i; 2986 2987 for (i = 0; i < sc->rx_ring_inuse; ++i) { 2988 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 2989 2990 if (eicr & rxr->rx_intr_mask) { 2991 lwkt_serialize_enter(&rxr->rx_serialize); 2992 igb_rxeof(rxr, -1); 2993 lwkt_serialize_exit(&rxr->rx_serialize); 2994 } 2995 } 2996 2997 txr = &sc->tx_rings[0]; 2998 if (eicr & txr->tx_intr_mask) { 2999 lwkt_serialize_enter(&txr->tx_serialize); 3000 igb_txeof(txr); 3001 if (!ifq_is_empty(&ifp->if_snd)) 3002 if_devstart(ifp); 3003 lwkt_serialize_exit(&txr->tx_serialize); 3004 } 3005 } 3006 3007 if (eicr & E1000_EICR_OTHER) { 3008 uint32_t icr = E1000_READ_REG(&sc->hw, E1000_ICR); 3009 3010 /* Link status change */ 3011 if (icr & E1000_ICR_LSC) { 3012 sc->hw.mac.get_link_status 
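/*
 * Illustrative sketch, an assumption rather than code taken from the
 * driver: the usual wrap-safe pattern for folding a free-running 32-bit
 * hardware counter into a 64-bit software total, which is what the
 * UPDATE_VF_REG() macro used above is presumed to implement for the VF
 * statistics registers.  The example_* helper below is hypothetical.
 */
#if 0	/* example only */
static void
example_update_vf_counter(uint32_t cur, uint32_t *last, uint64_t *total)
{
	/* 32-bit subtraction absorbs a single wrap of the hardware counter */
	*total += (uint32_t)(cur - *last);
	*last = cur;
}
#endif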
= 1; 3013 igb_update_link_status(sc); 3014 } 3015 } 3016 3017 /* 3018 * Reading EICR has the side effect to clear interrupt mask, 3019 * so all interrupts need to be enabled here. 3020 */ 3021 E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->intr_mask); 3022 } 3023 3024 static void 3025 igb_intr_shared(void *xsc) 3026 { 3027 struct igb_softc *sc = xsc; 3028 struct ifnet *ifp = &sc->arpcom.ac_if; 3029 uint32_t reg_icr; 3030 3031 ASSERT_SERIALIZED(&sc->main_serialize); 3032 3033 reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR); 3034 3035 /* Hot eject? */ 3036 if (reg_icr == 0xffffffff) 3037 return; 3038 3039 /* Definitely not our interrupt. */ 3040 if (reg_icr == 0x0) 3041 return; 3042 3043 if ((reg_icr & E1000_ICR_INT_ASSERTED) == 0) 3044 return; 3045 3046 if (ifp->if_flags & IFF_RUNNING) { 3047 if (reg_icr & 3048 (E1000_ICR_RXT0 | E1000_ICR_RXDMT0 | E1000_ICR_RXO)) { 3049 int i; 3050 3051 for (i = 0; i < sc->rx_ring_inuse; ++i) { 3052 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 3053 3054 lwkt_serialize_enter(&rxr->rx_serialize); 3055 igb_rxeof(rxr, -1); 3056 lwkt_serialize_exit(&rxr->rx_serialize); 3057 } 3058 } 3059 3060 if (reg_icr & E1000_ICR_TXDW) { 3061 struct igb_tx_ring *txr = &sc->tx_rings[0]; 3062 3063 lwkt_serialize_enter(&txr->tx_serialize); 3064 igb_txeof(txr); 3065 if (!ifq_is_empty(&ifp->if_snd)) 3066 if_devstart(ifp); 3067 lwkt_serialize_exit(&txr->tx_serialize); 3068 } 3069 } 3070 3071 /* Link status change */ 3072 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { 3073 sc->hw.mac.get_link_status = 1; 3074 igb_update_link_status(sc); 3075 } 3076 3077 if (reg_icr & E1000_ICR_RXO) 3078 sc->rx_overruns++; 3079 } 3080 3081 static int 3082 igb_txctx_pullup(struct igb_tx_ring *txr, struct mbuf **m0) 3083 { 3084 struct mbuf *m = *m0; 3085 struct ether_header *eh; 3086 int len; 3087 3088 txr->ctx_try_pullup++; 3089 3090 len = ETHER_HDR_LEN + IGB_IPVHL_SIZE; 3091 3092 if (__predict_false(!M_WRITABLE(m))) { 3093 if (__predict_false(m->m_len < ETHER_HDR_LEN)) { 3094 txr->ctx_drop1++; 3095 m_freem(m); 3096 *m0 = NULL; 3097 return ENOBUFS; 3098 } 3099 eh = mtod(m, struct ether_header *); 3100 3101 if (eh->ether_type == htons(ETHERTYPE_VLAN)) 3102 len += EVL_ENCAPLEN; 3103 3104 if (m->m_len < len) { 3105 txr->ctx_drop2++; 3106 m_freem(m); 3107 *m0 = NULL; 3108 return ENOBUFS; 3109 } 3110 return 0; 3111 } 3112 3113 if (__predict_false(m->m_len < ETHER_HDR_LEN)) { 3114 txr->ctx_pullup1++; 3115 m = m_pullup(m, ETHER_HDR_LEN); 3116 if (m == NULL) { 3117 txr->ctx_pullup1_failed++; 3118 *m0 = NULL; 3119 return ENOBUFS; 3120 } 3121 *m0 = m; 3122 } 3123 eh = mtod(m, struct ether_header *); 3124 3125 if (eh->ether_type == htons(ETHERTYPE_VLAN)) 3126 len += EVL_ENCAPLEN; 3127 3128 if (m->m_len < len) { 3129 txr->ctx_pullup2++; 3130 m = m_pullup(m, len); 3131 if (m == NULL) { 3132 txr->ctx_pullup2_failed++; 3133 *m0 = NULL; 3134 return ENOBUFS; 3135 } 3136 *m0 = m; 3137 } 3138 return 0; 3139 } 3140 3141 static int 3142 igb_encap(struct igb_tx_ring *txr, struct mbuf **m_headp) 3143 { 3144 bus_dma_segment_t segs[IGB_MAX_SCATTER]; 3145 bus_dmamap_t map; 3146 struct igb_tx_buf *tx_buf, *tx_buf_mapped; 3147 union e1000_adv_tx_desc *txd = NULL; 3148 struct mbuf *m_head = *m_headp; 3149 uint32_t olinfo_status = 0, cmd_type_len = 0, cmd_rs = 0; 3150 int maxsegs, nsegs, i, j, error, last = 0; 3151 uint32_t hdrlen = 0; 3152 3153 if (m_head->m_len < IGB_TXCSUM_MINHL && 3154 ((m_head->m_pkthdr.csum_flags & IGB_CSUM_FEATURES) || 3155 (m_head->m_flags & M_VLANTAG))) { 3156 /* 3157 * Make sure that ethernet header and 
ip.ip_hl are in 3158 * contiguous memory, since if TXCSUM or VLANTAG is 3159 * enabled, later TX context descriptor's setup need 3160 * to access ip.ip_hl. 3161 */ 3162 error = igb_txctx_pullup(txr, m_headp); 3163 if (error) { 3164 KKASSERT(*m_headp == NULL); 3165 return error; 3166 } 3167 m_head = *m_headp; 3168 } 3169 3170 /* Set basic descriptor constants */ 3171 cmd_type_len |= E1000_ADVTXD_DTYP_DATA; 3172 cmd_type_len |= E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT; 3173 if (m_head->m_flags & M_VLANTAG) 3174 cmd_type_len |= E1000_ADVTXD_DCMD_VLE; 3175 3176 /* 3177 * Map the packet for DMA. 3178 */ 3179 tx_buf = &txr->tx_buf[txr->next_avail_desc]; 3180 tx_buf_mapped = tx_buf; 3181 map = tx_buf->map; 3182 3183 maxsegs = txr->tx_avail - IGB_TX_RESERVED; 3184 KASSERT(maxsegs >= txr->spare_desc, ("not enough spare TX desc\n")); 3185 if (maxsegs > IGB_MAX_SCATTER) 3186 maxsegs = IGB_MAX_SCATTER; 3187 3188 error = bus_dmamap_load_mbuf_defrag(txr->tx_tag, map, m_headp, 3189 segs, maxsegs, &nsegs, BUS_DMA_NOWAIT); 3190 if (error) { 3191 if (error == ENOBUFS) 3192 txr->sc->mbuf_defrag_failed++; 3193 else 3194 txr->sc->no_tx_dma_setup++; 3195 3196 m_freem(*m_headp); 3197 *m_headp = NULL; 3198 return error; 3199 } 3200 bus_dmamap_sync(txr->tx_tag, map, BUS_DMASYNC_PREWRITE); 3201 3202 m_head = *m_headp; 3203 3204 #if 0 3205 /* 3206 * Set up the context descriptor: 3207 * used when any hardware offload is done. 3208 * This includes CSUM, VLAN, and TSO. It 3209 * will use the first descriptor. 3210 */ 3211 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 3212 if (igb_tso_setup(txr, m_head, &hdrlen)) { 3213 cmd_type_len |= E1000_ADVTXD_DCMD_TSE; 3214 olinfo_status |= E1000_TXD_POPTS_IXSM << 8; 3215 olinfo_status |= E1000_TXD_POPTS_TXSM << 8; 3216 } else 3217 return (ENXIO); 3218 } else if (igb_tx_ctx_setup(txr, m_head)) 3219 olinfo_status |= E1000_TXD_POPTS_TXSM << 8; 3220 #else 3221 if (igb_txctx(txr, m_head)) { 3222 olinfo_status |= (E1000_TXD_POPTS_IXSM << 8); 3223 if (m_head->m_pkthdr.csum_flags & (CSUM_UDP | CSUM_TCP)) 3224 olinfo_status |= (E1000_TXD_POPTS_TXSM << 8); 3225 txr->tx_nsegs++; 3226 } 3227 #endif 3228 3229 txr->tx_nsegs += nsegs; 3230 if (txr->tx_nsegs >= txr->intr_nsegs) { 3231 /* 3232 * Report Status (RS) is turned on every intr_nsegs 3233 * descriptors (roughly). 
3234 */ 3235 txr->tx_nsegs = 0; 3236 cmd_rs = E1000_ADVTXD_DCMD_RS; 3237 } 3238 3239 /* Calculate payload length */ 3240 olinfo_status |= ((m_head->m_pkthdr.len - hdrlen) 3241 << E1000_ADVTXD_PAYLEN_SHIFT); 3242 3243 /* 82575 needs the queue index added */ 3244 if (txr->sc->hw.mac.type == e1000_82575) 3245 olinfo_status |= txr->me << 4; 3246 3247 /* Set up our transmit descriptors */ 3248 i = txr->next_avail_desc; 3249 for (j = 0; j < nsegs; j++) { 3250 bus_size_t seg_len; 3251 bus_addr_t seg_addr; 3252 3253 tx_buf = &txr->tx_buf[i]; 3254 txd = (union e1000_adv_tx_desc *)&txr->tx_base[i]; 3255 seg_addr = segs[j].ds_addr; 3256 seg_len = segs[j].ds_len; 3257 3258 txd->read.buffer_addr = htole64(seg_addr); 3259 txd->read.cmd_type_len = htole32(cmd_type_len | seg_len); 3260 txd->read.olinfo_status = htole32(olinfo_status); 3261 last = i; 3262 if (++i == txr->num_tx_desc) 3263 i = 0; 3264 tx_buf->m_head = NULL; 3265 } 3266 3267 KASSERT(txr->tx_avail > nsegs, ("invalid avail TX desc\n")); 3268 txr->next_avail_desc = i; 3269 txr->tx_avail -= nsegs; 3270 3271 tx_buf->m_head = m_head; 3272 tx_buf_mapped->map = tx_buf->map; 3273 tx_buf->map = map; 3274 3275 /* 3276 * Last Descriptor of Packet needs End Of Packet (EOP) 3277 */ 3278 txd->read.cmd_type_len |= htole32(E1000_ADVTXD_DCMD_EOP | cmd_rs); 3279 3280 /* 3281 * Advance the Transmit Descriptor Tail (TDT), this tells the E1000 3282 * that this frame is available to transmit. 3283 */ 3284 E1000_WRITE_REG(&txr->sc->hw, E1000_TDT(txr->me), i); 3285 ++txr->tx_packets; 3286 3287 return 0; 3288 } 3289 3290 static void 3291 igb_start(struct ifnet *ifp) 3292 { 3293 struct igb_softc *sc = ifp->if_softc; 3294 struct igb_tx_ring *txr = &sc->tx_rings[0]; 3295 struct mbuf *m_head; 3296 3297 ASSERT_SERIALIZED(&txr->tx_serialize); 3298 3299 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 3300 return; 3301 3302 if (!sc->link_active) { 3303 ifq_purge(&ifp->if_snd); 3304 return; 3305 } 3306 3307 if (!IGB_IS_NOT_OACTIVE(txr)) 3308 igb_txeof(txr); 3309 3310 while (!ifq_is_empty(&ifp->if_snd)) { 3311 if (IGB_IS_OACTIVE(txr)) { 3312 ifp->if_flags |= IFF_OACTIVE; 3313 /* Set watchdog on */ 3314 ifp->if_timer = 5; 3315 break; 3316 } 3317 3318 m_head = ifq_dequeue(&ifp->if_snd, NULL); 3319 if (m_head == NULL) 3320 break; 3321 3322 if (igb_encap(txr, &m_head)) { 3323 ifp->if_oerrors++; 3324 continue; 3325 } 3326 3327 /* Send a copy of the frame to the BPF listener */ 3328 ETHER_BPF_MTAP(ifp, m_head); 3329 } 3330 } 3331 3332 static void 3333 igb_watchdog(struct ifnet *ifp) 3334 { 3335 struct igb_softc *sc = ifp->if_softc; 3336 struct igb_tx_ring *txr = &sc->tx_rings[0]; 3337 3338 ASSERT_IFNET_SERIALIZED_ALL(ifp); 3339 3340 /* 3341 * If flow control has paused us since last checking 3342 * it invalidates the watchdog timing, so dont run it. 
3343 */ 3344 if (sc->pause_frames) { 3345 sc->pause_frames = 0; 3346 ifp->if_timer = 5; 3347 return; 3348 } 3349 3350 if_printf(ifp, "Watchdog timeout -- resetting\n"); 3351 if_printf(ifp, "Queue(%d) tdh = %d, hw tdt = %d\n", txr->me, 3352 E1000_READ_REG(&sc->hw, E1000_TDH(txr->me)), 3353 E1000_READ_REG(&sc->hw, E1000_TDT(txr->me))); 3354 if_printf(ifp, "TX(%d) desc avail = %d, " 3355 "Next TX to Clean = %d\n", 3356 txr->me, txr->tx_avail, txr->next_to_clean); 3357 3358 ifp->if_oerrors++; 3359 sc->watchdog_events++; 3360 3361 igb_init(sc); 3362 if (!ifq_is_empty(&ifp->if_snd)) 3363 if_devstart(ifp); 3364 } 3365 3366 static void 3367 igb_set_eitr(struct igb_softc *sc, int idx, int rate) 3368 { 3369 uint32_t eitr = 0; 3370 3371 if (rate > 0) { 3372 if (sc->hw.mac.type == e1000_82575) { 3373 eitr = 1000000000 / 256 / rate; 3374 /* 3375 * NOTE: 3376 * Document is wrong on the 2 bits left shift 3377 */ 3378 } else { 3379 eitr = 1000000 / rate; 3380 eitr <<= IGB_EITR_INTVL_SHIFT; 3381 } 3382 3383 if (eitr == 0) { 3384 /* Don't disable it */ 3385 eitr = 1 << IGB_EITR_INTVL_SHIFT; 3386 } else if (eitr > IGB_EITR_INTVL_MASK) { 3387 /* Don't allow it to be too large */ 3388 eitr = IGB_EITR_INTVL_MASK; 3389 } 3390 } 3391 if (sc->hw.mac.type == e1000_82575) 3392 eitr |= eitr << 16; 3393 else 3394 eitr |= E1000_EITR_CNT_IGNR; 3395 E1000_WRITE_REG(&sc->hw, E1000_EITR(idx), eitr); 3396 } 3397 3398 static int 3399 igb_sysctl_intr_rate(SYSCTL_HANDLER_ARGS) 3400 { 3401 struct igb_softc *sc = (void *)arg1; 3402 struct ifnet *ifp = &sc->arpcom.ac_if; 3403 int error, intr_rate; 3404 3405 intr_rate = sc->intr_rate; 3406 error = sysctl_handle_int(oidp, &intr_rate, 0, req); 3407 if (error || req->newptr == NULL) 3408 return error; 3409 if (intr_rate < 0) 3410 return EINVAL; 3411 3412 ifnet_serialize_all(ifp); 3413 3414 sc->intr_rate = intr_rate; 3415 if (ifp->if_flags & IFF_RUNNING) 3416 igb_set_eitr(sc, 0, sc->intr_rate); 3417 3418 if (bootverbose) 3419 if_printf(ifp, "interrupt rate set to %d/sec\n", sc->intr_rate); 3420 3421 ifnet_deserialize_all(ifp); 3422 3423 return 0; 3424 } 3425 3426 static int 3427 igb_sysctl_msix_rate(SYSCTL_HANDLER_ARGS) 3428 { 3429 struct igb_msix_data *msix = (void *)arg1; 3430 struct igb_softc *sc = msix->msix_sc; 3431 struct ifnet *ifp = &sc->arpcom.ac_if; 3432 int error, msix_rate; 3433 3434 msix_rate = msix->msix_rate; 3435 error = sysctl_handle_int(oidp, &msix_rate, 0, req); 3436 if (error || req->newptr == NULL) 3437 return error; 3438 if (msix_rate < 0) 3439 return EINVAL; 3440 3441 lwkt_serialize_enter(msix->msix_serialize); 3442 3443 msix->msix_rate = msix_rate; 3444 if (ifp->if_flags & IFF_RUNNING) 3445 igb_set_eitr(sc, msix->msix_vector, msix->msix_rate); 3446 3447 if (bootverbose) { 3448 if_printf(ifp, "%s set to %d/sec\n", msix->msix_rate_desc, 3449 msix->msix_rate); 3450 } 3451 3452 lwkt_serialize_exit(msix->msix_serialize); 3453 3454 return 0; 3455 } 3456 3457 static int 3458 igb_sysctl_tx_intr_nsegs(SYSCTL_HANDLER_ARGS) 3459 { 3460 struct igb_softc *sc = (void *)arg1; 3461 struct ifnet *ifp = &sc->arpcom.ac_if; 3462 struct igb_tx_ring *txr = &sc->tx_rings[0]; 3463 int error, nsegs; 3464 3465 nsegs = txr->intr_nsegs; 3466 error = sysctl_handle_int(oidp, &nsegs, 0, req); 3467 if (error || req->newptr == NULL) 3468 return error; 3469 if (nsegs <= 0) 3470 return EINVAL; 3471 3472 ifnet_serialize_all(ifp); 3473 3474 if (nsegs >= txr->num_tx_desc - txr->oact_lo_desc || 3475 nsegs >= txr->oact_hi_desc - IGB_MAX_SCATTER) { 3476 error = EINVAL; 3477 } else { 3478 error = 0; 
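/*
 * Illustrative sketch, not part of the original driver: the rate-to-interval
 * conversion done by igb_set_eitr() above.  EITR holds an inter-interrupt
 * interval, so the requested rate (interrupts/second) is inverted; the
 * 82575 apparently counts in 256 ns units, later MACs take a microsecond
 * count shifted into the interval field.  The example_* helper below is
 * hypothetical.
 */
#if 0	/* example only */
static uint32_t
example_rate_to_eitr(int rate, boolean_t is_82575)
{
	uint32_t eitr;

	if (rate <= 0)
		return 0;			/* leave throttling disabled */
	if (is_82575)
		eitr = 1000000000 / 256 / rate;	/* 256 ns granularity */
	else
		eitr = (1000000 / rate) << IGB_EITR_INTVL_SHIFT; /* usec */
	if (eitr == 0)
		eitr = 1 << IGB_EITR_INTVL_SHIFT; /* don't disable by accident */
	return eitr;
}
#endif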
3479 txr->intr_nsegs = nsegs; 3480 } 3481 3482 ifnet_deserialize_all(ifp); 3483 3484 return error; 3485 } 3486 3487 static void 3488 igb_init_intr(struct igb_softc *sc) 3489 { 3490 igb_set_intr_mask(sc); 3491 3492 if ((sc->flags & IGB_FLAG_SHARED_INTR) == 0) 3493 igb_init_unshared_intr(sc); 3494 3495 if (sc->intr_type != PCI_INTR_TYPE_MSIX) { 3496 igb_set_eitr(sc, 0, sc->intr_rate); 3497 } else { 3498 int i; 3499 3500 for (i = 0; i < sc->msix_cnt; ++i) 3501 igb_set_eitr(sc, i, sc->msix_data[i].msix_rate); 3502 } 3503 } 3504 3505 static void 3506 igb_init_unshared_intr(struct igb_softc *sc) 3507 { 3508 struct e1000_hw *hw = &sc->hw; 3509 const struct igb_rx_ring *rxr; 3510 const struct igb_tx_ring *txr; 3511 uint32_t ivar, index; 3512 int i; 3513 3514 /* 3515 * Enable extended mode 3516 */ 3517 if (sc->hw.mac.type != e1000_82575) { 3518 uint32_t gpie; 3519 int ivar_max; 3520 3521 gpie = E1000_GPIE_NSICR; 3522 if (sc->intr_type == PCI_INTR_TYPE_MSIX) { 3523 gpie |= E1000_GPIE_MSIX_MODE | 3524 E1000_GPIE_EIAME | 3525 E1000_GPIE_PBA; 3526 } 3527 E1000_WRITE_REG(hw, E1000_GPIE, gpie); 3528 3529 /* 3530 * Clear IVARs 3531 */ 3532 switch (sc->hw.mac.type) { 3533 case e1000_82580: 3534 ivar_max = IGB_MAX_IVAR_82580; 3535 break; 3536 3537 case e1000_i350: 3538 ivar_max = IGB_MAX_IVAR_I350; 3539 break; 3540 3541 case e1000_vfadapt: 3542 case e1000_vfadapt_i350: 3543 ivar_max = IGB_MAX_IVAR_VF; 3544 break; 3545 3546 case e1000_82576: 3547 ivar_max = IGB_MAX_IVAR_82576; 3548 break; 3549 3550 default: 3551 panic("unknown mac type %d\n", sc->hw.mac.type); 3552 } 3553 for (i = 0; i < ivar_max; ++i) 3554 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, i, 0); 3555 E1000_WRITE_REG(hw, E1000_IVAR_MISC, 0); 3556 } else { 3557 uint32_t tmp; 3558 3559 KASSERT(sc->intr_type != PCI_INTR_TYPE_MSIX, 3560 ("82575 w/ MSI-X")); 3561 tmp = E1000_READ_REG(hw, E1000_CTRL_EXT); 3562 tmp |= E1000_CTRL_EXT_IRCA; 3563 E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmp); 3564 } 3565 3566 /* 3567 * Map TX/RX interrupts to EICR 3568 */ 3569 switch (sc->hw.mac.type) { 3570 case e1000_82580: 3571 case e1000_i350: 3572 case e1000_vfadapt: 3573 case e1000_vfadapt_i350: 3574 /* RX entries */ 3575 for (i = 0; i < sc->rx_ring_inuse; ++i) { 3576 rxr = &sc->rx_rings[i]; 3577 3578 index = i >> 1; 3579 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); 3580 3581 if (i & 1) { 3582 ivar &= 0xff00ffff; 3583 ivar |= 3584 (rxr->rx_intr_bit | E1000_IVAR_VALID) << 16; 3585 } else { 3586 ivar &= 0xffffff00; 3587 ivar |= 3588 (rxr->rx_intr_bit | E1000_IVAR_VALID); 3589 } 3590 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); 3591 } 3592 /* TX entries */ 3593 for (i = 0; i < sc->tx_ring_cnt; ++i) { 3594 txr = &sc->tx_rings[i]; 3595 3596 index = i >> 1; 3597 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); 3598 3599 if (i & 1) { 3600 ivar &= 0x00ffffff; 3601 ivar |= 3602 (txr->tx_intr_bit | E1000_IVAR_VALID) << 24; 3603 } else { 3604 ivar &= 0xffff00ff; 3605 ivar |= 3606 (txr->tx_intr_bit | E1000_IVAR_VALID) << 8; 3607 } 3608 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); 3609 } 3610 if (sc->intr_type == PCI_INTR_TYPE_MSIX) { 3611 ivar = (sc->sts_intr_bit | E1000_IVAR_VALID) << 8; 3612 E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar); 3613 } 3614 break; 3615 3616 case e1000_82576: 3617 /* RX entries */ 3618 for (i = 0; i < sc->rx_ring_inuse; ++i) { 3619 rxr = &sc->rx_rings[i]; 3620 3621 index = i & 0x7; /* Each IVAR has two entries */ 3622 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); 3623 3624 if (i < 8) { 3625 ivar &= 0xffffff00; 3626 ivar |= 3627 
(rxr->rx_intr_bit | E1000_IVAR_VALID); 3628 } else { 3629 ivar &= 0xff00ffff; 3630 ivar |= 3631 (rxr->rx_intr_bit | E1000_IVAR_VALID) << 16; 3632 } 3633 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); 3634 } 3635 /* TX entries */ 3636 for (i = 0; i < sc->tx_ring_cnt; ++i) { 3637 txr = &sc->tx_rings[i]; 3638 3639 index = i & 0x7; /* Each IVAR has two entries */ 3640 ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); 3641 3642 if (i < 8) { 3643 ivar &= 0xffff00ff; 3644 ivar |= 3645 (txr->tx_intr_bit | E1000_IVAR_VALID) << 8; 3646 } else { 3647 ivar &= 0x00ffffff; 3648 ivar |= 3649 (txr->tx_intr_bit | E1000_IVAR_VALID) << 24; 3650 } 3651 E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); 3652 } 3653 if (sc->intr_type == PCI_INTR_TYPE_MSIX) { 3654 ivar = (sc->sts_intr_bit | E1000_IVAR_VALID) << 8; 3655 E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar); 3656 } 3657 break; 3658 3659 case e1000_82575: 3660 /* 3661 * Enable necessary interrupt bits. 3662 * 3663 * The name of the register is confusing; in addition to 3664 * configuring the first vector of MSI-X, it also configures 3665 * which bits of EICR could be set by the hardware even when 3666 * MSI or line interrupt is used; it thus controls interrupt 3667 * generation. It MUST be configured explicitly; the default 3668 * value mentioned in the datasheet is wrong: RX queue0 and 3669 * TX queue0 are NOT enabled by default. 3670 */ 3671 E1000_WRITE_REG(&sc->hw, E1000_MSIXBM(0), sc->intr_mask); 3672 break; 3673 3674 default: 3675 panic("unknown mac type %d\n", sc->hw.mac.type); 3676 } 3677 } 3678 3679 static int 3680 igb_setup_intr(struct igb_softc *sc) 3681 { 3682 struct ifnet *ifp = &sc->arpcom.ac_if; 3683 int error; 3684 3685 if (sc->intr_type == PCI_INTR_TYPE_MSIX) 3686 return igb_msix_setup(sc); 3687 3688 error = bus_setup_intr(sc->dev, sc->intr_res, INTR_MPSAFE, 3689 (sc->flags & IGB_FLAG_SHARED_INTR) ? 
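/*
 * Illustrative sketch, not part of the original driver: the read-modify-write
 * byte-lane update used by the IVAR programming in igb_init_unshared_intr()
 * above.  Each 32-bit IVAR register holds several one-byte queue entries;
 * an entry is that queue's EICR bit number with E1000_IVAR_VALID set, merged
 * into its lane without disturbing the others.  The example_* helper below
 * is hypothetical.
 */
#if 0	/* example only */
static uint32_t
example_ivar_set_lane(uint32_t ivar, int byte_lane, int intr_bit)
{
	uint32_t entry = (uint32_t)intr_bit | E1000_IVAR_VALID;

	ivar &= ~(0xffU << (8 * byte_lane));	/* clear the target lane */
	ivar |= entry << (8 * byte_lane);	/* install queue -> vector */
	return ivar;
}
#endif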
igb_intr_shared : igb_intr, 3690 sc, &sc->intr_tag, &sc->main_serialize); 3691 if (error) { 3692 device_printf(sc->dev, "Failed to register interrupt handler"); 3693 return error; 3694 } 3695 3696 ifp->if_cpuid = rman_get_cpuid(sc->intr_res); 3697 KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus); 3698 3699 return 0; 3700 } 3701 3702 static void 3703 igb_set_txintr_mask(struct igb_tx_ring *txr, int *intr_bit0, int intr_bitmax) 3704 { 3705 if (txr->sc->hw.mac.type == e1000_82575) { 3706 txr->tx_intr_bit = 0; /* unused */ 3707 switch (txr->me) { 3708 case 0: 3709 txr->tx_intr_mask = E1000_EICR_TX_QUEUE0; 3710 break; 3711 case 1: 3712 txr->tx_intr_mask = E1000_EICR_TX_QUEUE1; 3713 break; 3714 case 2: 3715 txr->tx_intr_mask = E1000_EICR_TX_QUEUE2; 3716 break; 3717 case 3: 3718 txr->tx_intr_mask = E1000_EICR_TX_QUEUE3; 3719 break; 3720 default: 3721 panic("unsupported # of TX ring, %d\n", txr->me); 3722 } 3723 } else { 3724 int intr_bit = *intr_bit0; 3725 3726 txr->tx_intr_bit = intr_bit % intr_bitmax; 3727 txr->tx_intr_mask = 1 << txr->tx_intr_bit; 3728 3729 *intr_bit0 = intr_bit + 1; 3730 } 3731 } 3732 3733 static void 3734 igb_set_rxintr_mask(struct igb_rx_ring *rxr, int *intr_bit0, int intr_bitmax) 3735 { 3736 if (rxr->sc->hw.mac.type == e1000_82575) { 3737 rxr->rx_intr_bit = 0; /* unused */ 3738 switch (rxr->me) { 3739 case 0: 3740 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE0; 3741 break; 3742 case 1: 3743 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE1; 3744 break; 3745 case 2: 3746 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE2; 3747 break; 3748 case 3: 3749 rxr->rx_intr_mask = E1000_EICR_RX_QUEUE3; 3750 break; 3751 default: 3752 panic("unsupported # of RX ring, %d\n", rxr->me); 3753 } 3754 } else { 3755 int intr_bit = *intr_bit0; 3756 3757 rxr->rx_intr_bit = intr_bit % intr_bitmax; 3758 rxr->rx_intr_mask = 1 << rxr->rx_intr_bit; 3759 3760 *intr_bit0 = intr_bit + 1; 3761 } 3762 } 3763 3764 static void 3765 igb_serialize(struct ifnet *ifp, enum ifnet_serialize slz) 3766 { 3767 struct igb_softc *sc = ifp->if_softc; 3768 3769 ifnet_serialize_array_enter(sc->serializes, sc->serialize_cnt, 3770 sc->tx_serialize, sc->rx_serialize, slz); 3771 } 3772 3773 static void 3774 igb_deserialize(struct ifnet *ifp, enum ifnet_serialize slz) 3775 { 3776 struct igb_softc *sc = ifp->if_softc; 3777 3778 ifnet_serialize_array_exit(sc->serializes, sc->serialize_cnt, 3779 sc->tx_serialize, sc->rx_serialize, slz); 3780 } 3781 3782 static int 3783 igb_tryserialize(struct ifnet *ifp, enum ifnet_serialize slz) 3784 { 3785 struct igb_softc *sc = ifp->if_softc; 3786 3787 return ifnet_serialize_array_try(sc->serializes, sc->serialize_cnt, 3788 sc->tx_serialize, sc->rx_serialize, slz); 3789 } 3790 3791 #ifdef INVARIANTS 3792 3793 static void 3794 igb_serialize_assert(struct ifnet *ifp, enum ifnet_serialize slz, 3795 boolean_t serialized) 3796 { 3797 struct igb_softc *sc = ifp->if_softc; 3798 3799 ifnet_serialize_array_assert(sc->serializes, sc->serialize_cnt, 3800 sc->tx_serialize, sc->rx_serialize, slz, serialized); 3801 } 3802 3803 #endif /* INVARIANTS */ 3804 3805 static void 3806 igb_set_intr_mask(struct igb_softc *sc) 3807 { 3808 int i; 3809 3810 sc->intr_mask = sc->sts_intr_mask; 3811 for (i = 0; i < sc->rx_ring_inuse; ++i) 3812 sc->intr_mask |= sc->rx_rings[i].rx_intr_mask; 3813 for (i = 0; i < sc->tx_ring_cnt; ++i) 3814 sc->intr_mask |= sc->tx_rings[i].tx_intr_mask; 3815 if (bootverbose) 3816 device_printf(sc->dev, "intr mask 0x%08x\n", sc->intr_mask); 3817 } 3818 3819 static int 3820 igb_alloc_intr(struct igb_softc *sc) 
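/*
 * Illustrative sketch, not part of the original driver: how the per-ring
 * EICR bits handed out by igb_set_txintr_mask()/igb_set_rxintr_mask() above
 * (round-robin modulo the MAC's TX/RX interrupt bit limit) combine into the
 * single mask built by igb_set_intr_mask(): the status bit OR'ed with every
 * ring's bit.  The example_* helper below is hypothetical.
 */
#if 0	/* example only */
static uint32_t
example_build_intr_mask(uint32_t sts_mask, const uint32_t *ring_masks,
    int nrings)
{
	uint32_t mask = sts_mask;
	int i;

	for (i = 0; i < nrings; ++i)
		mask |= ring_masks[i];	/* each is 1 << (bit % bitmax) */
	return mask;
}
#endif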
3821 { 3822 int i, intr_bit, intr_bitmax; 3823 u_int intr_flags; 3824 3825 igb_msix_try_alloc(sc); 3826 if (sc->intr_type == PCI_INTR_TYPE_MSIX) 3827 goto done; 3828 3829 /* 3830 * Allocate MSI/legacy interrupt resource 3831 */ 3832 sc->intr_type = pci_alloc_1intr(sc->dev, igb_msi_enable, 3833 &sc->intr_rid, &intr_flags); 3834 3835 if (sc->intr_type == PCI_INTR_TYPE_LEGACY) { 3836 int unshared; 3837 3838 unshared = device_getenv_int(sc->dev, "irq.unshared", 0); 3839 if (!unshared) { 3840 sc->flags |= IGB_FLAG_SHARED_INTR; 3841 if (bootverbose) 3842 device_printf(sc->dev, "IRQ shared\n"); 3843 } else { 3844 intr_flags &= ~RF_SHAREABLE; 3845 if (bootverbose) 3846 device_printf(sc->dev, "IRQ unshared\n"); 3847 } 3848 } 3849 3850 sc->intr_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, 3851 &sc->intr_rid, intr_flags); 3852 if (sc->intr_res == NULL) { 3853 device_printf(sc->dev, "Unable to allocate bus resource: " 3854 "interrupt\n"); 3855 return ENXIO; 3856 } 3857 3858 /* 3859 * Setup MSI/legacy interrupt mask 3860 */ 3861 switch (sc->hw.mac.type) { 3862 case e1000_82575: 3863 intr_bitmax = IGB_MAX_TXRXINT_82575; 3864 break; 3865 case e1000_82580: 3866 intr_bitmax = IGB_MAX_TXRXINT_82580; 3867 break; 3868 case e1000_i350: 3869 intr_bitmax = IGB_MAX_TXRXINT_I350; 3870 break; 3871 case e1000_82576: 3872 intr_bitmax = IGB_MAX_TXRXINT_82576; 3873 break; 3874 default: 3875 intr_bitmax = IGB_MIN_TXRXINT; 3876 break; 3877 } 3878 intr_bit = 0; 3879 for (i = 0; i < sc->tx_ring_cnt; ++i) 3880 igb_set_txintr_mask(&sc->tx_rings[i], &intr_bit, intr_bitmax); 3881 for (i = 0; i < sc->rx_ring_cnt; ++i) 3882 igb_set_rxintr_mask(&sc->rx_rings[i], &intr_bit, intr_bitmax); 3883 sc->sts_intr_bit = 0; 3884 sc->sts_intr_mask = E1000_EICR_OTHER; 3885 3886 /* Initialize interrupt rate */ 3887 sc->intr_rate = IGB_INTR_RATE; 3888 done: 3889 igb_set_ring_inuse(sc, FALSE); 3890 igb_set_intr_mask(sc); 3891 return 0; 3892 } 3893 3894 static void 3895 igb_free_intr(struct igb_softc *sc) 3896 { 3897 if (sc->intr_type != PCI_INTR_TYPE_MSIX) { 3898 if (sc->intr_res != NULL) { 3899 bus_release_resource(sc->dev, SYS_RES_IRQ, sc->intr_rid, 3900 sc->intr_res); 3901 } 3902 if (sc->intr_type == PCI_INTR_TYPE_MSI) 3903 pci_release_msi(sc->dev); 3904 } else { 3905 igb_msix_free(sc, TRUE); 3906 } 3907 } 3908 3909 static void 3910 igb_teardown_intr(struct igb_softc *sc) 3911 { 3912 if (sc->intr_type != PCI_INTR_TYPE_MSIX) 3913 bus_teardown_intr(sc->dev, sc->intr_res, sc->intr_tag); 3914 else 3915 igb_msix_teardown(sc, sc->msix_cnt); 3916 } 3917 3918 static void 3919 igb_msix_try_alloc(struct igb_softc *sc) 3920 { 3921 int msix_enable, msix_cnt, msix_cnt2, alloc_cnt; 3922 int i, x, error; 3923 struct igb_msix_data *msix; 3924 boolean_t aggregate, setup = FALSE; 3925 3926 /* 3927 * Don't enable MSI-X on 82575, see: 3928 * 82575 specification update errata #25 3929 */ 3930 if (sc->hw.mac.type == e1000_82575) 3931 return; 3932 3933 /* Don't enable MSI-X on VF */ 3934 if (sc->vf_ifp) 3935 return; 3936 3937 msix_enable = device_getenv_int(sc->dev, "msix.enable", 3938 igb_msix_enable); 3939 if (!msix_enable) 3940 return; 3941 3942 msix_cnt = pci_msix_count(sc->dev); 3943 #ifdef IGB_MSIX_DEBUG 3944 msix_cnt = device_getenv_int(sc->dev, "msix.count", msix_cnt); 3945 #endif 3946 if (msix_cnt <= 1) { 3947 /* One MSI-X model does not make sense */ 3948 return; 3949 } 3950 3951 i = 0; 3952 while ((1 << (i + 1)) <= msix_cnt) 3953 ++i; 3954 msix_cnt2 = 1 << i; 3955 3956 if (bootverbose) { 3957 device_printf(sc->dev, "MSI-X count %d/%d\n", 3958 
msix_cnt2, msix_cnt); 3959 } 3960 3961 KKASSERT(msix_cnt2 <= msix_cnt); 3962 if (msix_cnt == msix_cnt2) { 3963 /* We need at least one MSI-X for link status */ 3964 msix_cnt2 >>= 1; 3965 if (msix_cnt2 <= 1) { 3966 /* One MSI-X for RX/TX does not make sense */ 3967 device_printf(sc->dev, "not enough MSI-X for TX/RX, " 3968 "MSI-X count %d/%d\n", msix_cnt2, msix_cnt); 3969 return; 3970 } 3971 KKASSERT(msix_cnt > msix_cnt2); 3972 3973 if (bootverbose) { 3974 device_printf(sc->dev, "MSI-X count fixup %d/%d\n", 3975 msix_cnt2, msix_cnt); 3976 } 3977 } 3978 3979 sc->rx_ring_msix = sc->rx_ring_cnt; 3980 if (sc->rx_ring_msix > msix_cnt2) 3981 sc->rx_ring_msix = msix_cnt2; 3982 3983 if (msix_cnt >= sc->tx_ring_cnt + sc->rx_ring_msix + 1) { 3984 /* 3985 * Independent TX/RX MSI-X 3986 */ 3987 aggregate = FALSE; 3988 if (bootverbose) 3989 device_printf(sc->dev, "independent TX/RX MSI-X\n"); 3990 alloc_cnt = sc->tx_ring_cnt + sc->rx_ring_msix; 3991 } else { 3992 /* 3993 * Aggregate TX/RX MSI-X 3994 */ 3995 aggregate = TRUE; 3996 if (bootverbose) 3997 device_printf(sc->dev, "aggregate TX/RX MSI-X\n"); 3998 alloc_cnt = msix_cnt2; 3999 if (alloc_cnt > ncpus2) 4000 alloc_cnt = ncpus2; 4001 if (sc->rx_ring_msix > alloc_cnt) 4002 sc->rx_ring_msix = alloc_cnt; 4003 } 4004 ++alloc_cnt; /* For link status */ 4005 4006 if (bootverbose) { 4007 device_printf(sc->dev, "MSI-X alloc %d, RX ring %d\n", 4008 alloc_cnt, sc->rx_ring_msix); 4009 } 4010 4011 sc->msix_mem_rid = PCIR_BAR(IGB_MSIX_BAR); 4012 sc->msix_mem_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, 4013 &sc->msix_mem_rid, RF_ACTIVE); 4014 if (sc->msix_mem_res == NULL) { 4015 device_printf(sc->dev, "Unable to map MSI-X table\n"); 4016 return; 4017 } 4018 4019 sc->msix_cnt = alloc_cnt; 4020 sc->msix_data = kmalloc(sizeof(struct igb_msix_data) * sc->msix_cnt, 4021 M_DEVBUF, M_WAITOK | M_ZERO); 4022 for (x = 0; x < sc->msix_cnt; ++x) { 4023 msix = &sc->msix_data[x]; 4024 4025 lwkt_serialize_init(&msix->msix_serialize0); 4026 msix->msix_sc = sc; 4027 msix->msix_rid = -1; 4028 msix->msix_vector = x; 4029 msix->msix_mask = 1 << msix->msix_vector; 4030 msix->msix_rate = IGB_INTR_RATE; 4031 } 4032 4033 x = 0; 4034 if (!aggregate) { 4035 int offset, offset_def; 4036 4037 if (sc->rx_ring_msix == ncpus2) { 4038 offset = 0; 4039 } else { 4040 offset_def = (sc->rx_ring_msix * 4041 device_get_unit(sc->dev)) % ncpus2; 4042 4043 offset = device_getenv_int(sc->dev, 4044 "msix.rxoff", offset_def); 4045 if (offset >= ncpus2 || 4046 offset % sc->rx_ring_msix != 0) { 4047 device_printf(sc->dev, 4048 "invalid msix.rxoff %d, use %d\n", 4049 offset, offset_def); 4050 offset = offset_def; 4051 } 4052 } 4053 4054 /* RX rings */ 4055 for (i = 0; i < sc->rx_ring_msix; ++i) { 4056 struct igb_rx_ring *rxr = &sc->rx_rings[i]; 4057 4058 KKASSERT(x < sc->msix_cnt); 4059 msix = &sc->msix_data[x++]; 4060 rxr->rx_intr_bit = msix->msix_vector; 4061 rxr->rx_intr_mask = msix->msix_mask; 4062 4063 msix->msix_serialize = &rxr->rx_serialize; 4064 msix->msix_func = igb_msix_rx; 4065 msix->msix_arg = rxr; 4066 msix->msix_cpuid = i + offset; 4067 KKASSERT(msix->msix_cpuid < ncpus2); 4068 ksnprintf(msix->msix_desc, sizeof(msix->msix_desc), 4069 "%s rx%d", device_get_nameunit(sc->dev), i); 4070 msix->msix_rate = IGB_MSIX_RX_RATE; 4071 ksnprintf(msix->msix_rate_desc, 4072 sizeof(msix->msix_rate_desc), 4073 "RX%d interrupt rate", i); 4074 } 4075 4076 offset_def = device_get_unit(sc->dev) % ncpus2; 4077 offset = device_getenv_int(sc->dev, "msix.txoff", offset_def); 4078 if (offset >= ncpus2) { 4079 
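/*
 * Illustrative sketch, not part of the original driver: the vector budgeting
 * done by igb_msix_try_alloc() above.  The usable vector count is rounded
 * down to a power of two, one vector is reserved for link-status interrupts,
 * and independent TX/RX vectors are used only when the budget covers every
 * TX ring plus every RX ring plus the status vector.  The clamping to ncpus2
 * in the aggregate case is omitted here.  The example_* helper below is
 * hypothetical.
 */
#if 0	/* example only */
static int
example_msix_budget(int msix_cnt, int tx_rings, int rx_rings,
    boolean_t *independent)
{
	int pow2 = 1;

	while ((pow2 << 1) <= msix_cnt)
		pow2 <<= 1;			/* round down to power of 2 */
	if (pow2 == msix_cnt)
		pow2 >>= 1;			/* keep one for link status */
	if (rx_rings > pow2)
		rx_rings = pow2;
	*independent = (msix_cnt >= tx_rings + rx_rings + 1);
	return (*independent ? tx_rings + rx_rings : pow2) + 1;
}
#endif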
device_printf(sc->dev, "invalid msix.txoff %d, " 4080 "use %d\n", offset, offset_def); 4081 offset = offset_def; 4082 } 4083 4084 /* TX rings */ 4085 for (i = 0; i < sc->tx_ring_cnt; ++i) { 4086 struct igb_tx_ring *txr = &sc->tx_rings[i]; 4087 4088 KKASSERT(x < sc->msix_cnt); 4089 msix = &sc->msix_data[x++]; 4090 txr->tx_intr_bit = msix->msix_vector; 4091 txr->tx_intr_mask = msix->msix_mask; 4092 4093 msix->msix_serialize = &txr->tx_serialize; 4094 msix->msix_func = igb_msix_tx; 4095 msix->msix_arg = txr; 4096 msix->msix_cpuid = i + offset; 4097 sc->msix_tx_cpuid = msix->msix_cpuid; /* XXX */ 4098 KKASSERT(msix->msix_cpuid < ncpus2); 4099 ksnprintf(msix->msix_desc, sizeof(msix->msix_desc), 4100 "%s tx%d", device_get_nameunit(sc->dev), i); 4101 msix->msix_rate = IGB_MSIX_TX_RATE; 4102 ksnprintf(msix->msix_rate_desc, 4103 sizeof(msix->msix_rate_desc), 4104 "TX%d interrupt rate", i); 4105 } 4106 } else { 4107 /* TODO */ 4108 error = EOPNOTSUPP; 4109 goto back; 4110 } 4111 4112 /* 4113 * Link status 4114 */ 4115 KKASSERT(x < sc->msix_cnt); 4116 msix = &sc->msix_data[x++]; 4117 sc->sts_intr_bit = msix->msix_vector; 4118 sc->sts_intr_mask = msix->msix_mask; 4119 4120 msix->msix_serialize = &sc->main_serialize; 4121 msix->msix_func = igb_msix_status; 4122 msix->msix_arg = sc; 4123 msix->msix_cpuid = 0; /* TODO tunable */ 4124 ksnprintf(msix->msix_desc, sizeof(msix->msix_desc), "%s sts", 4125 device_get_nameunit(sc->dev)); 4126 ksnprintf(msix->msix_rate_desc, sizeof(msix->msix_rate_desc), 4127 "status interrupt rate"); 4128 4129 KKASSERT(x == sc->msix_cnt); 4130 4131 error = pci_setup_msix(sc->dev); 4132 if (error) { 4133 device_printf(sc->dev, "Setup MSI-X failed\n"); 4134 goto back; 4135 } 4136 setup = TRUE; 4137 4138 for (i = 0; i < sc->msix_cnt; ++i) { 4139 msix = &sc->msix_data[i]; 4140 4141 error = pci_alloc_msix_vector(sc->dev, msix->msix_vector, 4142 &msix->msix_rid, msix->msix_cpuid); 4143 if (error) { 4144 device_printf(sc->dev, 4145 "Unable to allocate MSI-X %d on cpu%d\n", 4146 msix->msix_vector, msix->msix_cpuid); 4147 goto back; 4148 } 4149 4150 msix->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, 4151 &msix->msix_rid, RF_ACTIVE); 4152 if (msix->msix_res == NULL) { 4153 device_printf(sc->dev, 4154 "Unable to allocate MSI-X %d resource\n", 4155 msix->msix_vector); 4156 error = ENOMEM; 4157 goto back; 4158 } 4159 } 4160 4161 pci_enable_msix(sc->dev); 4162 sc->intr_type = PCI_INTR_TYPE_MSIX; 4163 back: 4164 if (error) 4165 igb_msix_free(sc, setup); 4166 } 4167 4168 static void 4169 igb_msix_free(struct igb_softc *sc, boolean_t setup) 4170 { 4171 int i; 4172 4173 KKASSERT(sc->msix_cnt > 1); 4174 4175 for (i = 0; i < sc->msix_cnt; ++i) { 4176 struct igb_msix_data *msix = &sc->msix_data[i]; 4177 4178 if (msix->msix_res != NULL) { 4179 bus_release_resource(sc->dev, SYS_RES_IRQ, 4180 msix->msix_rid, msix->msix_res); 4181 } 4182 if (msix->msix_rid >= 0) 4183 pci_release_msix_vector(sc->dev, msix->msix_rid); 4184 } 4185 if (setup) 4186 pci_teardown_msix(sc->dev); 4187 4188 sc->msix_cnt = 0; 4189 kfree(sc->msix_data, M_DEVBUF); 4190 sc->msix_data = NULL; 4191 } 4192 4193 static int 4194 igb_msix_setup(struct igb_softc *sc) 4195 { 4196 struct ifnet *ifp = &sc->arpcom.ac_if; 4197 int i; 4198 4199 for (i = 0; i < sc->msix_cnt; ++i) { 4200 struct igb_msix_data *msix = &sc->msix_data[i]; 4201 int error; 4202 4203 error = bus_setup_intr_descr(sc->dev, msix->msix_res, 4204 INTR_MPSAFE, msix->msix_func, msix->msix_arg, 4205 &msix->msix_handle, msix->msix_serialize, msix->msix_desc); 4206 if 
(error) { 4207 device_printf(sc->dev, "could not set up %s " 4208 "interrupt handler.\n", msix->msix_desc); 4209 igb_msix_teardown(sc, i); 4210 return error; 4211 } 4212 } 4213 ifp->if_cpuid = sc->msix_tx_cpuid; 4214 4215 return 0; 4216 } 4217 4218 static void 4219 igb_msix_teardown(struct igb_softc *sc, int msix_cnt) 4220 { 4221 int i; 4222 4223 for (i = 0; i < msix_cnt; ++i) { 4224 struct igb_msix_data *msix = &sc->msix_data[i]; 4225 4226 bus_teardown_intr(sc->dev, msix->msix_res, msix->msix_handle); 4227 } 4228 } 4229 4230 static void 4231 igb_msix_rx(void *arg) 4232 { 4233 struct igb_rx_ring *rxr = arg; 4234 4235 ASSERT_SERIALIZED(&rxr->rx_serialize); 4236 igb_rxeof(rxr, -1); 4237 4238 E1000_WRITE_REG(&rxr->sc->hw, E1000_EIMS, rxr->rx_intr_mask); 4239 } 4240 4241 static void 4242 igb_msix_tx(void *arg) 4243 { 4244 struct igb_tx_ring *txr = arg; 4245 struct ifnet *ifp = &txr->sc->arpcom.ac_if; 4246 4247 ASSERT_SERIALIZED(&txr->tx_serialize); 4248 4249 igb_txeof(txr); 4250 if (!ifq_is_empty(&ifp->if_snd)) 4251 if_devstart(ifp); 4252 4253 E1000_WRITE_REG(&txr->sc->hw, E1000_EIMS, txr->tx_intr_mask); 4254 } 4255 4256 static void 4257 igb_msix_status(void *arg) 4258 { 4259 struct igb_softc *sc = arg; 4260 uint32_t icr; 4261 4262 ASSERT_SERIALIZED(&sc->main_serialize); 4263 4264 icr = E1000_READ_REG(&sc->hw, E1000_ICR); 4265 if (icr & E1000_ICR_LSC) { 4266 sc->hw.mac.get_link_status = 1; 4267 igb_update_link_status(sc); 4268 } 4269 4270 E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->sts_intr_mask); 4271 } 4272 4273 static void 4274 igb_set_ring_inuse(struct igb_softc *sc, boolean_t polling) 4275 { 4276 if (!IGB_ENABLE_HWRSS(sc)) 4277 return; 4278 4279 if (sc->intr_type != PCI_INTR_TYPE_MSIX || polling) 4280 sc->rx_ring_inuse = IGB_MIN_RING_RSS; 4281 else 4282 sc->rx_ring_inuse = sc->rx_ring_msix; 4283 if (bootverbose) { 4284 device_printf(sc->dev, "RX rings %d/%d\n", 4285 sc->rx_ring_inuse, sc->rx_ring_cnt); 4286 } 4287 } 4288
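/*
 * Illustrative sketch, not part of the original driver: the ring-count
 * decision made by igb_set_ring_inuse() above.  Spreading RX across multiple
 * rings only pays off with per-ring MSI-X vectors; with a shared MSI/legacy
 * interrupt or with polling, the driver drops back to the minimum RSS ring
 * count.  The example_* helper below is hypothetical, and the non-RSS case
 * (where rx_ring_inuse is assumed to stay at a single ring) is a guess.
 */
#if 0	/* example only */
static int
example_rings_inuse(boolean_t hw_rss, boolean_t msix, boolean_t polling,
    int rx_ring_msix)
{
	if (!hw_rss)
		return 1;			/* assumed single-ring default */
	if (!msix || polling)
		return IGB_MIN_RING_RSS;	/* minimal RSS ring count */
	return rx_ring_msix;
}
#endif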