/**************************************************************************

Copyright (c) 2001-2005, Intel Corporation
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.

 3. Neither the name of the Intel Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/

/* $OpenBSD: if_ixgb.c,v 1.59 2011/04/05 18:01:21 henning Exp $ */

#include <dev/pci/if_ixgb.h>

#ifdef IXGB_DEBUG
/*********************************************************************
 *  Set this to one to display debug statistics
 *********************************************************************/
int ixgb_display_debug_stats = 0;
#endif

/*********************************************************************
 *  Driver version
 *********************************************************************/

#define IXGB_DRIVER_VERSION "6.1.0"

/*********************************************************************
 *  PCI Device ID Table
 *********************************************************************/

const struct pci_matchid ixgb_devices[] = {
    { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82597EX },
    { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82597EX_SR },
    { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82597EX_LR },
    { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82597EX_CX4 },
};

/*********************************************************************
 *  Function prototypes
 *********************************************************************/
int  ixgb_probe(struct device *, void *, void *);
void ixgb_attach(struct device *, struct device *, void *);
int  ixgb_intr(void *);
void ixgb_start(struct ifnet *);
int  ixgb_ioctl(struct ifnet *, u_long, caddr_t);
void ixgb_watchdog(struct ifnet *);
void ixgb_init(void *);
void ixgb_stop(void *);
void ixgb_media_status(struct ifnet *, struct ifmediareq *);
int  ixgb_media_change(struct ifnet *);
void ixgb_identify_hardware(struct ixgb_softc *);
int  ixgb_allocate_pci_resources(struct ixgb_softc *);
void ixgb_free_pci_resources(struct ixgb_softc *);
void ixgb_local_timer(void *);
int  ixgb_hardware_init(struct ixgb_softc *);
void ixgb_setup_interface(struct ixgb_softc *);
int  ixgb_setup_transmit_structures(struct ixgb_softc *);
void ixgb_initialize_transmit_unit(struct ixgb_softc *);
int  ixgb_setup_receive_structures(struct ixgb_softc *);
void ixgb_initialize_receive_unit(struct ixgb_softc *);
void ixgb_enable_intr(struct ixgb_softc *);
void ixgb_disable_intr(struct ixgb_softc *);
void ixgb_free_transmit_structures(struct ixgb_softc *);
void ixgb_free_receive_structures(struct ixgb_softc *);
void ixgb_update_stats_counters(struct ixgb_softc *);
void ixgb_txeof(struct ixgb_softc *);
int  ixgb_allocate_receive_structures(struct ixgb_softc *);
int  ixgb_allocate_transmit_structures(struct ixgb_softc *);
void ixgb_rxeof(struct ixgb_softc *, int);
void ixgb_receive_checksum(struct ixgb_softc *,
        struct ixgb_rx_desc *rx_desc, struct mbuf *);
void ixgb_transmit_checksum_setup(struct ixgb_softc *,
        struct mbuf *, u_int8_t *);
void ixgb_set_promisc(struct ixgb_softc *);
void ixgb_set_multi(struct ixgb_softc *);
#ifdef IXGB_DEBUG
void ixgb_print_hw_stats(struct ixgb_softc *);
#endif
void ixgb_update_link_status(struct ixgb_softc *);
int  ixgb_get_buf(struct ixgb_softc *, int i, struct mbuf *);
void ixgb_enable_hw_vlans(struct ixgb_softc *);
int  ixgb_encap(struct ixgb_softc *, struct mbuf *);
int  ixgb_dma_malloc(struct ixgb_softc *, bus_size_t,
        struct ixgb_dma_alloc *, int);
void ixgb_dma_free(struct ixgb_softc *, struct ixgb_dma_alloc *);

/*********************************************************************
 *  OpenBSD Device Interface Entry Points
 *********************************************************************/

struct cfattach ixgb_ca = {
    sizeof(struct ixgb_softc), ixgb_probe, ixgb_attach
};

struct cfdriver ixgb_cd = {
    NULL, "ixgb", DV_IFNET
};

/* some defines for controlling descriptor fetches in h/w */
#define RXDCTL_PTHRESH_DEFAULT 0    /* chip considers prefetch below this */
#define RXDCTL_HTHRESH_DEFAULT 0    /* chip will only prefetch if tail is
                                     * pushed this many descriptors from
                                     * head */
#define RXDCTL_WTHRESH_DEFAULT 0    /* chip writes back at this many or RXT0 */


/*********************************************************************
 *  Device identification routine
 *
 *  ixgb_probe determines if the driver should be loaded on the
 *  adapter based on the PCI vendor/device id of the adapter.
 *
 *  return 0 on no match, positive on match
 *********************************************************************/

int
ixgb_probe(struct device *parent, void *match, void *aux)
{
    INIT_DEBUGOUT("ixgb_probe: begin");

    return (pci_matchbyid((struct pci_attach_args *)aux, ixgb_devices,
        nitems(ixgb_devices)));
}

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *********************************************************************/

void
ixgb_attach(struct device *parent, struct device *self, void *aux)
{
    struct pci_attach_args *pa = aux;
    struct ixgb_softc *sc;
    int tsize, rsize;

    INIT_DEBUGOUT("ixgb_attach: begin");

    sc = (struct ixgb_softc *)self;
    sc->osdep.ixgb_pa = *pa;

    timeout_set(&sc->timer_handle, ixgb_local_timer, sc);

    /* Determine hardware revision */
    ixgb_identify_hardware(sc);

    /* Parameters (to be read from user) */
    sc->num_tx_desc = IXGB_MAX_TXD;
    sc->num_rx_desc = IXGB_MAX_RXD;
    sc->tx_int_delay = TIDV;
    sc->rx_int_delay = RDTR;
    sc->rx_buffer_len = IXGB_RXBUFFER_2048;

    /*
     * These parameters control the automatic generation (Tx) and
     * response (Rx) to Ethernet PAUSE frames.
     */
    sc->hw.fc.high_water = FCRTH;
    sc->hw.fc.low_water = FCRTL;
    sc->hw.fc.pause_time = FCPAUSE;
    sc->hw.fc.send_xon = TRUE;
    sc->hw.fc.type = FLOW_CONTROL;

    /* Set the max frame size assuming standard ethernet sized frames */
    sc->hw.max_frame_size = IXGB_MAX_JUMBO_FRAME_SIZE;

    if (ixgb_allocate_pci_resources(sc))
        goto err_pci;

    tsize = IXGB_ROUNDUP(sc->num_tx_desc * sizeof(struct ixgb_tx_desc),
        IXGB_MAX_TXD * sizeof(struct ixgb_tx_desc));
    tsize = IXGB_ROUNDUP(tsize, PAGE_SIZE);

    /* Allocate Transmit Descriptor ring */
    if (ixgb_dma_malloc(sc, tsize, &sc->txdma, BUS_DMA_NOWAIT)) {
        printf("%s: Unable to allocate TxDescriptor memory\n",
            sc->sc_dv.dv_xname);
        goto err_tx_desc;
    }
    sc->tx_desc_base = (struct ixgb_tx_desc *)sc->txdma.dma_vaddr;

    rsize = IXGB_ROUNDUP(sc->num_rx_desc * sizeof(struct ixgb_rx_desc),
        IXGB_MAX_RXD * sizeof(struct ixgb_rx_desc));
    rsize = IXGB_ROUNDUP(rsize, PAGE_SIZE);

    /* Allocate Receive Descriptor ring */
    if (ixgb_dma_malloc(sc, rsize, &sc->rxdma, BUS_DMA_NOWAIT)) {
        printf("%s: Unable to allocate rx_desc memory\n",
            sc->sc_dv.dv_xname);
        goto err_rx_desc;
    }
    sc->rx_desc_base = (struct ixgb_rx_desc *)sc->rxdma.dma_vaddr;

    /* Initialize the hardware */
    if (ixgb_hardware_init(sc)) {
        printf("%s: Unable to initialize the hardware\n",
            sc->sc_dv.dv_xname);
        goto err_hw_init;
    }

    /* Setup OS specific network interface */
    ixgb_setup_interface(sc);

    /* Initialize statistics */
    ixgb_clear_hw_cntrs(&sc->hw);
    ixgb_update_stats_counters(sc);
    ixgb_update_link_status(sc);

    printf(", address %s\n", ether_sprintf(sc->interface_data.ac_enaddr));

    INIT_DEBUGOUT("ixgb_attach: end");
    return;

err_hw_init:
    ixgb_dma_free(sc, &sc->rxdma);
err_rx_desc:
    ixgb_dma_free(sc, &sc->txdma);
err_tx_desc:
err_pci:
    ixgb_free_pci_resources(sc);
}

/*********************************************************************
 *  Transmit entry point
 *
 *  ixgb_start is called by the stack to initiate a transmit.
 *  The driver will remain in this routine as long as there are
 *  packets to transmit and transmit resources are available.
 *  In case resources are not available, the stack is notified and
 *  the packet is requeued.
 **********************************************************************/

void
ixgb_start(struct ifnet *ifp)
{
    struct mbuf *m_head;
    struct ixgb_softc *sc = ifp->if_softc;
    int post = 0;

    if ((ifp->if_flags & (IFF_OACTIVE | IFF_RUNNING)) != IFF_RUNNING)
        return;

    if (!sc->link_active)
        return;

    bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
        sc->txdma.dma_map->dm_mapsize,
        BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

    for (;;) {
        IFQ_POLL(&ifp->if_snd, m_head);
        if (m_head == NULL)
            break;

        if (ixgb_encap(sc, m_head)) {
            ifp->if_flags |= IFF_OACTIVE;
            break;
        }

        IFQ_DEQUEUE(&ifp->if_snd, m_head);

#if NBPFILTER > 0
        /* Send a copy of the frame to the BPF listener */
        if (ifp->if_bpf)
            bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif

        /* Set timeout in case hardware has problems transmitting */
        ifp->if_timer = IXGB_TX_TIMEOUT;

        post = 1;
    }

    bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
        sc->txdma.dma_map->dm_mapsize,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    /*
     * Advance the Transmit Descriptor Tail (TDT); this tells the
     * hardware that these frames are available to transmit.
     */
    if (post)
        IXGB_WRITE_REG(&sc->hw, TDT, sc->next_avail_tx_desc);
}

/*********************************************************************
 *  Ioctl entry point
 *
 *  ixgb_ioctl is called when the user wants to configure the
 *  interface.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

int
ixgb_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
    struct ixgb_softc *sc = ifp->if_softc;
    struct ifaddr *ifa = (struct ifaddr *)data;
    struct ifreq *ifr = (struct ifreq *)data;
    int s, error = 0;

    s = splnet();

    switch (command) {
    case SIOCSIFADDR:
        IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFADDR (Set Interface "
            "Addr)");
        ifp->if_flags |= IFF_UP;
        if (!(ifp->if_flags & IFF_RUNNING))
            ixgb_init(sc);
#ifdef INET
        if (ifa->ifa_addr->sa_family == AF_INET)
            arp_ifinit(&sc->interface_data, ifa);
#endif /* INET */
        break;

    case SIOCSIFFLAGS:
        IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
        if (ifp->if_flags & IFF_UP) {
            /*
             * If only the PROMISC or ALLMULTI flag changes, then
             * don't do a full re-init of the chip, just update
             * the Rx filter.
             */
            if ((ifp->if_flags & IFF_RUNNING) &&
                ((ifp->if_flags ^ sc->if_flags) &
                (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
                ixgb_set_promisc(sc);
            } else {
                if (!(ifp->if_flags & IFF_RUNNING))
                    ixgb_init(sc);
            }
        } else {
            if (ifp->if_flags & IFF_RUNNING)
                ixgb_stop(sc);
        }
        sc->if_flags = ifp->if_flags;
        break;

    case SIOCSIFMEDIA:
    case SIOCGIFMEDIA:
        IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
        error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
        break;

    default:
        error = ether_ioctl(ifp, &sc->interface_data, command, data);
    }

    if (error == ENETRESET) {
        if (ifp->if_flags & IFF_RUNNING) {
            ixgb_disable_intr(sc);
            ixgb_set_multi(sc);
            ixgb_enable_intr(sc);
        }
        error = 0;
    }

    splx(s);
    return (error);
}

/*********************************************************************
 *  Watchdog entry point
 *
 *  This routine is called whenever hardware quits transmitting.
 *
 **********************************************************************/

void
ixgb_watchdog(struct ifnet *ifp)
{
    struct ixgb_softc *sc = ifp->if_softc;

    /*
     * If we are in this routine because of pause frames, then don't
     * reset the hardware.
     */
    if (IXGB_READ_REG(&sc->hw, STATUS) & IXGB_STATUS_TXOFF) {
        ifp->if_timer = IXGB_TX_TIMEOUT;
        return;
    }

    printf("%s: watchdog timeout -- resetting\n", sc->sc_dv.dv_xname);

    ixgb_init(sc);

    sc->watchdog_events++;
}

/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as an
 *  init entry point in the network interface structure. It is also
 *  used by the driver as a hw/sw initialization routine to get to a
 *  consistent state.
 *
 **********************************************************************/

void
ixgb_init(void *arg)
{
    struct ixgb_softc *sc = arg;
    struct ifnet *ifp = &sc->interface_data.ac_if;
    uint32_t temp_reg;
    int s;

    INIT_DEBUGOUT("ixgb_init: begin");

    s = splnet();

    ixgb_stop(sc);

    /* Get the latest mac address; the user may have set a LAA */
    bcopy(sc->interface_data.ac_enaddr, sc->hw.curr_mac_addr,
        IXGB_ETH_LENGTH_OF_ADDRESS);

    /* Initialize the hardware */
    if (ixgb_hardware_init(sc)) {
        printf("%s: Unable to initialize the hardware\n",
            sc->sc_dv.dv_xname);
        splx(s);
        return;
    }

    if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
        ixgb_enable_hw_vlans(sc);

    /* Prepare transmit descriptors and buffers */
    if (ixgb_setup_transmit_structures(sc)) {
        printf("%s: Could not setup transmit structures\n",
            sc->sc_dv.dv_xname);
        ixgb_stop(sc);
        splx(s);
        return;
    }
    ixgb_initialize_transmit_unit(sc);

    /* Setup Multicast table */
    ixgb_set_multi(sc);

    /* Prepare receive descriptors and buffers */
    if (ixgb_setup_receive_structures(sc)) {
        printf("%s: Could not setup receive structures\n",
            sc->sc_dv.dv_xname);
        ixgb_stop(sc);
        splx(s);
        return;
    }
    ixgb_initialize_receive_unit(sc);

    /* Don't lose promiscuous settings */
    ixgb_set_promisc(sc);

    ifp->if_flags |= IFF_RUNNING;
    ifp->if_flags &= ~IFF_OACTIVE;

    /* Enable jumbo frames */
    IXGB_WRITE_REG(&sc->hw, MFRMS,
        sc->hw.max_frame_size << IXGB_MFRMS_SHIFT);
    temp_reg = IXGB_READ_REG(&sc->hw, CTRL0);
    temp_reg |= IXGB_CTRL0_JFE;
    IXGB_WRITE_REG(&sc->hw, CTRL0, temp_reg);

    timeout_add_sec(&sc->timer_handle, 1);
    ixgb_clear_hw_cntrs(&sc->hw);
    ixgb_enable_intr(sc);

    splx(s);
}

/*********************************************************************
 *
 *  Interrupt Service routine
 *
 **********************************************************************/

int
ixgb_intr(void *arg)
{
    struct ixgb_softc *sc = arg;
    struct ifnet *ifp;
    u_int32_t reg_icr;
    boolean_t rxdmt0 = FALSE;
    int claimed = 0;

    ifp = &sc->interface_data.ac_if;

    for (;;) {
        reg_icr = IXGB_READ_REG(&sc->hw, ICR);
        if (reg_icr == 0)
            break;

        claimed = 1;

        if (reg_icr & IXGB_INT_RXDMT0)
            rxdmt0 = TRUE;

        if (ifp->if_flags & IFF_RUNNING) {
            ixgb_rxeof(sc, -1);
            ixgb_txeof(sc);
        }

        /* Link status change */
        if (reg_icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)) {
            timeout_del(&sc->timer_handle);
            ixgb_check_for_link(&sc->hw);
            ixgb_update_link_status(sc);
            timeout_add_sec(&sc->timer_handle, 1);
        }

        if (rxdmt0 && sc->raidc) {
            IXGB_WRITE_REG(&sc->hw, IMC, IXGB_INT_RXDMT0);
            IXGB_WRITE_REG(&sc->hw, IMS, IXGB_INT_RXDMT0);
        }
    }

    if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
        ixgb_start(ifp);

    return (claimed);
}


/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
 *
 **********************************************************************/
void
ixgb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
    struct ixgb_softc *sc = ifp->if_softc;

    INIT_DEBUGOUT("ixgb_media_status: begin");

    ixgb_check_for_link(&sc->hw);
    ixgb_update_link_status(sc);

    ifmr->ifm_status = IFM_AVALID;
    ifmr->ifm_active = IFM_ETHER;

    if (!sc->hw.link_up) {
        ifmr->ifm_active |= IFM_NONE;
        return;
    }

    ifmr->ifm_status |= IFM_ACTIVE;
    if ((sc->hw.phy_type == ixgb_phy_type_g6104) ||
        (sc->hw.phy_type == ixgb_phy_type_txn17401))
        ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
    else
        ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;

    return;
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called when the user changes speed/duplex using
 *  the media/mediaopt option with ifconfig.
 *
 **********************************************************************/
int
ixgb_media_change(struct ifnet *ifp)
{
    struct ixgb_softc *sc = ifp->if_softc;
    struct ifmedia *ifm = &sc->media;

    INIT_DEBUGOUT("ixgb_media_change: begin");

    if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
        return (EINVAL);

    return (0);
}

/*********************************************************************
 *
 *  This routine maps the mbufs to tx descriptors.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

int
ixgb_encap(struct ixgb_softc *sc, struct mbuf *m_head)
{
    u_int8_t txd_popts;
    int i, j, error = 0;
    bus_dmamap_t map;

    struct ixgb_buffer *tx_buffer;
    struct ixgb_tx_desc *current_tx_desc = NULL;

    /*
     * Force a cleanup if the number of TX descriptors available hits
     * the threshold.
     */
    if (sc->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) {
        ixgb_txeof(sc);
        /* Now do we at least have a minimal? */
        if (sc->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) {
            sc->no_tx_desc_avail1++;
            return (ENOBUFS);
        }
    }

    /*
     * Map the packet for DMA.
     */
    tx_buffer = &sc->tx_buffer_area[sc->next_avail_tx_desc];
    map = tx_buffer->map;

    error = bus_dmamap_load_mbuf(sc->txtag, map,
        m_head, BUS_DMA_NOWAIT);
    if (error != 0) {
        sc->no_tx_dma_setup++;
        return (error);
    }
    IXGB_KASSERT(map->dm_nsegs != 0, ("ixgb_encap: empty packet"));

    if (map->dm_nsegs > sc->num_tx_desc_avail)
        goto fail;

#ifdef IXGB_CSUM_OFFLOAD
    ixgb_transmit_checksum_setup(sc, m_head, &txd_popts);
#else
    txd_popts = 0;
#endif

    i = sc->next_avail_tx_desc;
    for (j = 0; j < map->dm_nsegs; j++) {
        tx_buffer = &sc->tx_buffer_area[i];
        current_tx_desc = &sc->tx_desc_base[i];

        current_tx_desc->buff_addr = htole64(map->dm_segs[j].ds_addr);
        current_tx_desc->cmd_type_len =
            htole32((sc->txd_cmd | map->dm_segs[j].ds_len));
        current_tx_desc->popts = txd_popts;
        if (++i == sc->num_tx_desc)
            i = 0;

        tx_buffer->m_head = NULL;
    }

    sc->num_tx_desc_avail -= map->dm_nsegs;
    sc->next_avail_tx_desc = i;

    /* Find out if we are in VLAN mode */
    if (m_head->m_flags & M_VLANTAG) {
        /* Set the VLAN id */
        current_tx_desc->vlan = htole16(m_head->m_pkthdr.ether_vtag);

        /* Tell hardware to add tag */
        current_tx_desc->cmd_type_len |= htole32(IXGB_TX_DESC_CMD_VLE);
    }

    tx_buffer->m_head = m_head;
    bus_dmamap_sync(sc->txtag, map, 0, map->dm_mapsize,
        BUS_DMASYNC_PREWRITE);

    /*
     * Last Descriptor of Packet needs End Of Packet (EOP)
     */
    current_tx_desc->cmd_type_len |= htole32(IXGB_TX_DESC_CMD_EOP);

    return (0);

fail:
    sc->no_tx_desc_avail2++;
    bus_dmamap_unload(sc->txtag, map);
    return (ENOBUFS);
}

void
ixgb_set_promisc(struct ixgb_softc *sc)
{

    u_int32_t reg_rctl;
    struct ifnet *ifp = &sc->interface_data.ac_if;

    reg_rctl = IXGB_READ_REG(&sc->hw, RCTL);

    if (ifp->if_flags & IFF_PROMISC) {
        reg_rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
    } else if (ifp->if_flags & IFF_ALLMULTI) {
        reg_rctl |= IXGB_RCTL_MPE;
        reg_rctl &= ~IXGB_RCTL_UPE;
    } else {
        reg_rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE);
    }
    IXGB_WRITE_REG(&sc->hw, RCTL, reg_rctl);
}

/*********************************************************************
 *  Multicast Update
 *
 *  This routine is called whenever the multicast address list is updated.
 *
 **********************************************************************/

void
ixgb_set_multi(struct ixgb_softc *sc)
{
    u_int32_t reg_rctl = 0;
    u_int8_t mta[MAX_NUM_MULTICAST_ADDRESSES * IXGB_ETH_LENGTH_OF_ADDRESS];
    int mcnt = 0;
    struct ifnet *ifp = &sc->interface_data.ac_if;
    struct arpcom *ac = &sc->interface_data;
    struct ether_multi *enm;
    struct ether_multistep step;

    IOCTL_DEBUGOUT("ixgb_set_multi: begin");

    ETHER_FIRST_MULTI(step, ac, enm);
    while (enm != NULL) {
        if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
            ifp->if_flags |= IFF_ALLMULTI;
            mcnt = MAX_NUM_MULTICAST_ADDRESSES;
        }
        if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
            break;
        bcopy(enm->enm_addrlo, &mta[mcnt*IXGB_ETH_LENGTH_OF_ADDRESS],
            IXGB_ETH_LENGTH_OF_ADDRESS);
        mcnt++;
        ETHER_NEXT_MULTI(step, enm);
    }

    if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
        reg_rctl = IXGB_READ_REG(&sc->hw, RCTL);
        reg_rctl |= IXGB_RCTL_MPE;
        IXGB_WRITE_REG(&sc->hw, RCTL, reg_rctl);
    } else
        ixgb_mc_addr_list_update(&sc->hw, mta, mcnt, 0);
}


/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status and updates statistics.
 *
 **********************************************************************/

void
ixgb_local_timer(void *arg)
{
    struct ifnet *ifp;
    struct ixgb_softc *sc = arg;
    int s;

    ifp = &sc->interface_data.ac_if;

    s = splnet();

    ixgb_check_for_link(&sc->hw);
    ixgb_update_link_status(sc);
    ixgb_update_stats_counters(sc);
#ifdef IXGB_DEBUG
    if (ixgb_display_debug_stats && ifp->if_flags & IFF_RUNNING)
        ixgb_print_hw_stats(sc);
#endif

    timeout_add_sec(&sc->timer_handle, 1);

    splx(s);
}

void
ixgb_update_link_status(struct ixgb_softc *sc)
{
    struct ifnet *ifp = &sc->interface_data.ac_if;

    if (sc->hw.link_up) {
        if (!sc->link_active) {
            ifp->if_baudrate = IF_Gbps(10);
            sc->link_active = 1;
            ifp->if_link_state = LINK_STATE_FULL_DUPLEX;
            if_link_state_change(ifp);
        }
    } else {
        if (sc->link_active) {
            ifp->if_baudrate = 0;
            sc->link_active = 0;
            ifp->if_link_state = LINK_STATE_DOWN;
            if_link_state_change(ifp);
        }
    }
}

/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC and deallocates TX/RX buffers.
 *
 **********************************************************************/

void
ixgb_stop(void *arg)
{
    struct ifnet *ifp;
    struct ixgb_softc *sc = arg;
    ifp = &sc->interface_data.ac_if;

    INIT_DEBUGOUT("ixgb_stop: begin\n");
    ixgb_disable_intr(sc);
    sc->hw.adapter_stopped = FALSE;
    ixgb_adapter_stop(&sc->hw);
    timeout_del(&sc->timer_handle);

    /* Tell the stack that the interface is no longer active */
    ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

    ixgb_free_transmit_structures(sc);
    ixgb_free_receive_structures(sc);
}


/*********************************************************************
 *
 *  Determine hardware revision.
 *
 **********************************************************************/
void
ixgb_identify_hardware(struct ixgb_softc *sc)
{
    u_int32_t reg;
    struct pci_attach_args *pa = &sc->osdep.ixgb_pa;

    /* Make sure our PCI config space has the necessary stuff set */
    sc->hw.pci_cmd_word = pci_conf_read(pa->pa_pc, pa->pa_tag,
        PCI_COMMAND_STATUS_REG);

    /* Save off the information about this board */
    sc->hw.vendor_id = PCI_VENDOR(pa->pa_id);
    sc->hw.device_id = PCI_PRODUCT(pa->pa_id);

    reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
    sc->hw.revision_id = PCI_REVISION(reg);

    reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
    sc->hw.subsystem_vendor_id = PCI_VENDOR(reg);
    sc->hw.subsystem_id = PCI_PRODUCT(reg);

    /* Set MacType, etc. based on this PCI info */
    switch (sc->hw.device_id) {
    case IXGB_DEVICE_ID_82597EX:
    case IXGB_DEVICE_ID_82597EX_SR:
    case IXGB_DEVICE_ID_82597EX_LR:
    case IXGB_DEVICE_ID_82597EX_CX4:
        sc->hw.mac_type = ixgb_82597;
        break;
    default:
        INIT_DEBUGOUT1("Unknown device id 0x%x", sc->hw.device_id);
        printf("%s: unsupported device id 0x%x\n",
            sc->sc_dv.dv_xname, sc->hw.device_id);
    }
}

int
ixgb_allocate_pci_resources(struct ixgb_softc *sc)
{
    int val;
    pci_intr_handle_t ih;
    const char *intrstr = NULL;
    struct pci_attach_args *pa = &sc->osdep.ixgb_pa;
    pci_chipset_tag_t pc = pa->pa_pc;

    val = pci_conf_read(pa->pa_pc, pa->pa_tag, IXGB_MMBA);
    if (PCI_MAPREG_TYPE(val) != PCI_MAPREG_TYPE_MEM) {
        printf(": mmba is not mem space\n");
        return (ENXIO);
    }
    if (pci_mapreg_map(pa, IXGB_MMBA, PCI_MAPREG_MEM_TYPE(val), 0,
        &sc->osdep.mem_bus_space_tag, &sc->osdep.mem_bus_space_handle,
        &sc->osdep.ixgb_membase, &sc->osdep.ixgb_memsize, 0)) {
        printf(": cannot find mem space\n");
        return (ENXIO);
    }

    if (pci_intr_map(pa, &ih)) {
        printf(": couldn't map interrupt\n");
        return (ENXIO);
    }

    sc->hw.back = &sc->osdep;

    intrstr = pci_intr_string(pc, ih);
    sc->sc_intrhand = pci_intr_establish(pc, ih, IPL_NET, ixgb_intr, sc,
        sc->sc_dv.dv_xname);
    if (sc->sc_intrhand == NULL) {
        printf(": couldn't establish interrupt");
        if (intrstr != NULL)
            printf(" at %s", intrstr);
        printf("\n");
        return (ENXIO);
    }
    printf(": %s", intrstr);

    return (0);
}

void
ixgb_free_pci_resources(struct ixgb_softc *sc)
{
    struct pci_attach_args *pa = &sc->osdep.ixgb_pa;
    pci_chipset_tag_t pc = pa->pa_pc;

    if (sc->sc_intrhand)
        pci_intr_disestablish(pc, sc->sc_intrhand);
    sc->sc_intrhand = 0;

    if (sc->osdep.ixgb_membase)
        bus_space_unmap(sc->osdep.mem_bus_space_tag,
            sc->osdep.mem_bus_space_handle, sc->osdep.ixgb_memsize);
    sc->osdep.ixgb_membase = 0;
}

/*********************************************************************
 *
 *  Initialize the hardware to a configuration as specified by the
 *  adapter structure. The controller is reset, the EEPROM is
 *  verified, the MAC address is set, then the shared initialization
 *  routines are called.
 *
 **********************************************************************/
int
ixgb_hardware_init(struct ixgb_softc *sc)
{
    /* Issue a global reset */
    sc->hw.adapter_stopped = FALSE;
    ixgb_adapter_stop(&sc->hw);

    /* Make sure we have a good EEPROM before we read from it */
    if (!ixgb_validate_eeprom_checksum(&sc->hw)) {
        printf("%s: The EEPROM Checksum Is Not Valid\n",
            sc->sc_dv.dv_xname);
        return (EIO);
    }
    if (!ixgb_init_hw(&sc->hw)) {
        printf("%s: Hardware Initialization Failed",
            sc->sc_dv.dv_xname);
        return (EIO);
    }
    bcopy(sc->hw.curr_mac_addr, sc->interface_data.ac_enaddr,
        IXGB_ETH_LENGTH_OF_ADDRESS);

    return (0);
}

/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 **********************************************************************/
void
ixgb_setup_interface(struct ixgb_softc *sc)
{
    struct ifnet *ifp;
    INIT_DEBUGOUT("ixgb_setup_interface: begin");

    ifp = &sc->interface_data.ac_if;
    strlcpy(ifp->if_xname, sc->sc_dv.dv_xname, IFNAMSIZ);

    ifp->if_softc = sc;
    ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    ifp->if_ioctl = ixgb_ioctl;
    ifp->if_start = ixgb_start;
    ifp->if_watchdog = ixgb_watchdog;
    ifp->if_hardmtu =
        IXGB_MAX_JUMBO_FRAME_SIZE - ETHER_HDR_LEN - ETHER_CRC_LEN;
    IFQ_SET_MAXLEN(&ifp->if_snd, sc->num_tx_desc - 1);
    IFQ_SET_READY(&ifp->if_snd);

    ifp->if_capabilities = IFCAP_VLAN_MTU;

#if NVLAN > 0
    ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

#ifdef IXGB_CSUM_OFFLOAD
    ifp->if_capabilities |= IFCAP_CSUM_TCPv4|IFCAP_CSUM_UDPv4;
#endif

    /*
     * Specify the media types supported by this adapter and register
     * callbacks to update media and link information
     */
    ifmedia_init(&sc->media, IFM_IMASK, ixgb_media_change,
        ixgb_media_status);
    if ((sc->hw.phy_type == ixgb_phy_type_g6104) ||
        (sc->hw.phy_type == ixgb_phy_type_txn17401)) {
        ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_LR |
            IFM_FDX, 0, NULL);
    } else {
        ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_SR |
            IFM_FDX, 0, NULL);
    }
    ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
    ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);

    if_attach(ifp);
    ether_ifattach(ifp);
}
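
/*
 * Note on the DMA helpers below: ixgb_dma_malloc() follows the usual
 * bus_dma(9) sequence -- bus_dmamap_create(), bus_dmamem_alloc(),
 * bus_dmamem_map(), bus_dmamap_load() -- and unwinds the completed
 * steps in reverse order on failure.  ixgb_dma_free() performs the
 * same unwinding for a successfully allocated region.
 */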
/********************************************************************
 * Manage DMA'able memory.
 *******************************************************************/
int
ixgb_dma_malloc(struct ixgb_softc *sc, bus_size_t size,
    struct ixgb_dma_alloc *dma, int mapflags)
{
    int r;

    dma->dma_tag = sc->osdep.ixgb_pa.pa_dmat;
    r = bus_dmamap_create(dma->dma_tag, size, 1,
        size, 0, BUS_DMA_NOWAIT, &dma->dma_map);
    if (r != 0) {
        printf("%s: ixgb_dma_malloc: bus_dmamap_create failed; "
            "error %u\n", sc->sc_dv.dv_xname, r);
        goto fail_0;
    }

    r = bus_dmamem_alloc(dma->dma_tag, size, PAGE_SIZE, 0, &dma->dma_seg,
        1, &dma->dma_nseg, BUS_DMA_NOWAIT);
    if (r != 0) {
        printf("%s: ixgb_dma_malloc: bus_dmamem_alloc failed; "
            "size %lu, error %d\n", sc->sc_dv.dv_xname,
            (unsigned long)size, r);
        goto fail_1;
    }

    r = bus_dmamem_map(dma->dma_tag, &dma->dma_seg, dma->dma_nseg, size,
        &dma->dma_vaddr, BUS_DMA_NOWAIT);
    if (r != 0) {
        printf("%s: ixgb_dma_malloc: bus_dmamem_map failed; "
            "size %lu, error %d\n", sc->sc_dv.dv_xname,
            (unsigned long)size, r);
        goto fail_2;
    }

    r = bus_dmamap_load(sc->osdep.ixgb_pa.pa_dmat, dma->dma_map,
        dma->dma_vaddr, size, NULL,
        mapflags | BUS_DMA_NOWAIT);
    if (r != 0) {
        printf("%s: ixgb_dma_malloc: bus_dmamap_load failed; "
            "error %u\n", sc->sc_dv.dv_xname, r);
        goto fail_3;
    }

    dma->dma_size = size;
    return (0);

fail_3:
    bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size);
fail_2:
    bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
fail_1:
    bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
fail_0:
    dma->dma_map = NULL;
    dma->dma_tag = NULL;

    return (r);
}

void
ixgb_dma_free(struct ixgb_softc *sc, struct ixgb_dma_alloc *dma)
{
    if (dma->dma_tag == NULL)
        return;

    if (dma->dma_map != NULL) {
        bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0,
            dma->dma_map->dm_mapsize,
            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
        bus_dmamap_unload(dma->dma_tag, dma->dma_map);
        bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, dma->dma_size);
        bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
        bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
    }
}

/*********************************************************************
 *
 *  Allocate memory for tx_buffer structures. The tx_buffer stores all
 *  the information needed to transmit a packet on the wire.
 *
 **********************************************************************/
int
ixgb_allocate_transmit_structures(struct ixgb_softc *sc)
{
    if (!(sc->tx_buffer_area = malloc(sizeof(struct ixgb_buffer) *
        sc->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
        printf("%s: Unable to allocate tx_buffer memory\n",
            sc->sc_dv.dv_xname);
        return (ENOMEM);
    }

    return (0);
}

/*********************************************************************
 *
 *  Allocate and initialize transmit structures.
 *
 **********************************************************************/
int
ixgb_setup_transmit_structures(struct ixgb_softc *sc)
{
    struct ixgb_buffer *tx_buffer;
    int error, i;

    if ((error = ixgb_allocate_transmit_structures(sc)) != 0)
        goto fail;

    bzero((void *)sc->tx_desc_base,
        (sizeof(struct ixgb_tx_desc)) * sc->num_tx_desc);

    sc->txtag = sc->osdep.ixgb_pa.pa_dmat;

    tx_buffer = sc->tx_buffer_area;
    for (i = 0; i < sc->num_tx_desc; i++) {
        error = bus_dmamap_create(sc->txtag, IXGB_MAX_JUMBO_FRAME_SIZE,
            IXGB_MAX_SCATTER, IXGB_MAX_JUMBO_FRAME_SIZE, 0,
            BUS_DMA_NOWAIT, &tx_buffer->map);
        if (error != 0) {
            printf("%s: Unable to create TX DMA map\n",
                sc->sc_dv.dv_xname);
            goto fail;
        }
        tx_buffer++;
    }

    sc->next_avail_tx_desc = 0;
    sc->oldest_used_tx_desc = 0;

    /* Set number of descriptors available */
    sc->num_tx_desc_avail = sc->num_tx_desc;

    /* Set checksum context */
    sc->active_checksum_context = OFFLOAD_NONE;
    bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
        sc->txdma.dma_size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

    return (0);

fail:
    ixgb_free_transmit_structures(sc);
    return (error);
}

/*********************************************************************
 *
 *  Enable transmit unit.
 *
 **********************************************************************/
void
ixgb_initialize_transmit_unit(struct ixgb_softc *sc)
{
    u_int32_t reg_tctl;
    u_int64_t bus_addr;

    /* Setup the Base and Length of the Tx Descriptor Ring */
    bus_addr = sc->txdma.dma_map->dm_segs[0].ds_addr;
    IXGB_WRITE_REG(&sc->hw, TDBAL, (u_int32_t)bus_addr);
    IXGB_WRITE_REG(&sc->hw, TDBAH, (u_int32_t)(bus_addr >> 32));
    IXGB_WRITE_REG(&sc->hw, TDLEN,
        sc->num_tx_desc *
        sizeof(struct ixgb_tx_desc));

    /* Setup the HW Tx Head and Tail descriptor pointers */
    IXGB_WRITE_REG(&sc->hw, TDH, 0);
    IXGB_WRITE_REG(&sc->hw, TDT, 0);

    HW_DEBUGOUT2("Base = %x, Length = %x\n",
        IXGB_READ_REG(&sc->hw, TDBAL),
        IXGB_READ_REG(&sc->hw, TDLEN));

    IXGB_WRITE_REG(&sc->hw, TIDV, sc->tx_int_delay);

    /* Program the Transmit Control Register */
    reg_tctl = IXGB_READ_REG(&sc->hw, TCTL);
    reg_tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
    IXGB_WRITE_REG(&sc->hw, TCTL, reg_tctl);

    /* Setup Transmit Descriptor Settings for this adapter */
    sc->txd_cmd = IXGB_TX_DESC_TYPE | IXGB_TX_DESC_CMD_RS;

    if (sc->tx_int_delay > 0)
        sc->txd_cmd |= IXGB_TX_DESC_CMD_IDE;
}

/*********************************************************************
 *
 *  Free all transmit related data structures.
 *
 **********************************************************************/
void
ixgb_free_transmit_structures(struct ixgb_softc *sc)
{
    struct ixgb_buffer *tx_buffer;
    int i;

    INIT_DEBUGOUT("free_transmit_structures: begin");

    if (sc->tx_buffer_area != NULL) {
        tx_buffer = sc->tx_buffer_area;
        for (i = 0; i < sc->num_tx_desc; i++, tx_buffer++) {
            if (tx_buffer->map != NULL &&
                tx_buffer->map->dm_nsegs > 0) {
                bus_dmamap_sync(sc->txtag, tx_buffer->map,
                    0, tx_buffer->map->dm_mapsize,
                    BUS_DMASYNC_POSTWRITE);
                bus_dmamap_unload(sc->txtag,
                    tx_buffer->map);
            }

            if (tx_buffer->m_head != NULL) {
                m_freem(tx_buffer->m_head);
                tx_buffer->m_head = NULL;
            }
            if (tx_buffer->map != NULL) {
                bus_dmamap_destroy(sc->txtag,
                    tx_buffer->map);
                tx_buffer->map = NULL;
            }
        }
    }
    if (sc->tx_buffer_area != NULL) {
        free(sc->tx_buffer_area, M_DEVBUF);
        sc->tx_buffer_area = NULL;
    }
    if (sc->txtag != NULL) {
        sc->txtag = NULL;
    }
}

/*********************************************************************
 *
 *  The offload context needs to be set when we transfer the first
 *  packet of a particular protocol (TCP/UDP). We change the
 *  context only if the protocol type changes.
 *
 **********************************************************************/
void
ixgb_transmit_checksum_setup(struct ixgb_softc *sc,
    struct mbuf *mp,
    u_int8_t *txd_popts)
{
    struct ixgb_context_desc *TXD;
    struct ixgb_buffer *tx_buffer;
    int curr_txd;

    if (mp->m_pkthdr.csum_flags) {

        if (mp->m_pkthdr.csum_flags & M_TCP_CSUM_OUT) {
            *txd_popts = IXGB_TX_DESC_POPTS_TXSM;
            if (sc->active_checksum_context == OFFLOAD_TCP_IP)
                return;
            else
                sc->active_checksum_context = OFFLOAD_TCP_IP;

        } else if (mp->m_pkthdr.csum_flags & M_UDP_CSUM_OUT) {
            *txd_popts = IXGB_TX_DESC_POPTS_TXSM;
            if (sc->active_checksum_context == OFFLOAD_UDP_IP)
                return;
            else
                sc->active_checksum_context = OFFLOAD_UDP_IP;
        } else {
            *txd_popts = 0;
            return;
        }
    } else {
        *txd_popts = 0;
        return;
    }

    /*
     * If we reach this point, the checksum offload context needs to be
     * reset.
     */
    curr_txd = sc->next_avail_tx_desc;
    tx_buffer = &sc->tx_buffer_area[curr_txd];
    TXD = (struct ixgb_context_desc *)&sc->tx_desc_base[curr_txd];

    TXD->tucss = ENET_HEADER_SIZE + sizeof(struct ip);
    TXD->tucse = 0;

    TXD->mss = 0;

    if (sc->active_checksum_context == OFFLOAD_TCP_IP) {
        TXD->tucso =
            ENET_HEADER_SIZE + sizeof(struct ip) +
            offsetof(struct tcphdr, th_sum);
    } else if (sc->active_checksum_context == OFFLOAD_UDP_IP) {
        TXD->tucso =
            ENET_HEADER_SIZE + sizeof(struct ip) +
            offsetof(struct udphdr, uh_sum);
    }
    TXD->cmd_type_len = htole32(IXGB_CONTEXT_DESC_CMD_TCP |
        IXGB_TX_DESC_CMD_RS | IXGB_CONTEXT_DESC_CMD_IDE);

    tx_buffer->m_head = NULL;

    if (++curr_txd == sc->num_tx_desc)
        curr_txd = 0;

    sc->num_tx_desc_avail--;
    sc->next_avail_tx_desc = curr_txd;
}
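
/*
 * The context descriptor written above occupies one slot in the regular
 * TX ring (hence the num_tx_desc_avail decrement).  Because CMD_RS is
 * set, it is written back by the hardware and reclaimed by ixgb_txeof()
 * like any data descriptor; its tx_buffer has no mbuf attached, so
 * nothing is freed for it.
 */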
/**********************************************************************
 *
 *  Examine each tx_buffer in the used queue. If the hardware is done
 *  processing the packet then free associated resources. The
 *  tx_buffer is put back on the free queue.
 *
 **********************************************************************/
void
ixgb_txeof(struct ixgb_softc *sc)
{
    int i, num_avail;
    struct ixgb_buffer *tx_buffer;
    struct ixgb_tx_desc *tx_desc;
    struct ifnet *ifp = &sc->interface_data.ac_if;

    if (sc->num_tx_desc_avail == sc->num_tx_desc)
        return;

    num_avail = sc->num_tx_desc_avail;
    i = sc->oldest_used_tx_desc;

    tx_buffer = &sc->tx_buffer_area[i];
    tx_desc = &sc->tx_desc_base[i];

    bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
        sc->txdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
    while (tx_desc->status & IXGB_TX_DESC_STATUS_DD) {

        tx_desc->status = 0;
        num_avail++;

        if (tx_buffer->m_head != NULL) {
            ifp->if_opackets++;

            if (tx_buffer->map->dm_nsegs > 0) {
                bus_dmamap_sync(sc->txtag, tx_buffer->map,
                    0, tx_buffer->map->dm_mapsize,
                    BUS_DMASYNC_POSTWRITE);
                bus_dmamap_unload(sc->txtag, tx_buffer->map);
            }

            m_freem(tx_buffer->m_head);
            tx_buffer->m_head = NULL;
        }
        if (++i == sc->num_tx_desc)
            i = 0;

        tx_buffer = &sc->tx_buffer_area[i];
        tx_desc = &sc->tx_desc_base[i];
    }
    bus_dmamap_sync(sc->txdma.dma_tag, sc->txdma.dma_map, 0,
        sc->txdma.dma_map->dm_mapsize,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

    sc->oldest_used_tx_desc = i;

    /*
     * If we have enough room, clear IFF_OACTIVE to tell the stack that
     * it is OK to send packets. If there are no pending descriptors,
     * clear the timeout. Otherwise, if some descriptors have been freed,
     * restart the timeout.
     */
    if (num_avail > IXGB_TX_CLEANUP_THRESHOLD)
        ifp->if_flags &= ~IFF_OACTIVE;

    /* All clean, turn off the timer */
    if (num_avail == sc->num_tx_desc)
        ifp->if_timer = 0;
    /* Some cleaned, reset the timer */
    else if (num_avail != sc->num_tx_desc_avail)
        ifp->if_timer = IXGB_TX_TIMEOUT;

    sc->num_tx_desc_avail = num_avail;
}


/*********************************************************************
 *
 *  Get a buffer from the system mbuf buffer pool.
 *
 **********************************************************************/
int
ixgb_get_buf(struct ixgb_softc *sc, int i,
    struct mbuf *nmp)
{
    struct mbuf *mp = nmp;
    struct ixgb_buffer *rx_buffer;
    int error;

    if (mp == NULL) {
        MGETHDR(mp, M_DONTWAIT, MT_DATA);
        if (mp == NULL) {
            sc->mbuf_alloc_failed++;
            return (ENOBUFS);
        }
        MCLGET(mp, M_DONTWAIT);
        if ((mp->m_flags & M_EXT) == 0) {
            m_freem(mp);
            sc->mbuf_cluster_failed++;
            return (ENOBUFS);
        }
        mp->m_len = mp->m_pkthdr.len = MCLBYTES;
    } else {
        mp->m_len = mp->m_pkthdr.len = MCLBYTES;
        mp->m_data = mp->m_ext.ext_buf;
        mp->m_next = NULL;
    }

    if (sc->hw.max_frame_size <= (MCLBYTES - ETHER_ALIGN))
        m_adj(mp, ETHER_ALIGN);

    rx_buffer = &sc->rx_buffer_area[i];

    /*
     * Using memory from the mbuf cluster pool, invoke the bus_dma
     * machinery to arrange the memory mapping.
     */
    error = bus_dmamap_load_mbuf(sc->rxtag, rx_buffer->map,
        mp, BUS_DMA_NOWAIT);
    if (error) {
        m_freem(mp);
        return (error);
    }
    rx_buffer->m_head = mp;
    bzero(&sc->rx_desc_base[i], sizeof(sc->rx_desc_base[i]));
    sc->rx_desc_base[i].buff_addr =
        htole64(rx_buffer->map->dm_segs[0].ds_addr);
    bus_dmamap_sync(sc->rxtag, rx_buffer->map, 0,
        rx_buffer->map->dm_mapsize, BUS_DMASYNC_PREREAD);

    return (0);
}

/*********************************************************************
 *
 *  Allocate memory for rx_buffer structures. Since we use one
 *  rx_buffer per received packet, the maximum number of rx_buffer's
 *  that we'll need is equal to the number of receive descriptors
 *  that we've allocated.
 *
 **********************************************************************/
int
ixgb_allocate_receive_structures(struct ixgb_softc *sc)
{
    int i, error;
    struct ixgb_buffer *rx_buffer;

    if (!(sc->rx_buffer_area = malloc(sizeof(struct ixgb_buffer) *
        sc->num_rx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
        printf("%s: Unable to allocate rx_buffer memory\n",
            sc->sc_dv.dv_xname);
        return (ENOMEM);
    }

    sc->rxtag = sc->osdep.ixgb_pa.pa_dmat;

    rx_buffer = sc->rx_buffer_area;
    for (i = 0; i < sc->num_rx_desc; i++, rx_buffer++) {
        error = bus_dmamap_create(sc->rxtag, MCLBYTES, 1,
            MCLBYTES, 0, BUS_DMA_NOWAIT,
            &rx_buffer->map);
        if (error != 0) {
            printf("%s: ixgb_allocate_receive_structures: "
                "bus_dmamap_create failed; error %u\n",
                sc->sc_dv.dv_xname, error);
            goto fail;
        }
    }

    for (i = 0; i < sc->num_rx_desc; i++) {
        error = ixgb_get_buf(sc, i, NULL);
        if (error != 0)
            goto fail;
    }
    bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map, 0,
        sc->rxdma.dma_map->dm_mapsize,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

    return (0);

fail:
    ixgb_free_receive_structures(sc);
    return (error);
}

/*********************************************************************
 *
 *  Allocate and initialize receive structures.
 *
 **********************************************************************/
int
ixgb_setup_receive_structures(struct ixgb_softc *sc)
{
    bzero((void *)sc->rx_desc_base,
        (sizeof(struct ixgb_rx_desc)) * sc->num_rx_desc);

    if (ixgb_allocate_receive_structures(sc))
        return (ENOMEM);

    /* Setup our descriptor pointers */
    sc->next_rx_desc_to_check = 0;
    sc->next_rx_desc_to_use = 0;
    return (0);
}

/*********************************************************************
 *
 *  Enable receive unit.
 *
 **********************************************************************/
void
ixgb_initialize_receive_unit(struct ixgb_softc *sc)
{
    u_int32_t reg_rctl;
    u_int32_t reg_rxcsum;
    u_int32_t reg_rxdctl;
    u_int64_t bus_addr;

    /*
     * Make sure receives are disabled while setting up the descriptor
     * ring
     */
    reg_rctl = IXGB_READ_REG(&sc->hw, RCTL);
    IXGB_WRITE_REG(&sc->hw, RCTL, reg_rctl & ~IXGB_RCTL_RXEN);

    /* Set the Receive Delay Timer Register */
    IXGB_WRITE_REG(&sc->hw, RDTR,
        sc->rx_int_delay);

    /* Setup the Base and Length of the Rx Descriptor Ring */
    bus_addr = sc->rxdma.dma_map->dm_segs[0].ds_addr;
    IXGB_WRITE_REG(&sc->hw, RDBAL, (u_int32_t)bus_addr);
    IXGB_WRITE_REG(&sc->hw, RDBAH, (u_int32_t)(bus_addr >> 32));
    IXGB_WRITE_REG(&sc->hw, RDLEN, sc->num_rx_desc *
        sizeof(struct ixgb_rx_desc));

    /* Setup the HW Rx Head and Tail Descriptor Pointers */
    IXGB_WRITE_REG(&sc->hw, RDH, 0);

    IXGB_WRITE_REG(&sc->hw, RDT, sc->num_rx_desc - 1);

    reg_rxdctl = RXDCTL_WTHRESH_DEFAULT << IXGB_RXDCTL_WTHRESH_SHIFT
        | RXDCTL_HTHRESH_DEFAULT << IXGB_RXDCTL_HTHRESH_SHIFT
        | RXDCTL_PTHRESH_DEFAULT << IXGB_RXDCTL_PTHRESH_SHIFT;
    IXGB_WRITE_REG(&sc->hw, RXDCTL, reg_rxdctl);

    sc->raidc = 1;
    if (sc->raidc) {
        uint32_t raidc;
        uint8_t poll_threshold;
#define IXGB_RAIDC_POLL_DEFAULT 120

        poll_threshold = ((sc->num_rx_desc - 1) >> 3);
        poll_threshold >>= 1;
        poll_threshold &= 0x3F;
        raidc = IXGB_RAIDC_EN | IXGB_RAIDC_RXT_GATE |
            (IXGB_RAIDC_POLL_DEFAULT << IXGB_RAIDC_POLL_SHIFT) |
            (sc->rx_int_delay << IXGB_RAIDC_DELAY_SHIFT) |
            poll_threshold;
        IXGB_WRITE_REG(&sc->hw, RAIDC, raidc);
    }

    /* Enable Receive Checksum Offload for TCP and UDP ? */
    reg_rxcsum = IXGB_READ_REG(&sc->hw, RXCSUM);
    reg_rxcsum |= IXGB_RXCSUM_TUOFL;
    IXGB_WRITE_REG(&sc->hw, RXCSUM, reg_rxcsum);

    /* Setup the Receive Control Register */
    reg_rctl = IXGB_READ_REG(&sc->hw, RCTL);
    reg_rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);
    reg_rctl |= IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 | IXGB_RCTL_SECRC |
        IXGB_RCTL_CFF |
        (sc->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);

    switch (sc->rx_buffer_len) {
    default:
    case IXGB_RXBUFFER_2048:
        reg_rctl |= IXGB_RCTL_BSIZE_2048;
        break;
    case IXGB_RXBUFFER_4096:
        reg_rctl |= IXGB_RCTL_BSIZE_4096;
        break;
    case IXGB_RXBUFFER_8192:
        reg_rctl |= IXGB_RCTL_BSIZE_8192;
        break;
    case IXGB_RXBUFFER_16384:
        reg_rctl |= IXGB_RCTL_BSIZE_16384;
        break;
    }

    reg_rctl |= IXGB_RCTL_RXEN;

    /* Enable Receives */
    IXGB_WRITE_REG(&sc->hw, RCTL, reg_rctl);
}

/*********************************************************************
 *
 *  Free receive related data structures.
 *
 **********************************************************************/
void
ixgb_free_receive_structures(struct ixgb_softc *sc)
{
    struct ixgb_buffer *rx_buffer;
    int i;

    INIT_DEBUGOUT("free_receive_structures: begin");

    if (sc->rx_buffer_area != NULL) {
        rx_buffer = sc->rx_buffer_area;
        for (i = 0; i < sc->num_rx_desc; i++, rx_buffer++) {
            if (rx_buffer->map != NULL &&
                rx_buffer->map->dm_nsegs > 0) {
                bus_dmamap_sync(sc->rxtag, rx_buffer->map,
                    0, rx_buffer->map->dm_mapsize,
                    BUS_DMASYNC_POSTREAD);
                bus_dmamap_unload(sc->rxtag,
                    rx_buffer->map);
            }
            if (rx_buffer->m_head != NULL) {
                m_freem(rx_buffer->m_head);
                rx_buffer->m_head = NULL;
            }
            if (rx_buffer->map != NULL) {
                bus_dmamap_destroy(sc->rxtag,
                    rx_buffer->map);
                rx_buffer->map = NULL;
            }
        }
    }
    if (sc->rx_buffer_area != NULL) {
        free(sc->rx_buffer_area, M_DEVBUF);
        sc->rx_buffer_area = NULL;
    }
    if (sc->rxtag != NULL)
        sc->rxtag = NULL;
}

/*********************************************************************
 *
 *  This routine executes in interrupt context. It replenishes
 *  the mbufs in the descriptor ring and sends data which has been
 *  dma'ed into host memory to the upper layer.
 *
 *  We loop at most count times if count is > 0, or until done if
 *  count < 0.
 *
 *********************************************************************/
void
ixgb_rxeof(struct ixgb_softc *sc, int count)
{
    struct ifnet *ifp;
    struct mbuf *mp;
    int eop = 0;
    int len;
    u_int8_t accept_frame = 0;
    int i;
    int next_to_use = 0;
    int eop_desc;

    /* Pointer to the receive descriptor being examined.
     */
    struct ixgb_rx_desc *current_desc;

    ifp = &sc->interface_data.ac_if;
    i = sc->next_rx_desc_to_check;
    next_to_use = sc->next_rx_desc_to_use;
    eop_desc = sc->next_rx_desc_to_check;
    current_desc = &sc->rx_desc_base[i];
    bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map, 0,
        sc->rxdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

    if (!((current_desc->status) & IXGB_RX_DESC_STATUS_DD))
        return;

    while ((current_desc->status & IXGB_RX_DESC_STATUS_DD) &&
        (count != 0) &&
        (ifp->if_flags & IFF_RUNNING)) {

        mp = sc->rx_buffer_area[i].m_head;
        bus_dmamap_sync(sc->rxtag, sc->rx_buffer_area[i].map,
            0, sc->rx_buffer_area[i].map->dm_mapsize,
            BUS_DMASYNC_POSTREAD);
        bus_dmamap_unload(sc->rxtag, sc->rx_buffer_area[i].map);

        accept_frame = 1;
        if (current_desc->status & IXGB_RX_DESC_STATUS_EOP) {
            count--;
            eop = 1;
        } else {
            eop = 0;
        }
        len = letoh16(current_desc->length);

        if (current_desc->errors & (IXGB_RX_DESC_ERRORS_CE |
            IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P |
            IXGB_RX_DESC_ERRORS_RXE))
            accept_frame = 0;
        if (accept_frame) {

            /* Assign correct length to the current fragment */
            mp->m_len = len;

            if (sc->fmp == NULL) {
                mp->m_pkthdr.len = len;
                sc->fmp = mp;   /* Store the first mbuf */
                sc->lmp = mp;
            } else {
                /* Chain mbuf's together */
                mp->m_flags &= ~M_PKTHDR;
                sc->lmp->m_next = mp;
                sc->lmp = sc->lmp->m_next;
                sc->fmp->m_pkthdr.len += len;
            }

            if (eop) {
                eop_desc = i;
                sc->fmp->m_pkthdr.rcvif = ifp;
                ifp->if_ipackets++;
                ixgb_receive_checksum(sc, current_desc, sc->fmp);

#if NVLAN > 0
                if (current_desc->status & IXGB_RX_DESC_STATUS_VP) {
                    sc->fmp->m_pkthdr.ether_vtag =
                        letoh16(current_desc->special);
                    sc->fmp->m_flags |= M_VLANTAG;
                }
#endif

#if NBPFILTER > 0
                /*
                 * Handle BPF listeners. Let the BPF
                 * user see the packet.
                 */
                if (ifp->if_bpf)
                    bpf_mtap_ether(ifp->if_bpf, sc->fmp,
                        BPF_DIRECTION_IN);
#endif

                ether_input_mbuf(ifp, sc->fmp);
                sc->fmp = NULL;
                sc->lmp = NULL;
            }
            sc->rx_buffer_area[i].m_head = NULL;
        } else {
            sc->dropped_pkts++;
            if (sc->fmp != NULL)
                m_freem(sc->fmp);
            sc->fmp = NULL;
            sc->lmp = NULL;
        }

        /* Zero out the receive descriptors status */
        current_desc->status = 0;
        bus_dmamap_sync(sc->rxdma.dma_tag, sc->rxdma.dma_map, 0,
            sc->rxdma.dma_map->dm_mapsize,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        /* Advance our pointers to the next descriptor */
        if (++i == sc->num_rx_desc) {
            i = 0;
            current_desc = sc->rx_desc_base;
        } else
            current_desc++;
    }
    sc->next_rx_desc_to_check = i;

    if (--i < 0)
        i = (sc->num_rx_desc - 1);

    /*
     * 82597EX: Workaround for redundant write back in the receive
     * descriptor ring (causes memory corruption).  Avoid using and
     * re-submitting the most recently received RX descriptor back to
     * the hardware.
     *
     * if (Last written back descriptor == EOP bit set descriptor)
     *     then avoid re-submitting the most recently received RX
     *     descriptor back to hardware.
     * if (Last written back descriptor != EOP bit set descriptor)
     *     then avoid re-submitting the most recently received RX
     *     descriptors till the last EOP bit set descriptor.
     */
    if (eop_desc != i) {
        if (++eop_desc == sc->num_rx_desc)
            eop_desc = 0;
        i = eop_desc;
    }
    /* Replenish the descriptors with new mbufs till last EOP bit set descriptor */
    while (next_to_use != i) {
        current_desc = &sc->rx_desc_base[next_to_use];
        if ((current_desc->errors & (IXGB_RX_DESC_ERRORS_CE |
            IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P |
            IXGB_RX_DESC_ERRORS_RXE))) {
            mp = sc->rx_buffer_area[next_to_use].m_head;
            ixgb_get_buf(sc, next_to_use, mp);
        } else {
            if (ixgb_get_buf(sc, next_to_use, NULL) == ENOBUFS)
                break;
        }
        /* Advance our pointers to the next descriptor */
        if (++next_to_use == sc->num_rx_desc)
            next_to_use = 0;
    }
    sc->next_rx_desc_to_use = next_to_use;
    if (--next_to_use < 0)
        next_to_use = (sc->num_rx_desc - 1);
    /* Advance the IXGB's Receive Queue #0 "Tail Pointer" */
    IXGB_WRITE_REG(&sc->hw, RDT, next_to_use);
}

/*********************************************************************
 *
 *  Verify that the hardware indicated that the checksum is valid.
 *  Inform the stack about the status of checksum so that stack
 *  doesn't spend time verifying the checksum.
 *
 *********************************************************************/
void
ixgb_receive_checksum(struct ixgb_softc *sc,
    struct ixgb_rx_desc *rx_desc,
    struct mbuf *mp)
{
    if (rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) {
        mp->m_pkthdr.csum_flags = 0;
        return;
    }

    if (rx_desc->status & IXGB_RX_DESC_STATUS_IPCS) {
        /* Did it pass? */
        if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_IPE)) {
            /* IP Checksum Good */
            mp->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;

        } else {
            mp->m_pkthdr.csum_flags = 0;
        }
    }
    if (rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS) {
        /* Did it pass? */
        if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE)) {
            mp->m_pkthdr.csum_flags |=
                M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
        }
    }
}

/*
 * This turns on the hardware offload of VLAN
 * tag insertion and stripping
 */
void
ixgb_enable_hw_vlans(struct ixgb_softc *sc)
{
    uint32_t ctrl;

    ctrl = IXGB_READ_REG(&sc->hw, CTRL0);
    ctrl |= IXGB_CTRL0_VME;
    IXGB_WRITE_REG(&sc->hw, CTRL0, ctrl);
}

void
ixgb_enable_intr(struct ixgb_softc *sc)
{
    uint32_t val;

    val = IXGB_INT_RXT0 | IXGB_INT_TXDW | IXGB_INT_RXDMT0 |
        IXGB_INT_LSC | IXGB_INT_RXO;
    if (sc->hw.subsystem_vendor_id == SUN_SUBVENDOR_ID)
        val |= IXGB_INT_GPI0;
    IXGB_WRITE_REG(&sc->hw, IMS, val);
}

void
ixgb_disable_intr(struct ixgb_softc *sc)
{
    IXGB_WRITE_REG(&sc->hw, IMC, ~0);
}

void
ixgb_write_pci_cfg(struct ixgb_hw *hw,
    uint32_t reg,
    uint16_t *value)
{
    struct pci_attach_args *pa = &((struct ixgb_osdep *)hw->back)->ixgb_pa;
    pci_chipset_tag_t pc = pa->pa_pc;
    /* Should we do read/mask/write...? 16 vs 32 bit!!! */
    pci_conf_write(pc, pa->pa_tag, reg, *value);
}

/**********************************************************************
 *
 *  Update the board statistics counters.
 *
 **********************************************************************/
void
ixgb_update_stats_counters(struct ixgb_softc *sc)
{
	struct ifnet   *ifp;

	sc->stats.crcerrs += IXGB_READ_REG(&sc->hw, CRCERRS);
	sc->stats.gprcl += IXGB_READ_REG(&sc->hw, GPRCL);
	sc->stats.gprch += IXGB_READ_REG(&sc->hw, GPRCH);
	sc->stats.gorcl += IXGB_READ_REG(&sc->hw, GORCL);
	sc->stats.gorch += IXGB_READ_REG(&sc->hw, GORCH);
	sc->stats.bprcl += IXGB_READ_REG(&sc->hw, BPRCL);
	sc->stats.bprch += IXGB_READ_REG(&sc->hw, BPRCH);
	sc->stats.mprcl += IXGB_READ_REG(&sc->hw, MPRCL);
	sc->stats.mprch += IXGB_READ_REG(&sc->hw, MPRCH);
	sc->stats.roc += IXGB_READ_REG(&sc->hw, ROC);

	sc->stats.mpc += IXGB_READ_REG(&sc->hw, MPC);
	sc->stats.dc += IXGB_READ_REG(&sc->hw, DC);
	sc->stats.rlec += IXGB_READ_REG(&sc->hw, RLEC);
	sc->stats.xonrxc += IXGB_READ_REG(&sc->hw, XONRXC);
	sc->stats.xontxc += IXGB_READ_REG(&sc->hw, XONTXC);
	sc->stats.xoffrxc += IXGB_READ_REG(&sc->hw, XOFFRXC);
	sc->stats.xofftxc += IXGB_READ_REG(&sc->hw, XOFFTXC);
	sc->stats.gptcl += IXGB_READ_REG(&sc->hw, GPTCL);
	sc->stats.gptch += IXGB_READ_REG(&sc->hw, GPTCH);
	sc->stats.gotcl += IXGB_READ_REG(&sc->hw, GOTCL);
	sc->stats.gotch += IXGB_READ_REG(&sc->hw, GOTCH);
	sc->stats.ruc += IXGB_READ_REG(&sc->hw, RUC);
	sc->stats.rfc += IXGB_READ_REG(&sc->hw, RFC);
	sc->stats.rjc += IXGB_READ_REG(&sc->hw, RJC);
	sc->stats.torl += IXGB_READ_REG(&sc->hw, TORL);
	sc->stats.torh += IXGB_READ_REG(&sc->hw, TORH);
	sc->stats.totl += IXGB_READ_REG(&sc->hw, TOTL);
	sc->stats.toth += IXGB_READ_REG(&sc->hw, TOTH);
	sc->stats.tprl += IXGB_READ_REG(&sc->hw, TPRL);
	sc->stats.tprh += IXGB_READ_REG(&sc->hw, TPRH);
	sc->stats.tptl += IXGB_READ_REG(&sc->hw, TPTL);
	sc->stats.tpth += IXGB_READ_REG(&sc->hw, TPTH);
	sc->stats.plt64c += IXGB_READ_REG(&sc->hw, PLT64C);
	sc->stats.mptcl += IXGB_READ_REG(&sc->hw, MPTCL);
	sc->stats.mptch += IXGB_READ_REG(&sc->hw, MPTCH);
	sc->stats.bptcl += IXGB_READ_REG(&sc->hw, BPTCL);
	sc->stats.bptch += IXGB_READ_REG(&sc->hw, BPTCH);

	sc->stats.uprcl += IXGB_READ_REG(&sc->hw, UPRCL);
	sc->stats.uprch += IXGB_READ_REG(&sc->hw, UPRCH);
	sc->stats.vprcl += IXGB_READ_REG(&sc->hw, VPRCL);
	sc->stats.vprch += IXGB_READ_REG(&sc->hw, VPRCH);
	sc->stats.jprcl += IXGB_READ_REG(&sc->hw, JPRCL);
	sc->stats.jprch += IXGB_READ_REG(&sc->hw, JPRCH);
	sc->stats.rnbc += IXGB_READ_REG(&sc->hw, RNBC);
	sc->stats.icbc += IXGB_READ_REG(&sc->hw, ICBC);
	sc->stats.ecbc += IXGB_READ_REG(&sc->hw, ECBC);
	sc->stats.uptcl += IXGB_READ_REG(&sc->hw, UPTCL);
	sc->stats.uptch += IXGB_READ_REG(&sc->hw, UPTCH);
	sc->stats.vptcl += IXGB_READ_REG(&sc->hw, VPTCL);
	sc->stats.vptch += IXGB_READ_REG(&sc->hw, VPTCH);
	sc->stats.jptcl += IXGB_READ_REG(&sc->hw, JPTCL);
	sc->stats.jptch += IXGB_READ_REG(&sc->hw, JPTCH);
	sc->stats.tsctc += IXGB_READ_REG(&sc->hw, TSCTC);
	sc->stats.tsctfc += IXGB_READ_REG(&sc->hw, TSCTFC);
	sc->stats.ibic += IXGB_READ_REG(&sc->hw, IBIC);
	sc->stats.lfc += IXGB_READ_REG(&sc->hw, LFC);
	sc->stats.pfrc += IXGB_READ_REG(&sc->hw, PFRC);
	sc->stats.pftc += IXGB_READ_REG(&sc->hw, PFTC);
	sc->stats.mcfrc += IXGB_READ_REG(&sc->hw, MCFRC);

	ifp = &sc->interface_data.ac_if;

	/* Fill out the OS statistics structure */
	ifp->if_collisions = 0;

	/* Rx Errors */
	ifp->if_ierrors =
	    sc->dropped_pkts +
	    sc->stats.crcerrs +
	    sc->stats.rnbc +
	    sc->stats.mpc +
	    sc->stats.rlec;

	/* Tx Errors */
	ifp->if_oerrors =
	    sc->watchdog_events;
}

#ifdef IXGB_DEBUG
/**********************************************************************
 *
 *  This routine is called only when ixgb_display_debug_stats is enabled.
 *  This routine provides a way to take a look at important statistics
 *  maintained by the driver and hardware.
 *
 **********************************************************************/
void
ixgb_print_hw_stats(struct ixgb_softc *sc)
{
	char            buf_speed[100], buf_type[100];
	ixgb_bus_speed  bus_speed;
	ixgb_bus_type   bus_type;
	const char * const unit = sc->sc_dv.dv_xname;

	bus_speed = sc->hw.bus.speed;
	bus_type = sc->hw.bus.type;
	snprintf(buf_speed, sizeof(buf_speed),
	    bus_speed == ixgb_bus_speed_33 ? "33MHz" :
	    bus_speed == ixgb_bus_speed_66 ? "66MHz" :
	    bus_speed == ixgb_bus_speed_100 ? "100MHz" :
	    bus_speed == ixgb_bus_speed_133 ? "133MHz" :
	    "UNKNOWN");
	printf("%s: PCI_Bus_Speed = %s\n", unit,
	    buf_speed);

	snprintf(buf_type, sizeof(buf_type),
	    bus_type == ixgb_bus_type_pci ? "PCI" :
	    bus_type == ixgb_bus_type_pcix ? "PCI-X" :
	    "UNKNOWN");
	printf("%s: PCI_Bus_Type = %s\n", unit,
	    buf_type);

	printf("%s: Tx Descriptors not Avail1 = %ld\n", unit,
	    sc->no_tx_desc_avail1);
	printf("%s: Tx Descriptors not Avail2 = %ld\n", unit,
	    sc->no_tx_desc_avail2);
	printf("%s: Std Mbuf Failed = %ld\n", unit,
	    sc->mbuf_alloc_failed);
	printf("%s: Std Cluster Failed = %ld\n", unit,
	    sc->mbuf_cluster_failed);

	printf("%s: Defer count = %lld\n", unit,
	    (long long)sc->stats.dc);
	printf("%s: Missed Packets = %lld\n", unit,
	    (long long)sc->stats.mpc);
	printf("%s: Receive No Buffers = %lld\n", unit,
	    (long long)sc->stats.rnbc);
	printf("%s: Receive length errors = %lld\n", unit,
	    (long long)sc->stats.rlec);
	printf("%s: Crc errors = %lld\n", unit,
	    (long long)sc->stats.crcerrs);
	printf("%s: Driver dropped packets = %ld\n", unit,
	    sc->dropped_pkts);

	printf("%s: XON Rcvd = %lld\n", unit,
	    (long long)sc->stats.xonrxc);
	printf("%s: XON Xmtd = %lld\n", unit,
	    (long long)sc->stats.xontxc);
	printf("%s: XOFF Rcvd = %lld\n", unit,
	    (long long)sc->stats.xoffrxc);
	printf("%s: XOFF Xmtd = %lld\n", unit,
	    (long long)sc->stats.xofftxc);

	printf("%s: Good Packets Rcvd = %lld\n", unit,
	    (long long)sc->stats.gprcl);
	printf("%s: Good Packets Xmtd = %lld\n", unit,
	    (long long)sc->stats.gptcl);

	printf("%s: Jumbo frames recvd = %lld\n", unit,
	    (long long)sc->stats.jprcl);
	printf("%s: Jumbo frames Xmtd = %lld\n", unit,
	    (long long)sc->stats.jptcl);
}
#endif
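#ifdef IXGB_DEBUG
/*
 * Usage sketch (illustrative only, not part of the original driver):
 * ixgb_print_hw_stats() is intended to run only while
 * ixgb_display_debug_stats is non-zero, e.g. from a periodic timer.
 * The helper name and call site below are assumptions made for the
 * example; only the flag and ixgb_print_hw_stats() come from the code
 * above.
 */
static void
ixgb_debug_stats_tick(struct ixgb_softc *sc)
{
	/* Dump the driver/hardware statistics when debugging is enabled. */
	if (ixgb_display_debug_stats)
		ixgb_print_hw_stats(sc);
}
#endif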