1 /* $OpenBSD: if_igc.c,v 1.18 2024/02/23 01:06:18 kevlo Exp $ */ 2 /*- 3 * SPDX-License-Identifier: BSD-2-Clause 4 * 5 * Copyright (c) 2016 Nicole Graziano <nicole@nextbsd.org> 6 * All rights reserved. 7 * Copyright (c) 2021 Rubicon Communications, LLC (Netgate) 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 29 */ 30 31 #include "bpfilter.h" 32 #include "vlan.h" 33 34 #include <sys/param.h> 35 #include <sys/systm.h> 36 #include <sys/sockio.h> 37 #include <sys/mbuf.h> 38 #include <sys/malloc.h> 39 #include <sys/kernel.h> 40 #include <sys/socket.h> 41 #include <sys/device.h> 42 #include <sys/endian.h> 43 #include <sys/intrmap.h> 44 45 #include <net/if.h> 46 #include <net/if_media.h> 47 #include <net/toeplitz.h> 48 49 #include <netinet/in.h> 50 #include <netinet/if_ether.h> 51 #include <netinet/ip.h> 52 #include <netinet/ip6.h> 53 54 #if NBPFILTER > 0 55 #include <net/bpf.h> 56 #endif 57 58 #include <machine/bus.h> 59 #include <machine/intr.h> 60 61 #include <dev/pci/pcivar.h> 62 #include <dev/pci/pcireg.h> 63 #include <dev/pci/pcidevs.h> 64 #include <dev/pci/if_igc.h> 65 #include <dev/pci/igc_hw.h> 66 67 const struct pci_matchid igc_devices[] = { 68 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I220_V }, 69 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I221_V }, 70 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_BLANK_NVM }, 71 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_I }, 72 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_IT }, 73 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_K }, 74 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_K2 }, 75 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_LM }, 76 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_LMVP }, 77 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_V }, 78 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I226_BLANK_NVM }, 79 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I226_IT }, 80 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I226_LM }, 81 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I226_K }, 82 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I226_V } 83 }; 84 85 /********************************************************************* 86 * Function Prototypes 87 *********************************************************************/ 88 int igc_match(struct device *, void *, void *); 89 void igc_attach(struct device *, struct device *, void *); 90 int igc_detach(struct device *, 
int); 91 92 void igc_identify_hardware(struct igc_softc *); 93 int igc_allocate_pci_resources(struct igc_softc *); 94 int igc_allocate_queues(struct igc_softc *); 95 void igc_free_pci_resources(struct igc_softc *); 96 void igc_reset(struct igc_softc *); 97 void igc_init_dmac(struct igc_softc *, uint32_t); 98 int igc_allocate_msix(struct igc_softc *); 99 void igc_setup_msix(struct igc_softc *); 100 int igc_dma_malloc(struct igc_softc *, bus_size_t, struct igc_dma_alloc *); 101 void igc_dma_free(struct igc_softc *, struct igc_dma_alloc *); 102 void igc_setup_interface(struct igc_softc *); 103 104 void igc_init(void *); 105 void igc_start(struct ifqueue *); 106 int igc_txeof(struct tx_ring *); 107 void igc_stop(struct igc_softc *); 108 int igc_ioctl(struct ifnet *, u_long, caddr_t); 109 int igc_rxrinfo(struct igc_softc *, struct if_rxrinfo *); 110 int igc_rxfill(struct rx_ring *); 111 void igc_rxrefill(void *); 112 int igc_rxeof(struct rx_ring *); 113 void igc_rx_checksum(uint32_t, struct mbuf *, uint32_t); 114 void igc_watchdog(struct ifnet *); 115 void igc_media_status(struct ifnet *, struct ifmediareq *); 116 int igc_media_change(struct ifnet *); 117 void igc_iff(struct igc_softc *); 118 void igc_update_link_status(struct igc_softc *); 119 int igc_get_buf(struct rx_ring *, int); 120 int igc_tx_ctx_setup(struct tx_ring *, struct mbuf *, int, uint32_t *); 121 122 void igc_configure_queues(struct igc_softc *); 123 void igc_set_queues(struct igc_softc *, uint32_t, uint32_t, int); 124 void igc_enable_queue(struct igc_softc *, uint32_t); 125 void igc_enable_intr(struct igc_softc *); 126 void igc_disable_intr(struct igc_softc *); 127 int igc_intr_link(void *); 128 int igc_intr_queue(void *); 129 130 int igc_allocate_transmit_buffers(struct tx_ring *); 131 int igc_setup_transmit_structures(struct igc_softc *); 132 int igc_setup_transmit_ring(struct tx_ring *); 133 void igc_initialize_transmit_unit(struct igc_softc *); 134 void igc_free_transmit_structures(struct igc_softc *); 135 void igc_free_transmit_buffers(struct tx_ring *); 136 int igc_allocate_receive_buffers(struct rx_ring *); 137 int igc_setup_receive_structures(struct igc_softc *); 138 int igc_setup_receive_ring(struct rx_ring *); 139 void igc_initialize_receive_unit(struct igc_softc *); 140 void igc_free_receive_structures(struct igc_softc *); 141 void igc_free_receive_buffers(struct rx_ring *); 142 void igc_initialize_rss_mapping(struct igc_softc *); 143 144 void igc_get_hw_control(struct igc_softc *); 145 void igc_release_hw_control(struct igc_softc *); 146 int igc_is_valid_ether_addr(uint8_t *); 147 148 /********************************************************************* 149 * OpenBSD Device Interface Entry Points 150 *********************************************************************/ 151 152 struct cfdriver igc_cd = { 153 NULL, "igc", DV_IFNET 154 }; 155 156 const struct cfattach igc_ca = { 157 sizeof(struct igc_softc), igc_match, igc_attach, igc_detach 158 }; 159 160 /********************************************************************* 161 * Device identification routine 162 * 163 * igc_match determines if the driver should be loaded on 164 * adapter based on PCI vendor/device id of the adapter. 
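 *  The supported adapters are the I220/I221/I225/I226 family devices
 *  listed in the igc_devices[] table above.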
 *
 *  return 0 on success, positive on failure
 *********************************************************************/
int
igc_match(struct device *parent, void *match, void *aux)
{
	return pci_matchbyid((struct pci_attach_args *)aux, igc_devices,
	    nitems(igc_devices));
}

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/
void
igc_attach(struct device *parent, struct device *self, void *aux)
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	struct igc_softc *sc = (struct igc_softc *)self;
	struct igc_hw *hw = &sc->hw;

	sc->osdep.os_sc = sc;
	sc->osdep.os_pa = *pa;

	/* Determine hardware and mac info */
	igc_identify_hardware(sc);

	sc->num_tx_desc = IGC_DEFAULT_TXD;
	sc->num_rx_desc = IGC_DEFAULT_RXD;

	/* Setup PCI resources */
	if (igc_allocate_pci_resources(sc))
		goto err_pci;

	/* Allocate TX/RX queues */
	if (igc_allocate_queues(sc))
		goto err_pci;

	/* Do shared code initialization */
	if (igc_setup_init_funcs(hw, true)) {
		printf(": Setup of shared code failed\n");
		goto err_pci;
	}

	hw->mac.autoneg = DO_AUTO_NEG;
	hw->phy.autoneg_wait_to_complete = false;
	hw->phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;

	/* Copper options. */
	if (hw->phy.media_type == igc_media_type_copper)
		hw->phy.mdix = AUTO_ALL_MODES;

	/* Set the max frame size. */
	sc->hw.mac.max_frame_size = 9234;

	/* Allocate multicast array memory. */
	sc->mta = mallocarray(ETHER_ADDR_LEN, MAX_NUM_MULTICAST_ADDRESSES,
	    M_DEVBUF, M_NOWAIT);
	if (sc->mta == NULL) {
		printf(": Can not allocate multicast setup array\n");
		goto err_late;
	}

	/* Check SOL/IDER usage. */
	if (igc_check_reset_block(hw))
		printf(": PHY reset is blocked due to SOL/IDER session\n");

	/* Disable Energy Efficient Ethernet. */
	sc->hw.dev_spec._i225.eee_disable = true;

	igc_reset_hw(hw);

	/* Make sure we have a good EEPROM before we read from it. */
	if (igc_validate_nvm_checksum(hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state, call it again,
		 * if it fails a second time it's a real issue.
		 */
		if (igc_validate_nvm_checksum(hw) < 0) {
			printf(": The EEPROM checksum is not valid\n");
			goto err_late;
		}
	}

	/* Copy the permanent MAC address out of the EEPROM. */
	if (igc_read_mac_addr(hw) < 0) {
		printf(": EEPROM read error while reading MAC address\n");
		goto err_late;
	}

	if (!igc_is_valid_ether_addr(hw->mac.addr)) {
		printf(": Invalid MAC address\n");
		goto err_late;
	}

	memcpy(sc->sc_ac.ac_enaddr, sc->hw.mac.addr, ETHER_ADDR_LEN);

	if (igc_allocate_msix(sc))
		goto err_late;

	/* Setup OS specific network interface. */
	igc_setup_interface(sc);

	igc_reset(sc);
	hw->mac.get_link_status = true;
	igc_update_link_status(sc);

	/* The driver can now take control from firmware.
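	 * igc_get_hw_control() below signals to the firmware that an OS
	 * driver is now in charge of the device (conventionally via the
	 * DRV_LOAD bit in CTRL_EXT).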
*/ 279 igc_get_hw_control(sc); 280 281 printf(", address %s\n", ether_sprintf(sc->hw.mac.addr)); 282 return; 283 284 err_late: 285 igc_release_hw_control(sc); 286 err_pci: 287 igc_free_pci_resources(sc); 288 free(sc->mta, M_DEVBUF, ETHER_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES); 289 } 290 291 /********************************************************************* 292 * Device removal routine 293 * 294 * The detach entry point is called when the driver is being removed. 295 * This routine stops the adapter and deallocates all the resources 296 * that were allocated for driver operation. 297 * 298 * return 0 on success, positive on failure 299 *********************************************************************/ 300 int 301 igc_detach(struct device *self, int flags) 302 { 303 struct igc_softc *sc = (struct igc_softc *)self; 304 struct ifnet *ifp = &sc->sc_ac.ac_if; 305 306 igc_stop(sc); 307 308 igc_phy_hw_reset(&sc->hw); 309 igc_release_hw_control(sc); 310 311 ether_ifdetach(ifp); 312 if_detach(ifp); 313 314 igc_free_pci_resources(sc); 315 316 igc_free_transmit_structures(sc); 317 igc_free_receive_structures(sc); 318 free(sc->mta, M_DEVBUF, ETHER_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES); 319 320 return 0; 321 } 322 323 void 324 igc_identify_hardware(struct igc_softc *sc) 325 { 326 struct igc_osdep *os = &sc->osdep; 327 struct pci_attach_args *pa = &os->os_pa; 328 329 /* Save off the information about this board. */ 330 sc->hw.device_id = PCI_PRODUCT(pa->pa_id); 331 332 /* Do shared code init and setup. */ 333 if (igc_set_mac_type(&sc->hw)) { 334 printf(": Setup init failure\n"); 335 return; 336 } 337 } 338 339 int 340 igc_allocate_pci_resources(struct igc_softc *sc) 341 { 342 struct igc_osdep *os = &sc->osdep; 343 struct pci_attach_args *pa = &os->os_pa; 344 pcireg_t memtype; 345 346 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, IGC_PCIREG); 347 if (pci_mapreg_map(pa, IGC_PCIREG, memtype, 0, &os->os_memt, 348 &os->os_memh, &os->os_membase, &os->os_memsize, 0)) { 349 printf(": unable to map registers\n"); 350 return ENXIO; 351 } 352 sc->hw.hw_addr = (uint8_t *)os->os_membase; 353 sc->hw.back = os; 354 355 igc_setup_msix(sc); 356 357 return 0; 358 } 359 360 int 361 igc_allocate_queues(struct igc_softc *sc) 362 { 363 struct igc_queue *iq; 364 struct tx_ring *txr; 365 struct rx_ring *rxr; 366 int i, rsize, rxconf, tsize, txconf; 367 368 /* Allocate the top level queue structs. */ 369 sc->queues = mallocarray(sc->sc_nqueues, sizeof(struct igc_queue), 370 M_DEVBUF, M_NOWAIT | M_ZERO); 371 if (sc->queues == NULL) { 372 printf("%s: unable to allocate queue\n", DEVNAME(sc)); 373 goto fail; 374 } 375 376 /* Allocate the TX ring. */ 377 sc->tx_rings = mallocarray(sc->sc_nqueues, sizeof(struct tx_ring), 378 M_DEVBUF, M_NOWAIT | M_ZERO); 379 if (sc->tx_rings == NULL) { 380 printf("%s: unable to allocate TX ring\n", DEVNAME(sc)); 381 goto fail; 382 } 383 384 /* Allocate the RX ring. */ 385 sc->rx_rings = mallocarray(sc->sc_nqueues, sizeof(struct rx_ring), 386 M_DEVBUF, M_NOWAIT | M_ZERO); 387 if (sc->rx_rings == NULL) { 388 printf("%s: unable to allocate RX ring\n", DEVNAME(sc)); 389 goto rx_fail; 390 } 391 392 txconf = rxconf = 0; 393 394 /* Set up the TX queues. 
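	 * Each ring gets a DMA-able descriptor area of tsize bytes, i.e.
	 * num_tx_desc advanced descriptors rounded up to IGC_DBA_ALIGN.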
*/ 395 tsize = roundup2(sc->num_tx_desc * sizeof(union igc_adv_tx_desc), 396 IGC_DBA_ALIGN); 397 for (i = 0; i < sc->sc_nqueues; i++, txconf++) { 398 txr = &sc->tx_rings[i]; 399 txr->sc = sc; 400 txr->me = i; 401 402 if (igc_dma_malloc(sc, tsize, &txr->txdma)) { 403 printf("%s: unable to allocate TX descriptor\n", 404 DEVNAME(sc)); 405 goto err_tx_desc; 406 } 407 txr->tx_base = (union igc_adv_tx_desc *)txr->txdma.dma_vaddr; 408 bzero((void *)txr->tx_base, tsize); 409 } 410 411 /* Set up the RX queues. */ 412 rsize = roundup2(sc->num_rx_desc * sizeof(union igc_adv_rx_desc), 413 IGC_DBA_ALIGN); 414 for (i = 0; i < sc->sc_nqueues; i++, rxconf++) { 415 rxr = &sc->rx_rings[i]; 416 rxr->sc = sc; 417 rxr->me = i; 418 timeout_set(&rxr->rx_refill, igc_rxrefill, rxr); 419 420 if (igc_dma_malloc(sc, rsize, &rxr->rxdma)) { 421 printf("%s: unable to allocate RX descriptor\n", 422 DEVNAME(sc)); 423 goto err_rx_desc; 424 } 425 rxr->rx_base = (union igc_adv_rx_desc *)rxr->rxdma.dma_vaddr; 426 bzero((void *)rxr->rx_base, rsize); 427 } 428 429 /* Set up the queue holding structs. */ 430 for (i = 0; i < sc->sc_nqueues; i++) { 431 iq = &sc->queues[i]; 432 iq->sc = sc; 433 iq->txr = &sc->tx_rings[i]; 434 iq->rxr = &sc->rx_rings[i]; 435 snprintf(iq->name, sizeof(iq->name), "%s:%d", DEVNAME(sc), i); 436 } 437 438 return 0; 439 440 err_rx_desc: 441 for (rxr = sc->rx_rings; rxconf > 0; rxr++, rxconf--) 442 igc_dma_free(sc, &rxr->rxdma); 443 err_tx_desc: 444 for (txr = sc->tx_rings; txconf > 0; txr++, txconf--) 445 igc_dma_free(sc, &txr->txdma); 446 free(sc->rx_rings, M_DEVBUF, sc->sc_nqueues * sizeof(struct rx_ring)); 447 sc->rx_rings = NULL; 448 rx_fail: 449 free(sc->tx_rings, M_DEVBUF, sc->sc_nqueues * sizeof(struct tx_ring)); 450 sc->tx_rings = NULL; 451 fail: 452 return ENOMEM; 453 } 454 455 void 456 igc_free_pci_resources(struct igc_softc *sc) 457 { 458 struct igc_osdep *os = &sc->osdep; 459 struct pci_attach_args *pa = &os->os_pa; 460 struct igc_queue *iq = sc->queues; 461 int i; 462 463 /* Release all msix queue resources. */ 464 for (i = 0; i < sc->sc_nqueues; i++, iq++) { 465 if (iq->tag) 466 pci_intr_disestablish(pa->pa_pc, iq->tag); 467 iq->tag = NULL; 468 } 469 470 if (sc->tag) 471 pci_intr_disestablish(pa->pa_pc, sc->tag); 472 sc->tag = NULL; 473 if (os->os_membase != 0) 474 bus_space_unmap(os->os_memt, os->os_memh, os->os_memsize); 475 os->os_membase = 0; 476 } 477 478 /********************************************************************* 479 * 480 * Initialize the hardware to a configuration as specified by the 481 * adapter structure. 482 * 483 **********************************************************************/ 484 void 485 igc_reset(struct igc_softc *sc) 486 { 487 struct igc_hw *hw = &sc->hw; 488 uint32_t pba; 489 uint16_t rx_buffer_size; 490 491 /* Let the firmware know the OS is in control */ 492 igc_get_hw_control(sc); 493 494 /* 495 * Packet Buffer Allocation (PBA) 496 * Writing PBA sets the receive portion of the buffer 497 * the remainder is used for the transmit buffer. 498 */ 499 pba = IGC_PBA_34K; 500 501 /* 502 * These parameters control the automatic generation (Tx) and 503 * response (Rx) to Ethernet PAUSE frames. 504 * - High water mark should allow for at least two frames to be 505 * received after sending an XOFF. 506 * - Low water mark works best when it is very near the high water mark. 507 * This allows the receiver to restart by sending XON when it has 508 * drained a bit. 
Here we use an arbitrary value of 1500 which will 509 * restart after one full frame is pulled from the buffer. There 510 * could be several smaller frames in the buffer and if so they will 511 * not trigger the XON until their total number reduces the buffer 512 * by 1500. 513 * - The pause time is fairly large at 1000 x 512ns = 512 usec. 514 */ 515 rx_buffer_size = (pba & 0xffff) << 10; 516 hw->fc.high_water = rx_buffer_size - 517 roundup2(sc->hw.mac.max_frame_size, 1024); 518 /* 16-byte granularity */ 519 hw->fc.low_water = hw->fc.high_water - 16; 520 521 if (sc->fc) /* locally set flow control value? */ 522 hw->fc.requested_mode = sc->fc; 523 else 524 hw->fc.requested_mode = igc_fc_full; 525 526 hw->fc.pause_time = IGC_FC_PAUSE_TIME; 527 528 hw->fc.send_xon = true; 529 530 /* Issue a global reset */ 531 igc_reset_hw(hw); 532 IGC_WRITE_REG(hw, IGC_WUC, 0); 533 534 /* and a re-init */ 535 if (igc_init_hw(hw) < 0) { 536 printf(": Hardware Initialization Failed\n"); 537 return; 538 } 539 540 /* Setup DMA Coalescing */ 541 igc_init_dmac(sc, pba); 542 543 IGC_WRITE_REG(hw, IGC_VET, ETHERTYPE_VLAN); 544 igc_get_phy_info(hw); 545 igc_check_for_link(hw); 546 } 547 548 /********************************************************************* 549 * 550 * Initialize the DMA Coalescing feature 551 * 552 **********************************************************************/ 553 void 554 igc_init_dmac(struct igc_softc *sc, uint32_t pba) 555 { 556 struct igc_hw *hw = &sc->hw; 557 uint32_t dmac, reg = ~IGC_DMACR_DMAC_EN; 558 uint16_t hwm, max_frame_size; 559 int status; 560 561 max_frame_size = sc->hw.mac.max_frame_size; 562 563 if (sc->dmac == 0) { /* Disabling it */ 564 IGC_WRITE_REG(hw, IGC_DMACR, reg); 565 return; 566 } else 567 printf(": DMA Coalescing enabled\n"); 568 569 /* Set starting threshold */ 570 IGC_WRITE_REG(hw, IGC_DMCTXTH, 0); 571 572 hwm = 64 * pba - max_frame_size / 16; 573 if (hwm < 64 * (pba - 6)) 574 hwm = 64 * (pba - 6); 575 reg = IGC_READ_REG(hw, IGC_FCRTC); 576 reg &= ~IGC_FCRTC_RTH_COAL_MASK; 577 reg |= ((hwm << IGC_FCRTC_RTH_COAL_SHIFT) 578 & IGC_FCRTC_RTH_COAL_MASK); 579 IGC_WRITE_REG(hw, IGC_FCRTC, reg); 580 581 dmac = pba - max_frame_size / 512; 582 if (dmac < pba - 10) 583 dmac = pba - 10; 584 reg = IGC_READ_REG(hw, IGC_DMACR); 585 reg &= ~IGC_DMACR_DMACTHR_MASK; 586 reg |= ((dmac << IGC_DMACR_DMACTHR_SHIFT) 587 & IGC_DMACR_DMACTHR_MASK); 588 589 /* transition to L0x or L1 if available..*/ 590 reg |= (IGC_DMACR_DMAC_EN | IGC_DMACR_DMAC_LX_MASK); 591 592 /* Check if status is 2.5Gb backplane connection 593 * before configuration of watchdog timer, which is 594 * in msec values in 12.8usec intervals 595 * watchdog timer= msec values in 32usec intervals 596 * for non 2.5Gb connection 597 */ 598 status = IGC_READ_REG(hw, IGC_STATUS); 599 if ((status & IGC_STATUS_2P5_SKU) && 600 (!(status & IGC_STATUS_2P5_SKU_OVER))) 601 reg |= ((sc->dmac * 5) >> 6); 602 else 603 reg |= (sc->dmac >> 5); 604 605 IGC_WRITE_REG(hw, IGC_DMACR, reg); 606 607 IGC_WRITE_REG(hw, IGC_DMCRTRH, 0); 608 609 /* Set the interval before transition */ 610 reg = IGC_READ_REG(hw, IGC_DMCTLX); 611 reg |= IGC_DMCTLX_DCFLUSH_DIS; 612 613 /* 614 ** in 2.5Gb connection, TTLX unit is 0.4 usec 615 ** which is 0x4*2 = 0xA. 
But delay is still 4 usec 616 */ 617 status = IGC_READ_REG(hw, IGC_STATUS); 618 if ((status & IGC_STATUS_2P5_SKU) && 619 (!(status & IGC_STATUS_2P5_SKU_OVER))) 620 reg |= 0xA; 621 else 622 reg |= 0x4; 623 624 IGC_WRITE_REG(hw, IGC_DMCTLX, reg); 625 626 /* free space in tx packet buffer to wake from DMA coal */ 627 IGC_WRITE_REG(hw, IGC_DMCTXTH, (IGC_TXPBSIZE - 628 (2 * max_frame_size)) >> 6); 629 630 /* make low power state decision controlled by DMA coal */ 631 reg = IGC_READ_REG(hw, IGC_PCIEMISC); 632 reg &= ~IGC_PCIEMISC_LX_DECISION; 633 IGC_WRITE_REG(hw, IGC_PCIEMISC, reg); 634 } 635 636 int 637 igc_allocate_msix(struct igc_softc *sc) 638 { 639 struct igc_osdep *os = &sc->osdep; 640 struct pci_attach_args *pa = &os->os_pa; 641 struct igc_queue *iq; 642 pci_intr_handle_t ih; 643 int i, error = 0; 644 645 for (i = 0, iq = sc->queues; i < sc->sc_nqueues; i++, iq++) { 646 if (pci_intr_map_msix(pa, i, &ih)) { 647 printf("%s: unable to map msi-x vector %d\n", 648 DEVNAME(sc), i); 649 error = ENOMEM; 650 goto fail; 651 } 652 653 iq->tag = pci_intr_establish_cpu(pa->pa_pc, ih, 654 IPL_NET | IPL_MPSAFE, intrmap_cpu(sc->sc_intrmap, i), 655 igc_intr_queue, iq, iq->name); 656 if (iq->tag == NULL) { 657 printf("%s: unable to establish interrupt %d\n", 658 DEVNAME(sc), i); 659 error = ENOMEM; 660 goto fail; 661 } 662 663 iq->msix = i; 664 iq->eims = 1 << i; 665 } 666 667 /* Now the link status/control last MSI-X vector. */ 668 if (pci_intr_map_msix(pa, i, &ih)) { 669 printf("%s: unable to map link vector\n", DEVNAME(sc)); 670 error = ENOMEM; 671 goto fail; 672 } 673 674 sc->tag = pci_intr_establish(pa->pa_pc, ih, IPL_NET | IPL_MPSAFE, 675 igc_intr_link, sc, sc->sc_dev.dv_xname); 676 if (sc->tag == NULL) { 677 printf("%s: unable to establish link interrupt\n", DEVNAME(sc)); 678 error = ENOMEM; 679 goto fail; 680 } 681 682 sc->linkvec = i; 683 printf(", %s, %d queue%s", pci_intr_string(pa->pa_pc, ih), 684 i, (i > 1) ? "s" : ""); 685 686 return 0; 687 fail: 688 for (iq = sc->queues; i > 0; i--, iq++) { 689 if (iq->tag == NULL) 690 continue; 691 pci_intr_disestablish(pa->pa_pc, iq->tag); 692 iq->tag = NULL; 693 } 694 695 return error; 696 } 697 698 void 699 igc_setup_msix(struct igc_softc *sc) 700 { 701 struct igc_osdep *os = &sc->osdep; 702 struct pci_attach_args *pa = &os->os_pa; 703 int nmsix; 704 705 nmsix = pci_intr_msix_count(pa); 706 if (nmsix <= 1) 707 printf(": not enough msi-x vectors\n"); 708 709 /* Give one vector to events. 
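	 * The last MSI-X vector is reserved for link state events; the
	 * remaining vectors are spread across the RX/TX queue pairs via
	 * intrmap_create() below.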
*/ 710 nmsix--; 711 712 sc->sc_intrmap = intrmap_create(&sc->sc_dev, nmsix, IGC_MAX_VECTORS, 713 INTRMAP_POWEROF2); 714 sc->sc_nqueues = intrmap_count(sc->sc_intrmap); 715 } 716 717 int 718 igc_dma_malloc(struct igc_softc *sc, bus_size_t size, struct igc_dma_alloc *dma) 719 { 720 struct igc_osdep *os = &sc->osdep; 721 722 dma->dma_tag = os->os_pa.pa_dmat; 723 724 if (bus_dmamap_create(dma->dma_tag, size, 1, size, 0, BUS_DMA_NOWAIT, 725 &dma->dma_map)) 726 return 1; 727 if (bus_dmamem_alloc(dma->dma_tag, size, PAGE_SIZE, 0, &dma->dma_seg, 728 1, &dma->dma_nseg, BUS_DMA_NOWAIT)) 729 goto destroy; 730 if (bus_dmamem_map(dma->dma_tag, &dma->dma_seg, dma->dma_nseg, size, 731 &dma->dma_vaddr, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) 732 goto free; 733 if (bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, size, 734 NULL, BUS_DMA_NOWAIT)) 735 goto unmap; 736 737 dma->dma_size = size; 738 739 return 0; 740 unmap: 741 bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size); 742 free: 743 bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg); 744 destroy: 745 bus_dmamap_destroy(dma->dma_tag, dma->dma_map); 746 dma->dma_map = NULL; 747 dma->dma_tag = NULL; 748 return 1; 749 } 750 751 void 752 igc_dma_free(struct igc_softc *sc, struct igc_dma_alloc *dma) 753 { 754 if (dma->dma_tag == NULL) 755 return; 756 757 if (dma->dma_map != NULL) { 758 bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0, 759 dma->dma_map->dm_mapsize, 760 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 761 bus_dmamap_unload(dma->dma_tag, dma->dma_map); 762 bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, dma->dma_size); 763 bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg); 764 bus_dmamap_destroy(dma->dma_tag, dma->dma_map); 765 dma->dma_map = NULL; 766 } 767 } 768 769 /********************************************************************* 770 * 771 * Setup networking device structure and register an interface. 772 * 773 **********************************************************************/ 774 void 775 igc_setup_interface(struct igc_softc *sc) 776 { 777 struct ifnet *ifp = &sc->sc_ac.ac_if; 778 int i; 779 780 ifp->if_softc = sc; 781 strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ); 782 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 783 ifp->if_xflags = IFXF_MPSAFE; 784 ifp->if_ioctl = igc_ioctl; 785 ifp->if_qstart = igc_start; 786 ifp->if_watchdog = igc_watchdog; 787 ifp->if_hardmtu = sc->hw.mac.max_frame_size - ETHER_HDR_LEN - 788 ETHER_CRC_LEN; 789 ifq_init_maxlen(&ifp->if_snd, sc->num_tx_desc - 1); 790 791 ifp->if_capabilities = IFCAP_VLAN_MTU; 792 793 #ifdef notyet 794 #if NVLAN > 0 795 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING; 796 #endif 797 #endif 798 799 ifp->if_capabilities |= IFCAP_CSUM_IPv4; 800 ifp->if_capabilities |= IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4; 801 ifp->if_capabilities |= IFCAP_CSUM_TCPv6 | IFCAP_CSUM_UDPv6; 802 803 /* Initialize ifmedia structures. 
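	 * Register all copper media types supported by the I225/I226 family
	 * (10/100/1000/2500BASE-T) and default to autoselect.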
*/ 804 ifmedia_init(&sc->media, IFM_IMASK, igc_media_change, igc_media_status); 805 ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL); 806 ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL); 807 ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL); 808 ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL); 809 ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); 810 ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T, 0, NULL); 811 ifmedia_add(&sc->media, IFM_ETHER | IFM_2500_T, 0, NULL); 812 813 ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL); 814 ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO); 815 816 if_attach(ifp); 817 ether_ifattach(ifp); 818 819 if_attach_queues(ifp, sc->sc_nqueues); 820 if_attach_iqueues(ifp, sc->sc_nqueues); 821 for (i = 0; i < sc->sc_nqueues; i++) { 822 struct ifqueue *ifq = ifp->if_ifqs[i]; 823 struct ifiqueue *ifiq = ifp->if_iqs[i]; 824 struct tx_ring *txr = &sc->tx_rings[i]; 825 struct rx_ring *rxr = &sc->rx_rings[i]; 826 827 ifq->ifq_softc = txr; 828 txr->ifq = ifq; 829 830 ifiq->ifiq_softc = rxr; 831 rxr->ifiq = ifiq; 832 } 833 } 834 835 void 836 igc_init(void *arg) 837 { 838 struct igc_softc *sc = (struct igc_softc *)arg; 839 struct ifnet *ifp = &sc->sc_ac.ac_if; 840 struct rx_ring *rxr; 841 uint32_t ctrl = 0; 842 int i, s; 843 844 s = splnet(); 845 846 igc_stop(sc); 847 848 /* Get the latest mac address, user can use a LAA. */ 849 bcopy(sc->sc_ac.ac_enaddr, sc->hw.mac.addr, ETHER_ADDR_LEN); 850 851 /* Put the address into the receive address array. */ 852 igc_rar_set(&sc->hw, sc->hw.mac.addr, 0); 853 854 /* Initialize the hardware. */ 855 igc_reset(sc); 856 igc_update_link_status(sc); 857 858 /* Setup VLAN support, basic and offload if available. */ 859 IGC_WRITE_REG(&sc->hw, IGC_VET, ETHERTYPE_VLAN); 860 861 /* Prepare transmit descriptors and buffers. */ 862 if (igc_setup_transmit_structures(sc)) { 863 printf("%s: Could not setup transmit structures\n", 864 DEVNAME(sc)); 865 igc_stop(sc); 866 splx(s); 867 return; 868 } 869 igc_initialize_transmit_unit(sc); 870 871 sc->rx_mbuf_sz = MCLBYTES + ETHER_ALIGN; 872 /* Prepare receive descriptors and buffers. */ 873 if (igc_setup_receive_structures(sc)) { 874 printf("%s: Could not setup receive structures\n", 875 DEVNAME(sc)); 876 igc_stop(sc); 877 splx(s); 878 return; 879 } 880 igc_initialize_receive_unit(sc); 881 882 if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) { 883 ctrl = IGC_READ_REG(&sc->hw, IGC_CTRL); 884 ctrl |= IGC_CTRL_VME; 885 IGC_WRITE_REG(&sc->hw, IGC_CTRL, ctrl); 886 } 887 888 /* Setup multicast table. */ 889 igc_iff(sc); 890 891 igc_clear_hw_cntrs_base_generic(&sc->hw); 892 893 igc_configure_queues(sc); 894 895 /* This clears any pending interrupts */ 896 IGC_READ_REG(&sc->hw, IGC_ICR); 897 IGC_WRITE_REG(&sc->hw, IGC_ICS, IGC_ICS_LSC); 898 899 /* The driver can now take control from firmware. */ 900 igc_get_hw_control(sc); 901 902 /* Set Energy Efficient Ethernet. 
*/ 903 igc_set_eee_i225(&sc->hw, true, true, true); 904 905 for (i = 0; i < sc->sc_nqueues; i++) { 906 rxr = &sc->rx_rings[i]; 907 igc_rxfill(rxr); 908 if (if_rxr_inuse(&rxr->rx_ring) == 0) { 909 printf("%s: Unable to fill any rx descriptors\n", 910 DEVNAME(sc)); 911 igc_stop(sc); 912 splx(s); 913 } 914 IGC_WRITE_REG(&sc->hw, IGC_RDT(i), 915 (rxr->last_desc_filled + 1) % sc->num_rx_desc); 916 } 917 918 igc_enable_intr(sc); 919 920 ifp->if_flags |= IFF_RUNNING; 921 for (i = 0; i < sc->sc_nqueues; i++) 922 ifq_clr_oactive(ifp->if_ifqs[i]); 923 924 splx(s); 925 } 926 927 static inline int 928 igc_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m) 929 { 930 int error; 931 932 error = bus_dmamap_load_mbuf(dmat, map, m, 933 BUS_DMA_STREAMING | BUS_DMA_NOWAIT); 934 if (error != EFBIG) 935 return (error); 936 937 error = m_defrag(m, M_DONTWAIT); 938 if (error != 0) 939 return (error); 940 941 return (bus_dmamap_load_mbuf(dmat, map, m, 942 BUS_DMA_STREAMING | BUS_DMA_NOWAIT)); 943 } 944 945 void 946 igc_start(struct ifqueue *ifq) 947 { 948 struct ifnet *ifp = ifq->ifq_if; 949 struct igc_softc *sc = ifp->if_softc; 950 struct tx_ring *txr = ifq->ifq_softc; 951 union igc_adv_tx_desc *txdesc; 952 struct igc_tx_buf *txbuf; 953 bus_dmamap_t map; 954 struct mbuf *m; 955 unsigned int prod, free, last, i; 956 unsigned int mask; 957 uint32_t cmd_type_len; 958 uint32_t olinfo_status; 959 int post = 0; 960 #if NBPFILTER > 0 961 caddr_t if_bpf; 962 #endif 963 964 if (!sc->link_active) { 965 ifq_purge(ifq); 966 return; 967 } 968 969 prod = txr->next_avail_desc; 970 free = txr->next_to_clean; 971 if (free <= prod) 972 free += sc->num_tx_desc; 973 free -= prod; 974 975 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0, 976 txr->txdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 977 978 mask = sc->num_tx_desc - 1; 979 980 for (;;) { 981 if (free <= IGC_MAX_SCATTER + 1) { 982 ifq_set_oactive(ifq); 983 break; 984 } 985 986 m = ifq_dequeue(ifq); 987 if (m == NULL) 988 break; 989 990 txbuf = &txr->tx_buffers[prod]; 991 map = txbuf->map; 992 993 if (igc_load_mbuf(txr->txdma.dma_tag, map, m) != 0) { 994 ifq->ifq_errors++; 995 m_freem(m); 996 continue; 997 } 998 999 olinfo_status = m->m_pkthdr.len << IGC_ADVTXD_PAYLEN_SHIFT; 1000 1001 bus_dmamap_sync(txr->txdma.dma_tag, map, 0, 1002 map->dm_mapsize, BUS_DMASYNC_PREWRITE); 1003 1004 if (igc_tx_ctx_setup(txr, m, prod, &olinfo_status)) { 1005 /* Consume the first descriptor */ 1006 prod++; 1007 prod &= mask; 1008 free--; 1009 } 1010 1011 for (i = 0; i < map->dm_nsegs; i++) { 1012 txdesc = &txr->tx_base[prod]; 1013 1014 cmd_type_len = IGC_ADVTXD_DCMD_IFCS | IGC_ADVTXD_DTYP_DATA | 1015 IGC_ADVTXD_DCMD_DEXT | map->dm_segs[i].ds_len; 1016 if (i == map->dm_nsegs - 1) 1017 cmd_type_len |= IGC_ADVTXD_DCMD_EOP | 1018 IGC_ADVTXD_DCMD_RS; 1019 1020 htolem64(&txdesc->read.buffer_addr, map->dm_segs[i].ds_addr); 1021 htolem32(&txdesc->read.cmd_type_len, cmd_type_len); 1022 htolem32(&txdesc->read.olinfo_status, olinfo_status); 1023 1024 last = prod; 1025 1026 prod++; 1027 prod &= mask; 1028 } 1029 1030 txbuf->m_head = m; 1031 txbuf->eop_index = last; 1032 1033 #if NBPFILTER > 0 1034 if_bpf = ifp->if_bpf; 1035 if (if_bpf) 1036 bpf_mtap_ether(if_bpf, m, BPF_DIRECTION_OUT); 1037 #endif 1038 1039 free -= i; 1040 post = 1; 1041 } 1042 1043 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0, 1044 txr->txdma.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 1045 1046 if (post) { 1047 txr->next_avail_desc = prod; 1048 IGC_WRITE_REG(&sc->hw, IGC_TDT(txr->me), prod); 1049 } 
1050 } 1051 1052 int 1053 igc_txeof(struct tx_ring *txr) 1054 { 1055 struct igc_softc *sc = txr->sc; 1056 struct ifqueue *ifq = txr->ifq; 1057 union igc_adv_tx_desc *txdesc; 1058 struct igc_tx_buf *txbuf; 1059 bus_dmamap_t map; 1060 unsigned int cons, prod, last; 1061 unsigned int mask; 1062 int done = 0; 1063 1064 prod = txr->next_avail_desc; 1065 cons = txr->next_to_clean; 1066 1067 if (cons == prod) 1068 return (0); 1069 1070 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0, 1071 txr->txdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 1072 1073 mask = sc->num_tx_desc - 1; 1074 1075 do { 1076 txbuf = &txr->tx_buffers[cons]; 1077 last = txbuf->eop_index; 1078 txdesc = &txr->tx_base[last]; 1079 1080 if (!(txdesc->wb.status & htole32(IGC_TXD_STAT_DD))) 1081 break; 1082 1083 map = txbuf->map; 1084 1085 bus_dmamap_sync(txr->txdma.dma_tag, map, 0, map->dm_mapsize, 1086 BUS_DMASYNC_POSTWRITE); 1087 bus_dmamap_unload(txr->txdma.dma_tag, map); 1088 m_freem(txbuf->m_head); 1089 1090 txbuf->m_head = NULL; 1091 txbuf->eop_index = -1; 1092 1093 cons = last + 1; 1094 cons &= mask; 1095 1096 done = 1; 1097 } while (cons != prod); 1098 1099 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0, 1100 txr->txdma.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD); 1101 1102 txr->next_to_clean = cons; 1103 1104 if (ifq_is_oactive(ifq)) 1105 ifq_restart(ifq); 1106 1107 return (done); 1108 } 1109 1110 /********************************************************************* 1111 * 1112 * This routine disables all traffic on the adapter by issuing a 1113 * global reset on the MAC. 1114 * 1115 **********************************************************************/ 1116 void 1117 igc_stop(struct igc_softc *sc) 1118 { 1119 struct ifnet *ifp = &sc->sc_ac.ac_if; 1120 int i; 1121 1122 /* Tell the stack that the interface is no longer active. */ 1123 ifp->if_flags &= ~IFF_RUNNING; 1124 1125 igc_disable_intr(sc); 1126 1127 igc_reset_hw(&sc->hw); 1128 IGC_WRITE_REG(&sc->hw, IGC_WUC, 0); 1129 1130 intr_barrier(sc->tag); 1131 for (i = 0; i < sc->sc_nqueues; i++) { 1132 struct ifqueue *ifq = ifp->if_ifqs[i]; 1133 ifq_barrier(ifq); 1134 ifq_clr_oactive(ifq); 1135 1136 if (sc->queues[i].tag != NULL) 1137 intr_barrier(sc->queues[i].tag); 1138 timeout_del(&sc->rx_rings[i].rx_refill); 1139 } 1140 1141 igc_free_transmit_structures(sc); 1142 igc_free_receive_structures(sc); 1143 1144 igc_update_link_status(sc); 1145 } 1146 1147 /********************************************************************* 1148 * Ioctl entry point 1149 * 1150 * igc_ioctl is called when the user wants to configure the 1151 * interface. 
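 *  SIOCSIFADDR, SIOCSIFFLAGS, SIOCSIFMEDIA/SIOCGIFMEDIA and SIOCGIFRXR
 *  are handled here; everything else is passed on to ether_ioctl().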
1152 * 1153 * return 0 on success, positive on failure 1154 **********************************************************************/ 1155 int 1156 igc_ioctl(struct ifnet * ifp, u_long cmd, caddr_t data) 1157 { 1158 struct igc_softc *sc = ifp->if_softc; 1159 struct ifreq *ifr = (struct ifreq *)data; 1160 int s, error = 0; 1161 1162 s = splnet(); 1163 1164 switch (cmd) { 1165 case SIOCSIFADDR: 1166 ifp->if_flags |= IFF_UP; 1167 if (!(ifp->if_flags & IFF_RUNNING)) 1168 igc_init(sc); 1169 break; 1170 case SIOCSIFFLAGS: 1171 if (ifp->if_flags & IFF_UP) { 1172 if (ifp->if_flags & IFF_RUNNING) 1173 error = ENETRESET; 1174 else 1175 igc_init(sc); 1176 } else { 1177 if (ifp->if_flags & IFF_RUNNING) 1178 igc_stop(sc); 1179 } 1180 break; 1181 case SIOCSIFMEDIA: 1182 case SIOCGIFMEDIA: 1183 error = ifmedia_ioctl(ifp, ifr, &sc->media, cmd); 1184 break; 1185 case SIOCGIFRXR: 1186 error = igc_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data); 1187 break; 1188 default: 1189 error = ether_ioctl(ifp, &sc->sc_ac, cmd, data); 1190 } 1191 1192 if (error == ENETRESET) { 1193 if (ifp->if_flags & IFF_RUNNING) { 1194 igc_disable_intr(sc); 1195 igc_iff(sc); 1196 igc_enable_intr(sc); 1197 } 1198 error = 0; 1199 } 1200 1201 splx(s); 1202 return error; 1203 } 1204 1205 int 1206 igc_rxrinfo(struct igc_softc *sc, struct if_rxrinfo *ifri) 1207 { 1208 struct if_rxring_info *ifr; 1209 struct rx_ring *rxr; 1210 int error, i, n = 0; 1211 1212 ifr = mallocarray(sc->sc_nqueues, sizeof(*ifr), M_DEVBUF, 1213 M_WAITOK | M_ZERO); 1214 1215 for (i = 0; i < sc->sc_nqueues; i++) { 1216 rxr = &sc->rx_rings[i]; 1217 ifr[n].ifr_size = MCLBYTES; 1218 snprintf(ifr[n].ifr_name, sizeof(ifr[n].ifr_name), "%d", i); 1219 ifr[n].ifr_info = rxr->rx_ring; 1220 n++; 1221 } 1222 1223 error = if_rxr_info_ioctl(ifri, sc->sc_nqueues, ifr); 1224 free(ifr, M_DEVBUF, sc->sc_nqueues * sizeof(*ifr)); 1225 1226 return error; 1227 } 1228 1229 int 1230 igc_rxfill(struct rx_ring *rxr) 1231 { 1232 struct igc_softc *sc = rxr->sc; 1233 int i, post = 0; 1234 u_int slots; 1235 1236 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 0, 1237 rxr->rxdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1238 1239 i = rxr->last_desc_filled; 1240 for (slots = if_rxr_get(&rxr->rx_ring, sc->num_rx_desc); slots > 0; 1241 slots--) { 1242 if (++i == sc->num_rx_desc) 1243 i = 0; 1244 1245 if (igc_get_buf(rxr, i) != 0) 1246 break; 1247 1248 rxr->last_desc_filled = i; 1249 post = 1; 1250 } 1251 1252 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 0, 1253 rxr->rxdma.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 1254 1255 if_rxr_put(&rxr->rx_ring, slots); 1256 1257 return post; 1258 } 1259 1260 void 1261 igc_rxrefill(void *xrxr) 1262 { 1263 struct rx_ring *rxr = xrxr; 1264 struct igc_softc *sc = rxr->sc; 1265 1266 if (igc_rxfill(rxr)) { 1267 IGC_WRITE_REG(&sc->hw, IGC_RDT(rxr->me), 1268 (rxr->last_desc_filled + 1) % sc->num_rx_desc); 1269 } 1270 else if (if_rxr_inuse(&rxr->rx_ring) == 0) 1271 timeout_add(&rxr->rx_refill, 1); 1272 } 1273 1274 /********************************************************************* 1275 * 1276 * This routine executes in interrupt context. It replenishes 1277 * the mbufs in the descriptor and sends data which has been 1278 * dma'ed into host memory to upper layer. 
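 *  Frames that span multiple descriptors are chained through the fmp
 *  pointer of the next receive buffer until a descriptor with EOP set
 *  completes the packet.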
1279 * 1280 *********************************************************************/ 1281 int 1282 igc_rxeof(struct rx_ring *rxr) 1283 { 1284 struct igc_softc *sc = rxr->sc; 1285 struct ifnet *ifp = &sc->sc_ac.ac_if; 1286 struct mbuf_list ml = MBUF_LIST_INITIALIZER(); 1287 struct mbuf *mp, *m; 1288 struct igc_rx_buf *rxbuf, *nxbuf; 1289 union igc_adv_rx_desc *rxdesc; 1290 uint32_t ptype, staterr = 0; 1291 uint16_t len, vtag; 1292 uint8_t eop = 0; 1293 int i, nextp; 1294 1295 if (!ISSET(ifp->if_flags, IFF_RUNNING)) 1296 return 0; 1297 1298 i = rxr->next_to_check; 1299 while (if_rxr_inuse(&rxr->rx_ring) > 0) { 1300 uint32_t hash; 1301 uint16_t hashtype; 1302 1303 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 1304 i * sizeof(union igc_adv_rx_desc), 1305 sizeof(union igc_adv_rx_desc), BUS_DMASYNC_POSTREAD); 1306 1307 rxdesc = &rxr->rx_base[i]; 1308 staterr = letoh32(rxdesc->wb.upper.status_error); 1309 if (!ISSET(staterr, IGC_RXD_STAT_DD)) { 1310 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 1311 i * sizeof(union igc_adv_rx_desc), 1312 sizeof(union igc_adv_rx_desc), BUS_DMASYNC_PREREAD); 1313 break; 1314 } 1315 1316 /* Zero out the receive descriptors status. */ 1317 rxdesc->wb.upper.status_error = 0; 1318 rxbuf = &rxr->rx_buffers[i]; 1319 1320 /* Pull the mbuf off the ring. */ 1321 bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map, 0, 1322 rxbuf->map->dm_mapsize, BUS_DMASYNC_POSTREAD); 1323 bus_dmamap_unload(rxr->rxdma.dma_tag, rxbuf->map); 1324 1325 mp = rxbuf->buf; 1326 len = letoh16(rxdesc->wb.upper.length); 1327 vtag = letoh16(rxdesc->wb.upper.vlan); 1328 eop = ((staterr & IGC_RXD_STAT_EOP) == IGC_RXD_STAT_EOP); 1329 ptype = letoh32(rxdesc->wb.lower.lo_dword.data) & 1330 IGC_PKTTYPE_MASK; 1331 hash = letoh32(rxdesc->wb.lower.hi_dword.rss); 1332 hashtype = le16toh(rxdesc->wb.lower.lo_dword.hs_rss.pkt_info) & 1333 IGC_RXDADV_RSSTYPE_MASK; 1334 1335 if (staterr & IGC_RXDEXT_STATERR_RXE) { 1336 if (rxbuf->fmp) { 1337 m_freem(rxbuf->fmp); 1338 rxbuf->fmp = NULL; 1339 } 1340 1341 m_freem(mp); 1342 rxbuf->buf = NULL; 1343 goto next_desc; 1344 } 1345 1346 if (mp == NULL) { 1347 panic("%s: igc_rxeof: NULL mbuf in slot %d " 1348 "(nrx %d, filled %d)", DEVNAME(sc), i, 1349 if_rxr_inuse(&rxr->rx_ring), rxr->last_desc_filled); 1350 } 1351 1352 if (!eop) { 1353 /* 1354 * Figure out the next descriptor of this frame. 1355 */ 1356 nextp = i + 1; 1357 if (nextp == sc->num_rx_desc) 1358 nextp = 0; 1359 nxbuf = &rxr->rx_buffers[nextp]; 1360 /* prefetch(nxbuf); */ 1361 } 1362 1363 mp->m_len = len; 1364 1365 m = rxbuf->fmp; 1366 rxbuf->buf = rxbuf->fmp = NULL; 1367 1368 if (m != NULL) 1369 m->m_pkthdr.len += mp->m_len; 1370 else { 1371 m = mp; 1372 m->m_pkthdr.len = mp->m_len; 1373 #if NVLAN > 0 1374 if (staterr & IGC_RXD_STAT_VP) { 1375 m->m_pkthdr.ether_vtag = vtag; 1376 m->m_flags |= M_VLANTAG; 1377 } 1378 #endif 1379 } 1380 1381 /* Pass the head pointer on */ 1382 if (eop == 0) { 1383 nxbuf->fmp = m; 1384 m = NULL; 1385 mp->m_next = nxbuf->buf; 1386 } else { 1387 igc_rx_checksum(staterr, m, ptype); 1388 1389 if (hashtype != IGC_RXDADV_RSSTYPE_NONE) { 1390 m->m_pkthdr.ph_flowid = hash; 1391 SET(m->m_pkthdr.csum_flags, M_FLOWID); 1392 } 1393 1394 ml_enqueue(&ml, m); 1395 } 1396 next_desc: 1397 if_rxr_put(&rxr->rx_ring, 1); 1398 bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 1399 i * sizeof(union igc_adv_rx_desc), 1400 sizeof(union igc_adv_rx_desc), BUS_DMASYNC_PREREAD); 1401 1402 /* Advance our pointers to the next descriptor. 
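		 * The ring index wraps back to slot 0 once num_rx_desc
		 * entries have been consumed.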
*/ 1403 if (++i == sc->num_rx_desc) 1404 i = 0; 1405 } 1406 rxr->next_to_check = i; 1407 1408 if (ifiq_input(rxr->ifiq, &ml)) 1409 if_rxr_livelocked(&rxr->rx_ring); 1410 1411 if (!(staterr & IGC_RXD_STAT_DD)) 1412 return 0; 1413 1414 return 1; 1415 } 1416 1417 /********************************************************************* 1418 * 1419 * Verify that the hardware indicated that the checksum is valid. 1420 * Inform the stack about the status of checksum so that stack 1421 * doesn't spend time verifying the checksum. 1422 * 1423 *********************************************************************/ 1424 void 1425 igc_rx_checksum(uint32_t staterr, struct mbuf *m, uint32_t ptype) 1426 { 1427 uint16_t status = (uint16_t)staterr; 1428 uint8_t errors = (uint8_t)(staterr >> 24); 1429 1430 if (status & IGC_RXD_STAT_IPCS) { 1431 if (!(errors & IGC_RXD_ERR_IPE)) { 1432 /* IP Checksum Good */ 1433 m->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK; 1434 } else 1435 m->m_pkthdr.csum_flags = 0; 1436 } 1437 1438 if (status & (IGC_RXD_STAT_TCPCS | IGC_RXD_STAT_UDPCS)) { 1439 if (!(errors & IGC_RXD_ERR_TCPE)) 1440 m->m_pkthdr.csum_flags |= 1441 M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK; 1442 } 1443 } 1444 1445 void 1446 igc_watchdog(struct ifnet * ifp) 1447 { 1448 } 1449 1450 /********************************************************************* 1451 * 1452 * Media Ioctl callback 1453 * 1454 * This routine is called whenever the user queries the status of 1455 * the interface using ifconfig. 1456 * 1457 **********************************************************************/ 1458 void 1459 igc_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 1460 { 1461 struct igc_softc *sc = ifp->if_softc; 1462 1463 igc_update_link_status(sc); 1464 1465 ifmr->ifm_status = IFM_AVALID; 1466 ifmr->ifm_active = IFM_ETHER; 1467 1468 if (!sc->link_active) { 1469 ifmr->ifm_active |= IFM_NONE; 1470 return; 1471 } 1472 1473 ifmr->ifm_status |= IFM_ACTIVE; 1474 1475 switch (sc->link_speed) { 1476 case 10: 1477 ifmr->ifm_active |= IFM_10_T; 1478 break; 1479 case 100: 1480 ifmr->ifm_active |= IFM_100_TX; 1481 break; 1482 case 1000: 1483 ifmr->ifm_active |= IFM_1000_T; 1484 break; 1485 case 2500: 1486 ifmr->ifm_active |= IFM_2500_T; 1487 break; 1488 } 1489 1490 if (sc->link_duplex == FULL_DUPLEX) 1491 ifmr->ifm_active |= IFM_FDX; 1492 else 1493 ifmr->ifm_active |= IFM_HDX; 1494 1495 switch (sc->hw.fc.current_mode) { 1496 case igc_fc_tx_pause: 1497 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE; 1498 break; 1499 case igc_fc_rx_pause: 1500 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE; 1501 break; 1502 case igc_fc_full: 1503 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE | 1504 IFM_ETH_TXPAUSE; 1505 break; 1506 default: 1507 ifmr->ifm_active &= ~(IFM_FLOW | IFM_ETH_RXPAUSE | 1508 IFM_ETH_TXPAUSE); 1509 break; 1510 } 1511 } 1512 1513 /********************************************************************* 1514 * 1515 * Media Ioctl callback 1516 * 1517 * This routine is called when the user changes speed/duplex using 1518 * media/mediopt option with ifconfig. 
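 *  The selected media subtype is translated into the corresponding
 *  autoneg_advertised bits and the interface is reinitialized.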
1519 * 1520 **********************************************************************/ 1521 int 1522 igc_media_change(struct ifnet *ifp) 1523 { 1524 struct igc_softc *sc = ifp->if_softc; 1525 struct ifmedia *ifm = &sc->media; 1526 1527 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 1528 return (EINVAL); 1529 1530 sc->hw.mac.autoneg = DO_AUTO_NEG; 1531 1532 switch (IFM_SUBTYPE(ifm->ifm_media)) { 1533 case IFM_AUTO: 1534 sc->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT; 1535 break; 1536 case IFM_2500_T: 1537 sc->hw.phy.autoneg_advertised = ADVERTISE_2500_FULL; 1538 break; 1539 case IFM_1000_T: 1540 sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; 1541 break; 1542 case IFM_100_TX: 1543 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) 1544 sc->hw.phy.autoneg_advertised = ADVERTISE_100_FULL; 1545 else 1546 sc->hw.phy.autoneg_advertised = ADVERTISE_100_HALF; 1547 break; 1548 case IFM_10_T: 1549 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) 1550 sc->hw.phy.autoneg_advertised = ADVERTISE_10_FULL; 1551 else 1552 sc->hw.phy.autoneg_advertised = ADVERTISE_10_HALF; 1553 break; 1554 default: 1555 return EINVAL; 1556 } 1557 1558 igc_init(sc); 1559 1560 return 0; 1561 } 1562 1563 void 1564 igc_iff(struct igc_softc *sc) 1565 { 1566 struct ifnet *ifp = &sc->sc_ac.ac_if; 1567 struct arpcom *ac = &sc->sc_ac; 1568 struct ether_multi *enm; 1569 struct ether_multistep step; 1570 uint32_t reg_rctl = 0; 1571 uint8_t *mta; 1572 int mcnt = 0; 1573 1574 mta = sc->mta; 1575 bzero(mta, sizeof(uint8_t) * ETHER_ADDR_LEN * 1576 MAX_NUM_MULTICAST_ADDRESSES); 1577 1578 reg_rctl = IGC_READ_REG(&sc->hw, IGC_RCTL); 1579 reg_rctl &= ~(IGC_RCTL_UPE | IGC_RCTL_MPE); 1580 ifp->if_flags &= ~IFF_ALLMULTI; 1581 1582 if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0 || 1583 ac->ac_multicnt > MAX_NUM_MULTICAST_ADDRESSES) { 1584 ifp->if_flags |= IFF_ALLMULTI; 1585 reg_rctl |= IGC_RCTL_MPE; 1586 if (ifp->if_flags & IFF_PROMISC) 1587 reg_rctl |= IGC_RCTL_UPE; 1588 } else { 1589 ETHER_FIRST_MULTI(step, ac, enm); 1590 while (enm != NULL) { 1591 bcopy(enm->enm_addrlo, 1592 &mta[mcnt * ETHER_ADDR_LEN], ETHER_ADDR_LEN); 1593 mcnt++; 1594 1595 ETHER_NEXT_MULTI(step, enm); 1596 } 1597 1598 igc_update_mc_addr_list(&sc->hw, mta, mcnt); 1599 } 1600 1601 IGC_WRITE_REG(&sc->hw, IGC_RCTL, reg_rctl); 1602 } 1603 1604 void 1605 igc_update_link_status(struct igc_softc *sc) 1606 { 1607 struct ifnet *ifp = &sc->sc_ac.ac_if; 1608 struct igc_hw *hw = &sc->hw; 1609 int link_state; 1610 1611 if (hw->mac.get_link_status == true) 1612 igc_check_for_link(hw); 1613 1614 if (IGC_READ_REG(&sc->hw, IGC_STATUS) & IGC_STATUS_LU) { 1615 if (sc->link_active == 0) { 1616 igc_get_speed_and_duplex(hw, &sc->link_speed, 1617 &sc->link_duplex); 1618 sc->link_active = 1; 1619 ifp->if_baudrate = IF_Mbps(sc->link_speed); 1620 } 1621 link_state = (sc->link_duplex == FULL_DUPLEX) ? 1622 LINK_STATE_FULL_DUPLEX : LINK_STATE_HALF_DUPLEX; 1623 } else { 1624 if (sc->link_active == 1) { 1625 ifp->if_baudrate = sc->link_speed = 0; 1626 sc->link_duplex = 0; 1627 sc->link_active = 0; 1628 } 1629 link_state = LINK_STATE_DOWN; 1630 } 1631 if (ifp->if_link_state != link_state) { 1632 ifp->if_link_state = link_state; 1633 if_link_state_change(ifp); 1634 } 1635 } 1636 1637 /********************************************************************* 1638 * 1639 * Get a buffer from system mbuf buffer pool. 
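 *  The new cluster is DMA-mapped and its physical address is written
 *  into the receive descriptor for the hardware to fill.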
1640 * 1641 **********************************************************************/ 1642 int 1643 igc_get_buf(struct rx_ring *rxr, int i) 1644 { 1645 struct igc_softc *sc = rxr->sc; 1646 struct igc_rx_buf *rxbuf; 1647 struct mbuf *m; 1648 union igc_adv_rx_desc *rxdesc; 1649 int error; 1650 1651 rxbuf = &rxr->rx_buffers[i]; 1652 rxdesc = &rxr->rx_base[i]; 1653 if (rxbuf->buf) { 1654 printf("%s: slot %d already has an mbuf\n", DEVNAME(sc), i); 1655 return ENOBUFS; 1656 } 1657 1658 m = MCLGETL(NULL, M_DONTWAIT, sc->rx_mbuf_sz); 1659 if (!m) 1660 return ENOBUFS; 1661 1662 m->m_data += (m->m_ext.ext_size - sc->rx_mbuf_sz); 1663 m->m_len = m->m_pkthdr.len = sc->rx_mbuf_sz; 1664 1665 error = bus_dmamap_load_mbuf(rxr->rxdma.dma_tag, rxbuf->map, m, 1666 BUS_DMA_NOWAIT); 1667 if (error) { 1668 m_freem(m); 1669 return error; 1670 } 1671 1672 bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map, 0, 1673 rxbuf->map->dm_mapsize, BUS_DMASYNC_PREREAD); 1674 rxbuf->buf = m; 1675 1676 rxdesc->read.pkt_addr = htole64(rxbuf->map->dm_segs[0].ds_addr); 1677 1678 return 0; 1679 } 1680 1681 void 1682 igc_configure_queues(struct igc_softc *sc) 1683 { 1684 struct igc_hw *hw = &sc->hw; 1685 struct igc_queue *iq = sc->queues; 1686 uint32_t ivar, newitr = 0; 1687 int i; 1688 1689 /* First turn on RSS capability */ 1690 IGC_WRITE_REG(hw, IGC_GPIE, IGC_GPIE_MSIX_MODE | IGC_GPIE_EIAME | 1691 IGC_GPIE_PBA | IGC_GPIE_NSICR); 1692 1693 /* Set the starting interrupt rate */ 1694 newitr = (4000000 / MAX_INTS_PER_SEC) & 0x7FFC; 1695 1696 newitr |= IGC_EITR_CNT_IGNR; 1697 1698 /* Turn on MSI-X */ 1699 for (i = 0; i < sc->sc_nqueues; i++, iq++) { 1700 /* RX entries */ 1701 igc_set_queues(sc, i, iq->msix, 0); 1702 /* TX entries */ 1703 igc_set_queues(sc, i, iq->msix, 1); 1704 sc->msix_queuesmask |= iq->eims; 1705 IGC_WRITE_REG(hw, IGC_EITR(iq->msix), newitr); 1706 } 1707 1708 /* And for the link interrupt */ 1709 ivar = (sc->linkvec | IGC_IVAR_VALID) << 8; 1710 sc->msix_linkmask = 1 << sc->linkvec; 1711 IGC_WRITE_REG(hw, IGC_IVAR_MISC, ivar); 1712 } 1713 1714 void 1715 igc_set_queues(struct igc_softc *sc, uint32_t entry, uint32_t vector, int type) 1716 { 1717 struct igc_hw *hw = &sc->hw; 1718 uint32_t ivar, index; 1719 1720 index = entry >> 1; 1721 ivar = IGC_READ_REG_ARRAY(hw, IGC_IVAR0, index); 1722 if (type) { 1723 if (entry & 1) { 1724 ivar &= 0x00FFFFFF; 1725 ivar |= (vector | IGC_IVAR_VALID) << 24; 1726 } else { 1727 ivar &= 0xFFFF00FF; 1728 ivar |= (vector | IGC_IVAR_VALID) << 8; 1729 } 1730 } else { 1731 if (entry & 1) { 1732 ivar &= 0xFF00FFFF; 1733 ivar |= (vector | IGC_IVAR_VALID) << 16; 1734 } else { 1735 ivar &= 0xFFFFFF00; 1736 ivar |= vector | IGC_IVAR_VALID; 1737 } 1738 } 1739 IGC_WRITE_REG_ARRAY(hw, IGC_IVAR0, index, ivar); 1740 } 1741 1742 void 1743 igc_enable_queue(struct igc_softc *sc, uint32_t eims) 1744 { 1745 IGC_WRITE_REG(&sc->hw, IGC_EIMS, eims); 1746 } 1747 1748 void 1749 igc_enable_intr(struct igc_softc *sc) 1750 { 1751 struct igc_hw *hw = &sc->hw; 1752 uint32_t mask; 1753 1754 mask = (sc->msix_queuesmask | sc->msix_linkmask); 1755 IGC_WRITE_REG(hw, IGC_EIAC, mask); 1756 IGC_WRITE_REG(hw, IGC_EIAM, mask); 1757 IGC_WRITE_REG(hw, IGC_EIMS, mask); 1758 IGC_WRITE_REG(hw, IGC_IMS, IGC_IMS_LSC); 1759 IGC_WRITE_FLUSH(hw); 1760 } 1761 1762 void 1763 igc_disable_intr(struct igc_softc *sc) 1764 { 1765 struct igc_hw *hw = &sc->hw; 1766 1767 IGC_WRITE_REG(hw, IGC_EIMC, 0xffffffff); 1768 IGC_WRITE_REG(hw, IGC_EIAC, 0); 1769 IGC_WRITE_REG(hw, IGC_IMC, 0xffffffff); 1770 IGC_WRITE_FLUSH(hw); 1771 } 1772 1773 int 1774 
igc_intr_link(void *arg) 1775 { 1776 struct igc_softc *sc = (struct igc_softc *)arg; 1777 uint32_t reg_icr = IGC_READ_REG(&sc->hw, IGC_ICR); 1778 1779 if (reg_icr & IGC_ICR_LSC) { 1780 KERNEL_LOCK(); 1781 sc->hw.mac.get_link_status = true; 1782 igc_update_link_status(sc); 1783 KERNEL_UNLOCK(); 1784 } 1785 1786 IGC_WRITE_REG(&sc->hw, IGC_IMS, IGC_IMS_LSC); 1787 IGC_WRITE_REG(&sc->hw, IGC_EIMS, sc->msix_linkmask); 1788 1789 return 1; 1790 } 1791 1792 int 1793 igc_intr_queue(void *arg) 1794 { 1795 struct igc_queue *iq = arg; 1796 struct igc_softc *sc = iq->sc; 1797 struct ifnet *ifp = &sc->sc_ac.ac_if; 1798 struct rx_ring *rxr = iq->rxr; 1799 struct tx_ring *txr = iq->txr; 1800 1801 if (ifp->if_flags & IFF_RUNNING) { 1802 igc_txeof(txr); 1803 igc_rxeof(rxr); 1804 igc_rxrefill(rxr); 1805 } 1806 1807 igc_enable_queue(sc, iq->eims); 1808 1809 return 1; 1810 } 1811 1812 /********************************************************************* 1813 * 1814 * Allocate memory for tx_buffer structures. The tx_buffer stores all 1815 * the information needed to transmit a packet on the wire. 1816 * 1817 **********************************************************************/ 1818 int 1819 igc_allocate_transmit_buffers(struct tx_ring *txr) 1820 { 1821 struct igc_softc *sc = txr->sc; 1822 struct igc_tx_buf *txbuf; 1823 int error, i; 1824 1825 txr->tx_buffers = mallocarray(sc->num_tx_desc, 1826 sizeof(struct igc_tx_buf), M_DEVBUF, M_NOWAIT | M_ZERO); 1827 if (txr->tx_buffers == NULL) { 1828 printf("%s: Unable to allocate tx_buffer memory\n", 1829 DEVNAME(sc)); 1830 error = ENOMEM; 1831 goto fail; 1832 } 1833 txr->txtag = txr->txdma.dma_tag; 1834 1835 /* Create the descriptor buffer dma maps. */ 1836 for (i = 0; i < sc->num_tx_desc; i++) { 1837 txbuf = &txr->tx_buffers[i]; 1838 error = bus_dmamap_create(txr->txdma.dma_tag, IGC_TSO_SIZE, 1839 IGC_MAX_SCATTER, PAGE_SIZE, 0, BUS_DMA_NOWAIT, &txbuf->map); 1840 if (error != 0) { 1841 printf("%s: Unable to create TX DMA map\n", 1842 DEVNAME(sc)); 1843 goto fail; 1844 } 1845 } 1846 1847 return 0; 1848 fail: 1849 return error; 1850 } 1851 1852 1853 /********************************************************************* 1854 * 1855 * Allocate and initialize transmit structures. 1856 * 1857 **********************************************************************/ 1858 int 1859 igc_setup_transmit_structures(struct igc_softc *sc) 1860 { 1861 struct tx_ring *txr = sc->tx_rings; 1862 int i; 1863 1864 for (i = 0; i < sc->sc_nqueues; i++, txr++) { 1865 if (igc_setup_transmit_ring(txr)) 1866 goto fail; 1867 } 1868 1869 return 0; 1870 fail: 1871 igc_free_transmit_structures(sc); 1872 return ENOBUFS; 1873 } 1874 1875 /********************************************************************* 1876 * 1877 * Initialize a transmit ring. 1878 * 1879 **********************************************************************/ 1880 int 1881 igc_setup_transmit_ring(struct tx_ring *txr) 1882 { 1883 struct igc_softc *sc = txr->sc; 1884 1885 /* Now allocate transmit buffers for the ring. */ 1886 if (igc_allocate_transmit_buffers(txr)) 1887 return ENOMEM; 1888 1889 /* Clear the old ring contents */ 1890 bzero((void *)txr->tx_base, 1891 (sizeof(union igc_adv_tx_desc)) * sc->num_tx_desc); 1892 1893 /* Reset indices. 
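	 * Both the producer (next_avail_desc) and consumer (next_to_clean)
	 * indices start at slot 0 of the freshly zeroed ring.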
*/ 1894 txr->next_avail_desc = 0; 1895 txr->next_to_clean = 0; 1896 1897 bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0, 1898 txr->txdma.dma_map->dm_mapsize, 1899 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1900 1901 return 0; 1902 } 1903 1904 /********************************************************************* 1905 * 1906 * Enable transmit unit. 1907 * 1908 **********************************************************************/ 1909 void 1910 igc_initialize_transmit_unit(struct igc_softc *sc) 1911 { 1912 struct ifnet *ifp = &sc->sc_ac.ac_if; 1913 struct tx_ring *txr; 1914 struct igc_hw *hw = &sc->hw; 1915 uint64_t bus_addr; 1916 uint32_t tctl, txdctl = 0; 1917 int i; 1918 1919 /* Setup the Base and Length of the TX descriptor ring. */ 1920 for (i = 0; i < sc->sc_nqueues; i++) { 1921 txr = &sc->tx_rings[i]; 1922 1923 bus_addr = txr->txdma.dma_map->dm_segs[0].ds_addr; 1924 1925 /* Base and len of TX ring */ 1926 IGC_WRITE_REG(hw, IGC_TDLEN(i), 1927 sc->num_tx_desc * sizeof(union igc_adv_tx_desc)); 1928 IGC_WRITE_REG(hw, IGC_TDBAH(i), (uint32_t)(bus_addr >> 32)); 1929 IGC_WRITE_REG(hw, IGC_TDBAL(i), (uint32_t)bus_addr); 1930 1931 /* Init the HEAD/TAIL indices */ 1932 IGC_WRITE_REG(hw, IGC_TDT(i), 0); 1933 IGC_WRITE_REG(hw, IGC_TDH(i), 0); 1934 1935 txr->watchdog_timer = 0; 1936 1937 txdctl = 0; /* Clear txdctl */ 1938 txdctl |= 0x1f; /* PTHRESH */ 1939 txdctl |= 1 << 8; /* HTHRESH */ 1940 txdctl |= 1 << 16; /* WTHRESH */ 1941 txdctl |= 1 << 22; /* Reserved bit 22 must always be 1 */ 1942 txdctl |= IGC_TXDCTL_GRAN; 1943 txdctl |= 1 << 25; /* LWTHRESH */ 1944 1945 IGC_WRITE_REG(hw, IGC_TXDCTL(i), txdctl); 1946 } 1947 ifp->if_timer = 0; 1948 1949 /* Program the Transmit Control Register */ 1950 tctl = IGC_READ_REG(&sc->hw, IGC_TCTL); 1951 tctl &= ~IGC_TCTL_CT; 1952 tctl |= (IGC_TCTL_PSP | IGC_TCTL_RTLC | IGC_TCTL_EN | 1953 (IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT)); 1954 1955 /* This write will effectively turn on the transmit unit. */ 1956 IGC_WRITE_REG(&sc->hw, IGC_TCTL, tctl); 1957 } 1958 1959 /********************************************************************* 1960 * 1961 * Free all transmit rings. 1962 * 1963 **********************************************************************/ 1964 void 1965 igc_free_transmit_structures(struct igc_softc *sc) 1966 { 1967 struct tx_ring *txr = sc->tx_rings; 1968 int i; 1969 1970 for (i = 0; i < sc->sc_nqueues; i++, txr++) 1971 igc_free_transmit_buffers(txr); 1972 } 1973 1974 /********************************************************************* 1975 * 1976 * Free transmit ring related data structures. 
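 *  Loaded DMA maps are synced and unloaded, any pending mbufs are freed
 *  and the per-descriptor DMA maps are destroyed.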
 *
 **********************************************************************/
void
igc_free_transmit_buffers(struct tx_ring *txr)
{
	struct igc_softc *sc = txr->sc;
	struct igc_tx_buf *txbuf;
	int i;

	if (txr->tx_buffers == NULL)
		return;

	txbuf = txr->tx_buffers;
	for (i = 0; i < sc->num_tx_desc; i++, txbuf++) {
		if (txbuf->map != NULL && txbuf->map->dm_nsegs > 0) {
			bus_dmamap_sync(txr->txdma.dma_tag, txbuf->map,
			    0, txbuf->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(txr->txdma.dma_tag, txbuf->map);
		}
		if (txbuf->m_head != NULL) {
			m_freem(txbuf->m_head);
			txbuf->m_head = NULL;
		}
		if (txbuf->map != NULL) {
			bus_dmamap_destroy(txr->txdma.dma_tag, txbuf->map);
			txbuf->map = NULL;
		}
	}

	if (txr->tx_buffers != NULL)
		free(txr->tx_buffers, M_DEVBUF,
		    sc->num_tx_desc * sizeof(struct igc_tx_buf));
	txr->tx_buffers = NULL;
	txr->txtag = NULL;
}

/*********************************************************************
 *
 *  Advanced Context Descriptor setup for VLAN, CSUM or TSO
 *
 **********************************************************************/

int
igc_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp, int prod,
    uint32_t *olinfo_status)
{
	struct ether_extracted ext;
	struct igc_adv_tx_context_desc *txdesc;
	uint32_t type_tucmd_mlhl = 0;
	uint32_t vlan_macip_lens = 0;
	int off = 0;

	vlan_macip_lens |= (sizeof(*ext.eh) << IGC_ADVTXD_MACLEN_SHIFT);

	/*
	 * In advanced descriptors the vlan tag must
	 * be placed into the context descriptor. Hence
	 * we need to make one even if not doing offloads.
	 */
#ifdef notyet
#if NVLAN > 0
	if (ISSET(mp->m_flags, M_VLANTAG)) {
		uint32_t vtag = mp->m_pkthdr.ether_vtag;
		vlan_macip_lens |= (vtag << IGC_ADVTXD_VLAN_SHIFT);
		off = 1;
	}
#endif
#endif

	ether_extract_headers(mp, &ext);

	if (ext.ip4) {
		type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_IPV4;
		if (ISSET(mp->m_pkthdr.csum_flags, M_IPV4_CSUM_OUT)) {
			*olinfo_status |= IGC_TXD_POPTS_IXSM << 8;
			off = 1;
		}
#ifdef INET6
	} else if (ext.ip6) {
		type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_IPV6;
#endif
	} else {
		return 0;
	}

	vlan_macip_lens |= ext.iphlen;
	type_tucmd_mlhl |= IGC_ADVTXD_DCMD_DEXT | IGC_ADVTXD_DTYP_CTXT;

	if (ext.tcp) {
		type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_TCP;
		if (ISSET(mp->m_pkthdr.csum_flags, M_TCP_CSUM_OUT)) {
			*olinfo_status |= IGC_TXD_POPTS_TXSM << 8;
			off = 1;
		}
	} else if (ext.udp) {
		type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_UDP;
		if (ISSET(mp->m_pkthdr.csum_flags, M_UDP_CSUM_OUT)) {
			*olinfo_status |= IGC_TXD_POPTS_TXSM << 8;
			off = 1;
		}
	}

	if (off == 0)
		return 0;

	/* Now ready a context descriptor */
	txdesc = (struct igc_adv_tx_context_desc *)&txr->tx_base[prod];

	/* Now copy bits into descriptor */
	htolem32(&txdesc->vlan_macip_lens, vlan_macip_lens);
	htolem32(&txdesc->type_tucmd_mlhl, type_tucmd_mlhl);
	htolem32(&txdesc->seqnum_seed, 0);
	htolem32(&txdesc->mss_l4len_idx, 0);

	return 1;
}

/*********************************************************************
 *
 *  Allocate memory for rx_buffer structures. Since we use one
 *  rx_buffer per received packet, the maximum number of rx_buffers
 *  that we'll need is equal to the number of receive descriptors
 *  that we've allocated.
 *
 **********************************************************************/
int
igc_allocate_receive_buffers(struct rx_ring *rxr)
{
	struct igc_softc *sc = rxr->sc;
	struct igc_rx_buf *rxbuf;
	int i, error;

	rxr->rx_buffers = mallocarray(sc->num_rx_desc,
	    sizeof(struct igc_rx_buf), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (rxr->rx_buffers == NULL) {
		printf("%s: Unable to allocate rx_buffer memory\n",
		    DEVNAME(sc));
		error = ENOMEM;
		goto fail;
	}

	rxbuf = rxr->rx_buffers;
	for (i = 0; i < sc->num_rx_desc; i++, rxbuf++) {
		error = bus_dmamap_create(rxr->rxdma.dma_tag,
		    MAX_JUMBO_FRAME_SIZE, 1, MAX_JUMBO_FRAME_SIZE, 0,
		    BUS_DMA_NOWAIT, &rxbuf->map);
		if (error) {
			printf("%s: Unable to create RX DMA map\n",
			    DEVNAME(sc));
			goto fail;
		}
	}
	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 0,
	    rxr->rxdma.dma_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return 0;
fail:
	return error;
}

/*********************************************************************
 *
 *  Allocate and initialize receive structures.
 *
 **********************************************************************/
int
igc_setup_receive_structures(struct igc_softc *sc)
{
	struct rx_ring *rxr = sc->rx_rings;
	int i;

	for (i = 0; i < sc->sc_nqueues; i++, rxr++) {
		if (igc_setup_receive_ring(rxr))
			goto fail;
	}

	return 0;
fail:
	igc_free_receive_structures(sc);
	return ENOBUFS;
}

/*********************************************************************
 *
 *  Initialize a receive ring and its buffers.
 *
 **********************************************************************/
int
igc_setup_receive_ring(struct rx_ring *rxr)
{
	struct igc_softc *sc = rxr->sc;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int rsize;

	rsize = roundup2(sc->num_rx_desc * sizeof(union igc_adv_rx_desc),
	    IGC_DBA_ALIGN);

	/* Clear the ring contents. */
	bzero((void *)rxr->rx_base, rsize);

	if (igc_allocate_receive_buffers(rxr))
		return ENOMEM;

	/* Setup our descriptor indices. */
	rxr->next_to_check = 0;
	rxr->last_desc_filled = sc->num_rx_desc - 1;

	if_rxr_init(&rxr->rx_ring, 2 * ((ifp->if_hardmtu / MCLBYTES) + 1),
	    sc->num_rx_desc - 1);

	return 0;
}

/*********************************************************************
 *
 *  Enable receive unit.
 *
 **********************************************************************/
#define BSIZEPKT_ROUNDUP	((1 << IGC_SRRCTL_BSIZEPKT_SHIFT) - 1)

void
igc_initialize_receive_unit(struct igc_softc *sc)
{
	struct rx_ring *rxr = sc->rx_rings;
	struct igc_hw *hw = &sc->hw;
	uint32_t rctl, rxcsum, srrctl = 0;
	int i;

	/*
	 * Make sure receives are disabled while setting
	 * up the descriptor ring.
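	 * RCTL.EN is cleared here and is only set again by the final
	 * RCTL write at the bottom of this function.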
	 */
	rctl = IGC_READ_REG(hw, IGC_RCTL);
	IGC_WRITE_REG(hw, IGC_RCTL, rctl & ~IGC_RCTL_EN);

	/* Setup the Receive Control Register */
	rctl &= ~(3 << IGC_RCTL_MO_SHIFT);
	rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_LBM_NO |
	    IGC_RCTL_RDMTS_HALF | (hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT);

	/* Do not store bad packets */
	rctl &= ~IGC_RCTL_SBP;

	/* Enable Long Packet receive */
	if (sc->hw.mac.max_frame_size != ETHER_MAX_LEN)
		rctl |= IGC_RCTL_LPE;

	/* Strip the CRC */
	rctl |= IGC_RCTL_SECRC;

	/*
	 * Set the interrupt throttling rate. Value is calculated
	 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
	 */
	IGC_WRITE_REG(hw, IGC_ITR, DEFAULT_ITR);

	rxcsum = IGC_READ_REG(hw, IGC_RXCSUM);
	rxcsum &= ~IGC_RXCSUM_PCSD;

	if (sc->sc_nqueues > 1)
		rxcsum |= IGC_RXCSUM_PCSD;

	IGC_WRITE_REG(hw, IGC_RXCSUM, rxcsum);

	if (sc->sc_nqueues > 1)
		igc_initialize_rss_mapping(sc);

	/* Set maximum packet buffer len */
	srrctl |= (sc->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
	    IGC_SRRCTL_BSIZEPKT_SHIFT;
	/* srrctl above overrides this but set the register to a sane value */
	rctl |= IGC_RCTL_SZ_2048;

	/*
	 * If TX flow control is disabled and there's > 1 queue defined,
	 * enable DROP.
	 *
	 * This drops frames rather than hanging the RX MAC for all queues.
	 */
	if ((sc->sc_nqueues > 1) && (sc->fc == igc_fc_none ||
	    sc->fc == igc_fc_rx_pause)) {
		srrctl |= IGC_SRRCTL_DROP_EN;
	}

	/* Setup the Base and Length of the RX descriptor rings. */
	for (i = 0; i < sc->sc_nqueues; i++, rxr++) {
		IGC_WRITE_REG(hw, IGC_RXDCTL(i), 0);
		uint64_t bus_addr = rxr->rxdma.dma_map->dm_segs[0].ds_addr;
		uint32_t rxdctl;

		srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF;

		IGC_WRITE_REG(hw, IGC_RDLEN(i),
		    sc->num_rx_desc * sizeof(union igc_adv_rx_desc));
		IGC_WRITE_REG(hw, IGC_RDBAH(i), (uint32_t)(bus_addr >> 32));
		IGC_WRITE_REG(hw, IGC_RDBAL(i), (uint32_t)bus_addr);
		IGC_WRITE_REG(hw, IGC_SRRCTL(i), srrctl);

		/* Setup the Head and Tail Descriptor Pointers */
		IGC_WRITE_REG(hw, IGC_RDH(i), 0);
		IGC_WRITE_REG(hw, IGC_RDT(i), 0);

		/* Enable this Queue */
		rxdctl = IGC_READ_REG(hw, IGC_RXDCTL(i));
		rxdctl |= IGC_RXDCTL_QUEUE_ENABLE;
		rxdctl &= 0xFFF00000;
		rxdctl |= IGC_RX_PTHRESH;
		rxdctl |= IGC_RX_HTHRESH << 8;
		rxdctl |= IGC_RX_WTHRESH << 16;
		IGC_WRITE_REG(hw, IGC_RXDCTL(i), rxdctl);
	}

	/* Make sure VLAN Filters are off */
	rctl &= ~IGC_RCTL_VFE;

	/* Write out the settings */
	IGC_WRITE_REG(hw, IGC_RCTL, rctl);
}

/*********************************************************************
 *
 *  Free all receive rings.
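 *  The if_rxr accounting is cleared on every ring before any of the
 *  receive buffers are released.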
 *
 **********************************************************************/
void
igc_free_receive_structures(struct igc_softc *sc)
{
	struct rx_ring *rxr;
	int i;

	for (i = 0, rxr = sc->rx_rings; i < sc->sc_nqueues; i++, rxr++)
		if_rxr_init(&rxr->rx_ring, 0, 0);

	for (i = 0, rxr = sc->rx_rings; i < sc->sc_nqueues; i++, rxr++)
		igc_free_receive_buffers(rxr);
}

/*********************************************************************
 *
 *  Free receive ring data structures
 *
 **********************************************************************/
void
igc_free_receive_buffers(struct rx_ring *rxr)
{
	struct igc_softc *sc = rxr->sc;
	struct igc_rx_buf *rxbuf;
	int i;

	if (rxr->rx_buffers != NULL) {
		for (i = 0; i < sc->num_rx_desc; i++) {
			rxbuf = &rxr->rx_buffers[i];
			if (rxbuf->buf != NULL) {
				bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map,
				    0, rxbuf->map->dm_mapsize,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(rxr->rxdma.dma_tag,
				    rxbuf->map);
				m_freem(rxbuf->buf);
				rxbuf->buf = NULL;
			}
			bus_dmamap_destroy(rxr->rxdma.dma_tag, rxbuf->map);
			rxbuf->map = NULL;
		}
		free(rxr->rx_buffers, M_DEVBUF,
		    sc->num_rx_desc * sizeof(struct igc_rx_buf));
		rxr->rx_buffers = NULL;
	}
}

/*
 * Initialise the RSS mapping for NICs that support multiple transmit/
 * receive rings.
 */
void
igc_initialize_rss_mapping(struct igc_softc *sc)
{
	struct igc_hw *hw = &sc->hw;
	uint32_t rss_key[10], mrqc, reta, shift = 0;
	int i, queue_id;

	/*
	 * The redirection table controls which destination
	 * queue each bucket redirects traffic to.
	 * Each DWORD represents four queues, with the LSB
	 * being the first queue in the DWORD.
	 *
	 * This just allocates buckets to queues using round-robin
	 * allocation.
	 *
	 * NOTE: It Just Happens to line up with the default
	 * RSS allocation method.
	 */

	/* Warning FM follows */
	reta = 0;
	for (i = 0; i < 128; i++) {
		queue_id = (i % sc->sc_nqueues);
		/* Adjust if required */
		queue_id = queue_id << shift;

		/*
		 * The low 8 bits are for hash value (n+0);
		 * The next 8 bits are for hash value (n+1), etc.
		 */
		reta = reta >> 8;
		reta = reta | (((uint32_t)queue_id) << 24);
		if ((i & 3) == 3) {
			IGC_WRITE_REG(hw, IGC_RETA(i >> 2), reta);
			reta = 0;
		}
	}

	/*
	 * MRQC: Multiple Receive Queues Command
	 * Set queuing to RSS control, number depends on the device.
	 */
	mrqc = IGC_MRQC_ENABLE_RSS_4Q;

	/* Set up random bits */
	stoeplitz_to_key(&rss_key, sizeof(rss_key));

	/* Now fill our hash function seeds */
	for (i = 0; i < 10; i++)
		IGC_WRITE_REG_ARRAY(hw, IGC_RSSRK(0), i, rss_key[i]);

	/*
	 * Configure the RSS fields to hash upon.
	 */
	mrqc |= (IGC_MRQC_RSS_FIELD_IPV4 | IGC_MRQC_RSS_FIELD_IPV4_TCP);
	mrqc |= (IGC_MRQC_RSS_FIELD_IPV6 | IGC_MRQC_RSS_FIELD_IPV6_TCP);
	mrqc |= IGC_MRQC_RSS_FIELD_IPV6_TCP_EX;

	IGC_WRITE_REG(hw, IGC_MRQC, mrqc);
}

/*
 * igc_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means
 * that the driver is loaded. For AMT version type f/w
 * this means that the network i/f is open.
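 * The bit is cleared again by igc_release_hw_control() below.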
 */
void
igc_get_hw_control(struct igc_softc *sc)
{
	uint32_t ctrl_ext;

	ctrl_ext = IGC_READ_REG(&sc->hw, IGC_CTRL_EXT);
	IGC_WRITE_REG(&sc->hw, IGC_CTRL_EXT, ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
}

/*
 * igc_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is no longer loaded. For AMT versions of the
 * f/w this means that the network i/f is closed.
 */
void
igc_release_hw_control(struct igc_softc *sc)
{
	uint32_t ctrl_ext;

	ctrl_ext = IGC_READ_REG(&sc->hw, IGC_CTRL_EXT);
	IGC_WRITE_REG(&sc->hw, IGC_CTRL_EXT, ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD);
}

int
igc_is_valid_ether_addr(uint8_t *addr)
{
	char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };

	if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
		return 0;
	}

	return 1;
}