/*
 * Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>.  All rights reserved.
 *
 * Copyright (c) 2001-2008, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * Copyright (c) 2005 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */
/*
 * SERIALIZATION API RULES:
 *
 * - If the driver uses the same serializer for the interrupt as for the
 *   ifnet, most of the serialization will be done automatically for the
 *   driver.
 *
 * - ifmedia entry points will be serialized by the ifmedia code using the
 *   ifnet serializer.
 *
 * - if_* entry points except for if_input will be serialized by the IF
 *   and protocol layers.
 *
 * - The device driver must be sure to serialize access from timeout code
 *   installed by the device driver.
 *
 * - The device driver typically holds the serializer at the time it wishes
 *   to call if_input.
 *
 * - We must call lwkt_serialize_handler_enable() prior to enabling the
 *   hardware interrupt and lwkt_serialize_handler_disable() after disabling
 *   the hardware interrupt in order to avoid handler execution races from
 *   scheduled interrupt threads.
 *
 * NOTE!  Since callers into the device driver hold the ifnet serializer,
 *	  the device driver may be holding a serializer at the time it calls
 *	  if_input even if it is not serializer-aware.
 */
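/*
 * A minimal sketch (using only names that appear elsewhere in this file)
 * of the bring-up/tear-down ordering implied by the last rule above;
 * IMS/IMC are the interrupt mask set/clear registers:
 *
 *	lwkt_serialize_handler_enable(ifp->if_serializer);
 *	E1000_WRITE_REG(hw, E1000_IMS, IMS_ENABLE_MASK);	<- hw intr on
 *	...
 *	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);		<- hw intr off
 *	lwkt_serialize_handler_disable(ifp->if_serializer);
 */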
#include "opt_polling.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

#include <dev/netif/ig_hal/e1000_api.h>
#include <dev/netif/ig_hal/e1000_82571.h>
#include <dev/netif/em/if_em.h>

#define EM_NAME	"Intel(R) PRO/1000 Network Connection "
#define EM_VER	" 7.2.4"

#define _EM_DEVICE(id, ret) \
	{ EM_VENDOR_ID, E1000_DEV_ID_##id, ret, EM_NAME #id EM_VER }
#define EM_EMX_DEVICE(id)	_EM_DEVICE(id, -100)
#define EM_DEVICE(id)		_EM_DEVICE(id, 0)
#define EM_DEVICE_NULL		{ 0, 0, 0, NULL }
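/*
 * For reference, EM_DEVICE(82540EM) in the table below expands to:
 *
 *	{ EM_VENDOR_ID, E1000_DEV_ID_82540EM, 0,
 *	  "Intel(R) PRO/1000 Network Connection 82540EM 7.2.4" }
 *
 * EM_EMX_DEVICE() differs only in the -100 probe return value, which
 * is intended to let the emx(4) driver win the probe for those chips.
 */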
static const struct em_vendor_info em_vendor_info_array[] = {
	EM_DEVICE(82540EM),
	EM_DEVICE(82540EM_LOM),
	EM_DEVICE(82540EP),
	EM_DEVICE(82540EP_LOM),
	EM_DEVICE(82540EP_LP),

	EM_DEVICE(82541EI),
	EM_DEVICE(82541ER),
	EM_DEVICE(82541ER_LOM),
	EM_DEVICE(82541EI_MOBILE),
	EM_DEVICE(82541GI),
	EM_DEVICE(82541GI_LF),
	EM_DEVICE(82541GI_MOBILE),

	EM_DEVICE(82542),

	EM_DEVICE(82543GC_FIBER),
	EM_DEVICE(82543GC_COPPER),

	EM_DEVICE(82544EI_COPPER),
	EM_DEVICE(82544EI_FIBER),
	EM_DEVICE(82544GC_COPPER),
	EM_DEVICE(82544GC_LOM),

	EM_DEVICE(82545EM_COPPER),
	EM_DEVICE(82545EM_FIBER),
	EM_DEVICE(82545GM_COPPER),
	EM_DEVICE(82545GM_FIBER),
	EM_DEVICE(82545GM_SERDES),

	EM_DEVICE(82546EB_COPPER),
	EM_DEVICE(82546EB_FIBER),
	EM_DEVICE(82546EB_QUAD_COPPER),
	EM_DEVICE(82546GB_COPPER),
	EM_DEVICE(82546GB_FIBER),
	EM_DEVICE(82546GB_SERDES),
	EM_DEVICE(82546GB_PCIE),
	EM_DEVICE(82546GB_QUAD_COPPER),
	EM_DEVICE(82546GB_QUAD_COPPER_KSP3),

	EM_DEVICE(82547EI),
	EM_DEVICE(82547EI_MOBILE),
	EM_DEVICE(82547GI),

	EM_EMX_DEVICE(82571EB_COPPER),
	EM_EMX_DEVICE(82571EB_FIBER),
	EM_EMX_DEVICE(82571EB_SERDES),
	EM_EMX_DEVICE(82571EB_SERDES_DUAL),
	EM_EMX_DEVICE(82571EB_SERDES_QUAD),
	EM_EMX_DEVICE(82571EB_QUAD_COPPER),
	EM_EMX_DEVICE(82571EB_QUAD_COPPER_BP),
	EM_EMX_DEVICE(82571EB_QUAD_COPPER_LP),
	EM_EMX_DEVICE(82571EB_QUAD_FIBER),
	EM_EMX_DEVICE(82571PT_QUAD_COPPER),

	EM_EMX_DEVICE(82572EI_COPPER),
	EM_EMX_DEVICE(82572EI_FIBER),
	EM_EMX_DEVICE(82572EI_SERDES),
	EM_EMX_DEVICE(82572EI),

	EM_EMX_DEVICE(82573E),
	EM_EMX_DEVICE(82573E_IAMT),
	EM_EMX_DEVICE(82573L),

	EM_DEVICE(82583V),

	EM_EMX_DEVICE(80003ES2LAN_COPPER_SPT),
	EM_EMX_DEVICE(80003ES2LAN_SERDES_SPT),
	EM_EMX_DEVICE(80003ES2LAN_COPPER_DPT),
	EM_EMX_DEVICE(80003ES2LAN_SERDES_DPT),

	EM_DEVICE(ICH8_IGP_M_AMT),
	EM_DEVICE(ICH8_IGP_AMT),
	EM_DEVICE(ICH8_IGP_C),
	EM_DEVICE(ICH8_IFE),
	EM_DEVICE(ICH8_IFE_GT),
	EM_DEVICE(ICH8_IFE_G),
	EM_DEVICE(ICH8_IGP_M),
	EM_DEVICE(ICH8_82567V_3),

	EM_DEVICE(ICH9_IGP_M_AMT),
	EM_DEVICE(ICH9_IGP_AMT),
	EM_DEVICE(ICH9_IGP_C),
	EM_DEVICE(ICH9_IGP_M),
	EM_DEVICE(ICH9_IGP_M_V),
	EM_DEVICE(ICH9_IFE),
	EM_DEVICE(ICH9_IFE_GT),
	EM_DEVICE(ICH9_IFE_G),
	EM_DEVICE(ICH9_BM),

	EM_EMX_DEVICE(82574L),
	EM_EMX_DEVICE(82574LA),

	EM_DEVICE(ICH10_R_BM_LM),
	EM_DEVICE(ICH10_R_BM_LF),
	EM_DEVICE(ICH10_R_BM_V),
	EM_DEVICE(ICH10_D_BM_LM),
	EM_DEVICE(ICH10_D_BM_LF),
	EM_DEVICE(ICH10_D_BM_V),

	EM_DEVICE(PCH_M_HV_LM),
	EM_DEVICE(PCH_M_HV_LC),
	EM_DEVICE(PCH_D_HV_DM),
	EM_DEVICE(PCH_D_HV_DC),

	EM_DEVICE(PCH2_LV_LM),
	EM_DEVICE(PCH2_LV_V),

	/* required last entry */
	EM_DEVICE_NULL
};

static int	em_probe(device_t);
static int	em_attach(device_t);
static int	em_detach(device_t);
static int	em_shutdown(device_t);
static int	em_suspend(device_t);
static int	em_resume(device_t);

static void	em_init(void *);
static void	em_stop(struct adapter *);
static int	em_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	em_start(struct ifnet *);
#ifdef DEVICE_POLLING
static void	em_poll(struct ifnet *, enum poll_cmd, int);
#endif
static void	em_watchdog(struct ifnet *);
static void	em_media_status(struct ifnet *, struct ifmediareq *);
static int	em_media_change(struct ifnet *);
static void	em_timer(void *);

static void	em_intr(void *);
static void	em_intr_mask(void *);
static void	em_intr_body(struct adapter *, boolean_t);
static void	em_rxeof(struct adapter *, int);
static void	em_txeof(struct adapter *);
static void	em_tx_collect(struct adapter *);
static void	em_tx_purge(struct adapter *);
static void	em_enable_intr(struct adapter *);
static void	em_disable_intr(struct adapter *);

static int	em_dma_malloc(struct adapter *, bus_size_t,
		    struct em_dma_alloc *);
static void	em_dma_free(struct adapter *, struct em_dma_alloc *);
static void	em_init_tx_ring(struct adapter *);
static int	em_init_rx_ring(struct adapter *);
static int	em_create_tx_ring(struct adapter *);
static int	em_create_rx_ring(struct adapter *);
static void	em_destroy_tx_ring(struct adapter *, int);
static void	em_destroy_rx_ring(struct adapter *, int);
static int	em_newbuf(struct adapter *, int, int);
static int	em_encap(struct adapter *, struct mbuf **);
static void	em_rxcsum(struct adapter *, struct e1000_rx_desc *,
		    struct mbuf *);
static int	em_txcsum_pullup(struct adapter *, struct mbuf **);
static int	em_txcsum(struct adapter *, struct mbuf *,
		    uint32_t *, uint32_t *);

static int	em_get_hw_info(struct adapter *);
static int	em_is_valid_eaddr(const uint8_t *);
static int	em_alloc_pci_res(struct adapter *);
static void	em_free_pci_res(struct adapter *);
static int	em_reset(struct adapter *);
static void	em_setup_ifp(struct adapter *);
static void	em_init_tx_unit(struct adapter *);
static void	em_init_rx_unit(struct adapter *);
static void	em_update_stats(struct adapter *);
static void	em_set_promisc(struct adapter *);
static void	em_disable_promisc(struct adapter *);
static void	em_set_multi(struct adapter *);
static void	em_update_link_status(struct adapter *);
static void	em_smartspeed(struct adapter *);
static void	em_set_itr(struct adapter *, uint32_t);
static void	em_disable_aspm(struct adapter *);

/* Hardware workarounds */
static int	em_82547_fifo_workaround(struct adapter *, int);
static void	em_82547_update_fifo_head(struct adapter *, int);
static int	em_82547_tx_fifo_reset(struct adapter *);
static void	em_82547_move_tail(void *);
static void	em_82547_move_tail_serialized(struct adapter *);
static uint32_t	em_82544_fill_desc(bus_addr_t, uint32_t, PDESC_ARRAY);

static void	em_print_debug_info(struct adapter *);
static void	em_print_nvm_info(struct adapter *);
static void	em_print_hw_stats(struct adapter *);

static int	em_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int	em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
static int	em_sysctl_int_throttle(SYSCTL_HANDLER_ARGS);
static int	em_sysctl_int_tx_nsegs(SYSCTL_HANDLER_ARGS);
static void	em_add_sysctl(struct adapter *adapter);

/* Management and WOL Support */
static void	em_get_mgmt(struct adapter *);
static void	em_rel_mgmt(struct adapter *);
static void	em_get_hw_control(struct adapter *);
static void	em_rel_hw_control(struct adapter *);
static void	em_enable_wol(device_t);

static device_method_t em_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		em_probe),
	DEVMETHOD(device_attach,	em_attach),
	DEVMETHOD(device_detach,	em_detach),
	DEVMETHOD(device_shutdown,	em_shutdown),
	DEVMETHOD(device_suspend,	em_suspend),
	DEVMETHOD(device_resume,	em_resume),
	{ 0, 0 }
};

static driver_t em_driver = {
	"em",
	em_methods,
	sizeof(struct adapter),
};

static devclass_t em_devclass;

DECLARE_DUMMY_MODULE(if_em);
MODULE_DEPEND(em, ig_hal, 1, 1, 1);
DRIVER_MODULE(if_em, pci, em_driver, em_devclass, NULL, NULL);

/*
 * Tunables
 */
static int	em_int_throttle_ceil = EM_DEFAULT_ITR;
static int	em_rxd = EM_DEFAULT_RXD;
static int	em_txd = EM_DEFAULT_TXD;
static int	em_smart_pwr_down = 0;

/* Controls whether promiscuous also shows bad packets */
static int	em_debug_sbp = FALSE;

static int	em_82573_workaround = 1;
static int	em_msi_enable = 1;

TUNABLE_INT("hw.em.int_throttle_ceil", &em_int_throttle_ceil);
TUNABLE_INT("hw.em.rxd", &em_rxd);
TUNABLE_INT("hw.em.txd", &em_txd);
TUNABLE_INT("hw.em.smart_pwr_down", &em_smart_pwr_down);
TUNABLE_INT("hw.em.sbp", &em_debug_sbp);
TUNABLE_INT("hw.em.82573_workaround", &em_82573_workaround);
TUNABLE_INT("hw.em.msi.enable", &em_msi_enable);
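/*
 * These tunables are picked up from the kernel environment at boot,
 * e.g. via lines like the following (hypothetical values) in
 * /boot/loader.conf:
 *
 *	hw.em.rxd="512"
 *	hw.em.int_throttle_ceil="10000"
 *	hw.em.msi.enable="0"
 */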
TUNABLE_INT("hw.em.82573_workaround", &em_82573_workaround); 390 TUNABLE_INT("hw.em.msi.enable", &em_msi_enable); 391 392 /* Global used in WOL setup with multiport cards */ 393 static int em_global_quad_port_a = 0; 394 395 /* Set this to one to display debug statistics */ 396 static int em_display_debug_stats = 0; 397 398 #if !defined(KTR_IF_EM) 399 #define KTR_IF_EM KTR_ALL 400 #endif 401 KTR_INFO_MASTER(if_em); 402 KTR_INFO(KTR_IF_EM, if_em, intr_beg, 0, "intr begin"); 403 KTR_INFO(KTR_IF_EM, if_em, intr_end, 1, "intr end"); 404 KTR_INFO(KTR_IF_EM, if_em, pkt_receive, 4, "rx packet"); 405 KTR_INFO(KTR_IF_EM, if_em, pkt_txqueue, 5, "tx packet"); 406 KTR_INFO(KTR_IF_EM, if_em, pkt_txclean, 6, "tx clean"); 407 #define logif(name) KTR_LOG(if_em_ ## name) 408 409 static int 410 em_probe(device_t dev) 411 { 412 const struct em_vendor_info *ent; 413 uint16_t vid, did; 414 415 vid = pci_get_vendor(dev); 416 did = pci_get_device(dev); 417 418 for (ent = em_vendor_info_array; ent->desc != NULL; ++ent) { 419 if (vid == ent->vendor_id && did == ent->device_id) { 420 device_set_desc(dev, ent->desc); 421 device_set_async_attach(dev, TRUE); 422 return (ent->ret); 423 } 424 } 425 return (ENXIO); 426 } 427 428 static int 429 em_attach(device_t dev) 430 { 431 struct adapter *adapter = device_get_softc(dev); 432 struct ifnet *ifp = &adapter->arpcom.ac_if; 433 int tsize, rsize; 434 int error = 0; 435 uint16_t eeprom_data, device_id, apme_mask; 436 driver_intr_t *intr_func; 437 438 adapter->dev = adapter->osdep.dev = dev; 439 440 callout_init_mp(&adapter->timer); 441 callout_init_mp(&adapter->tx_fifo_timer); 442 443 /* Determine hardware and mac info */ 444 error = em_get_hw_info(adapter); 445 if (error) { 446 device_printf(dev, "Identify hardware failed\n"); 447 goto fail; 448 } 449 450 /* Setup PCI resources */ 451 error = em_alloc_pci_res(adapter); 452 if (error) { 453 device_printf(dev, "Allocation of PCI resources failed\n"); 454 goto fail; 455 } 456 457 /* 458 * For ICH8 and family we need to map the flash memory, 459 * and this must happen after the MAC is identified. 460 */ 461 if (adapter->hw.mac.type == e1000_ich8lan || 462 adapter->hw.mac.type == e1000_ich9lan || 463 adapter->hw.mac.type == e1000_ich10lan || 464 adapter->hw.mac.type == e1000_pchlan || 465 adapter->hw.mac.type == e1000_pch2lan) { 466 adapter->flash_rid = EM_BAR_FLASH; 467 468 adapter->flash = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 469 &adapter->flash_rid, RF_ACTIVE); 470 if (adapter->flash == NULL) { 471 device_printf(dev, "Mapping of Flash failed\n"); 472 error = ENXIO; 473 goto fail; 474 } 475 adapter->osdep.flash_bus_space_tag = 476 rman_get_bustag(adapter->flash); 477 adapter->osdep.flash_bus_space_handle = 478 rman_get_bushandle(adapter->flash); 479 480 /* 481 * This is used in the shared code 482 * XXX this goof is actually not used. 483 */ 484 adapter->hw.flash_address = (uint8_t *)adapter->flash; 485 } 486 487 /* Do Shared Code initialization */ 488 if (e1000_setup_init_funcs(&adapter->hw, TRUE)) { 489 device_printf(dev, "Setup of Shared code failed\n"); 490 error = ENXIO; 491 goto fail; 492 } 493 494 e1000_get_bus_info(&adapter->hw); 495 496 /* 497 * Validate number of transmit and receive descriptors. It 498 * must not exceed hardware maximum, and must be multiple 499 * of E1000_DBA_ALIGN. 
	if ((em_txd * sizeof(struct e1000_tx_desc)) % EM_DBA_ALIGN != 0 ||
	    (adapter->hw.mac.type >= e1000_82544 && em_txd > EM_MAX_TXD) ||
	    (adapter->hw.mac.type < e1000_82544 && em_txd > EM_MAX_TXD_82543) ||
	    em_txd < EM_MIN_TXD) {
		device_printf(dev, "Using %d TX descriptors instead of %d!\n",
			      EM_DEFAULT_TXD, em_txd);
		adapter->num_tx_desc = EM_DEFAULT_TXD;
	} else {
		adapter->num_tx_desc = em_txd;
	}
	if ((em_rxd * sizeof(struct e1000_rx_desc)) % EM_DBA_ALIGN != 0 ||
	    (adapter->hw.mac.type >= e1000_82544 && em_rxd > EM_MAX_RXD) ||
	    (adapter->hw.mac.type < e1000_82544 && em_rxd > EM_MAX_RXD_82543) ||
	    em_rxd < EM_MIN_RXD) {
		device_printf(dev, "Using %d RX descriptors instead of %d!\n",
			      EM_DEFAULT_RXD, em_rxd);
		adapter->num_rx_desc = EM_DEFAULT_RXD;
	} else {
		adapter->num_rx_desc = em_rxd;
	}

	adapter->hw.mac.autoneg = DO_AUTO_NEG;
	adapter->hw.phy.autoneg_wait_to_complete = FALSE;
	adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
	adapter->rx_buffer_len = MCLBYTES;

	/*
	 * Interrupt throttle rate
	 */
	if (em_int_throttle_ceil == 0) {
		adapter->int_throttle_ceil = 0;
	} else {
		int throttle = em_int_throttle_ceil;

		if (throttle < 0)
			throttle = EM_DEFAULT_ITR;

		/* Recalculate the tunable value to get the exact frequency. */
		throttle = 1000000000 / 256 / throttle;

		/* Upper 16 bits of ITR are reserved and should be zero */
		if (throttle & 0xffff0000)
			throttle = 1000000000 / 256 / EM_DEFAULT_ITR;

		adapter->int_throttle_ceil = 1000000000 / 256 / throttle;
	}
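	/*
	 * Worked example of the math above, assuming EM_DEFAULT_ITR is
	 * 10000: a requested ceiling of 10000 interrupts/s gives an
	 * intermediate value of 1000000000 / 256 / 10000 = 390 (the ITR
	 * register counts in 256ns units), and the ceiling stored back
	 * is 1000000000 / 256 / 390 = 10016 interrupts/s, the closest
	 * rate the register granularity can express.
	 */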
	e1000_init_script_state_82541(&adapter->hw, TRUE);
	e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE);

	/* Copper options */
	if (adapter->hw.phy.media_type == e1000_media_type_copper) {
		adapter->hw.phy.mdix = AUTO_ALL_MODES;
		adapter->hw.phy.disable_polarity_correction = FALSE;
		adapter->hw.phy.ms_type = EM_MASTER_SLAVE;
	}

	/* Set the frame limits assuming standard ethernet sized frames. */
	adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
	adapter->min_frame_size = ETH_ZLEN + ETHER_CRC_LEN;

	/* This controls when hardware reports transmit completion status. */
	adapter->hw.mac.report_tx_early = 1;

	/*
	 * Create top level busdma tag
	 */
	error = bus_dma_tag_create(NULL, 1, 0,
			BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
			NULL, NULL,
			BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
			0, &adapter->parent_dtag);
	if (error) {
		device_printf(dev, "could not create top level DMA tag\n");
		goto fail;
	}

	/*
	 * Allocate Transmit Descriptor ring
	 */
	tsize = roundup2(adapter->num_tx_desc * sizeof(struct e1000_tx_desc),
			 EM_DBA_ALIGN);
	error = em_dma_malloc(adapter, tsize, &adapter->txdma);
	if (error) {
		device_printf(dev, "Unable to allocate tx_desc memory\n");
		goto fail;
	}
	adapter->tx_desc_base = adapter->txdma.dma_vaddr;

	/*
	 * Allocate Receive Descriptor ring
	 */
	rsize = roundup2(adapter->num_rx_desc * sizeof(struct e1000_rx_desc),
			 EM_DBA_ALIGN);
	error = em_dma_malloc(adapter, rsize, &adapter->rxdma);
	if (error) {
		device_printf(dev, "Unable to allocate rx_desc memory\n");
		goto fail;
	}
	adapter->rx_desc_base = adapter->rxdma.dma_vaddr;

	/* Allocate multicast array memory. */
	adapter->mta = kmalloc(ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES,
	    M_DEVBUF, M_WAITOK);

	/* Indicate SOL/IDER usage */
	if (e1000_check_reset_block(&adapter->hw)) {
		device_printf(dev,
		    "PHY reset is blocked due to SOL/IDER session.\n");
	}

	/*
	 * Start from a known state; this is important for reading the
	 * NVM and MAC address.
	 */
	e1000_reset_hw(&adapter->hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state; call it again.
		 * If it fails a second time, it's a real issue.
		 */
		if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
			device_printf(dev,
			    "The EEPROM Checksum Is Not Valid\n");
			error = EIO;
			goto fail;
		}
	}

	/* Copy the permanent MAC address out of the EEPROM */
	if (e1000_read_mac_addr(&adapter->hw) < 0) {
		device_printf(dev, "EEPROM read error while reading MAC"
		    " address\n");
		error = EIO;
		goto fail;
	}
	if (!em_is_valid_eaddr(adapter->hw.mac.addr)) {
		device_printf(dev, "Invalid MAC address\n");
		error = EIO;
		goto fail;
	}

	/* Allocate transmit descriptors and buffers */
	error = em_create_tx_ring(adapter);
	if (error) {
		device_printf(dev, "Could not setup transmit structures\n");
		goto fail;
	}

	/* Allocate receive descriptors and buffers */
	error = em_create_rx_ring(adapter);
	if (error) {
		device_printf(dev, "Could not setup receive structures\n");
		goto fail;
	}

	/* Manually turn off all interrupts */
	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);

	/* Determine if we have to control management hardware */
	adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw);

	/*
	 * Setup Wake-on-Lan
	 */
	apme_mask = EM_EEPROM_APME;
	eeprom_data = 0;
	switch (adapter->hw.mac.type) {
	case e1000_82542:
	case e1000_82543:
		break;

	case e1000_82573:
	case e1000_82583:
		adapter->has_amt = 1;
		/* FALL THROUGH */

	case e1000_82546:
	case e1000_82546_rev_3:
	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
		if (adapter->hw.bus.func == 1) {
			e1000_read_nvm(&adapter->hw,
			    NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
		} else {
			e1000_read_nvm(&adapter->hw,
			    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		}
		break;

	case e1000_ich8lan:
	case e1000_ich9lan:
	case e1000_ich10lan:
	case e1000_pchlan:
	case e1000_pch2lan:
		apme_mask = E1000_WUC_APME;
		adapter->has_amt = TRUE;
		eeprom_data = E1000_READ_REG(&adapter->hw, E1000_WUC);
		break;

	default:
		e1000_read_nvm(&adapter->hw,
		    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	}
	if (eeprom_data & apme_mask)
		adapter->wol = E1000_WUFC_MAG | E1000_WUFC_MC;

	/*
	 * We have the eeprom settings, now apply the special cases
	 * where the eeprom may be wrong or the board won't support
	 * wake on lan on a particular port
	 */
	device_id = pci_get_device(dev);
	switch (device_id) {
	case E1000_DEV_ID_82546GB_PCIE:
		adapter->wol = 0;
		break;

	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546GB_FIBER:
	case E1000_DEV_ID_82571EB_FIBER:
		/*
		 * Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting
		 */
		if (E1000_READ_REG(&adapter->hw,
		    E1000_STATUS) & E1000_STATUS_FUNC_1)
			adapter->wol = 0;
		break;

	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
	case E1000_DEV_ID_82571EB_QUAD_COPPER:
	case E1000_DEV_ID_82571EB_QUAD_FIBER:
	case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
		/* if quad port adapter, disable WoL on all but port A */
		if (em_global_quad_port_a != 0)
			adapter->wol = 0;
		/* Reset for multiple quad port adapters */
		if (++em_global_quad_port_a == 4)
			em_global_quad_port_a = 0;
		break;
	}

	/* XXX disable wol */
	adapter->wol = 0;

	/* Setup OS specific network interface */
	em_setup_ifp(adapter);

	/* Add sysctl tree; this must be done after em_setup_ifp() */
	em_add_sysctl(adapter);

	/* Reset the hardware */
	error = em_reset(adapter);
	if (error) {
		device_printf(dev, "Unable to reset the hardware\n");
		goto fail;
	}

	/* Initialize statistics */
	em_update_stats(adapter);

	adapter->hw.mac.get_link_status = 1;
	em_update_link_status(adapter);

	/* Do we need workaround for 82544 PCI-X adapter? */
	if (adapter->hw.bus.type == e1000_bus_type_pcix &&
	    adapter->hw.mac.type == e1000_82544)
		adapter->pcix_82544 = TRUE;
	else
		adapter->pcix_82544 = FALSE;

	if (adapter->pcix_82544) {
		/*
		 * 82544 on PCI-X may split one TX segment
		 * into two TX descs, so we double its number
		 * of spare TX desc here.
		 */
		adapter->spare_tx_desc = 2 * EM_TX_SPARE;
	} else {
		adapter->spare_tx_desc = EM_TX_SPARE;
	}

	/*
	 * Keep following relationship between spare_tx_desc, oact_tx_desc
	 * and tx_int_nsegs:
	 * (spare_tx_desc + EM_TX_RESERVED) <=
	 * oact_tx_desc <= EM_TX_OACTIVE_MAX <= tx_int_nsegs
	 */
	adapter->oact_tx_desc = adapter->num_tx_desc / 8;
	if (adapter->oact_tx_desc > EM_TX_OACTIVE_MAX)
		adapter->oact_tx_desc = EM_TX_OACTIVE_MAX;
	if (adapter->oact_tx_desc < adapter->spare_tx_desc + EM_TX_RESERVED)
		adapter->oact_tx_desc = adapter->spare_tx_desc + EM_TX_RESERVED;

	adapter->tx_int_nsegs = adapter->num_tx_desc / 16;
	if (adapter->tx_int_nsegs < adapter->oact_tx_desc)
		adapter->tx_int_nsegs = adapter->oact_tx_desc;
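	/*
	 * Worked example of the clamping above, with the hypothetical
	 * num_tx_desc = 512 and assuming EM_TX_OACTIVE_MAX and
	 * (spare_tx_desc + EM_TX_RESERVED) are not the limiting factors:
	 * oact_tx_desc becomes 512 / 8 = 64 and tx_int_nsegs starts at
	 * 512 / 16 = 32, which is then raised to 64 to preserve
	 * oact_tx_desc <= tx_int_nsegs.
	 */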
	/* Non-AMT based hardware can now take control from firmware */
	if (adapter->has_manage && !adapter->has_amt &&
	    adapter->hw.mac.type >= e1000_82571)
		em_get_hw_control(adapter);

	/*
	 * Missing Interrupt Following ICR read:
	 *
	 * 82571/82572 specification update errata #76
	 * 82573 specification update errata #31
	 * 82574 specification update errata #12
	 * 82583 specification update errata #4
	 */
	intr_func = em_intr;
	if ((adapter->flags & EM_FLAG_SHARED_INTR) &&
	    (adapter->hw.mac.type == e1000_82571 ||
	     adapter->hw.mac.type == e1000_82572 ||
	     adapter->hw.mac.type == e1000_82573 ||
	     adapter->hw.mac.type == e1000_82574 ||
	     adapter->hw.mac.type == e1000_82583))
		intr_func = em_intr_mask;

	error = bus_setup_intr(dev, adapter->intr_res, INTR_MPSAFE,
			       intr_func, adapter, &adapter->intr_tag,
			       ifp->if_serializer);
	if (error) {
		device_printf(dev, "Failed to register interrupt handler");
		ether_ifdetach(&adapter->arpcom.ac_if);
		goto fail;
	}

	ifp->if_cpuid = rman_get_cpuid(adapter->intr_res);
	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
	return (0);
fail:
	em_detach(dev);
	return (error);
}

static int
em_detach(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &adapter->arpcom.ac_if;

		lwkt_serialize_enter(ifp->if_serializer);

		em_stop(adapter);

		e1000_phy_hw_reset(&adapter->hw);

		em_rel_mgmt(adapter);
		em_rel_hw_control(adapter);

		if (adapter->wol) {
			E1000_WRITE_REG(&adapter->hw, E1000_WUC,
					E1000_WUC_PME_EN);
			E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
			em_enable_wol(dev);
		}

		bus_teardown_intr(dev, adapter->intr_res, adapter->intr_tag);

		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	} else if (adapter->memory != NULL) {
		em_rel_hw_control(adapter);
	}
	bus_generic_detach(dev);

	em_free_pci_res(adapter);

	em_destroy_tx_ring(adapter, adapter->num_tx_desc);
	em_destroy_rx_ring(adapter, adapter->num_rx_desc);

	/* Free Transmit Descriptor ring */
	if (adapter->tx_desc_base)
		em_dma_free(adapter, &adapter->txdma);

	/* Free Receive Descriptor ring */
	if (adapter->rx_desc_base)
		em_dma_free(adapter, &adapter->rxdma);

	/* Free top level busdma tag */
	if (adapter->parent_dtag != NULL)
		bus_dma_tag_destroy(adapter->parent_dtag);

	/* Free sysctl tree */
	if (adapter->sysctl_tree != NULL)
		sysctl_ctx_free(&adapter->sysctl_ctx);

	if (adapter->mta != NULL)
		kfree(adapter->mta, M_DEVBUF);

	return (0);
}

static int
em_shutdown(device_t dev)
{
	return em_suspend(dev);
}

static int
em_suspend(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ifnet *ifp = &adapter->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);

	em_stop(adapter);

	em_rel_mgmt(adapter);
	em_rel_hw_control(adapter);

	if (adapter->wol) {
		E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
		E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
		em_enable_wol(dev);
	}

	lwkt_serialize_exit(ifp->if_serializer);

	return bus_generic_suspend(dev);
}

static int
em_resume(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ifnet *ifp = &adapter->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);

	em_init(adapter);
	em_get_mgmt(adapter);
	if_devstart(ifp);

	lwkt_serialize_exit(ifp->if_serializer);

	return bus_generic_resume(dev);
}

static void
em_start(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct mbuf *m_head;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	if (!adapter->link_active) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	while (!ifq_is_empty(&ifp->if_snd)) {
		/* Do we at least have the minimal number of free TX descs? */
		if (EM_IS_OACTIVE(adapter)) {
			em_tx_collect(adapter);
			if (EM_IS_OACTIVE(adapter)) {
				ifp->if_flags |= IFF_OACTIVE;
				adapter->no_tx_desc_avail1++;
				break;
			}
		}

		logif(pkt_txqueue);
		m_head = ifq_dequeue(&ifp->if_snd, NULL);
		if (m_head == NULL)
			break;

		if (em_encap(adapter, &m_head)) {
			ifp->if_oerrors++;
			em_tx_collect(adapter);
			continue;
		}

		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);
		/* Set timeout in case hardware has problems transmitting. */
		ifp->if_timer = EM_TX_TIMEOUT;
	}
}

static int
em_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	uint16_t eeprom_data = 0;
	int max_frame_size, mask, reinit;
	int error = 0;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (command) {
	case SIOCSIFMTU:
		switch (adapter->hw.mac.type) {
		case e1000_82573:
			/*
			 * 82573 only supports jumbo frames
			 * if ASPM is disabled.
			 */
			e1000_read_nvm(&adapter->hw,
			    NVM_INIT_3GIO_3, 1, &eeprom_data);
			if (eeprom_data & NVM_WORD1A_ASPM_MASK) {
				max_frame_size = ETHER_MAX_LEN;
				break;
			}
			/* FALL THROUGH */

		/* Limit Jumbo Frame size */
		case e1000_82571:
		case e1000_82572:
		case e1000_ich9lan:
		case e1000_ich10lan:
		case e1000_pch2lan:
		case e1000_82574:
		case e1000_82583:
		case e1000_80003es2lan:
			max_frame_size = 9234;
			break;

		case e1000_pchlan:
			max_frame_size = 4096;
			break;

		/* Adapters that do not support jumbo frames */
		case e1000_82542:
		case e1000_ich8lan:
			max_frame_size = ETHER_MAX_LEN;
			break;

		default:
			max_frame_size = MAX_JUMBO_FRAME_SIZE;
			break;
		}
		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
		    ETHER_CRC_LEN) {
			error = EINVAL;
			break;
		}

		ifp->if_mtu = ifr->ifr_mtu;
		adapter->max_frame_size =
		    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

		if (ifp->if_flags & IFF_RUNNING)
			em_init(adapter);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_flags & IFF_RUNNING)) {
				if ((ifp->if_flags ^ adapter->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					em_disable_promisc(adapter);
					em_set_promisc(adapter);
				}
			} else {
				em_init(adapter);
			}
		} else if (ifp->if_flags & IFF_RUNNING) {
			em_stop(adapter);
		}
		adapter->if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING) {
			em_disable_intr(adapter);
			em_set_multi(adapter);
			if (adapter->hw.mac.type == e1000_82542 &&
			    adapter->hw.revision_id == E1000_REVISION_2)
				em_init_rx_unit(adapter);
#ifdef DEVICE_POLLING
			if (!(ifp->if_flags & IFF_POLLING))
#endif
				em_enable_intr(adapter);
		}
		break;

	case SIOCSIFMEDIA:
		/* Check SOL/IDER usage */
		if (e1000_check_reset_block(&adapter->hw)) {
			device_printf(adapter->dev, "Media change is"
			    " blocked due to SOL/IDER session.\n");
			break;
		}
		/* FALL THROUGH */

	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;

	case SIOCSIFCAP:
		reinit = 0;
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable ^= (mask & IFCAP_HWCSUM);
			reinit = 1;
		}
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		if (reinit && (ifp->if_flags & IFF_RUNNING))
			em_init(adapter);
		break;

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return (error);
}
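/*
 * Note on the SIOCSIFMTU handling above: the MTU ceiling follows from
 * the per-MAC hard frame limit, e.g. a 9234 byte frame limit admits an
 * MTU of at most 9234 - ETHER_HDR_LEN (14) - ETHER_CRC_LEN (4) = 9216.
 */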
static void
em_watchdog(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/*
	 * The timer is set to 5 every time start queues a packet.
	 * Then txeof keeps resetting it as long as it cleans at
	 * least one descriptor.
	 * Finally, anytime all descriptors are clean the timer is
	 * set to 0.
	 */

	if (E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
	    E1000_READ_REG(&adapter->hw, E1000_TDH(0))) {
		/*
		 * If we reach here, all TX jobs are completed and
		 * the TX engine should have been idled for some time.
		 * We don't need to call if_devstart() here.
		 */
		ifp->if_flags &= ~IFF_OACTIVE;
		ifp->if_timer = 0;
		return;
	}

	/*
	 * If we are in this routine because of pause frames, then
	 * don't reset the hardware.
	 */
	if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
	    E1000_STATUS_TXOFF) {
		ifp->if_timer = EM_TX_TIMEOUT;
		return;
	}

	if (e1000_check_for_link(&adapter->hw) == 0)
		if_printf(ifp, "watchdog timeout -- resetting\n");

	ifp->if_oerrors++;
	adapter->watchdog_events++;

	em_init(adapter);

	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}

static void
em_init(void *xsc)
{
	struct adapter *adapter = xsc;
	struct ifnet *ifp = &adapter->arpcom.ac_if;
	device_t dev = adapter->dev;
	uint32_t pba;

	ASSERT_SERIALIZED(ifp->if_serializer);

	em_stop(adapter);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer;
	 * the remainder is used for the transmit buffer.
	 *
	 * Devices before the 82547 had a Packet Buffer of 64K.
	 * Default allocation: PBA=48K for Rx, leaving 16K for Tx.
	 * After the 82547 the buffer was reduced to 40K.
	 * Default allocation: PBA=30K for Rx, leaving 10K for Tx.
	 * Note: default does not leave enough room for Jumbo Frame >10k.
	 */
	switch (adapter->hw.mac.type) {
	case e1000_82547:
	case e1000_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
		if (adapter->max_frame_size > 8192)
			pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
		else
			pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
		adapter->tx_fifo_head = 0;
		adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
		adapter->tx_fifo_size =
		    (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
		break;

	/* Total Packet Buffer on these is 48K */
	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
		pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
		break;

	case e1000_82573: /* 82573: Total Packet Buffer is 32K */
		pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
		break;

	case e1000_82574:
	case e1000_82583:
		pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */
		break;

	case e1000_ich8lan:
		pba = E1000_PBA_8K;
		break;

	case e1000_ich9lan:
	case e1000_ich10lan:
#define E1000_PBA_10K	0x000A
		pba = E1000_PBA_10K;
		break;

	case e1000_pchlan:
	case e1000_pch2lan:
		pba = E1000_PBA_26K;
		break;

	default:
		/* Devices before 82547 had a Packet Buffer of 64K. */
		if (adapter->max_frame_size > 8192)
			pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
		else
			pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
	}
	E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IF_LLADDR(ifp), adapter->hw.mac.addr, ETHER_ADDR_LEN);

	/* Put the address into the Receive Address Array */
	e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);

	/*
	 * With the 82571 adapter, RAR[0] may be overwritten
	 * when the other port is reset.  We make a duplicate
	 * in RAR[14] for that eventuality; this assures
	 * the interface continues to function.
	 */
	if (adapter->hw.mac.type == e1000_82571) {
		e1000_set_laa_state_82571(&adapter->hw, TRUE);
		e1000_rar_set(&adapter->hw, adapter->hw.mac.addr,
		    E1000_RAR_ENTRIES - 1);
	}

	/* Reset the hardware */
	if (em_reset(adapter)) {
		device_printf(dev, "Unable to reset the hardware\n");
		/* XXX em_stop()? */
		return;
	}
	em_update_link_status(adapter);

	/* Setup VLAN support, basic and offload if available */
	E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);

	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
		uint32_t ctrl;

		ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
		ctrl |= E1000_CTRL_VME;
		E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
	}

	/* Set hardware offload abilities */
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist = EM_CSUM_FEATURES;
	else
		ifp->if_hwassist = 0;

	/* Configure for OS presence */
	em_get_mgmt(adapter);

	/* Prepare transmit descriptors and buffers */
	em_init_tx_ring(adapter);
	em_init_tx_unit(adapter);

	/* Setup Multicast table */
	em_set_multi(adapter);

	/* Prepare receive descriptors and buffers */
	if (em_init_rx_ring(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		em_stop(adapter);
		return;
	}
	em_init_rx_unit(adapter);

	/* Don't lose promiscuous settings */
	em_set_promisc(adapter);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	callout_reset(&adapter->timer, hz, em_timer, adapter);
	e1000_clear_hw_cntrs_base_generic(&adapter->hw);

	/* MSI/X configuration for 82574 */
	if (adapter->hw.mac.type == e1000_82574) {
		int tmp;

		tmp = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
		tmp |= E1000_CTRL_EXT_PBA_CLR;
		E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, tmp);
		/*
		 * XXX MSIX
		 * Set the IVAR - interrupt vector routing.
		 * Each nibble represents a vector, high bit
		 * is enable, other 3 bits are the MSIX table
		 * entry, we map RXQ0 to 0, TXQ0 to 1, and
		 * Link (other) to 2, hence the magic number.
		 */
		E1000_WRITE_REG(&adapter->hw, E1000_IVAR, 0x800A0908);
	}

#ifdef DEVICE_POLLING
	/*
	 * Only enable interrupts if we are not polling; make sure
	 * they are off otherwise.
	 */
	if (ifp->if_flags & IFF_POLLING)
		em_disable_intr(adapter);
	else
#endif /* DEVICE_POLLING */
		em_enable_intr(adapter);

	/* AMT based hardware can now take control from firmware */
	if (adapter->has_manage && adapter->has_amt &&
	    adapter->hw.mac.type >= e1000_82571)
		em_get_hw_control(adapter);

	/* Don't reset the phy next time init gets called */
	adapter->hw.phy.reset_disable = TRUE;
}

#ifdef DEVICE_POLLING

static void
em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct adapter *adapter = ifp->if_softc;
	uint32_t reg_icr;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (cmd) {
	case POLL_REGISTER:
		em_disable_intr(adapter);
		break;

	case POLL_DEREGISTER:
		em_enable_intr(adapter);
		break;

	case POLL_AND_CHECK_STATUS:
		reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
		if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
			callout_stop(&adapter->timer);
			adapter->hw.mac.get_link_status = 1;
			em_update_link_status(adapter);
			callout_reset(&adapter->timer, hz, em_timer, adapter);
		}
		/* FALL THROUGH */
	case POLL_ONLY:
		if (ifp->if_flags & IFF_RUNNING) {
			em_rxeof(adapter, count);
			em_txeof(adapter);

			if (!ifq_is_empty(&ifp->if_snd))
				if_devstart(ifp);
		}
		break;
	}
}

#endif /* DEVICE_POLLING */

static void
em_intr(void *xsc)
{
	em_intr_body(xsc, TRUE);
}

static void
em_intr_body(struct adapter *adapter, boolean_t chk_asserted)
{
	struct ifnet *ifp = &adapter->arpcom.ac_if;
	uint32_t reg_icr;

	logif(intr_beg);
	ASSERT_SERIALIZED(ifp->if_serializer);

	reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);

	if (chk_asserted &&
	    ((adapter->hw.mac.type >= e1000_82571 &&
	      (reg_icr & E1000_ICR_INT_ASSERTED) == 0) ||
	     reg_icr == 0)) {
		logif(intr_end);
		return;
	}

	/*
	 * XXX: some laptops trigger several spurious interrupts
	 * on em(4) when in the resume cycle.  The ICR register
	 * reports all-ones value in this case.  Processing such
	 * interrupts would lead to a freeze.  I don't know why.
	 */
	if (reg_icr == 0xffffffff) {
		logif(intr_end);
		return;
	}

	if (ifp->if_flags & IFF_RUNNING) {
		if (reg_icr &
		    (E1000_ICR_RXT0 | E1000_ICR_RXDMT0 | E1000_ICR_RXO))
			em_rxeof(adapter, -1);
		if (reg_icr & E1000_ICR_TXDW) {
			em_txeof(adapter);
			if (!ifq_is_empty(&ifp->if_snd))
				if_devstart(ifp);
		}
	}

	/* Link status change */
	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		callout_stop(&adapter->timer);
		adapter->hw.mac.get_link_status = 1;
		em_update_link_status(adapter);

		/* Deal with TX cruft when link lost */
		em_tx_purge(adapter);

		callout_reset(&adapter->timer, hz, em_timer, adapter);
	}

	if (reg_icr & E1000_ICR_RXO)
		adapter->rx_overruns++;

	logif(intr_end);
}

static void
em_intr_mask(void *xsc)
{
	struct adapter *adapter = xsc;

	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
	/*
	 * NOTE:
	 * ICR.INT_ASSERTED bit will never be set if IMS is 0,
	 * so don't check it.
	 */
	em_intr_body(adapter, FALSE);
	E1000_WRITE_REG(&adapter->hw, E1000_IMS, IMS_ENABLE_MASK);
}

static void
em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter *adapter = ifp->if_softc;
	u_char fiber_type = IFM_1000_SX;

	ASSERT_SERIALIZED(ifp->if_serializer);

	em_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!adapter->link_active)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
	    adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
		if (adapter->hw.mac.type == e1000_82545)
			fiber_type = IFM_1000_LX;
		ifmr->ifm_active |= fiber_type | IFM_FDX;
	} else {
		switch (adapter->link_speed) {
		case 10:
			ifmr->ifm_active |= IFM_10_T;
			break;
		case 100:
			ifmr->ifm_active |= IFM_100_TX;
			break;

		case 1000:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		}
		if (adapter->link_duplex == FULL_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;
	}
}

static int
em_media_change(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia *ifm = &adapter->media;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		adapter->hw.mac.autoneg = DO_AUTO_NEG;
		adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
		break;

	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_T:
		adapter->hw.mac.autoneg = DO_AUTO_NEG;
		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;

	case IFM_100_TX:
		adapter->hw.mac.autoneg = FALSE;
		adapter->hw.phy.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
		else
			adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
		break;

	case IFM_10_T:
		adapter->hw.mac.autoneg = FALSE;
		adapter->hw.phy.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
		else
			adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
		break;

	default:
		if_printf(ifp, "Unsupported media type\n");
		break;
	}

	/*
	 * As the speed/duplex settings may have changed we need to
	 * reset the PHY.
	 */
	adapter->hw.phy.reset_disable = FALSE;

	em_init(adapter);

	return (0);
}
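/*
 * For example, forcing 100Mbps full-duplex from userland ends up in
 * the IFM_100_TX case above:
 *
 *	ifconfig em0 media 100baseTX mediaopt full-duplex
 */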
static int
em_encap(struct adapter *adapter, struct mbuf **m_headp)
{
	bus_dma_segment_t segs[EM_MAX_SCATTER];
	bus_dmamap_t map;
	struct em_buffer *tx_buffer, *tx_buffer_mapped;
	struct e1000_tx_desc *ctxd = NULL;
	struct mbuf *m_head = *m_headp;
	uint32_t txd_upper, txd_lower, txd_used, cmd = 0;
	int maxsegs, nsegs, i, j, first, last = 0, error;

	if (m_head->m_len < EM_TXCSUM_MINHL &&
	    (m_head->m_flags & EM_CSUM_FEATURES)) {
		/*
		 * Make sure that ethernet header and ip.ip_hl are in
		 * contiguous memory, since if TXCSUM is enabled, the
		 * later TX context descriptor setup needs to access
		 * ip.ip_hl.
		 */
		error = em_txcsum_pullup(adapter, m_headp);
		if (error) {
			KKASSERT(*m_headp == NULL);
			return error;
		}
		m_head = *m_headp;
	}

	txd_upper = txd_lower = 0;
	txd_used = 0;

	/*
	 * Capture the first descriptor index, this descriptor
	 * will have the index of the EOP which is the only one
	 * that now gets a DONE bit writeback.
	 */
	first = adapter->next_avail_tx_desc;
	tx_buffer = &adapter->tx_buffer_area[first];
	tx_buffer_mapped = tx_buffer;
	map = tx_buffer->map;

	maxsegs = adapter->num_tx_desc_avail - EM_TX_RESERVED;
	KASSERT(maxsegs >= adapter->spare_tx_desc,
		("not enough spare TX desc"));
	if (adapter->pcix_82544) {
		/* Half it; see the comment in em_attach() */
		maxsegs >>= 1;
	}
	if (maxsegs > EM_MAX_SCATTER)
		maxsegs = EM_MAX_SCATTER;

	error = bus_dmamap_load_mbuf_defrag(adapter->txtag, map, m_headp,
			segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		if (error == ENOBUFS)
			adapter->mbuf_alloc_failed++;
		else
			adapter->no_tx_dma_setup++;

		m_freem(*m_headp);
		*m_headp = NULL;
		return error;
	}
	bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);

	m_head = *m_headp;
	adapter->tx_nsegs += nsegs;

	if (m_head->m_pkthdr.csum_flags & EM_CSUM_FEATURES) {
		/* TX csum offloading will consume one TX desc */
		adapter->tx_nsegs += em_txcsum(adapter, m_head,
					       &txd_upper, &txd_lower);
	}
	i = adapter->next_avail_tx_desc;

	/* Set up our transmit descriptors */
	for (j = 0; j < nsegs; j++) {
		/* If adapter is 82544 and on PCIX bus */
		if (adapter->pcix_82544) {
			DESC_ARRAY desc_array;
			uint32_t array_elements, counter;

			/*
			 * Check the Address and Length combination and
			 * split the data accordingly
			 */
			array_elements = em_82544_fill_desc(segs[j].ds_addr,
						segs[j].ds_len, &desc_array);
			for (counter = 0; counter < array_elements; counter++) {
				KKASSERT(txd_used < adapter->num_tx_desc_avail);

				tx_buffer = &adapter->tx_buffer_area[i];
				ctxd = &adapter->tx_desc_base[i];

				ctxd->buffer_addr = htole64(
				    desc_array.descriptor[counter].address);
				ctxd->lower.data = htole32(
				    E1000_TXD_CMD_IFCS | txd_lower |
				    desc_array.descriptor[counter].length);
				ctxd->upper.data = htole32(txd_upper);

				last = i;
				if (++i == adapter->num_tx_desc)
					i = 0;

				txd_used++;
			}
		} else {
			tx_buffer = &adapter->tx_buffer_area[i];
			ctxd = &adapter->tx_desc_base[i];

			ctxd->buffer_addr = htole64(segs[j].ds_addr);
			ctxd->lower.data = htole32(E1000_TXD_CMD_IFCS |
						   txd_lower | segs[j].ds_len);
			ctxd->upper.data = htole32(txd_upper);

			last = i;
			if (++i == adapter->num_tx_desc)
				i = 0;
		}
	}

	adapter->next_avail_tx_desc = i;
	if (adapter->pcix_82544) {
		KKASSERT(adapter->num_tx_desc_avail > txd_used);
		adapter->num_tx_desc_avail -= txd_used;
	} else {
		KKASSERT(adapter->num_tx_desc_avail > nsegs);
		adapter->num_tx_desc_avail -= nsegs;
	}

	/* Handle VLAN tag */
	if (m_head->m_flags & M_VLANTAG) {
		/* Set the VLAN id. */
		ctxd->upper.fields.special =
		    htole16(m_head->m_pkthdr.ether_vlantag);

		/* Tell hardware to add tag */
		ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
	}

	tx_buffer->m_head = m_head;
	tx_buffer_mapped->map = tx_buffer->map;
	tx_buffer->map = map;

	if (adapter->tx_nsegs >= adapter->tx_int_nsegs) {
		adapter->tx_nsegs = 0;

		/*
		 * Report Status (RS) is turned on
		 * every tx_int_nsegs descriptors.
		 */
		cmd = E1000_TXD_CMD_RS;

		/*
		 * Keep track of the descriptor, which will
		 * be written back by hardware.
		 */
		adapter->tx_dd[adapter->tx_dd_tail] = last;
		EM_INC_TXDD_IDX(adapter->tx_dd_tail);
		KKASSERT(adapter->tx_dd_tail != adapter->tx_dd_head);
	}

	/*
	 * Last Descriptor of Packet needs End Of Packet (EOP)
	 */
	ctxd->lower.data |= htole32(E1000_TXD_CMD_EOP | cmd);

	/*
	 * Advance the Transmit Descriptor Tail (TDT), this tells the E1000
	 * that this frame is available to transmit.
	 */
	if (adapter->hw.mac.type == e1000_82547 &&
	    adapter->link_duplex == HALF_DUPLEX) {
		em_82547_move_tail_serialized(adapter);
	} else {
		E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), i);
		if (adapter->hw.mac.type == e1000_82547) {
			em_82547_update_fifo_head(adapter,
			    m_head->m_pkthdr.len);
		}
	}
	return (0);
}

/*
 * 82547 workaround to avoid controller hang in half-duplex environment.
 * The workaround is to avoid queuing a large packet that would span
 * the internal Tx FIFO ring boundary.  We need to reset the FIFO pointers
 * in this case.  We do that only when FIFO is quiescent.
 */
static void
em_82547_move_tail_serialized(struct adapter *adapter)
{
	struct e1000_tx_desc *tx_desc;
	uint16_t hw_tdt, sw_tdt, length = 0;
	bool eop = 0;

	ASSERT_SERIALIZED(adapter->arpcom.ac_if.if_serializer);

	hw_tdt = E1000_READ_REG(&adapter->hw, E1000_TDT(0));
	sw_tdt = adapter->next_avail_tx_desc;

	while (hw_tdt != sw_tdt) {
		tx_desc = &adapter->tx_desc_base[hw_tdt];
		length += tx_desc->lower.flags.length;
		eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
		if (++hw_tdt == adapter->num_tx_desc)
			hw_tdt = 0;

		if (eop) {
			if (em_82547_fifo_workaround(adapter, length)) {
				adapter->tx_fifo_wrk_cnt++;
				callout_reset(&adapter->tx_fifo_timer, 1,
					em_82547_move_tail, adapter);
				break;
			}
			E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), hw_tdt);
			em_82547_update_fifo_head(adapter, length);
			length = 0;
		}
	}
}

static void
em_82547_move_tail(void *xsc)
{
	struct adapter *adapter = xsc;
	struct ifnet *ifp = &adapter->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	em_82547_move_tail_serialized(adapter);
	lwkt_serialize_exit(ifp->if_serializer);
}

static int
em_82547_fifo_workaround(struct adapter *adapter, int len)
{
	int fifo_space, fifo_pkt_len;

	fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);

	if (adapter->link_duplex == HALF_DUPLEX) {
		fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;

		if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
			if (em_82547_tx_fifo_reset(adapter))
				return (0);
			else
				return (1);
		}
	}
	return (0);
}
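/*
 * Worked example of the FIFO accounting above and below, assuming
 * EM_FIFO_HDR is 16: a 1514 byte frame occupies
 * roundup2(1514 + 16, 16) = 1536 bytes of the TX FIFO, and
 * tx_fifo_head advances by that amount, wrapping at tx_fifo_size.
 */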
static void
em_82547_update_fifo_head(struct adapter *adapter, int len)
{
	int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);

	/* tx_fifo_head is always 16 byte aligned */
	adapter->tx_fifo_head += fifo_pkt_len;
	if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
		adapter->tx_fifo_head -= adapter->tx_fifo_size;
}

static int
em_82547_tx_fifo_reset(struct adapter *adapter)
{
	uint32_t tctl;

	if ((E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
	     E1000_READ_REG(&adapter->hw, E1000_TDH(0))) &&
	    (E1000_READ_REG(&adapter->hw, E1000_TDFT) ==
	     E1000_READ_REG(&adapter->hw, E1000_TDFH)) &&
	    (E1000_READ_REG(&adapter->hw, E1000_TDFTS) ==
	     E1000_READ_REG(&adapter->hw, E1000_TDFHS)) &&
	    (E1000_READ_REG(&adapter->hw, E1000_TDFPC) == 0)) {
		/* Disable TX unit */
		tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
		E1000_WRITE_REG(&adapter->hw, E1000_TCTL,
		    tctl & ~E1000_TCTL_EN);

		/* Reset FIFO pointers */
		E1000_WRITE_REG(&adapter->hw, E1000_TDFT,
		    adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, E1000_TDFH,
		    adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, E1000_TDFTS,
		    adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, E1000_TDFHS,
		    adapter->tx_head_addr);

		/* Re-enable TX unit */
		E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
		E1000_WRITE_FLUSH(&adapter->hw);

		adapter->tx_fifo_head = 0;
		adapter->tx_fifo_reset_cnt++;

		return (TRUE);
	} else {
		return (FALSE);
	}
}

static void
em_set_promisc(struct adapter *adapter)
{
	struct ifnet *ifp = &adapter->arpcom.ac_if;
	uint32_t reg_rctl;

	reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);

	if (ifp->if_flags & IFF_PROMISC) {
		reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		/* Turn this on if you want to see bad packets */
		if (em_debug_sbp)
			reg_rctl |= E1000_RCTL_SBP;
		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		reg_rctl |= E1000_RCTL_MPE;
		reg_rctl &= ~E1000_RCTL_UPE;
		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
	}
}

static void
em_disable_promisc(struct adapter *adapter)
{
	uint32_t reg_rctl;

	reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);

	reg_rctl &= ~E1000_RCTL_UPE;
	reg_rctl &= ~E1000_RCTL_MPE;
	reg_rctl &= ~E1000_RCTL_SBP;
	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
}

static void
em_set_multi(struct adapter *adapter)
{
	struct ifnet *ifp = &adapter->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint32_t reg_rctl = 0;
	uint8_t *mta;
	int mcnt = 0;

	mta = adapter->mta;
	bzero(mta, ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);

	if (adapter->hw.mac.type == e1000_82542 &&
	    adapter->hw.revision_id == E1000_REVISION_2) {
		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
		if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
			e1000_pci_clear_mwi(&adapter->hw);
		reg_rctl |= E1000_RCTL_RST;
		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
		msec_delay(5);
	}

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
			break;

		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    &mta[mcnt * ETHER_ADDR_LEN], ETHER_ADDR_LEN);
		mcnt++;
} 1959 1960 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) { 1961 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); 1962 reg_rctl |= E1000_RCTL_MPE; 1963 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); 1964 } else { 1965 e1000_update_mc_addr_list(&adapter->hw, mta, mcnt); 1966 } 1967 1968 if (adapter->hw.mac.type == e1000_82542 && 1969 adapter->hw.revision_id == E1000_REVISION_2) { 1970 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); 1971 reg_rctl &= ~E1000_RCTL_RST; 1972 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); 1973 msec_delay(5); 1974 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) 1975 e1000_pci_set_mwi(&adapter->hw); 1976 } 1977 } 1978 1979 /* 1980 * This routine checks for link status and updates statistics. 1981 */ 1982 static void 1983 em_timer(void *xsc) 1984 { 1985 struct adapter *adapter = xsc; 1986 struct ifnet *ifp = &adapter->arpcom.ac_if; 1987 1988 lwkt_serialize_enter(ifp->if_serializer); 1989 1990 em_update_link_status(adapter); 1991 em_update_stats(adapter); 1992 1993 /* Reset LAA into RAR[0] on 82571 */ 1994 if (e1000_get_laa_state_82571(&adapter->hw) == TRUE) 1995 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0); 1996 1997 if (em_display_debug_stats && (ifp->if_flags & IFF_RUNNING)) 1998 em_print_hw_stats(adapter); 1999 2000 em_smartspeed(adapter); 2001 2002 callout_reset(&adapter->timer, hz, em_timer, adapter); 2003 2004 lwkt_serialize_exit(ifp->if_serializer); 2005 } 2006 2007 static void 2008 em_update_link_status(struct adapter *adapter) 2009 { 2010 struct e1000_hw *hw = &adapter->hw; 2011 struct ifnet *ifp = &adapter->arpcom.ac_if; 2012 device_t dev = adapter->dev; 2013 uint32_t link_check = 0; 2014 2015 /* Get the cached link value or read phy for real */ 2016 switch (hw->phy.media_type) { 2017 case e1000_media_type_copper: 2018 if (hw->mac.get_link_status) { 2019 /* Do the work to read phy */ 2020 e1000_check_for_link(hw); 2021 link_check = !hw->mac.get_link_status; 2022 if (link_check) /* ESB2 fix */ 2023 e1000_cfg_on_link_up(hw); 2024 } else { 2025 link_check = TRUE; 2026 } 2027 break; 2028 2029 case e1000_media_type_fiber: 2030 e1000_check_for_link(hw); 2031 link_check = 2032 E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU; 2033 break; 2034 2035 case e1000_media_type_internal_serdes: 2036 e1000_check_for_link(hw); 2037 link_check = adapter->hw.mac.serdes_has_link; 2038 break; 2039 2040 case e1000_media_type_unknown: 2041 default: 2042 break; 2043 } 2044 2045 /* Now check for a transition */ 2046 if (link_check && adapter->link_active == 0) { 2047 e1000_get_speed_and_duplex(hw, &adapter->link_speed, 2048 &adapter->link_duplex); 2049 2050 /* 2051 * Check if we should enable/disable SPEED_MODE bit on 2052 * 82571/82572 2053 */ 2054 if (adapter->link_speed != SPEED_1000 && 2055 (hw->mac.type == e1000_82571 || 2056 hw->mac.type == e1000_82572)) { 2057 int tarc0; 2058 2059 tarc0 = E1000_READ_REG(hw, E1000_TARC(0)); 2060 tarc0 &= ~SPEED_MODE_BIT; 2061 E1000_WRITE_REG(hw, E1000_TARC(0), tarc0); 2062 } 2063 if (bootverbose) { 2064 device_printf(dev, "Link is up %d Mbps %s\n", 2065 adapter->link_speed, 2066 ((adapter->link_duplex == FULL_DUPLEX) ? 
2067 "Full Duplex" : "Half Duplex")); 2068 } 2069 adapter->link_active = 1; 2070 adapter->smartspeed = 0; 2071 ifp->if_baudrate = adapter->link_speed * 1000000; 2072 ifp->if_link_state = LINK_STATE_UP; 2073 if_link_state_change(ifp); 2074 } else if (!link_check && adapter->link_active == 1) { 2075 ifp->if_baudrate = adapter->link_speed = 0; 2076 adapter->link_duplex = 0; 2077 if (bootverbose) 2078 device_printf(dev, "Link is Down\n"); 2079 adapter->link_active = 0; 2080 #if 0 2081 /* Link down, disable watchdog */ 2082 if->if_timer = 0; 2083 #endif 2084 ifp->if_link_state = LINK_STATE_DOWN; 2085 if_link_state_change(ifp); 2086 } 2087 } 2088 2089 static void 2090 em_stop(struct adapter *adapter) 2091 { 2092 struct ifnet *ifp = &adapter->arpcom.ac_if; 2093 int i; 2094 2095 ASSERT_SERIALIZED(ifp->if_serializer); 2096 2097 em_disable_intr(adapter); 2098 2099 callout_stop(&adapter->timer); 2100 callout_stop(&adapter->tx_fifo_timer); 2101 2102 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 2103 ifp->if_timer = 0; 2104 2105 e1000_reset_hw(&adapter->hw); 2106 if (adapter->hw.mac.type >= e1000_82544) 2107 E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0); 2108 2109 for (i = 0; i < adapter->num_tx_desc; i++) { 2110 struct em_buffer *tx_buffer = &adapter->tx_buffer_area[i]; 2111 2112 if (tx_buffer->m_head != NULL) { 2113 bus_dmamap_unload(adapter->txtag, tx_buffer->map); 2114 m_freem(tx_buffer->m_head); 2115 tx_buffer->m_head = NULL; 2116 } 2117 } 2118 2119 for (i = 0; i < adapter->num_rx_desc; i++) { 2120 struct em_buffer *rx_buffer = &adapter->rx_buffer_area[i]; 2121 2122 if (rx_buffer->m_head != NULL) { 2123 bus_dmamap_unload(adapter->rxtag, rx_buffer->map); 2124 m_freem(rx_buffer->m_head); 2125 rx_buffer->m_head = NULL; 2126 } 2127 } 2128 2129 if (adapter->fmp != NULL) 2130 m_freem(adapter->fmp); 2131 adapter->fmp = NULL; 2132 adapter->lmp = NULL; 2133 2134 adapter->csum_flags = 0; 2135 adapter->csum_ehlen = 0; 2136 adapter->csum_iphlen = 0; 2137 2138 adapter->tx_dd_head = 0; 2139 adapter->tx_dd_tail = 0; 2140 adapter->tx_nsegs = 0; 2141 } 2142 2143 static int 2144 em_get_hw_info(struct adapter *adapter) 2145 { 2146 device_t dev = adapter->dev; 2147 2148 /* Save off the information about this board */ 2149 adapter->hw.vendor_id = pci_get_vendor(dev); 2150 adapter->hw.device_id = pci_get_device(dev); 2151 adapter->hw.revision_id = pci_get_revid(dev); 2152 adapter->hw.subsystem_vendor_id = pci_get_subvendor(dev); 2153 adapter->hw.subsystem_device_id = pci_get_subdevice(dev); 2154 2155 /* Do Shared Code Init and Setup */ 2156 if (e1000_set_mac_type(&adapter->hw)) 2157 return ENXIO; 2158 return 0; 2159 } 2160 2161 static int 2162 em_alloc_pci_res(struct adapter *adapter) 2163 { 2164 device_t dev = adapter->dev; 2165 u_int intr_flags; 2166 int val, rid, msi_enable; 2167 2168 /* Enable bus mastering */ 2169 pci_enable_busmaster(dev); 2170 2171 adapter->memory_rid = EM_BAR_MEM; 2172 adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 2173 &adapter->memory_rid, RF_ACTIVE); 2174 if (adapter->memory == NULL) { 2175 device_printf(dev, "Unable to allocate bus resource: memory\n"); 2176 return (ENXIO); 2177 } 2178 adapter->osdep.mem_bus_space_tag = 2179 rman_get_bustag(adapter->memory); 2180 adapter->osdep.mem_bus_space_handle = 2181 rman_get_bushandle(adapter->memory); 2182 2183 /* XXX This is quite goofy, it is not actually used */ 2184 adapter->hw.hw_addr = (uint8_t *)&adapter->osdep.mem_bus_space_handle; 2185 2186 /* Only older adapters use IO mapping */ 2187 if (adapter->hw.mac.type > e1000_82543 && 
2188 	    adapter->hw.mac.type < e1000_82571) {
2189 		/* Figure out where our IO BAR is */
2190 		for (rid = PCIR_BAR(0); rid < PCIR_CARDBUSCIS;) {
2191 			val = pci_read_config(dev, rid, 4);
2192 			if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) {
2193 				adapter->io_rid = rid;
2194 				break;
2195 			}
2196 			rid += 4;
2197 			/* check for 64bit BAR */
2198 			if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT)
2199 				rid += 4;
2200 		}
2201 		if (rid >= PCIR_CARDBUSCIS) {
2202 			device_printf(dev, "Unable to locate IO BAR\n");
2203 			return (ENXIO);
2204 		}
2205 		adapter->ioport = bus_alloc_resource_any(dev, SYS_RES_IOPORT,
2206 		    &adapter->io_rid, RF_ACTIVE);
2207 		if (adapter->ioport == NULL) {
2208 			device_printf(dev, "Unable to allocate bus resource: "
2209 			    "ioport\n");
2210 			return (ENXIO);
2211 		}
2212 		adapter->hw.io_base = 0;
2213 		adapter->osdep.io_bus_space_tag =
2214 		    rman_get_bustag(adapter->ioport);
2215 		adapter->osdep.io_bus_space_handle =
2216 		    rman_get_bushandle(adapter->ioport);
2217 	}
2218 
2219 	/*
2220 	 * Don't enable MSI-X on 82574, see:
2221 	 * 82574 specification update errata #15
2222 	 *
2223 	 * Don't enable MSI on PCI/PCI-X chips, see:
2224 	 * 82540 specification update errata #6
2225 	 * 82545 specification update errata #4
2226 	 *
2227 	 * Don't enable MSI on 82571/82572, see:
2228 	 * 82571/82572 specification update errata #63
2229 	 */
2230 	msi_enable = em_msi_enable;
2231 	if (msi_enable &&
2232 	    (!pci_is_pcie(dev) ||
2233 	     adapter->hw.mac.type == e1000_82571 ||
2234 	     adapter->hw.mac.type == e1000_82572))
2235 		msi_enable = 0;
2236 
2237 	adapter->intr_type = pci_alloc_1intr(dev, msi_enable,
2238 	    &adapter->intr_rid, &intr_flags);
2239 
2240 	if (adapter->intr_type == PCI_INTR_TYPE_LEGACY) {
2241 		int unshared;
2242 
2243 		unshared = device_getenv_int(dev, "irq.unshared", 0);
2244 		if (!unshared) {
2245 			adapter->flags |= EM_FLAG_SHARED_INTR;
2246 			if (bootverbose)
2247 				device_printf(dev, "IRQ shared\n");
2248 		} else {
2249 			intr_flags &= ~RF_SHAREABLE;
2250 			if (bootverbose)
2251 				device_printf(dev, "IRQ unshared\n");
2252 		}
2253 	}
2254 
2255 	adapter->intr_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
2256 	    &adapter->intr_rid, intr_flags);
2257 	if (adapter->intr_res == NULL) {
2258 		device_printf(dev, "Unable to allocate bus resource: "
2259 		    "interrupt\n");
2260 		return (ENXIO);
2261 	}
2262 
2263 	adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
2264 	adapter->hw.back = &adapter->osdep;
2265 	return (0);
2266 }
2267 
2268 static void
2269 em_free_pci_res(struct adapter *adapter)
2270 {
2271 	device_t dev = adapter->dev;
2272 
2273 	if (adapter->intr_res != NULL) {
2274 		bus_release_resource(dev, SYS_RES_IRQ,
2275 		    adapter->intr_rid, adapter->intr_res);
2276 	}
2277 
2278 	if (adapter->intr_type == PCI_INTR_TYPE_MSI)
2279 		pci_release_msi(dev);
2280 
2281 	if (adapter->memory != NULL) {
2282 		bus_release_resource(dev, SYS_RES_MEMORY,
2283 		    adapter->memory_rid, adapter->memory);
2284 	}
2285 
2286 	if (adapter->flash != NULL) {
2287 		bus_release_resource(dev, SYS_RES_MEMORY,
2288 		    adapter->flash_rid, adapter->flash);
2289 	}
2290 
2291 	if (adapter->ioport != NULL) {
2292 		bus_release_resource(dev, SYS_RES_IOPORT,
2293 		    adapter->io_rid, adapter->ioport);
2294 	}
2295 }
2296 
2297 static int
2298 em_reset(struct adapter *adapter)
2299 {
2300 	device_t dev = adapter->dev;
2301 	uint16_t rx_buffer_size;
2302 
2303 	/* When hardware is reset, fifo_head is also reset */
2304 	adapter->tx_fifo_head = 0;
2305 
2306 	/* Set up smart power down as default off on newer adapters.
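 */

/*
 * Worked example for the flow-control watermarks computed below
 * (illustrative numbers only): if the PBA low word reads 0x20, the
 * receive packet buffer is 0x20 << 10 = 32768 bytes.  With a standard
 * 1518-byte max frame, high_water = 32768 - roundup2(1518, 1024) =
 * 32768 - 2048 = 30720, and low_water = 30720 - 1500 = 29220.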
 */
2307 	if (!em_smart_pwr_down &&
2308 	    (adapter->hw.mac.type == e1000_82571 ||
2309 	     adapter->hw.mac.type == e1000_82572)) {
2310 		uint16_t phy_tmp = 0;
2311 
2312 		/* Speed up time to link by disabling smart power down. */
2313 		e1000_read_phy_reg(&adapter->hw,
2314 		    IGP02E1000_PHY_POWER_MGMT, &phy_tmp);
2315 		phy_tmp &= ~IGP02E1000_PM_SPD;
2316 		e1000_write_phy_reg(&adapter->hw,
2317 		    IGP02E1000_PHY_POWER_MGMT, phy_tmp);
2318 	}
2319 
2320 	/*
2321 	 * These parameters control the automatic generation (Tx) and
2322 	 * response (Rx) to Ethernet PAUSE frames.
2323 	 * - High water mark should allow for at least two frames to be
2324 	 *   received after sending an XOFF.
2325 	 * - Low water mark works best when it is very near the high water mark.
2326 	 *   This allows the receiver to restart by sending XON when it has
2327 	 *   drained a bit. Here we use an arbitrary value of 1500 which will
2328 	 *   restart after one full frame is pulled from the buffer. There
2329 	 *   could be several smaller frames in the buffer and if so they will
2330 	 *   not trigger the XON until their combined size reduces the buffer
2331 	 *   by 1500 bytes.
2332 	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
2333 	 */
2334 	rx_buffer_size =
2335 	    (E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff) << 10;
2336 
2337 	adapter->hw.fc.high_water = rx_buffer_size -
2338 	    roundup2(adapter->max_frame_size, 1024);
2339 	adapter->hw.fc.low_water = adapter->hw.fc.high_water - 1500;
2340 
2341 	if (adapter->hw.mac.type == e1000_80003es2lan)
2342 		adapter->hw.fc.pause_time = 0xFFFF;
2343 	else
2344 		adapter->hw.fc.pause_time = EM_FC_PAUSE_TIME;
2345 
2346 	adapter->hw.fc.send_xon = TRUE;
2347 
2348 	adapter->hw.fc.requested_mode = e1000_fc_full;
2349 
2350 	/* Workaround: no TX flow ctrl for PCH */
2351 	if (adapter->hw.mac.type == e1000_pchlan)
2352 		adapter->hw.fc.requested_mode = e1000_fc_rx_pause;
2353 
2354 	/* Override settings for PCH2LAN; yes, these values are magic :) */
2355 	if (adapter->hw.mac.type == e1000_pch2lan) {
2356 		adapter->hw.fc.high_water = 0x5C20;
2357 		adapter->hw.fc.low_water = 0x5048;
2358 		adapter->hw.fc.pause_time = 0x0650;
2359 		adapter->hw.fc.refresh_time = 0x0400;
2360 
2361 		/* Jumbos need adjusted PBA */
2362 		if (adapter->arpcom.ac_if.if_mtu > ETHERMTU)
2363 			E1000_WRITE_REG(&adapter->hw, E1000_PBA, 12);
2364 		else
2365 			E1000_WRITE_REG(&adapter->hw, E1000_PBA, 26);
2366 	}
2367 
2368 	/* Issue a global reset */
2369 	e1000_reset_hw(&adapter->hw);
2370 	if (adapter->hw.mac.type >= e1000_82544)
2371 		E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
2372 	em_disable_aspm(adapter);
2373 
2374 	if (e1000_init_hw(&adapter->hw) < 0) {
2375 		device_printf(dev, "Hardware Initialization Failed\n");
2376 		return (EIO);
2377 	}
2378 
2379 	E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
2380 	e1000_get_phy_info(&adapter->hw);
2381 	e1000_check_for_link(&adapter->hw);
2382 
2383 	return (0);
2384 }
2385 
2386 static void
2387 em_setup_ifp(struct adapter *adapter)
2388 {
2389 	struct ifnet *ifp = &adapter->arpcom.ac_if;
2390 
2391 	if_initname(ifp, device_get_name(adapter->dev),
2392 	    device_get_unit(adapter->dev));
2393 	ifp->if_softc = adapter;
2394 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2395 	ifp->if_init = em_init;
2396 	ifp->if_ioctl = em_ioctl;
2397 	ifp->if_start = em_start;
2398 #ifdef DEVICE_POLLING
2399 	ifp->if_poll = em_poll;
2400 #endif
2401 	ifp->if_watchdog = em_watchdog;
2402 	ifq_set_maxlen(&ifp->if_snd, adapter->num_tx_desc - 1);
2403 	ifq_set_ready(&ifp->if_snd);
2404 
2405 	ether_ifattach(ifp, adapter->hw.mac.addr, NULL);
2406 
2407 	if (adapter->hw.mac.type >=
e1000_82543) 2408 ifp->if_capabilities = IFCAP_HWCSUM; 2409 2410 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU; 2411 ifp->if_capenable = ifp->if_capabilities; 2412 2413 if (ifp->if_capenable & IFCAP_TXCSUM) 2414 ifp->if_hwassist = EM_CSUM_FEATURES; 2415 2416 /* 2417 * Tell the upper layer(s) we support long frames. 2418 */ 2419 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 2420 2421 /* 2422 * Specify the media types supported by this adapter and register 2423 * callbacks to update media and link information 2424 */ 2425 ifmedia_init(&adapter->media, IFM_IMASK, 2426 em_media_change, em_media_status); 2427 if (adapter->hw.phy.media_type == e1000_media_type_fiber || 2428 adapter->hw.phy.media_type == e1000_media_type_internal_serdes) { 2429 u_char fiber_type = IFM_1000_SX; /* default type */ 2430 2431 if (adapter->hw.mac.type == e1000_82545) 2432 fiber_type = IFM_1000_LX; 2433 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX, 2434 0, NULL); 2435 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL); 2436 } else { 2437 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL); 2438 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX, 2439 0, NULL); 2440 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 2441 0, NULL); 2442 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 2443 0, NULL); 2444 if (adapter->hw.phy.type != e1000_phy_ife) { 2445 ifmedia_add(&adapter->media, 2446 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); 2447 ifmedia_add(&adapter->media, 2448 IFM_ETHER | IFM_1000_T, 0, NULL); 2449 } 2450 } 2451 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); 2452 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); 2453 } 2454 2455 2456 /* 2457 * Workaround for SmartSpeed on 82541 and 82547 controllers 2458 */ 2459 static void 2460 em_smartspeed(struct adapter *adapter) 2461 { 2462 uint16_t phy_tmp; 2463 2464 if (adapter->link_active || adapter->hw.phy.type != e1000_phy_igp || 2465 adapter->hw.mac.autoneg == 0 || 2466 (adapter->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0) 2467 return; 2468 2469 if (adapter->smartspeed == 0) { 2470 /* 2471 * If Master/Slave config fault is asserted twice, 2472 * we assume back-to-back 2473 */ 2474 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp); 2475 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT)) 2476 return; 2477 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp); 2478 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) { 2479 e1000_read_phy_reg(&adapter->hw, 2480 PHY_1000T_CTRL, &phy_tmp); 2481 if (phy_tmp & CR_1000T_MS_ENABLE) { 2482 phy_tmp &= ~CR_1000T_MS_ENABLE; 2483 e1000_write_phy_reg(&adapter->hw, 2484 PHY_1000T_CTRL, phy_tmp); 2485 adapter->smartspeed++; 2486 if (adapter->hw.mac.autoneg && 2487 !e1000_phy_setup_autoneg(&adapter->hw) && 2488 !e1000_read_phy_reg(&adapter->hw, 2489 PHY_CONTROL, &phy_tmp)) { 2490 phy_tmp |= MII_CR_AUTO_NEG_EN | 2491 MII_CR_RESTART_AUTO_NEG; 2492 e1000_write_phy_reg(&adapter->hw, 2493 PHY_CONTROL, phy_tmp); 2494 } 2495 } 2496 } 2497 return; 2498 } else if (adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) { 2499 /* If still no link, perhaps using 2/3 pair cable */ 2500 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp); 2501 phy_tmp |= CR_1000T_MS_ENABLE; 2502 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp); 2503 if (adapter->hw.mac.autoneg && 2504 !e1000_phy_setup_autoneg(&adapter->hw) && 2505 !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) { 2506 phy_tmp |= MII_CR_AUTO_NEG_EN | 
MII_CR_RESTART_AUTO_NEG; 2507 e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp); 2508 } 2509 } 2510 2511 /* Restart process after EM_SMARTSPEED_MAX iterations */ 2512 if (adapter->smartspeed++ == EM_SMARTSPEED_MAX) 2513 adapter->smartspeed = 0; 2514 } 2515 2516 static int 2517 em_dma_malloc(struct adapter *adapter, bus_size_t size, 2518 struct em_dma_alloc *dma) 2519 { 2520 dma->dma_vaddr = bus_dmamem_coherent_any(adapter->parent_dtag, 2521 EM_DBA_ALIGN, size, BUS_DMA_WAITOK, 2522 &dma->dma_tag, &dma->dma_map, 2523 &dma->dma_paddr); 2524 if (dma->dma_vaddr == NULL) 2525 return ENOMEM; 2526 else 2527 return 0; 2528 } 2529 2530 static void 2531 em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma) 2532 { 2533 if (dma->dma_tag == NULL) 2534 return; 2535 bus_dmamap_unload(dma->dma_tag, dma->dma_map); 2536 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); 2537 bus_dma_tag_destroy(dma->dma_tag); 2538 } 2539 2540 static int 2541 em_create_tx_ring(struct adapter *adapter) 2542 { 2543 device_t dev = adapter->dev; 2544 struct em_buffer *tx_buffer; 2545 int error, i; 2546 2547 adapter->tx_buffer_area = 2548 kmalloc(sizeof(struct em_buffer) * adapter->num_tx_desc, 2549 M_DEVBUF, M_WAITOK | M_ZERO); 2550 2551 /* 2552 * Create DMA tags for tx buffers 2553 */ 2554 error = bus_dma_tag_create(adapter->parent_dtag, /* parent */ 2555 1, 0, /* alignment, bounds */ 2556 BUS_SPACE_MAXADDR, /* lowaddr */ 2557 BUS_SPACE_MAXADDR, /* highaddr */ 2558 NULL, NULL, /* filter, filterarg */ 2559 EM_TSO_SIZE, /* maxsize */ 2560 EM_MAX_SCATTER, /* nsegments */ 2561 EM_MAX_SEGSIZE, /* maxsegsize */ 2562 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | 2563 BUS_DMA_ONEBPAGE, /* flags */ 2564 &adapter->txtag); 2565 if (error) { 2566 device_printf(dev, "Unable to allocate TX DMA tag\n"); 2567 kfree(adapter->tx_buffer_area, M_DEVBUF); 2568 adapter->tx_buffer_area = NULL; 2569 return error; 2570 } 2571 2572 /* 2573 * Create DMA maps for tx buffers 2574 */ 2575 for (i = 0; i < adapter->num_tx_desc; i++) { 2576 tx_buffer = &adapter->tx_buffer_area[i]; 2577 2578 error = bus_dmamap_create(adapter->txtag, 2579 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, 2580 &tx_buffer->map); 2581 if (error) { 2582 device_printf(dev, "Unable to create TX DMA map\n"); 2583 em_destroy_tx_ring(adapter, i); 2584 return error; 2585 } 2586 } 2587 return (0); 2588 } 2589 2590 static void 2591 em_init_tx_ring(struct adapter *adapter) 2592 { 2593 /* Clear the old ring contents */ 2594 bzero(adapter->tx_desc_base, 2595 (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc); 2596 2597 /* Reset state */ 2598 adapter->next_avail_tx_desc = 0; 2599 adapter->next_tx_to_clean = 0; 2600 adapter->num_tx_desc_avail = adapter->num_tx_desc; 2601 } 2602 2603 static void 2604 em_init_tx_unit(struct adapter *adapter) 2605 { 2606 uint32_t tctl, tarc, tipg = 0; 2607 uint64_t bus_addr; 2608 2609 /* Setup the Base and Length of the Tx Descriptor Ring */ 2610 bus_addr = adapter->txdma.dma_paddr; 2611 E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(0), 2612 adapter->num_tx_desc * sizeof(struct e1000_tx_desc)); 2613 E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(0), 2614 (uint32_t)(bus_addr >> 32)); 2615 E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(0), 2616 (uint32_t)bus_addr); 2617 /* Setup the HW Tx Head and Tail descriptor pointers */ 2618 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0); 2619 E1000_WRITE_REG(&adapter->hw, E1000_TDH(0), 0); 2620 2621 /* Set the default values for the Tx Inter Packet Gap timer */ 2622 switch (adapter->hw.mac.type) { 2623 case e1000_82542: 2624 tipg = 
DEFAULT_82542_TIPG_IPGT;
2625 		tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2626 		tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2627 		break;
2628 
2629 	case e1000_80003es2lan:
2630 		tipg = DEFAULT_82543_TIPG_IPGR1;
2631 		tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 <<
2632 		    E1000_TIPG_IPGR2_SHIFT;
2633 		break;
2634 
2635 	default:
2636 		if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
2637 		    adapter->hw.phy.media_type ==
2638 		    e1000_media_type_internal_serdes)
2639 			tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
2640 		else
2641 			tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
2642 		tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
2643 		tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
2644 		break;
2645 	}
2646 
2647 	E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg);
2648 
2649 	/* NOTE: 0 is not allowed for TIDV */
2650 	E1000_WRITE_REG(&adapter->hw, E1000_TIDV, 1);
2651 	if (adapter->hw.mac.type >= e1000_82540)
2652 		E1000_WRITE_REG(&adapter->hw, E1000_TADV, 0);
2653 
2654 	if (adapter->hw.mac.type == e1000_82571 ||
2655 	    adapter->hw.mac.type == e1000_82572) {
2656 		tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0));
2657 		tarc |= SPEED_MODE_BIT;
2658 		E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
2659 	} else if (adapter->hw.mac.type == e1000_80003es2lan) {
2660 		tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0));
2661 		tarc |= 1;
2662 		E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
2663 		tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(1));
2664 		tarc |= 1;
2665 		E1000_WRITE_REG(&adapter->hw, E1000_TARC(1), tarc);
2666 	}
2667 
2668 	/* Program the Transmit Control Register */
2669 	tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
2670 	tctl &= ~E1000_TCTL_CT;
2671 	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
2672 	    (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2673 
2674 	if (adapter->hw.mac.type >= e1000_82571)
2675 		tctl |= E1000_TCTL_MULR;
2676 
2677 	/* This write will effectively turn on the transmit unit. */
2678 	E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
2679 }
2680 
2681 static void
2682 em_destroy_tx_ring(struct adapter *adapter, int ndesc)
2683 {
2684 	struct em_buffer *tx_buffer;
2685 	int i;
2686 
2687 	if (adapter->tx_buffer_area == NULL)
2688 		return;
2689 
2690 	for (i = 0; i < ndesc; i++) {
2691 		tx_buffer = &adapter->tx_buffer_area[i];
2692 
2693 		KKASSERT(tx_buffer->m_head == NULL);
2694 		bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
2695 	}
2696 	bus_dma_tag_destroy(adapter->txtag);
2697 
2698 	kfree(adapter->tx_buffer_area, M_DEVBUF);
2699 	adapter->tx_buffer_area = NULL;
2700 }
2701 
2702 /*
2703  * The offload context needs to be set when we transfer the first
2704  * packet of a particular protocol (TCP/UDP). This routine has been
2705  * enhanced to deal with inserted VLAN headers.
2706  *
2707  * If the new packet's ether header length, ip header length and
2708  * csum offloading type are the same as the previous packet's, we
2709  * should avoid allocating a new csum context descriptor; mainly to
2710  * take advantage of the pipeline effect of the TX data read request.
2711  *
2712  * This function returns the number of TX descriptors allocated for
2713  * csum context.
2714  */
2715 static int
2716 em_txcsum(struct adapter *adapter, struct mbuf *mp,
2717     uint32_t *txd_upper, uint32_t *txd_lower)
2718 {
2719 	struct e1000_context_desc *TXD;
2720 	struct em_buffer *tx_buffer;
2721 	struct ether_vlan_header *eh;
2722 	struct ip *ip;
2723 	int curr_txd, ehdrlen, csum_flags;
2724 	uint32_t cmd, hdr_len, ip_hlen;
2725 	uint16_t etype;
2726 
2727 	/*
2728 	 * Determine where frame payload starts.
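	 * (For example, ehdrlen below comes out as ETHER_HDR_LEN (14)
	 * for an untagged frame and ETHER_HDR_LEN + EVL_ENCAPLEN (18)
	 * when an 802.1Q tag is present.)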
2729 	 * Jump over vlan headers if already present,
2730 	 * helpful for QinQ too.
2731 	 */
2732 	KASSERT(mp->m_len >= ETHER_HDR_LEN,
2733 	    ("em_txcsum_pullup is not called (eh)?"));
2734 	eh = mtod(mp, struct ether_vlan_header *);
2735 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2736 		KASSERT(mp->m_len >= ETHER_HDR_LEN + EVL_ENCAPLEN,
2737 		    ("em_txcsum_pullup is not called (evh)?"));
2738 		etype = ntohs(eh->evl_proto);
2739 		ehdrlen = ETHER_HDR_LEN + EVL_ENCAPLEN;
2740 	} else {
2741 		etype = ntohs(eh->evl_encap_proto);
2742 		ehdrlen = ETHER_HDR_LEN;
2743 	}
2744 
2745 	/*
2746 	 * We only support TCP/UDP for IPv4 for the moment.
2747 	 * TODO: Support SCTP too when it hits the tree.
2748 	 */
2749 	if (etype != ETHERTYPE_IP)
2750 		return 0;
2751 
2752 	KASSERT(mp->m_len >= ehdrlen + EM_IPVHL_SIZE,
2753 	    ("em_txcsum_pullup is not called (eh+ip_vhl)?"));
2754 
2755 	/* NOTE: Only the ip_vhl part of the IP header can safely be accessed */
2756 	ip = (struct ip *)(mp->m_data + ehdrlen);
2757 	ip_hlen = ip->ip_hl << 2;
2758 
2759 	csum_flags = mp->m_pkthdr.csum_flags & EM_CSUM_FEATURES;
2760 
2761 	if (adapter->csum_ehlen == ehdrlen &&
2762 	    adapter->csum_iphlen == ip_hlen &&
2763 	    adapter->csum_flags == csum_flags) {
2764 		/*
2765 		 * Same csum offload context as the previous packets;
2766 		 * just return.
2767 		 */
2768 		*txd_upper = adapter->csum_txd_upper;
2769 		*txd_lower = adapter->csum_txd_lower;
2770 		return 0;
2771 	}
2772 
2773 	/*
2774 	 * Setup a new csum offload context.
2775 	 */
2776 
2777 	curr_txd = adapter->next_avail_tx_desc;
2778 	tx_buffer = &adapter->tx_buffer_area[curr_txd];
2779 	TXD = (struct e1000_context_desc *)&adapter->tx_desc_base[curr_txd];
2780 
2781 	cmd = 0;
2782 
2783 	/* Setup of IP header checksum. */
2784 	if (csum_flags & CSUM_IP) {
2785 		/*
2786 		 * Start offset for header checksum calculation.
2787 		 * End offset for header checksum calculation.
2788 		 * Offset of place to put the checksum.
2789 		 */
2790 		TXD->lower_setup.ip_fields.ipcss = ehdrlen;
2791 		TXD->lower_setup.ip_fields.ipcse =
2792 		    htole16(ehdrlen + ip_hlen - 1);
2793 		TXD->lower_setup.ip_fields.ipcso =
2794 		    ehdrlen + offsetof(struct ip, ip_sum);
2795 		cmd |= E1000_TXD_CMD_IP;
2796 		*txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2797 	}
2798 	hdr_len = ehdrlen + ip_hlen;
2799 
2800 	if (csum_flags & CSUM_TCP) {
2801 		/*
2802 		 * Start offset for payload checksum calculation.
2803 		 * End offset for payload checksum calculation.
2804 		 * Offset of place to put the checksum.
2805 		 */
2806 		TXD->upper_setup.tcp_fields.tucss = hdr_len;
2807 		TXD->upper_setup.tcp_fields.tucse = htole16(0);
2808 		TXD->upper_setup.tcp_fields.tucso =
2809 		    hdr_len + offsetof(struct tcphdr, th_sum);
2810 		cmd |= E1000_TXD_CMD_TCP;
2811 		*txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2812 	} else if (csum_flags & CSUM_UDP) {
2813 		/*
2814 		 * Start offset for payload checksum calculation.
2815 		 * End offset for payload checksum calculation.
2816 		 * Offset of place to put the checksum.
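		 */

		/*
		 * Illustrative offsets, assuming an untagged IPv4/UDP
		 * frame with no IP options: hdr_len = 14 + 20 = 34, so
		 * tucss = 34 and tucso = 34 +
		 * offsetof(struct udphdr, uh_sum) = 40.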
2817 */ 2818 TXD->upper_setup.tcp_fields.tucss = hdr_len; 2819 TXD->upper_setup.tcp_fields.tucse = htole16(0); 2820 TXD->upper_setup.tcp_fields.tucso = 2821 hdr_len + offsetof(struct udphdr, uh_sum); 2822 *txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2823 } 2824 2825 *txd_lower = E1000_TXD_CMD_DEXT | /* Extended descr type */ 2826 E1000_TXD_DTYP_D; /* Data descr */ 2827 2828 /* Save the information for this csum offloading context */ 2829 adapter->csum_ehlen = ehdrlen; 2830 adapter->csum_iphlen = ip_hlen; 2831 adapter->csum_flags = csum_flags; 2832 adapter->csum_txd_upper = *txd_upper; 2833 adapter->csum_txd_lower = *txd_lower; 2834 2835 TXD->tcp_seg_setup.data = htole32(0); 2836 TXD->cmd_and_length = 2837 htole32(E1000_TXD_CMD_IFCS | E1000_TXD_CMD_DEXT | cmd); 2838 2839 if (++curr_txd == adapter->num_tx_desc) 2840 curr_txd = 0; 2841 2842 KKASSERT(adapter->num_tx_desc_avail > 0); 2843 adapter->num_tx_desc_avail--; 2844 2845 adapter->next_avail_tx_desc = curr_txd; 2846 return 1; 2847 } 2848 2849 static int 2850 em_txcsum_pullup(struct adapter *adapter, struct mbuf **m0) 2851 { 2852 struct mbuf *m = *m0; 2853 struct ether_header *eh; 2854 int len; 2855 2856 adapter->tx_csum_try_pullup++; 2857 2858 len = ETHER_HDR_LEN + EM_IPVHL_SIZE; 2859 2860 if (__predict_false(!M_WRITABLE(m))) { 2861 if (__predict_false(m->m_len < ETHER_HDR_LEN)) { 2862 adapter->tx_csum_drop1++; 2863 m_freem(m); 2864 *m0 = NULL; 2865 return ENOBUFS; 2866 } 2867 eh = mtod(m, struct ether_header *); 2868 2869 if (eh->ether_type == htons(ETHERTYPE_VLAN)) 2870 len += EVL_ENCAPLEN; 2871 2872 if (m->m_len < len) { 2873 adapter->tx_csum_drop2++; 2874 m_freem(m); 2875 *m0 = NULL; 2876 return ENOBUFS; 2877 } 2878 return 0; 2879 } 2880 2881 if (__predict_false(m->m_len < ETHER_HDR_LEN)) { 2882 adapter->tx_csum_pullup1++; 2883 m = m_pullup(m, ETHER_HDR_LEN); 2884 if (m == NULL) { 2885 adapter->tx_csum_pullup1_failed++; 2886 *m0 = NULL; 2887 return ENOBUFS; 2888 } 2889 *m0 = m; 2890 } 2891 eh = mtod(m, struct ether_header *); 2892 2893 if (eh->ether_type == htons(ETHERTYPE_VLAN)) 2894 len += EVL_ENCAPLEN; 2895 2896 if (m->m_len < len) { 2897 adapter->tx_csum_pullup2++; 2898 m = m_pullup(m, len); 2899 if (m == NULL) { 2900 adapter->tx_csum_pullup2_failed++; 2901 *m0 = NULL; 2902 return ENOBUFS; 2903 } 2904 *m0 = m; 2905 } 2906 return 0; 2907 } 2908 2909 static void 2910 em_txeof(struct adapter *adapter) 2911 { 2912 struct ifnet *ifp = &adapter->arpcom.ac_if; 2913 struct em_buffer *tx_buffer; 2914 int first, num_avail; 2915 2916 if (adapter->tx_dd_head == adapter->tx_dd_tail) 2917 return; 2918 2919 if (adapter->num_tx_desc_avail == adapter->num_tx_desc) 2920 return; 2921 2922 num_avail = adapter->num_tx_desc_avail; 2923 first = adapter->next_tx_to_clean; 2924 2925 while (adapter->tx_dd_head != adapter->tx_dd_tail) { 2926 struct e1000_tx_desc *tx_desc; 2927 int dd_idx = adapter->tx_dd[adapter->tx_dd_head]; 2928 2929 tx_desc = &adapter->tx_desc_base[dd_idx]; 2930 if (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) { 2931 EM_INC_TXDD_IDX(adapter->tx_dd_head); 2932 2933 if (++dd_idx == adapter->num_tx_desc) 2934 dd_idx = 0; 2935 2936 while (first != dd_idx) { 2937 logif(pkt_txclean); 2938 2939 num_avail++; 2940 2941 tx_buffer = &adapter->tx_buffer_area[first]; 2942 if (tx_buffer->m_head) { 2943 ifp->if_opackets++; 2944 bus_dmamap_unload(adapter->txtag, 2945 tx_buffer->map); 2946 m_freem(tx_buffer->m_head); 2947 tx_buffer->m_head = NULL; 2948 } 2949 2950 if (++first == adapter->num_tx_desc) 2951 first = 0; 2952 } 2953 } else { 2954 break; 2955 
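			/*
			 * The scan stops at the first descriptor whose DD
			 * (descriptor done) bit is still clear; every slot
			 * before it has been written back by the hardware
			 * and is reclaimed above.
			 */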
	}
2956 	}
2957 	adapter->next_tx_to_clean = first;
2958 	adapter->num_tx_desc_avail = num_avail;
2959 
2960 	if (adapter->tx_dd_head == adapter->tx_dd_tail) {
2961 		adapter->tx_dd_head = 0;
2962 		adapter->tx_dd_tail = 0;
2963 	}
2964 
2965 	if (!EM_IS_OACTIVE(adapter)) {
2966 		ifp->if_flags &= ~IFF_OACTIVE;
2967 
2968 		/* All clean, turn off the timer */
2969 		if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
2970 			ifp->if_timer = 0;
2971 	}
2972 }
2973 
2974 static void
2975 em_tx_collect(struct adapter *adapter)
2976 {
2977 	struct ifnet *ifp = &adapter->arpcom.ac_if;
2978 	struct em_buffer *tx_buffer;
2979 	int tdh, first, num_avail, dd_idx = -1;
2980 
2981 	if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
2982 		return;
2983 
2984 	tdh = E1000_READ_REG(&adapter->hw, E1000_TDH(0));
2985 	if (tdh == adapter->next_tx_to_clean)
2986 		return;
2987 
2988 	if (adapter->tx_dd_head != adapter->tx_dd_tail)
2989 		dd_idx = adapter->tx_dd[adapter->tx_dd_head];
2990 
2991 	num_avail = adapter->num_tx_desc_avail;
2992 	first = adapter->next_tx_to_clean;
2993 
2994 	while (first != tdh) {
2995 		logif(pkt_txclean);
2996 
2997 		num_avail++;
2998 
2999 		tx_buffer = &adapter->tx_buffer_area[first];
3000 		if (tx_buffer->m_head) {
3001 			ifp->if_opackets++;
3002 			bus_dmamap_unload(adapter->txtag,
3003 			    tx_buffer->map);
3004 			m_freem(tx_buffer->m_head);
3005 			tx_buffer->m_head = NULL;
3006 		}
3007 
3008 		if (first == dd_idx) {
3009 			EM_INC_TXDD_IDX(adapter->tx_dd_head);
3010 			if (adapter->tx_dd_head == adapter->tx_dd_tail) {
3011 				adapter->tx_dd_head = 0;
3012 				adapter->tx_dd_tail = 0;
3013 				dd_idx = -1;
3014 			} else {
3015 				dd_idx = adapter->tx_dd[adapter->tx_dd_head];
3016 			}
3017 		}
3018 
3019 		if (++first == adapter->num_tx_desc)
3020 			first = 0;
3021 	}
3022 	adapter->next_tx_to_clean = first;
3023 	adapter->num_tx_desc_avail = num_avail;
3024 
3025 	if (!EM_IS_OACTIVE(adapter)) {
3026 		ifp->if_flags &= ~IFF_OACTIVE;
3027 
3028 		/* All clean, turn off the timer */
3029 		if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
3030 			ifp->if_timer = 0;
3031 	}
3032 }
3033 
3034 /*
3035  * When link is lost, sometimes there is still work pending in the
3036  * TX ring, which would trigger a watchdog.  Rather than allow that,
3037  * do an attempted cleanup and then reinit here.  Note that this has
3038  * been seen mostly with fiber adapters.
3039  */
3040 static void
3041 em_tx_purge(struct adapter *adapter)
3042 {
3043 	struct ifnet *ifp = &adapter->arpcom.ac_if;
3044 
3045 	if (!adapter->link_active && ifp->if_timer) {
3046 		em_tx_collect(adapter);
3047 		if (ifp->if_timer) {
3048 			if_printf(ifp, "Link lost, TX pending, reinit\n");
3049 			ifp->if_timer = 0;
3050 			em_init(adapter);
3051 		}
3052 	}
3053 }
3054 
3055 static int
3056 em_newbuf(struct adapter *adapter, int i, int init)
3057 {
3058 	struct mbuf *m;
3059 	bus_dma_segment_t seg;
3060 	bus_dmamap_t map;
3061 	struct em_buffer *rx_buffer;
3062 	int error, nseg;
3063 
3064 	m = m_getcl(init ?
MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR); 3065 if (m == NULL) { 3066 adapter->mbuf_cluster_failed++; 3067 if (init) { 3068 if_printf(&adapter->arpcom.ac_if, 3069 "Unable to allocate RX mbuf\n"); 3070 } 3071 return (ENOBUFS); 3072 } 3073 m->m_len = m->m_pkthdr.len = MCLBYTES; 3074 3075 if (adapter->max_frame_size <= MCLBYTES - ETHER_ALIGN) 3076 m_adj(m, ETHER_ALIGN); 3077 3078 error = bus_dmamap_load_mbuf_segment(adapter->rxtag, 3079 adapter->rx_sparemap, m, 3080 &seg, 1, &nseg, BUS_DMA_NOWAIT); 3081 if (error) { 3082 m_freem(m); 3083 if (init) { 3084 if_printf(&adapter->arpcom.ac_if, 3085 "Unable to load RX mbuf\n"); 3086 } 3087 return (error); 3088 } 3089 3090 rx_buffer = &adapter->rx_buffer_area[i]; 3091 if (rx_buffer->m_head != NULL) 3092 bus_dmamap_unload(adapter->rxtag, rx_buffer->map); 3093 3094 map = rx_buffer->map; 3095 rx_buffer->map = adapter->rx_sparemap; 3096 adapter->rx_sparemap = map; 3097 3098 rx_buffer->m_head = m; 3099 3100 adapter->rx_desc_base[i].buffer_addr = htole64(seg.ds_addr); 3101 return (0); 3102 } 3103 3104 static int 3105 em_create_rx_ring(struct adapter *adapter) 3106 { 3107 device_t dev = adapter->dev; 3108 struct em_buffer *rx_buffer; 3109 int i, error; 3110 3111 adapter->rx_buffer_area = 3112 kmalloc(sizeof(struct em_buffer) * adapter->num_rx_desc, 3113 M_DEVBUF, M_WAITOK | M_ZERO); 3114 3115 /* 3116 * Create DMA tag for rx buffers 3117 */ 3118 error = bus_dma_tag_create(adapter->parent_dtag, /* parent */ 3119 1, 0, /* alignment, bounds */ 3120 BUS_SPACE_MAXADDR, /* lowaddr */ 3121 BUS_SPACE_MAXADDR, /* highaddr */ 3122 NULL, NULL, /* filter, filterarg */ 3123 MCLBYTES, /* maxsize */ 3124 1, /* nsegments */ 3125 MCLBYTES, /* maxsegsize */ 3126 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, /* flags */ 3127 &adapter->rxtag); 3128 if (error) { 3129 device_printf(dev, "Unable to allocate RX DMA tag\n"); 3130 kfree(adapter->rx_buffer_area, M_DEVBUF); 3131 adapter->rx_buffer_area = NULL; 3132 return error; 3133 } 3134 3135 /* 3136 * Create spare DMA map for rx buffers 3137 */ 3138 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_WAITOK, 3139 &adapter->rx_sparemap); 3140 if (error) { 3141 device_printf(dev, "Unable to create spare RX DMA map\n"); 3142 bus_dma_tag_destroy(adapter->rxtag); 3143 kfree(adapter->rx_buffer_area, M_DEVBUF); 3144 adapter->rx_buffer_area = NULL; 3145 return error; 3146 } 3147 3148 /* 3149 * Create DMA maps for rx buffers 3150 */ 3151 for (i = 0; i < adapter->num_rx_desc; i++) { 3152 rx_buffer = &adapter->rx_buffer_area[i]; 3153 3154 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_WAITOK, 3155 &rx_buffer->map); 3156 if (error) { 3157 device_printf(dev, "Unable to create RX DMA map\n"); 3158 em_destroy_rx_ring(adapter, i); 3159 return error; 3160 } 3161 } 3162 return (0); 3163 } 3164 3165 static int 3166 em_init_rx_ring(struct adapter *adapter) 3167 { 3168 int i, error; 3169 3170 /* Reset descriptor ring */ 3171 bzero(adapter->rx_desc_base, 3172 (sizeof(struct e1000_rx_desc)) * adapter->num_rx_desc); 3173 3174 /* Allocate new ones. 
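 */

/*
 * Note that em_newbuf() is called with init=1 here, so mbuf allocation
 * may block (MB_WAIT) and a failure aborts ring setup; on the receive
 * path it is called with init=0, where a failed refill just drops the
 * frame and leaves the old mbuf in place (see em_rxeof()).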
 */
3175 	for (i = 0; i < adapter->num_rx_desc; i++) {
3176 		error = em_newbuf(adapter, i, 1);
3177 		if (error)
3178 			return (error);
3179 	}
3180 
3181 	/* Setup our descriptor pointers */
3182 	adapter->next_rx_desc_to_check = 0;
3183 
3184 	return (0);
3185 }
3186 
3187 static void
3188 em_init_rx_unit(struct adapter *adapter)
3189 {
3190 	struct ifnet *ifp = &adapter->arpcom.ac_if;
3191 	uint64_t bus_addr;
3192 	uint32_t rctl;
3193 
3194 	/*
3195 	 * Make sure receives are disabled while setting
3196 	 * up the descriptor ring
3197 	 */
3198 	rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
3199 	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
3200 
3201 	if (adapter->hw.mac.type >= e1000_82540) {
3202 		uint32_t itr;
3203 
3204 		/*
3205 		 * Set the interrupt throttling rate. Value is calculated
3206 		 * as ITR = 1 / (INT_THROTTLE_CEIL * 256ns)
3207 		 */
3208 		if (adapter->int_throttle_ceil)
3209 			itr = 1000000000 / 256 / adapter->int_throttle_ceil;
3210 		else
3211 			itr = 0;
3212 		em_set_itr(adapter, itr);
3213 	}
3214 
3215 	/* Disable accelerated acknowledgement on 82574 */
3216 	if (adapter->hw.mac.type == e1000_82574) {
3217 		E1000_WRITE_REG(&adapter->hw,
3218 		    E1000_RFCTL, E1000_RFCTL_ACK_DIS);
3219 	}
3220 
3221 	/* Receive Checksum Offload for TCP and UDP */
3222 	if (ifp->if_capenable & IFCAP_RXCSUM) {
3223 		uint32_t rxcsum;
3224 
3225 		rxcsum = E1000_READ_REG(&adapter->hw, E1000_RXCSUM);
3226 		rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
3227 		E1000_WRITE_REG(&adapter->hw, E1000_RXCSUM, rxcsum);
3228 	}
3229 
3230 	/*
3231 	 * XXX TEMPORARY WORKAROUND: long latencies are observed on some
3232 	 * systems with 82573, e.g. the Lenovo X60.  This change eliminates
3233 	 * the problem, but since having positive values in RDTR is a known
3234 	 * source of problems on other platforms another solution is being
3235 	 * sought.
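 */

/*
 * Worked example for the ITR value programmed above (illustrative):
 * int_throttle_ceil = 10000 interrupts/sec gives
 * itr = 1000000000 / 256 / 10000 = 390, i.e. the hardware waits
 * 390 * 256ns (roughly 100us) between interrupt assertions.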
3236 */ 3237 if (em_82573_workaround && adapter->hw.mac.type == e1000_82573) { 3238 E1000_WRITE_REG(&adapter->hw, E1000_RADV, EM_RADV_82573); 3239 E1000_WRITE_REG(&adapter->hw, E1000_RDTR, EM_RDTR_82573); 3240 } 3241 3242 /* 3243 * Setup the Base and Length of the Rx Descriptor Ring 3244 */ 3245 bus_addr = adapter->rxdma.dma_paddr; 3246 E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(0), 3247 adapter->num_rx_desc * sizeof(struct e1000_rx_desc)); 3248 E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(0), 3249 (uint32_t)(bus_addr >> 32)); 3250 E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(0), 3251 (uint32_t)bus_addr); 3252 3253 /* 3254 * Setup the HW Rx Head and Tail Descriptor Pointers 3255 */ 3256 E1000_WRITE_REG(&adapter->hw, E1000_RDH(0), 0); 3257 E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), adapter->num_rx_desc - 1); 3258 3259 /* Set early receive threshold on appropriate hw */ 3260 if (((adapter->hw.mac.type == e1000_ich9lan) || 3261 (adapter->hw.mac.type == e1000_pch2lan) || 3262 (adapter->hw.mac.type == e1000_ich10lan)) && 3263 (ifp->if_mtu > ETHERMTU)) { 3264 uint32_t rxdctl; 3265 3266 rxdctl = E1000_READ_REG(&adapter->hw, E1000_RXDCTL(0)); 3267 E1000_WRITE_REG(&adapter->hw, E1000_RXDCTL(0), rxdctl | 3); 3268 E1000_WRITE_REG(&adapter->hw, E1000_ERT, 0x100 | (1 << 13)); 3269 } 3270 3271 if (adapter->hw.mac.type == e1000_pch2lan) { 3272 if (ifp->if_mtu > ETHERMTU) 3273 e1000_lv_jumbo_workaround_ich8lan(&adapter->hw, TRUE); 3274 else 3275 e1000_lv_jumbo_workaround_ich8lan(&adapter->hw, FALSE); 3276 } 3277 3278 /* Setup the Receive Control Register */ 3279 rctl &= ~(3 << E1000_RCTL_MO_SHIFT); 3280 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO | 3281 E1000_RCTL_RDMTS_HALF | 3282 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); 3283 3284 /* Make sure VLAN Filters are off */ 3285 rctl &= ~E1000_RCTL_VFE; 3286 3287 if (e1000_tbi_sbp_enabled_82543(&adapter->hw)) 3288 rctl |= E1000_RCTL_SBP; 3289 else 3290 rctl &= ~E1000_RCTL_SBP; 3291 3292 switch (adapter->rx_buffer_len) { 3293 default: 3294 case 2048: 3295 rctl |= E1000_RCTL_SZ_2048; 3296 break; 3297 3298 case 4096: 3299 rctl |= E1000_RCTL_SZ_4096 | 3300 E1000_RCTL_BSEX | E1000_RCTL_LPE; 3301 break; 3302 3303 case 8192: 3304 rctl |= E1000_RCTL_SZ_8192 | 3305 E1000_RCTL_BSEX | E1000_RCTL_LPE; 3306 break; 3307 3308 case 16384: 3309 rctl |= E1000_RCTL_SZ_16384 | 3310 E1000_RCTL_BSEX | E1000_RCTL_LPE; 3311 break; 3312 } 3313 3314 if (ifp->if_mtu > ETHERMTU) 3315 rctl |= E1000_RCTL_LPE; 3316 else 3317 rctl &= ~E1000_RCTL_LPE; 3318 3319 /* Enable Receives */ 3320 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl); 3321 } 3322 3323 static void 3324 em_destroy_rx_ring(struct adapter *adapter, int ndesc) 3325 { 3326 struct em_buffer *rx_buffer; 3327 int i; 3328 3329 if (adapter->rx_buffer_area == NULL) 3330 return; 3331 3332 for (i = 0; i < ndesc; i++) { 3333 rx_buffer = &adapter->rx_buffer_area[i]; 3334 3335 KKASSERT(rx_buffer->m_head == NULL); 3336 bus_dmamap_destroy(adapter->rxtag, rx_buffer->map); 3337 } 3338 bus_dmamap_destroy(adapter->rxtag, adapter->rx_sparemap); 3339 bus_dma_tag_destroy(adapter->rxtag); 3340 3341 kfree(adapter->rx_buffer_area, M_DEVBUF); 3342 adapter->rx_buffer_area = NULL; 3343 } 3344 3345 static void 3346 em_rxeof(struct adapter *adapter, int count) 3347 { 3348 struct ifnet *ifp = &adapter->arpcom.ac_if; 3349 uint8_t status, accept_frame = 0, eop = 0; 3350 uint16_t len, desc_len, prev_len_adj; 3351 struct e1000_rx_desc *current_desc; 3352 struct mbuf *mp; 3353 int i; 3354 3355 i = adapter->next_rx_desc_to_check; 3356 
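	/*
	 * Receive descriptors are consumed in ring order; the DD
	 * (descriptor done) status bit, set by the hardware, marks a
	 * descriptor whose buffer holds received data.
	 */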
current_desc = &adapter->rx_desc_base[i]; 3357 3358 if (!(current_desc->status & E1000_RXD_STAT_DD)) 3359 return; 3360 3361 while ((current_desc->status & E1000_RXD_STAT_DD) && count != 0) { 3362 struct mbuf *m = NULL; 3363 3364 logif(pkt_receive); 3365 3366 mp = adapter->rx_buffer_area[i].m_head; 3367 3368 /* 3369 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT 3370 * needs to access the last received byte in the mbuf. 3371 */ 3372 bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map, 3373 BUS_DMASYNC_POSTREAD); 3374 3375 accept_frame = 1; 3376 prev_len_adj = 0; 3377 desc_len = le16toh(current_desc->length); 3378 status = current_desc->status; 3379 if (status & E1000_RXD_STAT_EOP) { 3380 count--; 3381 eop = 1; 3382 if (desc_len < ETHER_CRC_LEN) { 3383 len = 0; 3384 prev_len_adj = ETHER_CRC_LEN - desc_len; 3385 } else { 3386 len = desc_len - ETHER_CRC_LEN; 3387 } 3388 } else { 3389 eop = 0; 3390 len = desc_len; 3391 } 3392 3393 if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) { 3394 uint8_t last_byte; 3395 uint32_t pkt_len = desc_len; 3396 3397 if (adapter->fmp != NULL) 3398 pkt_len += adapter->fmp->m_pkthdr.len; 3399 3400 last_byte = *(mtod(mp, caddr_t) + desc_len - 1); 3401 if (TBI_ACCEPT(&adapter->hw, status, 3402 current_desc->errors, pkt_len, last_byte, 3403 adapter->min_frame_size, adapter->max_frame_size)) { 3404 e1000_tbi_adjust_stats_82543(&adapter->hw, 3405 &adapter->stats, pkt_len, 3406 adapter->hw.mac.addr, 3407 adapter->max_frame_size); 3408 if (len > 0) 3409 len--; 3410 } else { 3411 accept_frame = 0; 3412 } 3413 } 3414 3415 if (accept_frame) { 3416 if (em_newbuf(adapter, i, 0) != 0) { 3417 ifp->if_iqdrops++; 3418 goto discard; 3419 } 3420 3421 /* Assign correct length to the current fragment */ 3422 mp->m_len = len; 3423 3424 if (adapter->fmp == NULL) { 3425 mp->m_pkthdr.len = len; 3426 adapter->fmp = mp; /* Store the first mbuf */ 3427 adapter->lmp = mp; 3428 } else { 3429 /* 3430 * Chain mbuf's together 3431 */ 3432 3433 /* 3434 * Adjust length of previous mbuf in chain if 3435 * we received less than 4 bytes in the last 3436 * descriptor. 3437 */ 3438 if (prev_len_adj > 0) { 3439 adapter->lmp->m_len -= prev_len_adj; 3440 adapter->fmp->m_pkthdr.len -= 3441 prev_len_adj; 3442 } 3443 adapter->lmp->m_next = mp; 3444 adapter->lmp = adapter->lmp->m_next; 3445 adapter->fmp->m_pkthdr.len += len; 3446 } 3447 3448 if (eop) { 3449 adapter->fmp->m_pkthdr.rcvif = ifp; 3450 ifp->if_ipackets++; 3451 3452 if (ifp->if_capenable & IFCAP_RXCSUM) { 3453 em_rxcsum(adapter, current_desc, 3454 adapter->fmp); 3455 } 3456 3457 if (status & E1000_RXD_STAT_VP) { 3458 adapter->fmp->m_pkthdr.ether_vlantag = 3459 (le16toh(current_desc->special) & 3460 E1000_RXD_SPC_VLAN_MASK); 3461 adapter->fmp->m_flags |= M_VLANTAG; 3462 } 3463 m = adapter->fmp; 3464 adapter->fmp = NULL; 3465 adapter->lmp = NULL; 3466 } 3467 } else { 3468 ifp->if_ierrors++; 3469 discard: 3470 #ifdef foo 3471 /* Reuse loaded DMA map and just update mbuf chain */ 3472 mp = adapter->rx_buffer_area[i].m_head; 3473 mp->m_len = mp->m_pkthdr.len = MCLBYTES; 3474 mp->m_data = mp->m_ext.ext_buf; 3475 mp->m_next = NULL; 3476 if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN)) 3477 m_adj(mp, ETHER_ALIGN); 3478 #endif 3479 if (adapter->fmp != NULL) { 3480 m_freem(adapter->fmp); 3481 adapter->fmp = NULL; 3482 adapter->lmp = NULL; 3483 } 3484 m = NULL; 3485 } 3486 3487 /* Zero out the receive descriptors status. 
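 */

/*
 * Clearing the status byte prevents a stale DD bit from being
 * mistaken for a newly completed descriptor the next time this ring
 * slot is examined.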
 */
3488 		current_desc->status = 0;
3489 
3490 		if (m != NULL)
3491 			ifp->if_input(ifp, m);
3492 
3493 		/* Advance our pointers to the next descriptor. */
3494 		if (++i == adapter->num_rx_desc)
3495 			i = 0;
3496 		current_desc = &adapter->rx_desc_base[i];
3497 	}
3498 	adapter->next_rx_desc_to_check = i;
3499 
3500 	/* Advance the E1000's Receive Queue #0 "Tail Pointer". */
3501 	if (--i < 0)
3502 		i = adapter->num_rx_desc - 1;
3503 	E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), i);
3504 }
3505 
3506 static void
3507 em_rxcsum(struct adapter *adapter, struct e1000_rx_desc *rx_desc,
3508     struct mbuf *mp)
3509 {
3510 	/* 82543 or newer only */
3511 	if (adapter->hw.mac.type < e1000_82543 ||
3512 	    /* Ignore Checksum bit is set */
3513 	    (rx_desc->status & E1000_RXD_STAT_IXSM))
3514 		return;
3515 
3516 	if ((rx_desc->status & E1000_RXD_STAT_IPCS) &&
3517 	    !(rx_desc->errors & E1000_RXD_ERR_IPE)) {
3518 		/* IP Checksum Good */
3519 		mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;
3520 	}
3521 
3522 	if ((rx_desc->status & E1000_RXD_STAT_TCPCS) &&
3523 	    !(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
3524 		mp->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
3525 		    CSUM_PSEUDO_HDR |
3526 		    CSUM_FRAG_NOT_CHECKED;
3527 		mp->m_pkthdr.csum_data = htons(0xffff);
3528 	}
3529 }
3530 
3531 static void
3532 em_enable_intr(struct adapter *adapter)
3533 {
3534 	uint32_t ims_mask = IMS_ENABLE_MASK;
3535 
3536 	lwkt_serialize_handler_enable(adapter->arpcom.ac_if.if_serializer);
3537 
3538 #if 0
3539 	/* XXX MSIX */
3540 	if (adapter->hw.mac.type == e1000_82574) {
3541 		E1000_WRITE_REG(&adapter->hw, EM_EIAC, EM_MSIX_MASK);
3542 		ims_mask |= EM_MSIX_MASK;
3543 	}
3544 #endif
3545 	E1000_WRITE_REG(&adapter->hw, E1000_IMS, ims_mask);
3546 }
3547 
3548 static void
3549 em_disable_intr(struct adapter *adapter)
3550 {
3551 	uint32_t clear = 0xffffffff;
3552 
3553 	/*
3554 	 * The first version of the 82542 had an erratum where, when link
3555 	 * was forced, it would stay up even if the cable was disconnected.
3556 	 * Sequence errors were used to detect the disconnect and then the
3557 	 * driver would unforce the link.  This code is in the ISR.  For
3558 	 * this to work correctly the sequence error interrupt had to be
3559 	 * enabled all the time.
3560 	 */
3561 	if (adapter->hw.mac.type == e1000_82542 &&
3562 	    adapter->hw.revision_id == E1000_REVISION_2)
3563 		clear &= ~E1000_ICR_RXSEQ;
3564 	else if (adapter->hw.mac.type == e1000_82574)
3565 		E1000_WRITE_REG(&adapter->hw, EM_EIAC, 0);
3566 
3567 	E1000_WRITE_REG(&adapter->hw, E1000_IMC, clear);
3568 
3569 	lwkt_serialize_handler_disable(adapter->arpcom.ac_if.if_serializer);
3570 }
3571 
3572 /*
3573  * Bit of a misnomer, what this really means is
3574  * to enable OS management of the system...
aka 3575 * to disable special hardware management features 3576 */ 3577 static void 3578 em_get_mgmt(struct adapter *adapter) 3579 { 3580 /* A shared code workaround */ 3581 #define E1000_82542_MANC2H E1000_MANC2H 3582 if (adapter->has_manage) { 3583 int manc2h = E1000_READ_REG(&adapter->hw, E1000_MANC2H); 3584 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC); 3585 3586 /* disable hardware interception of ARP */ 3587 manc &= ~(E1000_MANC_ARP_EN); 3588 3589 /* enable receiving management packets to the host */ 3590 if (adapter->hw.mac.type >= e1000_82571) { 3591 manc |= E1000_MANC_EN_MNG2HOST; 3592 #define E1000_MNG2HOST_PORT_623 (1 << 5) 3593 #define E1000_MNG2HOST_PORT_664 (1 << 6) 3594 manc2h |= E1000_MNG2HOST_PORT_623; 3595 manc2h |= E1000_MNG2HOST_PORT_664; 3596 E1000_WRITE_REG(&adapter->hw, E1000_MANC2H, manc2h); 3597 } 3598 3599 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc); 3600 } 3601 } 3602 3603 /* 3604 * Give control back to hardware management 3605 * controller if there is one. 3606 */ 3607 static void 3608 em_rel_mgmt(struct adapter *adapter) 3609 { 3610 if (adapter->has_manage) { 3611 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC); 3612 3613 /* re-enable hardware interception of ARP */ 3614 manc |= E1000_MANC_ARP_EN; 3615 3616 if (adapter->hw.mac.type >= e1000_82571) 3617 manc &= ~E1000_MANC_EN_MNG2HOST; 3618 3619 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc); 3620 } 3621 } 3622 3623 /* 3624 * em_get_hw_control() sets {CTRL_EXT|FWSM}:DRV_LOAD bit. 3625 * For ASF and Pass Through versions of f/w this means that 3626 * the driver is loaded. For AMT version (only with 82573) 3627 * of the f/w this means that the network i/f is open. 3628 */ 3629 static void 3630 em_get_hw_control(struct adapter *adapter) 3631 { 3632 /* Let firmware know the driver has taken over */ 3633 if (adapter->hw.mac.type == e1000_82573) { 3634 uint32_t swsm; 3635 3636 swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM); 3637 E1000_WRITE_REG(&adapter->hw, E1000_SWSM, 3638 swsm | E1000_SWSM_DRV_LOAD); 3639 } else { 3640 uint32_t ctrl_ext; 3641 3642 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT); 3643 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, 3644 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); 3645 } 3646 adapter->control_hw = 1; 3647 } 3648 3649 /* 3650 * em_rel_hw_control() resets {CTRL_EXT|FWSM}:DRV_LOAD bit. 3651 * For ASF and Pass Through versions of f/w this means that the 3652 * driver is no longer loaded. For AMT version (only with 82573) 3653 * of the f/w this means that the network i/f is closed. 
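 */

/*
 * Together the two routines bracket driver ownership of the NIC:
 * em_get_hw_control() is expected to run when the driver takes the
 * device over (e.g. attach/init) and em_rel_hw_control() when it lets
 * go (e.g. detach/suspend), so the management firmware knows whether
 * the OS driver currently owns the hardware.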
3654  */
3655 static void
3656 em_rel_hw_control(struct adapter *adapter)
3657 {
3658 	if (!adapter->control_hw)
3659 		return;
3660 	adapter->control_hw = 0;
3661 
3662 	/* Let firmware take over control of h/w */
3663 	if (adapter->hw.mac.type == e1000_82573) {
3664 		uint32_t swsm;
3665 
3666 		swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM);
3667 		E1000_WRITE_REG(&adapter->hw, E1000_SWSM,
3668 		    swsm & ~E1000_SWSM_DRV_LOAD);
3669 	} else {
3670 		uint32_t ctrl_ext;
3671 
3672 		ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
3673 		E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
3674 		    ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
3675 	}
3676 }
3677 
3678 static int
3679 em_is_valid_eaddr(const uint8_t *addr)
3680 {
3681 	char zero_addr[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
3682 
3683 	if ((addr[0] & 1) || !bcmp(addr, zero_addr, ETHER_ADDR_LEN))
3684 		return (FALSE);
3685 
3686 	return (TRUE);
3687 }
3688 
3689 /*
3690  * Enable PCI Wake On Lan capability
3691  */
3692 void
3693 em_enable_wol(device_t dev)
3694 {
3695 	uint16_t cap, status;
3696 	uint8_t id;
3697 
3698 	/* First find the capabilities pointer */
3699 	cap = pci_read_config(dev, PCIR_CAP_PTR, 2);
3700 
3701 	/* Read the PM Capabilities */
3702 	id = pci_read_config(dev, cap, 1);
3703 	if (id != PCIY_PMG)	/* Something wrong */
3704 		return;
3705 
3706 	/*
3707 	 * OK, we have the power capabilities,
3708 	 * so now get the status register
3709 	 */
3710 	cap += PCIR_POWER_STATUS;
3711 	status = pci_read_config(dev, cap, 2);
3712 	status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
3713 	pci_write_config(dev, cap, status, 2);
3714 }
3715 
3716 
3717 /*
3718  * 82544 Coexistence issue workaround.
3719  * There are 2 issues.
3720  * 1. Transmit Hang issue.
3721  *    To detect this issue, the following equation can be used...
3722  *    SIZE[3:0] + ADDR[2:0] = SUM[3:0].
3723  *    If SUM[3:0] is between 1 and 4, we will have this issue.
3724  *
3725  * 2. DAC issue.
3726  *    To detect this issue, the following equation can be used...
3727  *    SIZE[3:0] + ADDR[2:0] = SUM[3:0].
3728  *    If SUM[3:0] is between 9 and c, we will have this issue.
3729  *
3730  * WORKAROUND:
3731  *    Make sure we do not have an ending address
3732  *    of 1,2,3,4 (Hang) or 9,a,b,c (DAC)
3733  */
3734 static uint32_t
3735 em_82544_fill_desc(bus_addr_t address, uint32_t length, PDESC_ARRAY desc_array)
3736 {
3737 	uint32_t safe_terminator;
3738 
3739 	/*
3740 	 * The issue is sensitive to both length and address; handle
3741 	 * very short lengths first, then check the terminator...
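 */

/*
 * Worked examples (illustrative): with ADDR[2:0] = 6 and a 62-byte
 * segment, safe_terminator = (6 + (62 & 0xF)) & 0xF = 0x4, which
 * falls in the 1..4 hang range, so the segment is split below.
 * With ADDR[2:0] = 0 and the same length the terminator is 0xE,
 * and a single descriptor is safe.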
 */
3743 	if (length <= 4) {
3744 		desc_array->descriptor[0].address = address;
3745 		desc_array->descriptor[0].length = length;
3746 		desc_array->elements = 1;
3747 		return (desc_array->elements);
3748 	}
3749 
3750 	safe_terminator =
3751 	    (uint32_t)((((uint32_t)address & 0x7) + (length & 0xF)) & 0xF);
3752 
3753 	/* If it does not fall in the 0x1-0x4 or 0x9-0xC ranges, one descriptor is safe */
3754 	if (safe_terminator == 0 ||
3755 	    (safe_terminator > 4 && safe_terminator < 9) ||
3756 	    (safe_terminator > 0xC && safe_terminator <= 0xF)) {
3757 		desc_array->descriptor[0].address = address;
3758 		desc_array->descriptor[0].length = length;
3759 		desc_array->elements = 1;
3760 		return (desc_array->elements);
3761 	}
3762 
3763 	desc_array->descriptor[0].address = address;
3764 	desc_array->descriptor[0].length = length - 4;
3765 	desc_array->descriptor[1].address = address + (length - 4);
3766 	desc_array->descriptor[1].length = 4;
3767 	desc_array->elements = 2;
3768 	return (desc_array->elements);
3769 }
3770 
3771 static void
3772 em_update_stats(struct adapter *adapter)
3773 {
3774 	struct ifnet *ifp = &adapter->arpcom.ac_if;
3775 
3776 	if (adapter->hw.phy.media_type == e1000_media_type_copper ||
3777 	    (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) {
3778 		adapter->stats.symerrs +=
3779 		    E1000_READ_REG(&adapter->hw, E1000_SYMERRS);
3780 		adapter->stats.sec += E1000_READ_REG(&adapter->hw, E1000_SEC);
3781 	}
3782 	adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, E1000_CRCERRS);
3783 	adapter->stats.mpc += E1000_READ_REG(&adapter->hw, E1000_MPC);
3784 	adapter->stats.scc += E1000_READ_REG(&adapter->hw, E1000_SCC);
3785 	adapter->stats.ecol += E1000_READ_REG(&adapter->hw, E1000_ECOL);
3786 
3787 	adapter->stats.mcc += E1000_READ_REG(&adapter->hw, E1000_MCC);
3788 	adapter->stats.latecol += E1000_READ_REG(&adapter->hw, E1000_LATECOL);
3789 	adapter->stats.colc += E1000_READ_REG(&adapter->hw, E1000_COLC);
3790 	adapter->stats.dc += E1000_READ_REG(&adapter->hw, E1000_DC);
3791 	adapter->stats.rlec += E1000_READ_REG(&adapter->hw, E1000_RLEC);
3792 	adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, E1000_XONRXC);
3793 	adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, E1000_XONTXC);
3794 	adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, E1000_XOFFRXC);
3795 	adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, E1000_XOFFTXC);
3796 	adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, E1000_FCRUC);
3797 	adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, E1000_PRC64);
3798 	adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, E1000_PRC127);
3799 	adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, E1000_PRC255);
3800 	adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, E1000_PRC511);
3801 	adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, E1000_PRC1023);
3802 	adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, E1000_PRC1522);
3803 	adapter->stats.gprc += E1000_READ_REG(&adapter->hw, E1000_GPRC);
3804 	adapter->stats.bprc += E1000_READ_REG(&adapter->hw, E1000_BPRC);
3805 	adapter->stats.mprc += E1000_READ_REG(&adapter->hw, E1000_MPRC);
3806 	adapter->stats.gptc += E1000_READ_REG(&adapter->hw, E1000_GPTC);
3807 
3808 	/* For the 64-bit byte counters the low dword must be read first.
*/ 3809 /* Both registers clear on the read of the high dword */ 3810 3811 adapter->stats.gorc += E1000_READ_REG(&adapter->hw, E1000_GORCH); 3812 adapter->stats.gotc += E1000_READ_REG(&adapter->hw, E1000_GOTCH); 3813 3814 adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, E1000_RNBC); 3815 adapter->stats.ruc += E1000_READ_REG(&adapter->hw, E1000_RUC); 3816 adapter->stats.rfc += E1000_READ_REG(&adapter->hw, E1000_RFC); 3817 adapter->stats.roc += E1000_READ_REG(&adapter->hw, E1000_ROC); 3818 adapter->stats.rjc += E1000_READ_REG(&adapter->hw, E1000_RJC); 3819 3820 adapter->stats.tor += E1000_READ_REG(&adapter->hw, E1000_TORH); 3821 adapter->stats.tot += E1000_READ_REG(&adapter->hw, E1000_TOTH); 3822 3823 adapter->stats.tpr += E1000_READ_REG(&adapter->hw, E1000_TPR); 3824 adapter->stats.tpt += E1000_READ_REG(&adapter->hw, E1000_TPT); 3825 adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, E1000_PTC64); 3826 adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, E1000_PTC127); 3827 adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, E1000_PTC255); 3828 adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, E1000_PTC511); 3829 adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, E1000_PTC1023); 3830 adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, E1000_PTC1522); 3831 adapter->stats.mptc += E1000_READ_REG(&adapter->hw, E1000_MPTC); 3832 adapter->stats.bptc += E1000_READ_REG(&adapter->hw, E1000_BPTC); 3833 3834 if (adapter->hw.mac.type >= e1000_82543) { 3835 adapter->stats.algnerrc += 3836 E1000_READ_REG(&adapter->hw, E1000_ALGNERRC); 3837 adapter->stats.rxerrc += 3838 E1000_READ_REG(&adapter->hw, E1000_RXERRC); 3839 adapter->stats.tncrs += 3840 E1000_READ_REG(&adapter->hw, E1000_TNCRS); 3841 adapter->stats.cexterr += 3842 E1000_READ_REG(&adapter->hw, E1000_CEXTERR); 3843 adapter->stats.tsctc += 3844 E1000_READ_REG(&adapter->hw, E1000_TSCTC); 3845 adapter->stats.tsctfc += 3846 E1000_READ_REG(&adapter->hw, E1000_TSCTFC); 3847 } 3848 3849 ifp->if_collisions = adapter->stats.colc; 3850 3851 /* Rx Errors */ 3852 ifp->if_ierrors = 3853 adapter->dropped_pkts + adapter->stats.rxerrc + 3854 adapter->stats.crcerrs + adapter->stats.algnerrc + 3855 adapter->stats.ruc + adapter->stats.roc + 3856 adapter->stats.mpc + adapter->stats.cexterr; 3857 3858 /* Tx Errors */ 3859 ifp->if_oerrors = 3860 adapter->stats.ecol + adapter->stats.latecol + 3861 adapter->watchdog_events; 3862 } 3863 3864 static void 3865 em_print_debug_info(struct adapter *adapter) 3866 { 3867 device_t dev = adapter->dev; 3868 uint8_t *hw_addr = adapter->hw.hw_addr; 3869 3870 device_printf(dev, "Adapter hardware address = %p \n", hw_addr); 3871 device_printf(dev, "CTRL = 0x%x RCTL = 0x%x \n", 3872 E1000_READ_REG(&adapter->hw, E1000_CTRL), 3873 E1000_READ_REG(&adapter->hw, E1000_RCTL)); 3874 device_printf(dev, "Packet buffer = Tx=%dk Rx=%dk \n", 3875 ((E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff0000) >> 16),\ 3876 (E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff) ); 3877 device_printf(dev, "Flow control watermarks high = %d low = %d\n", 3878 adapter->hw.fc.high_water, 3879 adapter->hw.fc.low_water); 3880 device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n", 3881 E1000_READ_REG(&adapter->hw, E1000_TIDV), 3882 E1000_READ_REG(&adapter->hw, E1000_TADV)); 3883 device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n", 3884 E1000_READ_REG(&adapter->hw, E1000_RDTR), 3885 E1000_READ_REG(&adapter->hw, E1000_RADV)); 3886 device_printf(dev, "fifo workaround = %lld, fifo_reset_count = %lld\n", 3887 (long 

	adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, E1000_RNBC);
	adapter->stats.ruc += E1000_READ_REG(&adapter->hw, E1000_RUC);
	adapter->stats.rfc += E1000_READ_REG(&adapter->hw, E1000_RFC);
	adapter->stats.roc += E1000_READ_REG(&adapter->hw, E1000_ROC);
	adapter->stats.rjc += E1000_READ_REG(&adapter->hw, E1000_RJC);

	adapter->stats.tor += E1000_READ_REG(&adapter->hw, E1000_TORH);
	adapter->stats.tot += E1000_READ_REG(&adapter->hw, E1000_TOTH);

	adapter->stats.tpr += E1000_READ_REG(&adapter->hw, E1000_TPR);
	adapter->stats.tpt += E1000_READ_REG(&adapter->hw, E1000_TPT);
	adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, E1000_PTC64);
	adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, E1000_PTC127);
	adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, E1000_PTC255);
	adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, E1000_PTC511);
	adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, E1000_PTC1023);
	adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, E1000_PTC1522);
	adapter->stats.mptc += E1000_READ_REG(&adapter->hw, E1000_MPTC);
	adapter->stats.bptc += E1000_READ_REG(&adapter->hw, E1000_BPTC);

	if (adapter->hw.mac.type >= e1000_82543) {
		adapter->stats.algnerrc +=
		    E1000_READ_REG(&adapter->hw, E1000_ALGNERRC);
		adapter->stats.rxerrc +=
		    E1000_READ_REG(&adapter->hw, E1000_RXERRC);
		adapter->stats.tncrs +=
		    E1000_READ_REG(&adapter->hw, E1000_TNCRS);
		adapter->stats.cexterr +=
		    E1000_READ_REG(&adapter->hw, E1000_CEXTERR);
		adapter->stats.tsctc +=
		    E1000_READ_REG(&adapter->hw, E1000_TSCTC);
		adapter->stats.tsctfc +=
		    E1000_READ_REG(&adapter->hw, E1000_TSCTFC);
	}

	ifp->if_collisions = adapter->stats.colc;

	/* Rx Errors */
	ifp->if_ierrors =
	    adapter->dropped_pkts + adapter->stats.rxerrc +
	    adapter->stats.crcerrs + adapter->stats.algnerrc +
	    adapter->stats.ruc + adapter->stats.roc +
	    adapter->stats.mpc + adapter->stats.cexterr;

	/* Tx Errors */
	ifp->if_oerrors =
	    adapter->stats.ecol + adapter->stats.latecol +
	    adapter->watchdog_events;
}

static void
em_print_debug_info(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	uint8_t *hw_addr = adapter->hw.hw_addr;

	device_printf(dev, "Adapter hardware address = %p \n", hw_addr);
	device_printf(dev, "CTRL = 0x%x RCTL = 0x%x \n",
	    E1000_READ_REG(&adapter->hw, E1000_CTRL),
	    E1000_READ_REG(&adapter->hw, E1000_RCTL));
	device_printf(dev, "Packet buffer = Tx=%dk Rx=%dk \n",
	    ((E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff0000) >> 16),
	    (E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff));
	device_printf(dev, "Flow control watermarks high = %d low = %d\n",
	    adapter->hw.fc.high_water, adapter->hw.fc.low_water);
	device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n",
	    E1000_READ_REG(&adapter->hw, E1000_TIDV),
	    E1000_READ_REG(&adapter->hw, E1000_TADV));
	device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n",
	    E1000_READ_REG(&adapter->hw, E1000_RDTR),
	    E1000_READ_REG(&adapter->hw, E1000_RADV));
	device_printf(dev, "fifo workaround = %lld, fifo_reset_count = %lld\n",
	    (long long)adapter->tx_fifo_wrk_cnt,
	    (long long)adapter->tx_fifo_reset_cnt);
	device_printf(dev, "hw tdh = %d, hw tdt = %d\n",
	    E1000_READ_REG(&adapter->hw, E1000_TDH(0)),
	    E1000_READ_REG(&adapter->hw, E1000_TDT(0)));
	device_printf(dev, "hw rdh = %d, hw rdt = %d\n",
	    E1000_READ_REG(&adapter->hw, E1000_RDH(0)),
	    E1000_READ_REG(&adapter->hw, E1000_RDT(0)));
	device_printf(dev, "Num Tx descriptors avail = %d\n",
	    adapter->num_tx_desc_avail);
	device_printf(dev, "Tx Descriptors not avail1 = %ld\n",
	    adapter->no_tx_desc_avail1);
	device_printf(dev, "Tx Descriptors not avail2 = %ld\n",
	    adapter->no_tx_desc_avail2);
	device_printf(dev, "Std mbuf failed = %ld\n",
	    adapter->mbuf_alloc_failed);
	device_printf(dev, "Std mbuf cluster failed = %ld\n",
	    adapter->mbuf_cluster_failed);
	device_printf(dev, "Driver dropped packets = %ld\n",
	    adapter->dropped_pkts);
	device_printf(dev, "Driver tx dma failure in encap = %ld\n",
	    adapter->no_tx_dma_setup);

	device_printf(dev, "TXCSUM try pullup = %lu\n",
	    adapter->tx_csum_try_pullup);
	device_printf(dev, "TXCSUM m_pullup(eh) called = %lu\n",
	    adapter->tx_csum_pullup1);
	device_printf(dev, "TXCSUM m_pullup(eh) failed = %lu\n",
	    adapter->tx_csum_pullup1_failed);
	device_printf(dev, "TXCSUM m_pullup(eh+ip) called = %lu\n",
	    adapter->tx_csum_pullup2);
	device_printf(dev, "TXCSUM m_pullup(eh+ip) failed = %lu\n",
	    adapter->tx_csum_pullup2_failed);
	device_printf(dev, "TXCSUM non-writable(eh) dropped = %lu\n",
	    adapter->tx_csum_drop1);
	device_printf(dev, "TXCSUM non-writable(eh+ip) dropped = %lu\n",
	    adapter->tx_csum_drop2);
}
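
/*
 * Usage sketch (hypothetical unit name): the sysctl nodes created by
 * em_add_sysctl() below hang off hw.<nameunit>, so for unit em0 the
 * dumps in this file can be triggered at runtime with e.g.:
 *
 *	sysctl hw.em0.debug=1	# em_print_debug_info()
 *	sysctl hw.em0.debug=2	# EEPROM dump via em_print_nvm_info()
 *	sysctl hw.em0.stats=1	# em_print_hw_stats()
 */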

static void
em_print_hw_stats(struct adapter *adapter)
{
	device_t dev = adapter->dev;

	device_printf(dev, "Excessive collisions = %lld\n",
	    (long long)adapter->stats.ecol);
#if (DEBUG_HW > 0)	/* Don't output these errors normally */
	device_printf(dev, "Symbol errors = %lld\n",
	    (long long)adapter->stats.symerrs);
#endif
	device_printf(dev, "Sequence errors = %lld\n",
	    (long long)adapter->stats.sec);
	device_printf(dev, "Defer count = %lld\n",
	    (long long)adapter->stats.dc);
	device_printf(dev, "Missed Packets = %lld\n",
	    (long long)adapter->stats.mpc);
	device_printf(dev, "Receive No Buffers = %lld\n",
	    (long long)adapter->stats.rnbc);
	/* RLEC is inaccurate on some hardware, calculate our own. */
	device_printf(dev, "Receive Length Errors = %lld\n",
	    ((long long)adapter->stats.roc + (long long)adapter->stats.ruc));
	device_printf(dev, "Receive errors = %lld\n",
	    (long long)adapter->stats.rxerrc);
	device_printf(dev, "Crc errors = %lld\n",
	    (long long)adapter->stats.crcerrs);
	device_printf(dev, "Alignment errors = %lld\n",
	    (long long)adapter->stats.algnerrc);
	device_printf(dev, "Collision/Carrier extension errors = %lld\n",
	    (long long)adapter->stats.cexterr);
	device_printf(dev, "RX overruns = %ld\n", adapter->rx_overruns);
	device_printf(dev, "watchdog timeouts = %ld\n",
	    adapter->watchdog_events);
	device_printf(dev, "XON Rcvd = %lld\n",
	    (long long)adapter->stats.xonrxc);
	device_printf(dev, "XON Xmtd = %lld\n",
	    (long long)adapter->stats.xontxc);
	device_printf(dev, "XOFF Rcvd = %lld\n",
	    (long long)adapter->stats.xoffrxc);
	device_printf(dev, "XOFF Xmtd = %lld\n",
	    (long long)adapter->stats.xofftxc);
	device_printf(dev, "Good Packets Rcvd = %lld\n",
	    (long long)adapter->stats.gprc);
	device_printf(dev, "Good Packets Xmtd = %lld\n",
	    (long long)adapter->stats.gptc);
}

static void
em_print_nvm_info(struct adapter *adapter)
{
	uint16_t eeprom_data;
	int i, j, row = 0;

	/* It's a bit crude, but it gets the job done */
	kprintf("\nInterface EEPROM Dump:\n");
	kprintf("Offset\n0x0000 ");
	for (i = 0, j = 0; i < 32; i++, j++) {
		if (j == 8) {	/* Make the offset block */
			j = 0;
			++row;
			kprintf("\n0x00%x0 ", row);
		}
		e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data);
		kprintf("%04x ", eeprom_data);
	}
	kprintf("\n");
}
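
/*
 * Example of the dump layout (hypothetical word values): each row holds
 * eight 16-bit words, so the printed offsets are byte offsets advancing
 * by 0x10 per row, four rows for the 32 words read above:
 *
 *	Interface EEPROM Dump:
 *	Offset
 *	0x0000 1c2b 3d4e 0001 0000 ffff ffff 0408 10d3
 *	0x0010 .... .... .... .... .... .... .... ....
 *	0x0020 .... .... .... .... .... .... .... ....
 *	0x0030 .... .... .... .... .... .... .... ....
 */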

static int
em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
{
	struct adapter *adapter;
	struct ifnet *ifp;
	int error, result;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);
	if (error || !req->newptr)
		return (error);

	adapter = (struct adapter *)arg1;
	ifp = &adapter->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);

	if (result == 1)
		em_print_debug_info(adapter);

	/*
	 * This value will cause a hex dump of the first
	 * 32 16-bit words of the EEPROM to the screen.
	 */
	if (result == 2)
		em_print_nvm_info(adapter);

	lwkt_serialize_exit(ifp->if_serializer);

	return (error);
}

static int
em_sysctl_stats(SYSCTL_HANDLER_ARGS)
{
	int error, result;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);
	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		struct adapter *adapter = (struct adapter *)arg1;
		struct ifnet *ifp = &adapter->arpcom.ac_if;

		lwkt_serialize_enter(ifp->if_serializer);
		em_print_hw_stats(adapter);
		lwkt_serialize_exit(ifp->if_serializer);
	}
	return (error);
}

static void
em_add_sysctl(struct adapter *adapter)
{
	sysctl_ctx_init(&adapter->sysctl_ctx);
	adapter->sysctl_tree = SYSCTL_ADD_NODE(&adapter->sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
	    device_get_nameunit(adapter->dev),
	    CTLFLAG_RD, 0, "");
	if (adapter->sysctl_tree == NULL) {
		device_printf(adapter->dev, "can't add sysctl node\n");
	} else {
		SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
		    SYSCTL_CHILDREN(adapter->sysctl_tree),
		    OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
		    em_sysctl_debug_info, "I", "Debug Information");

		SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
		    SYSCTL_CHILDREN(adapter->sysctl_tree),
		    OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
		    em_sysctl_stats, "I", "Statistics");

		SYSCTL_ADD_INT(&adapter->sysctl_ctx,
		    SYSCTL_CHILDREN(adapter->sysctl_tree),
		    OID_AUTO, "rxd", CTLFLAG_RD,
		    &adapter->num_rx_desc, 0, NULL);
		SYSCTL_ADD_INT(&adapter->sysctl_ctx,
		    SYSCTL_CHILDREN(adapter->sysctl_tree),
		    OID_AUTO, "txd", CTLFLAG_RD,
		    &adapter->num_tx_desc, 0, NULL);

		if (adapter->hw.mac.type >= e1000_82540) {
			SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
			    SYSCTL_CHILDREN(adapter->sysctl_tree),
			    OID_AUTO, "int_throttle_ceil",
			    CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
			    em_sysctl_int_throttle, "I",
			    "interrupt throttling rate");
		}
		SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
		    SYSCTL_CHILDREN(adapter->sysctl_tree),
		    OID_AUTO, "int_tx_nsegs",
		    CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
		    em_sysctl_int_tx_nsegs, "I",
		    "# segments per TX interrupt");
	}
}

static int
em_sysctl_int_throttle(SYSCTL_HANDLER_ARGS)
{
	struct adapter *adapter = (void *)arg1;
	struct ifnet *ifp = &adapter->arpcom.ac_if;
	int error, throttle;

	throttle = adapter->int_throttle_ceil;
	error = sysctl_handle_int(oidp, &throttle, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (throttle < 0 || throttle > 1000000000 / 256)
		return EINVAL;

	if (throttle) {
		/*
		 * Set the interrupt throttling rate in 256ns increments,
		 * then recalculate the sysctl value to report the exact
		 * frequency.
		 */
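		/*
		 * Worked example (illustrative numbers): a request of
		 * 10000 interrupts/sec maps to 1000000000 / 256 / 10000
		 * = 390 in 256ns units; recomputing 1000000000 / 256 /
		 * 390 = 10016 gives the exact ceiling stored in
		 * int_throttle_ceil and reported back.
		 */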
		throttle = 1000000000 / 256 / throttle;

		/* The upper 16 bits of ITR are reserved and must be zero */
		if (throttle & 0xffff0000)
			return EINVAL;
	}

	lwkt_serialize_enter(ifp->if_serializer);

	if (throttle)
		adapter->int_throttle_ceil = 1000000000 / 256 / throttle;
	else
		adapter->int_throttle_ceil = 0;

	if (ifp->if_flags & IFF_RUNNING)
		em_set_itr(adapter, throttle);

	lwkt_serialize_exit(ifp->if_serializer);

	if (bootverbose) {
		if_printf(ifp, "Interrupt moderation set to %d/sec\n",
		    adapter->int_throttle_ceil);
	}
	return 0;
}

static int
em_sysctl_int_tx_nsegs(SYSCTL_HANDLER_ARGS)
{
	struct adapter *adapter = (void *)arg1;
	struct ifnet *ifp = &adapter->arpcom.ac_if;
	int error, segs;

	segs = adapter->tx_int_nsegs;
	error = sysctl_handle_int(oidp, &segs, 0, req);
	if (error || req->newptr == NULL)
		return error;
	if (segs <= 0)
		return EINVAL;

	lwkt_serialize_enter(ifp->if_serializer);

	/*
	 * Don't allow int_tx_nsegs to become:
	 * o  Less than oact_tx_desc
	 * o  So large that no TX descriptor use would ever generate a
	 *    TX interrupt (OACTIVE would never recover)
	 * o  So small that it would overflow tx_dd[]
	 */
	if (segs < adapter->oact_tx_desc ||
	    segs >= adapter->num_tx_desc - adapter->oact_tx_desc ||
	    segs < adapter->num_tx_desc / EM_TXDD_SAFE) {
		error = EINVAL;
	} else {
		error = 0;
		adapter->tx_int_nsegs = segs;
	}

	lwkt_serialize_exit(ifp->if_serializer);

	return error;
}

static void
em_set_itr(struct adapter *adapter, uint32_t itr)
{
	E1000_WRITE_REG(&adapter->hw, E1000_ITR, itr);
	if (adapter->hw.mac.type == e1000_82574) {
		int i;

		/*
		 * When using MSI-X interrupts we need to throttle
		 * using the EITR register
		 */
		for (i = 0; i < 4; ++i) {
			E1000_WRITE_REG(&adapter->hw,
			    E1000_EITR_82574(i), itr);
		}
	}
}

static void
em_disable_aspm(struct adapter *adapter)
{
	uint16_t link_cap, link_ctrl, disable;
	uint8_t pcie_ptr, reg;
	device_t dev = adapter->dev;

	switch (adapter->hw.mac.type) {
	case e1000_82571:
	case e1000_82572:
	case e1000_82573:
		/*
		 * 82573 specification update
		 * errata #8  disable L0s
		 * errata #41 disable L1
		 *
		 * 82571/82572 specification update
		 * errata #13 disable L1
		 * errata #68 disable L0s
		 */
		disable = PCIEM_LNKCTL_ASPM_L0S | PCIEM_LNKCTL_ASPM_L1;
		break;

	case e1000_82574:
	case e1000_82583:
		/*
		 * 82574 specification update errata #20
		 * 82583 specification update errata #9
		 *
		 * There is no need to disable L1
		 */
		disable = PCIEM_LNKCTL_ASPM_L0S;
		break;

	default:
		return;
	}

	pcie_ptr = pci_get_pciecap_ptr(dev);
	if (pcie_ptr == 0)
		return;

	link_cap = pci_read_config(dev, pcie_ptr + PCIER_LINKCAP, 2);
	if ((link_cap & PCIEM_LNKCAP_ASPM_MASK) == 0)
		return;

	if (bootverbose) {
		if_printf(&adapter->arpcom.ac_if,
		    "disable ASPM %#02x\n", disable);
	}

	reg = pcie_ptr + PCIER_LINKCTRL;
	link_ctrl = pci_read_config(dev, reg, 2);
	link_ctrl &= ~disable;
	pci_write_config(dev, reg, link_ctrl, 2);
}
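
/*
 * Illustrative example with hypothetical register values: if the PCIe
 * Link Control register reads 0x0043 (ASPM L0s and L1 enabled plus an
 * unrelated bit) and `disable` is PCIEM_LNKCTL_ASPM_L0S |
 * PCIEM_LNKCTL_ASPM_L1, then the read-modify-write above stores
 * 0x0043 & ~0x0003 = 0x0040, turning off both ASPM states while
 * leaving the other link-control bits untouched.
 */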