/*
 * Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>.  All rights reserved.
 *
 * Copyright (c) 2001-2008, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * Copyright (c) 2005 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * SERIALIZATION API RULES:
 *
 * - If the driver uses the same serializer for the interrupt as for the
 *   ifnet, most of the serialization will be done automatically for the
 *   driver.
 *
 * - ifmedia entry points will be serialized by the ifmedia code using the
 *   ifnet serializer.
 *
 * - if_* entry points except for if_input will be serialized by the IF
 *   and protocol layers.
 *
 * - The device driver must be sure to serialize access from timeout code
 *   installed by the device driver.
 *
 * - The device driver typically holds the serializer at the time it wishes
 *   to call if_input.
 *
 * - We must call lwkt_serialize_handler_enable() prior to enabling the
 *   hardware interrupt and lwkt_serialize_handler_disable() after disabling
 *   the hardware interrupt in order to avoid handler execution races from
 *   scheduled interrupt threads.
 *
 * NOTE!  Since callers into the device driver hold the ifnet serializer,
 * the device driver may be holding a serializer at the time it calls
 * if_input even if it is not serializer-aware.
 */

#include "opt_ifpoll.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_poll.h>
#include <net/ifq_var.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

#include <dev/netif/ig_hal/e1000_api.h>
#include <dev/netif/ig_hal/e1000_82571.h>
#include <dev/netif/em/if_em.h>

#define EM_NAME	"Intel(R) PRO/1000 Network Connection "
#define EM_VER	" 7.3.4"

#define _EM_DEVICE(id, ret)	\
	{ EM_VENDOR_ID, E1000_DEV_ID_##id, ret, EM_NAME #id EM_VER }
#define EM_EMX_DEVICE(id)	_EM_DEVICE(id, -100)
#define EM_DEVICE(id)		_EM_DEVICE(id, 0)
#define EM_DEVICE_NULL	{ 0, 0, 0, NULL }
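/*
 * For illustration, EM_DEVICE(82540EM) expands to roughly:
 *
 *	{ EM_VENDOR_ID, E1000_DEV_ID_82540EM, 0,
 *	  "Intel(R) PRO/1000 Network Connection 82540EM 7.3.4" }
 *
 * EM_EMX_DEVICE() differs only in the probe return value (-100 instead
 * of 0); the lower probe priority is intended to let a driver that
 * probes higher for the same device id (emx(4) on the chips it also
 * supports) win the attach.
 */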
static const struct em_vendor_info em_vendor_info_array[] = {
	EM_DEVICE(82540EM),
	EM_DEVICE(82540EM_LOM),
	EM_DEVICE(82540EP),
	EM_DEVICE(82540EP_LOM),
	EM_DEVICE(82540EP_LP),

	EM_DEVICE(82541EI),
	EM_DEVICE(82541ER),
	EM_DEVICE(82541ER_LOM),
	EM_DEVICE(82541EI_MOBILE),
	EM_DEVICE(82541GI),
	EM_DEVICE(82541GI_LF),
	EM_DEVICE(82541GI_MOBILE),

	EM_DEVICE(82542),

	EM_DEVICE(82543GC_FIBER),
	EM_DEVICE(82543GC_COPPER),

	EM_DEVICE(82544EI_COPPER),
	EM_DEVICE(82544EI_FIBER),
	EM_DEVICE(82544GC_COPPER),
	EM_DEVICE(82544GC_LOM),

	EM_DEVICE(82545EM_COPPER),
	EM_DEVICE(82545EM_FIBER),
	EM_DEVICE(82545GM_COPPER),
	EM_DEVICE(82545GM_FIBER),
	EM_DEVICE(82545GM_SERDES),

	EM_DEVICE(82546EB_COPPER),
	EM_DEVICE(82546EB_FIBER),
	EM_DEVICE(82546EB_QUAD_COPPER),
	EM_DEVICE(82546GB_COPPER),
	EM_DEVICE(82546GB_FIBER),
	EM_DEVICE(82546GB_SERDES),
	EM_DEVICE(82546GB_PCIE),
	EM_DEVICE(82546GB_QUAD_COPPER),
	EM_DEVICE(82546GB_QUAD_COPPER_KSP3),

	EM_DEVICE(82547EI),
	EM_DEVICE(82547EI_MOBILE),
	EM_DEVICE(82547GI),

	EM_EMX_DEVICE(82571EB_COPPER),
	EM_EMX_DEVICE(82571EB_FIBER),
	EM_EMX_DEVICE(82571EB_SERDES),
	EM_EMX_DEVICE(82571EB_SERDES_DUAL),
	EM_EMX_DEVICE(82571EB_SERDES_QUAD),
	EM_EMX_DEVICE(82571EB_QUAD_COPPER),
	EM_EMX_DEVICE(82571EB_QUAD_COPPER_BP),
	EM_EMX_DEVICE(82571EB_QUAD_COPPER_LP),
	EM_EMX_DEVICE(82571EB_QUAD_FIBER),
	EM_EMX_DEVICE(82571PT_QUAD_COPPER),

	EM_EMX_DEVICE(82572EI_COPPER),
	EM_EMX_DEVICE(82572EI_FIBER),
	EM_EMX_DEVICE(82572EI_SERDES),
	EM_EMX_DEVICE(82572EI),

	EM_EMX_DEVICE(82573E),
	EM_EMX_DEVICE(82573E_IAMT),
	EM_EMX_DEVICE(82573L),

	EM_DEVICE(82583V),

	EM_EMX_DEVICE(80003ES2LAN_COPPER_SPT),
	EM_EMX_DEVICE(80003ES2LAN_SERDES_SPT),
	EM_EMX_DEVICE(80003ES2LAN_COPPER_DPT),
	EM_EMX_DEVICE(80003ES2LAN_SERDES_DPT),

	EM_DEVICE(ICH8_IGP_M_AMT),
	EM_DEVICE(ICH8_IGP_AMT),
	EM_DEVICE(ICH8_IGP_C),
	EM_DEVICE(ICH8_IFE),
	EM_DEVICE(ICH8_IFE_GT),
	EM_DEVICE(ICH8_IFE_G),
	EM_DEVICE(ICH8_IGP_M),
	EM_DEVICE(ICH8_82567V_3),

	EM_DEVICE(ICH9_IGP_M_AMT),
	EM_DEVICE(ICH9_IGP_AMT),
	EM_DEVICE(ICH9_IGP_C),
	EM_DEVICE(ICH9_IGP_M),
	EM_DEVICE(ICH9_IGP_M_V),
	EM_DEVICE(ICH9_IFE),
	EM_DEVICE(ICH9_IFE_GT),
	EM_DEVICE(ICH9_IFE_G),
	EM_DEVICE(ICH9_BM),

	EM_EMX_DEVICE(82574L),
	EM_EMX_DEVICE(82574LA),

	EM_DEVICE(ICH10_R_BM_LM),
	EM_DEVICE(ICH10_R_BM_LF),
	EM_DEVICE(ICH10_R_BM_V),
	EM_DEVICE(ICH10_D_BM_LM),
	EM_DEVICE(ICH10_D_BM_LF),
	EM_DEVICE(ICH10_D_BM_V),

	EM_DEVICE(PCH_M_HV_LM),
	EM_DEVICE(PCH_M_HV_LC),
	EM_DEVICE(PCH_D_HV_DM),
	EM_DEVICE(PCH_D_HV_DC),

	EM_DEVICE(PCH2_LV_LM),
	EM_DEVICE(PCH2_LV_V),

	/* required last entry */
	EM_DEVICE_NULL
};

static int	em_probe(device_t);
static int	em_attach(device_t);
static int	em_detach(device_t);
static int	em_shutdown(device_t);
static int	em_suspend(device_t);
static int	em_resume(device_t);

static void	em_init(void *);
static void	em_stop(struct adapter *);
static int	em_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	em_start(struct ifnet *, struct ifaltq_subque *);
#ifdef IFPOLL_ENABLE
static void	em_npoll(struct ifnet *, struct ifpoll_info *);
static void	em_npoll_compat(struct ifnet *, void *, int);
#endif
static void	em_watchdog(struct ifnet *);
static void	em_media_status(struct ifnet *, struct ifmediareq *);
static int	em_media_change(struct ifnet *);
static void	em_timer(void *);

static void	em_intr(void *);
static void	em_intr_mask(void *);
static void	em_intr_body(struct adapter *, boolean_t);
static void	em_rxeof(struct adapter *, int);
static void	em_txeof(struct adapter *);
static void	em_tx_collect(struct adapter *);
static void	em_tx_purge(struct adapter *);
static void	em_enable_intr(struct adapter *);
static void	em_disable_intr(struct adapter *);

static int	em_dma_malloc(struct adapter *, bus_size_t,
		    struct em_dma_alloc *);
static void	em_dma_free(struct adapter *, struct em_dma_alloc *);
static void	em_init_tx_ring(struct adapter *);
static int	em_init_rx_ring(struct adapter *);
static int	em_create_tx_ring(struct adapter *);
static int	em_create_rx_ring(struct adapter *);
static void	em_destroy_tx_ring(struct adapter *, int);
static void	em_destroy_rx_ring(struct adapter *, int);
static int	em_newbuf(struct adapter *, int, int);
static int	em_encap(struct adapter *, struct mbuf **, int *, int *);
static void	em_rxcsum(struct adapter *, struct e1000_rx_desc *,
		    struct mbuf *);
static int	em_txcsum(struct adapter *, struct mbuf *,
		    uint32_t *, uint32_t *);
static int	em_tso_pullup(struct adapter *, struct mbuf **);
static int	em_tso_setup(struct adapter *, struct mbuf *,
		    uint32_t *, uint32_t *);

static int	em_get_hw_info(struct adapter *);
static int	em_is_valid_eaddr(const uint8_t *);
static int	em_alloc_pci_res(struct adapter *);
static void	em_free_pci_res(struct adapter *);
static int	em_reset(struct adapter *);
static void	em_setup_ifp(struct adapter *);
static void	em_init_tx_unit(struct adapter *);
static void	em_init_rx_unit(struct adapter *);
static void	em_update_stats(struct adapter *);
static void	em_set_promisc(struct adapter *);
static void	em_disable_promisc(struct adapter *);
static void	em_set_multi(struct adapter *);
static void	em_update_link_status(struct adapter *);
static void	em_smartspeed(struct adapter *);
static void	em_set_itr(struct adapter *, uint32_t);
static void	em_disable_aspm(struct adapter *);

/* Hardware workarounds */
static int	em_82547_fifo_workaround(struct adapter *, int);
static void	em_82547_update_fifo_head(struct adapter *, int);
static int	em_82547_tx_fifo_reset(struct adapter *);
static void	em_82547_move_tail(void *);
static void	em_82547_move_tail_serialized(struct adapter *);
static uint32_t	em_82544_fill_desc(bus_addr_t, uint32_t, PDESC_ARRAY);

static void	em_print_debug_info(struct adapter *);
static void	em_print_nvm_info(struct adapter *);
static void	em_print_hw_stats(struct adapter *);

static int	em_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int	em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
static int	em_sysctl_int_throttle(SYSCTL_HANDLER_ARGS);
static int	em_sysctl_int_tx_nsegs(SYSCTL_HANDLER_ARGS);
static void	em_add_sysctl(struct adapter *adapter);

/* Management and WOL Support */
static void	em_get_mgmt(struct adapter *);
static void	em_rel_mgmt(struct adapter *);
static void	em_get_hw_control(struct adapter *);
static void	em_rel_hw_control(struct adapter *);
static void	em_enable_wol(device_t);

static device_method_t em_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		em_probe),
	DEVMETHOD(device_attach,	em_attach),
	DEVMETHOD(device_detach,	em_detach),
	DEVMETHOD(device_shutdown,	em_shutdown),
	DEVMETHOD(device_suspend,	em_suspend),
	DEVMETHOD(device_resume,	em_resume),
	{ 0, 0 }
};

static driver_t em_driver = {
	"em",
	em_methods,
	sizeof(struct adapter),
};

static devclass_t em_devclass;

DECLARE_DUMMY_MODULE(if_em);
MODULE_DEPEND(em, ig_hal, 1, 1, 1);
DRIVER_MODULE(if_em, pci, em_driver, em_devclass, NULL, NULL);

/*
 * Tunables
 */
static int	em_int_throttle_ceil = EM_DEFAULT_ITR;
static int	em_rxd = EM_DEFAULT_RXD;
static int	em_txd = EM_DEFAULT_TXD;
static int	em_smart_pwr_down = 0;

/* Controls whether promiscuous also shows bad packets */
static int	em_debug_sbp = FALSE;

static int	em_82573_workaround = 1;
static int	em_msi_enable = 1;

TUNABLE_INT("hw.em.int_throttle_ceil", &em_int_throttle_ceil);
TUNABLE_INT("hw.em.rxd", &em_rxd);
TUNABLE_INT("hw.em.txd", &em_txd);
TUNABLE_INT("hw.em.smart_pwr_down", &em_smart_pwr_down);
TUNABLE_INT("hw.em.sbp", &em_debug_sbp);
TUNABLE_INT("hw.em.82573_workaround", &em_82573_workaround);
TUNABLE_INT("hw.em.msi.enable", &em_msi_enable);
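/*
 * These tunables are read once at module load time, e.g. from
 * /boot/loader.conf:
 *
 *	hw.em.rxd=512
 *	hw.em.int_throttle_ceil=6000
 *	hw.em.msi.enable=0
 *
 * (Values shown are examples only, not recommendations.)
 */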
/* Global used in WOL setup with multiport cards */
static int	em_global_quad_port_a = 0;

/* Set this to one to display debug statistics */
static int	em_display_debug_stats = 0;

#if !defined(KTR_IF_EM)
#define KTR_IF_EM	KTR_ALL
#endif
KTR_INFO_MASTER(if_em);
KTR_INFO(KTR_IF_EM, if_em, intr_beg, 0, "intr begin");
KTR_INFO(KTR_IF_EM, if_em, intr_end, 1, "intr end");
KTR_INFO(KTR_IF_EM, if_em, pkt_receive, 4, "rx packet");
KTR_INFO(KTR_IF_EM, if_em, pkt_txqueue, 5, "tx packet");
KTR_INFO(KTR_IF_EM, if_em, pkt_txclean, 6, "tx clean");
#define logif(name)	KTR_LOG(if_em_ ## name)
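/*
 * For example, logif(intr_beg) expands to KTR_LOG(if_em_intr_beg),
 * matching the KTR_INFO() entries above.  Since KTR_IF_EM defaults to
 * KTR_ALL, these trace points are compiled in unless KTR_IF_EM is
 * overridden in the kernel configuration.
 */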
static int
em_probe(device_t dev)
{
	const struct em_vendor_info *ent;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);

	for (ent = em_vendor_info_array; ent->desc != NULL; ++ent) {
		if (vid == ent->vendor_id && did == ent->device_id) {
			device_set_desc(dev, ent->desc);
			device_set_async_attach(dev, TRUE);
			return (ent->ret);
		}
	}
	return (ENXIO);
}

static int
em_attach(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ifnet *ifp = &adapter->arpcom.ac_if;
	int tsize, rsize;
	int error = 0;
	uint16_t eeprom_data, device_id, apme_mask;
	driver_intr_t *intr_func;

	adapter->dev = adapter->osdep.dev = dev;

	callout_init_mp(&adapter->timer);
	callout_init_mp(&adapter->tx_fifo_timer);

	/* Determine hardware and mac info */
	error = em_get_hw_info(adapter);
	if (error) {
		device_printf(dev, "Identify hardware failed\n");
		goto fail;
	}

	/* Setup PCI resources */
	error = em_alloc_pci_res(adapter);
	if (error) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		goto fail;
	}

	/*
	 * For ICH8 and family we need to map the flash memory,
	 * and this must happen after the MAC is identified.
	 */
	if (adapter->hw.mac.type == e1000_ich8lan ||
	    adapter->hw.mac.type == e1000_ich9lan ||
	    adapter->hw.mac.type == e1000_ich10lan ||
	    adapter->hw.mac.type == e1000_pchlan ||
	    adapter->hw.mac.type == e1000_pch2lan) {
		adapter->flash_rid = EM_BAR_FLASH;

		adapter->flash = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
					&adapter->flash_rid, RF_ACTIVE);
		if (adapter->flash == NULL) {
			device_printf(dev, "Mapping of Flash failed\n");
			error = ENXIO;
			goto fail;
		}
		adapter->osdep.flash_bus_space_tag =
		    rman_get_bustag(adapter->flash);
		adapter->osdep.flash_bus_space_handle =
		    rman_get_bushandle(adapter->flash);

		/*
		 * This is used in the shared code
		 * XXX this goof is actually not used.
		 */
		adapter->hw.flash_address = (uint8_t *)adapter->flash;
	}

	switch (adapter->hw.mac.type) {
	case e1000_82571:
	case e1000_82572:
		/*
		 * Pull an extra 4 bytes into the first data segment; see:
		 * 82571/82572 specification update errata #7
		 *
		 * NOTE:
		 * 4 bytes are pulled instead of the 2 bytes mentioned in
		 * the errata, mainly to keep the rest of the data properly
		 * aligned.
		 */
		adapter->flags |= EM_FLAG_TSO_PULLEX;
		/* FALL THROUGH */

	case e1000_82573:
	case e1000_82574:
	case e1000_80003es2lan:
		adapter->flags |= EM_FLAG_TSO;
		break;

	default:
		break;
	}

	/* Do Shared Code initialization */
	if (e1000_setup_init_funcs(&adapter->hw, TRUE)) {
		device_printf(dev, "Setup of Shared code failed\n");
		error = ENXIO;
		goto fail;
	}

	e1000_get_bus_info(&adapter->hw);

	/*
	 * Validate the number of transmit and receive descriptors.  They
	 * must not exceed the hardware maximum and must be a multiple of
	 * EM_DBA_ALIGN.
	 */
	if ((em_txd * sizeof(struct e1000_tx_desc)) % EM_DBA_ALIGN != 0 ||
	    (adapter->hw.mac.type >= e1000_82544 && em_txd > EM_MAX_TXD) ||
	    (adapter->hw.mac.type < e1000_82544 && em_txd > EM_MAX_TXD_82543) ||
	    em_txd < EM_MIN_TXD) {
		device_printf(dev, "Using %d TX descriptors instead of %d!\n",
		    EM_DEFAULT_TXD, em_txd);
		adapter->num_tx_desc = EM_DEFAULT_TXD;
	} else {
		adapter->num_tx_desc = em_txd;
	}
	if ((em_rxd * sizeof(struct e1000_rx_desc)) % EM_DBA_ALIGN != 0 ||
	    (adapter->hw.mac.type >= e1000_82544 && em_rxd > EM_MAX_RXD) ||
	    (adapter->hw.mac.type < e1000_82544 && em_rxd > EM_MAX_RXD_82543) ||
	    em_rxd < EM_MIN_RXD) {
		device_printf(dev, "Using %d RX descriptors instead of %d!\n",
		    EM_DEFAULT_RXD, em_rxd);
		adapter->num_rx_desc = EM_DEFAULT_RXD;
	} else {
		adapter->num_rx_desc = em_rxd;
	}

	adapter->hw.mac.autoneg = DO_AUTO_NEG;
	adapter->hw.phy.autoneg_wait_to_complete = FALSE;
	adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
	adapter->rx_buffer_len = MCLBYTES;

	/*
	 * Interrupt throttle rate
	 */
	if (em_int_throttle_ceil == 0) {
		adapter->int_throttle_ceil = 0;
	} else {
		int throttle = em_int_throttle_ceil;

		if (throttle < 0)
			throttle = EM_DEFAULT_ITR;

		/* Recalculate the tunable value to get the exact frequency. */
		throttle = 1000000000 / 256 / throttle;

		/* Upper 16 bits of ITR are reserved and must be zero */
		if (throttle & 0xffff0000)
			throttle = 1000000000 / 256 / EM_DEFAULT_ITR;

		adapter->int_throttle_ceil = 1000000000 / 256 / throttle;
	}
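	/*
	 * The arithmetic above relies on the ITR register counting in
	 * 256ns units: a ceiling of N interrupts/s corresponds to
	 * 10^9 / 256 / N register ticks, and dividing back recovers the
	 * exact interrupt rate that will actually be programmed.
	 */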
	e1000_init_script_state_82541(&adapter->hw, TRUE);
	e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE);

	/* Copper options */
	if (adapter->hw.phy.media_type == e1000_media_type_copper) {
		adapter->hw.phy.mdix = AUTO_ALL_MODES;
		adapter->hw.phy.disable_polarity_correction = FALSE;
		adapter->hw.phy.ms_type = EM_MASTER_SLAVE;
	}

	/* Set the frame limits assuming standard ethernet sized frames. */
	adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
	adapter->min_frame_size = ETH_ZLEN + ETHER_CRC_LEN;

	/* This controls when hardware reports transmit completion status. */
	adapter->hw.mac.report_tx_early = 1;

	/*
	 * Create top level busdma tag
	 */
	error = bus_dma_tag_create(NULL, 1, 0,
			BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
			NULL, NULL,
			BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
			0, &adapter->parent_dtag);
	if (error) {
		device_printf(dev, "could not create top level DMA tag\n");
		goto fail;
	}

	/*
	 * Allocate Transmit Descriptor ring
	 */
	tsize = roundup2(adapter->num_tx_desc * sizeof(struct e1000_tx_desc),
			 EM_DBA_ALIGN);
	error = em_dma_malloc(adapter, tsize, &adapter->txdma);
	if (error) {
		device_printf(dev, "Unable to allocate tx_desc memory\n");
		goto fail;
	}
	adapter->tx_desc_base = adapter->txdma.dma_vaddr;

	/*
	 * Allocate Receive Descriptor ring
	 */
	rsize = roundup2(adapter->num_rx_desc * sizeof(struct e1000_rx_desc),
			 EM_DBA_ALIGN);
	error = em_dma_malloc(adapter, rsize, &adapter->rxdma);
	if (error) {
		device_printf(dev, "Unable to allocate rx_desc memory\n");
		goto fail;
	}
	adapter->rx_desc_base = adapter->rxdma.dma_vaddr;
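	/*
	 * Sizing note: each legacy descriptor (TX and RX alike) is 16
	 * bytes, so e.g. a 512-entry ring occupies 8KB; the roundup2()
	 * above only matters for descriptor counts that are not already
	 * a multiple of EM_DBA_ALIGN / 16.
	 */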
	/* Allocate multicast array memory. */
	adapter->mta = kmalloc(ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES,
	    M_DEVBUF, M_WAITOK);

	/* Indicate SOL/IDER usage */
	if (e1000_check_reset_block(&adapter->hw)) {
		device_printf(dev,
		    "PHY reset is blocked due to SOL/IDER session.\n");
	}

	/*
	 * Start from a known state; this is important for reading the
	 * NVM and MAC address.
	 */
	e1000_reset_hw(&adapter->hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in a sleep state; call it again.
		 * If it fails a second time, it's a real issue.
		 */
		if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
			device_printf(dev,
			    "The EEPROM Checksum Is Not Valid\n");
			error = EIO;
			goto fail;
		}
	}

	/* Copy the permanent MAC address out of the EEPROM */
	if (e1000_read_mac_addr(&adapter->hw) < 0) {
		device_printf(dev, "EEPROM read error while reading MAC"
		    " address\n");
		error = EIO;
		goto fail;
	}
	if (!em_is_valid_eaddr(adapter->hw.mac.addr)) {
		device_printf(dev, "Invalid MAC address\n");
		error = EIO;
		goto fail;
	}

	/* Allocate transmit descriptors and buffers */
	error = em_create_tx_ring(adapter);
	if (error) {
		device_printf(dev, "Could not setup transmit structures\n");
		goto fail;
	}

	/* Allocate receive descriptors and buffers */
	error = em_create_rx_ring(adapter);
	if (error) {
		device_printf(dev, "Could not setup receive structures\n");
		goto fail;
	}

	/* Manually turn off all interrupts */
	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);

	/* Determine if we have to control management hardware */
	if (e1000_enable_mng_pass_thru(&adapter->hw))
		adapter->flags |= EM_FLAG_HAS_MGMT;

	/*
	 * Setup Wake-on-Lan
	 */
	apme_mask = EM_EEPROM_APME;
	eeprom_data = 0;
	switch (adapter->hw.mac.type) {
	case e1000_82542:
	case e1000_82543:
		break;

	case e1000_82573:
	case e1000_82583:
		adapter->flags |= EM_FLAG_HAS_AMT;
		/* FALL THROUGH */

	case e1000_82546:
	case e1000_82546_rev_3:
	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
		if (adapter->hw.bus.func == 1) {
			e1000_read_nvm(&adapter->hw,
			    NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
		} else {
			e1000_read_nvm(&adapter->hw,
			    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		}
		break;

	case e1000_ich8lan:
	case e1000_ich9lan:
	case e1000_ich10lan:
	case e1000_pchlan:
	case e1000_pch2lan:
		apme_mask = E1000_WUC_APME;
		adapter->flags |= EM_FLAG_HAS_AMT;
		eeprom_data = E1000_READ_REG(&adapter->hw, E1000_WUC);
		break;

	default:
		e1000_read_nvm(&adapter->hw,
		    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	}
	if (eeprom_data & apme_mask)
		adapter->wol = E1000_WUFC_MAG | E1000_WUFC_MC;

	/*
	 * We have the eeprom settings; now apply the special cases
	 * where the eeprom may be wrong or the board won't support
	 * wake on lan on a particular port.
	 */
	device_id = pci_get_device(dev);
	switch (device_id) {
	case E1000_DEV_ID_82546GB_PCIE:
		adapter->wol = 0;
		break;

	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546GB_FIBER:
	case E1000_DEV_ID_82571EB_FIBER:
		/*
		 * Wake events are only supported on port A for dual fiber,
		 * regardless of the eeprom setting.
		 */
		if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
		    E1000_STATUS_FUNC_1)
			adapter->wol = 0;
		break;

	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
	case E1000_DEV_ID_82571EB_QUAD_COPPER:
	case E1000_DEV_ID_82571EB_QUAD_FIBER:
	case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
		/* If quad port adapter, disable WoL on all but port A */
		if (em_global_quad_port_a != 0)
			adapter->wol = 0;
		/* Reset for multiple quad port adapters */
		if (++em_global_quad_port_a == 4)
			em_global_quad_port_a = 0;
		break;
	}
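	/*
	 * To trace the quad-port accounting above: the four functions of
	 * a quad-port adapter attach in sequence, so only the first one
	 * (port A, counter value 0) keeps its WOL setting; the counter
	 * wraps at 4 so the next quad-port adapter in the system starts
	 * fresh.
	 */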
	/* XXX disable wol */
	adapter->wol = 0;

	/* Setup OS specific network interface */
	em_setup_ifp(adapter);

	/* Add sysctl tree; this must come after em_setup_ifp() */
	em_add_sysctl(adapter);

#ifdef IFPOLL_ENABLE
	/* Polling setup */
	ifpoll_compat_setup(&adapter->npoll,
	    &adapter->sysctl_ctx, adapter->sysctl_tree, device_get_unit(dev),
	    ifp->if_serializer);
#endif

	/* Reset the hardware */
	error = em_reset(adapter);
	if (error) {
		device_printf(dev, "Unable to reset the hardware\n");
		goto fail;
	}

	/* Initialize statistics */
	em_update_stats(adapter);

	adapter->hw.mac.get_link_status = 1;
	em_update_link_status(adapter);

	/* Do we need the workaround for 82544 PCI-X adapters? */
	if (adapter->hw.bus.type == e1000_bus_type_pcix &&
	    adapter->hw.mac.type == e1000_82544)
		adapter->pcix_82544 = TRUE;
	else
		adapter->pcix_82544 = FALSE;

	if (adapter->pcix_82544) {
		/*
		 * 82544 on PCI-X may split one TX segment
		 * into two TX descs, so we double its number
		 * of spare TX desc here.
		 */
		adapter->spare_tx_desc = 2 * EM_TX_SPARE;
	} else {
		adapter->spare_tx_desc = EM_TX_SPARE;
	}
	if (adapter->flags & EM_FLAG_TSO)
		adapter->spare_tx_desc = EM_TX_SPARE_TSO;
	adapter->tx_wreg_nsegs = 8;

	/*
	 * Keep the following relationship between spare_tx_desc,
	 * oact_tx_desc and tx_int_nsegs:
	 * (spare_tx_desc + EM_TX_RESERVED) <=
	 * oact_tx_desc <= EM_TX_OACTIVE_MAX <= tx_int_nsegs
	 */
	adapter->oact_tx_desc = adapter->num_tx_desc / 8;
	if (adapter->oact_tx_desc > EM_TX_OACTIVE_MAX)
		adapter->oact_tx_desc = EM_TX_OACTIVE_MAX;
	if (adapter->oact_tx_desc < adapter->spare_tx_desc + EM_TX_RESERVED)
		adapter->oact_tx_desc = adapter->spare_tx_desc + EM_TX_RESERVED;

	adapter->tx_int_nsegs = adapter->num_tx_desc / 16;
	if (adapter->tx_int_nsegs < adapter->oact_tx_desc)
		adapter->tx_int_nsegs = adapter->oact_tx_desc;
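	/*
	 * Worked example (assuming a 512-descriptor ring): oact_tx_desc
	 * starts at 512 / 8 = 64, is clamped into the range
	 * [spare_tx_desc + EM_TX_RESERVED, EM_TX_OACTIVE_MAX], and
	 * tx_int_nsegs becomes max(512 / 16, oact_tx_desc), which
	 * preserves the relationship documented above.
	 */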
	/* Non-AMT based hardware can now take control from firmware */
	if ((adapter->flags & (EM_FLAG_HAS_MGMT | EM_FLAG_HAS_AMT)) ==
	    EM_FLAG_HAS_MGMT && adapter->hw.mac.type >= e1000_82571)
		em_get_hw_control(adapter);

	/*
	 * Missing Interrupt Following ICR read:
	 *
	 * 82571/82572 specification update errata #76
	 * 82573 specification update errata #31
	 * 82574 specification update errata #12
	 * 82583 specification update errata #4
	 */
	intr_func = em_intr;
	if ((adapter->flags & EM_FLAG_SHARED_INTR) &&
	    (adapter->hw.mac.type == e1000_82571 ||
	     adapter->hw.mac.type == e1000_82572 ||
	     adapter->hw.mac.type == e1000_82573 ||
	     adapter->hw.mac.type == e1000_82574 ||
	     adapter->hw.mac.type == e1000_82583))
		intr_func = em_intr_mask;

	error = bus_setup_intr(dev, adapter->intr_res, INTR_MPSAFE,
			       intr_func, adapter, &adapter->intr_tag,
			       ifp->if_serializer);
	if (error) {
		device_printf(dev, "Failed to register interrupt handler");
		ether_ifdetach(&adapter->arpcom.ac_if);
		goto fail;
	}

	ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(adapter->intr_res));
	return (0);
fail:
	em_detach(dev);
	return (error);
}

static int
em_detach(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &adapter->arpcom.ac_if;

		lwkt_serialize_enter(ifp->if_serializer);

		em_stop(adapter);

		e1000_phy_hw_reset(&adapter->hw);

		em_rel_mgmt(adapter);
		em_rel_hw_control(adapter);

		if (adapter->wol) {
			E1000_WRITE_REG(&adapter->hw, E1000_WUC,
					E1000_WUC_PME_EN);
			E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
			em_enable_wol(dev);
		}

		bus_teardown_intr(dev, adapter->intr_res, adapter->intr_tag);

		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	} else if (adapter->memory != NULL) {
		em_rel_hw_control(adapter);
	}
	bus_generic_detach(dev);

	em_free_pci_res(adapter);

	em_destroy_tx_ring(adapter, adapter->num_tx_desc);
	em_destroy_rx_ring(adapter, adapter->num_rx_desc);

	/* Free Transmit Descriptor ring */
	if (adapter->tx_desc_base)
		em_dma_free(adapter, &adapter->txdma);

	/* Free Receive Descriptor ring */
	if (adapter->rx_desc_base)
		em_dma_free(adapter, &adapter->rxdma);

	/* Free top level busdma tag */
	if (adapter->parent_dtag != NULL)
		bus_dma_tag_destroy(adapter->parent_dtag);

	/* Free sysctl tree */
	if (adapter->sysctl_tree != NULL)
		sysctl_ctx_free(&adapter->sysctl_ctx);

	if (adapter->mta != NULL)
		kfree(adapter->mta, M_DEVBUF);

	return (0);
}

static int
em_shutdown(device_t dev)
{
	return em_suspend(dev);
}

static int
em_suspend(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ifnet *ifp = &adapter->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);

	em_stop(adapter);

	em_rel_mgmt(adapter);
	em_rel_hw_control(adapter);

	if (adapter->wol) {
		E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
		E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
		em_enable_wol(dev);
	}

	lwkt_serialize_exit(ifp->if_serializer);

	return bus_generic_suspend(dev);
}

static int
em_resume(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ifnet *ifp = &adapter->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);

	if (adapter->hw.mac.type == e1000_pch2lan)
		e1000_resume_workarounds_pchlan(&adapter->hw);

	em_init(adapter);
	em_get_mgmt(adapter);
	if_devstart(ifp);

	lwkt_serialize_exit(ifp->if_serializer);

	return bus_generic_resume(dev);
}

static void
em_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct adapter *adapter = ifp->if_softc;
	struct mbuf *m_head;
	int idx = -1, nsegs = 0;

	ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd))
		return;

	if (!adapter->link_active) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	while (!ifq_is_empty(&ifp->if_snd)) {
		/* Do we at least have the minimal number of free TX descs? */
		if (EM_IS_OACTIVE(adapter)) {
			em_tx_collect(adapter);
			if (EM_IS_OACTIVE(adapter)) {
				ifq_set_oactive(&ifp->if_snd);
				adapter->no_tx_desc_avail1++;
				break;
			}
		}

		logif(pkt_txqueue);
		m_head = ifq_dequeue(&ifp->if_snd, NULL);
		if (m_head == NULL)
			break;

		if (em_encap(adapter, &m_head, &nsegs, &idx)) {
			ifp->if_oerrors++;
			em_tx_collect(adapter);
			continue;
		}

		if (nsegs >= adapter->tx_wreg_nsegs && idx >= 0) {
			E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), idx);
			nsegs = 0;
			idx = -1;
		}

		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);

		/* Set timeout in case hardware has problems transmitting. */
		ifp->if_timer = EM_TX_TIMEOUT;
	}
	if (idx >= 0)
		E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), idx);
}
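/*
 * A note on em_start() above: the TDT doorbell register is written only
 * once every tx_wreg_nsegs queued segments and once after the loop, so
 * a burst of packets costs far fewer MMIO writes than one doorbell per
 * frame would.
 */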
static int
em_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	uint16_t eeprom_data = 0;
	int max_frame_size, mask, reinit;
	int error = 0;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (command) {
	case SIOCSIFMTU:
		switch (adapter->hw.mac.type) {
		case e1000_82573:
			/*
			 * 82573 only supports jumbo frames
			 * if ASPM is disabled.
			 */
			e1000_read_nvm(&adapter->hw,
			    NVM_INIT_3GIO_3, 1, &eeprom_data);
			if (eeprom_data & NVM_WORD1A_ASPM_MASK) {
				max_frame_size = ETHER_MAX_LEN;
				break;
			}
			/* FALL THROUGH */

		/* Limit Jumbo Frame size */
		case e1000_82571:
		case e1000_82572:
		case e1000_ich9lan:
		case e1000_ich10lan:
		case e1000_pch2lan:
		case e1000_82574:
		case e1000_82583:
		case e1000_80003es2lan:
			max_frame_size = 9234;
			break;

		case e1000_pchlan:
			max_frame_size = 4096;
			break;

		/* Adapters that do not support jumbo frames */
		case e1000_82542:
		case e1000_ich8lan:
			max_frame_size = ETHER_MAX_LEN;
			break;

		default:
			max_frame_size = MAX_JUMBO_FRAME_SIZE;
			break;
		}
		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
		    ETHER_CRC_LEN) {
			error = EINVAL;
			break;
		}

		ifp->if_mtu = ifr->ifr_mtu;
		adapter->max_frame_size =
		    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

		if (ifp->if_flags & IFF_RUNNING)
			em_init(adapter);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_flags & IFF_RUNNING)) {
				if ((ifp->if_flags ^ adapter->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					em_disable_promisc(adapter);
					em_set_promisc(adapter);
				}
			} else {
				em_init(adapter);
			}
		} else if (ifp->if_flags & IFF_RUNNING) {
			em_stop(adapter);
		}
		adapter->if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING) {
			em_disable_intr(adapter);
			em_set_multi(adapter);
			if (adapter->hw.mac.type == e1000_82542 &&
			    adapter->hw.revision_id == E1000_REVISION_2)
				em_init_rx_unit(adapter);
#ifdef IFPOLL_ENABLE
			if (!(ifp->if_flags & IFF_NPOLLING))
#endif
				em_enable_intr(adapter);
		}
		break;

	case SIOCSIFMEDIA:
		/* Check SOL/IDER usage */
		if (e1000_check_reset_block(&adapter->hw)) {
			device_printf(adapter->dev, "Media change is"
			    " blocked due to SOL/IDER session.\n");
			break;
		}
		/* FALL THROUGH */

	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;

	case SIOCSIFCAP:
		reinit = 0;
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_RXCSUM) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			reinit = 1;
		}
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist |= EM_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~EM_CSUM_FEATURES;
		}
		if (mask & IFCAP_TSO) {
			ifp->if_capenable ^= IFCAP_TSO;
			if (ifp->if_capenable & IFCAP_TSO)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		if (reinit && (ifp->if_flags & IFF_RUNNING))
			em_init(adapter);
		break;

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return (error);
}
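/*
 * MTU math used in SIOCSIFMTU above: the limit is on the full frame,
 * so e.g. a 9234-byte frame cap allows an MTU of at most
 * 9234 - ETHER_HDR_LEN (14) - ETHER_CRC_LEN (4) = 9216 bytes.
 */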
static void
em_watchdog(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/*
	 * The timer is set to EM_TX_TIMEOUT (5) every time start queues
	 * a packet.  Then txeof keeps resetting it as long as it cleans
	 * at least one descriptor.  Finally, any time all descriptors
	 * are clean the timer is set to 0.
	 */

	if (E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
	    E1000_READ_REG(&adapter->hw, E1000_TDH(0))) {
		/*
		 * If we reach here, all TX jobs are completed and
		 * the TX engine should have been idled for some time.
		 * We don't need to call if_devstart() here.
		 */
		ifq_clr_oactive(&ifp->if_snd);
		ifp->if_timer = 0;
		return;
	}

	/*
	 * If we are in this routine because of pause frames, then
	 * don't reset the hardware.
	 */
	if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
	    E1000_STATUS_TXOFF) {
		ifp->if_timer = EM_TX_TIMEOUT;
		return;
	}

	if (e1000_check_for_link(&adapter->hw) == 0)
		if_printf(ifp, "watchdog timeout -- resetting\n");

	ifp->if_oerrors++;
	adapter->watchdog_events++;

	em_init(adapter);

	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}

static void
em_init(void *xsc)
{
	struct adapter *adapter = xsc;
	struct ifnet *ifp = &adapter->arpcom.ac_if;
	device_t dev = adapter->dev;

	ASSERT_SERIALIZED(ifp->if_serializer);

	em_stop(adapter);

	/* Get the latest MAC address; the user may have set a LAA. */
	bcopy(IF_LLADDR(ifp), adapter->hw.mac.addr, ETHER_ADDR_LEN);

	/* Put the address into the Receive Address Array */
	e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);

	/*
	 * With the 82571 adapter, RAR[0] may be overwritten
	 * when the other port is reset.  We make a duplicate
	 * in RAR[14] for that eventuality; this assures
	 * the interface continues to function.
	 */
	if (adapter->hw.mac.type == e1000_82571) {
		e1000_set_laa_state_82571(&adapter->hw, TRUE);
		e1000_rar_set(&adapter->hw, adapter->hw.mac.addr,
		    E1000_RAR_ENTRIES - 1);
	}

	/* Reset the hardware */
	if (em_reset(adapter)) {
		device_printf(dev, "Unable to reset the hardware\n");
		/* XXX em_stop()? */
		return;
	}
	em_update_link_status(adapter);

	/* Setup VLAN support, basic and offload if available */
	E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);

	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
		uint32_t ctrl;

		ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
		ctrl |= E1000_CTRL_VME;
		E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
	}
	/* Configure for OS presence */
	em_get_mgmt(adapter);

	/* Prepare transmit descriptors and buffers */
	em_init_tx_ring(adapter);
	em_init_tx_unit(adapter);

	/* Setup Multicast table */
	em_set_multi(adapter);

	/* Prepare receive descriptors and buffers */
	if (em_init_rx_ring(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		em_stop(adapter);
		return;
	}
	em_init_rx_unit(adapter);

	/* Don't lose promiscuous settings */
	em_set_promisc(adapter);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	callout_reset(&adapter->timer, hz, em_timer, adapter);
	e1000_clear_hw_cntrs_base_generic(&adapter->hw);

	/* MSI/X configuration for 82574 */
	if (adapter->hw.mac.type == e1000_82574) {
		int tmp;

		tmp = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
		tmp |= E1000_CTRL_EXT_PBA_CLR;
		E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, tmp);
		/*
		 * XXX MSIX
		 * Set the IVAR - interrupt vector routing.
		 * Each nibble represents a vector, high bit
		 * is enable, other 3 bits are the MSIX table
		 * entry, we map RXQ0 to 0, TXQ0 to 1, and
		 * Link (other) to 2, hence the magic number.
		 */
		E1000_WRITE_REG(&adapter->hw, E1000_IVAR, 0x800A0908);
	}

#ifdef IFPOLL_ENABLE
	/*
	 * Only enable interrupts if we are not polling; make sure
	 * they are off otherwise.
	 */
	if (ifp->if_flags & IFF_NPOLLING)
		em_disable_intr(adapter);
	else
#endif	/* IFPOLL_ENABLE */
	em_enable_intr(adapter);

	/* AMT based hardware can now take control from firmware */
	if ((adapter->flags & (EM_FLAG_HAS_MGMT | EM_FLAG_HAS_AMT)) ==
	    (EM_FLAG_HAS_MGMT | EM_FLAG_HAS_AMT) &&
	    adapter->hw.mac.type >= e1000_82571)
		em_get_hw_control(adapter);
}
#ifdef IFPOLL_ENABLE

static void
em_npoll_compat(struct ifnet *ifp, void *arg __unused, int count)
{
	struct adapter *adapter = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (adapter->npoll.ifpc_stcount-- == 0) {
		uint32_t reg_icr;

		adapter->npoll.ifpc_stcount = adapter->npoll.ifpc_stfrac;

		reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
		if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
			callout_stop(&adapter->timer);
			adapter->hw.mac.get_link_status = 1;
			em_update_link_status(adapter);
			callout_reset(&adapter->timer, hz, em_timer, adapter);
		}
	}

	em_rxeof(adapter, count);
	em_txeof(adapter);

	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}

static void
em_npoll(struct ifnet *ifp, struct ifpoll_info *info)
{
	struct adapter *adapter = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (info != NULL) {
		int cpuid = adapter->npoll.ifpc_cpuid;

		info->ifpi_rx[cpuid].poll_func = em_npoll_compat;
		info->ifpi_rx[cpuid].arg = NULL;
		info->ifpi_rx[cpuid].serializer = ifp->if_serializer;

		if (ifp->if_flags & IFF_RUNNING)
			em_disable_intr(adapter);
		ifq_set_cpuid(&ifp->if_snd, cpuid);
	} else {
		if (ifp->if_flags & IFF_RUNNING)
			em_enable_intr(adapter);
		ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(adapter->intr_res));
	}
}

#endif	/* IFPOLL_ENABLE */
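/*
 * In the polling path above, ifpc_stcount/ifpc_stfrac rate-limit the
 * comparatively expensive link-state check: the ICR read and the link
 * update run only once every ifpc_stfrac + 1 polls, while RX and TX
 * cleanup run on every poll.
 */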
static void
em_intr(void *xsc)
{
	em_intr_body(xsc, TRUE);
}

static void
em_intr_body(struct adapter *adapter, boolean_t chk_asserted)
{
	struct ifnet *ifp = &adapter->arpcom.ac_if;
	uint32_t reg_icr;

	logif(intr_beg);
	ASSERT_SERIALIZED(ifp->if_serializer);

	reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);

	if (chk_asserted &&
	    ((adapter->hw.mac.type >= e1000_82571 &&
	      (reg_icr & E1000_ICR_INT_ASSERTED) == 0) ||
	     reg_icr == 0)) {
		logif(intr_end);
		return;
	}

	/*
	 * XXX: some laptops trigger several spurious interrupts
	 * on em(4) when in the resume cycle.  The ICR register
	 * reports all-ones value in this case.  Processing such
	 * interrupts would lead to a freeze.  I don't know why.
	 */
	if (reg_icr == 0xffffffff) {
		logif(intr_end);
		return;
	}

	if (ifp->if_flags & IFF_RUNNING) {
		if (reg_icr &
		    (E1000_ICR_RXT0 | E1000_ICR_RXDMT0 | E1000_ICR_RXO))
			em_rxeof(adapter, -1);
		if (reg_icr & E1000_ICR_TXDW) {
			em_txeof(adapter);
			if (!ifq_is_empty(&ifp->if_snd))
				if_devstart(ifp);
		}
	}

	/* Link status change */
	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		callout_stop(&adapter->timer);
		adapter->hw.mac.get_link_status = 1;
		em_update_link_status(adapter);

		/* Deal with TX cruft when link lost */
		em_tx_purge(adapter);

		callout_reset(&adapter->timer, hz, em_timer, adapter);
	}

	if (reg_icr & E1000_ICR_RXO)
		adapter->rx_overruns++;

	logif(intr_end);
}

static void
em_intr_mask(void *xsc)
{
	struct adapter *adapter = xsc;

	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
	/*
	 * NOTE:
	 * ICR.INT_ASSERTED bit will never be set if IMS is 0,
	 * so don't check it.
	 */
	em_intr_body(adapter, FALSE);
	E1000_WRITE_REG(&adapter->hw, E1000_IMS, IMS_ENABLE_MASK);
}
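/*
 * em_intr_mask() exists for the "missing interrupt following ICR read"
 * errata listed in em_attach(): with a shared interrupt line the
 * INT_ASSERTED test in em_intr_body() cannot be trusted, so this
 * handler masks everything via IMC, processes unconditionally, and
 * re-arms the interrupt via IMS afterwards.
 */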
static void
em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter *adapter = ifp->if_softc;
	u_char fiber_type = IFM_1000_SX;

	ASSERT_SERIALIZED(ifp->if_serializer);

	em_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!adapter->link_active)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
	    adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
		if (adapter->hw.mac.type == e1000_82545)
			fiber_type = IFM_1000_LX;
		ifmr->ifm_active |= fiber_type | IFM_FDX;
	} else {
		switch (adapter->link_speed) {
		case 10:
			ifmr->ifm_active |= IFM_10_T;
			break;
		case 100:
			ifmr->ifm_active |= IFM_100_TX;
			break;
		case 1000:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		}
		if (adapter->link_duplex == FULL_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;
	}
}

static int
em_media_change(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia *ifm = &adapter->media;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		adapter->hw.mac.autoneg = DO_AUTO_NEG;
		adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
		break;

	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_T:
		adapter->hw.mac.autoneg = DO_AUTO_NEG;
		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;

	case IFM_100_TX:
		adapter->hw.mac.autoneg = FALSE;
		adapter->hw.phy.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
		else
			adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
		break;

	case IFM_10_T:
		adapter->hw.mac.autoneg = FALSE;
		adapter->hw.phy.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
		else
			adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
		break;

	default:
		if_printf(ifp, "Unsupported media type\n");
		break;
	}

	em_init(adapter);

	return (0);
}
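/*
 * For example, "ifconfig em0 media 100baseTX mediaopt full-duplex"
 * arrives here as IFM_100_TX with IFM_FDX set, which disables
 * autonegotiation and forces ADVERTISE_100_FULL; plain
 * "media autoselect" restores DO_AUTO_NEG with the default
 * advertisement mask.
 */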
static int
em_encap(struct adapter *adapter, struct mbuf **m_headp,
    int *segs_used, int *idx)
{
	bus_dma_segment_t segs[EM_MAX_SCATTER];
	bus_dmamap_t map;
	struct em_buffer *tx_buffer, *tx_buffer_mapped;
	struct e1000_tx_desc *ctxd = NULL;
	struct mbuf *m_head = *m_headp;
	uint32_t txd_upper, txd_lower, txd_used, cmd = 0;
	int maxsegs, nsegs, i, j, first, last = 0, error;

	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		error = em_tso_pullup(adapter, m_headp);
		if (error)
			return error;
		m_head = *m_headp;
	}

	txd_upper = txd_lower = 0;
	txd_used = 0;

	/*
	 * Capture the first descriptor index, this descriptor
	 * will have the index of the EOP which is the only one
	 * that now gets a DONE bit writeback.
	 */
	first = adapter->next_avail_tx_desc;
	tx_buffer = &adapter->tx_buffer_area[first];
	tx_buffer_mapped = tx_buffer;
	map = tx_buffer->map;

	maxsegs = adapter->num_tx_desc_avail - EM_TX_RESERVED;
	KASSERT(maxsegs >= adapter->spare_tx_desc,
		("not enough spare TX desc"));
	if (adapter->pcix_82544) {
		/* Half it; see the comment in em_attach() */
		maxsegs >>= 1;
	}
	if (maxsegs > EM_MAX_SCATTER)
		maxsegs = EM_MAX_SCATTER;

	error = bus_dmamap_load_mbuf_defrag(adapter->txtag, map, m_headp,
			segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		if (error == ENOBUFS)
			adapter->mbuf_alloc_failed++;
		else
			adapter->no_tx_dma_setup++;

		m_freem(*m_headp);
		*m_headp = NULL;
		return error;
	}
	bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);

	m_head = *m_headp;
	adapter->tx_nsegs += nsegs;
	*segs_used += nsegs;

	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		/* TSO will consume one TX desc */
		i = em_tso_setup(adapter, m_head, &txd_upper, &txd_lower);
		adapter->tx_nsegs += i;
		*segs_used += i;
	} else if (m_head->m_pkthdr.csum_flags & EM_CSUM_FEATURES) {
		/* TX csum offloading will consume one TX desc */
		i = em_txcsum(adapter, m_head, &txd_upper, &txd_lower);
		adapter->tx_nsegs += i;
		*segs_used += i;
	}
	i = adapter->next_avail_tx_desc;

	/* Set up our transmit descriptors */
	for (j = 0; j < nsegs; j++) {
		/* If the adapter is an 82544 on a PCI-X bus */
		if (adapter->pcix_82544) {
			DESC_ARRAY desc_array;
			uint32_t array_elements, counter;

			/*
			 * Check the Address and Length combination and
			 * split the data accordingly
			 */
			array_elements = em_82544_fill_desc(segs[j].ds_addr,
						segs[j].ds_len, &desc_array);
			for (counter = 0; counter < array_elements; counter++) {
				KKASSERT(txd_used < adapter->num_tx_desc_avail);

				tx_buffer = &adapter->tx_buffer_area[i];
				ctxd = &adapter->tx_desc_base[i];

				ctxd->buffer_addr = htole64(
				    desc_array.descriptor[counter].address);
				ctxd->lower.data = htole32(
				    E1000_TXD_CMD_IFCS | txd_lower |
				    desc_array.descriptor[counter].length);
				ctxd->upper.data = htole32(txd_upper);

				last = i;
				if (++i == adapter->num_tx_desc)
					i = 0;

				txd_used++;
			}
		} else {
			tx_buffer = &adapter->tx_buffer_area[i];
			ctxd = &adapter->tx_desc_base[i];

			ctxd->buffer_addr = htole64(segs[j].ds_addr);
			ctxd->lower.data = htole32(E1000_TXD_CMD_IFCS |
						   txd_lower | segs[j].ds_len);
			ctxd->upper.data = htole32(txd_upper);

			last = i;
			if (++i == adapter->num_tx_desc)
				i = 0;
		}
	}

	adapter->next_avail_tx_desc = i;
	if (adapter->pcix_82544) {
		KKASSERT(adapter->num_tx_desc_avail > txd_used);
		adapter->num_tx_desc_avail -= txd_used;
	} else {
		KKASSERT(adapter->num_tx_desc_avail > nsegs);
		adapter->num_tx_desc_avail -= nsegs;
	}

	/* Handle VLAN tag */
	if (m_head->m_flags & M_VLANTAG) {
		/* Set the vlan id. */
		ctxd->upper.fields.special =
		    htole16(m_head->m_pkthdr.ether_vlantag);

		/* Tell hardware to add tag */
		ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
	}

	tx_buffer->m_head = m_head;
	tx_buffer_mapped->map = tx_buffer->map;
	tx_buffer->map = map;

	if (adapter->tx_nsegs >= adapter->tx_int_nsegs) {
		adapter->tx_nsegs = 0;

		/*
		 * Report Status (RS) is turned on
		 * every tx_int_nsegs descriptors.
		 */
		cmd = E1000_TXD_CMD_RS;

		/*
		 * Keep track of the descriptor, which will
		 * be written back by hardware.
		 */
		adapter->tx_dd[adapter->tx_dd_tail] = last;
		EM_INC_TXDD_IDX(adapter->tx_dd_tail);
		KKASSERT(adapter->tx_dd_tail != adapter->tx_dd_head);
	}

	/*
	 * Last Descriptor of Packet needs End Of Packet (EOP)
	 */
	ctxd->lower.data |= htole32(E1000_TXD_CMD_EOP | cmd);

	if (adapter->hw.mac.type == e1000_82547) {
		/*
		 * Advance the Transmit Descriptor Tail (TDT), this tells the
		 * E1000 that this frame is available to transmit.
		 */
		if (adapter->link_duplex == HALF_DUPLEX) {
			em_82547_move_tail_serialized(adapter);
		} else {
			E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), i);
			em_82547_update_fifo_head(adapter,
			    m_head->m_pkthdr.len);
		}
	} else {
		/*
		 * Defer TDT updating until enough descriptors are set up
		 */
		*idx = i;
	}
	return (0);
}
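/*
 * Descriptor writeback strategy used by em_encap() above: only
 * descriptors carrying the RS bit (one every tx_int_nsegs segments)
 * report a DONE status, and their indices are remembered in the
 * tx_dd[] ring, so the TX completion path has a short, explicit list
 * of descriptors to check rather than scanning the whole ring.
 */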
/*
 * 82547 workaround to avoid controller hang in half-duplex environment.
 * The workaround is to avoid queuing a large packet that would span
 * the internal Tx FIFO ring boundary.  We need to reset the FIFO pointers
 * in this case.  We do that only when FIFO is quiescent.
 */
static void
em_82547_move_tail_serialized(struct adapter *adapter)
{
	struct e1000_tx_desc *tx_desc;
	uint16_t hw_tdt, sw_tdt, length = 0;
	bool eop = 0;

	ASSERT_SERIALIZED(adapter->arpcom.ac_if.if_serializer);

	hw_tdt = E1000_READ_REG(&adapter->hw, E1000_TDT(0));
	sw_tdt = adapter->next_avail_tx_desc;

	while (hw_tdt != sw_tdt) {
		tx_desc = &adapter->tx_desc_base[hw_tdt];
		length += tx_desc->lower.flags.length;
		eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
		if (++hw_tdt == adapter->num_tx_desc)
			hw_tdt = 0;

		if (eop) {
			if (em_82547_fifo_workaround(adapter, length)) {
				adapter->tx_fifo_wrk_cnt++;
				callout_reset(&adapter->tx_fifo_timer, 1,
					em_82547_move_tail, adapter);
				break;
			}
			E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), hw_tdt);
			em_82547_update_fifo_head(adapter, length);
			length = 0;
		}
	}
}

static void
em_82547_move_tail(void *xsc)
{
	struct adapter *adapter = xsc;
	struct ifnet *ifp = &adapter->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	em_82547_move_tail_serialized(adapter);
	lwkt_serialize_exit(ifp->if_serializer);
}

static int
em_82547_fifo_workaround(struct adapter *adapter, int len)
{
	int fifo_space, fifo_pkt_len;

	fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);

	if (adapter->link_duplex == HALF_DUPLEX) {
		fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;

		if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
			if (em_82547_tx_fifo_reset(adapter))
				return (0);
			else
				return (1);
		}
	}
	return (0);
}

static void
em_82547_update_fifo_head(struct adapter *adapter, int len)
{
	int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);

	/* tx_fifo_head is always 16 byte aligned */
	adapter->tx_fifo_head += fifo_pkt_len;
	if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
		adapter->tx_fifo_head -= adapter->tx_fifo_size;
}

static int
em_82547_tx_fifo_reset(struct adapter *adapter)
{
	uint32_t tctl;

	if ((E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
	     E1000_READ_REG(&adapter->hw, E1000_TDH(0))) &&
	    (E1000_READ_REG(&adapter->hw, E1000_TDFT) ==
	     E1000_READ_REG(&adapter->hw, E1000_TDFH)) &&
	    (E1000_READ_REG(&adapter->hw, E1000_TDFTS) ==
	     E1000_READ_REG(&adapter->hw, E1000_TDFHS)) &&
	    (E1000_READ_REG(&adapter->hw, E1000_TDFPC) == 0)) {
		/* Disable TX unit */
		tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
		E1000_WRITE_REG(&adapter->hw, E1000_TCTL,
		    tctl & ~E1000_TCTL_EN);

		/* Reset FIFO pointers */
		E1000_WRITE_REG(&adapter->hw, E1000_TDFT,
		    adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, E1000_TDFH,
		    adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, E1000_TDFTS,
		    adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, E1000_TDFHS,
		    adapter->tx_head_addr);

		/* Re-enable TX unit */
		E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
		E1000_WRITE_FLUSH(&adapter->hw);

		adapter->tx_fifo_head = 0;
		adapter->tx_fifo_reset_cnt++;

		return (TRUE);
	} else {
		return (FALSE);
	}
}
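/*
 * A sketch of the workaround arithmetic above: a packet occupies
 * roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR) bytes of the internal TX
 * FIFO.  In half-duplex, when that length would wrap past the space
 * left before the FIFO end by EM_82547_PKT_THRESH or more, the tail
 * update is deferred via a one-tick callout; once the FIFO has fully
 * drained, its pointers are reset to tx_head_addr and transmission
 * resumes.
 */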
E1000_READ_REG(&adapter->hw, E1000_RCTL); 1902 1903 if (ifp->if_flags & IFF_PROMISC) { 1904 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); 1905 /* Turn this on if you want to see bad packets */ 1906 if (em_debug_sbp) 1907 reg_rctl |= E1000_RCTL_SBP; 1908 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); 1909 } else if (ifp->if_flags & IFF_ALLMULTI) { 1910 reg_rctl |= E1000_RCTL_MPE; 1911 reg_rctl &= ~E1000_RCTL_UPE; 1912 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); 1913 } 1914 } 1915 1916 static void 1917 em_disable_promisc(struct adapter *adapter) 1918 { 1919 uint32_t reg_rctl; 1920 1921 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); 1922 1923 reg_rctl &= ~E1000_RCTL_UPE; 1924 reg_rctl &= ~E1000_RCTL_MPE; 1925 reg_rctl &= ~E1000_RCTL_SBP; 1926 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); 1927 } 1928 1929 static void 1930 em_set_multi(struct adapter *adapter) 1931 { 1932 struct ifnet *ifp = &adapter->arpcom.ac_if; 1933 struct ifmultiaddr *ifma; 1934 uint32_t reg_rctl = 0; 1935 uint8_t *mta; 1936 int mcnt = 0; 1937 1938 mta = adapter->mta; 1939 bzero(mta, ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES); 1940 1941 if (adapter->hw.mac.type == e1000_82542 && 1942 adapter->hw.revision_id == E1000_REVISION_2) { 1943 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); 1944 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) 1945 e1000_pci_clear_mwi(&adapter->hw); 1946 reg_rctl |= E1000_RCTL_RST; 1947 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); 1948 msec_delay(5); 1949 } 1950 1951 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1952 if (ifma->ifma_addr->sa_family != AF_LINK) 1953 continue; 1954 1955 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES) 1956 break; 1957 1958 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 1959 &mta[mcnt * ETHER_ADDR_LEN], ETHER_ADDR_LEN); 1960 mcnt++; 1961 } 1962 1963 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) { 1964 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); 1965 reg_rctl |= E1000_RCTL_MPE; 1966 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); 1967 } else { 1968 e1000_update_mc_addr_list(&adapter->hw, mta, mcnt); 1969 } 1970 1971 if (adapter->hw.mac.type == e1000_82542 && 1972 adapter->hw.revision_id == E1000_REVISION_2) { 1973 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); 1974 reg_rctl &= ~E1000_RCTL_RST; 1975 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); 1976 msec_delay(5); 1977 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) 1978 e1000_pci_set_mwi(&adapter->hw); 1979 } 1980 } 1981 1982 /* 1983 * This routine checks for link status and updates statistics. 
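 * It runs once a second (rescheduled via callout_reset() with a period
 * of hz ticks) and, like the other entry points, executes while holding
 * the ifnet serializer.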
1984 */ 1985 static void 1986 em_timer(void *xsc) 1987 { 1988 struct adapter *adapter = xsc; 1989 struct ifnet *ifp = &adapter->arpcom.ac_if; 1990 1991 lwkt_serialize_enter(ifp->if_serializer); 1992 1993 em_update_link_status(adapter); 1994 em_update_stats(adapter); 1995 1996 /* Reset LAA into RAR[0] on 82571 */ 1997 if (e1000_get_laa_state_82571(&adapter->hw) == TRUE) 1998 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0); 1999 2000 if (em_display_debug_stats && (ifp->if_flags & IFF_RUNNING)) 2001 em_print_hw_stats(adapter); 2002 2003 em_smartspeed(adapter); 2004 2005 callout_reset(&adapter->timer, hz, em_timer, adapter); 2006 2007 lwkt_serialize_exit(ifp->if_serializer); 2008 } 2009 2010 static void 2011 em_update_link_status(struct adapter *adapter) 2012 { 2013 struct e1000_hw *hw = &adapter->hw; 2014 struct ifnet *ifp = &adapter->arpcom.ac_if; 2015 device_t dev = adapter->dev; 2016 uint32_t link_check = 0; 2017 2018 /* Get the cached link value or read phy for real */ 2019 switch (hw->phy.media_type) { 2020 case e1000_media_type_copper: 2021 if (hw->mac.get_link_status) { 2022 /* Do the work to read phy */ 2023 e1000_check_for_link(hw); 2024 link_check = !hw->mac.get_link_status; 2025 if (link_check) /* ESB2 fix */ 2026 e1000_cfg_on_link_up(hw); 2027 } else { 2028 link_check = TRUE; 2029 } 2030 break; 2031 2032 case e1000_media_type_fiber: 2033 e1000_check_for_link(hw); 2034 link_check = 2035 E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU; 2036 break; 2037 2038 case e1000_media_type_internal_serdes: 2039 e1000_check_for_link(hw); 2040 link_check = adapter->hw.mac.serdes_has_link; 2041 break; 2042 2043 case e1000_media_type_unknown: 2044 default: 2045 break; 2046 } 2047 2048 /* Now check for a transition */ 2049 if (link_check && adapter->link_active == 0) { 2050 e1000_get_speed_and_duplex(hw, &adapter->link_speed, 2051 &adapter->link_duplex); 2052 2053 /* 2054 * Check if we should enable/disable SPEED_MODE bit on 2055 * 82571/82572 2056 */ 2057 if (adapter->link_speed != SPEED_1000 && 2058 (hw->mac.type == e1000_82571 || 2059 hw->mac.type == e1000_82572)) { 2060 int tarc0; 2061 2062 tarc0 = E1000_READ_REG(hw, E1000_TARC(0)); 2063 tarc0 &= ~SPEED_MODE_BIT; 2064 E1000_WRITE_REG(hw, E1000_TARC(0), tarc0); 2065 } 2066 if (bootverbose) { 2067 device_printf(dev, "Link is up %d Mbps %s\n", 2068 adapter->link_speed, 2069 ((adapter->link_duplex == FULL_DUPLEX) ? 
2070 "Full Duplex" : "Half Duplex")); 2071 } 2072 adapter->link_active = 1; 2073 adapter->smartspeed = 0; 2074 ifp->if_baudrate = adapter->link_speed * 1000000; 2075 ifp->if_link_state = LINK_STATE_UP; 2076 if_link_state_change(ifp); 2077 } else if (!link_check && adapter->link_active == 1) { 2078 ifp->if_baudrate = adapter->link_speed = 0; 2079 adapter->link_duplex = 0; 2080 if (bootverbose) 2081 device_printf(dev, "Link is Down\n"); 2082 adapter->link_active = 0; 2083 #if 0 2084 /* Link down, disable watchdog */ 2085 if->if_timer = 0; 2086 #endif 2087 ifp->if_link_state = LINK_STATE_DOWN; 2088 if_link_state_change(ifp); 2089 } 2090 } 2091 2092 static void 2093 em_stop(struct adapter *adapter) 2094 { 2095 struct ifnet *ifp = &adapter->arpcom.ac_if; 2096 int i; 2097 2098 ASSERT_SERIALIZED(ifp->if_serializer); 2099 2100 em_disable_intr(adapter); 2101 2102 callout_stop(&adapter->timer); 2103 callout_stop(&adapter->tx_fifo_timer); 2104 2105 ifp->if_flags &= ~IFF_RUNNING; 2106 ifq_clr_oactive(&ifp->if_snd); 2107 ifp->if_timer = 0; 2108 2109 e1000_reset_hw(&adapter->hw); 2110 if (adapter->hw.mac.type >= e1000_82544) 2111 E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0); 2112 2113 for (i = 0; i < adapter->num_tx_desc; i++) { 2114 struct em_buffer *tx_buffer = &adapter->tx_buffer_area[i]; 2115 2116 if (tx_buffer->m_head != NULL) { 2117 bus_dmamap_unload(adapter->txtag, tx_buffer->map); 2118 m_freem(tx_buffer->m_head); 2119 tx_buffer->m_head = NULL; 2120 } 2121 } 2122 2123 for (i = 0; i < adapter->num_rx_desc; i++) { 2124 struct em_buffer *rx_buffer = &adapter->rx_buffer_area[i]; 2125 2126 if (rx_buffer->m_head != NULL) { 2127 bus_dmamap_unload(adapter->rxtag, rx_buffer->map); 2128 m_freem(rx_buffer->m_head); 2129 rx_buffer->m_head = NULL; 2130 } 2131 } 2132 2133 if (adapter->fmp != NULL) 2134 m_freem(adapter->fmp); 2135 adapter->fmp = NULL; 2136 adapter->lmp = NULL; 2137 2138 adapter->csum_flags = 0; 2139 adapter->csum_lhlen = 0; 2140 adapter->csum_iphlen = 0; 2141 adapter->csum_thlen = 0; 2142 adapter->csum_mss = 0; 2143 adapter->csum_pktlen = 0; 2144 2145 adapter->tx_dd_head = 0; 2146 adapter->tx_dd_tail = 0; 2147 adapter->tx_nsegs = 0; 2148 } 2149 2150 static int 2151 em_get_hw_info(struct adapter *adapter) 2152 { 2153 device_t dev = adapter->dev; 2154 2155 /* Save off the information about this board */ 2156 adapter->hw.vendor_id = pci_get_vendor(dev); 2157 adapter->hw.device_id = pci_get_device(dev); 2158 adapter->hw.revision_id = pci_get_revid(dev); 2159 adapter->hw.subsystem_vendor_id = pci_get_subvendor(dev); 2160 adapter->hw.subsystem_device_id = pci_get_subdevice(dev); 2161 2162 /* Do Shared Code Init and Setup */ 2163 if (e1000_set_mac_type(&adapter->hw)) 2164 return ENXIO; 2165 return 0; 2166 } 2167 2168 static int 2169 em_alloc_pci_res(struct adapter *adapter) 2170 { 2171 device_t dev = adapter->dev; 2172 u_int intr_flags; 2173 int val, rid, msi_enable; 2174 2175 /* Enable bus mastering */ 2176 pci_enable_busmaster(dev); 2177 2178 adapter->memory_rid = EM_BAR_MEM; 2179 adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 2180 &adapter->memory_rid, RF_ACTIVE); 2181 if (adapter->memory == NULL) { 2182 device_printf(dev, "Unable to allocate bus resource: memory\n"); 2183 return (ENXIO); 2184 } 2185 adapter->osdep.mem_bus_space_tag = 2186 rman_get_bustag(adapter->memory); 2187 adapter->osdep.mem_bus_space_handle = 2188 rman_get_bushandle(adapter->memory); 2189 2190 /* XXX This is quite goofy, it is not actually used */ 2191 adapter->hw.hw_addr = (uint8_t 
*)&adapter->osdep.mem_bus_space_handle; 2192 2193 /* Only older adapters use IO mapping */ 2194 if (adapter->hw.mac.type > e1000_82543 && 2195 adapter->hw.mac.type < e1000_82571) { 2196 /* Figure our where our IO BAR is ? */ 2197 for (rid = PCIR_BAR(0); rid < PCIR_CARDBUSCIS;) { 2198 val = pci_read_config(dev, rid, 4); 2199 if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) { 2200 adapter->io_rid = rid; 2201 break; 2202 } 2203 rid += 4; 2204 /* check for 64bit BAR */ 2205 if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT) 2206 rid += 4; 2207 } 2208 if (rid >= PCIR_CARDBUSCIS) { 2209 device_printf(dev, "Unable to locate IO BAR\n"); 2210 return (ENXIO); 2211 } 2212 adapter->ioport = bus_alloc_resource_any(dev, SYS_RES_IOPORT, 2213 &adapter->io_rid, RF_ACTIVE); 2214 if (adapter->ioport == NULL) { 2215 device_printf(dev, "Unable to allocate bus resource: " 2216 "ioport\n"); 2217 return (ENXIO); 2218 } 2219 adapter->hw.io_base = 0; 2220 adapter->osdep.io_bus_space_tag = 2221 rman_get_bustag(adapter->ioport); 2222 adapter->osdep.io_bus_space_handle = 2223 rman_get_bushandle(adapter->ioport); 2224 } 2225 2226 /* 2227 * Don't enable MSI-X on 82574, see: 2228 * 82574 specification update errata #15 2229 * 2230 * Don't enable MSI on PCI/PCI-X chips, see: 2231 * 82540 specification update errata #6 2232 * 82545 specification update errata #4 2233 * 2234 * Don't enable MSI on 82571/82572, see: 2235 * 82571/82572 specification update errata #63 2236 */ 2237 msi_enable = em_msi_enable; 2238 if (msi_enable && 2239 (!pci_is_pcie(dev) || 2240 adapter->hw.mac.type == e1000_82571 || 2241 adapter->hw.mac.type == e1000_82572)) 2242 msi_enable = 0; 2243 2244 adapter->intr_type = pci_alloc_1intr(dev, msi_enable, 2245 &adapter->intr_rid, &intr_flags); 2246 2247 if (adapter->intr_type == PCI_INTR_TYPE_LEGACY) { 2248 int unshared; 2249 2250 unshared = device_getenv_int(dev, "irq.unshared", 0); 2251 if (!unshared) { 2252 adapter->flags |= EM_FLAG_SHARED_INTR; 2253 if (bootverbose) 2254 device_printf(dev, "IRQ shared\n"); 2255 } else { 2256 intr_flags &= ~RF_SHAREABLE; 2257 if (bootverbose) 2258 device_printf(dev, "IRQ unshared\n"); 2259 } 2260 } 2261 2262 adapter->intr_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, 2263 &adapter->intr_rid, intr_flags); 2264 if (adapter->intr_res == NULL) { 2265 device_printf(dev, "Unable to allocate bus resource: " 2266 "interrupt\n"); 2267 return (ENXIO); 2268 } 2269 2270 adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2); 2271 adapter->hw.back = &adapter->osdep; 2272 return (0); 2273 } 2274 2275 static void 2276 em_free_pci_res(struct adapter *adapter) 2277 { 2278 device_t dev = adapter->dev; 2279 2280 if (adapter->intr_res != NULL) { 2281 bus_release_resource(dev, SYS_RES_IRQ, 2282 adapter->intr_rid, adapter->intr_res); 2283 } 2284 2285 if (adapter->intr_type == PCI_INTR_TYPE_MSI) 2286 pci_release_msi(dev); 2287 2288 if (adapter->memory != NULL) { 2289 bus_release_resource(dev, SYS_RES_MEMORY, 2290 adapter->memory_rid, adapter->memory); 2291 } 2292 2293 if (adapter->flash != NULL) { 2294 bus_release_resource(dev, SYS_RES_MEMORY, 2295 adapter->flash_rid, adapter->flash); 2296 } 2297 2298 if (adapter->ioport != NULL) { 2299 bus_release_resource(dev, SYS_RES_IOPORT, 2300 adapter->io_rid, adapter->ioport); 2301 } 2302 } 2303 2304 static int 2305 em_reset(struct adapter *adapter) 2306 { 2307 device_t dev = adapter->dev; 2308 uint16_t rx_buffer_size; 2309 uint32_t pba; 2310 2311 /* When hardware is reset, fifo_head is also reset */ 2312 adapter->tx_fifo_head = 0; 2313 2314 /* 
Set up smart power down as default off on newer adapters. */ 2315 if (!em_smart_pwr_down && 2316 (adapter->hw.mac.type == e1000_82571 || 2317 adapter->hw.mac.type == e1000_82572)) { 2318 uint16_t phy_tmp = 0; 2319 2320 /* Speed up time to link by disabling smart power down. */ 2321 e1000_read_phy_reg(&adapter->hw, 2322 IGP02E1000_PHY_POWER_MGMT, &phy_tmp); 2323 phy_tmp &= ~IGP02E1000_PM_SPD; 2324 e1000_write_phy_reg(&adapter->hw, 2325 IGP02E1000_PHY_POWER_MGMT, phy_tmp); 2326 } 2327 2328 /* 2329 * Packet Buffer Allocation (PBA) 2330 * Writing PBA sets the receive portion of the buffer 2331 * the remainder is used for the transmit buffer. 2332 * 2333 * Devices before the 82547 had a Packet Buffer of 64K. 2334 * Default allocation: PBA=48K for Rx, leaving 16K for Tx. 2335 * After the 82547 the buffer was reduced to 40K. 2336 * Default allocation: PBA=30K for Rx, leaving 10K for Tx. 2337 * Note: default does not leave enough room for Jumbo Frame >10k. 2338 */ 2339 switch (adapter->hw.mac.type) { 2340 case e1000_82547: 2341 case e1000_82547_rev_2: /* 82547: Total Packet Buffer is 40K */ 2342 if (adapter->max_frame_size > 8192) 2343 pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */ 2344 else 2345 pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */ 2346 adapter->tx_fifo_head = 0; 2347 adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT; 2348 adapter->tx_fifo_size = 2349 (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT; 2350 break; 2351 2352 /* Total Packet Buffer on these is 48K */ 2353 case e1000_82571: 2354 case e1000_82572: 2355 case e1000_80003es2lan: 2356 pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */ 2357 break; 2358 2359 case e1000_82573: /* 82573: Total Packet Buffer is 32K */ 2360 pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */ 2361 break; 2362 2363 case e1000_82574: 2364 case e1000_82583: 2365 pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */ 2366 break; 2367 2368 case e1000_ich8lan: 2369 pba = E1000_PBA_8K; 2370 break; 2371 2372 case e1000_ich9lan: 2373 case e1000_ich10lan: 2374 #define E1000_PBA_10K 0x000A 2375 pba = E1000_PBA_10K; 2376 break; 2377 2378 case e1000_pchlan: 2379 case e1000_pch2lan: 2380 pba = E1000_PBA_26K; 2381 break; 2382 2383 default: 2384 /* Devices before 82547 had a Packet Buffer of 64K. */ 2385 if (adapter->max_frame_size > 8192) 2386 pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */ 2387 else 2388 pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */ 2389 } 2390 E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba); 2391 2392 /* 2393 * These parameters control the automatic generation (Tx) and 2394 * response (Rx) to Ethernet PAUSE frames. 2395 * - High water mark should allow for at least two frames to be 2396 * received after sending an XOFF. 2397 * - Low water mark works best when it is very near the high water mark. 2398 * This allows the receiver to restart by sending XON when it has 2399 * drained a bit. Here we use an arbitary value of 1500 which will 2400 * restart after one full frame is pulled from the buffer. There 2401 * could be several smaller frames in the buffer and if so they will 2402 * not trigger the XON until their total number reduces the buffer 2403 * by 1500. 2404 * - The pause time is fairly large at 1000 x 512ns = 512 usec. 
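	 *
	 * As a worked example (illustrative numbers, assuming the default
	 * 48K Rx allocation and a standard 1518 byte max frame):
	 *
	 *	rx_buffer_size = 48 << 10 = 49152
	 *	high_water     = 49152 - roundup2(1518, 1024) = 47104
	 *	low_water      = 47104 - 1500 = 45604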
2405 */ 2406 rx_buffer_size = 2407 (E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff) << 10; 2408 2409 adapter->hw.fc.high_water = rx_buffer_size - 2410 roundup2(adapter->max_frame_size, 1024); 2411 adapter->hw.fc.low_water = adapter->hw.fc.high_water - 1500; 2412 2413 if (adapter->hw.mac.type == e1000_80003es2lan) 2414 adapter->hw.fc.pause_time = 0xFFFF; 2415 else 2416 adapter->hw.fc.pause_time = EM_FC_PAUSE_TIME; 2417 2418 adapter->hw.fc.send_xon = TRUE; 2419 2420 adapter->hw.fc.requested_mode = e1000_fc_full; 2421 2422 /* 2423 * Device specific overrides/settings 2424 */ 2425 switch (adapter->hw.mac.type) { 2426 case e1000_pchlan: 2427 /* Workaround: no TX flow ctrl for PCH */ 2428 adapter->hw.fc.requested_mode = e1000_fc_rx_pause; 2429 adapter->hw.fc.pause_time = 0xFFFF; /* override */ 2430 if (adapter->arpcom.ac_if.if_mtu > ETHERMTU) { 2431 adapter->hw.fc.high_water = 0x3500; 2432 adapter->hw.fc.low_water = 0x1500; 2433 } else { 2434 adapter->hw.fc.high_water = 0x5000; 2435 adapter->hw.fc.low_water = 0x3000; 2436 } 2437 adapter->hw.fc.refresh_time = 0x1000; 2438 break; 2439 2440 case e1000_pch2lan: 2441 adapter->hw.fc.high_water = 0x5C20; 2442 adapter->hw.fc.low_water = 0x5048; 2443 adapter->hw.fc.pause_time = 0x0650; 2444 adapter->hw.fc.refresh_time = 0x0400; 2445 /* Jumbos need adjusted PBA */ 2446 if (adapter->arpcom.ac_if.if_mtu > ETHERMTU) 2447 E1000_WRITE_REG(&adapter->hw, E1000_PBA, 12); 2448 else 2449 E1000_WRITE_REG(&adapter->hw, E1000_PBA, 26); 2450 break; 2451 2452 case e1000_ich9lan: 2453 case e1000_ich10lan: 2454 if (adapter->arpcom.ac_if.if_mtu > ETHERMTU) { 2455 adapter->hw.fc.high_water = 0x2800; 2456 adapter->hw.fc.low_water = 2457 adapter->hw.fc.high_water - 8; 2458 break; 2459 } 2460 /* FALL THROUGH */ 2461 default: 2462 if (adapter->hw.mac.type == e1000_80003es2lan) 2463 adapter->hw.fc.pause_time = 0xFFFF; 2464 break; 2465 } 2466 2467 /* Issue a global reset */ 2468 e1000_reset_hw(&adapter->hw); 2469 if (adapter->hw.mac.type >= e1000_82544) 2470 E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0); 2471 em_disable_aspm(adapter); 2472 2473 if (e1000_init_hw(&adapter->hw) < 0) { 2474 device_printf(dev, "Hardware Initialization Failed\n"); 2475 return (EIO); 2476 } 2477 2478 E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN); 2479 e1000_get_phy_info(&adapter->hw); 2480 e1000_check_for_link(&adapter->hw); 2481 2482 return (0); 2483 } 2484 2485 static void 2486 em_setup_ifp(struct adapter *adapter) 2487 { 2488 struct ifnet *ifp = &adapter->arpcom.ac_if; 2489 2490 if_initname(ifp, device_get_name(adapter->dev), 2491 device_get_unit(adapter->dev)); 2492 ifp->if_softc = adapter; 2493 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2494 ifp->if_init = em_init; 2495 ifp->if_ioctl = em_ioctl; 2496 ifp->if_start = em_start; 2497 #ifdef IFPOLL_ENABLE 2498 ifp->if_npoll = em_npoll; 2499 #endif 2500 ifp->if_watchdog = em_watchdog; 2501 ifq_set_maxlen(&ifp->if_snd, adapter->num_tx_desc - 1); 2502 ifq_set_ready(&ifp->if_snd); 2503 2504 ether_ifattach(ifp, adapter->hw.mac.addr, NULL); 2505 2506 ifp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU; 2507 if (adapter->hw.mac.type >= e1000_82543) 2508 ifp->if_capabilities |= IFCAP_HWCSUM; 2509 if (adapter->flags & EM_FLAG_TSO) 2510 ifp->if_capabilities |= IFCAP_TSO; 2511 ifp->if_capenable = ifp->if_capabilities; 2512 2513 if (ifp->if_capenable & IFCAP_TXCSUM) 2514 ifp->if_hwassist |= EM_CSUM_FEATURES; 2515 if (ifp->if_capenable & IFCAP_TSO) 2516 ifp->if_hwassist |= CSUM_TSO; 2517 2518 /* 2519 * Tell the upper layer(s) 
we support long frames. 2520 */ 2521 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 2522 2523 /* 2524 * Specify the media types supported by this adapter and register 2525 * callbacks to update media and link information 2526 */ 2527 ifmedia_init(&adapter->media, IFM_IMASK, 2528 em_media_change, em_media_status); 2529 if (adapter->hw.phy.media_type == e1000_media_type_fiber || 2530 adapter->hw.phy.media_type == e1000_media_type_internal_serdes) { 2531 u_char fiber_type = IFM_1000_SX; /* default type */ 2532 2533 if (adapter->hw.mac.type == e1000_82545) 2534 fiber_type = IFM_1000_LX; 2535 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX, 2536 0, NULL); 2537 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL); 2538 } else { 2539 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL); 2540 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX, 2541 0, NULL); 2542 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 2543 0, NULL); 2544 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 2545 0, NULL); 2546 if (adapter->hw.phy.type != e1000_phy_ife) { 2547 ifmedia_add(&adapter->media, 2548 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); 2549 ifmedia_add(&adapter->media, 2550 IFM_ETHER | IFM_1000_T, 0, NULL); 2551 } 2552 } 2553 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); 2554 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); 2555 } 2556 2557 2558 /* 2559 * Workaround for SmartSpeed on 82541 and 82547 controllers 2560 */ 2561 static void 2562 em_smartspeed(struct adapter *adapter) 2563 { 2564 uint16_t phy_tmp; 2565 2566 if (adapter->link_active || adapter->hw.phy.type != e1000_phy_igp || 2567 adapter->hw.mac.autoneg == 0 || 2568 (adapter->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0) 2569 return; 2570 2571 if (adapter->smartspeed == 0) { 2572 /* 2573 * If Master/Slave config fault is asserted twice, 2574 * we assume back-to-back 2575 */ 2576 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp); 2577 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT)) 2578 return; 2579 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp); 2580 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) { 2581 e1000_read_phy_reg(&adapter->hw, 2582 PHY_1000T_CTRL, &phy_tmp); 2583 if (phy_tmp & CR_1000T_MS_ENABLE) { 2584 phy_tmp &= ~CR_1000T_MS_ENABLE; 2585 e1000_write_phy_reg(&adapter->hw, 2586 PHY_1000T_CTRL, phy_tmp); 2587 adapter->smartspeed++; 2588 if (adapter->hw.mac.autoneg && 2589 !e1000_phy_setup_autoneg(&adapter->hw) && 2590 !e1000_read_phy_reg(&adapter->hw, 2591 PHY_CONTROL, &phy_tmp)) { 2592 phy_tmp |= MII_CR_AUTO_NEG_EN | 2593 MII_CR_RESTART_AUTO_NEG; 2594 e1000_write_phy_reg(&adapter->hw, 2595 PHY_CONTROL, phy_tmp); 2596 } 2597 } 2598 } 2599 return; 2600 } else if (adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) { 2601 /* If still no link, perhaps using 2/3 pair cable */ 2602 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp); 2603 phy_tmp |= CR_1000T_MS_ENABLE; 2604 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp); 2605 if (adapter->hw.mac.autoneg && 2606 !e1000_phy_setup_autoneg(&adapter->hw) && 2607 !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) { 2608 phy_tmp |= MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG; 2609 e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp); 2610 } 2611 } 2612 2613 /* Restart process after EM_SMARTSPEED_MAX iterations */ 2614 if (adapter->smartspeed++ == EM_SMARTSPEED_MAX) 2615 adapter->smartspeed = 0; 2616 } 2617 2618 static int 2619 em_dma_malloc(struct adapter *adapter, 
bus_size_t size, 2620 struct em_dma_alloc *dma) 2621 { 2622 dma->dma_vaddr = bus_dmamem_coherent_any(adapter->parent_dtag, 2623 EM_DBA_ALIGN, size, BUS_DMA_WAITOK, 2624 &dma->dma_tag, &dma->dma_map, 2625 &dma->dma_paddr); 2626 if (dma->dma_vaddr == NULL) 2627 return ENOMEM; 2628 else 2629 return 0; 2630 } 2631 2632 static void 2633 em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma) 2634 { 2635 if (dma->dma_tag == NULL) 2636 return; 2637 bus_dmamap_unload(dma->dma_tag, dma->dma_map); 2638 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); 2639 bus_dma_tag_destroy(dma->dma_tag); 2640 } 2641 2642 static int 2643 em_create_tx_ring(struct adapter *adapter) 2644 { 2645 device_t dev = adapter->dev; 2646 struct em_buffer *tx_buffer; 2647 int error, i; 2648 2649 adapter->tx_buffer_area = 2650 kmalloc(sizeof(struct em_buffer) * adapter->num_tx_desc, 2651 M_DEVBUF, M_WAITOK | M_ZERO); 2652 2653 /* 2654 * Create DMA tags for tx buffers 2655 */ 2656 error = bus_dma_tag_create(adapter->parent_dtag, /* parent */ 2657 1, 0, /* alignment, bounds */ 2658 BUS_SPACE_MAXADDR, /* lowaddr */ 2659 BUS_SPACE_MAXADDR, /* highaddr */ 2660 NULL, NULL, /* filter, filterarg */ 2661 EM_TSO_SIZE, /* maxsize */ 2662 EM_MAX_SCATTER, /* nsegments */ 2663 PAGE_SIZE, /* maxsegsize */ 2664 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | 2665 BUS_DMA_ONEBPAGE, /* flags */ 2666 &adapter->txtag); 2667 if (error) { 2668 device_printf(dev, "Unable to allocate TX DMA tag\n"); 2669 kfree(adapter->tx_buffer_area, M_DEVBUF); 2670 adapter->tx_buffer_area = NULL; 2671 return error; 2672 } 2673 2674 /* 2675 * Create DMA maps for tx buffers 2676 */ 2677 for (i = 0; i < adapter->num_tx_desc; i++) { 2678 tx_buffer = &adapter->tx_buffer_area[i]; 2679 2680 error = bus_dmamap_create(adapter->txtag, 2681 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, 2682 &tx_buffer->map); 2683 if (error) { 2684 device_printf(dev, "Unable to create TX DMA map\n"); 2685 em_destroy_tx_ring(adapter, i); 2686 return error; 2687 } 2688 } 2689 return (0); 2690 } 2691 2692 static void 2693 em_init_tx_ring(struct adapter *adapter) 2694 { 2695 /* Clear the old ring contents */ 2696 bzero(adapter->tx_desc_base, 2697 (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc); 2698 2699 /* Reset state */ 2700 adapter->next_avail_tx_desc = 0; 2701 adapter->next_tx_to_clean = 0; 2702 adapter->num_tx_desc_avail = adapter->num_tx_desc; 2703 } 2704 2705 static void 2706 em_init_tx_unit(struct adapter *adapter) 2707 { 2708 uint32_t tctl, tarc, tipg = 0; 2709 uint64_t bus_addr; 2710 2711 /* Setup the Base and Length of the Tx Descriptor Ring */ 2712 bus_addr = adapter->txdma.dma_paddr; 2713 E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(0), 2714 adapter->num_tx_desc * sizeof(struct e1000_tx_desc)); 2715 E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(0), 2716 (uint32_t)(bus_addr >> 32)); 2717 E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(0), 2718 (uint32_t)bus_addr); 2719 /* Setup the HW Tx Head and Tail descriptor pointers */ 2720 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0); 2721 E1000_WRITE_REG(&adapter->hw, E1000_TDH(0), 0); 2722 2723 /* Set the default values for the Tx Inter Packet Gap timer */ 2724 switch (adapter->hw.mac.type) { 2725 case e1000_82542: 2726 tipg = DEFAULT_82542_TIPG_IPGT; 2727 tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT; 2728 tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT; 2729 break; 2730 2731 case e1000_80003es2lan: 2732 tipg = DEFAULT_82543_TIPG_IPGR1; 2733 tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 << 2734 E1000_TIPG_IPGR2_SHIFT; 2735 break; 2736 
	default:
		if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
		    adapter->hw.phy.media_type ==
		    e1000_media_type_internal_serdes)
			tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
		else
			tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
		tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
		tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
		break;
	}

	E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg);

	/* NOTE: 0 is not allowed for TIDV */
	E1000_WRITE_REG(&adapter->hw, E1000_TIDV, 1);
	if (adapter->hw.mac.type >= e1000_82540)
		E1000_WRITE_REG(&adapter->hw, E1000_TADV, 0);

	if (adapter->hw.mac.type == e1000_82571 ||
	    adapter->hw.mac.type == e1000_82572) {
		tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0));
		tarc |= SPEED_MODE_BIT;
		E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
	} else if (adapter->hw.mac.type == e1000_80003es2lan) {
		tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0));
		tarc |= 1;
		E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
		tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(1));
		tarc |= 1;
		E1000_WRITE_REG(&adapter->hw, E1000_TARC(1), tarc);
	}

	/* Program the Transmit Control Register */
	tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
	    (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	if (adapter->hw.mac.type >= e1000_82571)
		tctl |= E1000_TCTL_MULR;

	/* This write will effectively turn on the transmit unit. */
	E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);

	if (adapter->hw.mac.type == e1000_82571 ||
	    adapter->hw.mac.type == e1000_82572 ||
	    adapter->hw.mac.type == e1000_80003es2lan) {
		/* Bit 28 of TARC1 must be cleared when MULR is enabled */
		tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(1));
		tarc &= ~(1 << 28);
		E1000_WRITE_REG(&adapter->hw, E1000_TARC(1), tarc);
	}
}

static void
em_destroy_tx_ring(struct adapter *adapter, int ndesc)
{
	struct em_buffer *tx_buffer;
	int i;

	if (adapter->tx_buffer_area == NULL)
		return;

	for (i = 0; i < ndesc; i++) {
		tx_buffer = &adapter->tx_buffer_area[i];

		KKASSERT(tx_buffer->m_head == NULL);
		bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
	}
	bus_dma_tag_destroy(adapter->txtag);

	kfree(adapter->tx_buffer_area, M_DEVBUF);
	adapter->tx_buffer_area = NULL;
}

/*
 * The offload context needs to be set when we transfer the first
 * packet of a particular protocol (TCP/UDP).  This routine has been
 * enhanced to deal with inserted VLAN headers.
 *
 * If the new packet's ether header length, ip header length and
 * csum offloading type are the same as the previous packet's, we
 * should avoid allocating a new csum context descriptor; mainly to
 * take advantage of the pipeline effect of the TX data read request.
 *
 * This function returns the number of TX descriptors allocated for
 * the csum context.
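 * (As an illustrative example: a burst of plain TCP segments all share
 * lhlen = 14 and iphlen = 20, so only the first segment in the burst
 * pays for a context descriptor; the rest return 0 here and reuse the
 * cached txd_upper/txd_lower values.)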
 */
static int
em_txcsum(struct adapter *adapter, struct mbuf *mp,
    uint32_t *txd_upper, uint32_t *txd_lower)
{
	struct e1000_context_desc *TXD;
	int curr_txd, ehdrlen, csum_flags;
	uint32_t cmd, hdr_len, ip_hlen;

	csum_flags = mp->m_pkthdr.csum_flags & EM_CSUM_FEATURES;
	ip_hlen = mp->m_pkthdr.csum_iphlen;
	ehdrlen = mp->m_pkthdr.csum_lhlen;

	if (adapter->csum_lhlen == ehdrlen &&
	    adapter->csum_iphlen == ip_hlen &&
	    adapter->csum_flags == csum_flags) {
		/*
		 * Same csum offload context as the previous packets;
		 * just return.
		 */
		*txd_upper = adapter->csum_txd_upper;
		*txd_lower = adapter->csum_txd_lower;
		return 0;
	}

	/*
	 * Setup a new csum offload context.
	 */

	curr_txd = adapter->next_avail_tx_desc;
	TXD = (struct e1000_context_desc *)&adapter->tx_desc_base[curr_txd];

	cmd = 0;

	/* Setup of IP header checksum. */
	if (csum_flags & CSUM_IP) {
		/*
		 * Start offset for header checksum calculation.
		 * End offset for header checksum calculation.
		 * Offset of place to put the checksum.
		 */
		TXD->lower_setup.ip_fields.ipcss = ehdrlen;
		TXD->lower_setup.ip_fields.ipcse =
		    htole16(ehdrlen + ip_hlen - 1);
		TXD->lower_setup.ip_fields.ipcso =
		    ehdrlen + offsetof(struct ip, ip_sum);
		cmd |= E1000_TXD_CMD_IP;
		*txd_upper |= E1000_TXD_POPTS_IXSM << 8;
	}
	hdr_len = ehdrlen + ip_hlen;

	if (csum_flags & CSUM_TCP) {
		/*
		 * Start offset for payload checksum calculation.
		 * End offset for payload checksum calculation.
		 * Offset of place to put the checksum.
		 */
		TXD->upper_setup.tcp_fields.tucss = hdr_len;
		TXD->upper_setup.tcp_fields.tucse = htole16(0);
		TXD->upper_setup.tcp_fields.tucso =
		    hdr_len + offsetof(struct tcphdr, th_sum);
		cmd |= E1000_TXD_CMD_TCP;
		*txd_upper |= E1000_TXD_POPTS_TXSM << 8;
	} else if (csum_flags & CSUM_UDP) {
		/*
		 * Start offset for payload checksum calculation.
		 * End offset for payload checksum calculation.
		 * Offset of place to put the checksum.
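		 *
		 * E.g. for a plain (untagged) UDP/IPv4 packet, ehdrlen = 14
		 * and ip_hlen = 20, so tucss = 34 and tucso = 34 +
		 * offsetof(struct udphdr, uh_sum) = 40; illustrative
		 * values only.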
2893 */ 2894 TXD->upper_setup.tcp_fields.tucss = hdr_len; 2895 TXD->upper_setup.tcp_fields.tucse = htole16(0); 2896 TXD->upper_setup.tcp_fields.tucso = 2897 hdr_len + offsetof(struct udphdr, uh_sum); 2898 *txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2899 } 2900 2901 *txd_lower = E1000_TXD_CMD_DEXT | /* Extended descr type */ 2902 E1000_TXD_DTYP_D; /* Data descr */ 2903 2904 /* Save the information for this csum offloading context */ 2905 adapter->csum_lhlen = ehdrlen; 2906 adapter->csum_iphlen = ip_hlen; 2907 adapter->csum_flags = csum_flags; 2908 adapter->csum_txd_upper = *txd_upper; 2909 adapter->csum_txd_lower = *txd_lower; 2910 2911 TXD->tcp_seg_setup.data = htole32(0); 2912 TXD->cmd_and_length = 2913 htole32(E1000_TXD_CMD_IFCS | E1000_TXD_CMD_DEXT | cmd); 2914 2915 if (++curr_txd == adapter->num_tx_desc) 2916 curr_txd = 0; 2917 2918 KKASSERT(adapter->num_tx_desc_avail > 0); 2919 adapter->num_tx_desc_avail--; 2920 2921 adapter->next_avail_tx_desc = curr_txd; 2922 return 1; 2923 } 2924 2925 static void 2926 em_txeof(struct adapter *adapter) 2927 { 2928 struct ifnet *ifp = &adapter->arpcom.ac_if; 2929 struct em_buffer *tx_buffer; 2930 int first, num_avail; 2931 2932 if (adapter->tx_dd_head == adapter->tx_dd_tail) 2933 return; 2934 2935 if (adapter->num_tx_desc_avail == adapter->num_tx_desc) 2936 return; 2937 2938 num_avail = adapter->num_tx_desc_avail; 2939 first = adapter->next_tx_to_clean; 2940 2941 while (adapter->tx_dd_head != adapter->tx_dd_tail) { 2942 struct e1000_tx_desc *tx_desc; 2943 int dd_idx = adapter->tx_dd[adapter->tx_dd_head]; 2944 2945 tx_desc = &adapter->tx_desc_base[dd_idx]; 2946 if (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) { 2947 EM_INC_TXDD_IDX(adapter->tx_dd_head); 2948 2949 if (++dd_idx == adapter->num_tx_desc) 2950 dd_idx = 0; 2951 2952 while (first != dd_idx) { 2953 logif(pkt_txclean); 2954 2955 num_avail++; 2956 2957 tx_buffer = &adapter->tx_buffer_area[first]; 2958 if (tx_buffer->m_head) { 2959 ifp->if_opackets++; 2960 bus_dmamap_unload(adapter->txtag, 2961 tx_buffer->map); 2962 m_freem(tx_buffer->m_head); 2963 tx_buffer->m_head = NULL; 2964 } 2965 2966 if (++first == adapter->num_tx_desc) 2967 first = 0; 2968 } 2969 } else { 2970 break; 2971 } 2972 } 2973 adapter->next_tx_to_clean = first; 2974 adapter->num_tx_desc_avail = num_avail; 2975 2976 if (adapter->tx_dd_head == adapter->tx_dd_tail) { 2977 adapter->tx_dd_head = 0; 2978 adapter->tx_dd_tail = 0; 2979 } 2980 2981 if (!EM_IS_OACTIVE(adapter)) { 2982 ifq_clr_oactive(&ifp->if_snd); 2983 2984 /* All clean, turn off the timer */ 2985 if (adapter->num_tx_desc_avail == adapter->num_tx_desc) 2986 ifp->if_timer = 0; 2987 } 2988 } 2989 2990 static void 2991 em_tx_collect(struct adapter *adapter) 2992 { 2993 struct ifnet *ifp = &adapter->arpcom.ac_if; 2994 struct em_buffer *tx_buffer; 2995 int tdh, first, num_avail, dd_idx = -1; 2996 2997 if (adapter->num_tx_desc_avail == adapter->num_tx_desc) 2998 return; 2999 3000 tdh = E1000_READ_REG(&adapter->hw, E1000_TDH(0)); 3001 if (tdh == adapter->next_tx_to_clean) 3002 return; 3003 3004 if (adapter->tx_dd_head != adapter->tx_dd_tail) 3005 dd_idx = adapter->tx_dd[adapter->tx_dd_head]; 3006 3007 num_avail = adapter->num_tx_desc_avail; 3008 first = adapter->next_tx_to_clean; 3009 3010 while (first != tdh) { 3011 logif(pkt_txclean); 3012 3013 num_avail++; 3014 3015 tx_buffer = &adapter->tx_buffer_area[first]; 3016 if (tx_buffer->m_head) { 3017 ifp->if_opackets++; 3018 bus_dmamap_unload(adapter->txtag, 3019 tx_buffer->map); 3020 m_freem(tx_buffer->m_head); 3021 
			tx_buffer->m_head = NULL;
		}

		if (first == dd_idx) {
			EM_INC_TXDD_IDX(adapter->tx_dd_head);
			if (adapter->tx_dd_head == adapter->tx_dd_tail) {
				adapter->tx_dd_head = 0;
				adapter->tx_dd_tail = 0;
				dd_idx = -1;
			} else {
				dd_idx = adapter->tx_dd[adapter->tx_dd_head];
			}
		}

		if (++first == adapter->num_tx_desc)
			first = 0;
	}
	adapter->next_tx_to_clean = first;
	adapter->num_tx_desc_avail = num_avail;

	if (!EM_IS_OACTIVE(adapter)) {
		ifq_clr_oactive(&ifp->if_snd);

		/* All clean, turn off the timer */
		if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
			ifp->if_timer = 0;
	}
}

/*
 * When link is lost there is sometimes work still pending in the TX
 * ring, which would result in a watchdog.  Rather than allowing that,
 * do an attempted cleanup and then reinit here.  Note that this has
 * been seen mostly with fiber adapters.
 */
static void
em_tx_purge(struct adapter *adapter)
{
	struct ifnet *ifp = &adapter->arpcom.ac_if;

	if (!adapter->link_active && ifp->if_timer) {
		em_tx_collect(adapter);
		if (ifp->if_timer) {
			if_printf(ifp, "Link lost, TX pending, reinit\n");
			ifp->if_timer = 0;
			em_init(adapter);
		}
	}
}

static int
em_newbuf(struct adapter *adapter, int i, int init)
{
	struct mbuf *m;
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	struct em_buffer *rx_buffer;
	int error, nseg;

	m = m_getcl(init ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL) {
		adapter->mbuf_cluster_failed++;
		if (init) {
			if_printf(&adapter->arpcom.ac_if,
			    "Unable to allocate RX mbuf\n");
		}
		return (ENOBUFS);
	}
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	if (adapter->max_frame_size <= MCLBYTES - ETHER_ALIGN)
		m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_segment(adapter->rxtag,
	    adapter->rx_sparemap, m,
	    &seg, 1, &nseg, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		if (init) {
			if_printf(&adapter->arpcom.ac_if,
			    "Unable to load RX mbuf\n");
		}
		return (error);
	}

	rx_buffer = &adapter->rx_buffer_area[i];
	if (rx_buffer->m_head != NULL)
		bus_dmamap_unload(adapter->rxtag, rx_buffer->map);

	map = rx_buffer->map;
	rx_buffer->map = adapter->rx_sparemap;
	adapter->rx_sparemap = map;

	rx_buffer->m_head = m;

	adapter->rx_desc_base[i].buffer_addr = htole64(seg.ds_addr);
	return (0);
}

static int
em_create_rx_ring(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	struct em_buffer *rx_buffer;
	int i, error;

	adapter->rx_buffer_area =
	    kmalloc(sizeof(struct em_buffer) * adapter->num_rx_desc,
		M_DEVBUF, M_WAITOK | M_ZERO);

	/*
	 * Create DMA tag for rx buffers
	 */
	error = bus_dma_tag_create(adapter->parent_dtag, /* parent */
	    1, 0,			/* alignment, bounds */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, /* flags */
	    &adapter->rxtag);
	if (error) {
		device_printf(dev, "Unable to allocate RX DMA tag\n");
		kfree(adapter->rx_buffer_area, M_DEVBUF);
		adapter->rx_buffer_area = NULL;
		return error;
	}

	/*
	 * Create spare DMA map for rx buffers
	 */
	error = bus_dmamap_create(adapter->rxtag, BUS_DMA_WAITOK,
	    &adapter->rx_sparemap);
	if (error) {
		device_printf(dev, "Unable to create spare RX DMA map\n");
		bus_dma_tag_destroy(adapter->rxtag);
		kfree(adapter->rx_buffer_area, M_DEVBUF);
		adapter->rx_buffer_area = NULL;
		return error;
	}

	/*
	 * Create DMA maps for rx buffers
	 */
	for (i = 0; i < adapter->num_rx_desc; i++) {
		rx_buffer = &adapter->rx_buffer_area[i];

		error = bus_dmamap_create(adapter->rxtag, BUS_DMA_WAITOK,
		    &rx_buffer->map);
		if (error) {
			device_printf(dev, "Unable to create RX DMA map\n");
			em_destroy_rx_ring(adapter, i);
			return error;
		}
	}
	return (0);
}

static int
em_init_rx_ring(struct adapter *adapter)
{
	int i, error;

	/* Reset descriptor ring */
	bzero(adapter->rx_desc_base,
	    (sizeof(struct e1000_rx_desc)) * adapter->num_rx_desc);

	/* Allocate new ones. */
	for (i = 0; i < adapter->num_rx_desc; i++) {
		error = em_newbuf(adapter, i, 1);
		if (error)
			return (error);
	}

	/* Setup our descriptor pointers */
	adapter->next_rx_desc_to_check = 0;

	return (0);
}

static void
em_init_rx_unit(struct adapter *adapter)
{
	struct ifnet *ifp = &adapter->arpcom.ac_if;
	uint64_t bus_addr;
	uint32_t rctl;

	/*
	 * Make sure receives are disabled while setting
	 * up the descriptor ring
	 */
	rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);

	if (adapter->hw.mac.type >= e1000_82540) {
		uint32_t itr;

		/*
		 * Set the interrupt throttling rate.  Value is calculated
		 * as ITR = 1 / (INT_THROTTLE_CEIL * 256ns)
		 */
		if (adapter->int_throttle_ceil)
			itr = 1000000000 / 256 / adapter->int_throttle_ceil;
		else
			itr = 0;
		em_set_itr(adapter, itr);
	}

	/* Disable accelerated acknowledge */
	if (adapter->hw.mac.type == e1000_82574) {
		E1000_WRITE_REG(&adapter->hw,
		    E1000_RFCTL, E1000_RFCTL_ACK_DIS);
	}

	/* Receive Checksum Offload for TCP and UDP */
	if (ifp->if_capenable & IFCAP_RXCSUM) {
		uint32_t rxcsum;

		rxcsum = E1000_READ_REG(&adapter->hw, E1000_RXCSUM);
		rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
		E1000_WRITE_REG(&adapter->hw, E1000_RXCSUM, rxcsum);
	}

	/*
	 * XXX TEMPORARY WORKAROUND: on some systems with 82573
	 * long latencies are observed, like Lenovo X60.  This
	 * change eliminates the problem, but since having positive
	 * values in RDTR is a known source of problems on other
	 * platforms another solution is being sought.
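	 *
	 * (The workaround below simply pins the Rx absolute and packet
	 * delay timers, RADV and RDTR, to fixed driver-chosen constants
	 * on the 82573.)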
3252 */ 3253 if (em_82573_workaround && adapter->hw.mac.type == e1000_82573) { 3254 E1000_WRITE_REG(&adapter->hw, E1000_RADV, EM_RADV_82573); 3255 E1000_WRITE_REG(&adapter->hw, E1000_RDTR, EM_RDTR_82573); 3256 } 3257 3258 /* 3259 * Setup the Base and Length of the Rx Descriptor Ring 3260 */ 3261 bus_addr = adapter->rxdma.dma_paddr; 3262 E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(0), 3263 adapter->num_rx_desc * sizeof(struct e1000_rx_desc)); 3264 E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(0), 3265 (uint32_t)(bus_addr >> 32)); 3266 E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(0), 3267 (uint32_t)bus_addr); 3268 3269 /* 3270 * Setup the HW Rx Head and Tail Descriptor Pointers 3271 */ 3272 E1000_WRITE_REG(&adapter->hw, E1000_RDH(0), 0); 3273 E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), adapter->num_rx_desc - 1); 3274 3275 /* Set PTHRESH for improved jumbo performance */ 3276 if (((adapter->hw.mac.type == e1000_ich9lan) || 3277 (adapter->hw.mac.type == e1000_pch2lan) || 3278 (adapter->hw.mac.type == e1000_ich10lan)) && 3279 (ifp->if_mtu > ETHERMTU)) { 3280 uint32_t rxdctl; 3281 3282 rxdctl = E1000_READ_REG(&adapter->hw, E1000_RXDCTL(0)); 3283 E1000_WRITE_REG(&adapter->hw, E1000_RXDCTL(0), rxdctl | 3); 3284 } 3285 3286 if (adapter->hw.mac.type == e1000_pch2lan) { 3287 if (ifp->if_mtu > ETHERMTU) 3288 e1000_lv_jumbo_workaround_ich8lan(&adapter->hw, TRUE); 3289 else 3290 e1000_lv_jumbo_workaround_ich8lan(&adapter->hw, FALSE); 3291 } 3292 3293 /* Setup the Receive Control Register */ 3294 rctl &= ~(3 << E1000_RCTL_MO_SHIFT); 3295 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO | 3296 E1000_RCTL_RDMTS_HALF | 3297 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); 3298 3299 /* Make sure VLAN Filters are off */ 3300 rctl &= ~E1000_RCTL_VFE; 3301 3302 if (e1000_tbi_sbp_enabled_82543(&adapter->hw)) 3303 rctl |= E1000_RCTL_SBP; 3304 else 3305 rctl &= ~E1000_RCTL_SBP; 3306 3307 switch (adapter->rx_buffer_len) { 3308 default: 3309 case 2048: 3310 rctl |= E1000_RCTL_SZ_2048; 3311 break; 3312 3313 case 4096: 3314 rctl |= E1000_RCTL_SZ_4096 | 3315 E1000_RCTL_BSEX | E1000_RCTL_LPE; 3316 break; 3317 3318 case 8192: 3319 rctl |= E1000_RCTL_SZ_8192 | 3320 E1000_RCTL_BSEX | E1000_RCTL_LPE; 3321 break; 3322 3323 case 16384: 3324 rctl |= E1000_RCTL_SZ_16384 | 3325 E1000_RCTL_BSEX | E1000_RCTL_LPE; 3326 break; 3327 } 3328 3329 if (ifp->if_mtu > ETHERMTU) 3330 rctl |= E1000_RCTL_LPE; 3331 else 3332 rctl &= ~E1000_RCTL_LPE; 3333 3334 /* Enable Receives */ 3335 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl); 3336 } 3337 3338 static void 3339 em_destroy_rx_ring(struct adapter *adapter, int ndesc) 3340 { 3341 struct em_buffer *rx_buffer; 3342 int i; 3343 3344 if (adapter->rx_buffer_area == NULL) 3345 return; 3346 3347 for (i = 0; i < ndesc; i++) { 3348 rx_buffer = &adapter->rx_buffer_area[i]; 3349 3350 KKASSERT(rx_buffer->m_head == NULL); 3351 bus_dmamap_destroy(adapter->rxtag, rx_buffer->map); 3352 } 3353 bus_dmamap_destroy(adapter->rxtag, adapter->rx_sparemap); 3354 bus_dma_tag_destroy(adapter->rxtag); 3355 3356 kfree(adapter->rx_buffer_area, M_DEVBUF); 3357 adapter->rx_buffer_area = NULL; 3358 } 3359 3360 static void 3361 em_rxeof(struct adapter *adapter, int count) 3362 { 3363 struct ifnet *ifp = &adapter->arpcom.ac_if; 3364 uint8_t status, accept_frame = 0, eop = 0; 3365 uint16_t len, desc_len, prev_len_adj; 3366 struct e1000_rx_desc *current_desc; 3367 struct mbuf *mp; 3368 int i; 3369 3370 i = adapter->next_rx_desc_to_check; 3371 current_desc = &adapter->rx_desc_base[i]; 3372 3373 if 
(!(current_desc->status & E1000_RXD_STAT_DD)) 3374 return; 3375 3376 while ((current_desc->status & E1000_RXD_STAT_DD) && count != 0) { 3377 struct mbuf *m = NULL; 3378 3379 logif(pkt_receive); 3380 3381 mp = adapter->rx_buffer_area[i].m_head; 3382 3383 /* 3384 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT 3385 * needs to access the last received byte in the mbuf. 3386 */ 3387 bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map, 3388 BUS_DMASYNC_POSTREAD); 3389 3390 accept_frame = 1; 3391 prev_len_adj = 0; 3392 desc_len = le16toh(current_desc->length); 3393 status = current_desc->status; 3394 if (status & E1000_RXD_STAT_EOP) { 3395 count--; 3396 eop = 1; 3397 if (desc_len < ETHER_CRC_LEN) { 3398 len = 0; 3399 prev_len_adj = ETHER_CRC_LEN - desc_len; 3400 } else { 3401 len = desc_len - ETHER_CRC_LEN; 3402 } 3403 } else { 3404 eop = 0; 3405 len = desc_len; 3406 } 3407 3408 if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) { 3409 uint8_t last_byte; 3410 uint32_t pkt_len = desc_len; 3411 3412 if (adapter->fmp != NULL) 3413 pkt_len += adapter->fmp->m_pkthdr.len; 3414 3415 last_byte = *(mtod(mp, caddr_t) + desc_len - 1); 3416 if (TBI_ACCEPT(&adapter->hw, status, 3417 current_desc->errors, pkt_len, last_byte, 3418 adapter->min_frame_size, adapter->max_frame_size)) { 3419 e1000_tbi_adjust_stats_82543(&adapter->hw, 3420 &adapter->stats, pkt_len, 3421 adapter->hw.mac.addr, 3422 adapter->max_frame_size); 3423 if (len > 0) 3424 len--; 3425 } else { 3426 accept_frame = 0; 3427 } 3428 } 3429 3430 if (accept_frame) { 3431 if (em_newbuf(adapter, i, 0) != 0) { 3432 ifp->if_iqdrops++; 3433 goto discard; 3434 } 3435 3436 /* Assign correct length to the current fragment */ 3437 mp->m_len = len; 3438 3439 if (adapter->fmp == NULL) { 3440 mp->m_pkthdr.len = len; 3441 adapter->fmp = mp; /* Store the first mbuf */ 3442 adapter->lmp = mp; 3443 } else { 3444 /* 3445 * Chain mbuf's together 3446 */ 3447 3448 /* 3449 * Adjust length of previous mbuf in chain if 3450 * we received less than 4 bytes in the last 3451 * descriptor. 3452 */ 3453 if (prev_len_adj > 0) { 3454 adapter->lmp->m_len -= prev_len_adj; 3455 adapter->fmp->m_pkthdr.len -= 3456 prev_len_adj; 3457 } 3458 adapter->lmp->m_next = mp; 3459 adapter->lmp = adapter->lmp->m_next; 3460 adapter->fmp->m_pkthdr.len += len; 3461 } 3462 3463 if (eop) { 3464 adapter->fmp->m_pkthdr.rcvif = ifp; 3465 ifp->if_ipackets++; 3466 3467 if (ifp->if_capenable & IFCAP_RXCSUM) { 3468 em_rxcsum(adapter, current_desc, 3469 adapter->fmp); 3470 } 3471 3472 if (status & E1000_RXD_STAT_VP) { 3473 adapter->fmp->m_pkthdr.ether_vlantag = 3474 (le16toh(current_desc->special) & 3475 E1000_RXD_SPC_VLAN_MASK); 3476 adapter->fmp->m_flags |= M_VLANTAG; 3477 } 3478 m = adapter->fmp; 3479 adapter->fmp = NULL; 3480 adapter->lmp = NULL; 3481 } 3482 } else { 3483 ifp->if_ierrors++; 3484 discard: 3485 #ifdef foo 3486 /* Reuse loaded DMA map and just update mbuf chain */ 3487 mp = adapter->rx_buffer_area[i].m_head; 3488 mp->m_len = mp->m_pkthdr.len = MCLBYTES; 3489 mp->m_data = mp->m_ext.ext_buf; 3490 mp->m_next = NULL; 3491 if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN)) 3492 m_adj(mp, ETHER_ALIGN); 3493 #endif 3494 if (adapter->fmp != NULL) { 3495 m_freem(adapter->fmp); 3496 adapter->fmp = NULL; 3497 adapter->lmp = NULL; 3498 } 3499 m = NULL; 3500 } 3501 3502 /* Zero out the receive descriptors status. */ 3503 current_desc->status = 0; 3504 3505 if (m != NULL) 3506 ifp->if_input(ifp, m); 3507 3508 /* Advance our pointers to the next descriptor. 
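		 * The ring index wraps modulo num_rx_desc; the hardware
		 * tail pointer (RDT) is only pushed back to the chip after
		 * the loop, one slot behind the last descriptor processed.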
		 */
		if (++i == adapter->num_rx_desc)
			i = 0;
		current_desc = &adapter->rx_desc_base[i];
	}
	adapter->next_rx_desc_to_check = i;

	/* Advance the E1000's Receive Queue #0 "Tail Pointer". */
	if (--i < 0)
		i = adapter->num_rx_desc - 1;
	E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), i);
}

static void
em_rxcsum(struct adapter *adapter, struct e1000_rx_desc *rx_desc,
    struct mbuf *mp)
{
	/* 82543 or newer only */
	if (adapter->hw.mac.type < e1000_82543 ||
	    /* Ignore Checksum bit is set */
	    (rx_desc->status & E1000_RXD_STAT_IXSM))
		return;

	if ((rx_desc->status & E1000_RXD_STAT_IPCS) &&
	    !(rx_desc->errors & E1000_RXD_ERR_IPE)) {
		/* IP Checksum Good */
		mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;
	}

	if ((rx_desc->status & E1000_RXD_STAT_TCPCS) &&
	    !(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
		mp->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
		    CSUM_PSEUDO_HDR |
		    CSUM_FRAG_NOT_CHECKED;
		mp->m_pkthdr.csum_data = htons(0xffff);
	}
}

static void
em_enable_intr(struct adapter *adapter)
{
	uint32_t ims_mask = IMS_ENABLE_MASK;

	lwkt_serialize_handler_enable(adapter->arpcom.ac_if.if_serializer);

#if 0
	/* XXX MSIX */
	if (adapter->hw.mac.type == e1000_82574) {
		E1000_WRITE_REG(&adapter->hw, EM_EIAC, EM_MSIX_MASK);
		ims_mask |= EM_MSIX_MASK;
	}
#endif
	E1000_WRITE_REG(&adapter->hw, E1000_IMS, ims_mask);
}

static void
em_disable_intr(struct adapter *adapter)
{
	uint32_t clear = 0xffffffff;

	/*
	 * The first version of the 82542 had an erratum whereby, when link
	 * was forced, it would stay up even if the cable was disconnected.
	 * Sequence errors were used to detect the disconnect and then the
	 * driver would unforce the link.  This code is in the ISR.  For
	 * this to work correctly the Sequence error interrupt had to be
	 * enabled all the time.
	 */
	if (adapter->hw.mac.type == e1000_82542 &&
	    adapter->hw.revision_id == E1000_REVISION_2)
		clear &= ~E1000_ICR_RXSEQ;
	else if (adapter->hw.mac.type == e1000_82574)
		E1000_WRITE_REG(&adapter->hw, EM_EIAC, 0);

	E1000_WRITE_REG(&adapter->hw, E1000_IMC, clear);

	adapter->npoll.ifpc_stcount = 0;

	lwkt_serialize_handler_disable(adapter->arpcom.ac_if.if_serializer);
}

/*
 * Bit of a misnomer: what this really means is
 * to enable OS management of the system...
aka 3592 * to disable special hardware management features 3593 */ 3594 static void 3595 em_get_mgmt(struct adapter *adapter) 3596 { 3597 /* A shared code workaround */ 3598 #define E1000_82542_MANC2H E1000_MANC2H 3599 if (adapter->flags & EM_FLAG_HAS_MGMT) { 3600 int manc2h = E1000_READ_REG(&adapter->hw, E1000_MANC2H); 3601 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC); 3602 3603 /* disable hardware interception of ARP */ 3604 manc &= ~(E1000_MANC_ARP_EN); 3605 3606 /* enable receiving management packets to the host */ 3607 if (adapter->hw.mac.type >= e1000_82571) { 3608 manc |= E1000_MANC_EN_MNG2HOST; 3609 #define E1000_MNG2HOST_PORT_623 (1 << 5) 3610 #define E1000_MNG2HOST_PORT_664 (1 << 6) 3611 manc2h |= E1000_MNG2HOST_PORT_623; 3612 manc2h |= E1000_MNG2HOST_PORT_664; 3613 E1000_WRITE_REG(&adapter->hw, E1000_MANC2H, manc2h); 3614 } 3615 3616 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc); 3617 } 3618 } 3619 3620 /* 3621 * Give control back to hardware management 3622 * controller if there is one. 3623 */ 3624 static void 3625 em_rel_mgmt(struct adapter *adapter) 3626 { 3627 if (adapter->flags & EM_FLAG_HAS_MGMT) { 3628 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC); 3629 3630 /* re-enable hardware interception of ARP */ 3631 manc |= E1000_MANC_ARP_EN; 3632 3633 if (adapter->hw.mac.type >= e1000_82571) 3634 manc &= ~E1000_MANC_EN_MNG2HOST; 3635 3636 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc); 3637 } 3638 } 3639 3640 /* 3641 * em_get_hw_control() sets {CTRL_EXT|FWSM}:DRV_LOAD bit. 3642 * For ASF and Pass Through versions of f/w this means that 3643 * the driver is loaded. For AMT version (only with 82573) 3644 * of the f/w this means that the network i/f is open. 3645 */ 3646 static void 3647 em_get_hw_control(struct adapter *adapter) 3648 { 3649 /* Let firmware know the driver has taken over */ 3650 if (adapter->hw.mac.type == e1000_82573) { 3651 uint32_t swsm; 3652 3653 swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM); 3654 E1000_WRITE_REG(&adapter->hw, E1000_SWSM, 3655 swsm | E1000_SWSM_DRV_LOAD); 3656 } else { 3657 uint32_t ctrl_ext; 3658 3659 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT); 3660 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, 3661 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); 3662 } 3663 adapter->flags |= EM_FLAG_HW_CTRL; 3664 } 3665 3666 /* 3667 * em_rel_hw_control() resets {CTRL_EXT|FWSM}:DRV_LOAD bit. 3668 * For ASF and Pass Through versions of f/w this means that the 3669 * driver is no longer loaded. For AMT version (only with 82573) 3670 * of the f/w this means that the network i/f is closed. 
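 * (This is the counterpart of em_get_hw_control() above; the
 * EM_FLAG_HW_CTRL check below keeps the get/release calls balanced.)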
 */
static void
em_rel_hw_control(struct adapter *adapter)
{
	if ((adapter->flags & EM_FLAG_HW_CTRL) == 0)
		return;
	adapter->flags &= ~EM_FLAG_HW_CTRL;

	/* Let firmware take over control of h/w */
	if (adapter->hw.mac.type == e1000_82573) {
		uint32_t swsm;

		swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM);
		E1000_WRITE_REG(&adapter->hw, E1000_SWSM,
		    swsm & ~E1000_SWSM_DRV_LOAD);
	} else {
		uint32_t ctrl_ext;

		ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
		E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
		    ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
	}
}

static int
em_is_valid_eaddr(const uint8_t *addr)
{
	char zero_addr[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };

	if ((addr[0] & 1) || !bcmp(addr, zero_addr, ETHER_ADDR_LEN))
		return (FALSE);

	return (TRUE);
}

/*
 * Enable PCI Wake On Lan capability
 */
void
em_enable_wol(device_t dev)
{
	uint16_t cap, status;
	uint8_t id;

	/* First find the capabilities pointer */
	cap = pci_read_config(dev, PCIR_CAP_PTR, 2);

	/* Read the PM Capabilities */
	id = pci_read_config(dev, cap, 1);
	if (id != PCIY_PMG)	/* Something wrong */
		return;

	/*
	 * OK, we have the power capabilities,
	 * so now get the status register
	 */
	cap += PCIR_POWER_STATUS;
	status = pci_read_config(dev, cap, 2);
	status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(dev, cap, status, 2);
}

/*
 * 82544 Coexistence issue workaround.
 * There are 2 issues.
 * 1. Transmit Hang issue.
 *    To detect this issue, the following equation can be used:
 *    SIZE[3:0] + ADDR[2:0] = SUM[3:0].
 *    If SUM[3:0] is between 1 and 4, we will have this issue.
 *
 * 2. DAC issue.
 *    To detect this issue, the following equation can be used:
 *    SIZE[3:0] + ADDR[2:0] = SUM[3:0].
 *    If SUM[3:0] is between 9 and c, we will have this issue.
 *
 * WORKAROUND:
 * Make sure we do not have an ending address
 * of 1,2,3,4 (Hang) or 9,a,b,c (DAC).
 */
static uint32_t
em_82544_fill_desc(bus_addr_t address, uint32_t length, PDESC_ARRAY desc_array)
{
	uint32_t safe_terminator;

	/*
	 * Since the issue is sensitive to length and address,
	 * let us first check the address...
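	 *
	 * As an illustrative example: a segment whose address ends in 0x7
	 * with length 13 gives ((0x7 + 0xD) & 0xF) = 0x4, which lands in
	 * the forbidden 1-4 range, so the code below splits it into a
	 * (length - 4) byte descriptor plus a trailing 4 byte descriptor.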
3759 */ 3760 if (length <= 4) { 3761 desc_array->descriptor[0].address = address; 3762 desc_array->descriptor[0].length = length; 3763 desc_array->elements = 1; 3764 return (desc_array->elements); 3765 } 3766 3767 safe_terminator = 3768 (uint32_t)((((uint32_t)address & 0x7) + (length & 0xF)) & 0xF); 3769 3770 /* If it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */ 3771 if (safe_terminator == 0 || 3772 (safe_terminator > 4 && safe_terminator < 9) || 3773 (safe_terminator > 0xC && safe_terminator <= 0xF)) { 3774 desc_array->descriptor[0].address = address; 3775 desc_array->descriptor[0].length = length; 3776 desc_array->elements = 1; 3777 return (desc_array->elements); 3778 } 3779 3780 desc_array->descriptor[0].address = address; 3781 desc_array->descriptor[0].length = length - 4; 3782 desc_array->descriptor[1].address = address + (length - 4); 3783 desc_array->descriptor[1].length = 4; 3784 desc_array->elements = 2; 3785 return (desc_array->elements); 3786 } 3787 3788 static void 3789 em_update_stats(struct adapter *adapter) 3790 { 3791 struct ifnet *ifp = &adapter->arpcom.ac_if; 3792 3793 if (adapter->hw.phy.media_type == e1000_media_type_copper || 3794 (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) { 3795 adapter->stats.symerrs += 3796 E1000_READ_REG(&adapter->hw, E1000_SYMERRS); 3797 adapter->stats.sec += E1000_READ_REG(&adapter->hw, E1000_SEC); 3798 } 3799 adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, E1000_CRCERRS); 3800 adapter->stats.mpc += E1000_READ_REG(&adapter->hw, E1000_MPC); 3801 adapter->stats.scc += E1000_READ_REG(&adapter->hw, E1000_SCC); 3802 adapter->stats.ecol += E1000_READ_REG(&adapter->hw, E1000_ECOL); 3803 3804 adapter->stats.mcc += E1000_READ_REG(&adapter->hw, E1000_MCC); 3805 adapter->stats.latecol += E1000_READ_REG(&adapter->hw, E1000_LATECOL); 3806 adapter->stats.colc += E1000_READ_REG(&adapter->hw, E1000_COLC); 3807 adapter->stats.dc += E1000_READ_REG(&adapter->hw, E1000_DC); 3808 adapter->stats.rlec += E1000_READ_REG(&adapter->hw, E1000_RLEC); 3809 adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, E1000_XONRXC); 3810 adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, E1000_XONTXC); 3811 adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, E1000_XOFFRXC); 3812 adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, E1000_XOFFTXC); 3813 adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, E1000_FCRUC); 3814 adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, E1000_PRC64); 3815 adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, E1000_PRC127); 3816 adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, E1000_PRC255); 3817 adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, E1000_PRC511); 3818 adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, E1000_PRC1023); 3819 adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, E1000_PRC1522); 3820 adapter->stats.gprc += E1000_READ_REG(&adapter->hw, E1000_GPRC); 3821 adapter->stats.bprc += E1000_READ_REG(&adapter->hw, E1000_BPRC); 3822 adapter->stats.mprc += E1000_READ_REG(&adapter->hw, E1000_MPRC); 3823 adapter->stats.gptc += E1000_READ_REG(&adapter->hw, E1000_GPTC); 3824 3825 /* For the 64-bit byte counters the low dword must be read first. 
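	 * (e.g. a full read of the good-octets counters would be
	 * E1000_GORCL followed by E1000_GORCH)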
 */
3826 /* Both registers clear on the read of the high dword */
3827 
3828 adapter->stats.gorc += E1000_READ_REG(&adapter->hw, E1000_GORCH);
3829 adapter->stats.gotc += E1000_READ_REG(&adapter->hw, E1000_GOTCH);
3830 
3831 adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, E1000_RNBC);
3832 adapter->stats.ruc += E1000_READ_REG(&adapter->hw, E1000_RUC);
3833 adapter->stats.rfc += E1000_READ_REG(&adapter->hw, E1000_RFC);
3834 adapter->stats.roc += E1000_READ_REG(&adapter->hw, E1000_ROC);
3835 adapter->stats.rjc += E1000_READ_REG(&adapter->hw, E1000_RJC);
3836 
3837 adapter->stats.tor += E1000_READ_REG(&adapter->hw, E1000_TORH);
3838 adapter->stats.tot += E1000_READ_REG(&adapter->hw, E1000_TOTH);
3839 
3840 adapter->stats.tpr += E1000_READ_REG(&adapter->hw, E1000_TPR);
3841 adapter->stats.tpt += E1000_READ_REG(&adapter->hw, E1000_TPT);
3842 adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, E1000_PTC64);
3843 adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, E1000_PTC127);
3844 adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, E1000_PTC255);
3845 adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, E1000_PTC511);
3846 adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, E1000_PTC1023);
3847 adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, E1000_PTC1522);
3848 adapter->stats.mptc += E1000_READ_REG(&adapter->hw, E1000_MPTC);
3849 adapter->stats.bptc += E1000_READ_REG(&adapter->hw, E1000_BPTC);
3850 
3851 if (adapter->hw.mac.type >= e1000_82543) {
3852 adapter->stats.algnerrc +=
3853 E1000_READ_REG(&adapter->hw, E1000_ALGNERRC);
3854 adapter->stats.rxerrc +=
3855 E1000_READ_REG(&adapter->hw, E1000_RXERRC);
3856 adapter->stats.tncrs +=
3857 E1000_READ_REG(&adapter->hw, E1000_TNCRS);
3858 adapter->stats.cexterr +=
3859 E1000_READ_REG(&adapter->hw, E1000_CEXTERR);
3860 adapter->stats.tsctc +=
3861 E1000_READ_REG(&adapter->hw, E1000_TSCTC);
3862 adapter->stats.tsctfc +=
3863 E1000_READ_REG(&adapter->hw, E1000_TSCTFC);
3864 }
3865 
3866 ifp->if_collisions = adapter->stats.colc;
3867 
3868 /* Rx Errors */
3869 ifp->if_ierrors =
3870 adapter->dropped_pkts + adapter->stats.rxerrc +
3871 adapter->stats.crcerrs + adapter->stats.algnerrc +
3872 adapter->stats.ruc + adapter->stats.roc +
3873 adapter->stats.mpc + adapter->stats.cexterr;
3874 
3875 /* Tx Errors */
3876 ifp->if_oerrors =
3877 adapter->stats.ecol + adapter->stats.latecol +
3878 adapter->watchdog_events;
3879 }
3880 
3881 static void
3882 em_print_debug_info(struct adapter *adapter)
3883 {
3884 device_t dev = adapter->dev;
3885 uint8_t *hw_addr = adapter->hw.hw_addr;
3886 
3887 device_printf(dev, "Adapter hardware address = %p \n", hw_addr);
3888 device_printf(dev, "CTRL = 0x%x RCTL = 0x%x \n",
3889 E1000_READ_REG(&adapter->hw, E1000_CTRL),
3890 E1000_READ_REG(&adapter->hw, E1000_RCTL));
3891 device_printf(dev, "Packet buffer = Tx=%dk Rx=%dk \n",
3892 ((E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff0000) >> 16),
3893 (E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff));
3894 device_printf(dev, "Flow control watermarks high = %d low = %d\n",
3895 adapter->hw.fc.high_water,
3896 adapter->hw.fc.low_water);
3897 device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n",
3898 E1000_READ_REG(&adapter->hw, E1000_TIDV),
3899 E1000_READ_REG(&adapter->hw, E1000_TADV));
3900 device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n",
3901 E1000_READ_REG(&adapter->hw, E1000_RDTR),
3902 E1000_READ_REG(&adapter->hw, E1000_RADV));
3903 device_printf(dev, "fifo workaround = %lld, fifo_reset_count = %lld\n",
3904 (long long)adapter->tx_fifo_wrk_cnt,
3905 (long long)adapter->tx_fifo_reset_cnt);
3906 device_printf(dev, "hw tdh = %d, hw tdt = %d\n",
3907 E1000_READ_REG(&adapter->hw, E1000_TDH(0)),
3908 E1000_READ_REG(&adapter->hw, E1000_TDT(0)));
3909 device_printf(dev, "hw rdh = %d, hw rdt = %d\n",
3910 E1000_READ_REG(&adapter->hw, E1000_RDH(0)),
3911 E1000_READ_REG(&adapter->hw, E1000_RDT(0)));
3912 device_printf(dev, "Num Tx descriptors avail = %d\n",
3913 adapter->num_tx_desc_avail);
3914 device_printf(dev, "Tx Descriptors not avail1 = %ld\n",
3915 adapter->no_tx_desc_avail1);
3916 device_printf(dev, "Tx Descriptors not avail2 = %ld\n",
3917 adapter->no_tx_desc_avail2);
3918 device_printf(dev, "Std mbuf failed = %ld\n",
3919 adapter->mbuf_alloc_failed);
3920 device_printf(dev, "Std mbuf cluster failed = %ld\n",
3921 adapter->mbuf_cluster_failed);
3922 device_printf(dev, "Driver dropped packets = %ld\n",
3923 adapter->dropped_pkts);
3924 device_printf(dev, "Driver tx dma failure in encap = %ld\n",
3925 adapter->no_tx_dma_setup);
3926 }
3927 
3928 static void
3929 em_print_hw_stats(struct adapter *adapter)
3930 {
3931 device_t dev = adapter->dev;
3932 
3933 device_printf(dev, "Excessive collisions = %lld\n",
3934 (long long)adapter->stats.ecol);
3935 #if (DEBUG_HW > 0) /* Don't output these errors normally */
3936 device_printf(dev, "Symbol errors = %lld\n",
3937 (long long)adapter->stats.symerrs);
3938 #endif
3939 device_printf(dev, "Sequence errors = %lld\n",
3940 (long long)adapter->stats.sec);
3941 device_printf(dev, "Defer count = %lld\n",
3942 (long long)adapter->stats.dc);
3943 device_printf(dev, "Missed Packets = %lld\n",
3944 (long long)adapter->stats.mpc);
3945 device_printf(dev, "Receive No Buffers = %lld\n",
3946 (long long)adapter->stats.rnbc);
3947 /* RLEC is inaccurate on some hardware, calculate our own.
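 * The substitute printed below adds the oversize (roc) and undersize
 * (ruc) frame counts, which together cover the length-error cases.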
 */
3948 device_printf(dev, "Receive Length Errors = %lld\n",
3949 ((long long)adapter->stats.roc + (long long)adapter->stats.ruc));
3950 device_printf(dev, "Receive errors = %lld\n",
3951 (long long)adapter->stats.rxerrc);
3952 device_printf(dev, "Crc errors = %lld\n",
3953 (long long)adapter->stats.crcerrs);
3954 device_printf(dev, "Alignment errors = %lld\n",
3955 (long long)adapter->stats.algnerrc);
3956 device_printf(dev, "Collision/Carrier extension errors = %lld\n",
3957 (long long)adapter->stats.cexterr);
3958 device_printf(dev, "RX overruns = %ld\n", adapter->rx_overruns);
3959 device_printf(dev, "watchdog timeouts = %ld\n",
3960 adapter->watchdog_events);
3961 device_printf(dev, "XON Rcvd = %lld\n",
3962 (long long)adapter->stats.xonrxc);
3963 device_printf(dev, "XON Xmtd = %lld\n",
3964 (long long)adapter->stats.xontxc);
3965 device_printf(dev, "XOFF Rcvd = %lld\n",
3966 (long long)adapter->stats.xoffrxc);
3967 device_printf(dev, "XOFF Xmtd = %lld\n",
3968 (long long)adapter->stats.xofftxc);
3969 device_printf(dev, "Good Packets Rcvd = %lld\n",
3970 (long long)adapter->stats.gprc);
3971 device_printf(dev, "Good Packets Xmtd = %lld\n",
3972 (long long)adapter->stats.gptc);
3973 }
3974 
3975 static void
3976 em_print_nvm_info(struct adapter *adapter)
3977 {
3978 uint16_t eeprom_data;
3979 int i, j, row = 0;
3980 
3981 /* It's a bit crude, but it gets the job done */
3982 kprintf("\nInterface EEPROM Dump:\n");
3983 kprintf("Offset\n0x0000 ");
3984 for (i = 0, j = 0; i < 32; i++, j++) {
3985 if (j == 8) { /* Make the offset block */
3986 j = 0; ++row;
3987 kprintf("\n0x00%x0 ", row);
3988 }
3989 e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data);
3990 kprintf("%04x ", eeprom_data);
3991 }
3992 kprintf("\n");
3993 }
3994 
3995 static int
3996 em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
3997 {
3998 struct adapter *adapter;
3999 struct ifnet *ifp;
4000 int error, result;
4001 
4002 result = -1;
4003 error = sysctl_handle_int(oidp, &result, 0, req);
4004 if (error || !req->newptr)
4005 return (error);
4006 
4007 adapter = (struct adapter *)arg1;
4008 ifp = &adapter->arpcom.ac_if;
4009 
4010 lwkt_serialize_enter(ifp->if_serializer);
4011 
4012 if (result == 1)
4013 em_print_debug_info(adapter);
4014 
4015 /*
4016 * This value will cause a hex dump of the
4017 * first 32 16-bit words of the EEPROM to
4018 * the screen.
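 *
 * For example (illustrative; assumes the first adapter is em0):
 *
 *	sysctl hw.em0.debug=2
 *
 * while a value of 1 prints the debug information above.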
4019 */
4020 if (result == 2)
4021 em_print_nvm_info(adapter);
4022 
4023 lwkt_serialize_exit(ifp->if_serializer);
4024 
4025 return (error);
4026 }
4027 
4028 static int
4029 em_sysctl_stats(SYSCTL_HANDLER_ARGS)
4030 {
4031 int error, result;
4032 
4033 result = -1;
4034 error = sysctl_handle_int(oidp, &result, 0, req);
4035 if (error || !req->newptr)
4036 return (error);
4037 
4038 if (result == 1) {
4039 struct adapter *adapter = (struct adapter *)arg1;
4040 struct ifnet *ifp = &adapter->arpcom.ac_if;
4041 
4042 lwkt_serialize_enter(ifp->if_serializer);
4043 em_print_hw_stats(adapter);
4044 lwkt_serialize_exit(ifp->if_serializer);
4045 }
4046 return (error);
4047 }
4048 
4049 static void
4050 em_add_sysctl(struct adapter *adapter)
4051 {
4052 sysctl_ctx_init(&adapter->sysctl_ctx);
4053 adapter->sysctl_tree = SYSCTL_ADD_NODE(&adapter->sysctl_ctx,
4054 SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
4055 device_get_nameunit(adapter->dev),
4056 CTLFLAG_RD, 0, "");
4057 if (adapter->sysctl_tree == NULL) {
4058 device_printf(adapter->dev, "can't add sysctl node\n");
4059 } else {
4060 SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
4061 SYSCTL_CHILDREN(adapter->sysctl_tree),
4062 OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
4063 em_sysctl_debug_info, "I", "Debug Information");
4064 
4065 SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
4066 SYSCTL_CHILDREN(adapter->sysctl_tree),
4067 OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
4068 em_sysctl_stats, "I", "Statistics");
4069 
4070 SYSCTL_ADD_INT(&adapter->sysctl_ctx,
4071 SYSCTL_CHILDREN(adapter->sysctl_tree),
4072 OID_AUTO, "rxd", CTLFLAG_RD,
4073 &adapter->num_rx_desc, 0, NULL);
4074 SYSCTL_ADD_INT(&adapter->sysctl_ctx,
4075 SYSCTL_CHILDREN(adapter->sysctl_tree),
4076 OID_AUTO, "txd", CTLFLAG_RD,
4077 &adapter->num_tx_desc, 0, NULL);
4078 
4079 if (adapter->hw.mac.type >= e1000_82540) {
4080 SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
4081 SYSCTL_CHILDREN(adapter->sysctl_tree),
4082 OID_AUTO, "int_throttle_ceil",
4083 CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
4084 em_sysctl_int_throttle, "I",
4085 "interrupt throttling rate");
4086 }
4087 SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
4088 SYSCTL_CHILDREN(adapter->sysctl_tree),
4089 OID_AUTO, "int_tx_nsegs",
4090 CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
4091 em_sysctl_int_tx_nsegs, "I",
4092 "# segments per TX interrupt");
4093 SYSCTL_ADD_INT(&adapter->sysctl_ctx,
4094 SYSCTL_CHILDREN(adapter->sysctl_tree),
4095 OID_AUTO, "wreg_tx_nsegs", CTLFLAG_RW,
4096 &adapter->tx_wreg_nsegs, 0,
4097 "# segments before write to hardware register");
4098 }
4099 }
4100 
4101 static int
4102 em_sysctl_int_throttle(SYSCTL_HANDLER_ARGS)
4103 {
4104 struct adapter *adapter = (void *)arg1;
4105 struct ifnet *ifp = &adapter->arpcom.ac_if;
4106 int error, throttle;
4107 
4108 throttle = adapter->int_throttle_ceil;
4109 error = sysctl_handle_int(oidp, &throttle, 0, req);
4110 if (error || req->newptr == NULL)
4111 return error;
4112 if (throttle < 0 || throttle > 1000000000 / 256)
4113 return EINVAL;
4114 
4115 if (throttle) {
4116 /*
4117 * Set the interrupt throttling rate in 256ns increments, then
4118 * recalculate the stored ceiling so it reflects the exact frequency.
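 *
 * Illustrative arithmetic (added example): requesting 9999
 * interrupts/sec gives 1000000000 / 256 / 9999 = 390 in 256ns
 * units; converting back, 1000000000 / 256 / 390 = 10016, so
 * int_throttle_ceil is recorded as 10016/sec.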
4119 */
4120 throttle = 1000000000 / 256 / throttle;
4121 
4122 /* The upper 16 bits of ITR are reserved and must be zero */
4123 if (throttle & 0xffff0000)
4124 return EINVAL;
4125 }
4126 
4127 lwkt_serialize_enter(ifp->if_serializer);
4128 
4129 if (throttle)
4130 adapter->int_throttle_ceil = 1000000000 / 256 / throttle;
4131 else
4132 adapter->int_throttle_ceil = 0;
4133 
4134 if (ifp->if_flags & IFF_RUNNING)
4135 em_set_itr(adapter, throttle);
4136 
4137 lwkt_serialize_exit(ifp->if_serializer);
4138 
4139 if (bootverbose) {
4140 if_printf(ifp, "Interrupt moderation set to %d/sec\n",
4141 adapter->int_throttle_ceil);
4142 }
4143 return 0;
4144 }
4145 
4146 static int
4147 em_sysctl_int_tx_nsegs(SYSCTL_HANDLER_ARGS)
4148 {
4149 struct adapter *adapter = (void *)arg1;
4150 struct ifnet *ifp = &adapter->arpcom.ac_if;
4151 int error, segs;
4152 
4153 segs = adapter->tx_int_nsegs;
4154 error = sysctl_handle_int(oidp, &segs, 0, req);
4155 if (error || req->newptr == NULL)
4156 return error;
4157 if (segs <= 0)
4158 return EINVAL;
4159 
4160 lwkt_serialize_enter(ifp->if_serializer);
4161 
4162 /*
4163 * Don't allow int_tx_nsegs to become:
4164 * o Less than oact_tx_desc
4165 * o So large that no TX descriptor would ever raise a TX
4166 *   interrupt (OACTIVE would never be cleared)
4167 * o So small that it would overflow tx_dd[]
4168 */
4169 if (segs < adapter->oact_tx_desc ||
4170 segs >= adapter->num_tx_desc - adapter->oact_tx_desc ||
4171 segs < adapter->num_tx_desc / EM_TXDD_SAFE) {
4172 error = EINVAL;
4173 } else {
4174 error = 0;
4175 adapter->tx_int_nsegs = segs;
4176 }
4177 
4178 lwkt_serialize_exit(ifp->if_serializer);
4179 
4180 return error;
4181 }
4182 
4183 static void
4184 em_set_itr(struct adapter *adapter, uint32_t itr)
4185 {
4186 E1000_WRITE_REG(&adapter->hw, E1000_ITR, itr);
4187 if (adapter->hw.mac.type == e1000_82574) {
4188 int i;
4189 
4190 /*
4191 * When MSI-X interrupts are used, we need to
4192 * throttle using the EITR registers
4193 */
4194 for (i = 0; i < 4; ++i) {
4195 E1000_WRITE_REG(&adapter->hw,
4196 E1000_EITR_82574(i), itr);
4197 }
4198 }
4199 }
4200 
4201 static void
4202 em_disable_aspm(struct adapter *adapter)
4203 {
4204 uint16_t link_cap, link_ctrl, disable;
4205 uint8_t pcie_ptr, reg;
4206 device_t dev = adapter->dev;
4207 
4208 switch (adapter->hw.mac.type) {
4209 case e1000_82571:
4210 case e1000_82572:
4211 case e1000_82573:
4212 /*
4213 * 82573 specification update
4214 * errata #8 disable L0s
4215 * errata #41 disable L1
4216 *
4217 * 82571/82572 specification update
4218 * errata #13 disable L1
4219 * errata #68 disable L0s
4220 */
4221 disable = PCIEM_LNKCTL_ASPM_L0S | PCIEM_LNKCTL_ASPM_L1;
4222 break;
4223 
4224 case e1000_82574:
4225 case e1000_82583:
4226 /*
4227 * 82574 specification update errata #20
4228 * 82583 specification update errata #9
4229 *
4230 * There is no need to disable L1.
4231 */
4232 disable = PCIEM_LNKCTL_ASPM_L0S;
4233 break;
4234 
4235 default:
4236 return;
4237 }
4238 
4239 pcie_ptr = pci_get_pciecap_ptr(dev);
4240 if (pcie_ptr == 0)
4241 return;
4242 
4243 link_cap = pci_read_config(dev, pcie_ptr + PCIER_LINKCAP, 2);
4244 if ((link_cap & PCIEM_LNKCAP_ASPM_MASK) == 0)
4245 return;
4246 
4247 if (bootverbose) {
4248 if_printf(&adapter->arpcom.ac_if,
4249 "disable ASPM %#02x\n", disable);
4250 }
4251 
4252 reg = pcie_ptr + PCIER_LINKCTRL;
4253 link_ctrl = pci_read_config(dev, reg, 2);
4254 link_ctrl &= ~disable;
4255 pci_write_config(dev, reg, link_ctrl, 2);
4256 }
4257 
4258 static int
4259 em_tso_pullup(struct adapter *adapter, struct mbuf **mp)
4260 {
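/*
 * Added note: ensure the entire Ethernet/IP/TCP header chain, plus 4
 * extra bytes on adapters flagged EM_FLAG_TSO_PULLEX, resides in the
 * first mbuf so the TSO context setup can read it linearly;
 * m_pullup() below may substitute a new chain head, which is why the
 * mbuf pointer is passed by reference.
 */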
4261 int iphlen, hoff, thoff, ex = 0;
4262 struct mbuf *m;
4263 struct ip *ip;
4264 
4265 m = *mp;
4266 KASSERT(M_WRITABLE(m), ("TSO mbuf not writable"));
4267 
4268 iphlen = m->m_pkthdr.csum_iphlen;
4269 thoff = m->m_pkthdr.csum_thlen;
4270 hoff = m->m_pkthdr.csum_lhlen;
4271 
4272 KASSERT(iphlen > 0, ("invalid ip hlen"));
4273 KASSERT(thoff > 0, ("invalid tcp hlen"));
4274 KASSERT(hoff > 0, ("invalid ether hlen"));
4275 
4276 if (adapter->flags & EM_FLAG_TSO_PULLEX)
4277 ex = 4;
4278 
4279 if (m->m_len < hoff + iphlen + thoff + ex) {
4280 m = m_pullup(m, hoff + iphlen + thoff + ex);
4281 if (m == NULL) {
4282 *mp = NULL;
4283 return ENOBUFS;
4284 }
4285 *mp = m;
4286 }
4287 ip = mtodoff(m, struct ip *, hoff);
4288 ip->ip_len = 0;
4289 
4290 return 0;
4291 }
4292 
4293 static int
4294 em_tso_setup(struct adapter *adapter, struct mbuf *mp,
4295 uint32_t *txd_upper, uint32_t *txd_lower)
4296 {
4297 struct e1000_context_desc *TXD;
4298 int hoff, iphlen, thoff, hlen;
4299 int mss, pktlen, curr_txd;
4300 
4301 iphlen = mp->m_pkthdr.csum_iphlen;
4302 thoff = mp->m_pkthdr.csum_thlen;
4303 hoff = mp->m_pkthdr.csum_lhlen;
4304 mss = mp->m_pkthdr.tso_segsz;
4305 pktlen = mp->m_pkthdr.len;
4306 
4307 if (adapter->csum_flags == CSUM_TSO &&
4308 adapter->csum_iphlen == iphlen &&
4309 adapter->csum_lhlen == hoff &&
4310 adapter->csum_thlen == thoff &&
4311 adapter->csum_mss == mss &&
4312 adapter->csum_pktlen == pktlen) {
4313 *txd_upper = adapter->csum_txd_upper;
4314 *txd_lower = adapter->csum_txd_lower;
4315 return 0;
4316 }
4317 hlen = hoff + iphlen + thoff;
4318 
4319 /*
4320 * Set up a new TSO context.
4321 */
4322 
4323 curr_txd = adapter->next_avail_tx_desc;
4324 TXD = (struct e1000_context_desc *)&adapter->tx_desc_base[curr_txd];
4325 
4326 *txd_lower = E1000_TXD_CMD_DEXT | /* Extended descr type */
4327 E1000_TXD_DTYP_D | /* Data descr type */
4328 E1000_TXD_CMD_TSE; /* Do TSE on this packet */
4329 
4330 /* IP and/or TCP header checksum calculation and insertion. */
4331 *txd_upper = (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8;
4332 
4333 /*
4334 * Start offset for header checksum calculation.
4335 * End offset for header checksum calculation.
4336 * Offset of the place to put the checksum.
4337 */
4338 TXD->lower_setup.ip_fields.ipcss = hoff;
4339 TXD->lower_setup.ip_fields.ipcse = htole16(hoff + iphlen - 1);
4340 TXD->lower_setup.ip_fields.ipcso = hoff + offsetof(struct ip, ip_sum);
4341 
4342 /*
4343 * Start offset for payload checksum calculation.
4344 * End offset for payload checksum calculation.
4345 * Offset of the place to put the checksum.
4346 */
4347 TXD->upper_setup.tcp_fields.tucss = hoff + iphlen;
4348 TXD->upper_setup.tcp_fields.tucse = 0;
4349 TXD->upper_setup.tcp_fields.tucso =
4350 hoff + iphlen + offsetof(struct tcphdr, th_sum);
4351 
4352 /*
4353 * Payload size per packet w/o any headers.
4354 * Length of all headers up to payload.
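 *
 * For example (illustrative numbers, not from the source): with a
 * 14-byte Ethernet header (hoff), a 20-byte IP header (iphlen) and a
 * 20-byte TCP header (thoff), hlen = 54, and mss would typically be
 * 1460 on a 1500-byte MTU path.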
4355 */ 4356 TXD->tcp_seg_setup.fields.mss = htole16(mss); 4357 TXD->tcp_seg_setup.fields.hdr_len = hlen; 4358 TXD->cmd_and_length = htole32(E1000_TXD_CMD_IFCS | 4359 E1000_TXD_CMD_DEXT | /* Extended descr */ 4360 E1000_TXD_CMD_TSE | /* TSE context */ 4361 E1000_TXD_CMD_IP | /* Do IP csum */ 4362 E1000_TXD_CMD_TCP | /* Do TCP checksum */ 4363 (pktlen - hlen)); /* Total len */ 4364 4365 /* Save the information for this TSO context */ 4366 adapter->csum_flags = CSUM_TSO; 4367 adapter->csum_lhlen = hoff; 4368 adapter->csum_iphlen = iphlen; 4369 adapter->csum_thlen = thoff; 4370 adapter->csum_mss = mss; 4371 adapter->csum_pktlen = pktlen; 4372 adapter->csum_txd_upper = *txd_upper; 4373 adapter->csum_txd_lower = *txd_lower; 4374 4375 if (++curr_txd == adapter->num_tx_desc) 4376 curr_txd = 0; 4377 4378 KKASSERT(adapter->num_tx_desc_avail > 0); 4379 adapter->num_tx_desc_avail--; 4380 4381 adapter->next_avail_tx_desc = curr_txd; 4382 return 1; 4383 } 4384