1 /* 2 * Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>. All rights reserved. 3 * 4 * Copyright (c) 2001-2014, Intel Corporation 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions are met: 9 * 10 * 1. Redistributions of source code must retain the above copyright notice, 11 * this list of conditions and the following disclaimer. 12 * 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * 3. Neither the name of the Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived from 19 * this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 31 * POSSIBILITY OF SUCH DAMAGE. 32 * 33 * 34 * Copyright (c) 2005 The DragonFly Project. All rights reserved. 35 * 36 * This code is derived from software contributed to The DragonFly Project 37 * by Matthew Dillon <dillon@backplane.com> 38 * 39 * Redistribution and use in source and binary forms, with or without 40 * modification, are permitted provided that the following conditions 41 * are met: 42 * 43 * 1. Redistributions of source code must retain the above copyright 44 * notice, this list of conditions and the following disclaimer. 45 * 2. Redistributions in binary form must reproduce the above copyright 46 * notice, this list of conditions and the following disclaimer in 47 * the documentation and/or other materials provided with the 48 * distribution. 49 * 3. Neither the name of The DragonFly Project nor the names of its 50 * contributors may be used to endorse or promote products derived 51 * from this software without specific, prior written permission. 52 * 53 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 54 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 55 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 56 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 57 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 58 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 59 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 60 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 61 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 62 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 63 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 64 * SUCH DAMAGE. 65 * 66 */ 67 /* 68 * SERIALIZATION API RULES: 69 * 70 * - We must call lwkt_serialize_handler_enable() prior to enabling the 71 * hardware interrupt and lwkt_serialize_handler_disable() after disabling 72 * the hardware interrupt in order to avoid handler execution races from 73 * scheduled interrupt threads. 74 */ 75 76 #include "opt_ifpoll.h" 77 78 #include <sys/param.h> 79 #include <sys/bus.h> 80 #include <sys/endian.h> 81 #include <sys/interrupt.h> 82 #include <sys/kernel.h> 83 #include <sys/ktr.h> 84 #include <sys/malloc.h> 85 #include <sys/mbuf.h> 86 #include <sys/proc.h> 87 #include <sys/rman.h> 88 #include <sys/serialize.h> 89 #include <sys/socket.h> 90 #include <sys/sockio.h> 91 #include <sys/sysctl.h> 92 #include <sys/systm.h> 93 94 #include <net/bpf.h> 95 #include <net/ethernet.h> 96 #include <net/if.h> 97 #include <net/if_arp.h> 98 #include <net/if_dl.h> 99 #include <net/if_media.h> 100 #include <net/if_poll.h> 101 #include <net/ifq_var.h> 102 #include <net/vlan/if_vlan_var.h> 103 #include <net/vlan/if_vlan_ether.h> 104 105 #include <netinet/ip.h> 106 #include <netinet/tcp.h> 107 #include <netinet/udp.h> 108 109 #include <bus/pci/pcivar.h> 110 #include <bus/pci/pcireg.h> 111 112 #include <dev/netif/ig_hal/e1000_api.h> 113 #include <dev/netif/ig_hal/e1000_82571.h> 114 #include <dev/netif/ig_hal/e1000_dragonfly.h> 115 #include <dev/netif/em/if_em.h> 116 117 #define DEBUG_HW 0 118 119 #define EM_NAME "Intel(R) PRO/1000 Network Connection " 120 #define EM_VER " 7.4.2" 121 122 #define _EM_DEVICE(id, ret) \ 123 { EM_VENDOR_ID, E1000_DEV_ID_##id, ret, EM_NAME #id EM_VER } 124 #define EM_EMX_DEVICE(id) _EM_DEVICE(id, -100) 125 #define EM_DEVICE(id) _EM_DEVICE(id, 0) 126 #define EM_DEVICE_NULL { 0, 0, 0, NULL } 127 128 static const struct em_vendor_info em_vendor_info_array[] = { 129 EM_DEVICE(82540EM), 130 EM_DEVICE(82540EM_LOM), 131 EM_DEVICE(82540EP), 132 EM_DEVICE(82540EP_LOM), 133 EM_DEVICE(82540EP_LP), 134 135 EM_DEVICE(82541EI), 136 EM_DEVICE(82541ER), 137 EM_DEVICE(82541ER_LOM), 138 EM_DEVICE(82541EI_MOBILE), 139 EM_DEVICE(82541GI), 140 EM_DEVICE(82541GI_LF), 141 EM_DEVICE(82541GI_MOBILE), 142 143 EM_DEVICE(82542), 144 145 EM_DEVICE(82543GC_FIBER), 146 EM_DEVICE(82543GC_COPPER), 147 148 EM_DEVICE(82544EI_COPPER), 149 EM_DEVICE(82544EI_FIBER), 150 EM_DEVICE(82544GC_COPPER), 151 EM_DEVICE(82544GC_LOM), 152 153 EM_DEVICE(82545EM_COPPER), 154 EM_DEVICE(82545EM_FIBER), 155 EM_DEVICE(82545GM_COPPER), 156 EM_DEVICE(82545GM_FIBER), 157 EM_DEVICE(82545GM_SERDES), 158 159 EM_DEVICE(82546EB_COPPER), 160 EM_DEVICE(82546EB_FIBER), 161 EM_DEVICE(82546EB_QUAD_COPPER), 162 EM_DEVICE(82546GB_COPPER), 163 EM_DEVICE(82546GB_FIBER), 164 EM_DEVICE(82546GB_SERDES), 165 EM_DEVICE(82546GB_PCIE), 166 EM_DEVICE(82546GB_QUAD_COPPER), 167 EM_DEVICE(82546GB_QUAD_COPPER_KSP3), 168 169 EM_DEVICE(82547EI), 170 EM_DEVICE(82547EI_MOBILE), 171 EM_DEVICE(82547GI), 172 173 EM_EMX_DEVICE(82571EB_COPPER), 174 EM_EMX_DEVICE(82571EB_FIBER), 175 EM_EMX_DEVICE(82571EB_SERDES), 176 EM_EMX_DEVICE(82571EB_SERDES_DUAL), 177 EM_EMX_DEVICE(82571EB_SERDES_QUAD), 178 EM_EMX_DEVICE(82571EB_QUAD_COPPER), 179 EM_EMX_DEVICE(82571EB_QUAD_COPPER_BP), 180 EM_EMX_DEVICE(82571EB_QUAD_COPPER_LP), 181 EM_EMX_DEVICE(82571EB_QUAD_FIBER), 182 EM_EMX_DEVICE(82571PT_QUAD_COPPER), 183 184 EM_EMX_DEVICE(82572EI_COPPER), 185 EM_EMX_DEVICE(82572EI_FIBER), 186 EM_EMX_DEVICE(82572EI_SERDES), 187 EM_EMX_DEVICE(82572EI), 188 189 EM_EMX_DEVICE(82573E), 190 EM_EMX_DEVICE(82573E_IAMT), 191 EM_EMX_DEVICE(82573L), 192 193 EM_DEVICE(82583V), 194 195 EM_EMX_DEVICE(80003ES2LAN_COPPER_SPT), 196 EM_EMX_DEVICE(80003ES2LAN_SERDES_SPT), 197 EM_EMX_DEVICE(80003ES2LAN_COPPER_DPT), 198 EM_EMX_DEVICE(80003ES2LAN_SERDES_DPT), 199 200 EM_DEVICE(ICH8_IGP_M_AMT), 201 EM_DEVICE(ICH8_IGP_AMT), 202 EM_DEVICE(ICH8_IGP_C), 203 EM_DEVICE(ICH8_IFE), 204 EM_DEVICE(ICH8_IFE_GT), 205 EM_DEVICE(ICH8_IFE_G), 206 EM_DEVICE(ICH8_IGP_M), 207 EM_DEVICE(ICH8_82567V_3), 208 209 EM_DEVICE(ICH9_IGP_M_AMT), 210 EM_DEVICE(ICH9_IGP_AMT), 211 EM_DEVICE(ICH9_IGP_C), 212 EM_DEVICE(ICH9_IGP_M), 213 EM_DEVICE(ICH9_IGP_M_V), 214 EM_DEVICE(ICH9_IFE), 215 EM_DEVICE(ICH9_IFE_GT), 216 EM_DEVICE(ICH9_IFE_G), 217 EM_DEVICE(ICH9_BM), 218 219 EM_EMX_DEVICE(82574L), 220 EM_EMX_DEVICE(82574LA), 221 222 EM_DEVICE(ICH10_R_BM_LM), 223 EM_DEVICE(ICH10_R_BM_LF), 224 EM_DEVICE(ICH10_R_BM_V), 225 EM_DEVICE(ICH10_D_BM_LM), 226 EM_DEVICE(ICH10_D_BM_LF), 227 EM_DEVICE(ICH10_D_BM_V), 228 229 EM_DEVICE(PCH_M_HV_LM), 230 EM_DEVICE(PCH_M_HV_LC), 231 EM_DEVICE(PCH_D_HV_DM), 232 EM_DEVICE(PCH_D_HV_DC), 233 234 EM_DEVICE(PCH2_LV_LM), 235 EM_DEVICE(PCH2_LV_V), 236 237 EM_EMX_DEVICE(PCH_LPT_I217_LM), 238 EM_EMX_DEVICE(PCH_LPT_I217_V), 239 EM_EMX_DEVICE(PCH_LPTLP_I218_LM), 240 EM_EMX_DEVICE(PCH_LPTLP_I218_V), 241 EM_EMX_DEVICE(PCH_I218_LM2), 242 EM_EMX_DEVICE(PCH_I218_V2), 243 EM_EMX_DEVICE(PCH_I218_LM3), 244 EM_EMX_DEVICE(PCH_I218_V3), 245 EM_EMX_DEVICE(PCH_SPT_I219_LM), 246 EM_EMX_DEVICE(PCH_SPT_I219_V), 247 EM_EMX_DEVICE(PCH_SPT_I219_LM2), 248 EM_EMX_DEVICE(PCH_SPT_I219_V2), 249 250 /* required last entry */ 251 EM_DEVICE_NULL 252 }; 253 254 static int em_probe(device_t); 255 static int em_attach(device_t); 256 static int em_detach(device_t); 257 static int em_shutdown(device_t); 258 static int em_suspend(device_t); 259 static int em_resume(device_t); 260 261 static void em_init(void *); 262 static void em_stop(struct adapter *); 263 static int em_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 264 static void em_start(struct ifnet *, struct ifaltq_subque *); 265 #ifdef IFPOLL_ENABLE 266 static void em_npoll(struct ifnet *, struct ifpoll_info *); 267 static void em_npoll_compat(struct ifnet *, void *, int); 268 #endif 269 static void em_watchdog(struct ifnet *); 270 static void em_media_status(struct ifnet *, struct ifmediareq *); 271 static int em_media_change(struct ifnet *); 272 static void em_timer(void *); 273 274 static void em_intr(void *); 275 static void em_intr_mask(void *); 276 static void em_intr_body(struct adapter *, boolean_t); 277 static void em_rxeof(struct adapter *, int); 278 static void em_txeof(struct adapter *); 279 static void em_tx_collect(struct adapter *); 280 static void em_tx_purge(struct adapter *); 281 static void em_enable_intr(struct adapter *); 282 static void em_disable_intr(struct adapter *); 283 284 static int em_dma_malloc(struct adapter *, bus_size_t, 285 struct em_dma_alloc *); 286 static void em_dma_free(struct adapter *, struct em_dma_alloc *); 287 static void em_init_tx_ring(struct adapter *); 288 static int em_init_rx_ring(struct adapter *); 289 static int em_create_tx_ring(struct adapter *); 290 static int em_create_rx_ring(struct adapter *); 291 static void em_destroy_tx_ring(struct adapter *, int); 292 static void em_destroy_rx_ring(struct adapter *, int); 293 static int em_newbuf(struct adapter *, int, int); 294 static int em_encap(struct adapter *, struct mbuf **, int *, int *); 295 static void em_rxcsum(struct adapter *, struct e1000_rx_desc *, 296 struct mbuf *); 297 static int em_txcsum(struct adapter *, struct mbuf *, 298 uint32_t *, uint32_t *); 299 static int em_tso_pullup(struct adapter *, struct mbuf **); 300 static int em_tso_setup(struct adapter *, struct mbuf *, 301 uint32_t *, uint32_t *); 302 303 static int em_get_hw_info(struct adapter *); 304 static int em_is_valid_eaddr(const uint8_t *); 305 static int em_alloc_pci_res(struct adapter *); 306 static void em_free_pci_res(struct adapter *); 307 static int em_reset(struct adapter *); 308 static void em_setup_ifp(struct adapter *); 309 static void em_init_tx_unit(struct adapter *); 310 static void em_init_rx_unit(struct adapter *); 311 static void em_update_stats(struct adapter *); 312 static void em_set_promisc(struct adapter *); 313 static void em_disable_promisc(struct adapter *); 314 static void em_set_multi(struct adapter *); 315 static void em_update_link_status(struct adapter *); 316 static void em_smartspeed(struct adapter *); 317 static void em_set_itr(struct adapter *, uint32_t); 318 static void em_disable_aspm(struct adapter *); 319 320 /* Hardware workarounds */ 321 static int em_82547_fifo_workaround(struct adapter *, int); 322 static void em_82547_update_fifo_head(struct adapter *, int); 323 static int em_82547_tx_fifo_reset(struct adapter *); 324 static void em_82547_move_tail(void *); 325 static void em_82547_move_tail_serialized(struct adapter *); 326 static uint32_t em_82544_fill_desc(bus_addr_t, uint32_t, PDESC_ARRAY); 327 328 static void em_print_debug_info(struct adapter *); 329 static void em_print_nvm_info(struct adapter *); 330 static void em_print_hw_stats(struct adapter *); 331 332 static int em_sysctl_stats(SYSCTL_HANDLER_ARGS); 333 static int em_sysctl_debug_info(SYSCTL_HANDLER_ARGS); 334 static int em_sysctl_int_throttle(SYSCTL_HANDLER_ARGS); 335 static int em_sysctl_int_tx_nsegs(SYSCTL_HANDLER_ARGS); 336 static void em_add_sysctl(struct adapter *adapter); 337 338 /* Management and WOL Support */ 339 static void em_get_mgmt(struct adapter *); 340 static void em_rel_mgmt(struct adapter *); 341 static void em_get_hw_control(struct adapter *); 342 static void em_rel_hw_control(struct adapter *); 343 static void em_enable_wol(device_t); 344 345 static device_method_t em_methods[] = { 346 /* Device interface */ 347 DEVMETHOD(device_probe, em_probe), 348 DEVMETHOD(device_attach, em_attach), 349 DEVMETHOD(device_detach, em_detach), 350 DEVMETHOD(device_shutdown, em_shutdown), 351 DEVMETHOD(device_suspend, em_suspend), 352 DEVMETHOD(device_resume, em_resume), 353 DEVMETHOD_END 354 }; 355 356 static driver_t em_driver = { 357 "em", 358 em_methods, 359 sizeof(struct adapter), 360 }; 361 362 static devclass_t em_devclass; 363 364 DECLARE_DUMMY_MODULE(if_em); 365 MODULE_DEPEND(em, ig_hal, 1, 1, 1); 366 DRIVER_MODULE(if_em, pci, em_driver, em_devclass, NULL, NULL); 367 368 /* 369 * Tunables 370 */ 371 static int em_int_throttle_ceil = EM_DEFAULT_ITR; 372 static int em_rxd = EM_DEFAULT_RXD; 373 static int em_txd = EM_DEFAULT_TXD; 374 static int em_smart_pwr_down = 0; 375 376 /* Controls whether promiscuous also shows bad packets */ 377 static int em_debug_sbp = FALSE; 378 379 static int em_82573_workaround = 1; 380 static int em_msi_enable = 1; 381 382 static char em_flowctrl[IFM_ETH_FC_STRLEN] = IFM_ETH_FC_RXPAUSE; 383 384 TUNABLE_INT("hw.em.int_throttle_ceil", &em_int_throttle_ceil); 385 TUNABLE_INT("hw.em.rxd", &em_rxd); 386 TUNABLE_INT("hw.em.txd", &em_txd); 387 TUNABLE_INT("hw.em.smart_pwr_down", &em_smart_pwr_down); 388 TUNABLE_INT("hw.em.sbp", &em_debug_sbp); 389 TUNABLE_INT("hw.em.82573_workaround", &em_82573_workaround); 390 TUNABLE_INT("hw.em.msi.enable", &em_msi_enable); 391 TUNABLE_STR("hw.em.flow_ctrl", em_flowctrl, sizeof(em_flowctrl)); 392 393 /* Global used in WOL setup with multiport cards */ 394 static int em_global_quad_port_a = 0; 395 396 /* Set this to one to display debug statistics */ 397 static int em_display_debug_stats = 0; 398 399 #if !defined(KTR_IF_EM) 400 #define KTR_IF_EM KTR_ALL 401 #endif 402 KTR_INFO_MASTER(if_em); 403 KTR_INFO(KTR_IF_EM, if_em, intr_beg, 0, "intr begin"); 404 KTR_INFO(KTR_IF_EM, if_em, intr_end, 1, "intr end"); 405 KTR_INFO(KTR_IF_EM, if_em, pkt_receive, 4, "rx packet"); 406 KTR_INFO(KTR_IF_EM, if_em, pkt_txqueue, 5, "tx packet"); 407 KTR_INFO(KTR_IF_EM, if_em, pkt_txclean, 6, "tx clean"); 408 #define logif(name) KTR_LOG(if_em_ ## name) 409 410 static int 411 em_probe(device_t dev) 412 { 413 const struct em_vendor_info *ent; 414 uint16_t vid, did; 415 416 vid = pci_get_vendor(dev); 417 did = pci_get_device(dev); 418 419 for (ent = em_vendor_info_array; ent->desc != NULL; ++ent) { 420 if (vid == ent->vendor_id && did == ent->device_id) { 421 device_set_desc(dev, ent->desc); 422 device_set_async_attach(dev, TRUE); 423 return (ent->ret); 424 } 425 } 426 return (ENXIO); 427 } 428 429 static int 430 em_attach(device_t dev) 431 { 432 struct adapter *adapter = device_get_softc(dev); 433 struct ifnet *ifp = &adapter->arpcom.ac_if; 434 int tsize, rsize; 435 int error = 0; 436 uint16_t eeprom_data, device_id, apme_mask; 437 driver_intr_t *intr_func; 438 char flowctrl[IFM_ETH_FC_STRLEN]; 439 440 adapter->dev = adapter->osdep.dev = dev; 441 442 callout_init_mp(&adapter->timer); 443 callout_init_mp(&adapter->tx_fifo_timer); 444 445 ifmedia_init(&adapter->media, IFM_IMASK | IFM_ETH_FCMASK, 446 em_media_change, em_media_status); 447 448 /* Determine hardware and mac info */ 449 error = em_get_hw_info(adapter); 450 if (error) { 451 device_printf(dev, "Identify hardware failed\n"); 452 goto fail; 453 } 454 455 /* Setup PCI resources */ 456 error = em_alloc_pci_res(adapter); 457 if (error) { 458 device_printf(dev, "Allocation of PCI resources failed\n"); 459 goto fail; 460 } 461 462 /* 463 * For ICH8 and family we need to map the flash memory, 464 * and this must happen after the MAC is identified. 465 * 466 * (SPT does not map the flash with a separate BAR) 467 */ 468 if (adapter->hw.mac.type == e1000_ich8lan || 469 adapter->hw.mac.type == e1000_ich9lan || 470 adapter->hw.mac.type == e1000_ich10lan || 471 adapter->hw.mac.type == e1000_pchlan || 472 adapter->hw.mac.type == e1000_pch2lan || 473 adapter->hw.mac.type == e1000_pch_lpt) { 474 adapter->flash_rid = EM_BAR_FLASH; 475 476 adapter->flash = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 477 &adapter->flash_rid, RF_ACTIVE); 478 if (adapter->flash == NULL) { 479 device_printf(dev, "Mapping of Flash failed\n"); 480 error = ENXIO; 481 goto fail; 482 } 483 adapter->osdep.flash_bus_space_tag = 484 rman_get_bustag(adapter->flash); 485 adapter->osdep.flash_bus_space_handle = 486 rman_get_bushandle(adapter->flash); 487 488 /* 489 * This is used in the shared code 490 * XXX this goof is actually not used. 491 */ 492 adapter->hw.flash_address = (uint8_t *)adapter->flash; 493 } 494 495 switch (adapter->hw.mac.type) { 496 case e1000_82571: 497 case e1000_82572: 498 case e1000_pch_lpt: 499 case e1000_pch_spt: 500 /* 501 * Pullup extra 4bytes into the first data segment for 502 * TSO, see: 503 * 82571/82572 specification update errata #7 504 * 505 * Same applies to I217 (and maybe I218 and I219). 506 * 507 * NOTE: 508 * 4bytes instead of 2bytes, which are mentioned in the 509 * errata, are pulled; mainly to keep rest of the data 510 * properly aligned. 511 */ 512 adapter->flags |= EM_FLAG_TSO_PULLEX; 513 /* FALL THROUGH */ 514 515 default: 516 if (pci_is_pcie(dev)) 517 adapter->flags |= EM_FLAG_TSO; 518 break; 519 } 520 521 /* Do Shared Code initialization */ 522 if (e1000_setup_init_funcs(&adapter->hw, TRUE)) { 523 device_printf(dev, "Setup of Shared code failed\n"); 524 error = ENXIO; 525 goto fail; 526 } 527 528 e1000_get_bus_info(&adapter->hw); 529 530 /* 531 * Validate number of transmit and receive descriptors. It 532 * must not exceed hardware maximum, and must be multiple 533 * of E1000_DBA_ALIGN. 534 */ 535 if ((em_txd * sizeof(struct e1000_tx_desc)) % EM_DBA_ALIGN != 0 || 536 (adapter->hw.mac.type >= e1000_82544 && em_txd > EM_MAX_TXD) || 537 (adapter->hw.mac.type < e1000_82544 && em_txd > EM_MAX_TXD_82543) || 538 em_txd < EM_MIN_TXD) { 539 if (adapter->hw.mac.type < e1000_82544) 540 adapter->num_tx_desc = EM_MAX_TXD_82543; 541 else 542 adapter->num_tx_desc = EM_DEFAULT_TXD; 543 device_printf(dev, "Using %d TX descriptors instead of %d!\n", 544 adapter->num_tx_desc, em_txd); 545 } else { 546 adapter->num_tx_desc = em_txd; 547 } 548 if ((em_rxd * sizeof(struct e1000_rx_desc)) % EM_DBA_ALIGN != 0 || 549 (adapter->hw.mac.type >= e1000_82544 && em_rxd > EM_MAX_RXD) || 550 (adapter->hw.mac.type < e1000_82544 && em_rxd > EM_MAX_RXD_82543) || 551 em_rxd < EM_MIN_RXD) { 552 if (adapter->hw.mac.type < e1000_82544) 553 adapter->num_rx_desc = EM_MAX_RXD_82543; 554 else 555 adapter->num_rx_desc = EM_DEFAULT_RXD; 556 device_printf(dev, "Using %d RX descriptors instead of %d!\n", 557 adapter->num_rx_desc, em_rxd); 558 } else { 559 adapter->num_rx_desc = em_rxd; 560 } 561 562 adapter->hw.mac.autoneg = DO_AUTO_NEG; 563 adapter->hw.phy.autoneg_wait_to_complete = FALSE; 564 adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT; 565 adapter->rx_buffer_len = MCLBYTES; 566 567 /* 568 * Interrupt throttle rate 569 */ 570 if (em_int_throttle_ceil == 0) { 571 adapter->int_throttle_ceil = 0; 572 } else { 573 int throttle = em_int_throttle_ceil; 574 575 if (throttle < 0) 576 throttle = EM_DEFAULT_ITR; 577 578 /* Recalculate the tunable value to get the exact frequency. */ 579 throttle = 1000000000 / 256 / throttle; 580 581 /* Upper 16bits of ITR is reserved and should be zero */ 582 if (throttle & 0xffff0000) 583 throttle = 1000000000 / 256 / EM_DEFAULT_ITR; 584 585 adapter->int_throttle_ceil = 1000000000 / 256 / throttle; 586 } 587 588 e1000_init_script_state_82541(&adapter->hw, TRUE); 589 e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE); 590 591 /* Copper options */ 592 if (adapter->hw.phy.media_type == e1000_media_type_copper) { 593 adapter->hw.phy.mdix = AUTO_ALL_MODES; 594 adapter->hw.phy.disable_polarity_correction = FALSE; 595 adapter->hw.phy.ms_type = EM_MASTER_SLAVE; 596 } 597 598 /* Set the frame limits assuming standard ethernet sized frames. */ 599 adapter->hw.mac.max_frame_size = 600 ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN; 601 adapter->min_frame_size = ETH_ZLEN + ETHER_CRC_LEN; 602 603 /* This controls when hardware reports transmit completion status. */ 604 adapter->hw.mac.report_tx_early = 1; 605 606 /* 607 * Create top level busdma tag 608 */ 609 error = bus_dma_tag_create(NULL, 1, 0, 610 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 611 NULL, NULL, 612 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 613 0, &adapter->parent_dtag); 614 if (error) { 615 device_printf(dev, "could not create top level DMA tag\n"); 616 goto fail; 617 } 618 619 /* 620 * Allocate Transmit Descriptor ring 621 */ 622 tsize = roundup2(adapter->num_tx_desc * sizeof(struct e1000_tx_desc), 623 EM_DBA_ALIGN); 624 error = em_dma_malloc(adapter, tsize, &adapter->txdma); 625 if (error) { 626 device_printf(dev, "Unable to allocate tx_desc memory\n"); 627 goto fail; 628 } 629 adapter->tx_desc_base = adapter->txdma.dma_vaddr; 630 631 /* 632 * Allocate Receive Descriptor ring 633 */ 634 rsize = roundup2(adapter->num_rx_desc * sizeof(struct e1000_rx_desc), 635 EM_DBA_ALIGN); 636 error = em_dma_malloc(adapter, rsize, &adapter->rxdma); 637 if (error) { 638 device_printf(dev, "Unable to allocate rx_desc memory\n"); 639 goto fail; 640 } 641 adapter->rx_desc_base = adapter->rxdma.dma_vaddr; 642 643 /* Allocate multicast array memory. */ 644 adapter->mta = kmalloc(ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES, 645 M_DEVBUF, M_WAITOK); 646 647 /* Indicate SOL/IDER usage */ 648 if (e1000_check_reset_block(&adapter->hw)) { 649 device_printf(dev, 650 "PHY reset is blocked due to SOL/IDER session.\n"); 651 } 652 653 /* Disable EEE */ 654 adapter->hw.dev_spec.ich8lan.eee_disable = 1; 655 656 /* 657 * Start from a known state, this is important in reading the 658 * nvm and mac from that. 659 */ 660 e1000_reset_hw(&adapter->hw); 661 662 /* Make sure we have a good EEPROM before we read from it */ 663 if (e1000_validate_nvm_checksum(&adapter->hw) < 0) { 664 /* 665 * Some PCI-E parts fail the first check due to 666 * the link being in sleep state, call it again, 667 * if it fails a second time its a real issue. 668 */ 669 if (e1000_validate_nvm_checksum(&adapter->hw) < 0) { 670 device_printf(dev, 671 "The EEPROM Checksum Is Not Valid\n"); 672 error = EIO; 673 goto fail; 674 } 675 } 676 677 /* Copy the permanent MAC address out of the EEPROM */ 678 if (e1000_read_mac_addr(&adapter->hw) < 0) { 679 device_printf(dev, "EEPROM read error while reading MAC" 680 " address\n"); 681 error = EIO; 682 goto fail; 683 } 684 if (!em_is_valid_eaddr(adapter->hw.mac.addr)) { 685 device_printf(dev, "Invalid MAC address\n"); 686 error = EIO; 687 goto fail; 688 } 689 690 /* Disable ULP support */ 691 e1000_disable_ulp_lpt_lp(&adapter->hw, TRUE); 692 693 /* Allocate transmit descriptors and buffers */ 694 error = em_create_tx_ring(adapter); 695 if (error) { 696 device_printf(dev, "Could not setup transmit structures\n"); 697 goto fail; 698 } 699 700 /* Allocate receive descriptors and buffers */ 701 error = em_create_rx_ring(adapter); 702 if (error) { 703 device_printf(dev, "Could not setup receive structures\n"); 704 goto fail; 705 } 706 707 /* Manually turn off all interrupts */ 708 E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff); 709 710 /* Determine if we have to control management hardware */ 711 if (e1000_enable_mng_pass_thru(&adapter->hw)) 712 adapter->flags |= EM_FLAG_HAS_MGMT; 713 714 /* 715 * Setup Wake-on-Lan 716 */ 717 apme_mask = EM_EEPROM_APME; 718 eeprom_data = 0; 719 switch (adapter->hw.mac.type) { 720 case e1000_82542: 721 case e1000_82543: 722 break; 723 724 case e1000_82573: 725 case e1000_82583: 726 adapter->flags |= EM_FLAG_HAS_AMT; 727 /* FALL THROUGH */ 728 729 case e1000_82546: 730 case e1000_82546_rev_3: 731 case e1000_82571: 732 case e1000_82572: 733 case e1000_80003es2lan: 734 if (adapter->hw.bus.func == 1) { 735 e1000_read_nvm(&adapter->hw, 736 NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); 737 } else { 738 e1000_read_nvm(&adapter->hw, 739 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); 740 } 741 break; 742 743 case e1000_ich8lan: 744 case e1000_ich9lan: 745 case e1000_ich10lan: 746 case e1000_pchlan: 747 case e1000_pch2lan: 748 apme_mask = E1000_WUC_APME; 749 adapter->flags |= EM_FLAG_HAS_AMT; 750 eeprom_data = E1000_READ_REG(&adapter->hw, E1000_WUC); 751 break; 752 753 default: 754 e1000_read_nvm(&adapter->hw, 755 NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); 756 break; 757 } 758 if (eeprom_data & apme_mask) 759 adapter->wol = E1000_WUFC_MAG | E1000_WUFC_MC; 760 761 /* 762 * We have the eeprom settings, now apply the special cases 763 * where the eeprom may be wrong or the board won't support 764 * wake on lan on a particular port 765 */ 766 device_id = pci_get_device(dev); 767 switch (device_id) { 768 case E1000_DEV_ID_82546GB_PCIE: 769 adapter->wol = 0; 770 break; 771 772 case E1000_DEV_ID_82546EB_FIBER: 773 case E1000_DEV_ID_82546GB_FIBER: 774 case E1000_DEV_ID_82571EB_FIBER: 775 /* 776 * Wake events only supported on port A for dual fiber 777 * regardless of eeprom setting 778 */ 779 if (E1000_READ_REG(&adapter->hw, E1000_STATUS) & 780 E1000_STATUS_FUNC_1) 781 adapter->wol = 0; 782 break; 783 784 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: 785 case E1000_DEV_ID_82571EB_QUAD_COPPER: 786 case E1000_DEV_ID_82571EB_QUAD_FIBER: 787 case E1000_DEV_ID_82571EB_QUAD_COPPER_LP: 788 /* if quad port adapter, disable WoL on all but port A */ 789 if (em_global_quad_port_a != 0) 790 adapter->wol = 0; 791 /* Reset for multiple quad port adapters */ 792 if (++em_global_quad_port_a == 4) 793 em_global_quad_port_a = 0; 794 break; 795 } 796 797 /* XXX disable wol */ 798 adapter->wol = 0; 799 800 /* Setup flow control. */ 801 device_getenv_string(dev, "flow_ctrl", flowctrl, sizeof(flowctrl), 802 em_flowctrl); 803 adapter->ifm_flowctrl = ifmedia_str2ethfc(flowctrl); 804 if (adapter->hw.mac.type == e1000_pchlan) { 805 /* Only PAUSE reception is supported on PCH */ 806 adapter->ifm_flowctrl &= ~IFM_ETH_TXPAUSE; 807 } 808 809 /* Setup OS specific network interface */ 810 em_setup_ifp(adapter); 811 812 /* Add sysctl tree, must after em_setup_ifp() */ 813 em_add_sysctl(adapter); 814 815 #ifdef IFPOLL_ENABLE 816 /* Polling setup */ 817 ifpoll_compat_setup(&adapter->npoll, 818 device_get_sysctl_ctx(dev), device_get_sysctl_tree(dev), 819 device_get_unit(dev), ifp->if_serializer); 820 #endif 821 822 /* Reset the hardware */ 823 error = em_reset(adapter); 824 if (error) { 825 /* 826 * Some 82573 parts fail the first reset, call it again, 827 * if it fails a second time its a real issue. 828 */ 829 error = em_reset(adapter); 830 if (error) { 831 device_printf(dev, "Unable to reset the hardware\n"); 832 ether_ifdetach(ifp); 833 goto fail; 834 } 835 } 836 837 /* Initialize statistics */ 838 em_update_stats(adapter); 839 840 adapter->hw.mac.get_link_status = 1; 841 em_update_link_status(adapter); 842 843 /* Do we need workaround for 82544 PCI-X adapter? */ 844 if (adapter->hw.bus.type == e1000_bus_type_pcix && 845 adapter->hw.mac.type == e1000_82544) 846 adapter->pcix_82544 = TRUE; 847 else 848 adapter->pcix_82544 = FALSE; 849 850 if (adapter->pcix_82544) { 851 /* 852 * 82544 on PCI-X may split one TX segment 853 * into two TX descs, so we double its number 854 * of spare TX desc here. 855 */ 856 adapter->spare_tx_desc = 2 * EM_TX_SPARE; 857 } else { 858 adapter->spare_tx_desc = EM_TX_SPARE; 859 } 860 if (adapter->flags & EM_FLAG_TSO) 861 adapter->spare_tx_desc = EM_TX_SPARE_TSO; 862 adapter->tx_wreg_nsegs = EM_DEFAULT_TXWREG; 863 864 /* 865 * Keep following relationship between spare_tx_desc, oact_tx_desc 866 * and tx_int_nsegs: 867 * (spare_tx_desc + EM_TX_RESERVED) <= 868 * oact_tx_desc <= EM_TX_OACTIVE_MAX <= tx_int_nsegs 869 */ 870 adapter->oact_tx_desc = adapter->num_tx_desc / 8; 871 if (adapter->oact_tx_desc > EM_TX_OACTIVE_MAX) 872 adapter->oact_tx_desc = EM_TX_OACTIVE_MAX; 873 if (adapter->oact_tx_desc < adapter->spare_tx_desc + EM_TX_RESERVED) 874 adapter->oact_tx_desc = adapter->spare_tx_desc + EM_TX_RESERVED; 875 876 adapter->tx_int_nsegs = adapter->num_tx_desc / 16; 877 if (adapter->tx_int_nsegs < adapter->oact_tx_desc) 878 adapter->tx_int_nsegs = adapter->oact_tx_desc; 879 880 /* Non-AMT based hardware can now take control from firmware */ 881 if ((adapter->flags & (EM_FLAG_HAS_MGMT | EM_FLAG_HAS_AMT)) == 882 EM_FLAG_HAS_MGMT && adapter->hw.mac.type >= e1000_82571) 883 em_get_hw_control(adapter); 884 885 ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(adapter->intr_res)); 886 887 /* 888 * Missing Interrupt Following ICR read: 889 * 890 * 82571/82572 specification update errata #76 891 * 82573 specification update errata #31 892 * 82574 specification update errata #12 893 * 82583 specification update errata #4 894 */ 895 intr_func = em_intr; 896 if ((adapter->flags & EM_FLAG_SHARED_INTR) && 897 (adapter->hw.mac.type == e1000_82571 || 898 adapter->hw.mac.type == e1000_82572 || 899 adapter->hw.mac.type == e1000_82573 || 900 adapter->hw.mac.type == e1000_82574 || 901 adapter->hw.mac.type == e1000_82583)) 902 intr_func = em_intr_mask; 903 904 error = bus_setup_intr(dev, adapter->intr_res, INTR_MPSAFE, 905 intr_func, adapter, &adapter->intr_tag, 906 ifp->if_serializer); 907 if (error) { 908 device_printf(dev, "Failed to register interrupt handler"); 909 ether_ifdetach(ifp); 910 goto fail; 911 } 912 return (0); 913 fail: 914 em_detach(dev); 915 return (error); 916 } 917 918 static int 919 em_detach(device_t dev) 920 { 921 struct adapter *adapter = device_get_softc(dev); 922 923 if (device_is_attached(dev)) { 924 struct ifnet *ifp = &adapter->arpcom.ac_if; 925 926 lwkt_serialize_enter(ifp->if_serializer); 927 928 em_stop(adapter); 929 930 e1000_phy_hw_reset(&adapter->hw); 931 932 em_rel_mgmt(adapter); 933 em_rel_hw_control(adapter); 934 935 if (adapter->wol) { 936 E1000_WRITE_REG(&adapter->hw, E1000_WUC, 937 E1000_WUC_PME_EN); 938 E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol); 939 em_enable_wol(dev); 940 } 941 942 bus_teardown_intr(dev, adapter->intr_res, adapter->intr_tag); 943 944 lwkt_serialize_exit(ifp->if_serializer); 945 946 ether_ifdetach(ifp); 947 } else if (adapter->memory != NULL) { 948 em_rel_hw_control(adapter); 949 } 950 951 ifmedia_removeall(&adapter->media); 952 bus_generic_detach(dev); 953 954 em_free_pci_res(adapter); 955 956 em_destroy_tx_ring(adapter, adapter->num_tx_desc); 957 em_destroy_rx_ring(adapter, adapter->num_rx_desc); 958 959 /* Free Transmit Descriptor ring */ 960 if (adapter->tx_desc_base) 961 em_dma_free(adapter, &adapter->txdma); 962 963 /* Free Receive Descriptor ring */ 964 if (adapter->rx_desc_base) 965 em_dma_free(adapter, &adapter->rxdma); 966 967 /* Free top level busdma tag */ 968 if (adapter->parent_dtag != NULL) 969 bus_dma_tag_destroy(adapter->parent_dtag); 970 971 if (adapter->mta != NULL) 972 kfree(adapter->mta, M_DEVBUF); 973 974 return (0); 975 } 976 977 static int 978 em_shutdown(device_t dev) 979 { 980 return em_suspend(dev); 981 } 982 983 static int 984 em_suspend(device_t dev) 985 { 986 struct adapter *adapter = device_get_softc(dev); 987 struct ifnet *ifp = &adapter->arpcom.ac_if; 988 989 lwkt_serialize_enter(ifp->if_serializer); 990 991 em_stop(adapter); 992 993 em_rel_mgmt(adapter); 994 em_rel_hw_control(adapter); 995 996 if (adapter->wol) { 997 E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN); 998 E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol); 999 em_enable_wol(dev); 1000 } 1001 1002 lwkt_serialize_exit(ifp->if_serializer); 1003 1004 return bus_generic_suspend(dev); 1005 } 1006 1007 static int 1008 em_resume(device_t dev) 1009 { 1010 struct adapter *adapter = device_get_softc(dev); 1011 struct ifnet *ifp = &adapter->arpcom.ac_if; 1012 1013 lwkt_serialize_enter(ifp->if_serializer); 1014 1015 if (adapter->hw.mac.type == e1000_pch2lan) 1016 e1000_resume_workarounds_pchlan(&adapter->hw); 1017 1018 em_init(adapter); 1019 em_get_mgmt(adapter); 1020 if_devstart(ifp); 1021 1022 lwkt_serialize_exit(ifp->if_serializer); 1023 1024 return bus_generic_resume(dev); 1025 } 1026 1027 static void 1028 em_start(struct ifnet *ifp, struct ifaltq_subque *ifsq) 1029 { 1030 struct adapter *adapter = ifp->if_softc; 1031 struct mbuf *m_head; 1032 int idx = -1, nsegs = 0; 1033 1034 ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq); 1035 ASSERT_SERIALIZED(ifp->if_serializer); 1036 1037 if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd)) 1038 return; 1039 1040 if (!adapter->link_active) { 1041 ifq_purge(&ifp->if_snd); 1042 return; 1043 } 1044 1045 while (!ifq_is_empty(&ifp->if_snd)) { 1046 /* Now do we at least have a minimal? */ 1047 if (EM_IS_OACTIVE(adapter)) { 1048 em_tx_collect(adapter); 1049 if (EM_IS_OACTIVE(adapter)) { 1050 ifq_set_oactive(&ifp->if_snd); 1051 adapter->no_tx_desc_avail1++; 1052 break; 1053 } 1054 } 1055 1056 logif(pkt_txqueue); 1057 m_head = ifq_dequeue(&ifp->if_snd); 1058 if (m_head == NULL) 1059 break; 1060 1061 if (em_encap(adapter, &m_head, &nsegs, &idx)) { 1062 IFNET_STAT_INC(ifp, oerrors, 1); 1063 em_tx_collect(adapter); 1064 continue; 1065 } 1066 1067 /* 1068 * TX interrupt are aggressively aggregated, so increasing 1069 * opackets at TX interrupt time will make the opackets 1070 * statistics vastly inaccurate; we do the opackets increment 1071 * now. 1072 */ 1073 IFNET_STAT_INC(ifp, opackets, 1); 1074 1075 if (nsegs >= adapter->tx_wreg_nsegs && idx >= 0) { 1076 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), idx); 1077 nsegs = 0; 1078 idx = -1; 1079 } 1080 1081 /* Send a copy of the frame to the BPF listener */ 1082 ETHER_BPF_MTAP(ifp, m_head); 1083 1084 /* Set timeout in case hardware has problems transmitting. */ 1085 ifp->if_timer = EM_TX_TIMEOUT; 1086 } 1087 if (idx >= 0) 1088 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), idx); 1089 } 1090 1091 static int 1092 em_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr) 1093 { 1094 struct adapter *adapter = ifp->if_softc; 1095 struct ifreq *ifr = (struct ifreq *)data; 1096 uint16_t eeprom_data = 0; 1097 int max_frame_size, mask, reinit; 1098 int error = 0; 1099 1100 ASSERT_SERIALIZED(ifp->if_serializer); 1101 1102 switch (command) { 1103 case SIOCSIFMTU: 1104 switch (adapter->hw.mac.type) { 1105 case e1000_82573: 1106 /* 1107 * 82573 only supports jumbo frames 1108 * if ASPM is disabled. 1109 */ 1110 e1000_read_nvm(&adapter->hw, 1111 NVM_INIT_3GIO_3, 1, &eeprom_data); 1112 if (eeprom_data & NVM_WORD1A_ASPM_MASK) { 1113 max_frame_size = ETHER_MAX_LEN; 1114 break; 1115 } 1116 /* FALL THROUGH */ 1117 1118 /* Limit Jumbo Frame size */ 1119 case e1000_82571: 1120 case e1000_82572: 1121 case e1000_ich9lan: 1122 case e1000_ich10lan: 1123 case e1000_pch2lan: 1124 case e1000_pch_lpt: 1125 case e1000_pch_spt: 1126 case e1000_82574: 1127 case e1000_82583: 1128 case e1000_80003es2lan: 1129 max_frame_size = 9234; 1130 break; 1131 1132 case e1000_pchlan: 1133 max_frame_size = 4096; 1134 break; 1135 1136 /* Adapters that do not support jumbo frames */ 1137 case e1000_82542: 1138 case e1000_ich8lan: 1139 max_frame_size = ETHER_MAX_LEN; 1140 break; 1141 1142 default: 1143 max_frame_size = MAX_JUMBO_FRAME_SIZE; 1144 break; 1145 } 1146 if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN - 1147 ETHER_CRC_LEN) { 1148 error = EINVAL; 1149 break; 1150 } 1151 1152 ifp->if_mtu = ifr->ifr_mtu; 1153 adapter->hw.mac.max_frame_size = 1154 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 1155 1156 if (ifp->if_flags & IFF_RUNNING) 1157 em_init(adapter); 1158 break; 1159 1160 case SIOCSIFFLAGS: 1161 if (ifp->if_flags & IFF_UP) { 1162 if ((ifp->if_flags & IFF_RUNNING)) { 1163 if ((ifp->if_flags ^ adapter->if_flags) & 1164 (IFF_PROMISC | IFF_ALLMULTI)) { 1165 em_disable_promisc(adapter); 1166 em_set_promisc(adapter); 1167 } 1168 } else { 1169 em_init(adapter); 1170 } 1171 } else if (ifp->if_flags & IFF_RUNNING) { 1172 em_stop(adapter); 1173 } 1174 adapter->if_flags = ifp->if_flags; 1175 break; 1176 1177 case SIOCADDMULTI: 1178 case SIOCDELMULTI: 1179 if (ifp->if_flags & IFF_RUNNING) { 1180 em_disable_intr(adapter); 1181 em_set_multi(adapter); 1182 if (adapter->hw.mac.type == e1000_82542 && 1183 adapter->hw.revision_id == E1000_REVISION_2) 1184 em_init_rx_unit(adapter); 1185 #ifdef IFPOLL_ENABLE 1186 if (!(ifp->if_flags & IFF_NPOLLING)) 1187 #endif 1188 em_enable_intr(adapter); 1189 } 1190 break; 1191 1192 case SIOCSIFMEDIA: 1193 /* Check SOL/IDER usage */ 1194 if (e1000_check_reset_block(&adapter->hw)) { 1195 device_printf(adapter->dev, "Media change is" 1196 " blocked due to SOL/IDER session.\n"); 1197 break; 1198 } 1199 /* FALL THROUGH */ 1200 1201 case SIOCGIFMEDIA: 1202 error = ifmedia_ioctl(ifp, ifr, &adapter->media, command); 1203 break; 1204 1205 case SIOCSIFCAP: 1206 reinit = 0; 1207 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 1208 if (mask & IFCAP_RXCSUM) { 1209 ifp->if_capenable ^= IFCAP_RXCSUM; 1210 reinit = 1; 1211 } 1212 if (mask & IFCAP_TXCSUM) { 1213 ifp->if_capenable ^= IFCAP_TXCSUM; 1214 if (ifp->if_capenable & IFCAP_TXCSUM) 1215 ifp->if_hwassist |= EM_CSUM_FEATURES; 1216 else 1217 ifp->if_hwassist &= ~EM_CSUM_FEATURES; 1218 } 1219 if (mask & IFCAP_TSO) { 1220 ifp->if_capenable ^= IFCAP_TSO; 1221 if (ifp->if_capenable & IFCAP_TSO) 1222 ifp->if_hwassist |= CSUM_TSO; 1223 else 1224 ifp->if_hwassist &= ~CSUM_TSO; 1225 } 1226 if (mask & IFCAP_VLAN_HWTAGGING) { 1227 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 1228 reinit = 1; 1229 } 1230 if (reinit && (ifp->if_flags & IFF_RUNNING)) 1231 em_init(adapter); 1232 break; 1233 1234 default: 1235 error = ether_ioctl(ifp, command, data); 1236 break; 1237 } 1238 return (error); 1239 } 1240 1241 static void 1242 em_watchdog(struct ifnet *ifp) 1243 { 1244 struct adapter *adapter = ifp->if_softc; 1245 1246 ASSERT_SERIALIZED(ifp->if_serializer); 1247 1248 /* 1249 * The timer is set to 5 every time start queues a packet. 1250 * Then txeof keeps resetting it as long as it cleans at 1251 * least one descriptor. 1252 * Finally, anytime all descriptors are clean the timer is 1253 * set to 0. 1254 */ 1255 1256 if (E1000_READ_REG(&adapter->hw, E1000_TDT(0)) == 1257 E1000_READ_REG(&adapter->hw, E1000_TDH(0))) { 1258 /* 1259 * If we reach here, all TX jobs are completed and 1260 * the TX engine should have been idled for some time. 1261 * We don't need to call if_devstart() here. 1262 */ 1263 ifq_clr_oactive(&ifp->if_snd); 1264 ifp->if_timer = 0; 1265 return; 1266 } 1267 1268 /* 1269 * If we are in this routine because of pause frames, then 1270 * don't reset the hardware. 1271 */ 1272 if (E1000_READ_REG(&adapter->hw, E1000_STATUS) & 1273 E1000_STATUS_TXOFF) { 1274 ifp->if_timer = EM_TX_TIMEOUT; 1275 return; 1276 } 1277 1278 if (e1000_check_for_link(&adapter->hw) == 0) 1279 if_printf(ifp, "watchdog timeout -- resetting\n"); 1280 1281 IFNET_STAT_INC(ifp, oerrors, 1); 1282 adapter->watchdog_events++; 1283 1284 em_init(adapter); 1285 1286 if (!ifq_is_empty(&ifp->if_snd)) 1287 if_devstart(ifp); 1288 } 1289 1290 static void 1291 em_init(void *xsc) 1292 { 1293 struct adapter *adapter = xsc; 1294 struct ifnet *ifp = &adapter->arpcom.ac_if; 1295 device_t dev = adapter->dev; 1296 1297 ASSERT_SERIALIZED(ifp->if_serializer); 1298 1299 em_stop(adapter); 1300 1301 /* Get the latest mac address, User can use a LAA */ 1302 bcopy(IF_LLADDR(ifp), adapter->hw.mac.addr, ETHER_ADDR_LEN); 1303 1304 /* Put the address into the Receive Address Array */ 1305 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0); 1306 1307 /* 1308 * With the 82571 adapter, RAR[0] may be overwritten 1309 * when the other port is reset, we make a duplicate 1310 * in RAR[14] for that eventuality, this assures 1311 * the interface continues to function. 1312 */ 1313 if (adapter->hw.mac.type == e1000_82571) { 1314 e1000_set_laa_state_82571(&adapter->hw, TRUE); 1315 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 1316 E1000_RAR_ENTRIES - 1); 1317 } 1318 1319 /* Reset the hardware */ 1320 if (em_reset(adapter)) { 1321 device_printf(dev, "Unable to reset the hardware\n"); 1322 /* XXX em_stop()? */ 1323 return; 1324 } 1325 em_update_link_status(adapter); 1326 1327 /* Setup VLAN support, basic and offload if available */ 1328 E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN); 1329 1330 if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) { 1331 uint32_t ctrl; 1332 1333 ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL); 1334 ctrl |= E1000_CTRL_VME; 1335 E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl); 1336 } 1337 1338 /* Configure for OS presence */ 1339 em_get_mgmt(adapter); 1340 1341 /* Prepare transmit descriptors and buffers */ 1342 em_init_tx_ring(adapter); 1343 em_init_tx_unit(adapter); 1344 1345 /* Setup Multicast table */ 1346 em_set_multi(adapter); 1347 1348 /* Prepare receive descriptors and buffers */ 1349 if (em_init_rx_ring(adapter)) { 1350 device_printf(dev, "Could not setup receive structures\n"); 1351 em_stop(adapter); 1352 return; 1353 } 1354 em_init_rx_unit(adapter); 1355 1356 /* Don't lose promiscuous settings */ 1357 em_set_promisc(adapter); 1358 1359 ifp->if_flags |= IFF_RUNNING; 1360 ifq_clr_oactive(&ifp->if_snd); 1361 1362 callout_reset(&adapter->timer, hz, em_timer, adapter); 1363 e1000_clear_hw_cntrs_base_generic(&adapter->hw); 1364 1365 /* MSI/X configuration for 82574 */ 1366 if (adapter->hw.mac.type == e1000_82574) { 1367 int tmp; 1368 1369 tmp = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT); 1370 tmp |= E1000_CTRL_EXT_PBA_CLR; 1371 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, tmp); 1372 /* 1373 * XXX MSIX 1374 * Set the IVAR - interrupt vector routing. 1375 * Each nibble represents a vector, high bit 1376 * is enable, other 3 bits are the MSIX table 1377 * entry, we map RXQ0 to 0, TXQ0 to 1, and 1378 * Link (other) to 2, hence the magic number. 1379 */ 1380 E1000_WRITE_REG(&adapter->hw, E1000_IVAR, 0x800A0908); 1381 } 1382 1383 #ifdef IFPOLL_ENABLE 1384 /* 1385 * Only enable interrupts if we are not polling, make sure 1386 * they are off otherwise. 1387 */ 1388 if (ifp->if_flags & IFF_NPOLLING) 1389 em_disable_intr(adapter); 1390 else 1391 #endif /* IFPOLL_ENABLE */ 1392 em_enable_intr(adapter); 1393 1394 /* AMT based hardware can now take control from firmware */ 1395 if ((adapter->flags & (EM_FLAG_HAS_MGMT | EM_FLAG_HAS_AMT)) == 1396 (EM_FLAG_HAS_MGMT | EM_FLAG_HAS_AMT) && 1397 adapter->hw.mac.type >= e1000_82571) 1398 em_get_hw_control(adapter); 1399 } 1400 1401 #ifdef IFPOLL_ENABLE 1402 1403 static void 1404 em_npoll_compat(struct ifnet *ifp, void *arg __unused, int count) 1405 { 1406 struct adapter *adapter = ifp->if_softc; 1407 1408 ASSERT_SERIALIZED(ifp->if_serializer); 1409 1410 if (adapter->npoll.ifpc_stcount-- == 0) { 1411 uint32_t reg_icr; 1412 1413 adapter->npoll.ifpc_stcount = adapter->npoll.ifpc_stfrac; 1414 1415 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR); 1416 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { 1417 callout_stop(&adapter->timer); 1418 adapter->hw.mac.get_link_status = 1; 1419 em_update_link_status(adapter); 1420 callout_reset(&adapter->timer, hz, em_timer, adapter); 1421 } 1422 } 1423 1424 em_rxeof(adapter, count); 1425 em_txeof(adapter); 1426 1427 if (!ifq_is_empty(&ifp->if_snd)) 1428 if_devstart(ifp); 1429 } 1430 1431 static void 1432 em_npoll(struct ifnet *ifp, struct ifpoll_info *info) 1433 { 1434 struct adapter *adapter = ifp->if_softc; 1435 1436 ASSERT_SERIALIZED(ifp->if_serializer); 1437 1438 if (info != NULL) { 1439 int cpuid = adapter->npoll.ifpc_cpuid; 1440 1441 info->ifpi_rx[cpuid].poll_func = em_npoll_compat; 1442 info->ifpi_rx[cpuid].arg = NULL; 1443 info->ifpi_rx[cpuid].serializer = ifp->if_serializer; 1444 1445 if (ifp->if_flags & IFF_RUNNING) 1446 em_disable_intr(adapter); 1447 ifq_set_cpuid(&ifp->if_snd, cpuid); 1448 } else { 1449 if (ifp->if_flags & IFF_RUNNING) 1450 em_enable_intr(adapter); 1451 ifq_set_cpuid(&ifp->if_snd, rman_get_cpuid(adapter->intr_res)); 1452 } 1453 } 1454 1455 #endif /* IFPOLL_ENABLE */ 1456 1457 static void 1458 em_intr(void *xsc) 1459 { 1460 em_intr_body(xsc, TRUE); 1461 } 1462 1463 static void 1464 em_intr_body(struct adapter *adapter, boolean_t chk_asserted) 1465 { 1466 struct ifnet *ifp = &adapter->arpcom.ac_if; 1467 uint32_t reg_icr; 1468 1469 logif(intr_beg); 1470 ASSERT_SERIALIZED(ifp->if_serializer); 1471 1472 reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR); 1473 1474 if (chk_asserted && 1475 ((adapter->hw.mac.type >= e1000_82571 && 1476 (reg_icr & E1000_ICR_INT_ASSERTED) == 0) || 1477 reg_icr == 0)) { 1478 logif(intr_end); 1479 return; 1480 } 1481 1482 /* 1483 * XXX: some laptops trigger several spurious interrupts 1484 * on em(4) when in the resume cycle. The ICR register 1485 * reports all-ones value in this case. Processing such 1486 * interrupts would lead to a freeze. I don't know why. 1487 */ 1488 if (reg_icr == 0xffffffff) { 1489 logif(intr_end); 1490 return; 1491 } 1492 1493 if (ifp->if_flags & IFF_RUNNING) { 1494 if (reg_icr & 1495 (E1000_ICR_RXT0 | E1000_ICR_RXDMT0 | E1000_ICR_RXO)) 1496 em_rxeof(adapter, -1); 1497 if (reg_icr & E1000_ICR_TXDW) { 1498 em_txeof(adapter); 1499 if (!ifq_is_empty(&ifp->if_snd)) 1500 if_devstart(ifp); 1501 } 1502 } 1503 1504 /* Link status change */ 1505 if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { 1506 callout_stop(&adapter->timer); 1507 adapter->hw.mac.get_link_status = 1; 1508 em_update_link_status(adapter); 1509 1510 /* Deal with TX cruft when link lost */ 1511 em_tx_purge(adapter); 1512 1513 callout_reset(&adapter->timer, hz, em_timer, adapter); 1514 } 1515 1516 if (reg_icr & E1000_ICR_RXO) 1517 adapter->rx_overruns++; 1518 1519 logif(intr_end); 1520 } 1521 1522 static void 1523 em_intr_mask(void *xsc) 1524 { 1525 struct adapter *adapter = xsc; 1526 1527 E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff); 1528 /* 1529 * NOTE: 1530 * ICR.INT_ASSERTED bit will never be set if IMS is 0, 1531 * so don't check it. 1532 */ 1533 em_intr_body(adapter, FALSE); 1534 E1000_WRITE_REG(&adapter->hw, E1000_IMS, IMS_ENABLE_MASK); 1535 } 1536 1537 static void 1538 em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 1539 { 1540 struct adapter *adapter = ifp->if_softc; 1541 1542 ASSERT_SERIALIZED(ifp->if_serializer); 1543 1544 em_update_link_status(adapter); 1545 1546 ifmr->ifm_status = IFM_AVALID; 1547 ifmr->ifm_active = IFM_ETHER; 1548 1549 if (!adapter->link_active) { 1550 if (adapter->hw.mac.autoneg) 1551 ifmr->ifm_active |= IFM_NONE; 1552 else 1553 ifmr->ifm_active = adapter->media.ifm_media; 1554 return; 1555 } 1556 1557 ifmr->ifm_status |= IFM_ACTIVE; 1558 if (adapter->ifm_flowctrl & IFM_ETH_FORCEPAUSE) 1559 ifmr->ifm_active |= adapter->ifm_flowctrl; 1560 1561 if (adapter->hw.phy.media_type == e1000_media_type_fiber || 1562 adapter->hw.phy.media_type == e1000_media_type_internal_serdes) { 1563 u_char fiber_type = IFM_1000_SX; 1564 1565 if (adapter->hw.mac.type == e1000_82545) 1566 fiber_type = IFM_1000_LX; 1567 ifmr->ifm_active |= fiber_type | IFM_FDX; 1568 } else { 1569 switch (adapter->link_speed) { 1570 case 10: 1571 ifmr->ifm_active |= IFM_10_T; 1572 break; 1573 case 100: 1574 ifmr->ifm_active |= IFM_100_TX; 1575 break; 1576 1577 case 1000: 1578 ifmr->ifm_active |= IFM_1000_T; 1579 break; 1580 } 1581 if (adapter->link_duplex == FULL_DUPLEX) 1582 ifmr->ifm_active |= IFM_FDX; 1583 else 1584 ifmr->ifm_active |= IFM_HDX; 1585 } 1586 if (ifmr->ifm_active & IFM_FDX) { 1587 ifmr->ifm_active |= 1588 e1000_fc2ifmedia(adapter->hw.fc.current_mode); 1589 } 1590 } 1591 1592 static int 1593 em_media_change(struct ifnet *ifp) 1594 { 1595 struct adapter *adapter = ifp->if_softc; 1596 struct ifmedia *ifm = &adapter->media; 1597 1598 ASSERT_SERIALIZED(ifp->if_serializer); 1599 1600 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 1601 return (EINVAL); 1602 1603 if (adapter->hw.mac.type == e1000_pchlan && 1604 (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_TXPAUSE)) { 1605 if (bootverbose) 1606 if_printf(ifp, "TX PAUSE is not supported on PCH\n"); 1607 return EINVAL; 1608 } 1609 1610 switch (IFM_SUBTYPE(ifm->ifm_media)) { 1611 case IFM_AUTO: 1612 adapter->hw.mac.autoneg = DO_AUTO_NEG; 1613 adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT; 1614 break; 1615 1616 case IFM_1000_LX: 1617 case IFM_1000_SX: 1618 case IFM_1000_T: 1619 adapter->hw.mac.autoneg = DO_AUTO_NEG; 1620 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; 1621 break; 1622 1623 case IFM_100_TX: 1624 if (IFM_OPTIONS(ifm->ifm_media) & IFM_FDX) { 1625 adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL; 1626 } else { 1627 if (IFM_OPTIONS(ifm->ifm_media) & 1628 (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) { 1629 if (bootverbose) { 1630 if_printf(ifp, "Flow control is not " 1631 "allowed for half-duplex\n"); 1632 } 1633 return EINVAL; 1634 } 1635 adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF; 1636 } 1637 adapter->hw.mac.autoneg = FALSE; 1638 adapter->hw.phy.autoneg_advertised = 0; 1639 break; 1640 1641 case IFM_10_T: 1642 if (IFM_OPTIONS(ifm->ifm_media) & IFM_FDX) { 1643 adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL; 1644 } else { 1645 if (IFM_OPTIONS(ifm->ifm_media) & 1646 (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) { 1647 if (bootverbose) { 1648 if_printf(ifp, "Flow control is not " 1649 "allowed for half-duplex\n"); 1650 } 1651 return EINVAL; 1652 } 1653 adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF; 1654 } 1655 adapter->hw.mac.autoneg = FALSE; 1656 adapter->hw.phy.autoneg_advertised = 0; 1657 break; 1658 1659 default: 1660 if (bootverbose) { 1661 if_printf(ifp, "Unsupported media type %d\n", 1662 IFM_SUBTYPE(ifm->ifm_media)); 1663 } 1664 return EINVAL; 1665 } 1666 adapter->ifm_flowctrl = ifm->ifm_media & IFM_ETH_FCMASK; 1667 1668 if (ifp->if_flags & IFF_RUNNING) 1669 em_init(adapter); 1670 1671 return (0); 1672 } 1673 1674 static int 1675 em_encap(struct adapter *adapter, struct mbuf **m_headp, 1676 int *segs_used, int *idx) 1677 { 1678 bus_dma_segment_t segs[EM_MAX_SCATTER]; 1679 bus_dmamap_t map; 1680 struct em_buffer *tx_buffer, *tx_buffer_mapped; 1681 struct e1000_tx_desc *ctxd = NULL; 1682 struct mbuf *m_head = *m_headp; 1683 uint32_t txd_upper, txd_lower, txd_used, cmd = 0; 1684 int maxsegs, nsegs, i, j, first, last = 0, error; 1685 1686 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 1687 error = em_tso_pullup(adapter, m_headp); 1688 if (error) 1689 return error; 1690 m_head = *m_headp; 1691 } 1692 1693 txd_upper = txd_lower = 0; 1694 txd_used = 0; 1695 1696 /* 1697 * Capture the first descriptor index, this descriptor 1698 * will have the index of the EOP which is the only one 1699 * that now gets a DONE bit writeback. 1700 */ 1701 first = adapter->next_avail_tx_desc; 1702 tx_buffer = &adapter->tx_buffer_area[first]; 1703 tx_buffer_mapped = tx_buffer; 1704 map = tx_buffer->map; 1705 1706 maxsegs = adapter->num_tx_desc_avail - EM_TX_RESERVED; 1707 KASSERT(maxsegs >= adapter->spare_tx_desc, 1708 ("not enough spare TX desc")); 1709 if (adapter->pcix_82544) { 1710 /* Half it; see the comment in em_attach() */ 1711 maxsegs >>= 1; 1712 } 1713 if (maxsegs > EM_MAX_SCATTER) 1714 maxsegs = EM_MAX_SCATTER; 1715 1716 error = bus_dmamap_load_mbuf_defrag(adapter->txtag, map, m_headp, 1717 segs, maxsegs, &nsegs, BUS_DMA_NOWAIT); 1718 if (error) { 1719 if (error == ENOBUFS) 1720 adapter->mbuf_alloc_failed++; 1721 else 1722 adapter->no_tx_dma_setup++; 1723 1724 m_freem(*m_headp); 1725 *m_headp = NULL; 1726 return error; 1727 } 1728 bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE); 1729 1730 m_head = *m_headp; 1731 adapter->tx_nsegs += nsegs; 1732 *segs_used += nsegs; 1733 1734 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 1735 /* TSO will consume one TX desc */ 1736 i = em_tso_setup(adapter, m_head, &txd_upper, &txd_lower); 1737 adapter->tx_nsegs += i; 1738 *segs_used += i; 1739 } else if (m_head->m_pkthdr.csum_flags & EM_CSUM_FEATURES) { 1740 /* TX csum offloading will consume one TX desc */ 1741 i = em_txcsum(adapter, m_head, &txd_upper, &txd_lower); 1742 adapter->tx_nsegs += i; 1743 *segs_used += i; 1744 } 1745 1746 /* Handle VLAN tag */ 1747 if (m_head->m_flags & M_VLANTAG) { 1748 /* Set the vlan id. */ 1749 txd_upper |= (htole16(m_head->m_pkthdr.ether_vlantag) << 16); 1750 /* Tell hardware to add tag */ 1751 txd_lower |= htole32(E1000_TXD_CMD_VLE); 1752 } 1753 1754 i = adapter->next_avail_tx_desc; 1755 1756 /* Set up our transmit descriptors */ 1757 for (j = 0; j < nsegs; j++) { 1758 /* If adapter is 82544 and on PCIX bus */ 1759 if(adapter->pcix_82544) { 1760 DESC_ARRAY desc_array; 1761 uint32_t array_elements, counter; 1762 1763 /* 1764 * Check the Address and Length combination and 1765 * split the data accordingly 1766 */ 1767 array_elements = em_82544_fill_desc(segs[j].ds_addr, 1768 segs[j].ds_len, &desc_array); 1769 for (counter = 0; counter < array_elements; counter++) { 1770 KKASSERT(txd_used < adapter->num_tx_desc_avail); 1771 1772 tx_buffer = &adapter->tx_buffer_area[i]; 1773 ctxd = &adapter->tx_desc_base[i]; 1774 1775 ctxd->buffer_addr = htole64( 1776 desc_array.descriptor[counter].address); 1777 ctxd->lower.data = htole32( 1778 E1000_TXD_CMD_IFCS | txd_lower | 1779 desc_array.descriptor[counter].length); 1780 ctxd->upper.data = htole32(txd_upper); 1781 1782 last = i; 1783 if (++i == adapter->num_tx_desc) 1784 i = 0; 1785 1786 txd_used++; 1787 } 1788 } else { 1789 tx_buffer = &adapter->tx_buffer_area[i]; 1790 ctxd = &adapter->tx_desc_base[i]; 1791 1792 ctxd->buffer_addr = htole64(segs[j].ds_addr); 1793 ctxd->lower.data = htole32(E1000_TXD_CMD_IFCS | 1794 txd_lower | segs[j].ds_len); 1795 ctxd->upper.data = htole32(txd_upper); 1796 1797 last = i; 1798 if (++i == adapter->num_tx_desc) 1799 i = 0; 1800 } 1801 } 1802 1803 adapter->next_avail_tx_desc = i; 1804 if (adapter->pcix_82544) { 1805 KKASSERT(adapter->num_tx_desc_avail > txd_used); 1806 adapter->num_tx_desc_avail -= txd_used; 1807 } else { 1808 KKASSERT(adapter->num_tx_desc_avail > nsegs); 1809 adapter->num_tx_desc_avail -= nsegs; 1810 } 1811 1812 tx_buffer->m_head = m_head; 1813 tx_buffer_mapped->map = tx_buffer->map; 1814 tx_buffer->map = map; 1815 1816 if (adapter->tx_nsegs >= adapter->tx_int_nsegs) { 1817 adapter->tx_nsegs = 0; 1818 1819 /* 1820 * Report Status (RS) is turned on 1821 * every tx_int_nsegs descriptors. 1822 */ 1823 cmd = E1000_TXD_CMD_RS; 1824 1825 /* 1826 * Keep track of the descriptor, which will 1827 * be written back by hardware. 1828 */ 1829 adapter->tx_dd[adapter->tx_dd_tail] = last; 1830 EM_INC_TXDD_IDX(adapter->tx_dd_tail); 1831 KKASSERT(adapter->tx_dd_tail != adapter->tx_dd_head); 1832 } 1833 1834 /* 1835 * Last Descriptor of Packet needs End Of Packet (EOP) 1836 */ 1837 ctxd->lower.data |= htole32(E1000_TXD_CMD_EOP | cmd); 1838 1839 if (adapter->hw.mac.type == e1000_82547) { 1840 /* 1841 * Advance the Transmit Descriptor Tail (TDT), this tells the 1842 * E1000 that this frame is available to transmit. 1843 */ 1844 if (adapter->link_duplex == HALF_DUPLEX) { 1845 em_82547_move_tail_serialized(adapter); 1846 } else { 1847 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), i); 1848 em_82547_update_fifo_head(adapter, 1849 m_head->m_pkthdr.len); 1850 } 1851 } else { 1852 /* 1853 * Defer TDT updating, until enough descriptors are setup 1854 */ 1855 *idx = i; 1856 } 1857 return (0); 1858 } 1859 1860 /* 1861 * 82547 workaround to avoid controller hang in half-duplex environment. 1862 * The workaround is to avoid queuing a large packet that would span 1863 * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers 1864 * in this case. We do that only when FIFO is quiescent. 1865 */ 1866 static void 1867 em_82547_move_tail_serialized(struct adapter *adapter) 1868 { 1869 struct e1000_tx_desc *tx_desc; 1870 uint16_t hw_tdt, sw_tdt, length = 0; 1871 bool eop = 0; 1872 1873 ASSERT_SERIALIZED(adapter->arpcom.ac_if.if_serializer); 1874 1875 hw_tdt = E1000_READ_REG(&adapter->hw, E1000_TDT(0)); 1876 sw_tdt = adapter->next_avail_tx_desc; 1877 1878 while (hw_tdt != sw_tdt) { 1879 tx_desc = &adapter->tx_desc_base[hw_tdt]; 1880 length += tx_desc->lower.flags.length; 1881 eop = tx_desc->lower.data & E1000_TXD_CMD_EOP; 1882 if (++hw_tdt == adapter->num_tx_desc) 1883 hw_tdt = 0; 1884 1885 if (eop) { 1886 if (em_82547_fifo_workaround(adapter, length)) { 1887 adapter->tx_fifo_wrk_cnt++; 1888 callout_reset(&adapter->tx_fifo_timer, 1, 1889 em_82547_move_tail, adapter); 1890 break; 1891 } 1892 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), hw_tdt); 1893 em_82547_update_fifo_head(adapter, length); 1894 length = 0; 1895 } 1896 } 1897 } 1898 1899 static void 1900 em_82547_move_tail(void *xsc) 1901 { 1902 struct adapter *adapter = xsc; 1903 struct ifnet *ifp = &adapter->arpcom.ac_if; 1904 1905 lwkt_serialize_enter(ifp->if_serializer); 1906 em_82547_move_tail_serialized(adapter); 1907 lwkt_serialize_exit(ifp->if_serializer); 1908 } 1909 1910 static int 1911 em_82547_fifo_workaround(struct adapter *adapter, int len) 1912 { 1913 int fifo_space, fifo_pkt_len; 1914 1915 fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR); 1916 1917 if (adapter->link_duplex == HALF_DUPLEX) { 1918 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head; 1919 1920 if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) { 1921 if (em_82547_tx_fifo_reset(adapter)) 1922 return (0); 1923 else 1924 return (1); 1925 } 1926 } 1927 return (0); 1928 } 1929 1930 static void 1931 em_82547_update_fifo_head(struct adapter *adapter, int len) 1932 { 1933 int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR); 1934 1935 /* tx_fifo_head is always 16 byte aligned */ 1936 adapter->tx_fifo_head += fifo_pkt_len; 1937 if (adapter->tx_fifo_head >= adapter->tx_fifo_size) 1938 adapter->tx_fifo_head -= adapter->tx_fifo_size; 1939 } 1940 1941 static int 1942 em_82547_tx_fifo_reset(struct adapter *adapter) 1943 { 1944 uint32_t tctl; 1945 1946 if ((E1000_READ_REG(&adapter->hw, E1000_TDT(0)) == 1947 E1000_READ_REG(&adapter->hw, E1000_TDH(0))) && 1948 (E1000_READ_REG(&adapter->hw, E1000_TDFT) == 1949 E1000_READ_REG(&adapter->hw, E1000_TDFH)) && 1950 (E1000_READ_REG(&adapter->hw, E1000_TDFTS) == 1951 E1000_READ_REG(&adapter->hw, E1000_TDFHS)) && 1952 (E1000_READ_REG(&adapter->hw, E1000_TDFPC) == 0)) { 1953 /* Disable TX unit */ 1954 tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL); 1955 E1000_WRITE_REG(&adapter->hw, E1000_TCTL, 1956 tctl & ~E1000_TCTL_EN); 1957 1958 /* Reset FIFO pointers */ 1959 E1000_WRITE_REG(&adapter->hw, E1000_TDFT, 1960 adapter->tx_head_addr); 1961 E1000_WRITE_REG(&adapter->hw, E1000_TDFH, 1962 adapter->tx_head_addr); 1963 E1000_WRITE_REG(&adapter->hw, E1000_TDFTS, 1964 adapter->tx_head_addr); 1965 E1000_WRITE_REG(&adapter->hw, E1000_TDFHS, 1966 adapter->tx_head_addr); 1967 1968 /* Re-enable TX unit */ 1969 E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl); 1970 E1000_WRITE_FLUSH(&adapter->hw); 1971 1972 adapter->tx_fifo_head = 0; 1973 adapter->tx_fifo_reset_cnt++; 1974 1975 return (TRUE); 1976 } else { 1977 return (FALSE); 1978 } 1979 } 1980 1981 static void 1982 em_set_promisc(struct adapter *adapter) 1983 { 1984 struct ifnet *ifp = &adapter->arpcom.ac_if; 1985 uint32_t reg_rctl; 1986 1987 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); 1988 1989 if (ifp->if_flags & IFF_PROMISC) { 1990 reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); 1991 /* Turn this on if you want to see bad packets */ 1992 if (em_debug_sbp) 1993 reg_rctl |= E1000_RCTL_SBP; 1994 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); 1995 } else if (ifp->if_flags & IFF_ALLMULTI) { 1996 reg_rctl |= E1000_RCTL_MPE; 1997 reg_rctl &= ~E1000_RCTL_UPE; 1998 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); 1999 } 2000 } 2001 2002 static void 2003 em_disable_promisc(struct adapter *adapter) 2004 { 2005 uint32_t reg_rctl; 2006 2007 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); 2008 2009 reg_rctl &= ~E1000_RCTL_UPE; 2010 reg_rctl &= ~E1000_RCTL_MPE; 2011 reg_rctl &= ~E1000_RCTL_SBP; 2012 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); 2013 } 2014 2015 static void 2016 em_set_multi(struct adapter *adapter) 2017 { 2018 struct ifnet *ifp = &adapter->arpcom.ac_if; 2019 struct ifmultiaddr *ifma; 2020 uint32_t reg_rctl = 0; 2021 uint8_t *mta; 2022 int mcnt = 0; 2023 2024 mta = adapter->mta; 2025 bzero(mta, ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES); 2026 2027 if (adapter->hw.mac.type == e1000_82542 && 2028 adapter->hw.revision_id == E1000_REVISION_2) { 2029 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); 2030 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) 2031 e1000_pci_clear_mwi(&adapter->hw); 2032 reg_rctl |= E1000_RCTL_RST; 2033 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); 2034 msec_delay(5); 2035 } 2036 2037 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2038 if (ifma->ifma_addr->sa_family != AF_LINK) 2039 continue; 2040 2041 if (mcnt == MAX_NUM_MULTICAST_ADDRESSES) 2042 break; 2043 2044 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 2045 &mta[mcnt * ETHER_ADDR_LEN], ETHER_ADDR_LEN); 2046 mcnt++; 2047 } 2048 2049 if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) { 2050 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); 2051 reg_rctl |= E1000_RCTL_MPE; 2052 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); 2053 } else { 2054 e1000_update_mc_addr_list(&adapter->hw, mta, mcnt); 2055 } 2056 2057 if (adapter->hw.mac.type == e1000_82542 && 2058 adapter->hw.revision_id == E1000_REVISION_2) { 2059 reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); 2060 reg_rctl &= ~E1000_RCTL_RST; 2061 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); 2062 msec_delay(5); 2063 if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) 2064 e1000_pci_set_mwi(&adapter->hw); 2065 } 2066 } 2067 2068 /* 2069 * This routine checks for link status and updates statistics. 2070 */ 2071 static void 2072 em_timer(void *xsc) 2073 { 2074 struct adapter *adapter = xsc; 2075 struct ifnet *ifp = &adapter->arpcom.ac_if; 2076 2077 lwkt_serialize_enter(ifp->if_serializer); 2078 2079 em_update_link_status(adapter); 2080 em_update_stats(adapter); 2081 2082 /* Reset LAA into RAR[0] on 82571 */ 2083 if (e1000_get_laa_state_82571(&adapter->hw) == TRUE) 2084 e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0); 2085 2086 if (em_display_debug_stats && (ifp->if_flags & IFF_RUNNING)) 2087 em_print_hw_stats(adapter); 2088 2089 em_smartspeed(adapter); 2090 2091 callout_reset(&adapter->timer, hz, em_timer, adapter); 2092 2093 lwkt_serialize_exit(ifp->if_serializer); 2094 } 2095 2096 static void 2097 em_update_link_status(struct adapter *adapter) 2098 { 2099 struct e1000_hw *hw = &adapter->hw; 2100 struct ifnet *ifp = &adapter->arpcom.ac_if; 2101 device_t dev = adapter->dev; 2102 uint32_t link_check = 0; 2103 2104 /* Get the cached link value or read phy for real */ 2105 switch (hw->phy.media_type) { 2106 case e1000_media_type_copper: 2107 if (hw->mac.get_link_status) { 2108 /* Do the work to read phy */ 2109 e1000_check_for_link(hw); 2110 link_check = !hw->mac.get_link_status; 2111 if (link_check) /* ESB2 fix */ 2112 e1000_cfg_on_link_up(hw); 2113 } else { 2114 link_check = TRUE; 2115 } 2116 break; 2117 2118 case e1000_media_type_fiber: 2119 e1000_check_for_link(hw); 2120 link_check = 2121 E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU; 2122 break; 2123 2124 case e1000_media_type_internal_serdes: 2125 e1000_check_for_link(hw); 2126 link_check = adapter->hw.mac.serdes_has_link; 2127 break; 2128 2129 case e1000_media_type_unknown: 2130 default: 2131 break; 2132 } 2133 2134 /* Now check for a transition */ 2135 if (link_check && adapter->link_active == 0) { 2136 e1000_get_speed_and_duplex(hw, &adapter->link_speed, 2137 &adapter->link_duplex); 2138 2139 /* 2140 * Check if we should enable/disable SPEED_MODE bit on 2141 * 82571/82572 2142 */ 2143 if (adapter->link_speed != SPEED_1000 && 2144 (hw->mac.type == e1000_82571 || 2145 hw->mac.type == e1000_82572)) { 2146 int tarc0; 2147 2148 tarc0 = E1000_READ_REG(hw, E1000_TARC(0)); 2149 tarc0 &= ~SPEED_MODE_BIT; 2150 E1000_WRITE_REG(hw, E1000_TARC(0), tarc0); 2151 } 2152 if (bootverbose) { 2153 char flowctrl[IFM_ETH_FC_STRLEN]; 2154 2155 e1000_fc2str(hw->fc.current_mode, flowctrl, 2156 sizeof(flowctrl)); 2157 device_printf(dev, "Link is up %d Mbps %s, " 2158 "Flow control: %s\n", 2159 adapter->link_speed, 2160 (adapter->link_duplex == FULL_DUPLEX) ? 2161 "Full Duplex" : "Half Duplex", 2162 flowctrl); 2163 } 2164 if (adapter->ifm_flowctrl & IFM_ETH_FORCEPAUSE) 2165 e1000_force_flowctrl(hw, adapter->ifm_flowctrl); 2166 adapter->link_active = 1; 2167 adapter->smartspeed = 0; 2168 ifp->if_baudrate = adapter->link_speed * 1000000; 2169 ifp->if_link_state = LINK_STATE_UP; 2170 if_link_state_change(ifp); 2171 } else if (!link_check && adapter->link_active == 1) { 2172 ifp->if_baudrate = adapter->link_speed = 0; 2173 adapter->link_duplex = 0; 2174 if (bootverbose) 2175 device_printf(dev, "Link is Down\n"); 2176 adapter->link_active = 0; 2177 #if 0 2178 /* Link down, disable watchdog */ 2179 if->if_timer = 0; 2180 #endif 2181 ifp->if_link_state = LINK_STATE_DOWN; 2182 if_link_state_change(ifp); 2183 } 2184 } 2185 2186 static void 2187 em_stop(struct adapter *adapter) 2188 { 2189 struct ifnet *ifp = &adapter->arpcom.ac_if; 2190 int i; 2191 2192 ASSERT_SERIALIZED(ifp->if_serializer); 2193 2194 em_disable_intr(adapter); 2195 2196 callout_stop(&adapter->timer); 2197 callout_stop(&adapter->tx_fifo_timer); 2198 2199 ifp->if_flags &= ~IFF_RUNNING; 2200 ifq_clr_oactive(&ifp->if_snd); 2201 ifp->if_timer = 0; 2202 2203 e1000_reset_hw(&adapter->hw); 2204 if (adapter->hw.mac.type >= e1000_82544) 2205 E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0); 2206 2207 for (i = 0; i < adapter->num_tx_desc; i++) { 2208 struct em_buffer *tx_buffer = &adapter->tx_buffer_area[i]; 2209 2210 if (tx_buffer->m_head != NULL) { 2211 bus_dmamap_unload(adapter->txtag, tx_buffer->map); 2212 m_freem(tx_buffer->m_head); 2213 tx_buffer->m_head = NULL; 2214 } 2215 } 2216 2217 for (i = 0; i < adapter->num_rx_desc; i++) { 2218 struct em_buffer *rx_buffer = &adapter->rx_buffer_area[i]; 2219 2220 if (rx_buffer->m_head != NULL) { 2221 bus_dmamap_unload(adapter->rxtag, rx_buffer->map); 2222 m_freem(rx_buffer->m_head); 2223 rx_buffer->m_head = NULL; 2224 } 2225 } 2226 2227 if (adapter->fmp != NULL) 2228 m_freem(adapter->fmp); 2229 adapter->fmp = NULL; 2230 adapter->lmp = NULL; 2231 2232 adapter->csum_flags = 0; 2233 adapter->csum_lhlen = 0; 2234 adapter->csum_iphlen = 0; 2235 adapter->csum_thlen = 0; 2236 adapter->csum_mss = 0; 2237 adapter->csum_pktlen = 0; 2238 2239 adapter->tx_dd_head = 0; 2240 adapter->tx_dd_tail = 0; 2241 adapter->tx_nsegs = 0; 2242 } 2243 2244 static int 2245 em_get_hw_info(struct adapter *adapter) 2246 { 2247 device_t dev = adapter->dev; 2248 2249 /* Save off the information about this board */ 2250 adapter->hw.vendor_id = pci_get_vendor(dev); 2251 adapter->hw.device_id = pci_get_device(dev); 2252 adapter->hw.revision_id = pci_get_revid(dev); 2253 adapter->hw.subsystem_vendor_id = pci_get_subvendor(dev); 2254 adapter->hw.subsystem_device_id = pci_get_subdevice(dev); 2255 2256 /* Do Shared Code Init and Setup */ 2257 if (e1000_set_mac_type(&adapter->hw)) 2258 return ENXIO; 2259 return 0; 2260 } 2261 2262 static int 2263 em_alloc_pci_res(struct adapter *adapter) 2264 { 2265 device_t dev = adapter->dev; 2266 u_int intr_flags; 2267 int val, rid, msi_enable; 2268 2269 /* Enable bus mastering */ 2270 pci_enable_busmaster(dev); 2271 2272 adapter->memory_rid = EM_BAR_MEM; 2273 adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 2274 &adapter->memory_rid, RF_ACTIVE); 2275 if (adapter->memory == NULL) { 2276 device_printf(dev, "Unable to allocate bus resource: memory\n"); 2277 return (ENXIO); 2278 } 2279 adapter->osdep.mem_bus_space_tag = 2280 rman_get_bustag(adapter->memory); 2281 adapter->osdep.mem_bus_space_handle = 2282 rman_get_bushandle(adapter->memory); 2283 2284 /* XXX This is quite goofy, it is not actually used */ 2285 adapter->hw.hw_addr = (uint8_t *)&adapter->osdep.mem_bus_space_handle; 2286 2287 /* Only older adapters use IO mapping */ 2288 if (adapter->hw.mac.type > e1000_82543 && 2289 adapter->hw.mac.type < e1000_82571) { 2290 /* Figure our where our IO BAR is ? */ 2291 for (rid = PCIR_BAR(0); rid < PCIR_CARDBUSCIS;) { 2292 val = pci_read_config(dev, rid, 4); 2293 if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) { 2294 adapter->io_rid = rid; 2295 break; 2296 } 2297 rid += 4; 2298 /* check for 64bit BAR */ 2299 if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT) 2300 rid += 4; 2301 } 2302 if (rid >= PCIR_CARDBUSCIS) { 2303 device_printf(dev, "Unable to locate IO BAR\n"); 2304 return (ENXIO); 2305 } 2306 adapter->ioport = bus_alloc_resource_any(dev, SYS_RES_IOPORT, 2307 &adapter->io_rid, RF_ACTIVE); 2308 if (adapter->ioport == NULL) { 2309 device_printf(dev, "Unable to allocate bus resource: " 2310 "ioport\n"); 2311 return (ENXIO); 2312 } 2313 adapter->hw.io_base = 0; 2314 adapter->osdep.io_bus_space_tag = 2315 rman_get_bustag(adapter->ioport); 2316 adapter->osdep.io_bus_space_handle = 2317 rman_get_bushandle(adapter->ioport); 2318 } 2319 2320 /* 2321 * Don't enable MSI-X on 82574, see: 2322 * 82574 specification update errata #15 2323 * 2324 * Don't enable MSI on PCI/PCI-X chips, see: 2325 * 82540 specification update errata #6 2326 * 82545 specification update errata #4 2327 * 2328 * Don't enable MSI on 82571/82572, see: 2329 * 82571/82572 specification update errata #63 2330 */ 2331 msi_enable = em_msi_enable; 2332 if (msi_enable && 2333 (!pci_is_pcie(dev) || 2334 adapter->hw.mac.type == e1000_82571 || 2335 adapter->hw.mac.type == e1000_82572)) 2336 msi_enable = 0; 2337 again: 2338 adapter->intr_type = pci_alloc_1intr(dev, msi_enable, 2339 &adapter->intr_rid, &intr_flags); 2340 2341 if (adapter->intr_type == PCI_INTR_TYPE_LEGACY) { 2342 int unshared; 2343 2344 unshared = device_getenv_int(dev, "irq.unshared", 0); 2345 if (!unshared) { 2346 adapter->flags |= EM_FLAG_SHARED_INTR; 2347 if (bootverbose) 2348 device_printf(dev, "IRQ shared\n"); 2349 } else { 2350 intr_flags &= ~RF_SHAREABLE; 2351 if (bootverbose) 2352 device_printf(dev, "IRQ unshared\n"); 2353 } 2354 } 2355 2356 adapter->intr_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, 2357 &adapter->intr_rid, intr_flags); 2358 if (adapter->intr_res == NULL) { 2359 device_printf(dev, "Unable to allocate bus resource: %s\n", 2360 adapter->intr_type == PCI_INTR_TYPE_MSI ? 2361 "MSI" : "legacy intr"); 2362 if (!msi_enable) { 2363 /* Retry with MSI. */ 2364 msi_enable = 1; 2365 adapter->flags &= ~EM_FLAG_SHARED_INTR; 2366 goto again; 2367 } 2368 return (ENXIO); 2369 } 2370 2371 adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2); 2372 adapter->hw.back = &adapter->osdep; 2373 return (0); 2374 } 2375 2376 static void 2377 em_free_pci_res(struct adapter *adapter) 2378 { 2379 device_t dev = adapter->dev; 2380 2381 if (adapter->intr_res != NULL) { 2382 bus_release_resource(dev, SYS_RES_IRQ, 2383 adapter->intr_rid, adapter->intr_res); 2384 } 2385 2386 if (adapter->intr_type == PCI_INTR_TYPE_MSI) 2387 pci_release_msi(dev); 2388 2389 if (adapter->memory != NULL) { 2390 bus_release_resource(dev, SYS_RES_MEMORY, 2391 adapter->memory_rid, adapter->memory); 2392 } 2393 2394 if (adapter->flash != NULL) { 2395 bus_release_resource(dev, SYS_RES_MEMORY, 2396 adapter->flash_rid, adapter->flash); 2397 } 2398 2399 if (adapter->ioport != NULL) { 2400 bus_release_resource(dev, SYS_RES_IOPORT, 2401 adapter->io_rid, adapter->ioport); 2402 } 2403 } 2404 2405 static int 2406 em_reset(struct adapter *adapter) 2407 { 2408 device_t dev = adapter->dev; 2409 uint16_t rx_buffer_size; 2410 uint32_t pba; 2411 2412 /* When hardware is reset, fifo_head is also reset */ 2413 adapter->tx_fifo_head = 0; 2414 2415 /* Set up smart power down as default off on newer adapters. */ 2416 if (!em_smart_pwr_down && 2417 (adapter->hw.mac.type == e1000_82571 || 2418 adapter->hw.mac.type == e1000_82572)) { 2419 uint16_t phy_tmp = 0; 2420 2421 /* Speed up time to link by disabling smart power down. */ 2422 e1000_read_phy_reg(&adapter->hw, 2423 IGP02E1000_PHY_POWER_MGMT, &phy_tmp); 2424 phy_tmp &= ~IGP02E1000_PM_SPD; 2425 e1000_write_phy_reg(&adapter->hw, 2426 IGP02E1000_PHY_POWER_MGMT, phy_tmp); 2427 } 2428 2429 /* 2430 * Packet Buffer Allocation (PBA) 2431 * Writing PBA sets the receive portion of the buffer 2432 * the remainder is used for the transmit buffer. 2433 * 2434 * Devices before the 82547 had a Packet Buffer of 64K. 2435 * Default allocation: PBA=48K for Rx, leaving 16K for Tx. 2436 * After the 82547 the buffer was reduced to 40K. 2437 * Default allocation: PBA=30K for Rx, leaving 10K for Tx. 2438 * Note: default does not leave enough room for Jumbo Frame >10k. 2439 */ 2440 switch (adapter->hw.mac.type) { 2441 case e1000_82547: 2442 case e1000_82547_rev_2: /* 82547: Total Packet Buffer is 40K */ 2443 if (adapter->hw.mac.max_frame_size > 8192) 2444 pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */ 2445 else 2446 pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */ 2447 adapter->tx_fifo_head = 0; 2448 adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT; 2449 adapter->tx_fifo_size = 2450 (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT; 2451 break; 2452 2453 /* Total Packet Buffer on these is 48K */ 2454 case e1000_82571: 2455 case e1000_82572: 2456 case e1000_80003es2lan: 2457 pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */ 2458 break; 2459 2460 case e1000_82573: /* 82573: Total Packet Buffer is 32K */ 2461 pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */ 2462 break; 2463 2464 case e1000_82574: 2465 case e1000_82583: 2466 pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */ 2467 break; 2468 2469 case e1000_ich8lan: 2470 pba = E1000_PBA_8K; 2471 break; 2472 2473 case e1000_ich9lan: 2474 case e1000_ich10lan: 2475 #define E1000_PBA_10K 0x000A 2476 pba = E1000_PBA_10K; 2477 break; 2478 2479 case e1000_pchlan: 2480 case e1000_pch2lan: 2481 case e1000_pch_lpt: 2482 case e1000_pch_spt: 2483 pba = E1000_PBA_26K; 2484 break; 2485 2486 default: 2487 /* Devices before 82547 had a Packet Buffer of 64K. */ 2488 if (adapter->hw.mac.max_frame_size > 8192) 2489 pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */ 2490 else 2491 pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */ 2492 } 2493 E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba); 2494 2495 /* 2496 * These parameters control the automatic generation (Tx) and 2497 * response (Rx) to Ethernet PAUSE frames. 2498 * - High water mark should allow for at least two frames to be 2499 * received after sending an XOFF. 2500 * - Low water mark works best when it is very near the high water mark. 2501 * This allows the receiver to restart by sending XON when it has 2502 * drained a bit. Here we use an arbitary value of 1500 which will 2503 * restart after one full frame is pulled from the buffer. There 2504 * could be several smaller frames in the buffer and if so they will 2505 * not trigger the XON until their total number reduces the buffer 2506 * by 1500. 2507 * - The pause time is fairly large at 1000 x 512ns = 512 usec. 2508 */ 2509 rx_buffer_size = 2510 (E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff) << 10; 2511 2512 adapter->hw.fc.high_water = rx_buffer_size - 2513 roundup2(adapter->hw.mac.max_frame_size, 1024); 2514 adapter->hw.fc.low_water = adapter->hw.fc.high_water - 1500; 2515 2516 if (adapter->hw.mac.type == e1000_80003es2lan) 2517 adapter->hw.fc.pause_time = 0xFFFF; 2518 else 2519 adapter->hw.fc.pause_time = EM_FC_PAUSE_TIME; 2520 2521 adapter->hw.fc.send_xon = TRUE; 2522 2523 adapter->hw.fc.requested_mode = e1000_ifmedia2fc(adapter->ifm_flowctrl); 2524 2525 /* 2526 * Device specific overrides/settings 2527 */ 2528 switch (adapter->hw.mac.type) { 2529 case e1000_pchlan: 2530 KASSERT(adapter->hw.fc.requested_mode == e1000_fc_rx_pause || 2531 adapter->hw.fc.requested_mode == e1000_fc_none, 2532 ("unsupported flow control on PCH %d", 2533 adapter->hw.fc.requested_mode)); 2534 adapter->hw.fc.pause_time = 0xFFFF; /* override */ 2535 if (adapter->arpcom.ac_if.if_mtu > ETHERMTU) { 2536 adapter->hw.fc.high_water = 0x3500; 2537 adapter->hw.fc.low_water = 0x1500; 2538 } else { 2539 adapter->hw.fc.high_water = 0x5000; 2540 adapter->hw.fc.low_water = 0x3000; 2541 } 2542 adapter->hw.fc.refresh_time = 0x1000; 2543 break; 2544 2545 case e1000_pch2lan: 2546 case e1000_pch_lpt: 2547 case e1000_pch_spt: 2548 adapter->hw.fc.high_water = 0x5C20; 2549 adapter->hw.fc.low_water = 0x5048; 2550 adapter->hw.fc.pause_time = 0x0650; 2551 adapter->hw.fc.refresh_time = 0x0400; 2552 /* Jumbos need adjusted PBA */ 2553 if (adapter->arpcom.ac_if.if_mtu > ETHERMTU) 2554 E1000_WRITE_REG(&adapter->hw, E1000_PBA, 12); 2555 else 2556 E1000_WRITE_REG(&adapter->hw, E1000_PBA, 26); 2557 break; 2558 2559 case e1000_ich9lan: 2560 case e1000_ich10lan: 2561 if (adapter->arpcom.ac_if.if_mtu > ETHERMTU) { 2562 adapter->hw.fc.high_water = 0x2800; 2563 adapter->hw.fc.low_water = 2564 adapter->hw.fc.high_water - 8; 2565 break; 2566 } 2567 /* FALL THROUGH */ 2568 default: 2569 if (adapter->hw.mac.type == e1000_80003es2lan) 2570 adapter->hw.fc.pause_time = 0xFFFF; 2571 break; 2572 } 2573 2574 /* Issue a global reset */ 2575 e1000_reset_hw(&adapter->hw); 2576 if (adapter->hw.mac.type >= e1000_82544) 2577 E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0); 2578 em_disable_aspm(adapter); 2579 2580 if (e1000_init_hw(&adapter->hw) < 0) { 2581 device_printf(dev, "Hardware Initialization Failed\n"); 2582 return (EIO); 2583 } 2584 2585 E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN); 2586 e1000_get_phy_info(&adapter->hw); 2587 e1000_check_for_link(&adapter->hw); 2588 2589 return (0); 2590 } 2591 2592 static void 2593 em_setup_ifp(struct adapter *adapter) 2594 { 2595 struct ifnet *ifp = &adapter->arpcom.ac_if; 2596 2597 if_initname(ifp, device_get_name(adapter->dev), 2598 device_get_unit(adapter->dev)); 2599 ifp->if_softc = adapter; 2600 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2601 ifp->if_init = em_init; 2602 ifp->if_ioctl = em_ioctl; 2603 ifp->if_start = em_start; 2604 #ifdef IFPOLL_ENABLE 2605 ifp->if_npoll = em_npoll; 2606 #endif 2607 ifp->if_watchdog = em_watchdog; 2608 ifp->if_nmbclusters = adapter->num_rx_desc; 2609 ifq_set_maxlen(&ifp->if_snd, adapter->num_tx_desc - 1); 2610 ifq_set_ready(&ifp->if_snd); 2611 2612 ether_ifattach(ifp, adapter->hw.mac.addr, NULL); 2613 2614 ifp->if_capabilities = IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU; 2615 if (adapter->hw.mac.type >= e1000_82543) 2616 ifp->if_capabilities |= IFCAP_HWCSUM; 2617 if (adapter->flags & EM_FLAG_TSO) 2618 ifp->if_capabilities |= IFCAP_TSO; 2619 ifp->if_capenable = ifp->if_capabilities; 2620 2621 if (ifp->if_capenable & IFCAP_TXCSUM) 2622 ifp->if_hwassist |= EM_CSUM_FEATURES; 2623 if (ifp->if_capenable & IFCAP_TSO) 2624 ifp->if_hwassist |= CSUM_TSO; 2625 2626 /* 2627 * Tell the upper layer(s) we support long frames. 2628 */ 2629 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 2630 2631 /* 2632 * Specify the media types supported by this adapter and register 2633 * callbacks to update media and link information 2634 */ 2635 if (adapter->hw.phy.media_type == e1000_media_type_fiber || 2636 adapter->hw.phy.media_type == e1000_media_type_internal_serdes) { 2637 u_char fiber_type = IFM_1000_SX; /* default type */ 2638 2639 if (adapter->hw.mac.type == e1000_82545) 2640 fiber_type = IFM_1000_LX; 2641 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX, 2642 0, NULL); 2643 } else { 2644 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL); 2645 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX, 2646 0, NULL); 2647 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 2648 0, NULL); 2649 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 2650 0, NULL); 2651 if (adapter->hw.phy.type != e1000_phy_ife) { 2652 ifmedia_add(&adapter->media, 2653 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); 2654 } 2655 } 2656 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); 2657 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO | 2658 adapter->ifm_flowctrl); 2659 } 2660 2661 2662 /* 2663 * Workaround for SmartSpeed on 82541 and 82547 controllers 2664 */ 2665 static void 2666 em_smartspeed(struct adapter *adapter) 2667 { 2668 uint16_t phy_tmp; 2669 2670 if (adapter->link_active || adapter->hw.phy.type != e1000_phy_igp || 2671 adapter->hw.mac.autoneg == 0 || 2672 (adapter->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0) 2673 return; 2674 2675 if (adapter->smartspeed == 0) { 2676 /* 2677 * If Master/Slave config fault is asserted twice, 2678 * we assume back-to-back 2679 */ 2680 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp); 2681 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT)) 2682 return; 2683 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp); 2684 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) { 2685 e1000_read_phy_reg(&adapter->hw, 2686 PHY_1000T_CTRL, &phy_tmp); 2687 if (phy_tmp & CR_1000T_MS_ENABLE) { 2688 phy_tmp &= ~CR_1000T_MS_ENABLE; 2689 e1000_write_phy_reg(&adapter->hw, 2690 PHY_1000T_CTRL, phy_tmp); 2691 adapter->smartspeed++; 2692 if (adapter->hw.mac.autoneg && 2693 !e1000_phy_setup_autoneg(&adapter->hw) && 2694 !e1000_read_phy_reg(&adapter->hw, 2695 PHY_CONTROL, &phy_tmp)) { 2696 phy_tmp |= MII_CR_AUTO_NEG_EN | 2697 MII_CR_RESTART_AUTO_NEG; 2698 e1000_write_phy_reg(&adapter->hw, 2699 PHY_CONTROL, phy_tmp); 2700 } 2701 } 2702 } 2703 return; 2704 } else if (adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) { 2705 /* If still no link, perhaps using 2/3 pair cable */ 2706 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp); 2707 phy_tmp |= CR_1000T_MS_ENABLE; 2708 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp); 2709 if (adapter->hw.mac.autoneg && 2710 !e1000_phy_setup_autoneg(&adapter->hw) && 2711 !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) { 2712 phy_tmp |= MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG; 2713 e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp); 2714 } 2715 } 2716 2717 /* Restart process after EM_SMARTSPEED_MAX iterations */ 2718 if (adapter->smartspeed++ == EM_SMARTSPEED_MAX) 2719 adapter->smartspeed = 0; 2720 } 2721 2722 static int 2723 em_dma_malloc(struct adapter *adapter, bus_size_t size, 2724 struct em_dma_alloc *dma) 2725 { 2726 dma->dma_vaddr = bus_dmamem_coherent_any(adapter->parent_dtag, 2727 EM_DBA_ALIGN, size, BUS_DMA_WAITOK, 2728 &dma->dma_tag, &dma->dma_map, 2729 &dma->dma_paddr); 2730 if (dma->dma_vaddr == NULL) 2731 return ENOMEM; 2732 else 2733 return 0; 2734 } 2735 2736 static void 2737 em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma) 2738 { 2739 if (dma->dma_tag == NULL) 2740 return; 2741 bus_dmamap_unload(dma->dma_tag, dma->dma_map); 2742 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); 2743 bus_dma_tag_destroy(dma->dma_tag); 2744 } 2745 2746 static int 2747 em_create_tx_ring(struct adapter *adapter) 2748 { 2749 device_t dev = adapter->dev; 2750 struct em_buffer *tx_buffer; 2751 int error, i; 2752 2753 adapter->tx_buffer_area = 2754 kmalloc(sizeof(struct em_buffer) * adapter->num_tx_desc, 2755 M_DEVBUF, M_WAITOK | M_ZERO); 2756 2757 /* 2758 * Create DMA tags for tx buffers 2759 */ 2760 error = bus_dma_tag_create(adapter->parent_dtag, /* parent */ 2761 1, 0, /* alignment, bounds */ 2762 BUS_SPACE_MAXADDR, /* lowaddr */ 2763 BUS_SPACE_MAXADDR, /* highaddr */ 2764 NULL, NULL, /* filter, filterarg */ 2765 EM_TSO_SIZE, /* maxsize */ 2766 EM_MAX_SCATTER, /* nsegments */ 2767 PAGE_SIZE, /* maxsegsize */ 2768 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | 2769 BUS_DMA_ONEBPAGE, /* flags */ 2770 &adapter->txtag); 2771 if (error) { 2772 device_printf(dev, "Unable to allocate TX DMA tag\n"); 2773 kfree(adapter->tx_buffer_area, M_DEVBUF); 2774 adapter->tx_buffer_area = NULL; 2775 return error; 2776 } 2777 2778 /* 2779 * Create DMA maps for tx buffers 2780 */ 2781 for (i = 0; i < adapter->num_tx_desc; i++) { 2782 tx_buffer = &adapter->tx_buffer_area[i]; 2783 2784 error = bus_dmamap_create(adapter->txtag, 2785 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, 2786 &tx_buffer->map); 2787 if (error) { 2788 device_printf(dev, "Unable to create TX DMA map\n"); 2789 em_destroy_tx_ring(adapter, i); 2790 return error; 2791 } 2792 } 2793 return (0); 2794 } 2795 2796 static void 2797 em_init_tx_ring(struct adapter *adapter) 2798 { 2799 /* Clear the old ring contents */ 2800 bzero(adapter->tx_desc_base, 2801 (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc); 2802 2803 /* Reset state */ 2804 adapter->next_avail_tx_desc = 0; 2805 adapter->next_tx_to_clean = 0; 2806 adapter->num_tx_desc_avail = adapter->num_tx_desc; 2807 } 2808 2809 static void 2810 em_init_tx_unit(struct adapter *adapter) 2811 { 2812 uint32_t tctl, tarc, tipg = 0; 2813 uint64_t bus_addr; 2814 2815 /* Setup the Base and Length of the Tx Descriptor Ring */ 2816 bus_addr = adapter->txdma.dma_paddr; 2817 E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(0), 2818 adapter->num_tx_desc * sizeof(struct e1000_tx_desc)); 2819 E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(0), 2820 (uint32_t)(bus_addr >> 32)); 2821 E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(0), 2822 (uint32_t)bus_addr); 2823 /* Setup the HW Tx Head and Tail descriptor pointers */ 2824 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0); 2825 E1000_WRITE_REG(&adapter->hw, E1000_TDH(0), 0); 2826 2827 /* Set the default values for the Tx Inter Packet Gap timer */ 2828 switch (adapter->hw.mac.type) { 2829 case e1000_82542: 2830 tipg = DEFAULT_82542_TIPG_IPGT; 2831 tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT; 2832 tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT; 2833 break; 2834 2835 case e1000_80003es2lan: 2836 tipg = DEFAULT_82543_TIPG_IPGR1; 2837 tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 << 2838 E1000_TIPG_IPGR2_SHIFT; 2839 break; 2840 2841 default: 2842 if (adapter->hw.phy.media_type == e1000_media_type_fiber || 2843 adapter->hw.phy.media_type == 2844 e1000_media_type_internal_serdes) 2845 tipg = DEFAULT_82543_TIPG_IPGT_FIBER; 2846 else 2847 tipg = DEFAULT_82543_TIPG_IPGT_COPPER; 2848 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT; 2849 tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT; 2850 break; 2851 } 2852 2853 E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg); 2854 2855 /* NOTE: 0 is not allowed for TIDV */ 2856 E1000_WRITE_REG(&adapter->hw, E1000_TIDV, 1); 2857 if(adapter->hw.mac.type >= e1000_82540) 2858 E1000_WRITE_REG(&adapter->hw, E1000_TADV, 0); 2859 2860 if (adapter->hw.mac.type == e1000_82571 || 2861 adapter->hw.mac.type == e1000_82572) { 2862 tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0)); 2863 tarc |= SPEED_MODE_BIT; 2864 E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc); 2865 } else if (adapter->hw.mac.type == e1000_80003es2lan) { 2866 tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0)); 2867 tarc |= 1; 2868 E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc); 2869 tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(1)); 2870 tarc |= 1; 2871 E1000_WRITE_REG(&adapter->hw, E1000_TARC(1), tarc); 2872 } 2873 2874 /* Program the Transmit Control Register */ 2875 tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL); 2876 tctl &= ~E1000_TCTL_CT; 2877 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN | 2878 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); 2879 2880 if (adapter->hw.mac.type >= e1000_82571) 2881 tctl |= E1000_TCTL_MULR; 2882 2883 /* This write will effectively turn on the transmit unit. */ 2884 E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl); 2885 2886 if (adapter->hw.mac.type == e1000_82571 || 2887 adapter->hw.mac.type == e1000_82572 || 2888 adapter->hw.mac.type == e1000_80003es2lan) { 2889 /* Bit 28 of TARC1 must be cleared when MULR is enabled */ 2890 tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(1)); 2891 tarc &= ~(1 << 28); 2892 E1000_WRITE_REG(&adapter->hw, E1000_TARC(1), tarc); 2893 } 2894 } 2895 2896 static void 2897 em_destroy_tx_ring(struct adapter *adapter, int ndesc) 2898 { 2899 struct em_buffer *tx_buffer; 2900 int i; 2901 2902 if (adapter->tx_buffer_area == NULL) 2903 return; 2904 2905 for (i = 0; i < ndesc; i++) { 2906 tx_buffer = &adapter->tx_buffer_area[i]; 2907 2908 KKASSERT(tx_buffer->m_head == NULL); 2909 bus_dmamap_destroy(adapter->txtag, tx_buffer->map); 2910 } 2911 bus_dma_tag_destroy(adapter->txtag); 2912 2913 kfree(adapter->tx_buffer_area, M_DEVBUF); 2914 adapter->tx_buffer_area = NULL; 2915 } 2916 2917 /* 2918 * The offload context needs to be set when we transfer the first 2919 * packet of a particular protocol (TCP/UDP). This routine has been 2920 * enhanced to deal with inserted VLAN headers. 2921 * 2922 * If the new packet's ether header length, ip header length and 2923 * csum offloading type are same as the previous packet, we should 2924 * avoid allocating a new csum context descriptor; mainly to take 2925 * advantage of the pipeline effect of the TX data read request. 2926 * 2927 * This function returns number of TX descrptors allocated for 2928 * csum context. 2929 */ 2930 static int 2931 em_txcsum(struct adapter *adapter, struct mbuf *mp, 2932 uint32_t *txd_upper, uint32_t *txd_lower) 2933 { 2934 struct e1000_context_desc *TXD; 2935 int curr_txd, ehdrlen, csum_flags; 2936 uint32_t cmd, hdr_len, ip_hlen; 2937 2938 csum_flags = mp->m_pkthdr.csum_flags & EM_CSUM_FEATURES; 2939 ip_hlen = mp->m_pkthdr.csum_iphlen; 2940 ehdrlen = mp->m_pkthdr.csum_lhlen; 2941 2942 if (adapter->csum_lhlen == ehdrlen && 2943 adapter->csum_iphlen == ip_hlen && 2944 adapter->csum_flags == csum_flags) { 2945 /* 2946 * Same csum offload context as the previous packets; 2947 * just return. 2948 */ 2949 *txd_upper = adapter->csum_txd_upper; 2950 *txd_lower = adapter->csum_txd_lower; 2951 return 0; 2952 } 2953 2954 /* 2955 * Setup a new csum offload context. 2956 */ 2957 2958 curr_txd = adapter->next_avail_tx_desc; 2959 TXD = (struct e1000_context_desc *)&adapter->tx_desc_base[curr_txd]; 2960 2961 cmd = 0; 2962 2963 /* Setup of IP header checksum. */ 2964 if (csum_flags & CSUM_IP) { 2965 /* 2966 * Start offset for header checksum calculation. 2967 * End offset for header checksum calculation. 2968 * Offset of place to put the checksum. 2969 */ 2970 TXD->lower_setup.ip_fields.ipcss = ehdrlen; 2971 TXD->lower_setup.ip_fields.ipcse = 2972 htole16(ehdrlen + ip_hlen - 1); 2973 TXD->lower_setup.ip_fields.ipcso = 2974 ehdrlen + offsetof(struct ip, ip_sum); 2975 cmd |= E1000_TXD_CMD_IP; 2976 *txd_upper |= E1000_TXD_POPTS_IXSM << 8; 2977 } 2978 hdr_len = ehdrlen + ip_hlen; 2979 2980 if (csum_flags & CSUM_TCP) { 2981 /* 2982 * Start offset for payload checksum calculation. 2983 * End offset for payload checksum calculation. 2984 * Offset of place to put the checksum. 2985 */ 2986 TXD->upper_setup.tcp_fields.tucss = hdr_len; 2987 TXD->upper_setup.tcp_fields.tucse = htole16(0); 2988 TXD->upper_setup.tcp_fields.tucso = 2989 hdr_len + offsetof(struct tcphdr, th_sum); 2990 cmd |= E1000_TXD_CMD_TCP; 2991 *txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2992 } else if (csum_flags & CSUM_UDP) { 2993 /* 2994 * Start offset for header checksum calculation. 2995 * End offset for header checksum calculation. 2996 * Offset of place to put the checksum. 2997 */ 2998 TXD->upper_setup.tcp_fields.tucss = hdr_len; 2999 TXD->upper_setup.tcp_fields.tucse = htole16(0); 3000 TXD->upper_setup.tcp_fields.tucso = 3001 hdr_len + offsetof(struct udphdr, uh_sum); 3002 *txd_upper |= E1000_TXD_POPTS_TXSM << 8; 3003 } 3004 3005 *txd_lower = E1000_TXD_CMD_DEXT | /* Extended descr type */ 3006 E1000_TXD_DTYP_D; /* Data descr */ 3007 3008 /* Save the information for this csum offloading context */ 3009 adapter->csum_lhlen = ehdrlen; 3010 adapter->csum_iphlen = ip_hlen; 3011 adapter->csum_flags = csum_flags; 3012 adapter->csum_txd_upper = *txd_upper; 3013 adapter->csum_txd_lower = *txd_lower; 3014 3015 TXD->tcp_seg_setup.data = htole32(0); 3016 TXD->cmd_and_length = 3017 htole32(E1000_TXD_CMD_IFCS | E1000_TXD_CMD_DEXT | cmd); 3018 3019 if (++curr_txd == adapter->num_tx_desc) 3020 curr_txd = 0; 3021 3022 KKASSERT(adapter->num_tx_desc_avail > 0); 3023 adapter->num_tx_desc_avail--; 3024 3025 adapter->next_avail_tx_desc = curr_txd; 3026 return 1; 3027 } 3028 3029 static void 3030 em_txeof(struct adapter *adapter) 3031 { 3032 struct ifnet *ifp = &adapter->arpcom.ac_if; 3033 struct em_buffer *tx_buffer; 3034 int first, num_avail; 3035 3036 if (adapter->tx_dd_head == adapter->tx_dd_tail) 3037 return; 3038 3039 if (adapter->num_tx_desc_avail == adapter->num_tx_desc) 3040 return; 3041 3042 num_avail = adapter->num_tx_desc_avail; 3043 first = adapter->next_tx_to_clean; 3044 3045 while (adapter->tx_dd_head != adapter->tx_dd_tail) { 3046 struct e1000_tx_desc *tx_desc; 3047 int dd_idx = adapter->tx_dd[adapter->tx_dd_head]; 3048 3049 tx_desc = &adapter->tx_desc_base[dd_idx]; 3050 if (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) { 3051 EM_INC_TXDD_IDX(adapter->tx_dd_head); 3052 3053 if (++dd_idx == adapter->num_tx_desc) 3054 dd_idx = 0; 3055 3056 while (first != dd_idx) { 3057 logif(pkt_txclean); 3058 3059 num_avail++; 3060 3061 tx_buffer = &adapter->tx_buffer_area[first]; 3062 if (tx_buffer->m_head) { 3063 bus_dmamap_unload(adapter->txtag, 3064 tx_buffer->map); 3065 m_freem(tx_buffer->m_head); 3066 tx_buffer->m_head = NULL; 3067 } 3068 3069 if (++first == adapter->num_tx_desc) 3070 first = 0; 3071 } 3072 } else { 3073 break; 3074 } 3075 } 3076 adapter->next_tx_to_clean = first; 3077 adapter->num_tx_desc_avail = num_avail; 3078 3079 if (adapter->tx_dd_head == adapter->tx_dd_tail) { 3080 adapter->tx_dd_head = 0; 3081 adapter->tx_dd_tail = 0; 3082 } 3083 3084 if (!EM_IS_OACTIVE(adapter)) { 3085 ifq_clr_oactive(&ifp->if_snd); 3086 3087 /* All clean, turn off the timer */ 3088 if (adapter->num_tx_desc_avail == adapter->num_tx_desc) 3089 ifp->if_timer = 0; 3090 } 3091 } 3092 3093 static void 3094 em_tx_collect(struct adapter *adapter) 3095 { 3096 struct ifnet *ifp = &adapter->arpcom.ac_if; 3097 struct em_buffer *tx_buffer; 3098 int tdh, first, num_avail, dd_idx = -1; 3099 3100 if (adapter->num_tx_desc_avail == adapter->num_tx_desc) 3101 return; 3102 3103 tdh = E1000_READ_REG(&adapter->hw, E1000_TDH(0)); 3104 if (tdh == adapter->next_tx_to_clean) 3105 return; 3106 3107 if (adapter->tx_dd_head != adapter->tx_dd_tail) 3108 dd_idx = adapter->tx_dd[adapter->tx_dd_head]; 3109 3110 num_avail = adapter->num_tx_desc_avail; 3111 first = adapter->next_tx_to_clean; 3112 3113 while (first != tdh) { 3114 logif(pkt_txclean); 3115 3116 num_avail++; 3117 3118 tx_buffer = &adapter->tx_buffer_area[first]; 3119 if (tx_buffer->m_head) { 3120 bus_dmamap_unload(adapter->txtag, 3121 tx_buffer->map); 3122 m_freem(tx_buffer->m_head); 3123 tx_buffer->m_head = NULL; 3124 } 3125 3126 if (first == dd_idx) { 3127 EM_INC_TXDD_IDX(adapter->tx_dd_head); 3128 if (adapter->tx_dd_head == adapter->tx_dd_tail) { 3129 adapter->tx_dd_head = 0; 3130 adapter->tx_dd_tail = 0; 3131 dd_idx = -1; 3132 } else { 3133 dd_idx = adapter->tx_dd[adapter->tx_dd_head]; 3134 } 3135 } 3136 3137 if (++first == adapter->num_tx_desc) 3138 first = 0; 3139 } 3140 adapter->next_tx_to_clean = first; 3141 adapter->num_tx_desc_avail = num_avail; 3142 3143 if (!EM_IS_OACTIVE(adapter)) { 3144 ifq_clr_oactive(&ifp->if_snd); 3145 3146 /* All clean, turn off the timer */ 3147 if (adapter->num_tx_desc_avail == adapter->num_tx_desc) 3148 ifp->if_timer = 0; 3149 } 3150 } 3151 3152 /* 3153 * When Link is lost sometimes there is work still in the TX ring 3154 * which will result in a watchdog, rather than allow that do an 3155 * attempted cleanup and then reinit here. Note that this has been 3156 * seens mostly with fiber adapters. 3157 */ 3158 static void 3159 em_tx_purge(struct adapter *adapter) 3160 { 3161 struct ifnet *ifp = &adapter->arpcom.ac_if; 3162 3163 if (!adapter->link_active && ifp->if_timer) { 3164 em_tx_collect(adapter); 3165 if (ifp->if_timer) { 3166 if_printf(ifp, "Link lost, TX pending, reinit\n"); 3167 ifp->if_timer = 0; 3168 em_init(adapter); 3169 } 3170 } 3171 } 3172 3173 static int 3174 em_newbuf(struct adapter *adapter, int i, int init) 3175 { 3176 struct mbuf *m; 3177 bus_dma_segment_t seg; 3178 bus_dmamap_t map; 3179 struct em_buffer *rx_buffer; 3180 int error, nseg; 3181 3182 m = m_getcl(init ? M_WAITOK : M_NOWAIT, MT_DATA, M_PKTHDR); 3183 if (m == NULL) { 3184 adapter->mbuf_cluster_failed++; 3185 if (init) { 3186 if_printf(&adapter->arpcom.ac_if, 3187 "Unable to allocate RX mbuf\n"); 3188 } 3189 return (ENOBUFS); 3190 } 3191 m->m_len = m->m_pkthdr.len = MCLBYTES; 3192 3193 if (adapter->hw.mac.max_frame_size <= MCLBYTES - ETHER_ALIGN) 3194 m_adj(m, ETHER_ALIGN); 3195 3196 error = bus_dmamap_load_mbuf_segment(adapter->rxtag, 3197 adapter->rx_sparemap, m, 3198 &seg, 1, &nseg, BUS_DMA_NOWAIT); 3199 if (error) { 3200 m_freem(m); 3201 if (init) { 3202 if_printf(&adapter->arpcom.ac_if, 3203 "Unable to load RX mbuf\n"); 3204 } 3205 return (error); 3206 } 3207 3208 rx_buffer = &adapter->rx_buffer_area[i]; 3209 if (rx_buffer->m_head != NULL) 3210 bus_dmamap_unload(adapter->rxtag, rx_buffer->map); 3211 3212 map = rx_buffer->map; 3213 rx_buffer->map = adapter->rx_sparemap; 3214 adapter->rx_sparemap = map; 3215 3216 rx_buffer->m_head = m; 3217 3218 adapter->rx_desc_base[i].buffer_addr = htole64(seg.ds_addr); 3219 return (0); 3220 } 3221 3222 static int 3223 em_create_rx_ring(struct adapter *adapter) 3224 { 3225 device_t dev = adapter->dev; 3226 struct em_buffer *rx_buffer; 3227 int i, error; 3228 3229 adapter->rx_buffer_area = 3230 kmalloc(sizeof(struct em_buffer) * adapter->num_rx_desc, 3231 M_DEVBUF, M_WAITOK | M_ZERO); 3232 3233 /* 3234 * Create DMA tag for rx buffers 3235 */ 3236 error = bus_dma_tag_create(adapter->parent_dtag, /* parent */ 3237 1, 0, /* alignment, bounds */ 3238 BUS_SPACE_MAXADDR, /* lowaddr */ 3239 BUS_SPACE_MAXADDR, /* highaddr */ 3240 NULL, NULL, /* filter, filterarg */ 3241 MCLBYTES, /* maxsize */ 3242 1, /* nsegments */ 3243 MCLBYTES, /* maxsegsize */ 3244 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, /* flags */ 3245 &adapter->rxtag); 3246 if (error) { 3247 device_printf(dev, "Unable to allocate RX DMA tag\n"); 3248 kfree(adapter->rx_buffer_area, M_DEVBUF); 3249 adapter->rx_buffer_area = NULL; 3250 return error; 3251 } 3252 3253 /* 3254 * Create spare DMA map for rx buffers 3255 */ 3256 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_WAITOK, 3257 &adapter->rx_sparemap); 3258 if (error) { 3259 device_printf(dev, "Unable to create spare RX DMA map\n"); 3260 bus_dma_tag_destroy(adapter->rxtag); 3261 kfree(adapter->rx_buffer_area, M_DEVBUF); 3262 adapter->rx_buffer_area = NULL; 3263 return error; 3264 } 3265 3266 /* 3267 * Create DMA maps for rx buffers 3268 */ 3269 for (i = 0; i < adapter->num_rx_desc; i++) { 3270 rx_buffer = &adapter->rx_buffer_area[i]; 3271 3272 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_WAITOK, 3273 &rx_buffer->map); 3274 if (error) { 3275 device_printf(dev, "Unable to create RX DMA map\n"); 3276 em_destroy_rx_ring(adapter, i); 3277 return error; 3278 } 3279 } 3280 return (0); 3281 } 3282 3283 static int 3284 em_init_rx_ring(struct adapter *adapter) 3285 { 3286 int i, error; 3287 3288 /* Reset descriptor ring */ 3289 bzero(adapter->rx_desc_base, 3290 (sizeof(struct e1000_rx_desc)) * adapter->num_rx_desc); 3291 3292 /* Allocate new ones. */ 3293 for (i = 0; i < adapter->num_rx_desc; i++) { 3294 error = em_newbuf(adapter, i, 1); 3295 if (error) 3296 return (error); 3297 } 3298 3299 /* Setup our descriptor pointers */ 3300 adapter->next_rx_desc_to_check = 0; 3301 3302 return (0); 3303 } 3304 3305 static void 3306 em_init_rx_unit(struct adapter *adapter) 3307 { 3308 struct ifnet *ifp = &adapter->arpcom.ac_if; 3309 uint64_t bus_addr; 3310 uint32_t rctl; 3311 3312 /* 3313 * Make sure receives are disabled while setting 3314 * up the descriptor ring 3315 */ 3316 rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); 3317 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); 3318 3319 if (adapter->hw.mac.type >= e1000_82540) { 3320 uint32_t itr; 3321 3322 /* 3323 * Set the interrupt throttling rate. Value is calculated 3324 * as ITR = 1 / (INT_THROTTLE_CEIL * 256ns) 3325 */ 3326 if (adapter->int_throttle_ceil) 3327 itr = 1000000000 / 256 / adapter->int_throttle_ceil; 3328 else 3329 itr = 0; 3330 em_set_itr(adapter, itr); 3331 } 3332 3333 /* Disable accelerated ackknowledge */ 3334 if (adapter->hw.mac.type == e1000_82574) { 3335 E1000_WRITE_REG(&adapter->hw, 3336 E1000_RFCTL, E1000_RFCTL_ACK_DIS); 3337 } 3338 3339 /* Receive Checksum Offload for TCP and UDP */ 3340 if (ifp->if_capenable & IFCAP_RXCSUM) { 3341 uint32_t rxcsum; 3342 3343 rxcsum = E1000_READ_REG(&adapter->hw, E1000_RXCSUM); 3344 rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL); 3345 E1000_WRITE_REG(&adapter->hw, E1000_RXCSUM, rxcsum); 3346 } 3347 3348 /* 3349 * XXX TEMPORARY WORKAROUND: on some systems with 82573 3350 * long latencies are observed, like Lenovo X60. This 3351 * change eliminates the problem, but since having positive 3352 * values in RDTR is a known source of problems on other 3353 * platforms another solution is being sought. 3354 */ 3355 if (em_82573_workaround && adapter->hw.mac.type == e1000_82573) { 3356 E1000_WRITE_REG(&adapter->hw, E1000_RADV, EM_RADV_82573); 3357 E1000_WRITE_REG(&adapter->hw, E1000_RDTR, EM_RDTR_82573); 3358 } 3359 3360 /* 3361 * Setup the Base and Length of the Rx Descriptor Ring 3362 */ 3363 bus_addr = adapter->rxdma.dma_paddr; 3364 E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(0), 3365 adapter->num_rx_desc * sizeof(struct e1000_rx_desc)); 3366 E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(0), 3367 (uint32_t)(bus_addr >> 32)); 3368 E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(0), 3369 (uint32_t)bus_addr); 3370 3371 /* 3372 * Setup the HW Rx Head and Tail Descriptor Pointers 3373 */ 3374 E1000_WRITE_REG(&adapter->hw, E1000_RDH(0), 0); 3375 E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), adapter->num_rx_desc - 1); 3376 3377 /* Set PTHRESH for improved jumbo performance */ 3378 if (((adapter->hw.mac.type == e1000_ich9lan) || 3379 (adapter->hw.mac.type == e1000_pch2lan) || 3380 (adapter->hw.mac.type == e1000_ich10lan)) && 3381 (ifp->if_mtu > ETHERMTU)) { 3382 uint32_t rxdctl; 3383 3384 rxdctl = E1000_READ_REG(&adapter->hw, E1000_RXDCTL(0)); 3385 E1000_WRITE_REG(&adapter->hw, E1000_RXDCTL(0), rxdctl | 3); 3386 } 3387 3388 if (adapter->hw.mac.type >= e1000_pch2lan) { 3389 if (ifp->if_mtu > ETHERMTU) 3390 e1000_lv_jumbo_workaround_ich8lan(&adapter->hw, TRUE); 3391 else 3392 e1000_lv_jumbo_workaround_ich8lan(&adapter->hw, FALSE); 3393 } 3394 3395 /* Setup the Receive Control Register */ 3396 rctl &= ~(3 << E1000_RCTL_MO_SHIFT); 3397 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO | 3398 E1000_RCTL_RDMTS_HALF | 3399 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); 3400 3401 /* Make sure VLAN Filters are off */ 3402 rctl &= ~E1000_RCTL_VFE; 3403 3404 if (e1000_tbi_sbp_enabled_82543(&adapter->hw)) 3405 rctl |= E1000_RCTL_SBP; 3406 else 3407 rctl &= ~E1000_RCTL_SBP; 3408 3409 switch (adapter->rx_buffer_len) { 3410 default: 3411 case 2048: 3412 rctl |= E1000_RCTL_SZ_2048; 3413 break; 3414 3415 case 4096: 3416 rctl |= E1000_RCTL_SZ_4096 | 3417 E1000_RCTL_BSEX | E1000_RCTL_LPE; 3418 break; 3419 3420 case 8192: 3421 rctl |= E1000_RCTL_SZ_8192 | 3422 E1000_RCTL_BSEX | E1000_RCTL_LPE; 3423 break; 3424 3425 case 16384: 3426 rctl |= E1000_RCTL_SZ_16384 | 3427 E1000_RCTL_BSEX | E1000_RCTL_LPE; 3428 break; 3429 } 3430 3431 if (ifp->if_mtu > ETHERMTU) 3432 rctl |= E1000_RCTL_LPE; 3433 else 3434 rctl &= ~E1000_RCTL_LPE; 3435 3436 /* Enable Receives */ 3437 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl); 3438 } 3439 3440 static void 3441 em_destroy_rx_ring(struct adapter *adapter, int ndesc) 3442 { 3443 struct em_buffer *rx_buffer; 3444 int i; 3445 3446 if (adapter->rx_buffer_area == NULL) 3447 return; 3448 3449 for (i = 0; i < ndesc; i++) { 3450 rx_buffer = &adapter->rx_buffer_area[i]; 3451 3452 KKASSERT(rx_buffer->m_head == NULL); 3453 bus_dmamap_destroy(adapter->rxtag, rx_buffer->map); 3454 } 3455 bus_dmamap_destroy(adapter->rxtag, adapter->rx_sparemap); 3456 bus_dma_tag_destroy(adapter->rxtag); 3457 3458 kfree(adapter->rx_buffer_area, M_DEVBUF); 3459 adapter->rx_buffer_area = NULL; 3460 } 3461 3462 static void 3463 em_rxeof(struct adapter *adapter, int count) 3464 { 3465 struct ifnet *ifp = &adapter->arpcom.ac_if; 3466 uint8_t status, accept_frame = 0, eop = 0; 3467 uint16_t len, desc_len, prev_len_adj; 3468 struct e1000_rx_desc *current_desc; 3469 struct mbuf *mp; 3470 int i; 3471 3472 i = adapter->next_rx_desc_to_check; 3473 current_desc = &adapter->rx_desc_base[i]; 3474 3475 if (!(current_desc->status & E1000_RXD_STAT_DD)) 3476 return; 3477 3478 while ((current_desc->status & E1000_RXD_STAT_DD) && count != 0) { 3479 struct mbuf *m = NULL; 3480 3481 logif(pkt_receive); 3482 3483 mp = adapter->rx_buffer_area[i].m_head; 3484 3485 /* 3486 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT 3487 * needs to access the last received byte in the mbuf. 3488 */ 3489 bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map, 3490 BUS_DMASYNC_POSTREAD); 3491 3492 accept_frame = 1; 3493 prev_len_adj = 0; 3494 desc_len = le16toh(current_desc->length); 3495 status = current_desc->status; 3496 if (status & E1000_RXD_STAT_EOP) { 3497 count--; 3498 eop = 1; 3499 if (desc_len < ETHER_CRC_LEN) { 3500 len = 0; 3501 prev_len_adj = ETHER_CRC_LEN - desc_len; 3502 } else { 3503 len = desc_len - ETHER_CRC_LEN; 3504 } 3505 } else { 3506 eop = 0; 3507 len = desc_len; 3508 } 3509 3510 if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) { 3511 uint8_t last_byte; 3512 uint32_t pkt_len = desc_len; 3513 3514 if (adapter->fmp != NULL) 3515 pkt_len += adapter->fmp->m_pkthdr.len; 3516 3517 last_byte = *(mtod(mp, caddr_t) + desc_len - 1); 3518 if (TBI_ACCEPT(&adapter->hw, status, 3519 current_desc->errors, pkt_len, last_byte, 3520 adapter->min_frame_size, 3521 adapter->hw.mac.max_frame_size)) { 3522 e1000_tbi_adjust_stats_82543(&adapter->hw, 3523 &adapter->stats, pkt_len, 3524 adapter->hw.mac.addr, 3525 adapter->hw.mac.max_frame_size); 3526 if (len > 0) 3527 len--; 3528 } else { 3529 accept_frame = 0; 3530 } 3531 } 3532 3533 if (accept_frame) { 3534 if (em_newbuf(adapter, i, 0) != 0) { 3535 IFNET_STAT_INC(ifp, iqdrops, 1); 3536 goto discard; 3537 } 3538 3539 /* Assign correct length to the current fragment */ 3540 mp->m_len = len; 3541 3542 if (adapter->fmp == NULL) { 3543 mp->m_pkthdr.len = len; 3544 adapter->fmp = mp; /* Store the first mbuf */ 3545 adapter->lmp = mp; 3546 } else { 3547 /* 3548 * Chain mbuf's together 3549 */ 3550 3551 /* 3552 * Adjust length of previous mbuf in chain if 3553 * we received less than 4 bytes in the last 3554 * descriptor. 3555 */ 3556 if (prev_len_adj > 0) { 3557 adapter->lmp->m_len -= prev_len_adj; 3558 adapter->fmp->m_pkthdr.len -= 3559 prev_len_adj; 3560 } 3561 adapter->lmp->m_next = mp; 3562 adapter->lmp = adapter->lmp->m_next; 3563 adapter->fmp->m_pkthdr.len += len; 3564 } 3565 3566 if (eop) { 3567 adapter->fmp->m_pkthdr.rcvif = ifp; 3568 IFNET_STAT_INC(ifp, ipackets, 1); 3569 3570 if (ifp->if_capenable & IFCAP_RXCSUM) { 3571 em_rxcsum(adapter, current_desc, 3572 adapter->fmp); 3573 } 3574 3575 if (status & E1000_RXD_STAT_VP) { 3576 adapter->fmp->m_pkthdr.ether_vlantag = 3577 (le16toh(current_desc->special) & 3578 E1000_RXD_SPC_VLAN_MASK); 3579 adapter->fmp->m_flags |= M_VLANTAG; 3580 } 3581 m = adapter->fmp; 3582 adapter->fmp = NULL; 3583 adapter->lmp = NULL; 3584 } 3585 } else { 3586 IFNET_STAT_INC(ifp, ierrors, 1); 3587 discard: 3588 #ifdef foo 3589 /* Reuse loaded DMA map and just update mbuf chain */ 3590 mp = adapter->rx_buffer_area[i].m_head; 3591 mp->m_len = mp->m_pkthdr.len = MCLBYTES; 3592 mp->m_data = mp->m_ext.ext_buf; 3593 mp->m_next = NULL; 3594 if (adapter->hw.mac.max_frame_size <= 3595 (MCLBYTES - ETHER_ALIGN)) 3596 m_adj(mp, ETHER_ALIGN); 3597 #endif 3598 if (adapter->fmp != NULL) { 3599 m_freem(adapter->fmp); 3600 adapter->fmp = NULL; 3601 adapter->lmp = NULL; 3602 } 3603 m = NULL; 3604 } 3605 3606 /* Zero out the receive descriptors status. */ 3607 current_desc->status = 0; 3608 3609 if (m != NULL) 3610 ifp->if_input(ifp, m, NULL, -1); 3611 3612 /* Advance our pointers to the next descriptor. */ 3613 if (++i == adapter->num_rx_desc) 3614 i = 0; 3615 current_desc = &adapter->rx_desc_base[i]; 3616 } 3617 adapter->next_rx_desc_to_check = i; 3618 3619 /* Advance the E1000's Receive Queue #0 "Tail Pointer". */ 3620 if (--i < 0) 3621 i = adapter->num_rx_desc - 1; 3622 E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), i); 3623 } 3624 3625 static void 3626 em_rxcsum(struct adapter *adapter, struct e1000_rx_desc *rx_desc, 3627 struct mbuf *mp) 3628 { 3629 /* 82543 or newer only */ 3630 if (adapter->hw.mac.type < e1000_82543 || 3631 /* Ignore Checksum bit is set */ 3632 (rx_desc->status & E1000_RXD_STAT_IXSM)) 3633 return; 3634 3635 if ((rx_desc->status & E1000_RXD_STAT_IPCS) && 3636 !(rx_desc->errors & E1000_RXD_ERR_IPE)) { 3637 /* IP Checksum Good */ 3638 mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID; 3639 } 3640 3641 if ((rx_desc->status & E1000_RXD_STAT_TCPCS) && 3642 !(rx_desc->errors & E1000_RXD_ERR_TCPE)) { 3643 mp->m_pkthdr.csum_flags |= CSUM_DATA_VALID | 3644 CSUM_PSEUDO_HDR | 3645 CSUM_FRAG_NOT_CHECKED; 3646 mp->m_pkthdr.csum_data = htons(0xffff); 3647 } 3648 } 3649 3650 static void 3651 em_enable_intr(struct adapter *adapter) 3652 { 3653 uint32_t ims_mask = IMS_ENABLE_MASK; 3654 3655 lwkt_serialize_handler_enable(adapter->arpcom.ac_if.if_serializer); 3656 3657 #if 0 3658 /* XXX MSIX */ 3659 if (adapter->hw.mac.type == e1000_82574) { 3660 E1000_WRITE_REG(&adapter->hw, EM_EIAC, EM_MSIX_MASK); 3661 ims_mask |= EM_MSIX_MASK; 3662 } 3663 #endif 3664 E1000_WRITE_REG(&adapter->hw, E1000_IMS, ims_mask); 3665 } 3666 3667 static void 3668 em_disable_intr(struct adapter *adapter) 3669 { 3670 uint32_t clear = 0xffffffff; 3671 3672 /* 3673 * The first version of 82542 had an errata where when link was forced 3674 * it would stay up even up even if the cable was disconnected. 3675 * Sequence errors were used to detect the disconnect and then the 3676 * driver would unforce the link. This code in the in the ISR. For 3677 * this to work correctly the Sequence error interrupt had to be 3678 * enabled all the time. 3679 */ 3680 if (adapter->hw.mac.type == e1000_82542 && 3681 adapter->hw.revision_id == E1000_REVISION_2) 3682 clear &= ~E1000_ICR_RXSEQ; 3683 else if (adapter->hw.mac.type == e1000_82574) 3684 E1000_WRITE_REG(&adapter->hw, EM_EIAC, 0); 3685 3686 E1000_WRITE_REG(&adapter->hw, E1000_IMC, clear); 3687 3688 adapter->npoll.ifpc_stcount = 0; 3689 3690 lwkt_serialize_handler_disable(adapter->arpcom.ac_if.if_serializer); 3691 } 3692 3693 /* 3694 * Bit of a misnomer, what this really means is 3695 * to enable OS management of the system... aka 3696 * to disable special hardware management features 3697 */ 3698 static void 3699 em_get_mgmt(struct adapter *adapter) 3700 { 3701 /* A shared code workaround */ 3702 #define E1000_82542_MANC2H E1000_MANC2H 3703 if (adapter->flags & EM_FLAG_HAS_MGMT) { 3704 int manc2h = E1000_READ_REG(&adapter->hw, E1000_MANC2H); 3705 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC); 3706 3707 /* disable hardware interception of ARP */ 3708 manc &= ~(E1000_MANC_ARP_EN); 3709 3710 /* enable receiving management packets to the host */ 3711 if (adapter->hw.mac.type >= e1000_82571) { 3712 manc |= E1000_MANC_EN_MNG2HOST; 3713 #define E1000_MNG2HOST_PORT_623 (1 << 5) 3714 #define E1000_MNG2HOST_PORT_664 (1 << 6) 3715 manc2h |= E1000_MNG2HOST_PORT_623; 3716 manc2h |= E1000_MNG2HOST_PORT_664; 3717 E1000_WRITE_REG(&adapter->hw, E1000_MANC2H, manc2h); 3718 } 3719 3720 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc); 3721 } 3722 } 3723 3724 /* 3725 * Give control back to hardware management 3726 * controller if there is one. 3727 */ 3728 static void 3729 em_rel_mgmt(struct adapter *adapter) 3730 { 3731 if (adapter->flags & EM_FLAG_HAS_MGMT) { 3732 int manc = E1000_READ_REG(&adapter->hw, E1000_MANC); 3733 3734 /* re-enable hardware interception of ARP */ 3735 manc |= E1000_MANC_ARP_EN; 3736 3737 if (adapter->hw.mac.type >= e1000_82571) 3738 manc &= ~E1000_MANC_EN_MNG2HOST; 3739 3740 E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc); 3741 } 3742 } 3743 3744 /* 3745 * em_get_hw_control() sets {CTRL_EXT|FWSM}:DRV_LOAD bit. 3746 * For ASF and Pass Through versions of f/w this means that 3747 * the driver is loaded. For AMT version (only with 82573) 3748 * of the f/w this means that the network i/f is open. 3749 */ 3750 static void 3751 em_get_hw_control(struct adapter *adapter) 3752 { 3753 /* Let firmware know the driver has taken over */ 3754 if (adapter->hw.mac.type == e1000_82573) { 3755 uint32_t swsm; 3756 3757 swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM); 3758 E1000_WRITE_REG(&adapter->hw, E1000_SWSM, 3759 swsm | E1000_SWSM_DRV_LOAD); 3760 } else { 3761 uint32_t ctrl_ext; 3762 3763 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT); 3764 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, 3765 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); 3766 } 3767 adapter->flags |= EM_FLAG_HW_CTRL; 3768 } 3769 3770 /* 3771 * em_rel_hw_control() resets {CTRL_EXT|FWSM}:DRV_LOAD bit. 3772 * For ASF and Pass Through versions of f/w this means that the 3773 * driver is no longer loaded. For AMT version (only with 82573) 3774 * of the f/w this means that the network i/f is closed. 3775 */ 3776 static void 3777 em_rel_hw_control(struct adapter *adapter) 3778 { 3779 if ((adapter->flags & EM_FLAG_HW_CTRL) == 0) 3780 return; 3781 adapter->flags &= ~EM_FLAG_HW_CTRL; 3782 3783 /* Let firmware taken over control of h/w */ 3784 if (adapter->hw.mac.type == e1000_82573) { 3785 uint32_t swsm; 3786 3787 swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM); 3788 E1000_WRITE_REG(&adapter->hw, E1000_SWSM, 3789 swsm & ~E1000_SWSM_DRV_LOAD); 3790 } else { 3791 uint32_t ctrl_ext; 3792 3793 ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT); 3794 E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, 3795 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); 3796 } 3797 } 3798 3799 static int 3800 em_is_valid_eaddr(const uint8_t *addr) 3801 { 3802 char zero_addr[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 }; 3803 3804 if ((addr[0] & 1) || !bcmp(addr, zero_addr, ETHER_ADDR_LEN)) 3805 return (FALSE); 3806 3807 return (TRUE); 3808 } 3809 3810 /* 3811 * Enable PCI Wake On Lan capability 3812 */ 3813 void 3814 em_enable_wol(device_t dev) 3815 { 3816 uint16_t cap, status; 3817 uint8_t id; 3818 3819 /* First find the capabilities pointer*/ 3820 cap = pci_read_config(dev, PCIR_CAP_PTR, 2); 3821 3822 /* Read the PM Capabilities */ 3823 id = pci_read_config(dev, cap, 1); 3824 if (id != PCIY_PMG) /* Something wrong */ 3825 return; 3826 3827 /* 3828 * OK, we have the power capabilities, 3829 * so now get the status register 3830 */ 3831 cap += PCIR_POWER_STATUS; 3832 status = pci_read_config(dev, cap, 2); 3833 status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 3834 pci_write_config(dev, cap, status, 2); 3835 } 3836 3837 3838 /* 3839 * 82544 Coexistence issue workaround. 3840 * There are 2 issues. 3841 * 1. Transmit Hang issue. 3842 * To detect this issue, following equation can be used... 3843 * SIZE[3:0] + ADDR[2:0] = SUM[3:0]. 3844 * If SUM[3:0] is in between 1 to 4, we will have this issue. 3845 * 3846 * 2. DAC issue. 3847 * To detect this issue, following equation can be used... 3848 * SIZE[3:0] + ADDR[2:0] = SUM[3:0]. 3849 * If SUM[3:0] is in between 9 to c, we will have this issue. 3850 * 3851 * WORKAROUND: 3852 * Make sure we do not have ending address 3853 * as 1,2,3,4(Hang) or 9,a,b,c (DAC) 3854 */ 3855 static uint32_t 3856 em_82544_fill_desc(bus_addr_t address, uint32_t length, PDESC_ARRAY desc_array) 3857 { 3858 uint32_t safe_terminator; 3859 3860 /* 3861 * Since issue is sensitive to length and address. 3862 * Let us first check the address... 3863 */ 3864 if (length <= 4) { 3865 desc_array->descriptor[0].address = address; 3866 desc_array->descriptor[0].length = length; 3867 desc_array->elements = 1; 3868 return (desc_array->elements); 3869 } 3870 3871 safe_terminator = 3872 (uint32_t)((((uint32_t)address & 0x7) + (length & 0xF)) & 0xF); 3873 3874 /* If it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */ 3875 if (safe_terminator == 0 || 3876 (safe_terminator > 4 && safe_terminator < 9) || 3877 (safe_terminator > 0xC && safe_terminator <= 0xF)) { 3878 desc_array->descriptor[0].address = address; 3879 desc_array->descriptor[0].length = length; 3880 desc_array->elements = 1; 3881 return (desc_array->elements); 3882 } 3883 3884 desc_array->descriptor[0].address = address; 3885 desc_array->descriptor[0].length = length - 4; 3886 desc_array->descriptor[1].address = address + (length - 4); 3887 desc_array->descriptor[1].length = 4; 3888 desc_array->elements = 2; 3889 return (desc_array->elements); 3890 } 3891 3892 static void 3893 em_update_stats(struct adapter *adapter) 3894 { 3895 struct ifnet *ifp = &adapter->arpcom.ac_if; 3896 3897 if (adapter->hw.phy.media_type == e1000_media_type_copper || 3898 (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) { 3899 adapter->stats.symerrs += 3900 E1000_READ_REG(&adapter->hw, E1000_SYMERRS); 3901 adapter->stats.sec += E1000_READ_REG(&adapter->hw, E1000_SEC); 3902 } 3903 adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, E1000_CRCERRS); 3904 adapter->stats.mpc += E1000_READ_REG(&adapter->hw, E1000_MPC); 3905 adapter->stats.scc += E1000_READ_REG(&adapter->hw, E1000_SCC); 3906 adapter->stats.ecol += E1000_READ_REG(&adapter->hw, E1000_ECOL); 3907 3908 adapter->stats.mcc += E1000_READ_REG(&adapter->hw, E1000_MCC); 3909 adapter->stats.latecol += E1000_READ_REG(&adapter->hw, E1000_LATECOL); 3910 adapter->stats.colc += E1000_READ_REG(&adapter->hw, E1000_COLC); 3911 adapter->stats.dc += E1000_READ_REG(&adapter->hw, E1000_DC); 3912 adapter->stats.rlec += E1000_READ_REG(&adapter->hw, E1000_RLEC); 3913 adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, E1000_XONRXC); 3914 adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, E1000_XONTXC); 3915 adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, E1000_XOFFRXC); 3916 adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, E1000_XOFFTXC); 3917 adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, E1000_FCRUC); 3918 adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, E1000_PRC64); 3919 adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, E1000_PRC127); 3920 adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, E1000_PRC255); 3921 adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, E1000_PRC511); 3922 adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, E1000_PRC1023); 3923 adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, E1000_PRC1522); 3924 adapter->stats.gprc += E1000_READ_REG(&adapter->hw, E1000_GPRC); 3925 adapter->stats.bprc += E1000_READ_REG(&adapter->hw, E1000_BPRC); 3926 adapter->stats.mprc += E1000_READ_REG(&adapter->hw, E1000_MPRC); 3927 adapter->stats.gptc += E1000_READ_REG(&adapter->hw, E1000_GPTC); 3928 3929 /* For the 64-bit byte counters the low dword must be read first. */ 3930 /* Both registers clear on the read of the high dword */ 3931 3932 adapter->stats.gorc += E1000_READ_REG(&adapter->hw, E1000_GORCH); 3933 adapter->stats.gotc += E1000_READ_REG(&adapter->hw, E1000_GOTCH); 3934 3935 adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, E1000_RNBC); 3936 adapter->stats.ruc += E1000_READ_REG(&adapter->hw, E1000_RUC); 3937 adapter->stats.rfc += E1000_READ_REG(&adapter->hw, E1000_RFC); 3938 adapter->stats.roc += E1000_READ_REG(&adapter->hw, E1000_ROC); 3939 adapter->stats.rjc += E1000_READ_REG(&adapter->hw, E1000_RJC); 3940 3941 adapter->stats.tor += E1000_READ_REG(&adapter->hw, E1000_TORH); 3942 adapter->stats.tot += E1000_READ_REG(&adapter->hw, E1000_TOTH); 3943 3944 adapter->stats.tpr += E1000_READ_REG(&adapter->hw, E1000_TPR); 3945 adapter->stats.tpt += E1000_READ_REG(&adapter->hw, E1000_TPT); 3946 adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, E1000_PTC64); 3947 adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, E1000_PTC127); 3948 adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, E1000_PTC255); 3949 adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, E1000_PTC511); 3950 adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, E1000_PTC1023); 3951 adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, E1000_PTC1522); 3952 adapter->stats.mptc += E1000_READ_REG(&adapter->hw, E1000_MPTC); 3953 adapter->stats.bptc += E1000_READ_REG(&adapter->hw, E1000_BPTC); 3954 3955 if (adapter->hw.mac.type >= e1000_82543) { 3956 adapter->stats.algnerrc += 3957 E1000_READ_REG(&adapter->hw, E1000_ALGNERRC); 3958 adapter->stats.rxerrc += 3959 E1000_READ_REG(&adapter->hw, E1000_RXERRC); 3960 adapter->stats.tncrs += 3961 E1000_READ_REG(&adapter->hw, E1000_TNCRS); 3962 adapter->stats.cexterr += 3963 E1000_READ_REG(&adapter->hw, E1000_CEXTERR); 3964 adapter->stats.tsctc += 3965 E1000_READ_REG(&adapter->hw, E1000_TSCTC); 3966 adapter->stats.tsctfc += 3967 E1000_READ_REG(&adapter->hw, E1000_TSCTFC); 3968 } 3969 3970 IFNET_STAT_SET(ifp, collisions, adapter->stats.colc); 3971 3972 /* Rx Errors */ 3973 IFNET_STAT_SET(ifp, ierrors, 3974 adapter->dropped_pkts + adapter->stats.rxerrc + 3975 adapter->stats.crcerrs + adapter->stats.algnerrc + 3976 adapter->stats.ruc + adapter->stats.roc + 3977 adapter->stats.mpc + adapter->stats.cexterr); 3978 3979 /* Tx Errors */ 3980 IFNET_STAT_SET(ifp, oerrors, 3981 adapter->stats.ecol + adapter->stats.latecol + 3982 adapter->watchdog_events); 3983 } 3984 3985 static void 3986 em_print_debug_info(struct adapter *adapter) 3987 { 3988 device_t dev = adapter->dev; 3989 uint8_t *hw_addr = adapter->hw.hw_addr; 3990 3991 device_printf(dev, "Adapter hardware address = %p \n", hw_addr); 3992 device_printf(dev, "CTRL = 0x%x RCTL = 0x%x \n", 3993 E1000_READ_REG(&adapter->hw, E1000_CTRL), 3994 E1000_READ_REG(&adapter->hw, E1000_RCTL)); 3995 device_printf(dev, "Packet buffer = Tx=%dk Rx=%dk \n", 3996 ((E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff0000) >> 16),\ 3997 (E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff) ); 3998 device_printf(dev, "Flow control watermarks high = %d low = %d\n", 3999 adapter->hw.fc.high_water, 4000 adapter->hw.fc.low_water); 4001 device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n", 4002 E1000_READ_REG(&adapter->hw, E1000_TIDV), 4003 E1000_READ_REG(&adapter->hw, E1000_TADV)); 4004 device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n", 4005 E1000_READ_REG(&adapter->hw, E1000_RDTR), 4006 E1000_READ_REG(&adapter->hw, E1000_RADV)); 4007 device_printf(dev, "fifo workaround = %lld, fifo_reset_count = %lld\n", 4008 (long long)adapter->tx_fifo_wrk_cnt, 4009 (long long)adapter->tx_fifo_reset_cnt); 4010 device_printf(dev, "hw tdh = %d, hw tdt = %d\n", 4011 E1000_READ_REG(&adapter->hw, E1000_TDH(0)), 4012 E1000_READ_REG(&adapter->hw, E1000_TDT(0))); 4013 device_printf(dev, "hw rdh = %d, hw rdt = %d\n", 4014 E1000_READ_REG(&adapter->hw, E1000_RDH(0)), 4015 E1000_READ_REG(&adapter->hw, E1000_RDT(0))); 4016 device_printf(dev, "Num Tx descriptors avail = %d\n", 4017 adapter->num_tx_desc_avail); 4018 device_printf(dev, "Tx Descriptors not avail1 = %ld\n", 4019 adapter->no_tx_desc_avail1); 4020 device_printf(dev, "Tx Descriptors not avail2 = %ld\n", 4021 adapter->no_tx_desc_avail2); 4022 device_printf(dev, "Std mbuf failed = %ld\n", 4023 adapter->mbuf_alloc_failed); 4024 device_printf(dev, "Std mbuf cluster failed = %ld\n", 4025 adapter->mbuf_cluster_failed); 4026 device_printf(dev, "Driver dropped packets = %ld\n", 4027 adapter->dropped_pkts); 4028 device_printf(dev, "Driver tx dma failure in encap = %ld\n", 4029 adapter->no_tx_dma_setup); 4030 } 4031 4032 static void 4033 em_print_hw_stats(struct adapter *adapter) 4034 { 4035 device_t dev = adapter->dev; 4036 4037 device_printf(dev, "Excessive collisions = %lld\n", 4038 (long long)adapter->stats.ecol); 4039 #if (DEBUG_HW > 0) /* Dont output these errors normally */ 4040 device_printf(dev, "Symbol errors = %lld\n", 4041 (long long)adapter->stats.symerrs); 4042 #endif 4043 device_printf(dev, "Sequence errors = %lld\n", 4044 (long long)adapter->stats.sec); 4045 device_printf(dev, "Defer count = %lld\n", 4046 (long long)adapter->stats.dc); 4047 device_printf(dev, "Missed Packets = %lld\n", 4048 (long long)adapter->stats.mpc); 4049 device_printf(dev, "Receive No Buffers = %lld\n", 4050 (long long)adapter->stats.rnbc); 4051 /* RLEC is inaccurate on some hardware, calculate our own. */ 4052 device_printf(dev, "Receive Length Errors = %lld\n", 4053 ((long long)adapter->stats.roc + (long long)adapter->stats.ruc)); 4054 device_printf(dev, "Receive errors = %lld\n", 4055 (long long)adapter->stats.rxerrc); 4056 device_printf(dev, "Crc errors = %lld\n", 4057 (long long)adapter->stats.crcerrs); 4058 device_printf(dev, "Alignment errors = %lld\n", 4059 (long long)adapter->stats.algnerrc); 4060 device_printf(dev, "Collision/Carrier extension errors = %lld\n", 4061 (long long)adapter->stats.cexterr); 4062 device_printf(dev, "RX overruns = %ld\n", adapter->rx_overruns); 4063 device_printf(dev, "watchdog timeouts = %ld\n", 4064 adapter->watchdog_events); 4065 device_printf(dev, "XON Rcvd = %lld\n", 4066 (long long)adapter->stats.xonrxc); 4067 device_printf(dev, "XON Xmtd = %lld\n", 4068 (long long)adapter->stats.xontxc); 4069 device_printf(dev, "XOFF Rcvd = %lld\n", 4070 (long long)adapter->stats.xoffrxc); 4071 device_printf(dev, "XOFF Xmtd = %lld\n", 4072 (long long)adapter->stats.xofftxc); 4073 device_printf(dev, "Good Packets Rcvd = %lld\n", 4074 (long long)adapter->stats.gprc); 4075 device_printf(dev, "Good Packets Xmtd = %lld\n", 4076 (long long)adapter->stats.gptc); 4077 } 4078 4079 static void 4080 em_print_nvm_info(struct adapter *adapter) 4081 { 4082 uint16_t eeprom_data; 4083 int i, j, row = 0; 4084 4085 /* Its a bit crude, but it gets the job done */ 4086 kprintf("\nInterface EEPROM Dump:\n"); 4087 kprintf("Offset\n0x0000 "); 4088 for (i = 0, j = 0; i < 32; i++, j++) { 4089 if (j == 8) { /* Make the offset block */ 4090 j = 0; ++row; 4091 kprintf("\n0x00%x0 ",row); 4092 } 4093 e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data); 4094 kprintf("%04x ", eeprom_data); 4095 } 4096 kprintf("\n"); 4097 } 4098 4099 static int 4100 em_sysctl_debug_info(SYSCTL_HANDLER_ARGS) 4101 { 4102 struct adapter *adapter; 4103 struct ifnet *ifp; 4104 int error, result; 4105 4106 result = -1; 4107 error = sysctl_handle_int(oidp, &result, 0, req); 4108 if (error || !req->newptr) 4109 return (error); 4110 4111 adapter = (struct adapter *)arg1; 4112 ifp = &adapter->arpcom.ac_if; 4113 4114 lwkt_serialize_enter(ifp->if_serializer); 4115 4116 if (result == 1) 4117 em_print_debug_info(adapter); 4118 4119 /* 4120 * This value will cause a hex dump of the 4121 * first 32 16-bit words of the EEPROM to 4122 * the screen. 4123 */ 4124 if (result == 2) 4125 em_print_nvm_info(adapter); 4126 4127 lwkt_serialize_exit(ifp->if_serializer); 4128 4129 return (error); 4130 } 4131 4132 static int 4133 em_sysctl_stats(SYSCTL_HANDLER_ARGS) 4134 { 4135 int error, result; 4136 4137 result = -1; 4138 error = sysctl_handle_int(oidp, &result, 0, req); 4139 if (error || !req->newptr) 4140 return (error); 4141 4142 if (result == 1) { 4143 struct adapter *adapter = (struct adapter *)arg1; 4144 struct ifnet *ifp = &adapter->arpcom.ac_if; 4145 4146 lwkt_serialize_enter(ifp->if_serializer); 4147 em_print_hw_stats(adapter); 4148 lwkt_serialize_exit(ifp->if_serializer); 4149 } 4150 return (error); 4151 } 4152 4153 static void 4154 em_add_sysctl(struct adapter *adapter) 4155 { 4156 struct sysctl_ctx_list *ctx; 4157 struct sysctl_oid *tree; 4158 4159 ctx = device_get_sysctl_ctx(adapter->dev); 4160 tree = device_get_sysctl_tree(adapter->dev); 4161 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 4162 OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, adapter, 0, 4163 em_sysctl_debug_info, "I", "Debug Information"); 4164 4165 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 4166 OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW, adapter, 0, 4167 em_sysctl_stats, "I", "Statistics"); 4168 4169 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 4170 OID_AUTO, "rxd", CTLFLAG_RD, 4171 &adapter->num_rx_desc, 0, NULL); 4172 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 4173 OID_AUTO, "txd", CTLFLAG_RD, 4174 &adapter->num_tx_desc, 0, NULL); 4175 4176 if (adapter->hw.mac.type >= e1000_82540) { 4177 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 4178 OID_AUTO, "int_throttle_ceil", 4179 CTLTYPE_INT|CTLFLAG_RW, adapter, 0, 4180 em_sysctl_int_throttle, "I", 4181 "interrupt throttling rate"); 4182 } 4183 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), 4184 OID_AUTO, "int_tx_nsegs", 4185 CTLTYPE_INT|CTLFLAG_RW, adapter, 0, 4186 em_sysctl_int_tx_nsegs, "I", 4187 "# segments per TX interrupt"); 4188 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), 4189 OID_AUTO, "wreg_tx_nsegs", CTLFLAG_RW, 4190 &adapter->tx_wreg_nsegs, 0, 4191 "# segments before write to hardware register"); 4192 } 4193 4194 static int 4195 em_sysctl_int_throttle(SYSCTL_HANDLER_ARGS) 4196 { 4197 struct adapter *adapter = (void *)arg1; 4198 struct ifnet *ifp = &adapter->arpcom.ac_if; 4199 int error, throttle; 4200 4201 throttle = adapter->int_throttle_ceil; 4202 error = sysctl_handle_int(oidp, &throttle, 0, req); 4203 if (error || req->newptr == NULL) 4204 return error; 4205 if (throttle < 0 || throttle > 1000000000 / 256) 4206 return EINVAL; 4207 4208 if (throttle) { 4209 /* 4210 * Set the interrupt throttling rate in 256ns increments, 4211 * recalculate sysctl value assignment to get exact frequency. 4212 */ 4213 throttle = 1000000000 / 256 / throttle; 4214 4215 /* Upper 16bits of ITR is reserved and should be zero */ 4216 if (throttle & 0xffff0000) 4217 return EINVAL; 4218 } 4219 4220 lwkt_serialize_enter(ifp->if_serializer); 4221 4222 if (throttle) 4223 adapter->int_throttle_ceil = 1000000000 / 256 / throttle; 4224 else 4225 adapter->int_throttle_ceil = 0; 4226 4227 if (ifp->if_flags & IFF_RUNNING) 4228 em_set_itr(adapter, throttle); 4229 4230 lwkt_serialize_exit(ifp->if_serializer); 4231 4232 if (bootverbose) { 4233 if_printf(ifp, "Interrupt moderation set to %d/sec\n", 4234 adapter->int_throttle_ceil); 4235 } 4236 return 0; 4237 } 4238 4239 static int 4240 em_sysctl_int_tx_nsegs(SYSCTL_HANDLER_ARGS) 4241 { 4242 struct adapter *adapter = (void *)arg1; 4243 struct ifnet *ifp = &adapter->arpcom.ac_if; 4244 int error, segs; 4245 4246 segs = adapter->tx_int_nsegs; 4247 error = sysctl_handle_int(oidp, &segs, 0, req); 4248 if (error || req->newptr == NULL) 4249 return error; 4250 if (segs <= 0) 4251 return EINVAL; 4252 4253 lwkt_serialize_enter(ifp->if_serializer); 4254 4255 /* 4256 * Don't allow int_tx_nsegs to become: 4257 * o Less the oact_tx_desc 4258 * o Too large that no TX desc will cause TX interrupt to 4259 * be generated (OACTIVE will never recover) 4260 * o Too small that will cause tx_dd[] overflow 4261 */ 4262 if (segs < adapter->oact_tx_desc || 4263 segs >= adapter->num_tx_desc - adapter->oact_tx_desc || 4264 segs < adapter->num_tx_desc / EM_TXDD_SAFE) { 4265 error = EINVAL; 4266 } else { 4267 error = 0; 4268 adapter->tx_int_nsegs = segs; 4269 } 4270 4271 lwkt_serialize_exit(ifp->if_serializer); 4272 4273 return error; 4274 } 4275 4276 static void 4277 em_set_itr(struct adapter *adapter, uint32_t itr) 4278 { 4279 E1000_WRITE_REG(&adapter->hw, E1000_ITR, itr); 4280 if (adapter->hw.mac.type == e1000_82574) { 4281 int i; 4282 4283 /* 4284 * When using MSIX interrupts we need to 4285 * throttle using the EITR register 4286 */ 4287 for (i = 0; i < 4; ++i) { 4288 E1000_WRITE_REG(&adapter->hw, 4289 E1000_EITR_82574(i), itr); 4290 } 4291 } 4292 } 4293 4294 static void 4295 em_disable_aspm(struct adapter *adapter) 4296 { 4297 uint16_t link_cap, link_ctrl, disable; 4298 uint8_t pcie_ptr, reg; 4299 device_t dev = adapter->dev; 4300 4301 switch (adapter->hw.mac.type) { 4302 case e1000_82571: 4303 case e1000_82572: 4304 case e1000_82573: 4305 /* 4306 * 82573 specification update 4307 * errata #8 disable L0s 4308 * errata #41 disable L1 4309 * 4310 * 82571/82572 specification update 4311 # errata #13 disable L1 4312 * errata #68 disable L0s 4313 */ 4314 disable = PCIEM_LNKCTL_ASPM_L0S | PCIEM_LNKCTL_ASPM_L1; 4315 break; 4316 4317 case e1000_82574: 4318 case e1000_82583: 4319 /* 4320 * 82574 specification update errata #20 4321 * 82583 specification update errata #9 4322 * 4323 * There is no need to disable L1 4324 */ 4325 disable = PCIEM_LNKCTL_ASPM_L0S; 4326 break; 4327 4328 default: 4329 return; 4330 } 4331 4332 pcie_ptr = pci_get_pciecap_ptr(dev); 4333 if (pcie_ptr == 0) 4334 return; 4335 4336 link_cap = pci_read_config(dev, pcie_ptr + PCIER_LINKCAP, 2); 4337 if ((link_cap & PCIEM_LNKCAP_ASPM_MASK) == 0) 4338 return; 4339 4340 if (bootverbose) { 4341 if_printf(&adapter->arpcom.ac_if, 4342 "disable ASPM %#02x\n", disable); 4343 } 4344 4345 reg = pcie_ptr + PCIER_LINKCTRL; 4346 link_ctrl = pci_read_config(dev, reg, 2); 4347 link_ctrl &= ~disable; 4348 pci_write_config(dev, reg, link_ctrl, 2); 4349 } 4350 4351 static int 4352 em_tso_pullup(struct adapter *adapter, struct mbuf **mp) 4353 { 4354 int iphlen, hoff, thoff, ex = 0; 4355 struct mbuf *m; 4356 struct ip *ip; 4357 4358 m = *mp; 4359 KASSERT(M_WRITABLE(m), ("TSO mbuf not writable")); 4360 4361 iphlen = m->m_pkthdr.csum_iphlen; 4362 thoff = m->m_pkthdr.csum_thlen; 4363 hoff = m->m_pkthdr.csum_lhlen; 4364 4365 KASSERT(iphlen > 0, ("invalid ip hlen")); 4366 KASSERT(thoff > 0, ("invalid tcp hlen")); 4367 KASSERT(hoff > 0, ("invalid ether hlen")); 4368 4369 if (adapter->flags & EM_FLAG_TSO_PULLEX) 4370 ex = 4; 4371 4372 if (m->m_len < hoff + iphlen + thoff + ex) { 4373 m = m_pullup(m, hoff + iphlen + thoff + ex); 4374 if (m == NULL) { 4375 *mp = NULL; 4376 return ENOBUFS; 4377 } 4378 *mp = m; 4379 } 4380 ip = mtodoff(m, struct ip *, hoff); 4381 ip->ip_len = 0; 4382 4383 return 0; 4384 } 4385 4386 static int 4387 em_tso_setup(struct adapter *adapter, struct mbuf *mp, 4388 uint32_t *txd_upper, uint32_t *txd_lower) 4389 { 4390 struct e1000_context_desc *TXD; 4391 int hoff, iphlen, thoff, hlen; 4392 int mss, pktlen, curr_txd; 4393 4394 iphlen = mp->m_pkthdr.csum_iphlen; 4395 thoff = mp->m_pkthdr.csum_thlen; 4396 hoff = mp->m_pkthdr.csum_lhlen; 4397 mss = mp->m_pkthdr.tso_segsz; 4398 pktlen = mp->m_pkthdr.len; 4399 4400 if (adapter->csum_flags == CSUM_TSO && 4401 adapter->csum_iphlen == iphlen && 4402 adapter->csum_lhlen == hoff && 4403 adapter->csum_thlen == thoff && 4404 adapter->csum_mss == mss && 4405 adapter->csum_pktlen == pktlen) { 4406 *txd_upper = adapter->csum_txd_upper; 4407 *txd_lower = adapter->csum_txd_lower; 4408 return 0; 4409 } 4410 hlen = hoff + iphlen + thoff; 4411 4412 /* 4413 * Setup a new TSO context. 4414 */ 4415 4416 curr_txd = adapter->next_avail_tx_desc; 4417 TXD = (struct e1000_context_desc *)&adapter->tx_desc_base[curr_txd]; 4418 4419 *txd_lower = E1000_TXD_CMD_DEXT | /* Extended descr type */ 4420 E1000_TXD_DTYP_D | /* Data descr type */ 4421 E1000_TXD_CMD_TSE; /* Do TSE on this packet */ 4422 4423 /* IP and/or TCP header checksum calculation and insertion. */ 4424 *txd_upper = (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8; 4425 4426 /* 4427 * Start offset for header checksum calculation. 4428 * End offset for header checksum calculation. 4429 * Offset of place put the checksum. 4430 */ 4431 TXD->lower_setup.ip_fields.ipcss = hoff; 4432 TXD->lower_setup.ip_fields.ipcse = htole16(hoff + iphlen - 1); 4433 TXD->lower_setup.ip_fields.ipcso = hoff + offsetof(struct ip, ip_sum); 4434 4435 /* 4436 * Start offset for payload checksum calculation. 4437 * End offset for payload checksum calculation. 4438 * Offset of place to put the checksum. 4439 */ 4440 TXD->upper_setup.tcp_fields.tucss = hoff + iphlen; 4441 TXD->upper_setup.tcp_fields.tucse = 0; 4442 TXD->upper_setup.tcp_fields.tucso = 4443 hoff + iphlen + offsetof(struct tcphdr, th_sum); 4444 4445 /* 4446 * Payload size per packet w/o any headers. 4447 * Length of all headers up to payload. 4448 */ 4449 TXD->tcp_seg_setup.fields.mss = htole16(mss); 4450 TXD->tcp_seg_setup.fields.hdr_len = hlen; 4451 TXD->cmd_and_length = htole32(E1000_TXD_CMD_IFCS | 4452 E1000_TXD_CMD_DEXT | /* Extended descr */ 4453 E1000_TXD_CMD_TSE | /* TSE context */ 4454 E1000_TXD_CMD_IP | /* Do IP csum */ 4455 E1000_TXD_CMD_TCP | /* Do TCP checksum */ 4456 (pktlen - hlen)); /* Total len */ 4457 4458 /* Save the information for this TSO context */ 4459 adapter->csum_flags = CSUM_TSO; 4460 adapter->csum_lhlen = hoff; 4461 adapter->csum_iphlen = iphlen; 4462 adapter->csum_thlen = thoff; 4463 adapter->csum_mss = mss; 4464 adapter->csum_pktlen = pktlen; 4465 adapter->csum_txd_upper = *txd_upper; 4466 adapter->csum_txd_lower = *txd_lower; 4467 4468 if (++curr_txd == adapter->num_tx_desc) 4469 curr_txd = 0; 4470 4471 KKASSERT(adapter->num_tx_desc_avail > 0); 4472 adapter->num_tx_desc_avail--; 4473 4474 adapter->next_avail_tx_desc = curr_txd; 4475 return 1; 4476 } 4477