/*
 * Copyright (c) 2004 Joerg Sonnenberger <joerg@bec.de>.  All rights reserved.
 *
 * Copyright (c) 2001-2008, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * Copyright (c) 2005 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */
/*
 * SERIALIZATION API RULES:
 *
 * - If the driver uses the same serializer for the interrupt as for the
 *   ifnet, most of the serialization will be done automatically for the
 *   driver.
 *
 * - ifmedia entry points will be serialized by the ifmedia code using the
 *   ifnet serializer.
 *
 * - if_* entry points except for if_input will be serialized by the IF
 *   and protocol layers.
 *
 * - The device driver must be sure to serialize access from timeout code
 *   installed by the device driver.
 *
 * - The device driver typically holds the serializer at the time it wishes
 *   to call if_input.
 *
 * - We must call lwkt_serialize_handler_enable() prior to enabling the
 *   hardware interrupt and lwkt_serialize_handler_disable() after disabling
 *   the hardware interrupt in order to avoid handler execution races from
 *   scheduled interrupt threads.
 *
 * NOTE!  Since callers into the device driver hold the ifnet serializer,
 * the device driver may be holding a serializer at the time it calls
 * if_input even if it is not serializer-aware.
 */
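
/*
 * Illustrative ordering for the handler enable/disable rule above
 * (a sketch only, not driver code): the serializer handler calls must
 * bracket the hardware interrupt mask/unmask operations.
 *
 *	lwkt_serialize_handler_enable(ifp->if_serializer);
 *	... write the hardware register that unmasks interrupts ...
 *
 *	... write the hardware register that masks interrupts ...
 *	lwkt_serialize_handler_disable(ifp->if_serializer);
 */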

#include "opt_polling.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/serialize.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/ifq_var.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <bus/pci/pcivar.h>
#include <bus/pci/pcireg.h>

#include <dev/netif/ig_hal/e1000_api.h>
#include <dev/netif/ig_hal/e1000_82571.h>
#include <dev/netif/em/if_em.h>

#define EM_NAME	"Intel(R) PRO/1000 Network Connection "
#define EM_VER	" 6.9.6"

#define _EM_DEVICE(id, ret) \
	{ EM_VENDOR_ID, E1000_DEV_ID_##id, ret, EM_NAME #id EM_VER }
#define EM_EMX_DEVICE(id)	_EM_DEVICE(id, -100)
#define EM_DEVICE(id)		_EM_DEVICE(id, 0)
#define EM_DEVICE_NULL	{ 0, 0, 0, NULL }

static const struct em_vendor_info em_vendor_info_array[] = {
	EM_DEVICE(82540EM),
	EM_DEVICE(82540EM_LOM),
	EM_DEVICE(82540EP),
	EM_DEVICE(82540EP_LOM),
	EM_DEVICE(82540EP_LP),

	EM_DEVICE(82541EI),
	EM_DEVICE(82541ER),
	EM_DEVICE(82541ER_LOM),
	EM_DEVICE(82541EI_MOBILE),
	EM_DEVICE(82541GI),
	EM_DEVICE(82541GI_LF),
	EM_DEVICE(82541GI_MOBILE),

	EM_DEVICE(82542),

	EM_DEVICE(82543GC_FIBER),
	EM_DEVICE(82543GC_COPPER),

	EM_DEVICE(82544EI_COPPER),
	EM_DEVICE(82544EI_FIBER),
	EM_DEVICE(82544GC_COPPER),
	EM_DEVICE(82544GC_LOM),

	EM_DEVICE(82545EM_COPPER),
	EM_DEVICE(82545EM_FIBER),
	EM_DEVICE(82545GM_COPPER),
	EM_DEVICE(82545GM_FIBER),
	EM_DEVICE(82545GM_SERDES),

	EM_DEVICE(82546EB_COPPER),
	EM_DEVICE(82546EB_FIBER),
	EM_DEVICE(82546EB_QUAD_COPPER),
	EM_DEVICE(82546GB_COPPER),
	EM_DEVICE(82546GB_FIBER),
	EM_DEVICE(82546GB_SERDES),
	EM_DEVICE(82546GB_PCIE),
	EM_DEVICE(82546GB_QUAD_COPPER),
	EM_DEVICE(82546GB_QUAD_COPPER_KSP3),

	EM_DEVICE(82547EI),
	EM_DEVICE(82547EI_MOBILE),
	EM_DEVICE(82547GI),

	EM_EMX_DEVICE(82571EB_COPPER),
	EM_EMX_DEVICE(82571EB_FIBER),
	EM_EMX_DEVICE(82571EB_SERDES),
	EM_EMX_DEVICE(82571EB_SERDES_DUAL),
	EM_EMX_DEVICE(82571EB_SERDES_QUAD),
	EM_EMX_DEVICE(82571EB_QUAD_COPPER),
	EM_EMX_DEVICE(82571EB_QUAD_COPPER_BP),
	EM_EMX_DEVICE(82571EB_QUAD_COPPER_LP),
	EM_EMX_DEVICE(82571EB_QUAD_FIBER),
	EM_EMX_DEVICE(82571PT_QUAD_COPPER),

	EM_EMX_DEVICE(82572EI_COPPER),
	EM_EMX_DEVICE(82572EI_FIBER),
	EM_EMX_DEVICE(82572EI_SERDES),
	EM_EMX_DEVICE(82572EI),

	EM_EMX_DEVICE(82573E),
	EM_EMX_DEVICE(82573E_IAMT),
	EM_EMX_DEVICE(82573L),

	EM_EMX_DEVICE(80003ES2LAN_COPPER_SPT),
	EM_EMX_DEVICE(80003ES2LAN_SERDES_SPT),
	EM_EMX_DEVICE(80003ES2LAN_COPPER_DPT),
	EM_EMX_DEVICE(80003ES2LAN_SERDES_DPT),

	EM_DEVICE(ICH8_IGP_M_AMT),
	EM_DEVICE(ICH8_IGP_AMT),
	EM_DEVICE(ICH8_IGP_C),
	EM_DEVICE(ICH8_IFE),
	EM_DEVICE(ICH8_IFE_GT),
	EM_DEVICE(ICH8_IFE_G),
	EM_DEVICE(ICH8_IGP_M),

	EM_DEVICE(ICH9_IGP_M_AMT),
	EM_DEVICE(ICH9_IGP_AMT),
	EM_DEVICE(ICH9_IGP_C),
	EM_DEVICE(ICH9_IGP_M),
	EM_DEVICE(ICH9_IGP_M_V),
	EM_DEVICE(ICH9_IFE),
	EM_DEVICE(ICH9_IFE_GT),
	EM_DEVICE(ICH9_IFE_G),
	EM_DEVICE(ICH9_BM),

	EM_EMX_DEVICE(82574L),

	EM_DEVICE(ICH10_R_BM_LM),
	EM_DEVICE(ICH10_R_BM_LF),
	EM_DEVICE(ICH10_R_BM_V),
	EM_DEVICE(ICH10_D_BM_LM),
	EM_DEVICE(ICH10_D_BM_LF),

	/* required last entry */
	EM_DEVICE_NULL
};
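
/*
 * Note (assumption, inferred from the table above): the -100 return
 * value used by EM_EMX_DEVICE() makes em(4) a lower-priority probe
 * match for those chips, so a driver returning a higher (less
 * negative) probe value, e.g. emx(4), can claim them first.
 */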

static int	em_probe(device_t);
static int	em_attach(device_t);
static int	em_detach(device_t);
static int	em_shutdown(device_t);
static int	em_suspend(device_t);
static int	em_resume(device_t);

static void	em_init(void *);
static void	em_stop(struct adapter *);
static int	em_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	em_start(struct ifnet *);
#ifdef DEVICE_POLLING
static void	em_poll(struct ifnet *, enum poll_cmd, int);
#endif
static void	em_watchdog(struct ifnet *);
static void	em_media_status(struct ifnet *, struct ifmediareq *);
static int	em_media_change(struct ifnet *);
static void	em_timer(void *);

static void	em_intr(void *);
static void	em_rxeof(struct adapter *, int);
static void	em_txeof(struct adapter *);
static void	em_tx_collect(struct adapter *);
static void	em_tx_purge(struct adapter *);
static void	em_enable_intr(struct adapter *);
static void	em_disable_intr(struct adapter *);

static int	em_dma_malloc(struct adapter *, bus_size_t,
		    struct em_dma_alloc *);
static void	em_dma_free(struct adapter *, struct em_dma_alloc *);
static void	em_init_tx_ring(struct adapter *);
static int	em_init_rx_ring(struct adapter *);
static int	em_create_tx_ring(struct adapter *);
static int	em_create_rx_ring(struct adapter *);
static void	em_destroy_tx_ring(struct adapter *, int);
static void	em_destroy_rx_ring(struct adapter *, int);
static int	em_newbuf(struct adapter *, int, int);
static int	em_encap(struct adapter *, struct mbuf **);
static void	em_rxcsum(struct adapter *, struct e1000_rx_desc *,
		    struct mbuf *);
static int	em_txcsum_pullup(struct adapter *, struct mbuf **);
static int	em_txcsum(struct adapter *, struct mbuf *,
		    uint32_t *, uint32_t *);

static int	em_get_hw_info(struct adapter *);
static int	em_is_valid_eaddr(const uint8_t *);
static int	em_alloc_pci_res(struct adapter *);
static void	em_free_pci_res(struct adapter *);
static int	em_hw_init(struct adapter *);
static void	em_setup_ifp(struct adapter *);
static void	em_init_tx_unit(struct adapter *);
static void	em_init_rx_unit(struct adapter *);
static void	em_update_stats(struct adapter *);
static void	em_set_promisc(struct adapter *);
static void	em_disable_promisc(struct adapter *);
static void	em_set_multi(struct adapter *);
static void	em_update_link_status(struct adapter *);
static void	em_smartspeed(struct adapter *);

/* Hardware workarounds */
static int	em_82547_fifo_workaround(struct adapter *, int);
static void	em_82547_update_fifo_head(struct adapter *, int);
static int	em_82547_tx_fifo_reset(struct adapter *);
static void	em_82547_move_tail(void *);
static void	em_82547_move_tail_serialized(struct adapter *);
static uint32_t	em_82544_fill_desc(bus_addr_t, uint32_t, PDESC_ARRAY);

static void	em_print_debug_info(struct adapter *);
static void	em_print_nvm_info(struct adapter *);
static void	em_print_hw_stats(struct adapter *);

static int	em_sysctl_stats(SYSCTL_HANDLER_ARGS);
static int	em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
static int	em_sysctl_int_throttle(SYSCTL_HANDLER_ARGS);
static int	em_sysctl_int_tx_nsegs(SYSCTL_HANDLER_ARGS);
static void	em_add_sysctl(struct adapter *adapter);

/* Management and WOL Support */
static void	em_get_mgmt(struct adapter *);
static void	em_rel_mgmt(struct adapter *);
static void	em_get_hw_control(struct adapter *);
static void	em_rel_hw_control(struct adapter *);
static void	em_enable_wol(device_t);

static device_method_t em_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		em_probe),
	DEVMETHOD(device_attach,	em_attach),
	DEVMETHOD(device_detach,	em_detach),
	DEVMETHOD(device_shutdown,	em_shutdown),
	DEVMETHOD(device_suspend,	em_suspend),
	DEVMETHOD(device_resume,	em_resume),
	{ 0, 0 }
};

static driver_t em_driver = {
	"em",
	em_methods,
	sizeof(struct adapter),
};

static devclass_t em_devclass;

DECLARE_DUMMY_MODULE(if_em);
MODULE_DEPEND(em, ig_hal, 1, 1, 1);
DRIVER_MODULE(if_em, pci, em_driver, em_devclass, NULL, NULL);

/*
 * Tunables
 */
static int	em_int_throttle_ceil = EM_DEFAULT_ITR;
static int	em_rxd = EM_DEFAULT_RXD;
static int	em_txd = EM_DEFAULT_TXD;
static int	em_smart_pwr_down = FALSE;

/* Controls whether promiscuous also shows bad packets */
static int	em_debug_sbp = FALSE;

static int	em_82573_workaround = TRUE;

TUNABLE_INT("hw.em.int_throttle_ceil", &em_int_throttle_ceil);
TUNABLE_INT("hw.em.rxd", &em_rxd);
TUNABLE_INT("hw.em.txd", &em_txd);
TUNABLE_INT("hw.em.smart_pwr_down", &em_smart_pwr_down);
TUNABLE_INT("hw.em.sbp", &em_debug_sbp);
TUNABLE_INT("hw.em.82573_workaround", &em_82573_workaround);

/* Global used in WOL setup with multiport cards */
static int	em_global_quad_port_a = 0;

/* Set this to one to display debug statistics */
static int	em_display_debug_stats = 0;

#if !defined(KTR_IF_EM)
#define KTR_IF_EM	KTR_ALL
#endif
KTR_INFO_MASTER(if_em);
KTR_INFO(KTR_IF_EM, if_em, intr_beg, 0, "intr begin", 0);
KTR_INFO(KTR_IF_EM, if_em, intr_end, 1, "intr end", 0);
KTR_INFO(KTR_IF_EM, if_em, pkt_receive, 4, "rx packet", 0);
KTR_INFO(KTR_IF_EM, if_em, pkt_txqueue, 5, "tx packet", 0);
KTR_INFO(KTR_IF_EM, if_em, pkt_txclean, 6, "tx clean", 0);
#define logif(name)	KTR_LOG(if_em_ ## name)

static int
em_probe(device_t dev)
{
	const struct em_vendor_info *ent;
	uint16_t vid, did;

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);

	for (ent = em_vendor_info_array; ent->desc != NULL; ++ent) {
		if (vid == ent->vendor_id && did == ent->device_id) {
			device_set_desc(dev, ent->desc);
			device_set_async_attach(dev, TRUE);
			return (ent->ret);
		}
	}
	return (ENXIO);
}

static int
em_attach(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ifnet *ifp = &adapter->arpcom.ac_if;
	int tsize, rsize;
	int error = 0;
	uint16_t eeprom_data, device_id;

	adapter->dev = adapter->osdep.dev = dev;

	callout_init(&adapter->timer);
	callout_init(&adapter->tx_fifo_timer);

	/* Determine hardware and mac info */
	error = em_get_hw_info(adapter);
	if (error) {
		device_printf(dev, "Identify hardware failed\n");
		goto fail;
	}

	/* Setup PCI resources */
	error = em_alloc_pci_res(adapter);
	if (error) {
		device_printf(dev, "Allocation of PCI resources failed\n");
		goto fail;
	}

	/*
	 * For ICH8 and family we need to map the flash memory,
	 * and this must happen after the MAC is identified.
	 */
	if (adapter->hw.mac.type == e1000_ich8lan ||
	    adapter->hw.mac.type == e1000_ich10lan ||
	    adapter->hw.mac.type == e1000_ich9lan) {
		adapter->flash_rid = EM_BAR_FLASH;

		adapter->flash = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
					&adapter->flash_rid, RF_ACTIVE);
		if (adapter->flash == NULL) {
			device_printf(dev, "Mapping of Flash failed\n");
			error = ENXIO;
			goto fail;
		}
		adapter->osdep.flash_bus_space_tag =
		    rman_get_bustag(adapter->flash);
		adapter->osdep.flash_bus_space_handle =
		    rman_get_bushandle(adapter->flash);

		/*
		 * This is used in the shared code
		 * XXX this goof is actually not used.
		 */
		adapter->hw.flash_address = (uint8_t *)adapter->flash;
	}

	/* Do Shared Code initialization */
	if (e1000_setup_init_funcs(&adapter->hw, TRUE)) {
		device_printf(dev, "Setup of Shared code failed\n");
		error = ENXIO;
		goto fail;
	}

	e1000_get_bus_info(&adapter->hw);

	/*
	 * Validate the number of transmit and receive descriptors.  They
	 * must not exceed the hardware maximum and must be a multiple of
	 * EM_DBA_ALIGN.
	 */
	if ((em_txd * sizeof(struct e1000_tx_desc)) % EM_DBA_ALIGN != 0 ||
	    (adapter->hw.mac.type >= e1000_82544 && em_txd > EM_MAX_TXD) ||
	    (adapter->hw.mac.type < e1000_82544 && em_txd > EM_MAX_TXD_82543) ||
	    em_txd < EM_MIN_TXD) {
		device_printf(dev, "Using %d TX descriptors instead of %d!\n",
			      EM_DEFAULT_TXD, em_txd);
		adapter->num_tx_desc = EM_DEFAULT_TXD;
	} else {
		adapter->num_tx_desc = em_txd;
	}
	if ((em_rxd * sizeof(struct e1000_rx_desc)) % EM_DBA_ALIGN != 0 ||
	    (adapter->hw.mac.type >= e1000_82544 && em_rxd > EM_MAX_RXD) ||
	    (adapter->hw.mac.type < e1000_82544 && em_rxd > EM_MAX_RXD_82543) ||
	    em_rxd < EM_MIN_RXD) {
		device_printf(dev, "Using %d RX descriptors instead of %d!\n",
			      EM_DEFAULT_RXD, em_rxd);
		adapter->num_rx_desc = EM_DEFAULT_RXD;
	} else {
		adapter->num_rx_desc = em_rxd;
	}

	adapter->hw.mac.autoneg = DO_AUTO_NEG;
	adapter->hw.phy.autoneg_wait_to_complete = FALSE;
	adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
	adapter->rx_buffer_len = MCLBYTES;

	/*
	 * Interrupt throttle rate
	 */
	if (em_int_throttle_ceil == 0) {
		adapter->int_throttle_ceil = 0;
	} else {
		int throttle = em_int_throttle_ceil;

		if (throttle < 0)
			throttle = EM_DEFAULT_ITR;

		/* Recalculate the tunable value to get the exact frequency. */
		throttle = 1000000000 / 256 / throttle;

		/* Upper 16 bits of ITR are reserved and must be zero */
		if (throttle & 0xffff0000)
			throttle = 1000000000 / 256 / EM_DEFAULT_ITR;

		adapter->int_throttle_ceil = 1000000000 / 256 / throttle;
	}
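
	/*
	 * Worked example (sketch): if the tunable ceiling were 10000
	 * interrupts/s, the intermediate register value would be
	 * 1000000000 / 256 / 10000 = 390, and the recomputed ceiling
	 * 1000000000 / 256 / 390 = 10016 interrupts/s, i.e. the nearest
	 * frequency the ITR register's 256ns granularity can express.
	 */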

	e1000_init_script_state_82541(&adapter->hw, TRUE);
	e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE);

	/* Copper options */
	if (adapter->hw.phy.media_type == e1000_media_type_copper) {
		adapter->hw.phy.mdix = AUTO_ALL_MODES;
		adapter->hw.phy.disable_polarity_correction = FALSE;
		adapter->hw.phy.ms_type = EM_MASTER_SLAVE;
	}

	/* Set the frame limits assuming standard ethernet sized frames. */
	adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;
	adapter->min_frame_size = ETH_ZLEN + ETHER_CRC_LEN;

	/* This controls when hardware reports transmit completion status. */
	adapter->hw.mac.report_tx_early = 1;

	/*
	 * Create top level busdma tag
	 */
	error = bus_dma_tag_create(NULL, 1, 0,
			BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
			NULL, NULL,
			BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
			0, &adapter->parent_dtag);
	if (error) {
		device_printf(dev, "could not create top level DMA tag\n");
		goto fail;
	}

	/*
	 * Allocate Transmit Descriptor ring
	 */
	tsize = roundup2(adapter->num_tx_desc * sizeof(struct e1000_tx_desc),
			 EM_DBA_ALIGN);
	error = em_dma_malloc(adapter, tsize, &adapter->txdma);
	if (error) {
		device_printf(dev, "Unable to allocate tx_desc memory\n");
		goto fail;
	}
	adapter->tx_desc_base = adapter->txdma.dma_vaddr;

	/*
	 * Allocate Receive Descriptor ring
	 */
	rsize = roundup2(adapter->num_rx_desc * sizeof(struct e1000_rx_desc),
			 EM_DBA_ALIGN);
	error = em_dma_malloc(adapter, rsize, &adapter->rxdma);
	if (error) {
		device_printf(dev, "Unable to allocate rx_desc memory\n");
		goto fail;
	}
	adapter->rx_desc_base = adapter->rxdma.dma_vaddr;

	/* Make sure we have a good EEPROM before we read from it */
	if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state; call it again.
		 * If it fails a second time, it's a real issue.
		 */
		if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
			device_printf(dev,
			    "The EEPROM Checksum Is Not Valid\n");
			error = EIO;
			goto fail;
		}
	}

	/* Initialize the hardware */
	error = em_hw_init(adapter);
	if (error) {
		device_printf(dev, "Unable to initialize the hardware\n");
		goto fail;
	}

	/* Copy the permanent MAC address out of the EEPROM */
	if (e1000_read_mac_addr(&adapter->hw) < 0) {
		device_printf(dev, "EEPROM read error while reading MAC"
		    " address\n");
		error = EIO;
		goto fail;
	}
	if (!em_is_valid_eaddr(adapter->hw.mac.addr)) {
		device_printf(dev, "Invalid MAC address\n");
		error = EIO;
		goto fail;
	}

	/* Allocate transmit descriptors and buffers */
	error = em_create_tx_ring(adapter);
	if (error) {
		device_printf(dev, "Could not setup transmit structures\n");
		goto fail;
	}

	/* Allocate receive descriptors and buffers */
	error = em_create_rx_ring(adapter);
	if (error) {
		device_printf(dev, "Could not setup receive structures\n");
		goto fail;
	}

	/* Manually turn off all interrupts */
	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);

	/* Setup OS specific network interface */
	em_setup_ifp(adapter);

	/* Add sysctl tree; must be done after em_setup_ifp() */
	em_add_sysctl(adapter);

	/* Initialize statistics */
	em_update_stats(adapter);

	adapter->hw.mac.get_link_status = 1;
	em_update_link_status(adapter);

	/* Indicate SOL/IDER usage */
	if (e1000_check_reset_block(&adapter->hw)) {
		device_printf(dev,
		    "PHY reset is blocked due to SOL/IDER session.\n");
	}

	/* Determine if we have to control management hardware */
	adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw);

	/*
	 * Setup Wake-on-Lan
	 */
	switch (adapter->hw.mac.type) {
	case e1000_82542:
	case e1000_82543:
		break;

	case e1000_82546:
	case e1000_82546_rev_3:
	case e1000_82571:
	case e1000_80003es2lan:
		if (adapter->hw.bus.func == 1) {
			e1000_read_nvm(&adapter->hw,
			    NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
		} else {
			e1000_read_nvm(&adapter->hw,
			    NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		}
		eeprom_data &= EM_EEPROM_APME;
		break;

	default:
		/* APME bit in EEPROM is mapped to WUC.APME */
		eeprom_data =
		    E1000_READ_REG(&adapter->hw, E1000_WUC) & E1000_WUC_APME;
		break;
	}
	if (eeprom_data)
		adapter->wol = E1000_WUFC_MAG;

	/*
	 * We have the eeprom settings; now apply the special cases
	 * where the eeprom may be wrong or the board won't support
	 * wake on lan on a particular port.
	 */
	device_id = pci_get_device(dev);
	switch (device_id) {
	case E1000_DEV_ID_82546GB_PCIE:
		adapter->wol = 0;
		break;

	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546GB_FIBER:
	case E1000_DEV_ID_82571EB_FIBER:
		/*
		 * Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting
		 */
		if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
		    E1000_STATUS_FUNC_1)
			adapter->wol = 0;
		break;

	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
	case E1000_DEV_ID_82571EB_QUAD_COPPER:
	case E1000_DEV_ID_82571EB_QUAD_FIBER:
	case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
		/* if quad port adapter, disable WoL on all but port A */
		if (em_global_quad_port_a != 0)
			adapter->wol = 0;
		/* Reset for multiple quad port adapters */
		if (++em_global_quad_port_a == 4)
			em_global_quad_port_a = 0;
		break;
	}

	/* XXX disable wol */
	adapter->wol = 0;

	/* Do we need a workaround for the 82544 PCI-X adapter? */
	if (adapter->hw.bus.type == e1000_bus_type_pcix &&
	    adapter->hw.mac.type == e1000_82544)
		adapter->pcix_82544 = TRUE;
	else
		adapter->pcix_82544 = FALSE;

	if (adapter->pcix_82544) {
		/*
		 * 82544 on PCI-X may split one TX segment
		 * into two TX descs, so we double the number
		 * of spare TX descriptors here.
		 */
		adapter->spare_tx_desc = 2 * EM_TX_SPARE;
	} else {
		adapter->spare_tx_desc = EM_TX_SPARE;
	}

	/*
	 * Keep the following relationship between spare_tx_desc,
	 * oact_tx_desc and tx_int_nsegs:
	 * (spare_tx_desc + EM_TX_RESERVED) <=
	 * oact_tx_desc <= EM_TX_OACTIVE_MAX <= tx_int_nsegs
	 */
	adapter->oact_tx_desc = adapter->num_tx_desc / 8;
	if (adapter->oact_tx_desc > EM_TX_OACTIVE_MAX)
		adapter->oact_tx_desc = EM_TX_OACTIVE_MAX;
	if (adapter->oact_tx_desc < adapter->spare_tx_desc + EM_TX_RESERVED)
		adapter->oact_tx_desc = adapter->spare_tx_desc + EM_TX_RESERVED;

	adapter->tx_int_nsegs = adapter->num_tx_desc / 16;
	if (adapter->tx_int_nsegs < adapter->oact_tx_desc)
		adapter->tx_int_nsegs = adapter->oact_tx_desc;
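
	/*
	 * Worked example (sketch; the constants live in if_em.h): with
	 * num_tx_desc = 512, oact_tx_desc starts at 512/8 = 64 and is
	 * clamped into [spare_tx_desc + EM_TX_RESERVED, EM_TX_OACTIVE_MAX];
	 * tx_int_nsegs starts at 512/16 = 32 and is then raised to
	 * oact_tx_desc if smaller, which preserves the required ordering.
	 */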

	error = bus_setup_intr(dev, adapter->intr_res, INTR_MPSAFE,
			       em_intr, adapter, &adapter->intr_tag,
			       ifp->if_serializer);
	if (error) {
		device_printf(dev, "Failed to register interrupt handler\n");
		ether_ifdetach(&adapter->arpcom.ac_if);
		goto fail;
	}

	ifp->if_cpuid = ithread_cpuid(rman_get_start(adapter->intr_res));
	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);
	return (0);
fail:
	em_detach(dev);
	return (error);
}

static int
em_detach(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &adapter->arpcom.ac_if;

		lwkt_serialize_enter(ifp->if_serializer);

		em_stop(adapter);

		e1000_phy_hw_reset(&adapter->hw);

		em_rel_mgmt(adapter);

		if ((adapter->hw.mac.type == e1000_82573 ||
		     adapter->hw.mac.type == e1000_ich8lan ||
		     adapter->hw.mac.type == e1000_ich10lan ||
		     adapter->hw.mac.type == e1000_ich9lan) &&
		    e1000_check_mng_mode(&adapter->hw))
			em_rel_hw_control(adapter);

		if (adapter->wol) {
			E1000_WRITE_REG(&adapter->hw, E1000_WUC,
					E1000_WUC_PME_EN);
			E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
			em_enable_wol(dev);
		}

		bus_teardown_intr(dev, adapter->intr_res, adapter->intr_tag);

		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}
	bus_generic_detach(dev);

	em_free_pci_res(adapter);

	em_destroy_tx_ring(adapter, adapter->num_tx_desc);
	em_destroy_rx_ring(adapter, adapter->num_rx_desc);

	/* Free Transmit Descriptor ring */
	if (adapter->tx_desc_base)
		em_dma_free(adapter, &adapter->txdma);

	/* Free Receive Descriptor ring */
	if (adapter->rx_desc_base)
		em_dma_free(adapter, &adapter->rxdma);

	/* Free top level busdma tag */
	if (adapter->parent_dtag != NULL)
		bus_dma_tag_destroy(adapter->parent_dtag);

	/* Free sysctl tree */
	if (adapter->sysctl_tree != NULL)
		sysctl_ctx_free(&adapter->sysctl_ctx);

	return (0);
}

static int
em_shutdown(device_t dev)
{
	return em_suspend(dev);
}

static int
em_suspend(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ifnet *ifp = &adapter->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);

	em_stop(adapter);

	em_rel_mgmt(adapter);

	if ((adapter->hw.mac.type == e1000_82573 ||
	     adapter->hw.mac.type == e1000_ich8lan ||
	     adapter->hw.mac.type == e1000_ich10lan ||
	     adapter->hw.mac.type == e1000_ich9lan) &&
	    e1000_check_mng_mode(&adapter->hw))
		em_rel_hw_control(adapter);

	if (adapter->wol) {
		E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
		E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
		em_enable_wol(dev);
	}

	lwkt_serialize_exit(ifp->if_serializer);

	return bus_generic_suspend(dev);
}

static int
em_resume(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ifnet *ifp = &adapter->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);

	em_init(adapter);
	em_get_mgmt(adapter);
	if_devstart(ifp);

	lwkt_serialize_exit(ifp->if_serializer);

	return bus_generic_resume(dev);
}

static void
em_start(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct mbuf *m_head;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	if (!adapter->link_active) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	while (!ifq_is_empty(&ifp->if_snd)) {
		/* Do we at least have a minimal number of free TX descs? */
		if (EM_IS_OACTIVE(adapter)) {
			em_tx_collect(adapter);
			if (EM_IS_OACTIVE(adapter)) {
				ifp->if_flags |= IFF_OACTIVE;
				adapter->no_tx_desc_avail1++;
				break;
			}
		}

		logif(pkt_txqueue);
		m_head = ifq_dequeue(&ifp->if_snd, NULL);
		if (m_head == NULL)
			break;

		if (em_encap(adapter, &m_head)) {
			ifp->if_oerrors++;
			em_tx_collect(adapter);
			continue;
		}

		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);

		/* Set timeout in case hardware has problems transmitting. */
		ifp->if_timer = EM_TX_TIMEOUT;
	}
}

static int
em_ioctl(struct ifnet *ifp, u_long command, caddr_t data, struct ucred *cr)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	uint16_t eeprom_data = 0;
	int max_frame_size, mask, reinit;
	int error = 0;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (command) {
	case SIOCSIFMTU:
		switch (adapter->hw.mac.type) {
		case e1000_82573:
			/*
			 * 82573 only supports jumbo frames
			 * if ASPM is disabled.
			 */
			e1000_read_nvm(&adapter->hw,
			    NVM_INIT_3GIO_3, 1, &eeprom_data);
			if (eeprom_data & NVM_WORD1A_ASPM_MASK) {
				max_frame_size = ETHER_MAX_LEN;
				break;
			}
			/* FALL THROUGH */

		/* Limit Jumbo Frame size */
		case e1000_82571:
		case e1000_82572:
		case e1000_ich9lan:
		case e1000_ich10lan:
		case e1000_82574:
		case e1000_80003es2lan:
			max_frame_size = 9234;
			break;

		/* Adapters that do not support jumbo frames */
		case e1000_82542:
		case e1000_ich8lan:
			max_frame_size = ETHER_MAX_LEN;
			break;

		default:
			max_frame_size = MAX_JUMBO_FRAME_SIZE;
			break;
		}
		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
		    ETHER_CRC_LEN) {
			error = EINVAL;
			break;
		}

		ifp->if_mtu = ifr->ifr_mtu;
		adapter->max_frame_size =
		    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

		if (ifp->if_flags & IFF_RUNNING)
			em_init(adapter);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_flags & IFF_RUNNING)) {
				if ((ifp->if_flags ^ adapter->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					em_disable_promisc(adapter);
					em_set_promisc(adapter);
				}
			} else {
				em_init(adapter);
			}
		} else if (ifp->if_flags & IFF_RUNNING) {
			em_stop(adapter);
		}
		adapter->if_flags = ifp->if_flags;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING) {
			em_disable_intr(adapter);
			em_set_multi(adapter);
			if (adapter->hw.mac.type == e1000_82542 &&
			    adapter->hw.revision_id == E1000_REVISION_2)
				em_init_rx_unit(adapter);
#ifdef DEVICE_POLLING
			if (!(ifp->if_flags & IFF_POLLING))
#endif
				em_enable_intr(adapter);
		}
		break;

	case SIOCSIFMEDIA:
		/* Check SOL/IDER usage */
		if (e1000_check_reset_block(&adapter->hw)) {
			device_printf(adapter->dev, "Media change is"
			    " blocked due to SOL/IDER session.\n");
			break;
		}
		/* FALL THROUGH */

	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;

	case SIOCSIFCAP:
		reinit = 0;
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable ^= (mask & IFCAP_HWCSUM);
			reinit = 1;
		}
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		if (reinit && (ifp->if_flags & IFF_RUNNING))
			em_init(adapter);
		break;

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return (error);
}

static void
em_watchdog(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/*
	 * The timer is set to EM_TX_TIMEOUT (5) every time start queues
	 * a packet.  Then txeof keeps resetting it as long as it cleans
	 * at least one descriptor.  Finally, anytime all descriptors are
	 * clean the timer is set to 0.
	 */

	if (E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
	    E1000_READ_REG(&adapter->hw, E1000_TDH(0))) {
		/*
		 * If we reach here, all TX jobs are completed and
		 * the TX engine should have been idled for some time.
		 * We don't need to call if_devstart() here.
		 */
		ifp->if_flags &= ~IFF_OACTIVE;
		ifp->if_timer = 0;
		return;
	}

	/*
	 * If we are in this routine because of pause frames, then
	 * don't reset the hardware.
	 */
	if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
	    E1000_STATUS_TXOFF) {
		ifp->if_timer = EM_TX_TIMEOUT;
		return;
	}

	if (e1000_check_for_link(&adapter->hw) == 0)
		if_printf(ifp, "watchdog timeout -- resetting\n");

	ifp->if_oerrors++;
	adapter->watchdog_events++;

	em_init(adapter);

	if (!ifq_is_empty(&ifp->if_snd))
		if_devstart(ifp);
}

static void
em_init(void *xsc)
{
	struct adapter *adapter = xsc;
	struct ifnet *ifp = &adapter->arpcom.ac_if;
	device_t dev = adapter->dev;
	uint32_t pba;

	ASSERT_SERIALIZED(ifp->if_serializer);

	em_stop(adapter);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer;
	 * the remainder is used for the transmit buffer.
	 *
	 * Devices before the 82547 had a Packet Buffer of 64K.
	 * Default allocation: PBA=48K for Rx, leaving 16K for Tx.
	 * After the 82547 the buffer was reduced to 40K.
	 * Default allocation: PBA=30K for Rx, leaving 10K for Tx.
	 * Note: default does not leave enough room for Jumbo Frame >10k.
	 */
	switch (adapter->hw.mac.type) {
	case e1000_82547:
	case e1000_82547_rev_2: /* 82547: Total Packet Buffer is 40K */
		if (adapter->max_frame_size > 8192)
			pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
		else
			pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
		adapter->tx_fifo_head = 0;
		adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT;
		adapter->tx_fifo_size =
		    (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT;
		break;

	/* Total Packet Buffer on these is 48K */
	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
		pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
		break;

	case e1000_82573: /* 82573: Total Packet Buffer is 32K */
		pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */
		break;

	case e1000_82574:
		pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */
		break;

	case e1000_ich9lan:
	case e1000_ich10lan:
#define E1000_PBA_10K	0x000A
		pba = E1000_PBA_10K;
		break;

	case e1000_ich8lan:
		pba = E1000_PBA_8K;
		break;

	default:
		/* Devices before 82547 had a Packet Buffer of 64K. */
		if (adapter->max_frame_size > 8192)
			pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
		else
			pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
	}
	E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);
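
	/*
	 * Worked example (sketch): on an 82547 with standard frames,
	 * pba = E1000_PBA_30K, so the tx_fifo_size computed above is
	 * (E1000_PBA_40K - E1000_PBA_30K) << EM_PBA_BYTES_SHIFT, i.e.
	 * the remaining 10K of the 40K packet buffer expressed in bytes.
	 */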

	/* Get the latest mac address; user can use a LAA */
	bcopy(IF_LLADDR(ifp), adapter->hw.mac.addr, ETHER_ADDR_LEN);

	/* Put the address into the Receive Address Array */
	e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);

	/*
	 * With the 82571 adapter, RAR[0] may be overwritten
	 * when the other port is reset.  We make a duplicate
	 * in RAR[14] for that eventuality; this assures
	 * the interface continues to function.
	 */
	if (adapter->hw.mac.type == e1000_82571) {
		e1000_set_laa_state_82571(&adapter->hw, TRUE);
		e1000_rar_set(&adapter->hw, adapter->hw.mac.addr,
			      E1000_RAR_ENTRIES - 1);
	}

	/* Initialize the hardware */
	if (em_hw_init(adapter)) {
		device_printf(dev, "Unable to initialize the hardware\n");
		/* XXX em_stop()? */
		return;
	}
	em_update_link_status(adapter);

	/* Setup VLAN support, basic and offload if available */
	E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);

	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
		uint32_t ctrl;

		ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
		ctrl |= E1000_CTRL_VME;
		E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
	}

	/* Set hardware offload abilities */
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist = EM_CSUM_FEATURES;
	else
		ifp->if_hwassist = 0;

	/* Configure for OS presence */
	em_get_mgmt(adapter);

	/* Prepare transmit descriptors and buffers */
	em_init_tx_ring(adapter);
	em_init_tx_unit(adapter);

	/* Setup Multicast table */
	em_set_multi(adapter);

	/* Prepare receive descriptors and buffers */
	if (em_init_rx_ring(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		em_stop(adapter);
		return;
	}
	em_init_rx_unit(adapter);

	/* Don't lose promiscuous settings */
	em_set_promisc(adapter);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	callout_reset(&adapter->timer, hz, em_timer, adapter);
	e1000_clear_hw_cntrs_base_generic(&adapter->hw);

	/* MSI/X configuration for 82574 */
	if (adapter->hw.mac.type == e1000_82574) {
		int tmp;

		tmp = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
		tmp |= E1000_CTRL_EXT_PBA_CLR;
		E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, tmp);
		/*
		 * Set the IVAR - interrupt vector routing.
		 * Each nibble represents a vector, high bit
		 * is enable, other 3 bits are the MSIX table
		 * entry, we map RXQ0 to 0, TXQ0 to 1, and
		 * Link (other) to 2, hence the magic number.
		 */
		E1000_WRITE_REG(&adapter->hw, E1000_IVAR, 0x800A0908);
	}
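
	/*
	 * Decoding 0x800A0908 per the nibble layout described above
	 * (a sketch): reading nibbles from the low end, 0x8 routes RXQ0
	 * to MSI-X vector 0, 0x9 routes TXQ0 to vector 1 and 0xA routes
	 * Link/Other to vector 2; in each nibble the high bit is the
	 * enable bit and the low bits select the MSI-X table entry.
	 */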

#ifdef DEVICE_POLLING
	/*
	 * Only enable interrupts if we are not polling; make sure
	 * they are off otherwise.
	 */
	if (ifp->if_flags & IFF_POLLING)
		em_disable_intr(adapter);
	else
#endif /* DEVICE_POLLING */
		em_enable_intr(adapter);

	/* Don't reset the phy next time init gets called */
	adapter->hw.phy.reset_disable = TRUE;
}

#ifdef DEVICE_POLLING

static void
em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct adapter *adapter = ifp->if_softc;
	uint32_t reg_icr;

	ASSERT_SERIALIZED(ifp->if_serializer);

	switch (cmd) {
	case POLL_REGISTER:
		em_disable_intr(adapter);
		break;

	case POLL_DEREGISTER:
		em_enable_intr(adapter);
		break;

	case POLL_AND_CHECK_STATUS:
		reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
		if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
			callout_stop(&adapter->timer);
			adapter->hw.mac.get_link_status = 1;
			em_update_link_status(adapter);
			callout_reset(&adapter->timer, hz, em_timer, adapter);
		}
		/* FALL THROUGH */
	case POLL_ONLY:
		if (ifp->if_flags & IFF_RUNNING) {
			em_rxeof(adapter, count);
			em_txeof(adapter);

			if (!ifq_is_empty(&ifp->if_snd))
				if_devstart(ifp);
		}
		break;
	}
}

#endif /* DEVICE_POLLING */

static void
em_intr(void *xsc)
{
	struct adapter *adapter = xsc;
	struct ifnet *ifp = &adapter->arpcom.ac_if;
	uint32_t reg_icr;

	logif(intr_beg);
	ASSERT_SERIALIZED(ifp->if_serializer);

	reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);

	if ((adapter->hw.mac.type >= e1000_82571 &&
	     (reg_icr & E1000_ICR_INT_ASSERTED) == 0) ||
	    reg_icr == 0) {
		logif(intr_end);
		return;
	}

	/*
	 * XXX: some laptops trigger several spurious interrupts
	 * on em(4) when in the resume cycle.  The ICR register
	 * reports all-ones value in this case.  Processing such
	 * interrupts would lead to a freeze.  I don't know why.
	 */
	if (reg_icr == 0xffffffff) {
		logif(intr_end);
		return;
	}

	if (ifp->if_flags & IFF_RUNNING) {
		if (reg_icr &
		    (E1000_ICR_RXT0 | E1000_ICR_RXDMT0 | E1000_ICR_RXO))
			em_rxeof(adapter, -1);
		if (reg_icr & E1000_ICR_TXDW) {
			em_txeof(adapter);
			if (!ifq_is_empty(&ifp->if_snd))
				if_devstart(ifp);
		}
	}

	/* Link status change */
	if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		callout_stop(&adapter->timer);
		adapter->hw.mac.get_link_status = 1;
		em_update_link_status(adapter);

		/* Deal with TX cruft when link lost */
		em_tx_purge(adapter);

		callout_reset(&adapter->timer, hz, em_timer, adapter);
	}

	if (reg_icr & E1000_ICR_RXO)
		adapter->rx_overruns++;

	logif(intr_end);
}

static void
em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct adapter *adapter = ifp->if_softc;
	u_char fiber_type = IFM_1000_SX;

	ASSERT_SERIALIZED(ifp->if_serializer);

	em_update_link_status(adapter);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!adapter->link_active)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
	    adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
		if (adapter->hw.mac.type == e1000_82545)
			fiber_type = IFM_1000_LX;
		ifmr->ifm_active |= fiber_type | IFM_FDX;
	} else {
		switch (adapter->link_speed) {
		case 10:
			ifmr->ifm_active |= IFM_10_T;
			break;
		case 100:
			ifmr->ifm_active |= IFM_100_TX;
			break;

		case 1000:
			ifmr->ifm_active |= IFM_1000_T;
			break;
		}
		if (adapter->link_duplex == FULL_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;
	}
}

static int
em_media_change(struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifmedia *ifm = &adapter->media;

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		adapter->hw.mac.autoneg = DO_AUTO_NEG;
		adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
		break;

	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_T:
		adapter->hw.mac.autoneg = DO_AUTO_NEG;
		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;

	case IFM_100_TX:
		adapter->hw.mac.autoneg = FALSE;
		adapter->hw.phy.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
		else
			adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
		break;

	case IFM_10_T:
		adapter->hw.mac.autoneg = FALSE;
		adapter->hw.phy.autoneg_advertised = 0;
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
		else
			adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
		break;

	default:
		if_printf(ifp, "Unsupported media type\n");
		break;
	}

	/*
	 * As the speed/duplex settings may have changed we need to
	 * reset the PHY.
	 */
	adapter->hw.phy.reset_disable = FALSE;

	em_init(adapter);

	return (0);
}

static int
em_encap(struct adapter *adapter, struct mbuf **m_headp)
{
	bus_dma_segment_t segs[EM_MAX_SCATTER];
	bus_dmamap_t map;
	struct em_buffer *tx_buffer, *tx_buffer_mapped;
	struct e1000_tx_desc *ctxd = NULL;
	struct mbuf *m_head = *m_headp;
	uint32_t txd_upper, txd_lower, txd_used, cmd = 0;
	int maxsegs, nsegs, i, j, first, last = 0, error;

	if (m_head->m_len < EM_TXCSUM_MINHL &&
	    (m_head->m_flags & EM_CSUM_FEATURES)) {
		/*
		 * Make sure that the ethernet header and ip.ip_hl are in
		 * contiguous memory, since if TXCSUM is enabled, the later
		 * TX context descriptor setup needs to access ip.ip_hl.
		 */
		error = em_txcsum_pullup(adapter, m_headp);
		if (error) {
			KKASSERT(*m_headp == NULL);
			return error;
		}
		m_head = *m_headp;
	}

	txd_upper = txd_lower = 0;
	txd_used = 0;

	/*
	 * Capture the first descriptor index; this descriptor
	 * will have the index of the EOP which is the only one
	 * that now gets a DONE bit writeback.
	 */
	first = adapter->next_avail_tx_desc;
	tx_buffer = &adapter->tx_buffer_area[first];
	tx_buffer_mapped = tx_buffer;
	map = tx_buffer->map;

	maxsegs = adapter->num_tx_desc_avail - EM_TX_RESERVED;
	KASSERT(maxsegs >= adapter->spare_tx_desc,
		("not enough spare TX desc\n"));
	if (adapter->pcix_82544) {
		/* Halve it; see the comment in em_attach() */
		maxsegs >>= 1;
	}
	if (maxsegs > EM_MAX_SCATTER)
		maxsegs = EM_MAX_SCATTER;

	error = bus_dmamap_load_mbuf_defrag(adapter->txtag, map, m_headp,
			segs, maxsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		if (error == ENOBUFS)
			adapter->mbuf_alloc_failed++;
		else
			adapter->no_tx_dma_setup++;

		m_freem(*m_headp);
		*m_headp = NULL;
		return error;
	}
	bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);

	m_head = *m_headp;
	adapter->tx_nsegs += nsegs;

	if (m_head->m_pkthdr.csum_flags & EM_CSUM_FEATURES) {
		/* TX csum offloading will consume one TX desc */
		adapter->tx_nsegs += em_txcsum(adapter, m_head,
					       &txd_upper, &txd_lower);
	}
	i = adapter->next_avail_tx_desc;

	/* Set up our transmit descriptors */
	for (j = 0; j < nsegs; j++) {
		/* If adapter is 82544 and on PCI-X bus */
		if (adapter->pcix_82544) {
			DESC_ARRAY desc_array;
			uint32_t array_elements, counter;

			/*
			 * Check the Address and Length combination and
			 * split the data accordingly
			 */
			array_elements = em_82544_fill_desc(segs[j].ds_addr,
						segs[j].ds_len, &desc_array);
			for (counter = 0; counter < array_elements; counter++) {
				KKASSERT(txd_used < adapter->num_tx_desc_avail);

				tx_buffer = &adapter->tx_buffer_area[i];
				ctxd = &adapter->tx_desc_base[i];

				ctxd->buffer_addr = htole64(
				    desc_array.descriptor[counter].address);
				ctxd->lower.data = htole32(
				    E1000_TXD_CMD_IFCS | txd_lower |
				    desc_array.descriptor[counter].length);
				ctxd->upper.data = htole32(txd_upper);

				last = i;
				if (++i == adapter->num_tx_desc)
					i = 0;

				txd_used++;
			}
		} else {
			tx_buffer = &adapter->tx_buffer_area[i];
			ctxd = &adapter->tx_desc_base[i];

			ctxd->buffer_addr = htole64(segs[j].ds_addr);
			ctxd->lower.data = htole32(E1000_TXD_CMD_IFCS |
						   txd_lower | segs[j].ds_len);
			ctxd->upper.data = htole32(txd_upper);

			last = i;
			if (++i == adapter->num_tx_desc)
				i = 0;
		}
	}

	adapter->next_avail_tx_desc = i;
	if (adapter->pcix_82544) {
		KKASSERT(adapter->num_tx_desc_avail > txd_used);
		adapter->num_tx_desc_avail -= txd_used;
	} else {
		KKASSERT(adapter->num_tx_desc_avail > nsegs);
		adapter->num_tx_desc_avail -= nsegs;
	}

	/* Handle VLAN tag */
	if (m_head->m_flags & M_VLANTAG) {
		/* Set the vlan id. */
		ctxd->upper.fields.special =
		    htole16(m_head->m_pkthdr.ether_vlantag);

		/* Tell hardware to add tag */
		ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE);
	}

	tx_buffer->m_head = m_head;
	tx_buffer_mapped->map = tx_buffer->map;
	tx_buffer->map = map;

	if (adapter->tx_nsegs >= adapter->tx_int_nsegs) {
		adapter->tx_nsegs = 0;

		/*
		 * Report Status (RS) is turned on
		 * every tx_int_nsegs descriptors.
		 */
		cmd = E1000_TXD_CMD_RS;

		/*
		 * Keep track of the descriptor, which will
		 * be written back by hardware.
		 */
		adapter->tx_dd[adapter->tx_dd_tail] = last;
		EM_INC_TXDD_IDX(adapter->tx_dd_tail);
		KKASSERT(adapter->tx_dd_tail != adapter->tx_dd_head);
	}

	/*
	 * Last Descriptor of Packet needs End Of Packet (EOP)
	 */
	ctxd->lower.data |= htole32(E1000_TXD_CMD_EOP | cmd);

	/*
	 * Advance the Transmit Descriptor Tail (TDT); this tells the E1000
	 * that this frame is available to transmit.
	 */
	if (adapter->hw.mac.type == e1000_82547 &&
	    adapter->link_duplex == HALF_DUPLEX) {
		em_82547_move_tail_serialized(adapter);
	} else {
		E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), i);
		if (adapter->hw.mac.type == e1000_82547) {
			em_82547_update_fifo_head(adapter,
			    m_head->m_pkthdr.len);
		}
	}
	return (0);
}
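
/*
 * Note (sketch): because RS is only requested every tx_int_nsegs
 * descriptors, the indices recorded in tx_dd[] are the only
 * descriptors whose DONE bit the hardware will write back, so TX
 * cleanup can restrict its scan to those indices instead of walking
 * the whole ring.
 */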

/*
 * 82547 workaround to avoid controller hang in half-duplex environment.
 * The workaround is to avoid queuing a large packet that would span
 * the internal Tx FIFO ring boundary.  We need to reset the FIFO pointers
 * in this case.  We do that only when the FIFO is quiescent.
 */
static void
em_82547_move_tail_serialized(struct adapter *adapter)
{
	struct e1000_tx_desc *tx_desc;
	uint16_t hw_tdt, sw_tdt, length = 0;
	bool eop = 0;

	ASSERT_SERIALIZED(adapter->arpcom.ac_if.if_serializer);

	hw_tdt = E1000_READ_REG(&adapter->hw, E1000_TDT(0));
	sw_tdt = adapter->next_avail_tx_desc;

	while (hw_tdt != sw_tdt) {
		tx_desc = &adapter->tx_desc_base[hw_tdt];
		length += tx_desc->lower.flags.length;
		eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
		if (++hw_tdt == adapter->num_tx_desc)
			hw_tdt = 0;

		if (eop) {
			if (em_82547_fifo_workaround(adapter, length)) {
				adapter->tx_fifo_wrk_cnt++;
				callout_reset(&adapter->tx_fifo_timer, 1,
					em_82547_move_tail, adapter);
				break;
			}
			E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), hw_tdt);
			em_82547_update_fifo_head(adapter, length);
			length = 0;
		}
	}
}

static void
em_82547_move_tail(void *xsc)
{
	struct adapter *adapter = xsc;
	struct ifnet *ifp = &adapter->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	em_82547_move_tail_serialized(adapter);
	lwkt_serialize_exit(ifp->if_serializer);
}

static int
em_82547_fifo_workaround(struct adapter *adapter, int len)
{
	int fifo_space, fifo_pkt_len;

	fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);

	if (adapter->link_duplex == HALF_DUPLEX) {
		fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;

		if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
			if (em_82547_tx_fifo_reset(adapter))
				return (0);
			else
				return (1);
		}
	}
	return (0);
}

static void
em_82547_update_fifo_head(struct adapter *adapter, int len)
{
	int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR);

	/* tx_fifo_head is always 16 byte aligned */
	adapter->tx_fifo_head += fifo_pkt_len;
	if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
		adapter->tx_fifo_head -= adapter->tx_fifo_size;
}

static int
em_82547_tx_fifo_reset(struct adapter *adapter)
{
	uint32_t tctl;

	if ((E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
	     E1000_READ_REG(&adapter->hw, E1000_TDH(0))) &&
	    (E1000_READ_REG(&adapter->hw, E1000_TDFT) ==
	     E1000_READ_REG(&adapter->hw, E1000_TDFH)) &&
	    (E1000_READ_REG(&adapter->hw, E1000_TDFTS) ==
	     E1000_READ_REG(&adapter->hw, E1000_TDFHS)) &&
	    (E1000_READ_REG(&adapter->hw, E1000_TDFPC) == 0)) {
		/* Disable TX unit */
		tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
		E1000_WRITE_REG(&adapter->hw, E1000_TCTL,
		    tctl & ~E1000_TCTL_EN);

		/* Reset FIFO pointers */
		E1000_WRITE_REG(&adapter->hw, E1000_TDFT,
		    adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, E1000_TDFH,
		    adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, E1000_TDFTS,
		    adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, E1000_TDFHS,
		    adapter->tx_head_addr);

		/* Re-enable TX unit */
		E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
		E1000_WRITE_FLUSH(&adapter->hw);

		adapter->tx_fifo_head = 0;
		adapter->tx_fifo_reset_cnt++;

		return (TRUE);
	} else {
		return (FALSE);
	}
}
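
/*
 * Worked example (assumption: EM_FIFO_HDR is the 16-byte alignment
 * unit referred to in em_82547_update_fifo_head()): a 1514-byte frame
 * advances tx_fifo_head by roundup2(1514 + 16, 16) = 1536 bytes,
 * wrapping modulo tx_fifo_size.
 */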

static void
em_set_promisc(struct adapter *adapter)
{
	struct ifnet *ifp = &adapter->arpcom.ac_if;
	uint32_t reg_rctl;

	reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);

	if (ifp->if_flags & IFF_PROMISC) {
		reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		/* Turn this on if you want to see bad packets */
		if (em_debug_sbp)
			reg_rctl |= E1000_RCTL_SBP;
		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		reg_rctl |= E1000_RCTL_MPE;
		reg_rctl &= ~E1000_RCTL_UPE;
		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
	}
}

static void
em_disable_promisc(struct adapter *adapter)
{
	uint32_t reg_rctl;

	reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);

	reg_rctl &= ~E1000_RCTL_UPE;
	reg_rctl &= ~E1000_RCTL_MPE;
	reg_rctl &= ~E1000_RCTL_SBP;
	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
}

static void
em_set_multi(struct adapter *adapter)
{
	struct ifnet *ifp = &adapter->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint32_t reg_rctl = 0;
	uint8_t mta[512]; /* Largest MTA is 4096 bits */
	int mcnt = 0;

	if (adapter->hw.mac.type == e1000_82542 &&
	    adapter->hw.revision_id == E1000_REVISION_2) {
		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
		if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
			e1000_pci_clear_mwi(&adapter->hw);
		reg_rctl |= E1000_RCTL_RST;
		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
		msec_delay(5);
	}

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
			break;

		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		      &mta[mcnt * ETHER_ADDR_LEN], ETHER_ADDR_LEN);
		mcnt++;
	}

	if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
		reg_rctl |= E1000_RCTL_MPE;
		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
	} else {
		e1000_update_mc_addr_list(&adapter->hw, mta,
		    mcnt, 1, adapter->hw.mac.rar_entry_count);
	}

	if (adapter->hw.mac.type == e1000_82542 &&
	    adapter->hw.revision_id == E1000_REVISION_2) {
		reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
		reg_rctl &= ~E1000_RCTL_RST;
		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
		msec_delay(5);
		if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
			e1000_pci_set_mwi(&adapter->hw);
	}
}

/*
 * This routine checks for link status and updates statistics.
 */
static void
em_timer(void *xsc)
{
	struct adapter *adapter = xsc;
	struct ifnet *ifp = &adapter->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);

	em_update_link_status(adapter);
	em_update_stats(adapter);

	/* Reset LAA into RAR[0] on 82571 */
	if (e1000_get_laa_state_82571(&adapter->hw) == TRUE)
		e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);

	if (em_display_debug_stats && (ifp->if_flags & IFF_RUNNING))
		em_print_hw_stats(adapter);

	em_smartspeed(adapter);

	callout_reset(&adapter->timer, hz, em_timer, adapter);

	lwkt_serialize_exit(ifp->if_serializer);
}

static void
em_update_link_status(struct adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct ifnet *ifp = &adapter->arpcom.ac_if;
	device_t dev = adapter->dev;
	uint32_t link_check = 0;

	/* Get the cached link value or read phy for real */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			/* Do the work to read phy */
			e1000_check_for_link(hw);
			link_check = !hw->mac.get_link_status;
			if (link_check) /* ESB2 fix */
				e1000_cfg_on_link_up(hw);
		} else {
			link_check = TRUE;
		}
		break;

	case e1000_media_type_fiber:
		e1000_check_for_link(hw);
		link_check =
			E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU;
		break;

	case e1000_media_type_internal_serdes:
		e1000_check_for_link(hw);
		link_check = adapter->hw.mac.serdes_has_link;
		break;

	case e1000_media_type_unknown:
	default:
		break;
	}

	/* Now check for a transition */
	if (link_check && adapter->link_active == 0) {
		e1000_get_speed_and_duplex(hw, &adapter->link_speed,
		    &adapter->link_duplex);

		/*
		 * Check if we should enable/disable SPEED_MODE bit on
		 * 82571/82572
		 */
		if (hw->mac.type == e1000_82571 ||
		    hw->mac.type == e1000_82572) {
			int tarc0;

			tarc0 = E1000_READ_REG(hw, E1000_TARC(0));
			if (adapter->link_speed != SPEED_1000)
				tarc0 &= ~SPEED_MODE_BIT;
			else
				tarc0 |= SPEED_MODE_BIT;
			E1000_WRITE_REG(hw, E1000_TARC(0), tarc0);
		}
		if (bootverbose) {
			device_printf(dev, "Link is up %d Mbps %s\n",
			    adapter->link_speed,
			    ((adapter->link_duplex == FULL_DUPLEX) ?
			    "Full Duplex" : "Half Duplex"));
		}
1964 "Full Duplex" : "Half Duplex")); 1965 } 1966 adapter->link_active = 1; 1967 adapter->smartspeed = 0; 1968 ifp->if_baudrate = adapter->link_speed * 1000000; 1969 ifp->if_link_state = LINK_STATE_UP; 1970 if_link_state_change(ifp); 1971 } else if (!link_check && adapter->link_active == 1) { 1972 ifp->if_baudrate = adapter->link_speed = 0; 1973 adapter->link_duplex = 0; 1974 if (bootverbose) 1975 device_printf(dev, "Link is Down\n"); 1976 adapter->link_active = 0; 1977 #if 0 1978 /* Link down, disable watchdog */ 1979 if->if_timer = 0; 1980 #endif 1981 ifp->if_link_state = LINK_STATE_DOWN; 1982 if_link_state_change(ifp); 1983 } 1984 } 1985 1986 static void 1987 em_stop(struct adapter *adapter) 1988 { 1989 struct ifnet *ifp = &adapter->arpcom.ac_if; 1990 int i; 1991 1992 ASSERT_SERIALIZED(ifp->if_serializer); 1993 1994 em_disable_intr(adapter); 1995 1996 callout_stop(&adapter->timer); 1997 callout_stop(&adapter->tx_fifo_timer); 1998 1999 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 2000 ifp->if_timer = 0; 2001 2002 e1000_reset_hw(&adapter->hw); 2003 if (adapter->hw.mac.type >= e1000_82544) 2004 E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0); 2005 2006 for (i = 0; i < adapter->num_tx_desc; i++) { 2007 struct em_buffer *tx_buffer = &adapter->tx_buffer_area[i]; 2008 2009 if (tx_buffer->m_head != NULL) { 2010 bus_dmamap_unload(adapter->txtag, tx_buffer->map); 2011 m_freem(tx_buffer->m_head); 2012 tx_buffer->m_head = NULL; 2013 } 2014 } 2015 2016 for (i = 0; i < adapter->num_rx_desc; i++) { 2017 struct em_buffer *rx_buffer = &adapter->rx_buffer_area[i]; 2018 2019 if (rx_buffer->m_head != NULL) { 2020 bus_dmamap_unload(adapter->rxtag, rx_buffer->map); 2021 m_freem(rx_buffer->m_head); 2022 rx_buffer->m_head = NULL; 2023 } 2024 } 2025 2026 if (adapter->fmp != NULL) 2027 m_freem(adapter->fmp); 2028 adapter->fmp = NULL; 2029 adapter->lmp = NULL; 2030 2031 adapter->csum_flags = 0; 2032 adapter->csum_ehlen = 0; 2033 adapter->csum_iphlen = 0; 2034 2035 adapter->tx_dd_head = 0; 2036 adapter->tx_dd_tail = 0; 2037 adapter->tx_nsegs = 0; 2038 } 2039 2040 static int 2041 em_get_hw_info(struct adapter *adapter) 2042 { 2043 device_t dev = adapter->dev; 2044 2045 /* Save off the information about this board */ 2046 adapter->hw.vendor_id = pci_get_vendor(dev); 2047 adapter->hw.device_id = pci_get_device(dev); 2048 adapter->hw.revision_id = pci_get_revid(dev); 2049 adapter->hw.subsystem_vendor_id = pci_get_subvendor(dev); 2050 adapter->hw.subsystem_device_id = pci_get_subdevice(dev); 2051 2052 /* Do Shared Code Init and Setup */ 2053 if (e1000_set_mac_type(&adapter->hw)) 2054 return ENXIO; 2055 return 0; 2056 } 2057 2058 static int 2059 em_alloc_pci_res(struct adapter *adapter) 2060 { 2061 device_t dev = adapter->dev; 2062 int val, rid; 2063 2064 /* Enable bus mastering */ 2065 pci_enable_busmaster(dev); 2066 2067 adapter->memory_rid = EM_BAR_MEM; 2068 adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 2069 &adapter->memory_rid, RF_ACTIVE); 2070 if (adapter->memory == NULL) { 2071 device_printf(dev, "Unable to allocate bus resource: memory\n"); 2072 return (ENXIO); 2073 } 2074 adapter->osdep.mem_bus_space_tag = 2075 rman_get_bustag(adapter->memory); 2076 adapter->osdep.mem_bus_space_handle = 2077 rman_get_bushandle(adapter->memory); 2078 2079 /* XXX This is quite goofy, it is not actually used */ 2080 adapter->hw.hw_addr = (uint8_t *)&adapter->osdep.mem_bus_space_handle; 2081 2082 /* Only older adapters use IO mapping */ 2083 if (adapter->hw.mac.type > e1000_82543 && 2084 adapter->hw.mac.type < 
	    e1000_82571) {
		/* Figure out where our IO BAR is */
		for (rid = PCIR_BAR(0); rid < PCIR_CARDBUSCIS;) {
			val = pci_read_config(dev, rid, 4);
			if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) {
				adapter->io_rid = rid;
				break;
			}
			rid += 4;
			/* check for 64bit BAR */
			if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT)
				rid += 4;
		}
		if (rid >= PCIR_CARDBUSCIS) {
			device_printf(dev, "Unable to locate IO BAR\n");
			return (ENXIO);
		}
		adapter->ioport = bus_alloc_resource_any(dev, SYS_RES_IOPORT,
		    &adapter->io_rid, RF_ACTIVE);
		if (adapter->ioport == NULL) {
			device_printf(dev, "Unable to allocate bus resource: "
			    "ioport\n");
			return (ENXIO);
		}
		adapter->hw.io_base = 0;
		adapter->osdep.io_bus_space_tag =
		    rman_get_bustag(adapter->ioport);
		adapter->osdep.io_bus_space_handle =
		    rman_get_bushandle(adapter->ioport);
	}

	adapter->intr_rid = 0;
	adapter->intr_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &adapter->intr_rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (adapter->intr_res == NULL) {
		device_printf(dev, "Unable to allocate bus resource: "
		    "interrupt\n");
		return (ENXIO);
	}

	adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	adapter->hw.back = &adapter->osdep;
	return (0);
}

static void
em_free_pci_res(struct adapter *adapter)
{
	device_t dev = adapter->dev;

	if (adapter->intr_res != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ,
		    adapter->intr_rid, adapter->intr_res);
	}

	if (adapter->memory != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    adapter->memory_rid, adapter->memory);
	}

	if (adapter->flash != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    adapter->flash_rid, adapter->flash);
	}

	if (adapter->ioport != NULL) {
		bus_release_resource(dev, SYS_RES_IOPORT,
		    adapter->io_rid, adapter->ioport);
	}
}

static int
em_hw_init(struct adapter *adapter)
{
	device_t dev = adapter->dev;
	uint16_t rx_buffer_size;

	/* Issue a global reset */
	e1000_reset_hw(&adapter->hw);

	/* Get control from any management/hw control */
	if ((adapter->hw.mac.type == e1000_82573 ||
	     adapter->hw.mac.type == e1000_ich8lan ||
	     adapter->hw.mac.type == e1000_ich10lan ||
	     adapter->hw.mac.type == e1000_ich9lan) &&
	    e1000_check_mng_mode(&adapter->hw))
		em_get_hw_control(adapter);

	/* When hardware is reset, fifo_head is also reset */
	adapter->tx_fifo_head = 0;

	/* Set up smart power down as default off on newer adapters. */
	if (!em_smart_pwr_down &&
	    (adapter->hw.mac.type == e1000_82571 ||
	     adapter->hw.mac.type == e1000_82572)) {
		uint16_t phy_tmp = 0;

		/* Speed up time to link by disabling smart power down. */
		e1000_read_phy_reg(&adapter->hw,
		    IGP02E1000_PHY_POWER_MGMT, &phy_tmp);
		phy_tmp &= ~IGP02E1000_PM_SPD;
		e1000_write_phy_reg(&adapter->hw,
		    IGP02E1000_PHY_POWER_MGMT, phy_tmp);
	}

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two frames to be
	 *   received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has
	 *   drained a bit. Here we use an arbitrary value of 1500 which will
	 *   restart after one full frame is pulled from the buffer. There
	 *   could be several smaller frames in the buffer and if so they will
	 *   not trigger the XON until their total number reduces the buffer
	 *   by 1500.
	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
	 */
	rx_buffer_size =
	    (E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff) << 10;

	adapter->hw.fc.high_water = rx_buffer_size -
	    roundup2(adapter->max_frame_size, 1024);
	adapter->hw.fc.low_water = adapter->hw.fc.high_water - 1500;

	if (adapter->hw.mac.type == e1000_80003es2lan)
		adapter->hw.fc.pause_time = 0xFFFF;
	else
		adapter->hw.fc.pause_time = EM_FC_PAUSE_TIME;
	adapter->hw.fc.send_xon = TRUE;
	adapter->hw.fc.requested_mode = e1000_fc_full;

	if (e1000_init_hw(&adapter->hw) < 0) {
		device_printf(dev, "Hardware Initialization Failed\n");
		return (EIO);
	}

	e1000_check_for_link(&adapter->hw);

	return (0);
}

static void
em_setup_ifp(struct adapter *adapter)
{
	struct ifnet *ifp = &adapter->arpcom.ac_if;

	if_initname(ifp, device_get_name(adapter->dev),
	    device_get_unit(adapter->dev));
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = em_init;
	ifp->if_ioctl = em_ioctl;
	ifp->if_start = em_start;
#ifdef DEVICE_POLLING
	ifp->if_poll = em_poll;
#endif
	ifp->if_watchdog = em_watchdog;
	ifq_set_maxlen(&ifp->if_snd, adapter->num_tx_desc - 1);
	ifq_set_ready(&ifp->if_snd);

	ether_ifattach(ifp, adapter->hw.mac.addr, NULL);

	if (adapter->hw.mac.type >= e1000_82543)
		ifp->if_capabilities = IFCAP_HWCSUM;

	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;

	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist = EM_CSUM_FEATURES;

	/*
	 * Tell the upper layer(s) we support long frames.
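	 *
	 * For reference: struct ether_vlan_header is ETHER_HDR_LEN +
	 * EVL_ENCAPLEN = 14 + 4 = 18 bytes, so the ifi_hdrlen set below
	 * reserves room for an 802.1Q tag in the link-layer header.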
2260 */ 2261 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 2262 2263 /* 2264 * Specify the media types supported by this adapter and register 2265 * callbacks to update media and link information 2266 */ 2267 ifmedia_init(&adapter->media, IFM_IMASK, 2268 em_media_change, em_media_status); 2269 if (adapter->hw.phy.media_type == e1000_media_type_fiber || 2270 adapter->hw.phy.media_type == e1000_media_type_internal_serdes) { 2271 u_char fiber_type = IFM_1000_SX; /* default type */ 2272 2273 if (adapter->hw.mac.type == e1000_82545) 2274 fiber_type = IFM_1000_LX; 2275 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX, 2276 0, NULL); 2277 ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL); 2278 } else { 2279 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL); 2280 ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX, 2281 0, NULL); 2282 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 2283 0, NULL); 2284 ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 2285 0, NULL); 2286 if (adapter->hw.phy.type != e1000_phy_ife) { 2287 ifmedia_add(&adapter->media, 2288 IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); 2289 ifmedia_add(&adapter->media, 2290 IFM_ETHER | IFM_1000_T, 0, NULL); 2291 } 2292 } 2293 ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); 2294 ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); 2295 } 2296 2297 2298 /* 2299 * Workaround for SmartSpeed on 82541 and 82547 controllers 2300 */ 2301 static void 2302 em_smartspeed(struct adapter *adapter) 2303 { 2304 uint16_t phy_tmp; 2305 2306 if (adapter->link_active || adapter->hw.phy.type != e1000_phy_igp || 2307 adapter->hw.mac.autoneg == 0 || 2308 (adapter->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0) 2309 return; 2310 2311 if (adapter->smartspeed == 0) { 2312 /* 2313 * If Master/Slave config fault is asserted twice, 2314 * we assume back-to-back 2315 */ 2316 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp); 2317 if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT)) 2318 return; 2319 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp); 2320 if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) { 2321 e1000_read_phy_reg(&adapter->hw, 2322 PHY_1000T_CTRL, &phy_tmp); 2323 if (phy_tmp & CR_1000T_MS_ENABLE) { 2324 phy_tmp &= ~CR_1000T_MS_ENABLE; 2325 e1000_write_phy_reg(&adapter->hw, 2326 PHY_1000T_CTRL, phy_tmp); 2327 adapter->smartspeed++; 2328 if (adapter->hw.mac.autoneg && 2329 !e1000_phy_setup_autoneg(&adapter->hw) && 2330 !e1000_read_phy_reg(&adapter->hw, 2331 PHY_CONTROL, &phy_tmp)) { 2332 phy_tmp |= MII_CR_AUTO_NEG_EN | 2333 MII_CR_RESTART_AUTO_NEG; 2334 e1000_write_phy_reg(&adapter->hw, 2335 PHY_CONTROL, phy_tmp); 2336 } 2337 } 2338 } 2339 return; 2340 } else if (adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) { 2341 /* If still no link, perhaps using 2/3 pair cable */ 2342 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp); 2343 phy_tmp |= CR_1000T_MS_ENABLE; 2344 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp); 2345 if (adapter->hw.mac.autoneg && 2346 !e1000_phy_setup_autoneg(&adapter->hw) && 2347 !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) { 2348 phy_tmp |= MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG; 2349 e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp); 2350 } 2351 } 2352 2353 /* Restart process after EM_SMARTSPEED_MAX iterations */ 2354 if (adapter->smartspeed++ == EM_SMARTSPEED_MAX) 2355 adapter->smartspeed = 0; 2356 } 2357 2358 static int 2359 em_dma_malloc(struct adapter *adapter, bus_size_t size, 2360 
struct em_dma_alloc *dma) 2361 { 2362 dma->dma_vaddr = bus_dmamem_coherent_any(adapter->parent_dtag, 2363 EM_DBA_ALIGN, size, BUS_DMA_WAITOK, 2364 &dma->dma_tag, &dma->dma_map, 2365 &dma->dma_paddr); 2366 if (dma->dma_vaddr == NULL) 2367 return ENOMEM; 2368 else 2369 return 0; 2370 } 2371 2372 static void 2373 em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma) 2374 { 2375 if (dma->dma_tag == NULL) 2376 return; 2377 bus_dmamap_unload(dma->dma_tag, dma->dma_map); 2378 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); 2379 bus_dma_tag_destroy(dma->dma_tag); 2380 } 2381 2382 static int 2383 em_create_tx_ring(struct adapter *adapter) 2384 { 2385 device_t dev = adapter->dev; 2386 struct em_buffer *tx_buffer; 2387 int error, i; 2388 2389 adapter->tx_buffer_area = 2390 kmalloc(sizeof(struct em_buffer) * adapter->num_tx_desc, 2391 M_DEVBUF, M_WAITOK | M_ZERO); 2392 2393 /* 2394 * Create DMA tags for tx buffers 2395 */ 2396 error = bus_dma_tag_create(adapter->parent_dtag, /* parent */ 2397 1, 0, /* alignment, bounds */ 2398 BUS_SPACE_MAXADDR, /* lowaddr */ 2399 BUS_SPACE_MAXADDR, /* highaddr */ 2400 NULL, NULL, /* filter, filterarg */ 2401 EM_TSO_SIZE, /* maxsize */ 2402 EM_MAX_SCATTER, /* nsegments */ 2403 EM_MAX_SEGSIZE, /* maxsegsize */ 2404 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | 2405 BUS_DMA_ONEBPAGE, /* flags */ 2406 &adapter->txtag); 2407 if (error) { 2408 device_printf(dev, "Unable to allocate TX DMA tag\n"); 2409 kfree(adapter->tx_buffer_area, M_DEVBUF); 2410 adapter->tx_buffer_area = NULL; 2411 return error; 2412 } 2413 2414 /* 2415 * Create DMA maps for tx buffers 2416 */ 2417 for (i = 0; i < adapter->num_tx_desc; i++) { 2418 tx_buffer = &adapter->tx_buffer_area[i]; 2419 2420 error = bus_dmamap_create(adapter->txtag, 2421 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, 2422 &tx_buffer->map); 2423 if (error) { 2424 device_printf(dev, "Unable to create TX DMA map\n"); 2425 em_destroy_tx_ring(adapter, i); 2426 return error; 2427 } 2428 } 2429 return (0); 2430 } 2431 2432 static void 2433 em_init_tx_ring(struct adapter *adapter) 2434 { 2435 /* Clear the old ring contents */ 2436 bzero(adapter->tx_desc_base, 2437 (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc); 2438 2439 /* Reset state */ 2440 adapter->next_avail_tx_desc = 0; 2441 adapter->next_tx_to_clean = 0; 2442 adapter->num_tx_desc_avail = adapter->num_tx_desc; 2443 } 2444 2445 static void 2446 em_init_tx_unit(struct adapter *adapter) 2447 { 2448 uint32_t tctl, tarc, tipg = 0; 2449 uint64_t bus_addr; 2450 2451 /* Setup the Base and Length of the Tx Descriptor Ring */ 2452 bus_addr = adapter->txdma.dma_paddr; 2453 E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(0), 2454 adapter->num_tx_desc * sizeof(struct e1000_tx_desc)); 2455 E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(0), 2456 (uint32_t)(bus_addr >> 32)); 2457 E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(0), 2458 (uint32_t)bus_addr); 2459 /* Setup the HW Tx Head and Tail descriptor pointers */ 2460 E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0); 2461 E1000_WRITE_REG(&adapter->hw, E1000_TDH(0), 0); 2462 2463 /* Set the default values for the Tx Inter Packet Gap timer */ 2464 switch (adapter->hw.mac.type) { 2465 case e1000_82542: 2466 tipg = DEFAULT_82542_TIPG_IPGT; 2467 tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT; 2468 tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT; 2469 break; 2470 2471 case e1000_80003es2lan: 2472 tipg = DEFAULT_82543_TIPG_IPGR1; 2473 tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 << 2474 E1000_TIPG_IPGR2_SHIFT; 2475 break; 2476 2477 default: 2478 
		if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
		    adapter->hw.phy.media_type ==
		    e1000_media_type_internal_serdes)
			tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
		else
			tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
		tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
		tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
		break;
	}

	E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg);

	/* NOTE: 0 is not allowed for TIDV */
	E1000_WRITE_REG(&adapter->hw, E1000_TIDV, 1);
	if (adapter->hw.mac.type >= e1000_82540)
		E1000_WRITE_REG(&adapter->hw, E1000_TADV, 0);

	if (adapter->hw.mac.type == e1000_82571 ||
	    adapter->hw.mac.type == e1000_82572) {
		tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0));
		tarc |= SPEED_MODE_BIT;
		E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
	} else if (adapter->hw.mac.type == e1000_80003es2lan) {
		tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0));
		tarc |= 1;
		E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc);
		tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(1));
		tarc |= 1;
		E1000_WRITE_REG(&adapter->hw, E1000_TARC(1), tarc);
	}

	/* Program the Transmit Control Register */
	tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
	    (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	if (adapter->hw.mac.type >= e1000_82571)
		tctl |= E1000_TCTL_MULR;

	/* This write will effectively turn on the transmit unit. */
	E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
}

static void
em_destroy_tx_ring(struct adapter *adapter, int ndesc)
{
	struct em_buffer *tx_buffer;
	int i;

	if (adapter->tx_buffer_area == NULL)
		return;

	for (i = 0; i < ndesc; i++) {
		tx_buffer = &adapter->tx_buffer_area[i];

		KKASSERT(tx_buffer->m_head == NULL);
		bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
	}
	bus_dma_tag_destroy(adapter->txtag);

	kfree(adapter->tx_buffer_area, M_DEVBUF);
	adapter->tx_buffer_area = NULL;
}

/*
 * The offload context needs to be set when we transfer the first
 * packet of a particular protocol (TCP/UDP). This routine has been
 * enhanced to deal with inserted VLAN headers.
 *
 * If the new packet's ether header length, ip header length and
 * csum offloading type are the same as those of the previous packet,
 * we should avoid allocating a new csum context descriptor; mainly
 * to take advantage of the pipeline effect of the TX data read request.
 *
 * This function returns the number of TX descriptors allocated for
 * the csum context.
 */
static int
em_txcsum(struct adapter *adapter, struct mbuf *mp,
    uint32_t *txd_upper, uint32_t *txd_lower)
{
	struct e1000_context_desc *TXD;
	struct em_buffer *tx_buffer;
	struct ether_vlan_header *eh;
	struct ip *ip;
	int curr_txd, ehdrlen, csum_flags;
	uint32_t cmd, hdr_len, ip_hlen;
	uint16_t etype;

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present,
	 * helpful for QinQ too.
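	 *
	 * Worked example (illustrative, untagged IPv4/TCP): ehdrlen =
	 * ETHER_HDR_LEN = 14 and ip_hlen = 20, so the context descriptor
	 * below ends up with
	 *   ipcss = 14, ipcse = 14 + 20 - 1 = 33,
	 *   ipcso = 14 + offsetof(struct ip, ip_sum) = 24,
	 *   tucss = 34, tucso = 34 + offsetof(struct tcphdr, th_sum) = 50.
	 * With an 802.1Q tag every offset shifts by EVL_ENCAPLEN (4).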
2573 */ 2574 KASSERT(mp->m_len >= ETHER_HDR_LEN, 2575 ("em_txcsum_pullup is not called (eh)?\n")); 2576 eh = mtod(mp, struct ether_vlan_header *); 2577 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 2578 KASSERT(mp->m_len >= ETHER_HDR_LEN + EVL_ENCAPLEN, 2579 ("em_txcsum_pullup is not called (evh)?\n")); 2580 etype = ntohs(eh->evl_proto); 2581 ehdrlen = ETHER_HDR_LEN + EVL_ENCAPLEN; 2582 } else { 2583 etype = ntohs(eh->evl_encap_proto); 2584 ehdrlen = ETHER_HDR_LEN; 2585 } 2586 2587 /* 2588 * We only support TCP/UDP for IPv4 for the moment. 2589 * TODO: Support SCTP too when it hits the tree. 2590 */ 2591 if (etype != ETHERTYPE_IP) 2592 return 0; 2593 2594 KASSERT(mp->m_len >= ehdrlen + EM_IPVHL_SIZE, 2595 ("em_txcsum_pullup is not called (eh+ip_vhl)?\n")); 2596 2597 /* NOTE: We could only safely access ip.ip_vhl part */ 2598 ip = (struct ip *)(mp->m_data + ehdrlen); 2599 ip_hlen = ip->ip_hl << 2; 2600 2601 csum_flags = mp->m_pkthdr.csum_flags & EM_CSUM_FEATURES; 2602 2603 if (adapter->csum_ehlen == ehdrlen && 2604 adapter->csum_iphlen == ip_hlen && 2605 adapter->csum_flags == csum_flags) { 2606 /* 2607 * Same csum offload context as the previous packets; 2608 * just return. 2609 */ 2610 *txd_upper = adapter->csum_txd_upper; 2611 *txd_lower = adapter->csum_txd_lower; 2612 return 0; 2613 } 2614 2615 /* 2616 * Setup a new csum offload context. 2617 */ 2618 2619 curr_txd = adapter->next_avail_tx_desc; 2620 tx_buffer = &adapter->tx_buffer_area[curr_txd]; 2621 TXD = (struct e1000_context_desc *)&adapter->tx_desc_base[curr_txd]; 2622 2623 cmd = 0; 2624 2625 /* Setup of IP header checksum. */ 2626 if (csum_flags & CSUM_IP) { 2627 /* 2628 * Start offset for header checksum calculation. 2629 * End offset for header checksum calculation. 2630 * Offset of place to put the checksum. 2631 */ 2632 TXD->lower_setup.ip_fields.ipcss = ehdrlen; 2633 TXD->lower_setup.ip_fields.ipcse = 2634 htole16(ehdrlen + ip_hlen - 1); 2635 TXD->lower_setup.ip_fields.ipcso = 2636 ehdrlen + offsetof(struct ip, ip_sum); 2637 cmd |= E1000_TXD_CMD_IP; 2638 *txd_upper |= E1000_TXD_POPTS_IXSM << 8; 2639 } 2640 hdr_len = ehdrlen + ip_hlen; 2641 2642 if (csum_flags & CSUM_TCP) { 2643 /* 2644 * Start offset for payload checksum calculation. 2645 * End offset for payload checksum calculation. 2646 * Offset of place to put the checksum. 2647 */ 2648 TXD->upper_setup.tcp_fields.tucss = hdr_len; 2649 TXD->upper_setup.tcp_fields.tucse = htole16(0); 2650 TXD->upper_setup.tcp_fields.tucso = 2651 hdr_len + offsetof(struct tcphdr, th_sum); 2652 cmd |= E1000_TXD_CMD_TCP; 2653 *txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2654 } else if (csum_flags & CSUM_UDP) { 2655 /* 2656 * Start offset for header checksum calculation. 2657 * End offset for header checksum calculation. 2658 * Offset of place to put the checksum. 
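		 * (For UDP the checksum field sits at
		 * offsetof(struct udphdr, uh_sum) = 6, so the 34-byte
		 * header example above yields tucso = 40 here.)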
2659 */ 2660 TXD->upper_setup.tcp_fields.tucss = hdr_len; 2661 TXD->upper_setup.tcp_fields.tucse = htole16(0); 2662 TXD->upper_setup.tcp_fields.tucso = 2663 hdr_len + offsetof(struct udphdr, uh_sum); 2664 *txd_upper |= E1000_TXD_POPTS_TXSM << 8; 2665 } 2666 2667 *txd_lower = E1000_TXD_CMD_DEXT | /* Extended descr type */ 2668 E1000_TXD_DTYP_D; /* Data descr */ 2669 2670 /* Save the information for this csum offloading context */ 2671 adapter->csum_ehlen = ehdrlen; 2672 adapter->csum_iphlen = ip_hlen; 2673 adapter->csum_flags = csum_flags; 2674 adapter->csum_txd_upper = *txd_upper; 2675 adapter->csum_txd_lower = *txd_lower; 2676 2677 TXD->tcp_seg_setup.data = htole32(0); 2678 TXD->cmd_and_length = 2679 htole32(E1000_TXD_CMD_IFCS | E1000_TXD_CMD_DEXT | cmd); 2680 2681 if (++curr_txd == adapter->num_tx_desc) 2682 curr_txd = 0; 2683 2684 KKASSERT(adapter->num_tx_desc_avail > 0); 2685 adapter->num_tx_desc_avail--; 2686 2687 adapter->next_avail_tx_desc = curr_txd; 2688 return 1; 2689 } 2690 2691 static int 2692 em_txcsum_pullup(struct adapter *adapter, struct mbuf **m0) 2693 { 2694 struct mbuf *m = *m0; 2695 struct ether_header *eh; 2696 int len; 2697 2698 adapter->tx_csum_try_pullup++; 2699 2700 len = ETHER_HDR_LEN + EM_IPVHL_SIZE; 2701 2702 if (__predict_false(!M_WRITABLE(m))) { 2703 if (__predict_false(m->m_len < ETHER_HDR_LEN)) { 2704 adapter->tx_csum_drop1++; 2705 m_freem(m); 2706 *m0 = NULL; 2707 return ENOBUFS; 2708 } 2709 eh = mtod(m, struct ether_header *); 2710 2711 if (eh->ether_type == htons(ETHERTYPE_VLAN)) 2712 len += EVL_ENCAPLEN; 2713 2714 if (m->m_len < len) { 2715 adapter->tx_csum_drop2++; 2716 m_freem(m); 2717 *m0 = NULL; 2718 return ENOBUFS; 2719 } 2720 return 0; 2721 } 2722 2723 if (__predict_false(m->m_len < ETHER_HDR_LEN)) { 2724 adapter->tx_csum_pullup1++; 2725 m = m_pullup(m, ETHER_HDR_LEN); 2726 if (m == NULL) { 2727 adapter->tx_csum_pullup1_failed++; 2728 *m0 = NULL; 2729 return ENOBUFS; 2730 } 2731 *m0 = m; 2732 } 2733 eh = mtod(m, struct ether_header *); 2734 2735 if (eh->ether_type == htons(ETHERTYPE_VLAN)) 2736 len += EVL_ENCAPLEN; 2737 2738 if (m->m_len < len) { 2739 adapter->tx_csum_pullup2++; 2740 m = m_pullup(m, len); 2741 if (m == NULL) { 2742 adapter->tx_csum_pullup2_failed++; 2743 *m0 = NULL; 2744 return ENOBUFS; 2745 } 2746 *m0 = m; 2747 } 2748 return 0; 2749 } 2750 2751 static void 2752 em_txeof(struct adapter *adapter) 2753 { 2754 struct ifnet *ifp = &adapter->arpcom.ac_if; 2755 struct em_buffer *tx_buffer; 2756 int first, num_avail; 2757 2758 if (adapter->tx_dd_head == adapter->tx_dd_tail) 2759 return; 2760 2761 if (adapter->num_tx_desc_avail == adapter->num_tx_desc) 2762 return; 2763 2764 num_avail = adapter->num_tx_desc_avail; 2765 first = adapter->next_tx_to_clean; 2766 2767 while (adapter->tx_dd_head != adapter->tx_dd_tail) { 2768 struct e1000_tx_desc *tx_desc; 2769 int dd_idx = adapter->tx_dd[adapter->tx_dd_head]; 2770 2771 tx_desc = &adapter->tx_desc_base[dd_idx]; 2772 if (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) { 2773 EM_INC_TXDD_IDX(adapter->tx_dd_head); 2774 2775 if (++dd_idx == adapter->num_tx_desc) 2776 dd_idx = 0; 2777 2778 while (first != dd_idx) { 2779 logif(pkt_txclean); 2780 2781 num_avail++; 2782 2783 tx_buffer = &adapter->tx_buffer_area[first]; 2784 if (tx_buffer->m_head) { 2785 ifp->if_opackets++; 2786 bus_dmamap_unload(adapter->txtag, 2787 tx_buffer->map); 2788 m_freem(tx_buffer->m_head); 2789 tx_buffer->m_head = NULL; 2790 } 2791 2792 if (++first == adapter->num_tx_desc) 2793 first = 0; 2794 } 2795 } else { 2796 break; 2797 
		}
	}
	adapter->next_tx_to_clean = first;
	adapter->num_tx_desc_avail = num_avail;

	if (adapter->tx_dd_head == adapter->tx_dd_tail) {
		adapter->tx_dd_head = 0;
		adapter->tx_dd_tail = 0;
	}

	if (!EM_IS_OACTIVE(adapter)) {
		ifp->if_flags &= ~IFF_OACTIVE;

		/* All clean, turn off the timer */
		if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
			ifp->if_timer = 0;
	}
}

static void
em_tx_collect(struct adapter *adapter)
{
	struct ifnet *ifp = &adapter->arpcom.ac_if;
	struct em_buffer *tx_buffer;
	int tdh, first, num_avail, dd_idx = -1;

	if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
		return;

	tdh = E1000_READ_REG(&adapter->hw, E1000_TDH(0));
	if (tdh == adapter->next_tx_to_clean)
		return;

	if (adapter->tx_dd_head != adapter->tx_dd_tail)
		dd_idx = adapter->tx_dd[adapter->tx_dd_head];

	num_avail = adapter->num_tx_desc_avail;
	first = adapter->next_tx_to_clean;

	while (first != tdh) {
		logif(pkt_txclean);

		num_avail++;

		tx_buffer = &adapter->tx_buffer_area[first];
		if (tx_buffer->m_head) {
			ifp->if_opackets++;
			bus_dmamap_unload(adapter->txtag,
			    tx_buffer->map);
			m_freem(tx_buffer->m_head);
			tx_buffer->m_head = NULL;
		}

		if (first == dd_idx) {
			EM_INC_TXDD_IDX(adapter->tx_dd_head);
			if (adapter->tx_dd_head == adapter->tx_dd_tail) {
				adapter->tx_dd_head = 0;
				adapter->tx_dd_tail = 0;
				dd_idx = -1;
			} else {
				dd_idx = adapter->tx_dd[adapter->tx_dd_head];
			}
		}

		if (++first == adapter->num_tx_desc)
			first = 0;
	}
	adapter->next_tx_to_clean = first;
	adapter->num_tx_desc_avail = num_avail;

	if (!EM_IS_OACTIVE(adapter)) {
		ifp->if_flags &= ~IFF_OACTIVE;

		/* All clean, turn off the timer */
		if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
			ifp->if_timer = 0;
	}
}

/*
 * When link is lost there is sometimes work still pending in the TX
 * ring, which would trigger a watchdog; rather than allow that, do an
 * attempted cleanup and then reinit here.  Note that this has been
 * seen mostly with fiber adapters.
 */
static void
em_tx_purge(struct adapter *adapter)
{
	struct ifnet *ifp = &adapter->arpcom.ac_if;

	if (!adapter->link_active && ifp->if_timer) {
		em_tx_collect(adapter);
		if (ifp->if_timer) {
			if_printf(ifp, "Link lost, TX pending, reinit\n");
			ifp->if_timer = 0;
			em_init(adapter);
		}
	}
}

static int
em_newbuf(struct adapter *adapter, int i, int init)
{
	struct mbuf *m;
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	struct em_buffer *rx_buffer;
	int error, nseg;

	m = m_getcl(init ?
MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR); 2907 if (m == NULL) { 2908 adapter->mbuf_cluster_failed++; 2909 if (init) { 2910 if_printf(&adapter->arpcom.ac_if, 2911 "Unable to allocate RX mbuf\n"); 2912 } 2913 return (ENOBUFS); 2914 } 2915 m->m_len = m->m_pkthdr.len = MCLBYTES; 2916 2917 if (adapter->max_frame_size <= MCLBYTES - ETHER_ALIGN) 2918 m_adj(m, ETHER_ALIGN); 2919 2920 error = bus_dmamap_load_mbuf_segment(adapter->rxtag, 2921 adapter->rx_sparemap, m, 2922 &seg, 1, &nseg, BUS_DMA_NOWAIT); 2923 if (error) { 2924 m_freem(m); 2925 if (init) { 2926 if_printf(&adapter->arpcom.ac_if, 2927 "Unable to load RX mbuf\n"); 2928 } 2929 return (error); 2930 } 2931 2932 rx_buffer = &adapter->rx_buffer_area[i]; 2933 if (rx_buffer->m_head != NULL) 2934 bus_dmamap_unload(adapter->rxtag, rx_buffer->map); 2935 2936 map = rx_buffer->map; 2937 rx_buffer->map = adapter->rx_sparemap; 2938 adapter->rx_sparemap = map; 2939 2940 rx_buffer->m_head = m; 2941 2942 adapter->rx_desc_base[i].buffer_addr = htole64(seg.ds_addr); 2943 return (0); 2944 } 2945 2946 static int 2947 em_create_rx_ring(struct adapter *adapter) 2948 { 2949 device_t dev = adapter->dev; 2950 struct em_buffer *rx_buffer; 2951 int i, error; 2952 2953 adapter->rx_buffer_area = 2954 kmalloc(sizeof(struct em_buffer) * adapter->num_rx_desc, 2955 M_DEVBUF, M_WAITOK | M_ZERO); 2956 2957 /* 2958 * Create DMA tag for rx buffers 2959 */ 2960 error = bus_dma_tag_create(adapter->parent_dtag, /* parent */ 2961 1, 0, /* alignment, bounds */ 2962 BUS_SPACE_MAXADDR, /* lowaddr */ 2963 BUS_SPACE_MAXADDR, /* highaddr */ 2964 NULL, NULL, /* filter, filterarg */ 2965 MCLBYTES, /* maxsize */ 2966 1, /* nsegments */ 2967 MCLBYTES, /* maxsegsize */ 2968 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, /* flags */ 2969 &adapter->rxtag); 2970 if (error) { 2971 device_printf(dev, "Unable to allocate RX DMA tag\n"); 2972 kfree(adapter->rx_buffer_area, M_DEVBUF); 2973 adapter->rx_buffer_area = NULL; 2974 return error; 2975 } 2976 2977 /* 2978 * Create spare DMA map for rx buffers 2979 */ 2980 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_WAITOK, 2981 &adapter->rx_sparemap); 2982 if (error) { 2983 device_printf(dev, "Unable to create spare RX DMA map\n"); 2984 bus_dma_tag_destroy(adapter->rxtag); 2985 kfree(adapter->rx_buffer_area, M_DEVBUF); 2986 adapter->rx_buffer_area = NULL; 2987 return error; 2988 } 2989 2990 /* 2991 * Create DMA maps for rx buffers 2992 */ 2993 for (i = 0; i < adapter->num_rx_desc; i++) { 2994 rx_buffer = &adapter->rx_buffer_area[i]; 2995 2996 error = bus_dmamap_create(adapter->rxtag, BUS_DMA_WAITOK, 2997 &rx_buffer->map); 2998 if (error) { 2999 device_printf(dev, "Unable to create RX DMA map\n"); 3000 em_destroy_rx_ring(adapter, i); 3001 return error; 3002 } 3003 } 3004 return (0); 3005 } 3006 3007 static int 3008 em_init_rx_ring(struct adapter *adapter) 3009 { 3010 int i, error; 3011 3012 /* Reset descriptor ring */ 3013 bzero(adapter->rx_desc_base, 3014 (sizeof(struct e1000_rx_desc)) * adapter->num_rx_desc); 3015 3016 /* Allocate new ones. 
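	 * Each slot receives an mbuf cluster via em_newbuf(), which also
	 * shifts the payload start by ETHER_ALIGN (2) bytes when the
	 * maximum frame fits, so the 14-byte Ethernet header leaves the
	 * IP header 32-bit aligned.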
	 */
	for (i = 0; i < adapter->num_rx_desc; i++) {
		error = em_newbuf(adapter, i, 1);
		if (error)
			return (error);
	}

	/* Setup our descriptor pointers */
	adapter->next_rx_desc_to_check = 0;

	return (0);
}

static void
em_init_rx_unit(struct adapter *adapter)
{
	struct ifnet *ifp = &adapter->arpcom.ac_if;
	uint64_t bus_addr;
	uint32_t rctl, rxcsum;

	/*
	 * Make sure receives are disabled while setting
	 * up the descriptor ring
	 */
	rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);

	if (adapter->hw.mac.type >= e1000_82540) {
		/*
		 * Set the interrupt throttling rate. Value is calculated
		 * as ITR = 1 / (INT_THROTTLE_CEIL * 256ns)
		 */
		if (adapter->int_throttle_ceil) {
			E1000_WRITE_REG(&adapter->hw, E1000_ITR,
			    1000000000 / 256 / adapter->int_throttle_ceil);
		} else {
			E1000_WRITE_REG(&adapter->hw, E1000_ITR, 0);
		}
	}

	/* Disable accelerated acknowledgement */
	if (adapter->hw.mac.type == e1000_82574) {
		E1000_WRITE_REG(&adapter->hw,
		    E1000_RFCTL, E1000_RFCTL_ACK_DIS);
	}

	/* Setup the Base and Length of the Rx Descriptor Ring */
	bus_addr = adapter->rxdma.dma_paddr;
	E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(0),
	    adapter->num_rx_desc * sizeof(struct e1000_rx_desc));
	E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(0),
	    (uint32_t)(bus_addr >> 32));
	E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(0),
	    (uint32_t)bus_addr);

	/* Setup the Receive Control Register */
	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
	    E1000_RCTL_RDMTS_HALF |
	    (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* Make sure VLAN Filters are off */
	rctl &= ~E1000_RCTL_VFE;

	if (e1000_tbi_sbp_enabled_82543(&adapter->hw))
		rctl |= E1000_RCTL_SBP;
	else
		rctl &= ~E1000_RCTL_SBP;

	switch (adapter->rx_buffer_len) {
	default:
	case 2048:
		rctl |= E1000_RCTL_SZ_2048;
		break;

	case 4096:
		rctl |= E1000_RCTL_SZ_4096 |
		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;

	case 8192:
		rctl |= E1000_RCTL_SZ_8192 |
		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;

	case 16384:
		rctl |= E1000_RCTL_SZ_16384 |
		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	}

	if (ifp->if_mtu > ETHERMTU)
		rctl |= E1000_RCTL_LPE;
	else
		rctl &= ~E1000_RCTL_LPE;

	/* Receive Checksum Offload for TCP and UDP */
	if (ifp->if_capenable & IFCAP_RXCSUM) {
		rxcsum = E1000_READ_REG(&adapter->hw, E1000_RXCSUM);
		rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
		E1000_WRITE_REG(&adapter->hw, E1000_RXCSUM, rxcsum);
	}

	/*
	 * XXX TEMPORARY WORKAROUND: on some systems with 82573
	 * long latencies are observed, like Lenovo X60. This
	 * change eliminates the problem, but since having positive
	 * values in RDTR is a known source of problems on other
	 * platforms another solution is being sought.
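	 *
	 * (For reference, and as an assumption worth verifying against
	 * the datasheet: RDTR is the per-packet receive delay timer and
	 * RADV the absolute delay ceiling, both counted in 1.024us units,
	 * which is why nonzero values add receive interrupt latency.)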
3125 */ 3126 if (em_82573_workaround && adapter->hw.mac.type == e1000_82573) { 3127 E1000_WRITE_REG(&adapter->hw, E1000_RADV, EM_RADV_82573); 3128 E1000_WRITE_REG(&adapter->hw, E1000_RDTR, EM_RDTR_82573); 3129 } 3130 3131 /* 3132 * Setup the HW Rx Head and Tail Descriptor Pointers 3133 */ 3134 E1000_WRITE_REG(&adapter->hw, E1000_RDH(0), 0); 3135 E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), adapter->num_rx_desc - 1); 3136 3137 /* Enable Receives */ 3138 E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl); 3139 } 3140 3141 static void 3142 em_destroy_rx_ring(struct adapter *adapter, int ndesc) 3143 { 3144 struct em_buffer *rx_buffer; 3145 int i; 3146 3147 if (adapter->rx_buffer_area == NULL) 3148 return; 3149 3150 for (i = 0; i < ndesc; i++) { 3151 rx_buffer = &adapter->rx_buffer_area[i]; 3152 3153 KKASSERT(rx_buffer->m_head == NULL); 3154 bus_dmamap_destroy(adapter->rxtag, rx_buffer->map); 3155 } 3156 bus_dmamap_destroy(adapter->rxtag, adapter->rx_sparemap); 3157 bus_dma_tag_destroy(adapter->rxtag); 3158 3159 kfree(adapter->rx_buffer_area, M_DEVBUF); 3160 adapter->rx_buffer_area = NULL; 3161 } 3162 3163 static void 3164 em_rxeof(struct adapter *adapter, int count) 3165 { 3166 struct ifnet *ifp = &adapter->arpcom.ac_if; 3167 uint8_t status, accept_frame = 0, eop = 0; 3168 uint16_t len, desc_len, prev_len_adj; 3169 struct e1000_rx_desc *current_desc; 3170 struct mbuf *mp; 3171 int i; 3172 struct mbuf_chain chain[MAXCPU]; 3173 3174 i = adapter->next_rx_desc_to_check; 3175 current_desc = &adapter->rx_desc_base[i]; 3176 3177 if (!(current_desc->status & E1000_RXD_STAT_DD)) 3178 return; 3179 3180 ether_input_chain_init(chain); 3181 3182 while ((current_desc->status & E1000_RXD_STAT_DD) && count != 0) { 3183 struct mbuf *m = NULL; 3184 3185 logif(pkt_receive); 3186 3187 mp = adapter->rx_buffer_area[i].m_head; 3188 3189 /* 3190 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT 3191 * needs to access the last received byte in the mbuf. 
3192 */ 3193 bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map, 3194 BUS_DMASYNC_POSTREAD); 3195 3196 accept_frame = 1; 3197 prev_len_adj = 0; 3198 desc_len = le16toh(current_desc->length); 3199 status = current_desc->status; 3200 if (status & E1000_RXD_STAT_EOP) { 3201 count--; 3202 eop = 1; 3203 if (desc_len < ETHER_CRC_LEN) { 3204 len = 0; 3205 prev_len_adj = ETHER_CRC_LEN - desc_len; 3206 } else { 3207 len = desc_len - ETHER_CRC_LEN; 3208 } 3209 } else { 3210 eop = 0; 3211 len = desc_len; 3212 } 3213 3214 if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) { 3215 uint8_t last_byte; 3216 uint32_t pkt_len = desc_len; 3217 3218 if (adapter->fmp != NULL) 3219 pkt_len += adapter->fmp->m_pkthdr.len; 3220 3221 last_byte = *(mtod(mp, caddr_t) + desc_len - 1); 3222 if (TBI_ACCEPT(&adapter->hw, status, 3223 current_desc->errors, pkt_len, last_byte, 3224 adapter->min_frame_size, adapter->max_frame_size)) { 3225 e1000_tbi_adjust_stats_82543(&adapter->hw, 3226 &adapter->stats, pkt_len, 3227 adapter->hw.mac.addr, 3228 adapter->max_frame_size); 3229 if (len > 0) 3230 len--; 3231 } else { 3232 accept_frame = 0; 3233 } 3234 } 3235 3236 if (accept_frame) { 3237 if (em_newbuf(adapter, i, 0) != 0) { 3238 ifp->if_iqdrops++; 3239 goto discard; 3240 } 3241 3242 /* Assign correct length to the current fragment */ 3243 mp->m_len = len; 3244 3245 if (adapter->fmp == NULL) { 3246 mp->m_pkthdr.len = len; 3247 adapter->fmp = mp; /* Store the first mbuf */ 3248 adapter->lmp = mp; 3249 } else { 3250 /* 3251 * Chain mbuf's together 3252 */ 3253 3254 /* 3255 * Adjust length of previous mbuf in chain if 3256 * we received less than 4 bytes in the last 3257 * descriptor. 3258 */ 3259 if (prev_len_adj > 0) { 3260 adapter->lmp->m_len -= prev_len_adj; 3261 adapter->fmp->m_pkthdr.len -= 3262 prev_len_adj; 3263 } 3264 adapter->lmp->m_next = mp; 3265 adapter->lmp = adapter->lmp->m_next; 3266 adapter->fmp->m_pkthdr.len += len; 3267 } 3268 3269 if (eop) { 3270 adapter->fmp->m_pkthdr.rcvif = ifp; 3271 ifp->if_ipackets++; 3272 3273 if (ifp->if_capenable & IFCAP_RXCSUM) { 3274 em_rxcsum(adapter, current_desc, 3275 adapter->fmp); 3276 } 3277 3278 if (status & E1000_RXD_STAT_VP) { 3279 adapter->fmp->m_pkthdr.ether_vlantag = 3280 (le16toh(current_desc->special) & 3281 E1000_RXD_SPC_VLAN_MASK); 3282 adapter->fmp->m_flags |= M_VLANTAG; 3283 } 3284 m = adapter->fmp; 3285 adapter->fmp = NULL; 3286 adapter->lmp = NULL; 3287 } 3288 } else { 3289 ifp->if_ierrors++; 3290 discard: 3291 #ifdef foo 3292 /* Reuse loaded DMA map and just update mbuf chain */ 3293 mp = adapter->rx_buffer_area[i].m_head; 3294 mp->m_len = mp->m_pkthdr.len = MCLBYTES; 3295 mp->m_data = mp->m_ext.ext_buf; 3296 mp->m_next = NULL; 3297 if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN)) 3298 m_adj(mp, ETHER_ALIGN); 3299 #endif 3300 if (adapter->fmp != NULL) { 3301 m_freem(adapter->fmp); 3302 adapter->fmp = NULL; 3303 adapter->lmp = NULL; 3304 } 3305 m = NULL; 3306 } 3307 3308 /* Zero out the receive descriptors status. */ 3309 current_desc->status = 0; 3310 3311 if (m != NULL) 3312 ether_input_chain(ifp, m, NULL, chain); 3313 3314 /* Advance our pointers to the next descriptor. */ 3315 if (++i == adapter->num_rx_desc) 3316 i = 0; 3317 current_desc = &adapter->rx_desc_base[i]; 3318 } 3319 adapter->next_rx_desc_to_check = i; 3320 3321 ether_input_dispatch(chain); 3322 3323 /* Advance the E1000's Receive Queue #0 "Tail Pointer". 
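	 * The tail is deliberately left one slot behind the descriptor the
	 * driver will examine next: e.g. with 256 descriptors and
	 * next_rx_desc_to_check == 0, RDT is written as 255, so the
	 * hardware never writes the entry the driver is about to inspect.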
	 */
	if (--i < 0)
		i = adapter->num_rx_desc - 1;
	E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), i);
}

static void
em_rxcsum(struct adapter *adapter, struct e1000_rx_desc *rx_desc,
    struct mbuf *mp)
{
	/* 82543 or newer only */
	if (adapter->hw.mac.type < e1000_82543 ||
	    /* Ignore Checksum bit is set */
	    (rx_desc->status & E1000_RXD_STAT_IXSM))
		return;

	if ((rx_desc->status & E1000_RXD_STAT_IPCS) &&
	    !(rx_desc->errors & E1000_RXD_ERR_IPE)) {
		/* IP Checksum Good */
		mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID;
	}

	if ((rx_desc->status & E1000_RXD_STAT_TCPCS) &&
	    !(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
		mp->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
		    CSUM_PSEUDO_HDR |
		    CSUM_FRAG_NOT_CHECKED;
		mp->m_pkthdr.csum_data = htons(0xffff);
	}
}

static void
em_enable_intr(struct adapter *adapter)
{
	lwkt_serialize_handler_enable(adapter->arpcom.ac_if.if_serializer);
	E1000_WRITE_REG(&adapter->hw, E1000_IMS, IMS_ENABLE_MASK);
}

static void
em_disable_intr(struct adapter *adapter)
{
	uint32_t clear = 0xffffffff;

	/*
	 * The first version of 82542 had an erratum where, when link was
	 * forced, it would stay up even if the cable was disconnected.
	 * Sequence errors were used to detect the disconnect and then the
	 * driver would unforce the link. This code is in the ISR. For
	 * this to work correctly the Sequence error interrupt had to be
	 * enabled all the time.
	 */
	if (adapter->hw.mac.type == e1000_82542 &&
	    adapter->hw.revision_id == E1000_REVISION_2)
		clear &= ~E1000_IMC_RXSEQ;

	E1000_WRITE_REG(&adapter->hw, E1000_IMC, clear);

	lwkt_serialize_handler_disable(adapter->arpcom.ac_if.if_serializer);
}

/*
 * Bit of a misnomer, what this really means is
 * to enable OS management of the system... aka
 * to disable special hardware management features
 */
static void
em_get_mgmt(struct adapter *adapter)
{
	/* A shared code workaround */
#define E1000_82542_MANC2H E1000_MANC2H
	if (adapter->has_manage) {
		int manc2h = E1000_READ_REG(&adapter->hw, E1000_MANC2H);
		int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);

		/* disable hardware interception of ARP */
		manc &= ~(E1000_MANC_ARP_EN);

		/* enable receiving management packets to the host */
		if (adapter->hw.mac.type >= e1000_82571) {
			manc |= E1000_MANC_EN_MNG2HOST;
#define E1000_MNG2HOST_PORT_623 (1 << 5)
#define E1000_MNG2HOST_PORT_664 (1 << 6)
			manc2h |= E1000_MNG2HOST_PORT_623;
			manc2h |= E1000_MNG2HOST_PORT_664;
			E1000_WRITE_REG(&adapter->hw, E1000_MANC2H, manc2h);
		}

		E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
	}
}

/*
 * Give control back to hardware management
 * controller if there is one.
 */
static void
em_rel_mgmt(struct adapter *adapter)
{
	if (adapter->has_manage) {
		int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);

		/* re-enable hardware interception of ARP */
		manc |= E1000_MANC_ARP_EN;

		if (adapter->hw.mac.type >= e1000_82571)
			manc &= ~E1000_MANC_EN_MNG2HOST;

		E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
	}
}

/*
 * em_get_hw_control() sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded. For AMT version (only with 82573)
 * of the f/w this means that the network i/f is open.
 */
static void
em_get_hw_control(struct adapter *adapter)
{
	uint32_t ctrl_ext, swsm;

	/* Let firmware know the driver has taken over */
	switch (adapter->hw.mac.type) {
	case e1000_82573:
		swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM);
		E1000_WRITE_REG(&adapter->hw, E1000_SWSM,
		    swsm | E1000_SWSM_DRV_LOAD);
		break;
	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
	case e1000_ich8lan:
	case e1000_ich9lan:
	case e1000_ich10lan:
		ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
		E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
		    ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
		break;
	default:
		break;
	}
}

/*
 * em_rel_hw_control() resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded. For AMT version (only with 82573)
 * of the f/w this means that the network i/f is closed.
 */
static void
em_rel_hw_control(struct adapter *adapter)
{
	uint32_t ctrl_ext, swsm;

	/* Let firmware take over control of h/w */
	switch (adapter->hw.mac.type) {
	case e1000_82573:
		swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM);
		E1000_WRITE_REG(&adapter->hw, E1000_SWSM,
		    swsm & ~E1000_SWSM_DRV_LOAD);
		break;

	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
	case e1000_ich8lan:
	case e1000_ich9lan:
	case e1000_ich10lan:
		ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
		E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
		    ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
		break;

	default:
		break;
	}
}

static int
em_is_valid_eaddr(const uint8_t *addr)
{
	char zero_addr[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };

	if ((addr[0] & 1) || !bcmp(addr, zero_addr, ETHER_ADDR_LEN))
		return (FALSE);

	return (TRUE);
}

/*
 * Enable PCI Wake On Lan capability
 */
void
em_enable_wol(device_t dev)
{
	uint16_t cap, status;
	uint8_t id;

	/* First find the capabilities pointer */
	cap = pci_read_config(dev, PCIR_CAP_PTR, 2);

	/* Read the PM Capabilities */
	id = pci_read_config(dev, cap, 1);
	if (id != PCIY_PMG)	/* Something wrong */
		return;

	/*
	 * OK, we have the power capabilities,
	 * so now get the status register
	 */
	cap += PCIR_POWER_STATUS;
	status = pci_read_config(dev, cap, 2);
	status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(dev, cap, status, 2);
}


/*
 * 82544 Coexistence issue workaround.
 * There are 2 issues.
 * 1. Transmit Hang issue.
 *    To detect this issue, the following equation can be used:
 *    SIZE[3:0] + ADDR[2:0] = SUM[3:0].
 *    If SUM[3:0] is in between 1 to 4, we will have this issue.
 *
 * 2. DAC issue.
 *    To detect this issue, the following equation can be used:
 *    SIZE[3:0] + ADDR[2:0] = SUM[3:0].
 *    If SUM[3:0] is in between 9 to c, we will have this issue.
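 *
 * Worked example (illustrative): for a segment whose bus address has
 * low 3 bits of 6 and whose length is 14, SUM[3:0] =
 * (6 + 14) & 0xF = 4, which falls in the 1-4 hang range; the code
 * below therefore splits it into a 10-byte and a trailing 4-byte
 * descriptor.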
 *
 * WORKAROUND:
 * Make sure we do not have an ending address of
 * 1,2,3,4 (Hang) or 9,a,b,c (DAC)
 */
static uint32_t
em_82544_fill_desc(bus_addr_t address, uint32_t length, PDESC_ARRAY desc_array)
{
	uint32_t safe_terminator;

	/*
	 * Since the issue is sensitive to both length and address, handle
	 * the trivially safe short-length case first...
	 */
	if (length <= 4) {
		desc_array->descriptor[0].address = address;
		desc_array->descriptor[0].length = length;
		desc_array->elements = 1;
		return (desc_array->elements);
	}

	safe_terminator =
	    (uint32_t)((((uint32_t)address & 0x7) + (length & 0xF)) & 0xF);

	/*
	 * If SUM[3:0] does not fall in 0x1-0x4 or 0x9-0xC, the segment
	 * is safe as a single descriptor.
	 */
	if (safe_terminator == 0 ||
	    (safe_terminator > 4 && safe_terminator < 9) ||
	    (safe_terminator > 0xC && safe_terminator <= 0xF)) {
		desc_array->descriptor[0].address = address;
		desc_array->descriptor[0].length = length;
		desc_array->elements = 1;
		return (desc_array->elements);
	}

	desc_array->descriptor[0].address = address;
	desc_array->descriptor[0].length = length - 4;
	desc_array->descriptor[1].address = address + (length - 4);
	desc_array->descriptor[1].length = 4;
	desc_array->elements = 2;
	return (desc_array->elements);
}

static void
em_update_stats(struct adapter *adapter)
{
	struct ifnet *ifp = &adapter->arpcom.ac_if;

	if (adapter->hw.phy.media_type == e1000_media_type_copper ||
	    (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) {
		adapter->stats.symerrs +=
		    E1000_READ_REG(&adapter->hw, E1000_SYMERRS);
		adapter->stats.sec += E1000_READ_REG(&adapter->hw, E1000_SEC);
	}
	adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, E1000_CRCERRS);
	adapter->stats.mpc += E1000_READ_REG(&adapter->hw, E1000_MPC);
	adapter->stats.scc += E1000_READ_REG(&adapter->hw, E1000_SCC);
	adapter->stats.ecol += E1000_READ_REG(&adapter->hw, E1000_ECOL);

	adapter->stats.mcc += E1000_READ_REG(&adapter->hw, E1000_MCC);
	adapter->stats.latecol += E1000_READ_REG(&adapter->hw, E1000_LATECOL);
	adapter->stats.colc += E1000_READ_REG(&adapter->hw, E1000_COLC);
	adapter->stats.dc += E1000_READ_REG(&adapter->hw, E1000_DC);
	adapter->stats.rlec += E1000_READ_REG(&adapter->hw, E1000_RLEC);
	adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, E1000_XONRXC);
	adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, E1000_XONTXC);
	adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, E1000_XOFFRXC);
	adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, E1000_XOFFTXC);
	adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, E1000_FCRUC);
	adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, E1000_PRC64);
	adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, E1000_PRC127);
	adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, E1000_PRC255);
	adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, E1000_PRC511);
	adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, E1000_PRC1023);
	adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, E1000_PRC1522);
	adapter->stats.gprc += E1000_READ_REG(&adapter->hw, E1000_GPRC);
	adapter->stats.bprc += E1000_READ_REG(&adapter->hw, E1000_BPRC);
	adapter->stats.mprc += E1000_READ_REG(&adapter->hw, E1000_MPRC);
	adapter->stats.gptc += E1000_READ_REG(&adapter->hw, E1000_GPTC);

	/* For the 64-bit byte
counters the low dword must be read first. */ 3633 /* Both registers clear on the read of the high dword */ 3634 3635 adapter->stats.gorc += E1000_READ_REG(&adapter->hw, E1000_GORCH); 3636 adapter->stats.gotc += E1000_READ_REG(&adapter->hw, E1000_GOTCH); 3637 3638 adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, E1000_RNBC); 3639 adapter->stats.ruc += E1000_READ_REG(&adapter->hw, E1000_RUC); 3640 adapter->stats.rfc += E1000_READ_REG(&adapter->hw, E1000_RFC); 3641 adapter->stats.roc += E1000_READ_REG(&adapter->hw, E1000_ROC); 3642 adapter->stats.rjc += E1000_READ_REG(&adapter->hw, E1000_RJC); 3643 3644 adapter->stats.tor += E1000_READ_REG(&adapter->hw, E1000_TORH); 3645 adapter->stats.tot += E1000_READ_REG(&adapter->hw, E1000_TOTH); 3646 3647 adapter->stats.tpr += E1000_READ_REG(&adapter->hw, E1000_TPR); 3648 adapter->stats.tpt += E1000_READ_REG(&adapter->hw, E1000_TPT); 3649 adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, E1000_PTC64); 3650 adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, E1000_PTC127); 3651 adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, E1000_PTC255); 3652 adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, E1000_PTC511); 3653 adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, E1000_PTC1023); 3654 adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, E1000_PTC1522); 3655 adapter->stats.mptc += E1000_READ_REG(&adapter->hw, E1000_MPTC); 3656 adapter->stats.bptc += E1000_READ_REG(&adapter->hw, E1000_BPTC); 3657 3658 if (adapter->hw.mac.type >= e1000_82543) { 3659 adapter->stats.algnerrc += 3660 E1000_READ_REG(&adapter->hw, E1000_ALGNERRC); 3661 adapter->stats.rxerrc += 3662 E1000_READ_REG(&adapter->hw, E1000_RXERRC); 3663 adapter->stats.tncrs += 3664 E1000_READ_REG(&adapter->hw, E1000_TNCRS); 3665 adapter->stats.cexterr += 3666 E1000_READ_REG(&adapter->hw, E1000_CEXTERR); 3667 adapter->stats.tsctc += 3668 E1000_READ_REG(&adapter->hw, E1000_TSCTC); 3669 adapter->stats.tsctfc += 3670 E1000_READ_REG(&adapter->hw, E1000_TSCTFC); 3671 } 3672 3673 ifp->if_collisions = adapter->stats.colc; 3674 3675 /* Rx Errors */ 3676 ifp->if_ierrors = 3677 adapter->dropped_pkts + adapter->stats.rxerrc + 3678 adapter->stats.crcerrs + adapter->stats.algnerrc + 3679 adapter->stats.ruc + adapter->stats.roc + 3680 adapter->stats.mpc + adapter->stats.cexterr; 3681 3682 /* Tx Errors */ 3683 ifp->if_oerrors = 3684 adapter->stats.ecol + adapter->stats.latecol + 3685 adapter->watchdog_events; 3686 } 3687 3688 static void 3689 em_print_debug_info(struct adapter *adapter) 3690 { 3691 device_t dev = adapter->dev; 3692 uint8_t *hw_addr = adapter->hw.hw_addr; 3693 3694 device_printf(dev, "Adapter hardware address = %p \n", hw_addr); 3695 device_printf(dev, "CTRL = 0x%x RCTL = 0x%x \n", 3696 E1000_READ_REG(&adapter->hw, E1000_CTRL), 3697 E1000_READ_REG(&adapter->hw, E1000_RCTL)); 3698 device_printf(dev, "Packet buffer = Tx=%dk Rx=%dk \n", 3699 ((E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff0000) >> 16),\ 3700 (E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff) ); 3701 device_printf(dev, "Flow control watermarks high = %d low = %d\n", 3702 adapter->hw.fc.high_water, 3703 adapter->hw.fc.low_water); 3704 device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n", 3705 E1000_READ_REG(&adapter->hw, E1000_TIDV), 3706 E1000_READ_REG(&adapter->hw, E1000_TADV)); 3707 device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n", 3708 E1000_READ_REG(&adapter->hw, E1000_RDTR), 3709 E1000_READ_REG(&adapter->hw, E1000_RADV)); 3710 device_printf(dev, "fifo workaround = %lld, 
fifo_reset_count = %lld\n", 3711 (long long)adapter->tx_fifo_wrk_cnt, 3712 (long long)adapter->tx_fifo_reset_cnt); 3713 device_printf(dev, "hw tdh = %d, hw tdt = %d\n", 3714 E1000_READ_REG(&adapter->hw, E1000_TDH(0)), 3715 E1000_READ_REG(&adapter->hw, E1000_TDT(0))); 3716 device_printf(dev, "hw rdh = %d, hw rdt = %d\n", 3717 E1000_READ_REG(&adapter->hw, E1000_RDH(0)), 3718 E1000_READ_REG(&adapter->hw, E1000_RDT(0))); 3719 device_printf(dev, "Num Tx descriptors avail = %d\n", 3720 adapter->num_tx_desc_avail); 3721 device_printf(dev, "Tx Descriptors not avail1 = %ld\n", 3722 adapter->no_tx_desc_avail1); 3723 device_printf(dev, "Tx Descriptors not avail2 = %ld\n", 3724 adapter->no_tx_desc_avail2); 3725 device_printf(dev, "Std mbuf failed = %ld\n", 3726 adapter->mbuf_alloc_failed); 3727 device_printf(dev, "Std mbuf cluster failed = %ld\n", 3728 adapter->mbuf_cluster_failed); 3729 device_printf(dev, "Driver dropped packets = %ld\n", 3730 adapter->dropped_pkts); 3731 device_printf(dev, "Driver tx dma failure in encap = %ld\n", 3732 adapter->no_tx_dma_setup); 3733 3734 device_printf(dev, "TXCSUM try pullup = %lu\n", 3735 adapter->tx_csum_try_pullup); 3736 device_printf(dev, "TXCSUM m_pullup(eh) called = %lu\n", 3737 adapter->tx_csum_pullup1); 3738 device_printf(dev, "TXCSUM m_pullup(eh) failed = %lu\n", 3739 adapter->tx_csum_pullup1_failed); 3740 device_printf(dev, "TXCSUM m_pullup(eh+ip) called = %lu\n", 3741 adapter->tx_csum_pullup2); 3742 device_printf(dev, "TXCSUM m_pullup(eh+ip) failed = %lu\n", 3743 adapter->tx_csum_pullup2_failed); 3744 device_printf(dev, "TXCSUM non-writable(eh) droped = %lu\n", 3745 adapter->tx_csum_drop1); 3746 device_printf(dev, "TXCSUM non-writable(eh+ip) droped = %lu\n", 3747 adapter->tx_csum_drop2); 3748 } 3749 3750 static void 3751 em_print_hw_stats(struct adapter *adapter) 3752 { 3753 device_t dev = adapter->dev; 3754 3755 device_printf(dev, "Excessive collisions = %lld\n", 3756 (long long)adapter->stats.ecol); 3757 #if (DEBUG_HW > 0) /* Dont output these errors normally */ 3758 device_printf(dev, "Symbol errors = %lld\n", 3759 (long long)adapter->stats.symerrs); 3760 #endif 3761 device_printf(dev, "Sequence errors = %lld\n", 3762 (long long)adapter->stats.sec); 3763 device_printf(dev, "Defer count = %lld\n", 3764 (long long)adapter->stats.dc); 3765 device_printf(dev, "Missed Packets = %lld\n", 3766 (long long)adapter->stats.mpc); 3767 device_printf(dev, "Receive No Buffers = %lld\n", 3768 (long long)adapter->stats.rnbc); 3769 /* RLEC is inaccurate on some hardware, calculate our own. 
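	 * The "Receive Length Errors" line below therefore reports
	 * roc + ruc (oversized plus undersized frames) instead of RLEC.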
static void
em_print_hw_stats(struct adapter *adapter)
{
        device_t dev = adapter->dev;

        device_printf(dev, "Excessive collisions = %lld\n",
            (long long)adapter->stats.ecol);
#if (DEBUG_HW > 0)      /* Don't output these errors normally */
        device_printf(dev, "Symbol errors = %lld\n",
            (long long)adapter->stats.symerrs);
#endif
        device_printf(dev, "Sequence errors = %lld\n",
            (long long)adapter->stats.sec);
        device_printf(dev, "Defer count = %lld\n",
            (long long)adapter->stats.dc);
        device_printf(dev, "Missed Packets = %lld\n",
            (long long)adapter->stats.mpc);
        device_printf(dev, "Receive No Buffers = %lld\n",
            (long long)adapter->stats.rnbc);
        /* RLEC is inaccurate on some hardware, so calculate our own */
        device_printf(dev, "Receive Length Errors = %lld\n",
            ((long long)adapter->stats.roc + (long long)adapter->stats.ruc));
        device_printf(dev, "Receive errors = %lld\n",
            (long long)adapter->stats.rxerrc);
        device_printf(dev, "Crc errors = %lld\n",
            (long long)adapter->stats.crcerrs);
        device_printf(dev, "Alignment errors = %lld\n",
            (long long)adapter->stats.algnerrc);
        device_printf(dev, "Collision/Carrier extension errors = %lld\n",
            (long long)adapter->stats.cexterr);
        device_printf(dev, "RX overruns = %ld\n", adapter->rx_overruns);
        device_printf(dev, "watchdog timeouts = %ld\n",
            adapter->watchdog_events);
        device_printf(dev, "XON Rcvd = %lld\n",
            (long long)adapter->stats.xonrxc);
        device_printf(dev, "XON Xmtd = %lld\n",
            (long long)adapter->stats.xontxc);
        device_printf(dev, "XOFF Rcvd = %lld\n",
            (long long)adapter->stats.xoffrxc);
        device_printf(dev, "XOFF Xmtd = %lld\n",
            (long long)adapter->stats.xofftxc);
        device_printf(dev, "Good Packets Rcvd = %lld\n",
            (long long)adapter->stats.gprc);
        device_printf(dev, "Good Packets Xmtd = %lld\n",
            (long long)adapter->stats.gptc);
}

static void
em_print_nvm_info(struct adapter *adapter)
{
        uint16_t eeprom_data;
        int i, j, row = 0;

        /* It's a bit crude, but it gets the job done */
        kprintf("\nInterface EEPROM Dump:\n");
        kprintf("Offset\n0x0000 ");
        for (i = 0, j = 0; i < 32; i++, j++) {
                if (j == 8) {   /* Make the offset block */
                        j = 0;
                        ++row;
                        kprintf("\n0x00%x0 ", row);
                }
                e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data);
                kprintf("%04x ", eeprom_data);
        }
        kprintf("\n");
}
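/*
 * The dump above is 4 rows of 8 words, with the offset label advancing
 * 0x10 bytes per row; the word values shown here are placeholders:
 *
 *      Interface EEPROM Dump:
 *      Offset
 *      0x0000 xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxxx
 *      0x0010 xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxxx
 *      0x0020 xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxxx
 *      0x0030 xxxx xxxx xxxx xxxx xxxx xxxx xxxx xxxx
 */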
static int
em_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
{
        struct adapter *adapter;
        struct ifnet *ifp;
        int error, result;

        result = -1;
        error = sysctl_handle_int(oidp, &result, 0, req);
        if (error || !req->newptr)
                return (error);

        adapter = (struct adapter *)arg1;
        ifp = &adapter->arpcom.ac_if;

        lwkt_serialize_enter(ifp->if_serializer);

        if (result == 1)
                em_print_debug_info(adapter);

        /*
         * This value will cause a hex dump of the
         * first 32 16-bit words of the EEPROM to
         * the screen.
         */
        if (result == 2)
                em_print_nvm_info(adapter);

        lwkt_serialize_exit(ifp->if_serializer);

        return (error);
}

static int
em_sysctl_stats(SYSCTL_HANDLER_ARGS)
{
        int error, result;

        result = -1;
        error = sysctl_handle_int(oidp, &result, 0, req);
        if (error || !req->newptr)
                return (error);

        if (result == 1) {
                struct adapter *adapter = (struct adapter *)arg1;
                struct ifnet *ifp = &adapter->arpcom.ac_if;

                lwkt_serialize_enter(ifp->if_serializer);
                em_print_hw_stats(adapter);
                lwkt_serialize_exit(ifp->if_serializer);
        }
        return (error);
}

static void
em_add_sysctl(struct adapter *adapter)
{
        sysctl_ctx_init(&adapter->sysctl_ctx);
        adapter->sysctl_tree = SYSCTL_ADD_NODE(&adapter->sysctl_ctx,
            SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
            device_get_nameunit(adapter->dev), CTLFLAG_RD, 0, "");
        if (adapter->sysctl_tree == NULL) {
                device_printf(adapter->dev, "can't add sysctl node\n");
        } else {
                SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
                    SYSCTL_CHILDREN(adapter->sysctl_tree),
                    OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
                    em_sysctl_debug_info, "I", "Debug Information");

                SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
                    SYSCTL_CHILDREN(adapter->sysctl_tree),
                    OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
                    em_sysctl_stats, "I", "Statistics");

                SYSCTL_ADD_INT(&adapter->sysctl_ctx,
                    SYSCTL_CHILDREN(adapter->sysctl_tree),
                    OID_AUTO, "rxd", CTLFLAG_RD,
                    &adapter->num_rx_desc, 0, NULL);
                SYSCTL_ADD_INT(&adapter->sysctl_ctx,
                    SYSCTL_CHILDREN(adapter->sysctl_tree),
                    OID_AUTO, "txd", CTLFLAG_RD,
                    &adapter->num_tx_desc, 0, NULL);

                if (adapter->hw.mac.type >= e1000_82540) {
                        SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
                            SYSCTL_CHILDREN(adapter->sysctl_tree),
                            OID_AUTO, "int_throttle_ceil",
                            CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
                            em_sysctl_int_throttle, "I",
                            "interrupt throttling rate");
                }
                SYSCTL_ADD_PROC(&adapter->sysctl_ctx,
                    SYSCTL_CHILDREN(adapter->sysctl_tree),
                    OID_AUTO, "int_tx_nsegs",
                    CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
                    em_sysctl_int_tx_nsegs, "I",
                    "# segments per TX interrupt");
        }
}
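/*
 * The nodes registered above live under hw.<nameunit>.  For example,
 * assuming the first adapter attaches as em0:
 *
 *      sysctl hw.em0.debug=1   - dump adapter/descriptor debug state
 *      sysctl hw.em0.debug=2   - hex dump the first 32 EEPROM words
 *      sysctl hw.em0.stats=1   - dump the MAC statistics counters
 */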
3936 */ 3937 throttle = 1000000000 / 256 / throttle; 3938 3939 /* Upper 16bits of ITR is reserved and should be zero */ 3940 if (throttle & 0xffff0000) 3941 return EINVAL; 3942 } 3943 3944 lwkt_serialize_enter(ifp->if_serializer); 3945 3946 if (throttle) 3947 adapter->int_throttle_ceil = 1000000000 / 256 / throttle; 3948 else 3949 adapter->int_throttle_ceil = 0; 3950 3951 if (ifp->if_flags & IFF_RUNNING) 3952 E1000_WRITE_REG(&adapter->hw, E1000_ITR, throttle); 3953 3954 lwkt_serialize_exit(ifp->if_serializer); 3955 3956 if (bootverbose) { 3957 if_printf(ifp, "Interrupt moderation set to %d/sec\n", 3958 adapter->int_throttle_ceil); 3959 } 3960 return 0; 3961 } 3962 3963 static int 3964 em_sysctl_int_tx_nsegs(SYSCTL_HANDLER_ARGS) 3965 { 3966 struct adapter *adapter = (void *)arg1; 3967 struct ifnet *ifp = &adapter->arpcom.ac_if; 3968 int error, segs; 3969 3970 segs = adapter->tx_int_nsegs; 3971 error = sysctl_handle_int(oidp, &segs, 0, req); 3972 if (error || req->newptr == NULL) 3973 return error; 3974 if (segs <= 0) 3975 return EINVAL; 3976 3977 lwkt_serialize_enter(ifp->if_serializer); 3978 3979 /* 3980 * Don't allow int_tx_nsegs to become: 3981 * o Less the oact_tx_desc 3982 * o Too large that no TX desc will cause TX interrupt to 3983 * be generated (OACTIVE will never recover) 3984 * o Too small that will cause tx_dd[] overflow 3985 */ 3986 if (segs < adapter->oact_tx_desc || 3987 segs >= adapter->num_tx_desc - adapter->oact_tx_desc || 3988 segs < adapter->num_tx_desc / EM_TXDD_SAFE) { 3989 error = EINVAL; 3990 } else { 3991 error = 0; 3992 adapter->tx_int_nsegs = segs; 3993 } 3994 3995 lwkt_serialize_exit(ifp->if_serializer); 3996 3997 return error; 3998 } 3999