/*	$OpenBSD: if_nfe.c,v 1.63 2006/06/17 18:00:43 brad Exp $	*/

/*
 * Copyright (c) 2006 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Sepherosa Ziehau <sepherosa@gmail.com> and
 * Matthew Dillon <dillon@apollo.backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
53 */ 54 55 /* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */ 56 57 #include "opt_ifpoll.h" 58 59 #include <sys/param.h> 60 #include <sys/endian.h> 61 #include <sys/kernel.h> 62 #include <sys/bus.h> 63 #include <sys/interrupt.h> 64 #include <sys/proc.h> 65 #include <sys/rman.h> 66 #include <sys/serialize.h> 67 #include <sys/socket.h> 68 #include <sys/sockio.h> 69 #include <sys/sysctl.h> 70 71 #include <net/ethernet.h> 72 #include <net/if.h> 73 #include <net/bpf.h> 74 #include <net/if_arp.h> 75 #include <net/if_dl.h> 76 #include <net/if_media.h> 77 #include <net/if_poll.h> 78 #include <net/ifq_var.h> 79 #include <net/if_types.h> 80 #include <net/if_var.h> 81 #include <net/vlan/if_vlan_var.h> 82 #include <net/vlan/if_vlan_ether.h> 83 84 #include <bus/pci/pcireg.h> 85 #include <bus/pci/pcivar.h> 86 #include <bus/pci/pcidevs.h> 87 88 #include <dev/netif/mii_layer/mii.h> 89 #include <dev/netif/mii_layer/miivar.h> 90 91 #include "miibus_if.h" 92 93 #include <dev/netif/nfe/if_nfereg.h> 94 #include <dev/netif/nfe/if_nfevar.h> 95 96 #define NFE_CSUM 97 #define NFE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 98 99 static int nfe_probe(device_t); 100 static int nfe_attach(device_t); 101 static int nfe_detach(device_t); 102 static void nfe_shutdown(device_t); 103 static int nfe_resume(device_t); 104 static int nfe_suspend(device_t); 105 106 static int nfe_miibus_readreg(device_t, int, int); 107 static void nfe_miibus_writereg(device_t, int, int, int); 108 static void nfe_miibus_statchg(device_t); 109 110 #ifdef IFPOLL_ENABLE 111 static void nfe_npoll(struct ifnet *, struct ifpoll_info *); 112 static void nfe_npoll_compat(struct ifnet *, void *, int); 113 static void nfe_disable_intrs(struct nfe_softc *); 114 #endif 115 static void nfe_intr(void *); 116 static int nfe_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 117 static int nfe_rxeof(struct nfe_softc *); 118 static int nfe_txeof(struct nfe_softc *, int); 119 static int nfe_encap(struct nfe_softc *, struct nfe_tx_ring *, 120 struct mbuf *); 121 static void nfe_start(struct ifnet *); 122 static void nfe_watchdog(struct ifnet *); 123 static void nfe_init(void *); 124 static void nfe_stop(struct nfe_softc *); 125 static struct nfe_jbuf *nfe_jalloc(struct nfe_softc *); 126 static void nfe_jfree(void *); 127 static void nfe_jref(void *); 128 static int nfe_jpool_alloc(struct nfe_softc *, struct nfe_rx_ring *); 129 static void nfe_jpool_free(struct nfe_softc *, struct nfe_rx_ring *); 130 static int nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); 131 static void nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); 132 static int nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); 133 static void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); 134 static int nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); 135 static void nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); 136 static int nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); 137 static void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); 138 static int nfe_ifmedia_upd(struct ifnet *); 139 static void nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *); 140 static void nfe_setmulti(struct nfe_softc *); 141 static void nfe_get_macaddr(struct nfe_softc *, uint8_t *); 142 static void nfe_set_macaddr(struct nfe_softc *, const uint8_t *); 143 static void nfe_powerup(device_t); 144 static void nfe_mac_reset(struct nfe_softc *); 145 static void nfe_tick(void *); 146 static void 
nfe_set_paddr_rxdesc(struct nfe_softc *, struct nfe_rx_ring *, 147 int, bus_addr_t); 148 static void nfe_set_ready_rxdesc(struct nfe_softc *, struct nfe_rx_ring *, 149 int); 150 static int nfe_newbuf_std(struct nfe_softc *, struct nfe_rx_ring *, int, 151 int); 152 static int nfe_newbuf_jumbo(struct nfe_softc *, struct nfe_rx_ring *, int, 153 int); 154 static void nfe_enable_intrs(struct nfe_softc *); 155 156 static int nfe_sysctl_imtime(SYSCTL_HANDLER_ARGS); 157 158 #define NFE_DEBUG 159 #ifdef NFE_DEBUG 160 161 static int nfe_debug = 0; 162 static int nfe_rx_ring_count = NFE_RX_RING_DEF_COUNT; 163 static int nfe_tx_ring_count = NFE_TX_RING_DEF_COUNT; 164 /* 165 * hw timer simulated interrupt moderation @4000Hz. Negative values 166 * disable the timer when the discrete interrupt rate falls below 167 * the moderation rate. 168 * 169 * XXX 8000Hz might be better but if the interrupt is shared it can 170 * blow out the cpu. 171 */ 172 static int nfe_imtime = -250; /* uS */ 173 174 TUNABLE_INT("hw.nfe.rx_ring_count", &nfe_rx_ring_count); 175 TUNABLE_INT("hw.nfe.tx_ring_count", &nfe_tx_ring_count); 176 TUNABLE_INT("hw.nfe.imtimer", &nfe_imtime); 177 TUNABLE_INT("hw.nfe.debug", &nfe_debug); 178 179 #define DPRINTF(sc, fmt, ...) do { \ 180 if ((sc)->sc_debug) { \ 181 if_printf(&(sc)->arpcom.ac_if, \ 182 fmt, __VA_ARGS__); \ 183 } \ 184 } while (0) 185 186 #define DPRINTFN(sc, lv, fmt, ...) do { \ 187 if ((sc)->sc_debug >= (lv)) { \ 188 if_printf(&(sc)->arpcom.ac_if, \ 189 fmt, __VA_ARGS__); \ 190 } \ 191 } while (0) 192 193 #else /* !NFE_DEBUG */ 194 195 #define DPRINTF(sc, fmt, ...) 196 #define DPRINTFN(sc, lv, fmt, ...) 197 198 #endif /* NFE_DEBUG */ 199 200 static const struct nfe_dev { 201 uint16_t vid; 202 uint16_t did; 203 const char *desc; 204 } nfe_devices[] = { 205 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN, 206 "NVIDIA nForce Fast Ethernet" }, 207 208 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN, 209 "NVIDIA nForce2 Fast Ethernet" }, 210 211 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1, 212 "NVIDIA nForce3 Gigabit Ethernet" }, 213 214 /* XXX TGEN the next chip can also be found in the nForce2 Ultra 400Gb 215 chipset, and possibly also the 400R; it might be both nForce2- and 216 nForce3-based boards can use the same MCPs (= southbridges) */ 217 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2, 218 "NVIDIA nForce3 Gigabit Ethernet" }, 219 220 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3, 221 "NVIDIA nForce3 Gigabit Ethernet" }, 222 223 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4, 224 "NVIDIA nForce3 Gigabit Ethernet" }, 225 226 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5, 227 "NVIDIA nForce3 Gigabit Ethernet" }, 228 229 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1, 230 "NVIDIA CK804 Gigabit Ethernet" }, 231 232 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2, 233 "NVIDIA CK804 Gigabit Ethernet" }, 234 235 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1, 236 "NVIDIA MCP04 Gigabit Ethernet" }, 237 238 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2, 239 "NVIDIA MCP04 Gigabit Ethernet" }, 240 241 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1, 242 "NVIDIA MCP51 Gigabit Ethernet" }, 243 244 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2, 245 "NVIDIA MCP51 Gigabit Ethernet" }, 246 247 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1, 248 "NVIDIA MCP55 Gigabit Ethernet" }, 249 250 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2, 251 "NVIDIA MCP55 Gigabit Ethernet" }, 252 253 { PCI_VENDOR_NVIDIA, 
PCI_PRODUCT_NVIDIA_MCP61_LAN1, 254 "NVIDIA MCP61 Gigabit Ethernet" }, 255 256 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2, 257 "NVIDIA MCP61 Gigabit Ethernet" }, 258 259 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3, 260 "NVIDIA MCP61 Gigabit Ethernet" }, 261 262 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4, 263 "NVIDIA MCP61 Gigabit Ethernet" }, 264 265 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1, 266 "NVIDIA MCP65 Gigabit Ethernet" }, 267 268 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2, 269 "NVIDIA MCP65 Gigabit Ethernet" }, 270 271 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3, 272 "NVIDIA MCP65 Gigabit Ethernet" }, 273 274 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4, 275 "NVIDIA MCP65 Gigabit Ethernet" }, 276 277 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1, 278 "NVIDIA MCP67 Gigabit Ethernet" }, 279 280 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2, 281 "NVIDIA MCP67 Gigabit Ethernet" }, 282 283 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3, 284 "NVIDIA MCP67 Gigabit Ethernet" }, 285 286 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4, 287 "NVIDIA MCP67 Gigabit Ethernet" }, 288 289 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1, 290 "NVIDIA MCP73 Gigabit Ethernet" }, 291 292 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2, 293 "NVIDIA MCP73 Gigabit Ethernet" }, 294 295 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3, 296 "NVIDIA MCP73 Gigabit Ethernet" }, 297 298 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4, 299 "NVIDIA MCP73 Gigabit Ethernet" }, 300 301 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1, 302 "NVIDIA MCP77 Gigabit Ethernet" }, 303 304 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2, 305 "NVIDIA MCP77 Gigabit Ethernet" }, 306 307 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3, 308 "NVIDIA MCP77 Gigabit Ethernet" }, 309 310 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4, 311 "NVIDIA MCP77 Gigabit Ethernet" }, 312 313 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1, 314 "NVIDIA MCP79 Gigabit Ethernet" }, 315 316 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2, 317 "NVIDIA MCP79 Gigabit Ethernet" }, 318 319 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3, 320 "NVIDIA MCP79 Gigabit Ethernet" }, 321 322 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4, 323 "NVIDIA MCP79 Gigabit Ethernet" }, 324 325 { 0, 0, NULL } 326 }; 327 328 static device_method_t nfe_methods[] = { 329 /* Device interface */ 330 DEVMETHOD(device_probe, nfe_probe), 331 DEVMETHOD(device_attach, nfe_attach), 332 DEVMETHOD(device_detach, nfe_detach), 333 DEVMETHOD(device_suspend, nfe_suspend), 334 DEVMETHOD(device_resume, nfe_resume), 335 DEVMETHOD(device_shutdown, nfe_shutdown), 336 337 /* Bus interface */ 338 DEVMETHOD(bus_print_child, bus_generic_print_child), 339 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 340 341 /* MII interface */ 342 DEVMETHOD(miibus_readreg, nfe_miibus_readreg), 343 DEVMETHOD(miibus_writereg, nfe_miibus_writereg), 344 DEVMETHOD(miibus_statchg, nfe_miibus_statchg), 345 346 { 0, 0 } 347 }; 348 349 static driver_t nfe_driver = { 350 "nfe", 351 nfe_methods, 352 sizeof(struct nfe_softc) 353 }; 354 355 static devclass_t nfe_devclass; 356 357 DECLARE_DUMMY_MODULE(if_nfe); 358 MODULE_DEPEND(if_nfe, miibus, 1, 1, 1); 359 DRIVER_MODULE(if_nfe, pci, nfe_driver, nfe_devclass, NULL, NULL); 360 DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, NULL, NULL); 361 362 /* 363 * NOTE: NFE_WORDALIGN support is guesswork right now. 
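 * When the flag is set, nfe_newbuf_std() and nfe_newbuf_jumbo() shift the
 * receive payload by ETHER_ALIGN via m_adj() before the buffer is handed
 * to the chip.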
364 */ 365 static int 366 nfe_probe(device_t dev) 367 { 368 const struct nfe_dev *n; 369 uint16_t vid, did; 370 371 vid = pci_get_vendor(dev); 372 did = pci_get_device(dev); 373 for (n = nfe_devices; n->desc != NULL; ++n) { 374 if (vid == n->vid && did == n->did) { 375 struct nfe_softc *sc = device_get_softc(dev); 376 377 switch (did) { 378 case PCI_PRODUCT_NVIDIA_NFORCE_LAN: 379 case PCI_PRODUCT_NVIDIA_NFORCE2_LAN: 380 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN1: 381 sc->sc_caps = NFE_NO_PWRCTL | 382 NFE_FIX_EADDR; 383 break; 384 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2: 385 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3: 386 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4: 387 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5: 388 sc->sc_caps = NFE_JUMBO_SUP | 389 NFE_HW_CSUM | 390 NFE_NO_PWRCTL | 391 NFE_FIX_EADDR; 392 break; 393 case PCI_PRODUCT_NVIDIA_MCP51_LAN1: 394 case PCI_PRODUCT_NVIDIA_MCP51_LAN2: 395 sc->sc_caps = NFE_FIX_EADDR; 396 /* FALL THROUGH */ 397 case PCI_PRODUCT_NVIDIA_MCP61_LAN1: 398 case PCI_PRODUCT_NVIDIA_MCP61_LAN2: 399 case PCI_PRODUCT_NVIDIA_MCP61_LAN3: 400 case PCI_PRODUCT_NVIDIA_MCP61_LAN4: 401 case PCI_PRODUCT_NVIDIA_MCP67_LAN1: 402 case PCI_PRODUCT_NVIDIA_MCP67_LAN2: 403 case PCI_PRODUCT_NVIDIA_MCP67_LAN3: 404 case PCI_PRODUCT_NVIDIA_MCP67_LAN4: 405 case PCI_PRODUCT_NVIDIA_MCP73_LAN1: 406 case PCI_PRODUCT_NVIDIA_MCP73_LAN2: 407 case PCI_PRODUCT_NVIDIA_MCP73_LAN3: 408 case PCI_PRODUCT_NVIDIA_MCP73_LAN4: 409 sc->sc_caps |= NFE_40BIT_ADDR; 410 break; 411 case PCI_PRODUCT_NVIDIA_CK804_LAN1: 412 case PCI_PRODUCT_NVIDIA_CK804_LAN2: 413 case PCI_PRODUCT_NVIDIA_MCP04_LAN1: 414 case PCI_PRODUCT_NVIDIA_MCP04_LAN2: 415 sc->sc_caps = NFE_JUMBO_SUP | 416 NFE_40BIT_ADDR | 417 NFE_HW_CSUM | 418 NFE_NO_PWRCTL | 419 NFE_FIX_EADDR; 420 break; 421 case PCI_PRODUCT_NVIDIA_MCP65_LAN1: 422 case PCI_PRODUCT_NVIDIA_MCP65_LAN2: 423 case PCI_PRODUCT_NVIDIA_MCP65_LAN3: 424 case PCI_PRODUCT_NVIDIA_MCP65_LAN4: 425 sc->sc_caps = NFE_JUMBO_SUP | 426 NFE_40BIT_ADDR; 427 break; 428 case PCI_PRODUCT_NVIDIA_MCP55_LAN1: 429 case PCI_PRODUCT_NVIDIA_MCP55_LAN2: 430 sc->sc_caps = NFE_JUMBO_SUP | 431 NFE_40BIT_ADDR | 432 NFE_HW_CSUM | 433 NFE_HW_VLAN | 434 NFE_FIX_EADDR; 435 break; 436 case PCI_PRODUCT_NVIDIA_MCP77_LAN1: 437 case PCI_PRODUCT_NVIDIA_MCP77_LAN2: 438 case PCI_PRODUCT_NVIDIA_MCP77_LAN3: 439 case PCI_PRODUCT_NVIDIA_MCP77_LAN4: 440 case PCI_PRODUCT_NVIDIA_MCP79_LAN1: 441 case PCI_PRODUCT_NVIDIA_MCP79_LAN2: 442 case PCI_PRODUCT_NVIDIA_MCP79_LAN3: 443 case PCI_PRODUCT_NVIDIA_MCP79_LAN4: 444 sc->sc_caps = NFE_40BIT_ADDR | 445 NFE_HW_CSUM | 446 NFE_WORDALIGN; 447 break; 448 } 449 450 device_set_desc(dev, n->desc); 451 device_set_async_attach(dev, TRUE); 452 return 0; 453 } 454 } 455 return ENXIO; 456 } 457 458 static int 459 nfe_attach(device_t dev) 460 { 461 struct nfe_softc *sc = device_get_softc(dev); 462 struct ifnet *ifp = &sc->arpcom.ac_if; 463 uint8_t eaddr[ETHER_ADDR_LEN]; 464 bus_addr_t lowaddr; 465 int error; 466 467 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 468 lwkt_serialize_init(&sc->sc_jbuf_serializer); 469 470 /* 471 * Initialize sysctl variables 472 */ 473 sc->sc_rx_ring_count = nfe_rx_ring_count; 474 sc->sc_tx_ring_count = nfe_tx_ring_count; 475 sc->sc_debug = nfe_debug; 476 if (nfe_imtime < 0) { 477 sc->sc_flags |= NFE_F_DYN_IM; 478 sc->sc_imtime = -nfe_imtime; 479 } else { 480 sc->sc_imtime = nfe_imtime; 481 } 482 sc->sc_irq_enable = NFE_IRQ_ENABLE(sc); 483 484 sc->sc_mem_rid = PCIR_BAR(0); 485 486 if (sc->sc_caps & NFE_40BIT_ADDR) 487 sc->rxtxctl_desc = NFE_RXTX_DESC_V3; 488 else if (sc->sc_caps 
& NFE_JUMBO_SUP) 489 sc->rxtxctl_desc = NFE_RXTX_DESC_V2; 490 491 #ifndef BURN_BRIDGES 492 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) { 493 uint32_t mem, irq; 494 495 mem = pci_read_config(dev, sc->sc_mem_rid, 4); 496 irq = pci_read_config(dev, PCIR_INTLINE, 4); 497 498 device_printf(dev, "chip is in D%d power mode " 499 "-- setting to D0\n", pci_get_powerstate(dev)); 500 501 pci_set_powerstate(dev, PCI_POWERSTATE_D0); 502 503 pci_write_config(dev, sc->sc_mem_rid, mem, 4); 504 pci_write_config(dev, PCIR_INTLINE, irq, 4); 505 } 506 #endif /* !BURN_BRIDGE */ 507 508 /* Enable bus mastering */ 509 pci_enable_busmaster(dev); 510 511 /* Allocate IO memory */ 512 sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 513 &sc->sc_mem_rid, RF_ACTIVE); 514 if (sc->sc_mem_res == NULL) { 515 device_printf(dev, "could not allocate io memory\n"); 516 return ENXIO; 517 } 518 sc->sc_memh = rman_get_bushandle(sc->sc_mem_res); 519 sc->sc_memt = rman_get_bustag(sc->sc_mem_res); 520 521 /* Allocate IRQ */ 522 sc->sc_irq_rid = 0; 523 sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, 524 &sc->sc_irq_rid, 525 RF_SHAREABLE | RF_ACTIVE); 526 if (sc->sc_irq_res == NULL) { 527 device_printf(dev, "could not allocate irq\n"); 528 error = ENXIO; 529 goto fail; 530 } 531 532 /* Disable WOL */ 533 NFE_WRITE(sc, NFE_WOL_CTL, 0); 534 535 if ((sc->sc_caps & NFE_NO_PWRCTL) == 0) 536 nfe_powerup(dev); 537 538 nfe_get_macaddr(sc, eaddr); 539 540 /* 541 * Allocate top level DMA tag 542 */ 543 if (sc->sc_caps & NFE_40BIT_ADDR) 544 lowaddr = NFE_BUS_SPACE_MAXADDR; 545 else 546 lowaddr = BUS_SPACE_MAXADDR_32BIT; 547 error = bus_dma_tag_create(NULL, /* parent */ 548 1, 0, /* alignment, boundary */ 549 lowaddr, /* lowaddr */ 550 BUS_SPACE_MAXADDR, /* highaddr */ 551 NULL, NULL, /* filter, filterarg */ 552 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */ 553 0, /* nsegments */ 554 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 555 0, /* flags */ 556 &sc->sc_dtag); 557 if (error) { 558 device_printf(dev, "could not allocate parent dma tag\n"); 559 goto fail; 560 } 561 562 /* 563 * Allocate Tx and Rx rings. 564 */ 565 error = nfe_alloc_tx_ring(sc, &sc->txq); 566 if (error) { 567 device_printf(dev, "could not allocate Tx ring\n"); 568 goto fail; 569 } 570 571 error = nfe_alloc_rx_ring(sc, &sc->rxq); 572 if (error) { 573 device_printf(dev, "could not allocate Rx ring\n"); 574 goto fail; 575 } 576 577 /* 578 * Create sysctl tree 579 */ 580 sysctl_ctx_init(&sc->sc_sysctl_ctx); 581 sc->sc_sysctl_tree = SYSCTL_ADD_NODE(&sc->sc_sysctl_ctx, 582 SYSCTL_STATIC_CHILDREN(_hw), 583 OID_AUTO, 584 device_get_nameunit(dev), 585 CTLFLAG_RD, 0, ""); 586 if (sc->sc_sysctl_tree == NULL) { 587 device_printf(dev, "can't add sysctl node\n"); 588 error = ENXIO; 589 goto fail; 590 } 591 SYSCTL_ADD_PROC(&sc->sc_sysctl_ctx, 592 SYSCTL_CHILDREN(sc->sc_sysctl_tree), 593 OID_AUTO, "imtimer", CTLTYPE_INT | CTLFLAG_RW, 594 sc, 0, nfe_sysctl_imtime, "I", 595 "Interrupt moderation time (usec). 
" 596 "0 to disable interrupt moderation."); 597 SYSCTL_ADD_INT(&sc->sc_sysctl_ctx, 598 SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO, 599 "rx_ring_count", CTLFLAG_RD, &sc->sc_rx_ring_count, 600 0, "RX ring count"); 601 SYSCTL_ADD_INT(&sc->sc_sysctl_ctx, 602 SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO, 603 "tx_ring_count", CTLFLAG_RD, &sc->sc_tx_ring_count, 604 0, "TX ring count"); 605 SYSCTL_ADD_INT(&sc->sc_sysctl_ctx, 606 SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO, 607 "debug", CTLFLAG_RW, &sc->sc_debug, 608 0, "control debugging printfs"); 609 610 error = mii_phy_probe(dev, &sc->sc_miibus, nfe_ifmedia_upd, 611 nfe_ifmedia_sts); 612 if (error) { 613 device_printf(dev, "MII without any phy\n"); 614 goto fail; 615 } 616 617 ifp->if_softc = sc; 618 ifp->if_mtu = ETHERMTU; 619 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 620 ifp->if_ioctl = nfe_ioctl; 621 ifp->if_start = nfe_start; 622 #ifdef IFPOLL_ENABLE 623 ifp->if_npoll = nfe_npoll; 624 #endif 625 ifp->if_watchdog = nfe_watchdog; 626 ifp->if_init = nfe_init; 627 ifq_set_maxlen(&ifp->if_snd, sc->sc_tx_ring_count); 628 ifq_set_ready(&ifp->if_snd); 629 630 ifp->if_capabilities = IFCAP_VLAN_MTU; 631 632 if (sc->sc_caps & NFE_HW_VLAN) 633 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING; 634 635 #ifdef NFE_CSUM 636 if (sc->sc_caps & NFE_HW_CSUM) { 637 ifp->if_capabilities |= IFCAP_HWCSUM; 638 ifp->if_hwassist = NFE_CSUM_FEATURES; 639 } 640 #else 641 sc->sc_caps &= ~NFE_HW_CSUM; 642 #endif 643 ifp->if_capenable = ifp->if_capabilities; 644 645 callout_init(&sc->sc_tick_ch); 646 647 ether_ifattach(ifp, eaddr, NULL); 648 649 #ifdef IFPOLL_ENABLE 650 ifpoll_compat_setup(&sc->sc_npoll, 651 &sc->sc_sysctl_ctx, sc->sc_sysctl_tree, device_get_unit(dev), 652 ifp->if_serializer); 653 #endif 654 655 error = bus_setup_intr(dev, sc->sc_irq_res, INTR_MPSAFE, nfe_intr, sc, 656 &sc->sc_ih, ifp->if_serializer); 657 if (error) { 658 device_printf(dev, "could not setup intr\n"); 659 ether_ifdetach(ifp); 660 goto fail; 661 } 662 663 ifp->if_cpuid = rman_get_cpuid(sc->sc_irq_res); 664 KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus); 665 666 return 0; 667 fail: 668 nfe_detach(dev); 669 return error; 670 } 671 672 static int 673 nfe_detach(device_t dev) 674 { 675 struct nfe_softc *sc = device_get_softc(dev); 676 677 if (device_is_attached(dev)) { 678 struct ifnet *ifp = &sc->arpcom.ac_if; 679 680 lwkt_serialize_enter(ifp->if_serializer); 681 nfe_stop(sc); 682 bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_ih); 683 lwkt_serialize_exit(ifp->if_serializer); 684 685 ether_ifdetach(ifp); 686 } 687 688 if (sc->sc_miibus != NULL) 689 device_delete_child(dev, sc->sc_miibus); 690 bus_generic_detach(dev); 691 692 if (sc->sc_sysctl_tree != NULL) 693 sysctl_ctx_free(&sc->sc_sysctl_ctx); 694 695 if (sc->sc_irq_res != NULL) { 696 bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid, 697 sc->sc_irq_res); 698 } 699 700 if (sc->sc_mem_res != NULL) { 701 bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid, 702 sc->sc_mem_res); 703 } 704 705 nfe_free_tx_ring(sc, &sc->txq); 706 nfe_free_rx_ring(sc, &sc->rxq); 707 if (sc->sc_dtag != NULL) 708 bus_dma_tag_destroy(sc->sc_dtag); 709 710 return 0; 711 } 712 713 static void 714 nfe_shutdown(device_t dev) 715 { 716 struct nfe_softc *sc = device_get_softc(dev); 717 struct ifnet *ifp = &sc->arpcom.ac_if; 718 719 lwkt_serialize_enter(ifp->if_serializer); 720 nfe_stop(sc); 721 lwkt_serialize_exit(ifp->if_serializer); 722 } 723 724 static int 725 nfe_suspend(device_t dev) 726 { 727 struct nfe_softc *sc = 
device_get_softc(dev); 728 struct ifnet *ifp = &sc->arpcom.ac_if; 729 730 lwkt_serialize_enter(ifp->if_serializer); 731 nfe_stop(sc); 732 lwkt_serialize_exit(ifp->if_serializer); 733 734 return 0; 735 } 736 737 static int 738 nfe_resume(device_t dev) 739 { 740 struct nfe_softc *sc = device_get_softc(dev); 741 struct ifnet *ifp = &sc->arpcom.ac_if; 742 743 lwkt_serialize_enter(ifp->if_serializer); 744 if (ifp->if_flags & IFF_UP) 745 nfe_init(sc); 746 lwkt_serialize_exit(ifp->if_serializer); 747 748 return 0; 749 } 750 751 static void 752 nfe_miibus_statchg(device_t dev) 753 { 754 struct nfe_softc *sc = device_get_softc(dev); 755 struct mii_data *mii = device_get_softc(sc->sc_miibus); 756 uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET; 757 758 ASSERT_SERIALIZED(sc->arpcom.ac_if.if_serializer); 759 760 phy = NFE_READ(sc, NFE_PHY_IFACE); 761 phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T); 762 763 seed = NFE_READ(sc, NFE_RNDSEED); 764 seed &= ~NFE_SEED_MASK; 765 766 if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) { 767 phy |= NFE_PHY_HDX; /* half-duplex */ 768 misc |= NFE_MISC1_HDX; 769 } 770 771 switch (IFM_SUBTYPE(mii->mii_media_active)) { 772 case IFM_1000_T: /* full-duplex only */ 773 link |= NFE_MEDIA_1000T; 774 seed |= NFE_SEED_1000T; 775 phy |= NFE_PHY_1000T; 776 break; 777 case IFM_100_TX: 778 link |= NFE_MEDIA_100TX; 779 seed |= NFE_SEED_100TX; 780 phy |= NFE_PHY_100TX; 781 break; 782 case IFM_10_T: 783 link |= NFE_MEDIA_10T; 784 seed |= NFE_SEED_10T; 785 break; 786 } 787 788 NFE_WRITE(sc, NFE_RNDSEED, seed); /* XXX: gigabit NICs only? */ 789 790 NFE_WRITE(sc, NFE_PHY_IFACE, phy); 791 NFE_WRITE(sc, NFE_MISC1, misc); 792 NFE_WRITE(sc, NFE_LINKSPEED, link); 793 } 794 795 static int 796 nfe_miibus_readreg(device_t dev, int phy, int reg) 797 { 798 struct nfe_softc *sc = device_get_softc(dev); 799 uint32_t val; 800 int ntries; 801 802 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 803 804 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) { 805 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY); 806 DELAY(100); 807 } 808 809 NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg); 810 811 for (ntries = 0; ntries < 1000; ntries++) { 812 DELAY(100); 813 if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY)) 814 break; 815 } 816 if (ntries == 1000) { 817 DPRINTFN(sc, 2, "timeout waiting for PHY %s\n", ""); 818 return 0; 819 } 820 821 if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) { 822 DPRINTFN(sc, 2, "could not read PHY %s\n", ""); 823 return 0; 824 } 825 826 val = NFE_READ(sc, NFE_PHY_DATA); 827 if (val != 0xffffffff && val != 0) 828 sc->mii_phyaddr = phy; 829 830 DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val); 831 832 return val; 833 } 834 835 static void 836 nfe_miibus_writereg(device_t dev, int phy, int reg, int val) 837 { 838 struct nfe_softc *sc = device_get_softc(dev); 839 uint32_t ctl; 840 int ntries; 841 842 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 843 844 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) { 845 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY); 846 DELAY(100); 847 } 848 849 NFE_WRITE(sc, NFE_PHY_DATA, val); 850 ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg; 851 NFE_WRITE(sc, NFE_PHY_CTL, ctl); 852 853 for (ntries = 0; ntries < 1000; ntries++) { 854 DELAY(100); 855 if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY)) 856 break; 857 } 858 859 #ifdef NFE_DEBUG 860 if (ntries == 1000) 861 DPRINTFN(sc, 2, "could not write to PHY %s\n", ""); 862 #endif 863 } 864 865 #ifdef IFPOLL_ENABLE 866 867 static void 868 nfe_npoll_compat(struct ifnet *ifp, void *arg 
__unused, int count __unused) 869 { 870 struct nfe_softc *sc = ifp->if_softc; 871 872 ASSERT_SERIALIZED(ifp->if_serializer); 873 874 nfe_rxeof(sc); 875 nfe_txeof(sc, 1); 876 } 877 878 static void 879 nfe_disable_intrs(struct nfe_softc *sc) 880 { 881 /* Disable interrupts */ 882 NFE_WRITE(sc, NFE_IRQ_MASK, 0); 883 sc->sc_flags &= ~NFE_F_IRQ_TIMER; 884 sc->sc_npoll.ifpc_stcount = 0; 885 } 886 887 static void 888 nfe_npoll(struct ifnet *ifp, struct ifpoll_info *info) 889 { 890 struct nfe_softc *sc = ifp->if_softc; 891 892 ASSERT_SERIALIZED(ifp->if_serializer); 893 894 if (info != NULL) { 895 int cpuid = sc->sc_npoll.ifpc_cpuid; 896 897 info->ifpi_rx[cpuid].poll_func = nfe_npoll_compat; 898 info->ifpi_rx[cpuid].arg = NULL; 899 info->ifpi_rx[cpuid].serializer = ifp->if_serializer; 900 901 if (ifp->if_flags & IFF_RUNNING) 902 nfe_disable_intrs(sc); 903 ifp->if_npoll_cpuid = cpuid; 904 } else { 905 if (ifp->if_flags & IFF_RUNNING) 906 nfe_enable_intrs(sc); 907 ifp->if_npoll_cpuid = -1; 908 } 909 } 910 911 #endif /* IFPOLL_ENABLE */ 912 913 static void 914 nfe_intr(void *arg) 915 { 916 struct nfe_softc *sc = arg; 917 struct ifnet *ifp = &sc->arpcom.ac_if; 918 uint32_t r; 919 920 r = NFE_READ(sc, NFE_IRQ_STATUS); 921 if (r == 0) 922 return; /* not for us */ 923 NFE_WRITE(sc, NFE_IRQ_STATUS, r); 924 925 if (sc->sc_rate_second != time_second) { 926 /* 927 * Calculate sc_rate_avg - interrupts per second. 928 */ 929 sc->sc_rate_second = time_second; 930 if (sc->sc_rate_avg < sc->sc_rate_acc) 931 sc->sc_rate_avg = sc->sc_rate_acc; 932 else 933 sc->sc_rate_avg = (sc->sc_rate_avg * 3 + 934 sc->sc_rate_acc) / 4; 935 sc->sc_rate_acc = 0; 936 } else if (sc->sc_rate_avg < sc->sc_rate_acc) { 937 /* 938 * Don't wait for a tick to roll over if we are taking 939 * a lot of interrupts. 940 */ 941 sc->sc_rate_avg = sc->sc_rate_acc; 942 } 943 944 DPRINTFN(sc, 5, "%s: interrupt register %x\n", __func__, r); 945 946 if (r & NFE_IRQ_LINK) { 947 NFE_READ(sc, NFE_PHY_STATUS); 948 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 949 DPRINTF(sc, "link state changed %s\n", ""); 950 } 951 952 if (ifp->if_flags & IFF_RUNNING) { 953 int ret; 954 int rate; 955 956 /* check Rx ring */ 957 ret = nfe_rxeof(sc); 958 959 /* check Tx ring */ 960 ret |= nfe_txeof(sc, 1); 961 962 /* update the rate accumulator */ 963 if (ret) 964 ++sc->sc_rate_acc; 965 966 if (sc->sc_flags & NFE_F_DYN_IM) { 967 rate = 1000000 / sc->sc_imtime; 968 if ((sc->sc_flags & NFE_F_IRQ_TIMER) == 0 && 969 sc->sc_rate_avg > rate) { 970 /* 971 * Use the hardware timer to reduce the 972 * interrupt rate if the discrete interrupt 973 * rate has exceeded our threshold. 974 */ 975 NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_IMTIMER); 976 sc->sc_flags |= NFE_F_IRQ_TIMER; 977 } else if ((sc->sc_flags & NFE_F_IRQ_TIMER) && 978 sc->sc_rate_avg <= rate) { 979 /* 980 * Use discrete TX/RX interrupts if the rate 981 * has fallen below our threshold. 982 */ 983 NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_NOIMTIMER); 984 sc->sc_flags &= ~NFE_F_IRQ_TIMER; 985 986 /* 987 * Recollect, mainly to avoid the possible race 988 * introduced by changing interrupt masks. 
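				 * Completions that arrive while the mask is
				 * being switched could otherwise go unnoticed
				 * until the next interrupt fires.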
989 */ 990 nfe_rxeof(sc); 991 nfe_txeof(sc, 1); 992 } 993 } 994 } 995 } 996 997 static int 998 nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr) 999 { 1000 struct nfe_softc *sc = ifp->if_softc; 1001 struct ifreq *ifr = (struct ifreq *)data; 1002 struct mii_data *mii; 1003 int error = 0, mask, jumbo_cap; 1004 1005 ASSERT_SERIALIZED(ifp->if_serializer); 1006 1007 switch (cmd) { 1008 case SIOCSIFMTU: 1009 if ((sc->sc_caps & NFE_JUMBO_SUP) && sc->rxq.jbuf != NULL) 1010 jumbo_cap = 1; 1011 else 1012 jumbo_cap = 0; 1013 1014 if ((jumbo_cap && ifr->ifr_mtu > NFE_JUMBO_MTU) || 1015 (!jumbo_cap && ifr->ifr_mtu > ETHERMTU)) { 1016 return EINVAL; 1017 } else if (ifp->if_mtu != ifr->ifr_mtu) { 1018 ifp->if_mtu = ifr->ifr_mtu; 1019 if (ifp->if_flags & IFF_RUNNING) 1020 nfe_init(sc); 1021 } 1022 break; 1023 case SIOCSIFFLAGS: 1024 if (ifp->if_flags & IFF_UP) { 1025 /* 1026 * If only the PROMISC or ALLMULTI flag changes, then 1027 * don't do a full re-init of the chip, just update 1028 * the Rx filter. 1029 */ 1030 if ((ifp->if_flags & IFF_RUNNING) && 1031 ((ifp->if_flags ^ sc->sc_if_flags) & 1032 (IFF_ALLMULTI | IFF_PROMISC)) != 0) { 1033 nfe_setmulti(sc); 1034 } else { 1035 if (!(ifp->if_flags & IFF_RUNNING)) 1036 nfe_init(sc); 1037 } 1038 } else { 1039 if (ifp->if_flags & IFF_RUNNING) 1040 nfe_stop(sc); 1041 } 1042 sc->sc_if_flags = ifp->if_flags; 1043 break; 1044 case SIOCADDMULTI: 1045 case SIOCDELMULTI: 1046 if (ifp->if_flags & IFF_RUNNING) 1047 nfe_setmulti(sc); 1048 break; 1049 case SIOCSIFMEDIA: 1050 case SIOCGIFMEDIA: 1051 mii = device_get_softc(sc->sc_miibus); 1052 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); 1053 break; 1054 case SIOCSIFCAP: 1055 mask = (ifr->ifr_reqcap ^ ifp->if_capenable) & IFCAP_HWCSUM; 1056 if (mask && (ifp->if_capabilities & IFCAP_HWCSUM)) { 1057 ifp->if_capenable ^= mask; 1058 if (IFCAP_TXCSUM & ifp->if_capenable) 1059 ifp->if_hwassist = NFE_CSUM_FEATURES; 1060 else 1061 ifp->if_hwassist = 0; 1062 1063 if (ifp->if_flags & IFF_RUNNING) 1064 nfe_init(sc); 1065 } 1066 break; 1067 default: 1068 error = ether_ioctl(ifp, cmd, data); 1069 break; 1070 } 1071 return error; 1072 } 1073 1074 static int 1075 nfe_rxeof(struct nfe_softc *sc) 1076 { 1077 struct ifnet *ifp = &sc->arpcom.ac_if; 1078 struct nfe_rx_ring *ring = &sc->rxq; 1079 int reap; 1080 1081 reap = 0; 1082 for (;;) { 1083 struct nfe_rx_data *data = &ring->data[ring->cur]; 1084 struct mbuf *m; 1085 uint16_t flags; 1086 int len, error; 1087 1088 if (sc->sc_caps & NFE_40BIT_ADDR) { 1089 struct nfe_desc64 *desc64 = &ring->desc64[ring->cur]; 1090 1091 flags = le16toh(desc64->flags); 1092 len = le16toh(desc64->length) & 0x3fff; 1093 } else { 1094 struct nfe_desc32 *desc32 = &ring->desc32[ring->cur]; 1095 1096 flags = le16toh(desc32->flags); 1097 len = le16toh(desc32->length) & 0x3fff; 1098 } 1099 1100 if (flags & NFE_RX_READY) 1101 break; 1102 1103 reap = 1; 1104 1105 if ((sc->sc_caps & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) { 1106 if (!(flags & NFE_RX_VALID_V1)) 1107 goto skip; 1108 1109 if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) { 1110 flags &= ~NFE_RX_ERROR; 1111 len--; /* fix buffer length */ 1112 } 1113 } else { 1114 if (!(flags & NFE_RX_VALID_V2)) 1115 goto skip; 1116 1117 if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) { 1118 flags &= ~NFE_RX_ERROR; 1119 len--; /* fix buffer length */ 1120 } 1121 } 1122 1123 if (flags & NFE_RX_ERROR) { 1124 ifp->if_ierrors++; 1125 goto skip; 1126 } 1127 1128 m = data->m; 1129 1130 if (sc->sc_flags & NFE_F_USE_JUMBO) 1131 error = 
nfe_newbuf_jumbo(sc, ring, ring->cur, 0); 1132 else 1133 error = nfe_newbuf_std(sc, ring, ring->cur, 0); 1134 if (error) { 1135 ifp->if_ierrors++; 1136 goto skip; 1137 } 1138 1139 /* finalize mbuf */ 1140 m->m_pkthdr.len = m->m_len = len; 1141 m->m_pkthdr.rcvif = ifp; 1142 1143 if ((ifp->if_capenable & IFCAP_RXCSUM) && 1144 (flags & NFE_RX_CSUMOK)) { 1145 if (flags & NFE_RX_IP_CSUMOK_V2) { 1146 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | 1147 CSUM_IP_VALID; 1148 } 1149 1150 if (flags & 1151 (NFE_RX_UDP_CSUMOK_V2 | NFE_RX_TCP_CSUMOK_V2)) { 1152 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | 1153 CSUM_PSEUDO_HDR | 1154 CSUM_FRAG_NOT_CHECKED; 1155 m->m_pkthdr.csum_data = 0xffff; 1156 } 1157 } 1158 1159 ifp->if_ipackets++; 1160 ifp->if_input(ifp, m); 1161 skip: 1162 nfe_set_ready_rxdesc(sc, ring, ring->cur); 1163 sc->rxq.cur = (sc->rxq.cur + 1) % sc->sc_rx_ring_count; 1164 } 1165 return reap; 1166 } 1167 1168 static int 1169 nfe_txeof(struct nfe_softc *sc, int start) 1170 { 1171 struct ifnet *ifp = &sc->arpcom.ac_if; 1172 struct nfe_tx_ring *ring = &sc->txq; 1173 struct nfe_tx_data *data = NULL; 1174 1175 while (ring->next != ring->cur) { 1176 uint16_t flags; 1177 1178 if (sc->sc_caps & NFE_40BIT_ADDR) 1179 flags = le16toh(ring->desc64[ring->next].flags); 1180 else 1181 flags = le16toh(ring->desc32[ring->next].flags); 1182 1183 if (flags & NFE_TX_VALID) 1184 break; 1185 1186 data = &ring->data[ring->next]; 1187 1188 if ((sc->sc_caps & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) { 1189 if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL) 1190 goto skip; 1191 1192 if ((flags & NFE_TX_ERROR_V1) != 0) { 1193 if_printf(ifp, "tx v1 error 0x%4b\n", flags, 1194 NFE_V1_TXERR); 1195 ifp->if_oerrors++; 1196 } else { 1197 ifp->if_opackets++; 1198 } 1199 } else { 1200 if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL) 1201 goto skip; 1202 1203 if ((flags & NFE_TX_ERROR_V2) != 0) { 1204 if_printf(ifp, "tx v2 error 0x%4b\n", flags, 1205 NFE_V2_TXERR); 1206 ifp->if_oerrors++; 1207 } else { 1208 ifp->if_opackets++; 1209 } 1210 } 1211 1212 if (data->m == NULL) { /* should not get there */ 1213 if_printf(ifp, 1214 "last fragment bit w/o associated mbuf!\n"); 1215 goto skip; 1216 } 1217 1218 /* last fragment of the mbuf chain transmitted */ 1219 bus_dmamap_unload(ring->data_tag, data->map); 1220 m_freem(data->m); 1221 data->m = NULL; 1222 skip: 1223 ring->queued--; 1224 KKASSERT(ring->queued >= 0); 1225 ring->next = (ring->next + 1) % sc->sc_tx_ring_count; 1226 } 1227 1228 if (sc->sc_tx_ring_count - ring->queued >= 1229 sc->sc_tx_spare + NFE_NSEG_RSVD) 1230 ifp->if_flags &= ~IFF_OACTIVE; 1231 1232 if (ring->queued == 0) 1233 ifp->if_timer = 0; 1234 1235 if (start && !ifq_is_empty(&ifp->if_snd)) 1236 if_devstart(ifp); 1237 1238 if (data != NULL) 1239 return 1; 1240 else 1241 return 0; 1242 } 1243 1244 static int 1245 nfe_encap(struct nfe_softc *sc, struct nfe_tx_ring *ring, struct mbuf *m0) 1246 { 1247 bus_dma_segment_t segs[NFE_MAX_SCATTER]; 1248 struct nfe_tx_data *data, *data_map; 1249 bus_dmamap_t map; 1250 struct nfe_desc64 *desc64 = NULL; 1251 struct nfe_desc32 *desc32 = NULL; 1252 uint16_t flags = 0; 1253 uint32_t vtag = 0; 1254 int error, i, j, maxsegs, nsegs; 1255 1256 data = &ring->data[ring->cur]; 1257 map = data->map; 1258 data_map = data; /* Remember who owns the DMA map */ 1259 1260 maxsegs = (sc->sc_tx_ring_count - ring->queued) - NFE_NSEG_RSVD; 1261 if (maxsegs > NFE_MAX_SCATTER) 1262 maxsegs = NFE_MAX_SCATTER; 1263 KASSERT(maxsegs >= sc->sc_tx_spare, 1264 ("not enough segments %d,%d", maxsegs, 
sc->sc_tx_spare)); 1265 1266 error = bus_dmamap_load_mbuf_defrag(ring->data_tag, map, &m0, 1267 segs, maxsegs, &nsegs, BUS_DMA_NOWAIT); 1268 if (error) 1269 goto back; 1270 bus_dmamap_sync(ring->data_tag, map, BUS_DMASYNC_PREWRITE); 1271 1272 error = 0; 1273 1274 /* setup h/w VLAN tagging */ 1275 if (m0->m_flags & M_VLANTAG) 1276 vtag = m0->m_pkthdr.ether_vlantag; 1277 1278 if (sc->arpcom.ac_if.if_capenable & IFCAP_TXCSUM) { 1279 if (m0->m_pkthdr.csum_flags & CSUM_IP) 1280 flags |= NFE_TX_IP_CSUM; 1281 if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) 1282 flags |= NFE_TX_TCP_CSUM; 1283 } 1284 1285 /* 1286 * XXX urm. somebody is unaware of how hardware works. You 1287 * absolutely CANNOT set NFE_TX_VALID on the next descriptor in 1288 * the ring until the entire chain is actually *VALID*. Otherwise 1289 * the hardware may encounter a partially initialized chain that 1290 * is marked as being ready to go when it in fact is not ready to 1291 * go. 1292 */ 1293 1294 for (i = 0; i < nsegs; i++) { 1295 j = (ring->cur + i) % sc->sc_tx_ring_count; 1296 data = &ring->data[j]; 1297 1298 if (sc->sc_caps & NFE_40BIT_ADDR) { 1299 desc64 = &ring->desc64[j]; 1300 desc64->physaddr[0] = 1301 htole32(NFE_ADDR_HI(segs[i].ds_addr)); 1302 desc64->physaddr[1] = 1303 htole32(NFE_ADDR_LO(segs[i].ds_addr)); 1304 desc64->length = htole16(segs[i].ds_len - 1); 1305 desc64->vtag = htole32(vtag); 1306 desc64->flags = htole16(flags); 1307 } else { 1308 desc32 = &ring->desc32[j]; 1309 desc32->physaddr = htole32(segs[i].ds_addr); 1310 desc32->length = htole16(segs[i].ds_len - 1); 1311 desc32->flags = htole16(flags); 1312 } 1313 1314 /* csum flags and vtag belong to the first fragment only */ 1315 flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM); 1316 vtag = 0; 1317 1318 ring->queued++; 1319 KKASSERT(ring->queued <= sc->sc_tx_ring_count); 1320 } 1321 1322 /* the whole mbuf chain has been DMA mapped, fix last descriptor */ 1323 if (sc->sc_caps & NFE_40BIT_ADDR) { 1324 desc64->flags |= htole16(NFE_TX_LASTFRAG_V2); 1325 } else { 1326 if (sc->sc_caps & NFE_JUMBO_SUP) 1327 flags = NFE_TX_LASTFRAG_V2; 1328 else 1329 flags = NFE_TX_LASTFRAG_V1; 1330 desc32->flags |= htole16(flags); 1331 } 1332 1333 /* 1334 * Set NFE_TX_VALID backwards so the hardware doesn't see the 1335 * whole mess until the first descriptor in the map is flagged. 
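	 * The loop above filled the descriptors front-to-back with
	 * NFE_TX_VALID clear; walking backwards here makes the first
	 * descriptor the last one to become valid.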
1336 */ 1337 for (i = nsegs - 1; i >= 0; --i) { 1338 j = (ring->cur + i) % sc->sc_tx_ring_count; 1339 if (sc->sc_caps & NFE_40BIT_ADDR) { 1340 desc64 = &ring->desc64[j]; 1341 desc64->flags |= htole16(NFE_TX_VALID); 1342 } else { 1343 desc32 = &ring->desc32[j]; 1344 desc32->flags |= htole16(NFE_TX_VALID); 1345 } 1346 } 1347 ring->cur = (ring->cur + nsegs) % sc->sc_tx_ring_count; 1348 1349 /* Exchange DMA map */ 1350 data_map->map = data->map; 1351 data->map = map; 1352 data->m = m0; 1353 back: 1354 if (error) 1355 m_freem(m0); 1356 return error; 1357 } 1358 1359 static void 1360 nfe_start(struct ifnet *ifp) 1361 { 1362 struct nfe_softc *sc = ifp->if_softc; 1363 struct nfe_tx_ring *ring = &sc->txq; 1364 int count = 0, oactive = 0; 1365 struct mbuf *m0; 1366 1367 ASSERT_SERIALIZED(ifp->if_serializer); 1368 1369 if ((ifp->if_flags & (IFF_OACTIVE | IFF_RUNNING)) != IFF_RUNNING) 1370 return; 1371 1372 for (;;) { 1373 int error; 1374 1375 if (sc->sc_tx_ring_count - ring->queued < 1376 sc->sc_tx_spare + NFE_NSEG_RSVD) { 1377 if (oactive) { 1378 ifp->if_flags |= IFF_OACTIVE; 1379 break; 1380 } 1381 1382 nfe_txeof(sc, 0); 1383 oactive = 1; 1384 continue; 1385 } 1386 1387 m0 = ifq_dequeue(&ifp->if_snd, NULL); 1388 if (m0 == NULL) 1389 break; 1390 1391 ETHER_BPF_MTAP(ifp, m0); 1392 1393 error = nfe_encap(sc, ring, m0); 1394 if (error) { 1395 ifp->if_oerrors++; 1396 if (error == EFBIG) { 1397 if (oactive) { 1398 ifp->if_flags |= IFF_OACTIVE; 1399 break; 1400 } 1401 nfe_txeof(sc, 0); 1402 oactive = 1; 1403 } 1404 continue; 1405 } else { 1406 oactive = 0; 1407 } 1408 ++count; 1409 1410 /* 1411 * NOTE: 1412 * `m0' may be freed in nfe_encap(), so 1413 * it should not be touched any more. 1414 */ 1415 } 1416 1417 if (count == 0) /* nothing sent */ 1418 return; 1419 1420 /* Kick Tx */ 1421 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl); 1422 1423 /* 1424 * Set a timeout in case the chip goes out to lunch. 1425 */ 1426 ifp->if_timer = 5; 1427 } 1428 1429 static void 1430 nfe_watchdog(struct ifnet *ifp) 1431 { 1432 struct nfe_softc *sc = ifp->if_softc; 1433 1434 ASSERT_SERIALIZED(ifp->if_serializer); 1435 1436 if (ifp->if_flags & IFF_RUNNING) { 1437 if_printf(ifp, "watchdog timeout - lost interrupt recovered\n"); 1438 nfe_txeof(sc, 1); 1439 return; 1440 } 1441 1442 if_printf(ifp, "watchdog timeout\n"); 1443 1444 nfe_init(ifp->if_softc); 1445 1446 ifp->if_oerrors++; 1447 } 1448 1449 static void 1450 nfe_init(void *xsc) 1451 { 1452 struct nfe_softc *sc = xsc; 1453 struct ifnet *ifp = &sc->arpcom.ac_if; 1454 uint32_t tmp; 1455 int error; 1456 1457 ASSERT_SERIALIZED(ifp->if_serializer); 1458 1459 nfe_stop(sc); 1460 1461 if ((sc->sc_caps & NFE_NO_PWRCTL) == 0) 1462 nfe_mac_reset(sc); 1463 1464 /* 1465 * NOTE: 1466 * Switching between jumbo frames and normal frames should 1467 * be done _after_ nfe_stop() but _before_ nfe_init_rx_ring(). 
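	 * NFE_F_USE_JUMBO selects which buffer allocator
	 * nfe_init_rx_ring() uses; rxq.bufsz and sc_tx_spare are
	 * sized to match below.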
1468 */ 1469 if (ifp->if_mtu > ETHERMTU) { 1470 sc->sc_flags |= NFE_F_USE_JUMBO; 1471 sc->rxq.bufsz = NFE_JBYTES; 1472 sc->sc_tx_spare = NFE_NSEG_SPARE_JUMBO; 1473 if (bootverbose) 1474 if_printf(ifp, "use jumbo frames\n"); 1475 } else { 1476 sc->sc_flags &= ~NFE_F_USE_JUMBO; 1477 sc->rxq.bufsz = MCLBYTES; 1478 sc->sc_tx_spare = NFE_NSEG_SPARE; 1479 if (bootverbose) 1480 if_printf(ifp, "use non-jumbo frames\n"); 1481 } 1482 1483 error = nfe_init_tx_ring(sc, &sc->txq); 1484 if (error) { 1485 nfe_stop(sc); 1486 return; 1487 } 1488 1489 error = nfe_init_rx_ring(sc, &sc->rxq); 1490 if (error) { 1491 nfe_stop(sc); 1492 return; 1493 } 1494 1495 NFE_WRITE(sc, NFE_TX_POLL, 0); 1496 NFE_WRITE(sc, NFE_STATUS, 0); 1497 1498 sc->rxtxctl = NFE_RXTX_BIT2 | sc->rxtxctl_desc; 1499 1500 if (ifp->if_capenable & IFCAP_RXCSUM) 1501 sc->rxtxctl |= NFE_RXTX_RXCSUM; 1502 1503 /* 1504 * Although the adapter is capable of stripping VLAN tags from received 1505 * frames (NFE_RXTX_VTAG_STRIP), we do not enable this functionality on 1506 * purpose. This will be done in software by our network stack. 1507 */ 1508 if (sc->sc_caps & NFE_HW_VLAN) 1509 sc->rxtxctl |= NFE_RXTX_VTAG_INSERT; 1510 1511 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl); 1512 DELAY(10); 1513 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl); 1514 1515 if (sc->sc_caps & NFE_HW_VLAN) 1516 NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE); 1517 1518 NFE_WRITE(sc, NFE_SETUP_R6, 0); 1519 1520 /* set MAC address */ 1521 nfe_set_macaddr(sc, sc->arpcom.ac_enaddr); 1522 1523 /* tell MAC where rings are in memory */ 1524 if (sc->sc_caps & NFE_40BIT_ADDR) { 1525 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, 1526 NFE_ADDR_HI(sc->rxq.physaddr)); 1527 } 1528 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, NFE_ADDR_LO(sc->rxq.physaddr)); 1529 1530 if (sc->sc_caps & NFE_40BIT_ADDR) { 1531 NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, 1532 NFE_ADDR_HI(sc->txq.physaddr)); 1533 } 1534 NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, NFE_ADDR_LO(sc->txq.physaddr)); 1535 1536 NFE_WRITE(sc, NFE_RING_SIZE, 1537 (sc->sc_rx_ring_count - 1) << 16 | 1538 (sc->sc_tx_ring_count - 1)); 1539 1540 NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz); 1541 1542 /* force MAC to wakeup */ 1543 tmp = NFE_READ(sc, NFE_PWR_STATE); 1544 NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP); 1545 DELAY(10); 1546 tmp = NFE_READ(sc, NFE_PWR_STATE); 1547 NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID); 1548 1549 NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC); 1550 NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC); 1551 NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC); 1552 1553 /* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */ 1554 NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC); 1555 1556 NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC); 1557 1558 sc->rxtxctl &= ~NFE_RXTX_BIT2; 1559 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl); 1560 DELAY(10); 1561 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl); 1562 1563 /* set Rx filter */ 1564 nfe_setmulti(sc); 1565 1566 nfe_ifmedia_upd(ifp); 1567 1568 /* enable Rx */ 1569 NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START); 1570 1571 /* enable Tx */ 1572 NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START); 1573 1574 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 1575 1576 #ifdef IFPOLL_ENABLE 1577 if (ifp->if_flags & IFF_NPOLLING) 1578 nfe_disable_intrs(sc); 1579 else 1580 #endif 1581 nfe_enable_intrs(sc); 1582 1583 callout_reset(&sc->sc_tick_ch, hz, nfe_tick, sc); 1584 1585 ifp->if_flags |= IFF_RUNNING; 1586 ifp->if_flags &= ~IFF_OACTIVE; 1587 1588 /* 1589 * If we had stuff in the tx ring before its all cleaned out now 1590 * so 
we are not going to get an interrupt, jump-start any pending 1591 * output. 1592 */ 1593 if (!ifq_is_empty(&ifp->if_snd)) 1594 if_devstart(ifp); 1595 } 1596 1597 static void 1598 nfe_stop(struct nfe_softc *sc) 1599 { 1600 struct ifnet *ifp = &sc->arpcom.ac_if; 1601 uint32_t rxtxctl = sc->rxtxctl_desc | NFE_RXTX_BIT2; 1602 int i; 1603 1604 ASSERT_SERIALIZED(ifp->if_serializer); 1605 1606 callout_stop(&sc->sc_tick_ch); 1607 1608 ifp->if_timer = 0; 1609 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1610 sc->sc_flags &= ~NFE_F_IRQ_TIMER; 1611 1612 #define WAITMAX 50000 1613 1614 /* 1615 * Abort Tx 1616 */ 1617 NFE_WRITE(sc, NFE_TX_CTL, 0); 1618 for (i = 0; i < WAITMAX; ++i) { 1619 DELAY(100); 1620 if ((NFE_READ(sc, NFE_TX_STATUS) & NFE_TX_STATUS_BUSY) == 0) 1621 break; 1622 } 1623 if (i == WAITMAX) 1624 if_printf(ifp, "can't stop TX\n"); 1625 DELAY(100); 1626 1627 /* 1628 * Disable Rx 1629 */ 1630 NFE_WRITE(sc, NFE_RX_CTL, 0); 1631 for (i = 0; i < WAITMAX; ++i) { 1632 DELAY(100); 1633 if ((NFE_READ(sc, NFE_RX_STATUS) & NFE_RX_STATUS_BUSY) == 0) 1634 break; 1635 } 1636 if (i == WAITMAX) 1637 if_printf(ifp, "can't stop RX\n"); 1638 DELAY(100); 1639 1640 #undef WAITMAX 1641 1642 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | rxtxctl); 1643 DELAY(10); 1644 NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl); 1645 1646 /* Disable interrupts */ 1647 NFE_WRITE(sc, NFE_IRQ_MASK, 0); 1648 1649 /* Reset Tx and Rx rings */ 1650 nfe_reset_tx_ring(sc, &sc->txq); 1651 nfe_reset_rx_ring(sc, &sc->rxq); 1652 } 1653 1654 static int 1655 nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1656 { 1657 int i, j, error, descsize; 1658 bus_dmamem_t dmem; 1659 void **desc; 1660 1661 if (sc->sc_caps & NFE_40BIT_ADDR) { 1662 desc = (void *)&ring->desc64; 1663 descsize = sizeof(struct nfe_desc64); 1664 } else { 1665 desc = (void *)&ring->desc32; 1666 descsize = sizeof(struct nfe_desc32); 1667 } 1668 1669 ring->bufsz = MCLBYTES; 1670 ring->cur = ring->next = 0; 1671 1672 error = bus_dmamem_coherent(sc->sc_dtag, PAGE_SIZE, 0, 1673 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 1674 sc->sc_rx_ring_count * descsize, 1675 BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem); 1676 if (error) { 1677 if_printf(&sc->arpcom.ac_if, 1678 "could not create RX desc ring\n"); 1679 return error; 1680 } 1681 ring->tag = dmem.dmem_tag; 1682 ring->map = dmem.dmem_map; 1683 *desc = dmem.dmem_addr; 1684 ring->physaddr = dmem.dmem_busaddr; 1685 1686 if (sc->sc_caps & NFE_JUMBO_SUP) { 1687 ring->jbuf = 1688 kmalloc(sizeof(struct nfe_jbuf) * NFE_JPOOL_COUNT(sc), 1689 M_DEVBUF, M_WAITOK | M_ZERO); 1690 1691 error = nfe_jpool_alloc(sc, ring); 1692 if (error) { 1693 if_printf(&sc->arpcom.ac_if, 1694 "could not allocate jumbo frames\n"); 1695 kfree(ring->jbuf, M_DEVBUF); 1696 ring->jbuf = NULL; 1697 /* Allow jumbo frame allocation to fail */ 1698 } 1699 } 1700 1701 ring->data = kmalloc(sizeof(struct nfe_rx_data) * sc->sc_rx_ring_count, 1702 M_DEVBUF, M_WAITOK | M_ZERO); 1703 1704 error = bus_dma_tag_create(sc->sc_dtag, 1, 0, 1705 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 1706 NULL, NULL, 1707 MCLBYTES, 1, MCLBYTES, 1708 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK, 1709 &ring->data_tag); 1710 if (error) { 1711 if_printf(&sc->arpcom.ac_if, 1712 "could not create RX mbuf DMA tag\n"); 1713 return error; 1714 } 1715 1716 /* Create a spare RX mbuf DMA map */ 1717 error = bus_dmamap_create(ring->data_tag, BUS_DMA_WAITOK, 1718 &ring->data_tmpmap); 1719 if (error) { 1720 if_printf(&sc->arpcom.ac_if, 1721 "could not create spare RX mbuf DMA map\n"); 1722 bus_dma_tag_destroy(ring->data_tag); 1723 
		ring->data_tag = NULL;
		return error;
	}

	for (i = 0; i < sc->sc_rx_ring_count; i++) {
		error = bus_dmamap_create(ring->data_tag, BUS_DMA_WAITOK,
					  &ring->data[i].map);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not create %dth RX mbuf DMA map\n", i);
			goto fail;
		}
	}
	return 0;
fail:
	for (j = 0; j < i; ++j)
		bus_dmamap_destroy(ring->data_tag, ring->data[j].map);
	bus_dmamap_destroy(ring->data_tag, ring->data_tmpmap);
	bus_dma_tag_destroy(ring->data_tag);
	ring->data_tag = NULL;
	return error;
}

static void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < sc->sc_rx_ring_count; i++) {
		struct nfe_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			if ((sc->sc_flags & NFE_F_USE_JUMBO) == 0)
				bus_dmamap_unload(ring->data_tag, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	ring->cur = ring->next = 0;
}

static int
nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < sc->sc_rx_ring_count; ++i) {
		int error;

		/* XXX should use a function pointer */
		if (sc->sc_flags & NFE_F_USE_JUMBO)
			error = nfe_newbuf_jumbo(sc, ring, i, 1);
		else
			error = nfe_newbuf_std(sc, ring, i, 1);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not allocate RX buffer\n");
			return error;
		}
		nfe_set_ready_rxdesc(sc, ring, i);
	}
	return 0;
}

static void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	if (ring->data_tag != NULL) {
		struct nfe_rx_data *data;
		int i;

		for (i = 0; i < sc->sc_rx_ring_count; i++) {
			data = &ring->data[i];

			if (data->m != NULL) {
				bus_dmamap_unload(ring->data_tag, data->map);
				m_freem(data->m);
			}
			bus_dmamap_destroy(ring->data_tag, data->map);
		}
		bus_dmamap_destroy(ring->data_tag, ring->data_tmpmap);
		bus_dma_tag_destroy(ring->data_tag);
	}

	nfe_jpool_free(sc, ring);

	if (ring->jbuf != NULL)
		kfree(ring->jbuf, M_DEVBUF);
	if (ring->data != NULL)
		kfree(ring->data, M_DEVBUF);

	if (ring->tag != NULL) {
		void *desc;

		if (sc->sc_caps & NFE_40BIT_ADDR)
			desc = ring->desc64;
		else
			desc = ring->desc32;

		bus_dmamap_unload(ring->tag, ring->map);
		bus_dmamem_free(ring->tag, desc, ring->map);
		bus_dma_tag_destroy(ring->tag);
	}
}

static struct nfe_jbuf *
nfe_jalloc(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct nfe_jbuf *jbuf;

	lwkt_serialize_enter(&sc->sc_jbuf_serializer);

	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
	if (jbuf != NULL) {
		SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
		jbuf->inuse = 1;
	} else {
		if_printf(ifp, "no free jumbo buffer\n");
	}

	lwkt_serialize_exit(&sc->sc_jbuf_serializer);

	return jbuf;
}

static void
nfe_jfree(void *arg)
{
	struct nfe_jbuf *jbuf = arg;
	struct nfe_softc *sc = jbuf->sc;
	struct nfe_rx_ring *ring = jbuf->ring;

	if (&ring->jbuf[jbuf->slot] != jbuf)
		panic("%s: free wrong jumbo buffer", __func__);
	else if (jbuf->inuse == 0)
		panic("%s: jumbo buffer already freed", __func__);

	lwkt_serialize_enter(&sc->sc_jbuf_serializer);
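	/*
	 * Drop one reference while holding the jumbo buffer serializer;
	 * the buffer is put back on the free list only when the last
	 * reference goes away.
	 */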
1863 atomic_subtract_int(&jbuf->inuse, 1); 1864 if (jbuf->inuse == 0) 1865 SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext); 1866 lwkt_serialize_exit(&sc->sc_jbuf_serializer); 1867 } 1868 1869 static void 1870 nfe_jref(void *arg) 1871 { 1872 struct nfe_jbuf *jbuf = arg; 1873 struct nfe_rx_ring *ring = jbuf->ring; 1874 1875 if (&ring->jbuf[jbuf->slot] != jbuf) 1876 panic("%s: ref wrong jumbo buffer", __func__); 1877 else if (jbuf->inuse == 0) 1878 panic("%s: jumbo buffer already freed", __func__); 1879 1880 atomic_add_int(&jbuf->inuse, 1); 1881 } 1882 1883 static int 1884 nfe_jpool_alloc(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1885 { 1886 struct nfe_jbuf *jbuf; 1887 bus_dmamem_t dmem; 1888 bus_addr_t physaddr; 1889 caddr_t buf; 1890 int i, error; 1891 1892 /* 1893 * Allocate a big chunk of DMA'able memory. 1894 */ 1895 error = bus_dmamem_coherent(sc->sc_dtag, PAGE_SIZE, 0, 1896 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 1897 NFE_JPOOL_SIZE(sc), 1898 BUS_DMA_WAITOK, &dmem); 1899 if (error) { 1900 if_printf(&sc->arpcom.ac_if, 1901 "could not create jumbo buffer\n"); 1902 return error; 1903 } 1904 ring->jtag = dmem.dmem_tag; 1905 ring->jmap = dmem.dmem_map; 1906 ring->jpool = dmem.dmem_addr; 1907 physaddr = dmem.dmem_busaddr; 1908 1909 /* ..and split it into 9KB chunks */ 1910 SLIST_INIT(&ring->jfreelist); 1911 1912 buf = ring->jpool; 1913 for (i = 0; i < NFE_JPOOL_COUNT(sc); i++) { 1914 jbuf = &ring->jbuf[i]; 1915 1916 jbuf->sc = sc; 1917 jbuf->ring = ring; 1918 jbuf->inuse = 0; 1919 jbuf->slot = i; 1920 jbuf->buf = buf; 1921 jbuf->physaddr = physaddr; 1922 1923 SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext); 1924 1925 buf += NFE_JBYTES; 1926 physaddr += NFE_JBYTES; 1927 } 1928 1929 return 0; 1930 } 1931 1932 static void 1933 nfe_jpool_free(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1934 { 1935 if (ring->jtag != NULL) { 1936 bus_dmamap_unload(ring->jtag, ring->jmap); 1937 bus_dmamem_free(ring->jtag, ring->jpool, ring->jmap); 1938 bus_dma_tag_destroy(ring->jtag); 1939 } 1940 } 1941 1942 static int 1943 nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring) 1944 { 1945 int i, j, error, descsize; 1946 bus_dmamem_t dmem; 1947 void **desc; 1948 1949 if (sc->sc_caps & NFE_40BIT_ADDR) { 1950 desc = (void *)&ring->desc64; 1951 descsize = sizeof(struct nfe_desc64); 1952 } else { 1953 desc = (void *)&ring->desc32; 1954 descsize = sizeof(struct nfe_desc32); 1955 } 1956 1957 ring->queued = 0; 1958 ring->cur = ring->next = 0; 1959 1960 error = bus_dmamem_coherent(sc->sc_dtag, PAGE_SIZE, 0, 1961 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 1962 sc->sc_tx_ring_count * descsize, 1963 BUS_DMA_WAITOK | BUS_DMA_ZERO, &dmem); 1964 if (error) { 1965 if_printf(&sc->arpcom.ac_if, 1966 "could not create TX desc ring\n"); 1967 return error; 1968 } 1969 ring->tag = dmem.dmem_tag; 1970 ring->map = dmem.dmem_map; 1971 *desc = dmem.dmem_addr; 1972 ring->physaddr = dmem.dmem_busaddr; 1973 1974 ring->data = kmalloc(sizeof(struct nfe_tx_data) * sc->sc_tx_ring_count, 1975 M_DEVBUF, M_WAITOK | M_ZERO); 1976 1977 error = bus_dma_tag_create(sc->sc_dtag, 1, 0, 1978 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 1979 NULL, NULL, 1980 NFE_JBYTES, NFE_MAX_SCATTER, MCLBYTES, 1981 BUS_DMA_ALLOCNOW | BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, 1982 &ring->data_tag); 1983 if (error) { 1984 if_printf(&sc->arpcom.ac_if, 1985 "could not create TX buf DMA tag\n"); 1986 return error; 1987 } 1988 1989 for (i = 0; i < sc->sc_tx_ring_count; i++) { 1990 error = bus_dmamap_create(ring->data_tag, 1991 BUS_DMA_WAITOK | BUS_DMA_ONEBPAGE, 1992 
&ring->data[i].map); 1993 if (error) { 1994 if_printf(&sc->arpcom.ac_if, 1995 "could not create %dth TX buf DMA map\n", i); 1996 goto fail; 1997 } 1998 } 1999 2000 return 0; 2001 fail: 2002 for (j = 0; j < i; ++j) 2003 bus_dmamap_destroy(ring->data_tag, ring->data[i].map); 2004 bus_dma_tag_destroy(ring->data_tag); 2005 ring->data_tag = NULL; 2006 return error; 2007 } 2008 2009 static void 2010 nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring) 2011 { 2012 int i; 2013 2014 for (i = 0; i < sc->sc_tx_ring_count; i++) { 2015 struct nfe_tx_data *data = &ring->data[i]; 2016 2017 if (sc->sc_caps & NFE_40BIT_ADDR) 2018 ring->desc64[i].flags = 0; 2019 else 2020 ring->desc32[i].flags = 0; 2021 2022 if (data->m != NULL) { 2023 bus_dmamap_unload(ring->data_tag, data->map); 2024 m_freem(data->m); 2025 data->m = NULL; 2026 } 2027 } 2028 2029 ring->queued = 0; 2030 ring->cur = ring->next = 0; 2031 } 2032 2033 static int 2034 nfe_init_tx_ring(struct nfe_softc *sc __unused, 2035 struct nfe_tx_ring *ring __unused) 2036 { 2037 return 0; 2038 } 2039 2040 static void 2041 nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring) 2042 { 2043 if (ring->data_tag != NULL) { 2044 struct nfe_tx_data *data; 2045 int i; 2046 2047 for (i = 0; i < sc->sc_tx_ring_count; ++i) { 2048 data = &ring->data[i]; 2049 2050 if (data->m != NULL) { 2051 bus_dmamap_unload(ring->data_tag, data->map); 2052 m_freem(data->m); 2053 } 2054 bus_dmamap_destroy(ring->data_tag, data->map); 2055 } 2056 2057 bus_dma_tag_destroy(ring->data_tag); 2058 } 2059 2060 if (ring->data != NULL) 2061 kfree(ring->data, M_DEVBUF); 2062 2063 if (ring->tag != NULL) { 2064 void *desc; 2065 2066 if (sc->sc_caps & NFE_40BIT_ADDR) 2067 desc = ring->desc64; 2068 else 2069 desc = ring->desc32; 2070 2071 bus_dmamap_unload(ring->tag, ring->map); 2072 bus_dmamem_free(ring->tag, desc, ring->map); 2073 bus_dma_tag_destroy(ring->tag); 2074 } 2075 } 2076 2077 static int 2078 nfe_ifmedia_upd(struct ifnet *ifp) 2079 { 2080 struct nfe_softc *sc = ifp->if_softc; 2081 struct mii_data *mii = device_get_softc(sc->sc_miibus); 2082 2083 ASSERT_SERIALIZED(ifp->if_serializer); 2084 2085 if (mii->mii_instance != 0) { 2086 struct mii_softc *miisc; 2087 2088 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 2089 mii_phy_reset(miisc); 2090 } 2091 mii_mediachg(mii); 2092 2093 return 0; 2094 } 2095 2096 static void 2097 nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 2098 { 2099 struct nfe_softc *sc = ifp->if_softc; 2100 struct mii_data *mii = device_get_softc(sc->sc_miibus); 2101 2102 ASSERT_SERIALIZED(ifp->if_serializer); 2103 2104 mii_pollstat(mii); 2105 ifmr->ifm_status = mii->mii_media_status; 2106 ifmr->ifm_active = mii->mii_media_active; 2107 } 2108 2109 static void 2110 nfe_setmulti(struct nfe_softc *sc) 2111 { 2112 struct ifnet *ifp = &sc->arpcom.ac_if; 2113 struct ifmultiaddr *ifma; 2114 uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN]; 2115 uint32_t filter = NFE_RXFILTER_MAGIC; 2116 int i; 2117 2118 if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) { 2119 bzero(addr, ETHER_ADDR_LEN); 2120 bzero(mask, ETHER_ADDR_LEN); 2121 goto done; 2122 } 2123 2124 bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN); 2125 bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN); 2126 2127 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2128 caddr_t maddr; 2129 2130 if (ifma->ifma_addr->sa_family != AF_LINK) 2131 continue; 2132 2133 maddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr); 2134 for (i = 0; i < ETHER_ADDR_LEN; i++) { 2135 addr[i] &= maddr[i]; 2136 
static void
nfe_setmulti(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
		goto done;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		caddr_t maddr;

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		maddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &= maddr[i];
			mask[i] &= ~maddr[i];
		}
	}

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
		  addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
		  addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
		  mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
		  mask[5] << 8 | mask[4]);

	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

static void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t lo, hi;

	lo = NFE_READ(sc, NFE_MACADDR_LO);
	hi = NFE_READ(sc, NFE_MACADDR_HI);
	if (sc->sc_caps & NFE_FIX_EADDR) {
		addr[0] = (lo >> 8) & 0xff;
		addr[1] = (lo & 0xff);

		addr[2] = (hi >> 24) & 0xff;
		addr[3] = (hi >> 16) & 0xff;
		addr[4] = (hi >> 8) & 0xff;
		addr[5] = (hi & 0xff);
	} else {
		addr[0] = (hi & 0xff);
		addr[1] = (hi >> 8) & 0xff;
		addr[2] = (hi >> 16) & 0xff;
		addr[3] = (hi >> 24) & 0xff;

		addr[4] = (lo & 0xff);
		addr[5] = (lo >> 8) & 0xff;
	}
}

static void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
		  addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
		  addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

static void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	lwkt_serialize_enter(ifp->if_serializer);

	mii_tick(mii);
	callout_reset(&sc->sc_tick_ch, hz, nfe_tick, sc);

	lwkt_serialize_exit(ifp->if_serializer);
}

static int
nfe_newbuf_std(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
	       int wait)
{
	struct nfe_rx_data *data = &ring->data[idx];
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	struct mbuf *m;
	int nsegs, error;

	m = m_getcl(wait ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return ENOBUFS;
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	/*
	 * Aligning the payload improves access times.
	 */
	if (sc->sc_caps & NFE_WORDALIGN)
		m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_segment(ring->data_tag, ring->data_tmpmap,
					     m, &seg, 1, &nsegs,
					     BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		if (wait) {
			if_printf(&sc->arpcom.ac_if,
				  "could not map RX mbuf %d\n", error);
		}
		return error;
	}

	if (data->m != NULL) {
		/* Sync and unload originally mapped mbuf */
		bus_dmamap_sync(ring->data_tag, data->map,
				BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(ring->data_tag, data->map);
	}

	/* Swap this DMA map with tmp DMA map */
	map = data->map;
	data->map = ring->data_tmpmap;
	ring->data_tmpmap = map;

	/* Caller is assumed to have collected the old mbuf */
	data->m = m;

	nfe_set_paddr_rxdesc(sc, ring, idx, seg.ds_addr);
	return 0;
}
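
/*
 * Attach a buffer from the preallocated jumbo pool to a freshly
 * allocated mbuf header as external storage (nfe_jfree/nfe_jref do
 * the reference counting) and point the RX descriptor at it.
 */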
static int
nfe_newbuf_jumbo(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
		 int wait)
{
	struct nfe_rx_data *data = &ring->data[idx];
	struct nfe_jbuf *jbuf;
	struct mbuf *m;

	MGETHDR(m, wait ? MB_WAIT : MB_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	jbuf = nfe_jalloc(sc);
	if (jbuf == NULL) {
		m_freem(m);
		if_printf(&sc->arpcom.ac_if, "jumbo allocation failed "
			  "-- packet dropped!\n");
		return ENOBUFS;
	}

	m->m_ext.ext_arg = jbuf;
	m->m_ext.ext_buf = jbuf->buf;
	m->m_ext.ext_free = nfe_jfree;
	m->m_ext.ext_ref = nfe_jref;
	m->m_ext.ext_size = NFE_JBYTES;

	m->m_data = m->m_ext.ext_buf;
	m->m_flags |= M_EXT;
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

	/*
	 * Aligning the payload improves access times.
	 */
	if (sc->sc_caps & NFE_WORDALIGN)
		m_adj(m, ETHER_ALIGN);

	/* Caller is assumed to have collected the old mbuf */
	data->m = m;

	nfe_set_paddr_rxdesc(sc, ring, idx, jbuf->physaddr);
	return 0;
}

static void
nfe_set_paddr_rxdesc(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
		     bus_addr_t physaddr)
{
	if (sc->sc_caps & NFE_40BIT_ADDR) {
		struct nfe_desc64 *desc64 = &ring->desc64[idx];

		desc64->physaddr[0] = htole32(NFE_ADDR_HI(physaddr));
		desc64->physaddr[1] = htole32(NFE_ADDR_LO(physaddr));
	} else {
		struct nfe_desc32 *desc32 = &ring->desc32[idx];

		desc32->physaddr = htole32(physaddr);
	}
}

static void
nfe_set_ready_rxdesc(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx)
{
	if (sc->sc_caps & NFE_40BIT_ADDR) {
		struct nfe_desc64 *desc64 = &ring->desc64[idx];

		desc64->length = htole16(ring->bufsz);
		desc64->flags = htole16(NFE_RX_READY);
	} else {
		struct nfe_desc32 *desc32 = &ring->desc32[idx];

		desc32->length = htole16(ring->bufsz);
		desc32->flags = htole16(NFE_RX_READY);
	}
}

static int
nfe_sysctl_imtime(SYSCTL_HANDLER_ARGS)
{
	struct nfe_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t flags;
	int error, v;

	lwkt_serialize_enter(ifp->if_serializer);

	flags = sc->sc_flags & ~NFE_F_DYN_IM;
	v = sc->sc_imtime;
	if (sc->sc_flags & NFE_F_DYN_IM)
		v = -v;

	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;

	if (v < 0) {
		flags |= NFE_F_DYN_IM;
		v = -v;
	}

	if (v != sc->sc_imtime || (flags ^ sc->sc_flags)) {
		if (NFE_IMTIME(v) == 0)
			v = 0;
		sc->sc_imtime = v;
		sc->sc_flags = flags;
		sc->sc_irq_enable = NFE_IRQ_ENABLE(sc);

		if ((ifp->if_flags & (IFF_NPOLLING | IFF_RUNNING))
		    == IFF_RUNNING) {
			nfe_enable_intrs(sc);
		}
	}
back:
	lwkt_serialize_exit(ifp->if_serializer);
	return error;
}

static void
nfe_powerup(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t pwr_state;
	uint16_t did;

	/*
	 * Bring MAC and PHY out of low power state
	 */

	pwr_state = NFE_READ(sc, NFE_PWR_STATE2) & ~NFE_PWRUP_MASK;

	did = pci_get_device(dev);
	if ((did == PCI_PRODUCT_NVIDIA_MCP51_LAN1 ||
	     did == PCI_PRODUCT_NVIDIA_MCP51_LAN2) &&
	    pci_get_revid(dev) >= 0xa3)
		pwr_state |= NFE_PWRUP_REV_A3;

	NFE_WRITE(sc, NFE_PWR_STATE2, pwr_state);
}
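
/*
 * Reset the MAC.  The station address and TX poll registers are saved
 * across the reset and restored afterwards.
 */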
static void
nfe_mac_reset(struct nfe_softc *sc)
{
	uint32_t rxtxctl = sc->rxtxctl_desc | NFE_RXTX_BIT2;
	uint32_t macaddr_hi, macaddr_lo, tx_poll;

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | rxtxctl);

	/* Save several registers for later restoration */
	macaddr_hi = NFE_READ(sc, NFE_MACADDR_HI);
	macaddr_lo = NFE_READ(sc, NFE_MACADDR_LO);
	tx_poll = NFE_READ(sc, NFE_TX_POLL);

	NFE_WRITE(sc, NFE_MAC_RESET, NFE_RESET_ASSERT);
	DELAY(100);

	NFE_WRITE(sc, NFE_MAC_RESET, 0);
	DELAY(100);

	/* Restore saved registers */
	NFE_WRITE(sc, NFE_MACADDR_HI, macaddr_hi);
	NFE_WRITE(sc, NFE_MACADDR_LO, macaddr_lo);
	NFE_WRITE(sc, NFE_TX_POLL, tx_poll);

	NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl);
}

static void
nfe_enable_intrs(struct nfe_softc *sc)
{
	/*
	 * NFE_IMTIMER generates a periodic interrupt via NFE_IRQ_TIMER.
	 * It is unclear how wide the timer is.  Base programming does
	 * not seem to affect NFE_IRQ_TX_DONE or NFE_IRQ_RX_DONE, so
	 * we don't get any interrupt moderation.  TX moderation is
	 * possible by using the timer interrupt instead of TX_DONE.
	 *
	 * It is unclear whether there are other bits that can be
	 * set to make the NFE device actually do interrupt moderation
	 * on the RX side.
	 *
	 * For now set a 128uS interval as a placeholder, but don't use
	 * the timer.
	 */
	if (sc->sc_imtime == 0)
		NFE_WRITE(sc, NFE_IMTIMER, NFE_IMTIME_DEFAULT);
	else
		NFE_WRITE(sc, NFE_IMTIMER, NFE_IMTIME(sc->sc_imtime));

	/* Enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, sc->sc_irq_enable);

	if (sc->sc_irq_enable & NFE_IRQ_TIMER)
		sc->sc_flags |= NFE_F_IRQ_TIMER;
	else
		sc->sc_flags &= ~NFE_F_IRQ_TIMER;
}