1 /* $OpenBSD: if_nfe.c,v 1.63 2006/06/17 18:00:43 brad Exp $ */ 2 /* $DragonFly: src/sys/dev/netif/nfe/if_nfe.c,v 1.43 2008/07/27 10:06:56 sephe Exp $ */ 3 4 /* 5 * Copyright (c) 2006 The DragonFly Project. All rights reserved. 6 * 7 * This code is derived from software contributed to The DragonFly Project 8 * by Sepherosa Ziehau <sepherosa@gmail.com> and 9 * Matthew Dillon <dillon@apollo.backplane.com> 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in 19 * the documentation and/or other materials provided with the 20 * distribution. 21 * 3. Neither the name of The DragonFly Project nor the names of its 22 * contributors may be used to endorse or promote products derived 23 * from this software without specific, prior written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 28 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 29 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 30 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 31 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 32 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 33 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 34 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 35 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 36 * SUCH DAMAGE. 37 */ 38 39 /* 40 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr> 41 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org> 42 * 43 * Permission to use, copy, modify, and distribute this software for any 44 * purpose with or without fee is hereby granted, provided that the above 45 * copyright notice and this permission notice appear in all copies. 46 * 47 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 48 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 49 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 50 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 51 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 52 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 53 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
54 */ 55 56 /* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */ 57 58 #include "opt_polling.h" 59 #include "opt_ethernet.h" 60 61 #include <sys/param.h> 62 #include <sys/endian.h> 63 #include <sys/kernel.h> 64 #include <sys/bus.h> 65 #include <sys/interrupt.h> 66 #include <sys/proc.h> 67 #include <sys/rman.h> 68 #include <sys/serialize.h> 69 #include <sys/socket.h> 70 #include <sys/sockio.h> 71 #include <sys/sysctl.h> 72 73 #include <net/ethernet.h> 74 #include <net/if.h> 75 #include <net/bpf.h> 76 #include <net/if_arp.h> 77 #include <net/if_dl.h> 78 #include <net/if_media.h> 79 #include <net/ifq_var.h> 80 #include <net/if_types.h> 81 #include <net/if_var.h> 82 #include <net/vlan/if_vlan_var.h> 83 #include <net/vlan/if_vlan_ether.h> 84 85 #include <bus/pci/pcireg.h> 86 #include <bus/pci/pcivar.h> 87 #include <bus/pci/pcidevs.h> 88 89 #include <dev/netif/mii_layer/mii.h> 90 #include <dev/netif/mii_layer/miivar.h> 91 92 #include "miibus_if.h" 93 94 #include <dev/netif/nfe/if_nfereg.h> 95 #include <dev/netif/nfe/if_nfevar.h> 96 97 #define NFE_CSUM 98 #define NFE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 99 100 static int nfe_probe(device_t); 101 static int nfe_attach(device_t); 102 static int nfe_detach(device_t); 103 static void nfe_shutdown(device_t); 104 static int nfe_resume(device_t); 105 static int nfe_suspend(device_t); 106 107 static int nfe_miibus_readreg(device_t, int, int); 108 static void nfe_miibus_writereg(device_t, int, int, int); 109 static void nfe_miibus_statchg(device_t); 110 111 #ifdef DEVICE_POLLING 112 static void nfe_poll(struct ifnet *, enum poll_cmd, int); 113 #endif 114 static void nfe_intr(void *); 115 static int nfe_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 116 static int nfe_rxeof(struct nfe_softc *); 117 static int nfe_txeof(struct nfe_softc *, int); 118 static int nfe_encap(struct nfe_softc *, struct nfe_tx_ring *, 119 struct mbuf *); 120 static void nfe_start(struct ifnet *); 121 static void nfe_watchdog(struct ifnet *); 122 static void nfe_init(void *); 123 static void nfe_stop(struct nfe_softc *); 124 static struct nfe_jbuf *nfe_jalloc(struct nfe_softc *); 125 static void nfe_jfree(void *); 126 static void nfe_jref(void *); 127 static int nfe_jpool_alloc(struct nfe_softc *, struct nfe_rx_ring *); 128 static void nfe_jpool_free(struct nfe_softc *, struct nfe_rx_ring *); 129 static int nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); 130 static void nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); 131 static int nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); 132 static void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); 133 static int nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); 134 static void nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); 135 static int nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); 136 static void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); 137 static int nfe_ifmedia_upd(struct ifnet *); 138 static void nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *); 139 static void nfe_setmulti(struct nfe_softc *); 140 static void nfe_get_macaddr(struct nfe_softc *, uint8_t *); 141 static void nfe_set_macaddr(struct nfe_softc *, const uint8_t *); 142 static void nfe_powerup(device_t); 143 static void nfe_mac_reset(struct nfe_softc *); 144 static void nfe_tick(void *); 145 static void nfe_ring_dma_addr(void *, bus_dma_segment_t *, int, int); 146 static void nfe_buf_dma_addr(void *, bus_dma_segment_t *, int, 
bus_size_t, 147 int); 148 static void nfe_set_paddr_rxdesc(struct nfe_softc *, struct nfe_rx_ring *, 149 int, bus_addr_t); 150 static void nfe_set_ready_rxdesc(struct nfe_softc *, struct nfe_rx_ring *, 151 int); 152 static int nfe_newbuf_std(struct nfe_softc *, struct nfe_rx_ring *, int, 153 int); 154 static int nfe_newbuf_jumbo(struct nfe_softc *, struct nfe_rx_ring *, int, 155 int); 156 static void nfe_enable_intrs(struct nfe_softc *); 157 static void nfe_disable_intrs(struct nfe_softc *); 158 159 static int nfe_sysctl_imtime(SYSCTL_HANDLER_ARGS); 160 161 #define NFE_DEBUG 162 #ifdef NFE_DEBUG 163 164 static int nfe_debug = 0; 165 static int nfe_rx_ring_count = NFE_RX_RING_DEF_COUNT; 166 static int nfe_tx_ring_count = NFE_TX_RING_DEF_COUNT; 167 /* hw timer simulated interrupt moderation @8000Hz */ 168 static int nfe_imtime = -125; 169 170 TUNABLE_INT("hw.nfe.rx_ring_count", &nfe_rx_ring_count); 171 TUNABLE_INT("hw.nfe.tx_ring_count", &nfe_tx_ring_count); 172 TUNABLE_INT("hw.nfe.imtimer", &nfe_imtime); 173 TUNABLE_INT("hw.nfe.debug", &nfe_debug); 174 175 #define DPRINTF(sc, fmt, ...) do { \ 176 if ((sc)->sc_debug) { \ 177 if_printf(&(sc)->arpcom.ac_if, \ 178 fmt, __VA_ARGS__); \ 179 } \ 180 } while (0) 181 182 #define DPRINTFN(sc, lv, fmt, ...) do { \ 183 if ((sc)->sc_debug >= (lv)) { \ 184 if_printf(&(sc)->arpcom.ac_if, \ 185 fmt, __VA_ARGS__); \ 186 } \ 187 } while (0) 188 189 #else /* !NFE_DEBUG */ 190 191 #define DPRINTF(sc, fmt, ...) 192 #define DPRINTFN(sc, lv, fmt, ...) 193 194 #endif /* NFE_DEBUG */ 195 196 struct nfe_dma_ctx { 197 int nsegs; 198 bus_dma_segment_t *segs; 199 }; 200 201 static const struct nfe_dev { 202 uint16_t vid; 203 uint16_t did; 204 const char *desc; 205 } nfe_devices[] = { 206 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN, 207 "NVIDIA nForce Fast Ethernet" }, 208 209 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN, 210 "NVIDIA nForce2 Fast Ethernet" }, 211 212 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1, 213 "NVIDIA nForce3 Gigabit Ethernet" }, 214 215 /* XXX TGEN the next chip can also be found in the nForce2 Ultra 400Gb 216 chipset, and possibly also the 400R; it might be both nForce2- and 217 nForce3-based boards can use the same MCPs (= southbridges) */ 218 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2, 219 "NVIDIA nForce3 Gigabit Ethernet" }, 220 221 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3, 222 "NVIDIA nForce3 Gigabit Ethernet" }, 223 224 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4, 225 "NVIDIA nForce3 Gigabit Ethernet" }, 226 227 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5, 228 "NVIDIA nForce3 Gigabit Ethernet" }, 229 230 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1, 231 "NVIDIA CK804 Gigabit Ethernet" }, 232 233 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2, 234 "NVIDIA CK804 Gigabit Ethernet" }, 235 236 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1, 237 "NVIDIA MCP04 Gigabit Ethernet" }, 238 239 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2, 240 "NVIDIA MCP04 Gigabit Ethernet" }, 241 242 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1, 243 "NVIDIA MCP51 Gigabit Ethernet" }, 244 245 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2, 246 "NVIDIA MCP51 Gigabit Ethernet" }, 247 248 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1, 249 "NVIDIA MCP55 Gigabit Ethernet" }, 250 251 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2, 252 "NVIDIA MCP55 Gigabit Ethernet" }, 253 254 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1, 255 "NVIDIA MCP61 Gigabit Ethernet" }, 256 257 { 
PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2, 258 "NVIDIA MCP61 Gigabit Ethernet" }, 259 260 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3, 261 "NVIDIA MCP61 Gigabit Ethernet" }, 262 263 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4, 264 "NVIDIA MCP61 Gigabit Ethernet" }, 265 266 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1, 267 "NVIDIA MCP65 Gigabit Ethernet" }, 268 269 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2, 270 "NVIDIA MCP65 Gigabit Ethernet" }, 271 272 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3, 273 "NVIDIA MCP65 Gigabit Ethernet" }, 274 275 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4, 276 "NVIDIA MCP65 Gigabit Ethernet" }, 277 278 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1, 279 "NVIDIA MCP67 Gigabit Ethernet" }, 280 281 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2, 282 "NVIDIA MCP67 Gigabit Ethernet" }, 283 284 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3, 285 "NVIDIA MCP67 Gigabit Ethernet" }, 286 287 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4, 288 "NVIDIA MCP67 Gigabit Ethernet" }, 289 290 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1, 291 "NVIDIA MCP73 Gigabit Ethernet" }, 292 293 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2, 294 "NVIDIA MCP73 Gigabit Ethernet" }, 295 296 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3, 297 "NVIDIA MCP73 Gigabit Ethernet" }, 298 299 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4, 300 "NVIDIA MCP73 Gigabit Ethernet" }, 301 302 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1, 303 "NVIDIA MCP77 Gigabit Ethernet" }, 304 305 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2, 306 "NVIDIA MCP77 Gigabit Ethernet" }, 307 308 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3, 309 "NVIDIA MCP77 Gigabit Ethernet" }, 310 311 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4, 312 "NVIDIA MCP77 Gigabit Ethernet" }, 313 314 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1, 315 "NVIDIA MCP79 Gigabit Ethernet" }, 316 317 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2, 318 "NVIDIA MCP79 Gigabit Ethernet" }, 319 320 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3, 321 "NVIDIA MCP79 Gigabit Ethernet" }, 322 323 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4, 324 "NVIDIA MCP79 Gigabit Ethernet" }, 325 326 { 0, 0, NULL } 327 }; 328 329 static device_method_t nfe_methods[] = { 330 /* Device interface */ 331 DEVMETHOD(device_probe, nfe_probe), 332 DEVMETHOD(device_attach, nfe_attach), 333 DEVMETHOD(device_detach, nfe_detach), 334 DEVMETHOD(device_suspend, nfe_suspend), 335 DEVMETHOD(device_resume, nfe_resume), 336 DEVMETHOD(device_shutdown, nfe_shutdown), 337 338 /* Bus interface */ 339 DEVMETHOD(bus_print_child, bus_generic_print_child), 340 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 341 342 /* MII interface */ 343 DEVMETHOD(miibus_readreg, nfe_miibus_readreg), 344 DEVMETHOD(miibus_writereg, nfe_miibus_writereg), 345 DEVMETHOD(miibus_statchg, nfe_miibus_statchg), 346 347 { 0, 0 } 348 }; 349 350 static driver_t nfe_driver = { 351 "nfe", 352 nfe_methods, 353 sizeof(struct nfe_softc) 354 }; 355 356 static devclass_t nfe_devclass; 357 358 DECLARE_DUMMY_MODULE(if_nfe); 359 MODULE_DEPEND(if_nfe, miibus, 1, 1, 1); 360 DRIVER_MODULE(if_nfe, pci, nfe_driver, nfe_devclass, 0, 0); 361 DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0); 362 363 static int 364 nfe_probe(device_t dev) 365 { 366 const struct nfe_dev *n; 367 uint16_t vid, did; 368 369 vid = pci_get_vendor(dev); 370 did = pci_get_device(dev); 371 for (n = nfe_devices; n->desc != NULL; 
++n) { 372 if (vid == n->vid && did == n->did) { 373 struct nfe_softc *sc = device_get_softc(dev); 374 375 switch (did) { 376 case PCI_PRODUCT_NVIDIA_NFORCE_LAN: 377 case PCI_PRODUCT_NVIDIA_NFORCE2_LAN: 378 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN1: 379 sc->sc_caps = NFE_NO_PWRCTL | 380 NFE_FIX_EADDR; 381 break; 382 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2: 383 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3: 384 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4: 385 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5: 386 sc->sc_caps = NFE_JUMBO_SUP | 387 NFE_HW_CSUM | 388 NFE_NO_PWRCTL | 389 NFE_FIX_EADDR; 390 break; 391 case PCI_PRODUCT_NVIDIA_MCP51_LAN1: 392 case PCI_PRODUCT_NVIDIA_MCP51_LAN2: 393 sc->sc_caps = NFE_FIX_EADDR; 394 /* FALL THROUGH */ 395 case PCI_PRODUCT_NVIDIA_MCP61_LAN1: 396 case PCI_PRODUCT_NVIDIA_MCP61_LAN2: 397 case PCI_PRODUCT_NVIDIA_MCP61_LAN3: 398 case PCI_PRODUCT_NVIDIA_MCP61_LAN4: 399 case PCI_PRODUCT_NVIDIA_MCP67_LAN1: 400 case PCI_PRODUCT_NVIDIA_MCP67_LAN2: 401 case PCI_PRODUCT_NVIDIA_MCP67_LAN3: 402 case PCI_PRODUCT_NVIDIA_MCP67_LAN4: 403 case PCI_PRODUCT_NVIDIA_MCP73_LAN1: 404 case PCI_PRODUCT_NVIDIA_MCP73_LAN2: 405 case PCI_PRODUCT_NVIDIA_MCP73_LAN3: 406 case PCI_PRODUCT_NVIDIA_MCP73_LAN4: 407 sc->sc_caps |= NFE_40BIT_ADDR; 408 break; 409 case PCI_PRODUCT_NVIDIA_CK804_LAN1: 410 case PCI_PRODUCT_NVIDIA_CK804_LAN2: 411 case PCI_PRODUCT_NVIDIA_MCP04_LAN1: 412 case PCI_PRODUCT_NVIDIA_MCP04_LAN2: 413 sc->sc_caps = NFE_JUMBO_SUP | 414 NFE_40BIT_ADDR | 415 NFE_HW_CSUM | 416 NFE_NO_PWRCTL | 417 NFE_FIX_EADDR; 418 break; 419 case PCI_PRODUCT_NVIDIA_MCP65_LAN1: 420 case PCI_PRODUCT_NVIDIA_MCP65_LAN2: 421 case PCI_PRODUCT_NVIDIA_MCP65_LAN3: 422 case PCI_PRODUCT_NVIDIA_MCP65_LAN4: 423 sc->sc_caps = NFE_JUMBO_SUP | 424 NFE_40BIT_ADDR; 425 break; 426 case PCI_PRODUCT_NVIDIA_MCP55_LAN1: 427 case PCI_PRODUCT_NVIDIA_MCP55_LAN2: 428 sc->sc_caps = NFE_JUMBO_SUP | 429 NFE_40BIT_ADDR | 430 NFE_HW_CSUM | 431 NFE_HW_VLAN | 432 NFE_FIX_EADDR; 433 break; 434 case PCI_PRODUCT_NVIDIA_MCP77_LAN1: 435 case PCI_PRODUCT_NVIDIA_MCP77_LAN2: 436 case PCI_PRODUCT_NVIDIA_MCP77_LAN3: 437 case PCI_PRODUCT_NVIDIA_MCP77_LAN4: 438 case PCI_PRODUCT_NVIDIA_MCP79_LAN1: 439 case PCI_PRODUCT_NVIDIA_MCP79_LAN2: 440 case PCI_PRODUCT_NVIDIA_MCP79_LAN3: 441 case PCI_PRODUCT_NVIDIA_MCP79_LAN4: 442 sc->sc_caps = NFE_40BIT_ADDR | 443 NFE_HW_CSUM; 444 break; 445 } 446 447 device_set_desc(dev, n->desc); 448 device_set_async_attach(dev, TRUE); 449 return 0; 450 } 451 } 452 return ENXIO; 453 } 454 455 static int 456 nfe_attach(device_t dev) 457 { 458 struct nfe_softc *sc = device_get_softc(dev); 459 struct ifnet *ifp = &sc->arpcom.ac_if; 460 uint8_t eaddr[ETHER_ADDR_LEN]; 461 int error; 462 463 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 464 lwkt_serialize_init(&sc->sc_jbuf_serializer); 465 466 /* 467 * Initialize sysctl variables 468 */ 469 sc->sc_rx_ring_count = nfe_rx_ring_count; 470 sc->sc_tx_ring_count = nfe_tx_ring_count; 471 sc->sc_debug = nfe_debug; 472 if (nfe_imtime < 0) { 473 sc->sc_flags |= NFE_F_DYN_IM; 474 sc->sc_imtime = -nfe_imtime; 475 } else { 476 sc->sc_imtime = nfe_imtime; 477 } 478 sc->sc_irq_enable = NFE_IRQ_ENABLE(sc); 479 480 sc->sc_mem_rid = PCIR_BAR(0); 481 482 if (sc->sc_caps & NFE_40BIT_ADDR) 483 sc->rxtxctl_desc = NFE_RXTX_DESC_V3; 484 else if (sc->sc_caps & NFE_JUMBO_SUP) 485 sc->rxtxctl_desc = NFE_RXTX_DESC_V2; 486 487 #ifndef BURN_BRIDGES 488 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) { 489 uint32_t mem, irq; 490 491 mem = pci_read_config(dev, sc->sc_mem_rid, 4); 492 irq = pci_read_config(dev, 
		    PCIR_INTLINE, 4);

		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, sc->sc_mem_rid, mem, 4);
		pci_write_config(dev, PCIR_INTLINE, irq, 4);
	}
#endif	/* !BURN_BRIDGES */

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/* Allocate IO memory */
	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
						&sc->sc_mem_rid, RF_ACTIVE);
	if (sc->sc_mem_res == NULL) {
		device_printf(dev, "could not allocate io memory\n");
		return ENXIO;
	}
	sc->sc_memh = rman_get_bushandle(sc->sc_mem_res);
	sc->sc_memt = rman_get_bustag(sc->sc_mem_res);

	/* Allocate IRQ */
	sc->sc_irq_rid = 0;
	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
						&sc->sc_irq_rid,
						RF_SHAREABLE | RF_ACTIVE);
	if (sc->sc_irq_res == NULL) {
		device_printf(dev, "could not allocate irq\n");
		error = ENXIO;
		goto fail;
	}

	/* Disable WOL */
	NFE_WRITE(sc, NFE_WOL_CTL, 0);

	if ((sc->sc_caps & NFE_NO_PWRCTL) == 0)
		nfe_powerup(dev);

	nfe_get_macaddr(sc, eaddr);

	/*
	 * Allocate Tx and Rx rings.
	 */
	error = nfe_alloc_tx_ring(sc, &sc->txq);
	if (error) {
		device_printf(dev, "could not allocate Tx ring\n");
		goto fail;
	}

	error = nfe_alloc_rx_ring(sc, &sc->rxq);
	if (error) {
		device_printf(dev, "could not allocate Rx ring\n");
		goto fail;
	}

	/*
	 * Create sysctl tree
	 */
	sysctl_ctx_init(&sc->sc_sysctl_ctx);
	sc->sc_sysctl_tree = SYSCTL_ADD_NODE(&sc->sc_sysctl_ctx,
					     SYSCTL_STATIC_CHILDREN(_hw),
					     OID_AUTO,
					     device_get_nameunit(dev),
					     CTLFLAG_RD, 0, "");
	if (sc->sc_sysctl_tree == NULL) {
		device_printf(dev, "can't add sysctl node\n");
		error = ENXIO;
		goto fail;
	}
	SYSCTL_ADD_PROC(&sc->sc_sysctl_ctx,
			SYSCTL_CHILDREN(sc->sc_sysctl_tree),
			OID_AUTO, "imtimer", CTLTYPE_INT | CTLFLAG_RW,
			sc, 0, nfe_sysctl_imtime, "I",
			"Interrupt moderation time (usec). 
" 570 "0 to disable interrupt moderation."); 571 SYSCTL_ADD_INT(&sc->sc_sysctl_ctx, 572 SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO, 573 "rx_ring_count", CTLFLAG_RD, &sc->sc_rx_ring_count, 574 0, "RX ring count"); 575 SYSCTL_ADD_INT(&sc->sc_sysctl_ctx, 576 SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO, 577 "tx_ring_count", CTLFLAG_RD, &sc->sc_tx_ring_count, 578 0, "TX ring count"); 579 SYSCTL_ADD_INT(&sc->sc_sysctl_ctx, 580 SYSCTL_CHILDREN(sc->sc_sysctl_tree), OID_AUTO, 581 "debug", CTLFLAG_RW, &sc->sc_debug, 582 0, "control debugging printfs"); 583 584 error = mii_phy_probe(dev, &sc->sc_miibus, nfe_ifmedia_upd, 585 nfe_ifmedia_sts); 586 if (error) { 587 device_printf(dev, "MII without any phy\n"); 588 goto fail; 589 } 590 591 ifp->if_softc = sc; 592 ifp->if_mtu = ETHERMTU; 593 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 594 ifp->if_ioctl = nfe_ioctl; 595 ifp->if_start = nfe_start; 596 #ifdef DEVICE_POLLING 597 ifp->if_poll = nfe_poll; 598 #endif 599 ifp->if_watchdog = nfe_watchdog; 600 ifp->if_init = nfe_init; 601 ifq_set_maxlen(&ifp->if_snd, sc->sc_tx_ring_count); 602 ifq_set_ready(&ifp->if_snd); 603 604 ifp->if_capabilities = IFCAP_VLAN_MTU; 605 606 if (sc->sc_caps & NFE_HW_VLAN) 607 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING; 608 609 #ifdef NFE_CSUM 610 if (sc->sc_caps & NFE_HW_CSUM) { 611 ifp->if_capabilities |= IFCAP_HWCSUM; 612 ifp->if_hwassist = NFE_CSUM_FEATURES; 613 } 614 #else 615 sc->sc_caps &= ~NFE_HW_CSUM; 616 #endif 617 ifp->if_capenable = ifp->if_capabilities; 618 619 callout_init(&sc->sc_tick_ch); 620 621 ether_ifattach(ifp, eaddr, NULL); 622 623 error = bus_setup_intr(dev, sc->sc_irq_res, INTR_MPSAFE, nfe_intr, sc, 624 &sc->sc_ih, ifp->if_serializer); 625 if (error) { 626 device_printf(dev, "could not setup intr\n"); 627 ether_ifdetach(ifp); 628 goto fail; 629 } 630 631 ifp->if_cpuid = ithread_cpuid(rman_get_start(sc->sc_irq_res)); 632 KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus); 633 634 return 0; 635 fail: 636 nfe_detach(dev); 637 return error; 638 } 639 640 static int 641 nfe_detach(device_t dev) 642 { 643 struct nfe_softc *sc = device_get_softc(dev); 644 645 if (device_is_attached(dev)) { 646 struct ifnet *ifp = &sc->arpcom.ac_if; 647 648 lwkt_serialize_enter(ifp->if_serializer); 649 nfe_stop(sc); 650 bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_ih); 651 lwkt_serialize_exit(ifp->if_serializer); 652 653 ether_ifdetach(ifp); 654 } 655 656 if (sc->sc_miibus != NULL) 657 device_delete_child(dev, sc->sc_miibus); 658 bus_generic_detach(dev); 659 660 if (sc->sc_sysctl_tree != NULL) 661 sysctl_ctx_free(&sc->sc_sysctl_ctx); 662 663 if (sc->sc_irq_res != NULL) { 664 bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid, 665 sc->sc_irq_res); 666 } 667 668 if (sc->sc_mem_res != NULL) { 669 bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid, 670 sc->sc_mem_res); 671 } 672 673 nfe_free_tx_ring(sc, &sc->txq); 674 nfe_free_rx_ring(sc, &sc->rxq); 675 676 return 0; 677 } 678 679 static void 680 nfe_shutdown(device_t dev) 681 { 682 struct nfe_softc *sc = device_get_softc(dev); 683 struct ifnet *ifp = &sc->arpcom.ac_if; 684 685 lwkt_serialize_enter(ifp->if_serializer); 686 nfe_stop(sc); 687 lwkt_serialize_exit(ifp->if_serializer); 688 } 689 690 static int 691 nfe_suspend(device_t dev) 692 { 693 struct nfe_softc *sc = device_get_softc(dev); 694 struct ifnet *ifp = &sc->arpcom.ac_if; 695 696 lwkt_serialize_enter(ifp->if_serializer); 697 nfe_stop(sc); 698 lwkt_serialize_exit(ifp->if_serializer); 699 700 return 0; 701 } 702 703 static int 704 
nfe_resume(device_t dev) 705 { 706 struct nfe_softc *sc = device_get_softc(dev); 707 struct ifnet *ifp = &sc->arpcom.ac_if; 708 709 lwkt_serialize_enter(ifp->if_serializer); 710 if (ifp->if_flags & IFF_UP) 711 nfe_init(sc); 712 lwkt_serialize_exit(ifp->if_serializer); 713 714 return 0; 715 } 716 717 static void 718 nfe_miibus_statchg(device_t dev) 719 { 720 struct nfe_softc *sc = device_get_softc(dev); 721 struct mii_data *mii = device_get_softc(sc->sc_miibus); 722 uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET; 723 724 ASSERT_SERIALIZED(sc->arpcom.ac_if.if_serializer); 725 726 phy = NFE_READ(sc, NFE_PHY_IFACE); 727 phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T); 728 729 seed = NFE_READ(sc, NFE_RNDSEED); 730 seed &= ~NFE_SEED_MASK; 731 732 if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) { 733 phy |= NFE_PHY_HDX; /* half-duplex */ 734 misc |= NFE_MISC1_HDX; 735 } 736 737 switch (IFM_SUBTYPE(mii->mii_media_active)) { 738 case IFM_1000_T: /* full-duplex only */ 739 link |= NFE_MEDIA_1000T; 740 seed |= NFE_SEED_1000T; 741 phy |= NFE_PHY_1000T; 742 break; 743 case IFM_100_TX: 744 link |= NFE_MEDIA_100TX; 745 seed |= NFE_SEED_100TX; 746 phy |= NFE_PHY_100TX; 747 break; 748 case IFM_10_T: 749 link |= NFE_MEDIA_10T; 750 seed |= NFE_SEED_10T; 751 break; 752 } 753 754 NFE_WRITE(sc, NFE_RNDSEED, seed); /* XXX: gigabit NICs only? */ 755 756 NFE_WRITE(sc, NFE_PHY_IFACE, phy); 757 NFE_WRITE(sc, NFE_MISC1, misc); 758 NFE_WRITE(sc, NFE_LINKSPEED, link); 759 } 760 761 static int 762 nfe_miibus_readreg(device_t dev, int phy, int reg) 763 { 764 struct nfe_softc *sc = device_get_softc(dev); 765 uint32_t val; 766 int ntries; 767 768 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 769 770 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) { 771 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY); 772 DELAY(100); 773 } 774 775 NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg); 776 777 for (ntries = 0; ntries < 1000; ntries++) { 778 DELAY(100); 779 if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY)) 780 break; 781 } 782 if (ntries == 1000) { 783 DPRINTFN(sc, 2, "timeout waiting for PHY %s\n", ""); 784 return 0; 785 } 786 787 if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) { 788 DPRINTFN(sc, 2, "could not read PHY %s\n", ""); 789 return 0; 790 } 791 792 val = NFE_READ(sc, NFE_PHY_DATA); 793 if (val != 0xffffffff && val != 0) 794 sc->mii_phyaddr = phy; 795 796 DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val); 797 798 return val; 799 } 800 801 static void 802 nfe_miibus_writereg(device_t dev, int phy, int reg, int val) 803 { 804 struct nfe_softc *sc = device_get_softc(dev); 805 uint32_t ctl; 806 int ntries; 807 808 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 809 810 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) { 811 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY); 812 DELAY(100); 813 } 814 815 NFE_WRITE(sc, NFE_PHY_DATA, val); 816 ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg; 817 NFE_WRITE(sc, NFE_PHY_CTL, ctl); 818 819 for (ntries = 0; ntries < 1000; ntries++) { 820 DELAY(100); 821 if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY)) 822 break; 823 } 824 825 #ifdef NFE_DEBUG 826 if (ntries == 1000) 827 DPRINTFN(sc, 2, "could not write to PHY %s\n", ""); 828 #endif 829 } 830 831 #ifdef DEVICE_POLLING 832 833 static void 834 nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 835 { 836 struct nfe_softc *sc = ifp->if_softc; 837 838 ASSERT_SERIALIZED(ifp->if_serializer); 839 840 switch(cmd) { 841 case POLL_REGISTER: 842 nfe_disable_intrs(sc); 843 break; 844 845 case POLL_DEREGISTER: 846 
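		/*
		 * Polling is being turned off for this interface; hand
		 * receive/transmit completion handling back to the
		 * hardware interrupt.
		 */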
nfe_enable_intrs(sc); 847 break; 848 849 case POLL_AND_CHECK_STATUS: 850 /* fall through */ 851 case POLL_ONLY: 852 if (ifp->if_flags & IFF_RUNNING) { 853 nfe_rxeof(sc); 854 nfe_txeof(sc, 1); 855 } 856 break; 857 } 858 } 859 860 #endif 861 862 static void 863 nfe_intr(void *arg) 864 { 865 struct nfe_softc *sc = arg; 866 struct ifnet *ifp = &sc->arpcom.ac_if; 867 uint32_t r; 868 869 r = NFE_READ(sc, NFE_IRQ_STATUS); 870 if (r == 0) 871 return; /* not for us */ 872 NFE_WRITE(sc, NFE_IRQ_STATUS, r); 873 874 DPRINTFN(sc, 5, "%s: interrupt register %x\n", __func__, r); 875 876 if (r & NFE_IRQ_LINK) { 877 NFE_READ(sc, NFE_PHY_STATUS); 878 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 879 DPRINTF(sc, "link state changed %s\n", ""); 880 } 881 882 if (ifp->if_flags & IFF_RUNNING) { 883 int ret; 884 885 /* check Rx ring */ 886 ret = nfe_rxeof(sc); 887 888 /* check Tx ring */ 889 ret |= nfe_txeof(sc, 1); 890 891 if (sc->sc_flags & NFE_F_DYN_IM) { 892 if (ret && (sc->sc_flags & NFE_F_IRQ_TIMER) == 0) { 893 /* 894 * Assume that using hardware timer could reduce 895 * the interrupt rate. 896 */ 897 NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_IMTIMER); 898 sc->sc_flags |= NFE_F_IRQ_TIMER; 899 } else if (!ret && (sc->sc_flags & NFE_F_IRQ_TIMER)) { 900 /* 901 * Nothing needs to be processed, fall back to 902 * use TX/RX interrupts. 903 */ 904 NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_NOIMTIMER); 905 sc->sc_flags &= ~NFE_F_IRQ_TIMER; 906 } 907 } 908 } 909 } 910 911 static int 912 nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr) 913 { 914 struct nfe_softc *sc = ifp->if_softc; 915 struct ifreq *ifr = (struct ifreq *)data; 916 struct mii_data *mii; 917 int error = 0, mask, jumbo_cap; 918 919 ASSERT_SERIALIZED(ifp->if_serializer); 920 921 switch (cmd) { 922 case SIOCSIFMTU: 923 if ((sc->sc_caps & NFE_JUMBO_SUP) && sc->rxq.jbuf != NULL) 924 jumbo_cap = 1; 925 else 926 jumbo_cap = 0; 927 928 if ((jumbo_cap && ifr->ifr_mtu > NFE_JUMBO_MTU) || 929 (!jumbo_cap && ifr->ifr_mtu > ETHERMTU)) { 930 return EINVAL; 931 } else if (ifp->if_mtu != ifr->ifr_mtu) { 932 ifp->if_mtu = ifr->ifr_mtu; 933 if (ifp->if_flags & IFF_RUNNING) 934 nfe_init(sc); 935 } 936 break; 937 case SIOCSIFFLAGS: 938 if (ifp->if_flags & IFF_UP) { 939 /* 940 * If only the PROMISC or ALLMULTI flag changes, then 941 * don't do a full re-init of the chip, just update 942 * the Rx filter. 
943 */ 944 if ((ifp->if_flags & IFF_RUNNING) && 945 ((ifp->if_flags ^ sc->sc_if_flags) & 946 (IFF_ALLMULTI | IFF_PROMISC)) != 0) { 947 nfe_setmulti(sc); 948 } else { 949 if (!(ifp->if_flags & IFF_RUNNING)) 950 nfe_init(sc); 951 } 952 } else { 953 if (ifp->if_flags & IFF_RUNNING) 954 nfe_stop(sc); 955 } 956 sc->sc_if_flags = ifp->if_flags; 957 break; 958 case SIOCADDMULTI: 959 case SIOCDELMULTI: 960 if (ifp->if_flags & IFF_RUNNING) 961 nfe_setmulti(sc); 962 break; 963 case SIOCSIFMEDIA: 964 case SIOCGIFMEDIA: 965 mii = device_get_softc(sc->sc_miibus); 966 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); 967 break; 968 case SIOCSIFCAP: 969 mask = (ifr->ifr_reqcap ^ ifp->if_capenable) & IFCAP_HWCSUM; 970 if (mask && (ifp->if_capabilities & IFCAP_HWCSUM)) { 971 ifp->if_capenable ^= mask; 972 if (IFCAP_TXCSUM & ifp->if_capenable) 973 ifp->if_hwassist = NFE_CSUM_FEATURES; 974 else 975 ifp->if_hwassist = 0; 976 977 if (ifp->if_flags & IFF_RUNNING) 978 nfe_init(sc); 979 } 980 break; 981 default: 982 error = ether_ioctl(ifp, cmd, data); 983 break; 984 } 985 return error; 986 } 987 988 static int 989 nfe_rxeof(struct nfe_softc *sc) 990 { 991 struct ifnet *ifp = &sc->arpcom.ac_if; 992 struct nfe_rx_ring *ring = &sc->rxq; 993 int reap; 994 #ifdef ETHER_INPUT_CHAIN 995 struct mbuf_chain chain[MAXCPU]; 996 #endif 997 998 reap = 0; 999 bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_POSTREAD); 1000 1001 #ifdef ETHER_INPUT_CHAIN 1002 ether_input_chain_init(chain); 1003 #endif 1004 1005 for (;;) { 1006 struct nfe_rx_data *data = &ring->data[ring->cur]; 1007 struct mbuf *m; 1008 uint16_t flags; 1009 int len, error; 1010 1011 if (sc->sc_caps & NFE_40BIT_ADDR) { 1012 struct nfe_desc64 *desc64 = &ring->desc64[ring->cur]; 1013 1014 flags = le16toh(desc64->flags); 1015 len = le16toh(desc64->length) & 0x3fff; 1016 } else { 1017 struct nfe_desc32 *desc32 = &ring->desc32[ring->cur]; 1018 1019 flags = le16toh(desc32->flags); 1020 len = le16toh(desc32->length) & 0x3fff; 1021 } 1022 1023 if (flags & NFE_RX_READY) 1024 break; 1025 1026 reap = 1; 1027 1028 if ((sc->sc_caps & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) { 1029 if (!(flags & NFE_RX_VALID_V1)) 1030 goto skip; 1031 1032 if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) { 1033 flags &= ~NFE_RX_ERROR; 1034 len--; /* fix buffer length */ 1035 } 1036 } else { 1037 if (!(flags & NFE_RX_VALID_V2)) 1038 goto skip; 1039 1040 if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) { 1041 flags &= ~NFE_RX_ERROR; 1042 len--; /* fix buffer length */ 1043 } 1044 } 1045 1046 if (flags & NFE_RX_ERROR) { 1047 ifp->if_ierrors++; 1048 goto skip; 1049 } 1050 1051 m = data->m; 1052 1053 if (sc->sc_flags & NFE_F_USE_JUMBO) 1054 error = nfe_newbuf_jumbo(sc, ring, ring->cur, 0); 1055 else 1056 error = nfe_newbuf_std(sc, ring, ring->cur, 0); 1057 if (error) { 1058 ifp->if_ierrors++; 1059 goto skip; 1060 } 1061 1062 /* finalize mbuf */ 1063 m->m_pkthdr.len = m->m_len = len; 1064 m->m_pkthdr.rcvif = ifp; 1065 1066 if ((ifp->if_capenable & IFCAP_RXCSUM) && 1067 (flags & NFE_RX_CSUMOK)) { 1068 if (flags & NFE_RX_IP_CSUMOK_V2) { 1069 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | 1070 CSUM_IP_VALID; 1071 } 1072 1073 if (flags & 1074 (NFE_RX_UDP_CSUMOK_V2 | NFE_RX_TCP_CSUMOK_V2)) { 1075 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | 1076 CSUM_PSEUDO_HDR | 1077 CSUM_FRAG_NOT_CHECKED; 1078 m->m_pkthdr.csum_data = 0xffff; 1079 } 1080 } 1081 1082 ifp->if_ipackets++; 1083 #ifdef ETHER_INPUT_CHAIN 1084 ether_input_chain2(ifp, m, chain); 1085 #else 1086 ifp->if_input(ifp, m); 1087 #endif 1088 skip: 
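		/*
		 * Whether the frame was delivered or dropped, hand the
		 * descriptor back to the chip so the RX ring keeps moving.
		 */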
1089 nfe_set_ready_rxdesc(sc, ring, ring->cur); 1090 sc->rxq.cur = (sc->rxq.cur + 1) % sc->sc_rx_ring_count; 1091 } 1092 1093 if (reap) { 1094 bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE); 1095 #ifdef ETHER_INPUT_CHAIN 1096 ether_input_dispatch(chain); 1097 #endif 1098 } 1099 return reap; 1100 } 1101 1102 static int 1103 nfe_txeof(struct nfe_softc *sc, int start) 1104 { 1105 struct ifnet *ifp = &sc->arpcom.ac_if; 1106 struct nfe_tx_ring *ring = &sc->txq; 1107 struct nfe_tx_data *data = NULL; 1108 1109 bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_POSTREAD); 1110 while (ring->next != ring->cur) { 1111 uint16_t flags; 1112 1113 if (sc->sc_caps & NFE_40BIT_ADDR) 1114 flags = le16toh(ring->desc64[ring->next].flags); 1115 else 1116 flags = le16toh(ring->desc32[ring->next].flags); 1117 1118 if (flags & NFE_TX_VALID) 1119 break; 1120 1121 data = &ring->data[ring->next]; 1122 1123 if ((sc->sc_caps & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) { 1124 if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL) 1125 goto skip; 1126 1127 if ((flags & NFE_TX_ERROR_V1) != 0) { 1128 if_printf(ifp, "tx v1 error 0x%4b\n", flags, 1129 NFE_V1_TXERR); 1130 ifp->if_oerrors++; 1131 } else { 1132 ifp->if_opackets++; 1133 } 1134 } else { 1135 if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL) 1136 goto skip; 1137 1138 if ((flags & NFE_TX_ERROR_V2) != 0) { 1139 if_printf(ifp, "tx v2 error 0x%4b\n", flags, 1140 NFE_V2_TXERR); 1141 ifp->if_oerrors++; 1142 } else { 1143 ifp->if_opackets++; 1144 } 1145 } 1146 1147 if (data->m == NULL) { /* should not get there */ 1148 if_printf(ifp, 1149 "last fragment bit w/o associated mbuf!\n"); 1150 goto skip; 1151 } 1152 1153 /* last fragment of the mbuf chain transmitted */ 1154 bus_dmamap_sync(ring->data_tag, data->map, 1155 BUS_DMASYNC_POSTWRITE); 1156 bus_dmamap_unload(ring->data_tag, data->map); 1157 m_freem(data->m); 1158 data->m = NULL; 1159 skip: 1160 ring->queued--; 1161 KKASSERT(ring->queued >= 0); 1162 ring->next = (ring->next + 1) % sc->sc_tx_ring_count; 1163 } 1164 1165 if (sc->sc_tx_ring_count - ring->queued >= 1166 sc->sc_tx_spare + NFE_NSEG_RSVD) 1167 ifp->if_flags &= ~IFF_OACTIVE; 1168 1169 if (ring->queued == 0) 1170 ifp->if_timer = 0; 1171 1172 if (start && !ifq_is_empty(&ifp->if_snd)) 1173 if_devstart(ifp); 1174 1175 if (data != NULL) 1176 return 1; 1177 else 1178 return 0; 1179 } 1180 1181 static int 1182 nfe_encap(struct nfe_softc *sc, struct nfe_tx_ring *ring, struct mbuf *m0) 1183 { 1184 struct nfe_dma_ctx ctx; 1185 bus_dma_segment_t segs[NFE_MAX_SCATTER]; 1186 struct nfe_tx_data *data, *data_map; 1187 bus_dmamap_t map; 1188 struct nfe_desc64 *desc64 = NULL; 1189 struct nfe_desc32 *desc32 = NULL; 1190 uint16_t flags = 0; 1191 uint32_t vtag = 0; 1192 int error, i, j, maxsegs; 1193 1194 data = &ring->data[ring->cur]; 1195 map = data->map; 1196 data_map = data; /* Remember who owns the DMA map */ 1197 1198 maxsegs = (sc->sc_tx_ring_count - ring->queued) - NFE_NSEG_RSVD; 1199 if (maxsegs > NFE_MAX_SCATTER) 1200 maxsegs = NFE_MAX_SCATTER; 1201 KASSERT(maxsegs >= sc->sc_tx_spare, 1202 ("no enough segments %d,%d\n", maxsegs, sc->sc_tx_spare)); 1203 1204 ctx.nsegs = maxsegs; 1205 ctx.segs = segs; 1206 error = bus_dmamap_load_mbuf(ring->data_tag, map, m0, 1207 nfe_buf_dma_addr, &ctx, BUS_DMA_NOWAIT); 1208 if (!error && ctx.nsegs == 0) { 1209 bus_dmamap_unload(ring->data_tag, map); 1210 error = EFBIG; 1211 } 1212 if (error && error != EFBIG) { 1213 if_printf(&sc->arpcom.ac_if, "could not map TX mbuf\n"); 1214 goto back; 1215 } 1216 if (error) { /* error 
== EFBIG */ 1217 struct mbuf *m_new; 1218 1219 m_new = m_defrag(m0, MB_DONTWAIT); 1220 if (m_new == NULL) { 1221 if_printf(&sc->arpcom.ac_if, 1222 "could not defrag TX mbuf\n"); 1223 error = ENOBUFS; 1224 goto back; 1225 } else { 1226 m0 = m_new; 1227 } 1228 1229 ctx.nsegs = maxsegs; 1230 ctx.segs = segs; 1231 error = bus_dmamap_load_mbuf(ring->data_tag, map, m0, 1232 nfe_buf_dma_addr, &ctx, 1233 BUS_DMA_NOWAIT); 1234 if (error || ctx.nsegs == 0) { 1235 if (!error) { 1236 bus_dmamap_unload(ring->data_tag, map); 1237 error = EFBIG; 1238 } 1239 if_printf(&sc->arpcom.ac_if, 1240 "could not map defraged TX mbuf\n"); 1241 goto back; 1242 } 1243 } 1244 1245 error = 0; 1246 1247 /* setup h/w VLAN tagging */ 1248 if (m0->m_flags & M_VLANTAG) 1249 vtag = m0->m_pkthdr.ether_vlantag; 1250 1251 if (sc->arpcom.ac_if.if_capenable & IFCAP_TXCSUM) { 1252 if (m0->m_pkthdr.csum_flags & CSUM_IP) 1253 flags |= NFE_TX_IP_CSUM; 1254 if (m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) 1255 flags |= NFE_TX_TCP_CSUM; 1256 } 1257 1258 /* 1259 * XXX urm. somebody is unaware of how hardware works. You 1260 * absolutely CANNOT set NFE_TX_VALID on the next descriptor in 1261 * the ring until the entire chain is actually *VALID*. Otherwise 1262 * the hardware may encounter a partially initialized chain that 1263 * is marked as being ready to go when it in fact is not ready to 1264 * go. 1265 */ 1266 1267 for (i = 0; i < ctx.nsegs; i++) { 1268 j = (ring->cur + i) % sc->sc_tx_ring_count; 1269 data = &ring->data[j]; 1270 1271 if (sc->sc_caps & NFE_40BIT_ADDR) { 1272 desc64 = &ring->desc64[j]; 1273 #if defined(__LP64__) 1274 desc64->physaddr[0] = 1275 htole32(segs[i].ds_addr >> 32); 1276 #endif 1277 desc64->physaddr[1] = 1278 htole32(segs[i].ds_addr & 0xffffffff); 1279 desc64->length = htole16(segs[i].ds_len - 1); 1280 desc64->vtag = htole32(vtag); 1281 desc64->flags = htole16(flags); 1282 } else { 1283 desc32 = &ring->desc32[j]; 1284 desc32->physaddr = htole32(segs[i].ds_addr); 1285 desc32->length = htole16(segs[i].ds_len - 1); 1286 desc32->flags = htole16(flags); 1287 } 1288 1289 /* csum flags and vtag belong to the first fragment only */ 1290 flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM); 1291 vtag = 0; 1292 1293 ring->queued++; 1294 KKASSERT(ring->queued <= sc->sc_tx_ring_count); 1295 } 1296 1297 /* the whole mbuf chain has been DMA mapped, fix last descriptor */ 1298 if (sc->sc_caps & NFE_40BIT_ADDR) { 1299 desc64->flags |= htole16(NFE_TX_LASTFRAG_V2); 1300 } else { 1301 if (sc->sc_caps & NFE_JUMBO_SUP) 1302 flags = NFE_TX_LASTFRAG_V2; 1303 else 1304 flags = NFE_TX_LASTFRAG_V1; 1305 desc32->flags |= htole16(flags); 1306 } 1307 1308 /* 1309 * Set NFE_TX_VALID backwards so the hardware doesn't see the 1310 * whole mess until the first descriptor in the map is flagged. 
1311 */ 1312 for (i = ctx.nsegs - 1; i >= 0; --i) { 1313 j = (ring->cur + i) % sc->sc_tx_ring_count; 1314 if (sc->sc_caps & NFE_40BIT_ADDR) { 1315 desc64 = &ring->desc64[j]; 1316 desc64->flags |= htole16(NFE_TX_VALID); 1317 } else { 1318 desc32 = &ring->desc32[j]; 1319 desc32->flags |= htole16(NFE_TX_VALID); 1320 } 1321 } 1322 ring->cur = (ring->cur + ctx.nsegs) % sc->sc_tx_ring_count; 1323 1324 /* Exchange DMA map */ 1325 data_map->map = data->map; 1326 data->map = map; 1327 data->m = m0; 1328 1329 bus_dmamap_sync(ring->data_tag, map, BUS_DMASYNC_PREWRITE); 1330 back: 1331 if (error) 1332 m_freem(m0); 1333 return error; 1334 } 1335 1336 static void 1337 nfe_start(struct ifnet *ifp) 1338 { 1339 struct nfe_softc *sc = ifp->if_softc; 1340 struct nfe_tx_ring *ring = &sc->txq; 1341 int count = 0, oactive = 0; 1342 struct mbuf *m0; 1343 1344 ASSERT_SERIALIZED(ifp->if_serializer); 1345 1346 if ((ifp->if_flags & (IFF_OACTIVE | IFF_RUNNING)) != IFF_RUNNING) 1347 return; 1348 1349 for (;;) { 1350 int error; 1351 1352 if (sc->sc_tx_ring_count - ring->queued < 1353 sc->sc_tx_spare + NFE_NSEG_RSVD) { 1354 if (oactive) { 1355 ifp->if_flags |= IFF_OACTIVE; 1356 break; 1357 } 1358 1359 nfe_txeof(sc, 0); 1360 oactive = 1; 1361 continue; 1362 } 1363 1364 m0 = ifq_dequeue(&ifp->if_snd, NULL); 1365 if (m0 == NULL) 1366 break; 1367 1368 ETHER_BPF_MTAP(ifp, m0); 1369 1370 error = nfe_encap(sc, ring, m0); 1371 if (error) { 1372 ifp->if_oerrors++; 1373 if (error == EFBIG) { 1374 if (oactive) { 1375 ifp->if_flags |= IFF_OACTIVE; 1376 break; 1377 } 1378 nfe_txeof(sc, 0); 1379 oactive = 1; 1380 } 1381 continue; 1382 } else { 1383 oactive = 0; 1384 } 1385 ++count; 1386 1387 /* 1388 * NOTE: 1389 * `m0' may be freed in nfe_encap(), so 1390 * it should not be touched any more. 1391 */ 1392 } 1393 if (count == 0) /* nothing sent */ 1394 return; 1395 1396 /* Sync TX descriptor ring */ 1397 bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE); 1398 1399 /* Kick Tx */ 1400 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl); 1401 1402 /* 1403 * Set a timeout in case the chip goes out to lunch. 1404 */ 1405 ifp->if_timer = 5; 1406 } 1407 1408 static void 1409 nfe_watchdog(struct ifnet *ifp) 1410 { 1411 struct nfe_softc *sc = ifp->if_softc; 1412 1413 ASSERT_SERIALIZED(ifp->if_serializer); 1414 1415 if (ifp->if_flags & IFF_RUNNING) { 1416 if_printf(ifp, "watchdog timeout - lost interrupt recovered\n"); 1417 nfe_txeof(sc, 1); 1418 return; 1419 } 1420 1421 if_printf(ifp, "watchdog timeout\n"); 1422 1423 nfe_init(ifp->if_softc); 1424 1425 ifp->if_oerrors++; 1426 } 1427 1428 static void 1429 nfe_init(void *xsc) 1430 { 1431 struct nfe_softc *sc = xsc; 1432 struct ifnet *ifp = &sc->arpcom.ac_if; 1433 uint32_t tmp; 1434 int error; 1435 1436 ASSERT_SERIALIZED(ifp->if_serializer); 1437 1438 nfe_stop(sc); 1439 1440 if ((sc->sc_caps & NFE_NO_PWRCTL) == 0) 1441 nfe_mac_reset(sc); 1442 1443 /* 1444 * NOTE: 1445 * Switching between jumbo frames and normal frames should 1446 * be done _after_ nfe_stop() but _before_ nfe_init_rx_ring(). 
1447 */ 1448 if (ifp->if_mtu > ETHERMTU) { 1449 sc->sc_flags |= NFE_F_USE_JUMBO; 1450 sc->rxq.bufsz = NFE_JBYTES; 1451 sc->sc_tx_spare = NFE_NSEG_SPARE_JUMBO; 1452 if (bootverbose) 1453 if_printf(ifp, "use jumbo frames\n"); 1454 } else { 1455 sc->sc_flags &= ~NFE_F_USE_JUMBO; 1456 sc->rxq.bufsz = MCLBYTES; 1457 sc->sc_tx_spare = NFE_NSEG_SPARE; 1458 if (bootverbose) 1459 if_printf(ifp, "use non-jumbo frames\n"); 1460 } 1461 1462 error = nfe_init_tx_ring(sc, &sc->txq); 1463 if (error) { 1464 nfe_stop(sc); 1465 return; 1466 } 1467 1468 error = nfe_init_rx_ring(sc, &sc->rxq); 1469 if (error) { 1470 nfe_stop(sc); 1471 return; 1472 } 1473 1474 NFE_WRITE(sc, NFE_TX_POLL, 0); 1475 NFE_WRITE(sc, NFE_STATUS, 0); 1476 1477 sc->rxtxctl = NFE_RXTX_BIT2 | sc->rxtxctl_desc; 1478 1479 if (ifp->if_capenable & IFCAP_RXCSUM) 1480 sc->rxtxctl |= NFE_RXTX_RXCSUM; 1481 1482 /* 1483 * Although the adapter is capable of stripping VLAN tags from received 1484 * frames (NFE_RXTX_VTAG_STRIP), we do not enable this functionality on 1485 * purpose. This will be done in software by our network stack. 1486 */ 1487 if (sc->sc_caps & NFE_HW_VLAN) 1488 sc->rxtxctl |= NFE_RXTX_VTAG_INSERT; 1489 1490 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl); 1491 DELAY(10); 1492 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl); 1493 1494 if (sc->sc_caps & NFE_HW_VLAN) 1495 NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE); 1496 1497 NFE_WRITE(sc, NFE_SETUP_R6, 0); 1498 1499 /* set MAC address */ 1500 nfe_set_macaddr(sc, sc->arpcom.ac_enaddr); 1501 1502 /* tell MAC where rings are in memory */ 1503 #ifdef __LP64__ 1504 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32); 1505 #endif 1506 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff); 1507 #ifdef __LP64__ 1508 NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32); 1509 #endif 1510 NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff); 1511 1512 NFE_WRITE(sc, NFE_RING_SIZE, 1513 (sc->sc_rx_ring_count - 1) << 16 | 1514 (sc->sc_tx_ring_count - 1)); 1515 1516 NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz); 1517 1518 /* force MAC to wakeup */ 1519 tmp = NFE_READ(sc, NFE_PWR_STATE); 1520 NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP); 1521 DELAY(10); 1522 tmp = NFE_READ(sc, NFE_PWR_STATE); 1523 NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID); 1524 1525 NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC); 1526 NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC); 1527 NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC); 1528 1529 /* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */ 1530 NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC); 1531 1532 NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC); 1533 1534 sc->rxtxctl &= ~NFE_RXTX_BIT2; 1535 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl); 1536 DELAY(10); 1537 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl); 1538 1539 /* set Rx filter */ 1540 nfe_setmulti(sc); 1541 1542 nfe_ifmedia_upd(ifp); 1543 1544 /* enable Rx */ 1545 NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START); 1546 1547 /* enable Tx */ 1548 NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START); 1549 1550 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 1551 1552 #ifdef DEVICE_POLLING 1553 if ((ifp->if_flags & IFF_POLLING)) 1554 nfe_disable_intrs(sc); 1555 else 1556 #endif 1557 nfe_enable_intrs(sc); 1558 1559 callout_reset(&sc->sc_tick_ch, hz, nfe_tick, sc); 1560 1561 ifp->if_flags |= IFF_RUNNING; 1562 ifp->if_flags &= ~IFF_OACTIVE; 1563 1564 /* 1565 * If we had stuff in the tx ring before its all cleaned out now 1566 * so we are not going to get an interrupt, jump-start any 
pending 1567 * output. 1568 */ 1569 if (!ifq_is_empty(&ifp->if_snd)) 1570 if_devstart(ifp); 1571 } 1572 1573 static void 1574 nfe_stop(struct nfe_softc *sc) 1575 { 1576 struct ifnet *ifp = &sc->arpcom.ac_if; 1577 uint32_t rxtxctl = sc->rxtxctl_desc | NFE_RXTX_BIT2; 1578 int i; 1579 1580 ASSERT_SERIALIZED(ifp->if_serializer); 1581 1582 callout_stop(&sc->sc_tick_ch); 1583 1584 ifp->if_timer = 0; 1585 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1586 sc->sc_flags &= ~NFE_F_IRQ_TIMER; 1587 1588 #define WAITMAX 50000 1589 1590 /* 1591 * Abort Tx 1592 */ 1593 NFE_WRITE(sc, NFE_TX_CTL, 0); 1594 for (i = 0; i < WAITMAX; ++i) { 1595 DELAY(100); 1596 if ((NFE_READ(sc, NFE_TX_STATUS) & NFE_TX_STATUS_BUSY) == 0) 1597 break; 1598 } 1599 if (i == WAITMAX) 1600 if_printf(ifp, "can't stop TX\n"); 1601 DELAY(100); 1602 1603 /* 1604 * Disable Rx 1605 */ 1606 NFE_WRITE(sc, NFE_RX_CTL, 0); 1607 for (i = 0; i < WAITMAX; ++i) { 1608 DELAY(100); 1609 if ((NFE_READ(sc, NFE_RX_STATUS) & NFE_RX_STATUS_BUSY) == 0) 1610 break; 1611 } 1612 if (i == WAITMAX) 1613 if_printf(ifp, "can't stop RX\n"); 1614 DELAY(100); 1615 1616 #undef WAITMAX 1617 1618 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | rxtxctl); 1619 DELAY(10); 1620 NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl); 1621 1622 /* Disable interrupts */ 1623 NFE_WRITE(sc, NFE_IRQ_MASK, 0); 1624 1625 /* Reset Tx and Rx rings */ 1626 nfe_reset_tx_ring(sc, &sc->txq); 1627 nfe_reset_rx_ring(sc, &sc->rxq); 1628 } 1629 1630 static int 1631 nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1632 { 1633 int i, j, error, descsize; 1634 void **desc; 1635 1636 if (sc->sc_caps & NFE_40BIT_ADDR) { 1637 desc = (void **)&ring->desc64; 1638 descsize = sizeof(struct nfe_desc64); 1639 } else { 1640 desc = (void **)&ring->desc32; 1641 descsize = sizeof(struct nfe_desc32); 1642 } 1643 1644 ring->bufsz = MCLBYTES; 1645 ring->cur = ring->next = 0; 1646 1647 error = bus_dma_tag_create(NULL, PAGE_SIZE, 0, 1648 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, 1649 NULL, NULL, 1650 sc->sc_rx_ring_count * descsize, 1, 1651 BUS_SPACE_MAXSIZE_32BIT, 1652 0, &ring->tag); 1653 if (error) { 1654 if_printf(&sc->arpcom.ac_if, 1655 "could not create desc RX DMA tag\n"); 1656 return error; 1657 } 1658 1659 error = bus_dmamem_alloc(ring->tag, desc, BUS_DMA_WAITOK | BUS_DMA_ZERO, 1660 &ring->map); 1661 if (error) { 1662 if_printf(&sc->arpcom.ac_if, 1663 "could not allocate RX desc DMA memory\n"); 1664 bus_dma_tag_destroy(ring->tag); 1665 ring->tag = NULL; 1666 return error; 1667 } 1668 1669 error = bus_dmamap_load(ring->tag, ring->map, *desc, 1670 sc->sc_rx_ring_count * descsize, 1671 nfe_ring_dma_addr, &ring->physaddr, 1672 BUS_DMA_WAITOK); 1673 if (error) { 1674 if_printf(&sc->arpcom.ac_if, 1675 "could not load RX desc DMA map\n"); 1676 bus_dmamem_free(ring->tag, *desc, ring->map); 1677 bus_dma_tag_destroy(ring->tag); 1678 ring->tag = NULL; 1679 return error; 1680 } 1681 1682 if (sc->sc_caps & NFE_JUMBO_SUP) { 1683 ring->jbuf = 1684 kmalloc(sizeof(struct nfe_jbuf) * NFE_JPOOL_COUNT(sc), 1685 M_DEVBUF, M_WAITOK | M_ZERO); 1686 1687 error = nfe_jpool_alloc(sc, ring); 1688 if (error) { 1689 if_printf(&sc->arpcom.ac_if, 1690 "could not allocate jumbo frames\n"); 1691 kfree(ring->jbuf, M_DEVBUF); 1692 ring->jbuf = NULL; 1693 /* Allow jumbo frame allocation to fail */ 1694 } 1695 } 1696 1697 ring->data = kmalloc(sizeof(struct nfe_rx_data) * sc->sc_rx_ring_count, 1698 M_DEVBUF, M_WAITOK | M_ZERO); 1699 1700 error = bus_dma_tag_create(NULL, 1, 0, 1701 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, 1702 NULL, 
				   NULL,
				   MCLBYTES, 1, BUS_SPACE_MAXSIZE_32BIT,
				   BUS_DMA_ALLOCNOW, &ring->data_tag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create RX mbuf DMA tag\n");
		return error;
	}

	/* Create a spare RX mbuf DMA map */
	error = bus_dmamap_create(ring->data_tag, 0, &ring->data_tmpmap);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create spare RX mbuf DMA map\n");
		bus_dma_tag_destroy(ring->data_tag);
		ring->data_tag = NULL;
		return error;
	}

	for (i = 0; i < sc->sc_rx_ring_count; i++) {
		error = bus_dmamap_create(ring->data_tag, 0,
					  &ring->data[i].map);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not create %dth RX mbuf DMA map\n",
				  i);
			goto fail;
		}
	}
	return 0;
fail:
	for (j = 0; j < i; ++j)
		bus_dmamap_destroy(ring->data_tag, ring->data[j].map);
	bus_dmamap_destroy(ring->data_tag, ring->data_tmpmap);
	bus_dma_tag_destroy(ring->data_tag);
	ring->data_tag = NULL;
	return error;
}

static void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < sc->sc_rx_ring_count; i++) {
		struct nfe_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			if ((sc->sc_flags & NFE_F_USE_JUMBO) == 0)
				bus_dmamap_unload(ring->data_tag, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

static int
nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < sc->sc_rx_ring_count; ++i) {
		int error;

		/* XXX should use a function pointer */
		if (sc->sc_flags & NFE_F_USE_JUMBO)
			error = nfe_newbuf_jumbo(sc, ring, i, 1);
		else
			error = nfe_newbuf_std(sc, ring, i, 1);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not allocate RX buffer\n");
			return error;
		}

		nfe_set_ready_rxdesc(sc, ring, i);
	}
	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);

	return 0;
}

static void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	if (ring->data_tag != NULL) {
		struct nfe_rx_data *data;
		int i;

		for (i = 0; i < sc->sc_rx_ring_count; i++) {
			data = &ring->data[i];

			if (data->m != NULL) {
				bus_dmamap_unload(ring->data_tag, data->map);
				m_freem(data->m);
			}
			bus_dmamap_destroy(ring->data_tag, data->map);
		}
		bus_dmamap_destroy(ring->data_tag, ring->data_tmpmap);
		bus_dma_tag_destroy(ring->data_tag);
	}

	nfe_jpool_free(sc, ring);

	if (ring->jbuf != NULL)
		kfree(ring->jbuf, M_DEVBUF);
	if (ring->data != NULL)
		kfree(ring->data, M_DEVBUF);

	if (ring->tag != NULL) {
		void *desc;

		if (sc->sc_caps & NFE_40BIT_ADDR)
			desc = ring->desc64;
		else
			desc = ring->desc32;

		bus_dmamap_unload(ring->tag, ring->map);
		bus_dmamem_free(ring->tag, desc, ring->map);
		bus_dma_tag_destroy(ring->tag);
	}
}

static struct nfe_jbuf *
nfe_jalloc(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct nfe_jbuf *jbuf;

	lwkt_serialize_enter(&sc->sc_jbuf_serializer);

	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
	if (jbuf != NULL) {
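		/*
		 * Take the first buffer off the per-ring free list and
		 * mark it in use; nfe_jfree() returns it to the list when
		 * the last reference is dropped.
		 */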
SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext); 1838 jbuf->inuse = 1; 1839 } else { 1840 if_printf(ifp, "no free jumbo buffer\n"); 1841 } 1842 1843 lwkt_serialize_exit(&sc->sc_jbuf_serializer); 1844 1845 return jbuf; 1846 } 1847 1848 static void 1849 nfe_jfree(void *arg) 1850 { 1851 struct nfe_jbuf *jbuf = arg; 1852 struct nfe_softc *sc = jbuf->sc; 1853 struct nfe_rx_ring *ring = jbuf->ring; 1854 1855 if (&ring->jbuf[jbuf->slot] != jbuf) 1856 panic("%s: free wrong jumbo buffer\n", __func__); 1857 else if (jbuf->inuse == 0) 1858 panic("%s: jumbo buffer already freed\n", __func__); 1859 1860 lwkt_serialize_enter(&sc->sc_jbuf_serializer); 1861 atomic_subtract_int(&jbuf->inuse, 1); 1862 if (jbuf->inuse == 0) 1863 SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext); 1864 lwkt_serialize_exit(&sc->sc_jbuf_serializer); 1865 } 1866 1867 static void 1868 nfe_jref(void *arg) 1869 { 1870 struct nfe_jbuf *jbuf = arg; 1871 struct nfe_rx_ring *ring = jbuf->ring; 1872 1873 if (&ring->jbuf[jbuf->slot] != jbuf) 1874 panic("%s: ref wrong jumbo buffer\n", __func__); 1875 else if (jbuf->inuse == 0) 1876 panic("%s: jumbo buffer already freed\n", __func__); 1877 1878 atomic_add_int(&jbuf->inuse, 1); 1879 } 1880 1881 static int 1882 nfe_jpool_alloc(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1883 { 1884 struct nfe_jbuf *jbuf; 1885 bus_addr_t physaddr; 1886 caddr_t buf; 1887 int i, error; 1888 1889 /* 1890 * Allocate a big chunk of DMA'able memory. 1891 */ 1892 error = bus_dma_tag_create(NULL, PAGE_SIZE, 0, 1893 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, 1894 NULL, NULL, 1895 NFE_JPOOL_SIZE(sc), 1, 1896 BUS_SPACE_MAXSIZE_32BIT, 1897 0, &ring->jtag); 1898 if (error) { 1899 if_printf(&sc->arpcom.ac_if, 1900 "could not create jumbo DMA tag\n"); 1901 return error; 1902 } 1903 1904 error = bus_dmamem_alloc(ring->jtag, (void **)&ring->jpool, 1905 BUS_DMA_WAITOK, &ring->jmap); 1906 if (error) { 1907 if_printf(&sc->arpcom.ac_if, 1908 "could not allocate jumbo DMA memory\n"); 1909 bus_dma_tag_destroy(ring->jtag); 1910 ring->jtag = NULL; 1911 return error; 1912 } 1913 1914 error = bus_dmamap_load(ring->jtag, ring->jmap, ring->jpool, 1915 NFE_JPOOL_SIZE(sc), 1916 nfe_ring_dma_addr, &physaddr, BUS_DMA_WAITOK); 1917 if (error) { 1918 if_printf(&sc->arpcom.ac_if, 1919 "could not load jumbo DMA map\n"); 1920 bus_dmamem_free(ring->jtag, ring->jpool, ring->jmap); 1921 bus_dma_tag_destroy(ring->jtag); 1922 ring->jtag = NULL; 1923 return error; 1924 } 1925 1926 /* ..and split it into 9KB chunks */ 1927 SLIST_INIT(&ring->jfreelist); 1928 1929 buf = ring->jpool; 1930 for (i = 0; i < NFE_JPOOL_COUNT(sc); i++) { 1931 jbuf = &ring->jbuf[i]; 1932 1933 jbuf->sc = sc; 1934 jbuf->ring = ring; 1935 jbuf->inuse = 0; 1936 jbuf->slot = i; 1937 jbuf->buf = buf; 1938 jbuf->physaddr = physaddr; 1939 1940 SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext); 1941 1942 buf += NFE_JBYTES; 1943 physaddr += NFE_JBYTES; 1944 } 1945 1946 return 0; 1947 } 1948 1949 static void 1950 nfe_jpool_free(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1951 { 1952 if (ring->jtag != NULL) { 1953 bus_dmamap_unload(ring->jtag, ring->jmap); 1954 bus_dmamem_free(ring->jtag, ring->jpool, ring->jmap); 1955 bus_dma_tag_destroy(ring->jtag); 1956 } 1957 } 1958 1959 static int 1960 nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring) 1961 { 1962 int i, j, error, descsize; 1963 void **desc; 1964 1965 if (sc->sc_caps & NFE_40BIT_ADDR) { 1966 desc = (void **)&ring->desc64; 1967 descsize = sizeof(struct nfe_desc64); 1968 } else { 1969 desc = (void **)&ring->desc32; 1970 
		descsize = sizeof(struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   sc->sc_tx_ring_count * descsize, 1,
				   BUS_SPACE_MAXSIZE_32BIT,
				   0, &ring->tag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create TX desc DMA tag\n");
		return error;
	}

	error = bus_dmamem_alloc(ring->tag, desc, BUS_DMA_WAITOK | BUS_DMA_ZERO,
				 &ring->map);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not allocate TX desc DMA memory\n");
		bus_dma_tag_destroy(ring->tag);
		ring->tag = NULL;
		return error;
	}

	error = bus_dmamap_load(ring->tag, ring->map, *desc,
				sc->sc_tx_ring_count * descsize,
				nfe_ring_dma_addr, &ring->physaddr,
				BUS_DMA_WAITOK);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not load TX desc DMA map\n");
		bus_dmamem_free(ring->tag, *desc, ring->map);
		bus_dma_tag_destroy(ring->tag);
		ring->tag = NULL;
		return error;
	}

	ring->data = kmalloc(sizeof(struct nfe_tx_data) * sc->sc_tx_ring_count,
			     M_DEVBUF, M_WAITOK | M_ZERO);

	error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   NFE_JBYTES, NFE_MAX_SCATTER,
				   BUS_SPACE_MAXSIZE_32BIT,
				   BUS_DMA_ALLOCNOW, &ring->data_tag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create TX buf DMA tag\n");
		return error;
	}

	for (i = 0; i < sc->sc_tx_ring_count; i++) {
		error = bus_dmamap_create(ring->data_tag, 0,
					  &ring->data[i].map);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not create %dth TX buf DMA map\n", i);
			goto fail;
		}
	}

	return 0;
fail:
	for (j = 0; j < i; ++j)
		bus_dmamap_destroy(ring->data_tag, ring->data[j].map);
	bus_dma_tag_destroy(ring->data_tag);
	ring->data_tag = NULL;
	return error;
}

static void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i;

	for (i = 0; i < sc->sc_tx_ring_count; i++) {
		struct nfe_tx_data *data = &ring->data[i];

		if (sc->sc_caps & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_tag, data->map,
					BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_tag, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

static int
nfe_init_tx_ring(struct nfe_softc *sc __unused,
		 struct nfe_tx_ring *ring __unused)
{
	return 0;
}

static void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	if (ring->data_tag != NULL) {
		struct nfe_tx_data *data;
		int i;

		for (i = 0; i < sc->sc_tx_ring_count; ++i) {
			data = &ring->data[i];

			if (data->m != NULL) {
				bus_dmamap_unload(ring->data_tag, data->map);
				m_freem(data->m);
			}
			bus_dmamap_destroy(ring->data_tag, data->map);
		}

		bus_dma_tag_destroy(ring->data_tag);
	}

	if (ring->data != NULL)
		kfree(ring->data, M_DEVBUF);

	if (ring->tag != NULL) {
		void *desc;

		if (sc->sc_caps & NFE_40BIT_ADDR)
			desc = ring->desc64;
		else
			desc = ring->desc32;

		bus_dmamap_unload(ring->tag, ring->map);
		bus_dmamem_free(ring->tag, desc, ring->map);
		bus_dma_tag_destroy(ring->tag);
	}
}

static int
nfe_ifmedia_upd(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	ASSERT_SERIALIZED(ifp->if_serializer);

	if (mii->mii_instance != 0) {
		struct mii_softc *miisc;

		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return 0;
}

static void
nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	ASSERT_SERIALIZED(ifp->if_serializer);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

static void
nfe_setmulti(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
		goto done;
	}

	/*
	 * Compute an address/mask pair covering all joined multicast
	 * groups: addr accumulates the bits that are set in every group
	 * address, mask ends up with the bit positions on which all of
	 * them agree.
	 */
	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		caddr_t maddr;

		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		maddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &= maddr[i];
			mask[i] &= ~maddr[i];
		}
	}

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
	    mask[5] << 8 | mask[4]);

	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

static void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t lo, hi;

	lo = NFE_READ(sc, NFE_MACADDR_LO);
	hi = NFE_READ(sc, NFE_MACADDR_HI);
	if (sc->sc_caps & NFE_FIX_EADDR) {
		addr[0] = (lo >> 8) & 0xff;
		addr[1] = (lo & 0xff);

		addr[2] = (hi >> 24) & 0xff;
		addr[3] = (hi >> 16) & 0xff;
		addr[4] = (hi >> 8) & 0xff;
		addr[5] = (hi & 0xff);
	} else {
		addr[0] = (hi & 0xff);
		addr[1] = (hi >> 8) & 0xff;
		addr[2] = (hi >> 16) & 0xff;
		addr[3] = (hi >> 24) & 0xff;

		addr[4] = (lo & 0xff);
		addr[5] = (lo >> 8) & 0xff;
	}
}

static void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

static void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	lwkt_serialize_enter(ifp->if_serializer);

	mii_tick(mii);
	callout_reset(&sc->sc_tick_ch, hz, nfe_tick, sc);

	lwkt_serialize_exit(ifp->if_serializer);
}

static void
nfe_ring_dma_addr(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	if (error)
		return;

	KASSERT(nseg == 1, ("too many segments, should be 1\n"));

	*((uint32_t *)arg) = seg->ds_addr;
}

static void
nfe_buf_dma_addr(void *arg, bus_dma_segment_t *segs, int nsegs,
    bus_size_t mapsz __unused, int error)
{
	struct nfe_dma_ctx *ctx = arg;
	int i;

	if (error)
		return;

	if (nsegs > ctx->nsegs) {
		ctx->nsegs = 0;
		return;
	}

	ctx->nsegs = nsegs;
	for (i = 0; i < nsegs; ++i)
		ctx->segs[i] = segs[i];
}

static int
nfe_newbuf_std(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
    int wait)
{
	struct nfe_rx_data *data = &ring->data[idx];
	struct nfe_dma_ctx ctx;
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	struct mbuf *m;
	int error;

	m = m_getcl(wait ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return ENOBUFS;
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	ctx.nsegs = 1;
	ctx.segs = &seg;
	error = bus_dmamap_load_mbuf(ring->data_tag, ring->data_tmpmap,
	    m, nfe_buf_dma_addr, &ctx,
	    wait ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT);
	if (error || ctx.nsegs == 0) {
		if (!error) {
			bus_dmamap_unload(ring->data_tag, ring->data_tmpmap);
			error = EFBIG;
			if_printf(&sc->arpcom.ac_if, "too many segments?!\n");
		}
		m_freem(m);

		if (wait) {
			if_printf(&sc->arpcom.ac_if,
			    "could not map RX mbuf %d\n", error);
		}
		return error;
	}

	/* Unload originally mapped mbuf */
	bus_dmamap_unload(ring->data_tag, data->map);

	/* Swap this DMA map with tmp DMA map */
	map = data->map;
	data->map = ring->data_tmpmap;
	ring->data_tmpmap = map;

	/* Caller is assumed to have collected the old mbuf */
	data->m = m;

	nfe_set_paddr_rxdesc(sc, ring, idx, seg.ds_addr);

	bus_dmamap_sync(ring->data_tag, data->map, BUS_DMASYNC_PREREAD);
	return 0;
}

static int
nfe_newbuf_jumbo(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
    int wait)
{
	struct nfe_rx_data *data = &ring->data[idx];
	struct nfe_jbuf *jbuf;
	struct mbuf *m;

	MGETHDR(m, wait ? MB_WAIT : MB_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	jbuf = nfe_jalloc(sc);
	if (jbuf == NULL) {
		m_freem(m);
		if_printf(&sc->arpcom.ac_if, "jumbo allocation failed "
		    "-- packet dropped!\n");
		return ENOBUFS;
	}

	/* Attach the jumbo buffer to the mbuf as external storage */
	m->m_ext.ext_arg = jbuf;
	m->m_ext.ext_buf = jbuf->buf;
	m->m_ext.ext_free = nfe_jfree;
	m->m_ext.ext_ref = nfe_jref;
	m->m_ext.ext_size = NFE_JBYTES;

	m->m_data = m->m_ext.ext_buf;
	m->m_flags |= M_EXT;
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

	/* Caller is assumed to have collected the old mbuf */
	data->m = m;

	nfe_set_paddr_rxdesc(sc, ring, idx, jbuf->physaddr);

	bus_dmamap_sync(ring->jtag, ring->jmap, BUS_DMASYNC_PREREAD);
	return 0;
}

static void
nfe_set_paddr_rxdesc(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
    bus_addr_t physaddr)
{
	if (sc->sc_caps & NFE_40BIT_ADDR) {
		struct nfe_desc64 *desc64 = &ring->desc64[idx];

#if defined(__LP64__)
		desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
		desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
	} else {
		struct nfe_desc32 *desc32 = &ring->desc32[idx];

		desc32->physaddr = htole32(physaddr);
	}
}

static void
nfe_set_ready_rxdesc(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx)
{
	if (sc->sc_caps & NFE_40BIT_ADDR) {
		struct nfe_desc64 *desc64 = &ring->desc64[idx];

		desc64->length = htole16(ring->bufsz);
		desc64->flags = htole16(NFE_RX_READY);
	} else {
		struct nfe_desc32 *desc32 = &ring->desc32[idx];

		desc32->length = htole16(ring->bufsz);
		desc32->flags = htole16(NFE_RX_READY);
	}
}

/*
 * Sysctl handler for the interrupt moderation time.  A negative value
 * selects dynamic interrupt moderation (NFE_F_DYN_IM); its magnitude is
 * used as the moderation time.
 */
static int
nfe_sysctl_imtime(SYSCTL_HANDLER_ARGS)
{
	struct nfe_softc *sc = arg1;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	uint32_t flags;
	int error, v;

	lwkt_serialize_enter(ifp->if_serializer);

	flags = sc->sc_flags & ~NFE_F_DYN_IM;
	v = sc->sc_imtime;
	if (sc->sc_flags & NFE_F_DYN_IM)
		v = -v;

	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;

	if (v < 0) {
		flags |= NFE_F_DYN_IM;
		v = -v;
	}

	if (v != sc->sc_imtime || (flags ^ sc->sc_flags)) {
		int old_imtime = sc->sc_imtime;
		uint32_t old_flags = sc->sc_flags;

		sc->sc_imtime = v;
		sc->sc_flags = flags;
		sc->sc_irq_enable = NFE_IRQ_ENABLE(sc);

		if ((ifp->if_flags & (IFF_POLLING | IFF_RUNNING))
		    == IFF_RUNNING) {
			if (old_imtime * sc->sc_imtime == 0 ||
			    (old_flags ^ sc->sc_flags)) {
				ifp->if_init(sc);
			} else {
				NFE_WRITE(sc, NFE_IMTIMER,
				    NFE_IMTIME(sc->sc_imtime));
			}
		}
	}
back:
	lwkt_serialize_exit(ifp->if_serializer);
	return error;
}

static void
nfe_powerup(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);
	uint32_t pwr_state;
	uint16_t did;

	/*
	 * Bring MAC and PHY out of low power state
	 */

	pwr_state = NFE_READ(sc, NFE_PWR_STATE2) & ~NFE_PWRUP_MASK;

	did = pci_get_device(dev);
	if ((did == PCI_PRODUCT_NVIDIA_MCP51_LAN1 ||
	     did == PCI_PRODUCT_NVIDIA_MCP51_LAN2) &&
	    pci_get_revid(dev) >= 0xa3)
		pwr_state |= NFE_PWRUP_REV_A3;

	NFE_WRITE(sc, NFE_PWR_STATE2, pwr_state);
}

static void
nfe_mac_reset(struct nfe_softc *sc)
{
	uint32_t rxtxctl = sc->rxtxctl_desc | NFE_RXTX_BIT2;
	uint32_t macaddr_hi, macaddr_lo, tx_poll;

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | rxtxctl);

	/* Save several registers for later restoration */
	macaddr_hi = NFE_READ(sc, NFE_MACADDR_HI);
	macaddr_lo = NFE_READ(sc, NFE_MACADDR_LO);
	tx_poll = NFE_READ(sc, NFE_TX_POLL);

	NFE_WRITE(sc, NFE_MAC_RESET, NFE_RESET_ASSERT);
	DELAY(100);

	NFE_WRITE(sc, NFE_MAC_RESET, 0);
	DELAY(100);

	/* Restore saved registers */
	NFE_WRITE(sc, NFE_MACADDR_HI, macaddr_hi);
	NFE_WRITE(sc, NFE_MACADDR_LO, macaddr_lo);
	NFE_WRITE(sc, NFE_TX_POLL, tx_poll);

	NFE_WRITE(sc, NFE_RXTX_CTL, rxtxctl);
}

static void
nfe_enable_intrs(struct nfe_softc *sc)
{
	/*
	 * NFE_IMTIMER generates a periodic interrupt via NFE_IRQ_TIMER.
	 * It is unclear how wide the timer is.  Base programming does
	 * not seem to affect NFE_IRQ_TX_DONE or NFE_IRQ_RX_DONE, so
	 * we don't get any interrupt moderation.  TX moderation is
	 * possible by using the timer interrupt instead of TX_DONE.
	 *
	 * It is unclear whether there are other bits that can be
	 * set to make the NFE device actually do interrupt moderation
	 * on the RX side.
	 *
	 * For now set a 128us interval as a placeholder, but don't use
	 * the timer.
	 */
	if (sc->sc_imtime == 0)
		NFE_WRITE(sc, NFE_IMTIMER, NFE_IMTIME_DEFAULT);
	else
		NFE_WRITE(sc, NFE_IMTIMER, NFE_IMTIME(sc->sc_imtime));

	/* Enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, sc->sc_irq_enable);

	if (sc->sc_irq_enable & NFE_IRQ_TIMER)
		sc->sc_flags |= NFE_F_IRQ_TIMER;
	else
		sc->sc_flags &= ~NFE_F_IRQ_TIMER;
}

static void
nfe_disable_intrs(struct nfe_softc *sc)
{
	/* Disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);
	sc->sc_flags &= ~NFE_F_IRQ_TIMER;
}