1 /* $OpenBSD: if_nfe.c,v 1.63 2006/06/17 18:00:43 brad Exp $ */ 2 /* $DragonFly: src/sys/dev/netif/nfe/if_nfe.c,v 1.10 2007/06/17 11:38:58 sephe Exp $ */ 3 4 /* 5 * Copyright (c) 2006 The DragonFly Project. All rights reserved. 6 * 7 * This code is derived from software contributed to The DragonFly Project 8 * by Sepherosa Ziehau <sepherosa@gmail.com> and 9 * Matthew Dillon <dillon@apollo.backplane.com> 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in 19 * the documentation and/or other materials provided with the 20 * distribution. 21 * 3. Neither the name of The DragonFly Project nor the names of its 22 * contributors may be used to endorse or promote products derived 23 * from this software without specific, prior written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 26 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 28 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 29 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 30 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 31 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 32 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 33 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 34 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 35 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 36 * SUCH DAMAGE. 37 */ 38 39 /* 40 * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr> 41 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org> 42 * 43 * Permission to use, copy, modify, and distribute this software for any 44 * purpose with or without fee is hereby granted, provided that the above 45 * copyright notice and this permission notice appear in all copies. 46 * 47 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 48 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 49 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 50 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 51 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 52 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 53 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
54 */ 55 56 /* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */ 57 58 #include "opt_polling.h" 59 60 #include <sys/param.h> 61 #include <sys/endian.h> 62 #include <sys/kernel.h> 63 #include <sys/bus.h> 64 #include <sys/proc.h> 65 #include <sys/rman.h> 66 #include <sys/serialize.h> 67 #include <sys/socket.h> 68 #include <sys/sockio.h> 69 #include <sys/sysctl.h> 70 71 #include <net/ethernet.h> 72 #include <net/if.h> 73 #include <net/bpf.h> 74 #include <net/if_arp.h> 75 #include <net/if_dl.h> 76 #include <net/if_media.h> 77 #include <net/ifq_var.h> 78 #include <net/if_types.h> 79 #include <net/if_var.h> 80 #include <net/vlan/if_vlan_var.h> 81 82 #include <bus/pci/pcireg.h> 83 #include <bus/pci/pcivar.h> 84 #include <bus/pci/pcidevs.h> 85 86 #include <dev/netif/mii_layer/mii.h> 87 #include <dev/netif/mii_layer/miivar.h> 88 89 #include "miibus_if.h" 90 91 #include "if_nfereg.h" 92 #include "if_nfevar.h" 93 94 static int nfe_probe(device_t); 95 static int nfe_attach(device_t); 96 static int nfe_detach(device_t); 97 static void nfe_shutdown(device_t); 98 static int nfe_resume(device_t); 99 static int nfe_suspend(device_t); 100 101 static int nfe_miibus_readreg(device_t, int, int); 102 static void nfe_miibus_writereg(device_t, int, int, int); 103 static void nfe_miibus_statchg(device_t); 104 105 #ifdef DEVICE_POLLING 106 static void nfe_poll(struct ifnet *, enum poll_cmd, int); 107 #endif 108 static void nfe_intr(void *); 109 static int nfe_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *); 110 static void nfe_rxeof(struct nfe_softc *); 111 static void nfe_txeof(struct nfe_softc *); 112 static int nfe_encap(struct nfe_softc *, struct nfe_tx_ring *, 113 struct mbuf *); 114 static void nfe_start(struct ifnet *); 115 static void nfe_watchdog(struct ifnet *); 116 static void nfe_init(void *); 117 static void nfe_stop(struct nfe_softc *); 118 static struct nfe_jbuf *nfe_jalloc(struct nfe_softc *); 119 static void nfe_jfree(void *); 120 static void nfe_jref(void *); 121 static int nfe_jpool_alloc(struct nfe_softc *, struct nfe_rx_ring *); 122 static void nfe_jpool_free(struct nfe_softc *, struct nfe_rx_ring *); 123 static int nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); 124 static void nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); 125 static int nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); 126 static void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); 127 static int nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); 128 static void nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); 129 static int nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); 130 static void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); 131 static int nfe_ifmedia_upd(struct ifnet *); 132 static void nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *); 133 static void nfe_setmulti(struct nfe_softc *); 134 static void nfe_get_macaddr(struct nfe_softc *, uint8_t *); 135 static void nfe_set_macaddr(struct nfe_softc *, const uint8_t *); 136 static void nfe_tick(void *); 137 static void nfe_ring_dma_addr(void *, bus_dma_segment_t *, int, int); 138 static void nfe_buf_dma_addr(void *, bus_dma_segment_t *, int, bus_size_t, 139 int); 140 static void nfe_set_paddr_rxdesc(struct nfe_softc *, struct nfe_rx_ring *, 141 int, bus_addr_t); 142 static void nfe_set_ready_rxdesc(struct nfe_softc *, struct nfe_rx_ring *, 143 int); 144 static int nfe_newbuf_std(struct nfe_softc *, struct nfe_rx_ring *, int, 145 int); 146 static 
int nfe_newbuf_jumbo(struct nfe_softc *, struct nfe_rx_ring *, int, 147 int); 148 149 #define NFE_DEBUG 150 #ifdef NFE_DEBUG 151 152 static int nfe_debug = 0; 153 static int nfe_rx_ring_count = NFE_RX_RING_DEF_COUNT; 154 155 TUNABLE_INT("hw.nfe.rx_ring_count", &nfe_rx_ring_count); 156 157 SYSCTL_NODE(_hw, OID_AUTO, nfe, CTLFLAG_RD, 0, "nVidia GigE parameters"); 158 SYSCTL_INT(_hw_nfe, OID_AUTO, rx_ring_count, CTLFLAG_RD, &nfe_rx_ring_count, 159 NFE_RX_RING_DEF_COUNT, "rx ring count"); 160 SYSCTL_INT(_hw_nfe, OID_AUTO, debug, CTLFLAG_RW, &nfe_debug, 0, 161 "control debugging printfs"); 162 163 #define DPRINTF(sc, fmt, ...) do { \ 164 if (nfe_debug) { \ 165 if_printf(&(sc)->arpcom.ac_if, \ 166 fmt, __VA_ARGS__); \ 167 } \ 168 } while (0) 169 170 #define DPRINTFN(sc, lv, fmt, ...) do { \ 171 if (nfe_debug >= (lv)) { \ 172 if_printf(&(sc)->arpcom.ac_if, \ 173 fmt, __VA_ARGS__); \ 174 } \ 175 } while (0) 176 177 #else /* !NFE_DEBUG */ 178 179 #define DPRINTF(sc, fmt, ...) 180 #define DPRINTFN(sc, lv, fmt, ...) 181 182 #endif /* NFE_DEBUG */ 183 184 struct nfe_dma_ctx { 185 int nsegs; 186 bus_dma_segment_t *segs; 187 }; 188 189 static const struct nfe_dev { 190 uint16_t vid; 191 uint16_t did; 192 const char *desc; 193 } nfe_devices[] = { 194 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN, 195 "NVIDIA nForce Fast Ethernet" }, 196 197 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN, 198 "NVIDIA nForce2 Fast Ethernet" }, 199 200 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1, 201 "NVIDIA nForce3 Gigabit Ethernet" }, 202 203 /* XXX TGEN the next chip can also be found in the nForce2 Ultra 400Gb 204 chipset, and possibly also the 400R; it might be both nForce2- and 205 nForce3-based boards can use the same MCPs (= southbridges) */ 206 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2, 207 "NVIDIA nForce3 Gigabit Ethernet" }, 208 209 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3, 210 "NVIDIA nForce3 Gigabit Ethernet" }, 211 212 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4, 213 "NVIDIA nForce3 Gigabit Ethernet" }, 214 215 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5, 216 "NVIDIA nForce3 Gigabit Ethernet" }, 217 218 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1, 219 "NVIDIA CK804 Gigabit Ethernet" }, 220 221 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2, 222 "NVIDIA CK804 Gigabit Ethernet" }, 223 224 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1, 225 "NVIDIA MCP04 Gigabit Ethernet" }, 226 227 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2, 228 "NVIDIA MCP04 Gigabit Ethernet" }, 229 230 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1, 231 "NVIDIA MCP51 Gigabit Ethernet" }, 232 233 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2, 234 "NVIDIA MCP51 Gigabit Ethernet" }, 235 236 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1, 237 "NVIDIA MCP55 Gigabit Ethernet" }, 238 239 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2, 240 "NVIDIA MCP55 Gigabit Ethernet" }, 241 242 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1, 243 "NVIDIA MCP61 Gigabit Ethernet" }, 244 245 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2, 246 "NVIDIA MCP61 Gigabit Ethernet" }, 247 248 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3, 249 "NVIDIA MCP61 Gigabit Ethernet" }, 250 251 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4, 252 "NVIDIA MCP61 Gigabit Ethernet" }, 253 254 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1, 255 "NVIDIA MCP65 Gigabit Ethernet" }, 256 257 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2, 258 "NVIDIA MCP65 Gigabit Ethernet" 
}, 259 260 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3, 261 "NVIDIA MCP65 Gigabit Ethernet" }, 262 263 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4, 264 "NVIDIA MCP65 Gigabit Ethernet" }, 265 266 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1, 267 "NVIDIA MCP67 Gigabit Ethernet" }, 268 269 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2, 270 "NVIDIA MCP67 Gigabit Ethernet" }, 271 272 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3, 273 "NVIDIA MCP67 Gigabit Ethernet" }, 274 275 { PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4, 276 "NVIDIA MCP67 Gigabit Ethernet" } 277 }; 278 279 static device_method_t nfe_methods[] = { 280 /* Device interface */ 281 DEVMETHOD(device_probe, nfe_probe), 282 DEVMETHOD(device_attach, nfe_attach), 283 DEVMETHOD(device_detach, nfe_detach), 284 DEVMETHOD(device_suspend, nfe_suspend), 285 DEVMETHOD(device_resume, nfe_resume), 286 DEVMETHOD(device_shutdown, nfe_shutdown), 287 288 /* Bus interface */ 289 DEVMETHOD(bus_print_child, bus_generic_print_child), 290 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 291 292 /* MII interface */ 293 DEVMETHOD(miibus_readreg, nfe_miibus_readreg), 294 DEVMETHOD(miibus_writereg, nfe_miibus_writereg), 295 DEVMETHOD(miibus_statchg, nfe_miibus_statchg), 296 297 { 0, 0 } 298 }; 299 300 static driver_t nfe_driver = { 301 "nfe", 302 nfe_methods, 303 sizeof(struct nfe_softc) 304 }; 305 306 static devclass_t nfe_devclass; 307 308 DECLARE_DUMMY_MODULE(if_nfe); 309 MODULE_DEPEND(if_nfe, miibus, 1, 1, 1); 310 DRIVER_MODULE(if_nfe, pci, nfe_driver, nfe_devclass, 0, 0); 311 DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0); 312 313 static int 314 nfe_probe(device_t dev) 315 { 316 const struct nfe_dev *n; 317 uint16_t vid, did; 318 319 vid = pci_get_vendor(dev); 320 did = pci_get_device(dev); 321 for (n = nfe_devices; n->desc != NULL; ++n) { 322 if (vid == n->vid && did == n->did) { 323 struct nfe_softc *sc = device_get_softc(dev); 324 325 switch (did) { 326 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2: 327 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3: 328 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4: 329 case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5: 330 sc->sc_flags = NFE_JUMBO_SUP | 331 NFE_HW_CSUM; 332 break; 333 case PCI_PRODUCT_NVIDIA_MCP51_LAN1: 334 case PCI_PRODUCT_NVIDIA_MCP51_LAN2: 335 case PCI_PRODUCT_NVIDIA_MCP61_LAN1: 336 case PCI_PRODUCT_NVIDIA_MCP61_LAN2: 337 case PCI_PRODUCT_NVIDIA_MCP61_LAN3: 338 case PCI_PRODUCT_NVIDIA_MCP61_LAN4: 339 case PCI_PRODUCT_NVIDIA_MCP67_LAN1: 340 case PCI_PRODUCT_NVIDIA_MCP67_LAN2: 341 case PCI_PRODUCT_NVIDIA_MCP67_LAN3: 342 case PCI_PRODUCT_NVIDIA_MCP67_LAN4: 343 sc->sc_flags = NFE_40BIT_ADDR; 344 break; 345 case PCI_PRODUCT_NVIDIA_CK804_LAN1: 346 case PCI_PRODUCT_NVIDIA_CK804_LAN2: 347 case PCI_PRODUCT_NVIDIA_MCP04_LAN1: 348 case PCI_PRODUCT_NVIDIA_MCP04_LAN2: 349 case PCI_PRODUCT_NVIDIA_MCP65_LAN1: 350 case PCI_PRODUCT_NVIDIA_MCP65_LAN2: 351 case PCI_PRODUCT_NVIDIA_MCP65_LAN3: 352 case PCI_PRODUCT_NVIDIA_MCP65_LAN4: 353 sc->sc_flags = NFE_JUMBO_SUP | 354 NFE_40BIT_ADDR | 355 NFE_HW_CSUM; 356 break; 357 case PCI_PRODUCT_NVIDIA_MCP55_LAN1: 358 case PCI_PRODUCT_NVIDIA_MCP55_LAN2: 359 sc->sc_flags = NFE_JUMBO_SUP | 360 NFE_40BIT_ADDR | 361 NFE_HW_CSUM | 362 NFE_HW_VLAN; 363 break; 364 } 365 366 device_set_desc(dev, n->desc); 367 device_set_async_attach(dev, TRUE); 368 return 0; 369 } 370 } 371 return ENXIO; 372 } 373 374 static int 375 nfe_attach(device_t dev) 376 { 377 struct nfe_softc *sc = device_get_softc(dev); 378 struct ifnet *ifp = &sc->arpcom.ac_if; 379 uint8_t 
eaddr[ETHER_ADDR_LEN];
	int error;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	lwkt_serialize_init(&sc->sc_jbuf_serializer);

	sc->sc_mem_rid = PCIR_BAR(0);

#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t mem, irq;

		mem = pci_read_config(dev, sc->sc_mem_rid, 4);
		irq = pci_read_config(dev, PCIR_INTLINE, 4);

		device_printf(dev, "chip is in D%d power mode "
			      "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, sc->sc_mem_rid, mem, 4);
		pci_write_config(dev, PCIR_INTLINE, irq, 4);
	}
#endif	/* !BURN_BRIDGES */

	/* Enable bus mastering */
	pci_enable_busmaster(dev);

	/* Allocate IO memory */
	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
						&sc->sc_mem_rid, RF_ACTIVE);
	if (sc->sc_mem_res == NULL) {
		device_printf(dev, "could not allocate io memory\n");
		return ENXIO;
	}
	sc->sc_memh = rman_get_bushandle(sc->sc_mem_res);
	sc->sc_memt = rman_get_bustag(sc->sc_mem_res);

	/* Allocate IRQ */
	sc->sc_irq_rid = 0;
	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
						&sc->sc_irq_rid,
						RF_SHAREABLE | RF_ACTIVE);
	if (sc->sc_irq_res == NULL) {
		device_printf(dev, "could not allocate irq\n");
		error = ENXIO;
		goto fail;
	}

	nfe_get_macaddr(sc, eaddr);

	/*
	 * Allocate Tx and Rx rings.
	 */
	error = nfe_alloc_tx_ring(sc, &sc->txq);
	if (error) {
		device_printf(dev, "could not allocate Tx ring\n");
		goto fail;
	}

	error = nfe_alloc_rx_ring(sc, &sc->rxq);
	if (error) {
		device_printf(dev, "could not allocate Rx ring\n");
		goto fail;
	}

	error = mii_phy_probe(dev, &sc->sc_miibus, nfe_ifmedia_upd,
			      nfe_ifmedia_sts);
	if (error) {
		device_printf(dev, "MII without any phy\n");
		goto fail;
	}

	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
#ifdef DEVICE_POLLING
	ifp->if_poll = nfe_poll;
#endif
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifq_set_maxlen(&ifp->if_snd, NFE_IFQ_MAXLEN);
	ifq_set_ready(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	if (sc->sc_flags & NFE_HW_VLAN)
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;

#ifdef NFE_CSUM
	if (sc->sc_flags & NFE_HW_CSUM) {
#if 0
		ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
					IFCAP_CSUM_UDPv4;
#else
		ifp->if_capabilities |= IFCAP_HWCSUM;
		ifp->if_hwassist = CSUM_IP | CSUM_TCP | CSUM_UDP;
#endif
	}
#endif
	ifp->if_capenable = ifp->if_capabilities;

	callout_init(&sc->sc_tick_ch);

	ether_ifattach(ifp, eaddr, NULL);

	error = bus_setup_intr(dev, sc->sc_irq_res, INTR_MPSAFE, nfe_intr, sc,
			       &sc->sc_ih, ifp->if_serializer);
	if (error) {
		device_printf(dev, "could not setup intr\n");
		ether_ifdetach(ifp);
		goto fail;
	}

	return 0;
fail:
	nfe_detach(dev);
	return error;
}

static int
nfe_detach(device_t dev)
{
	struct nfe_softc *sc = device_get_softc(dev);

	if (device_is_attached(dev)) {
		struct ifnet *ifp = &sc->arpcom.ac_if;

		lwkt_serialize_enter(ifp->if_serializer);
		nfe_stop(sc);
		bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_ih);
lwkt_serialize_exit(ifp->if_serializer); 513 514 ether_ifdetach(ifp); 515 } 516 517 if (sc->sc_miibus != NULL) 518 device_delete_child(dev, sc->sc_miibus); 519 bus_generic_detach(dev); 520 521 if (sc->sc_irq_res != NULL) { 522 bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid, 523 sc->sc_irq_res); 524 } 525 526 if (sc->sc_mem_res != NULL) { 527 bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid, 528 sc->sc_mem_res); 529 } 530 531 nfe_free_tx_ring(sc, &sc->txq); 532 nfe_free_rx_ring(sc, &sc->rxq); 533 534 return 0; 535 } 536 537 static void 538 nfe_shutdown(device_t dev) 539 { 540 struct nfe_softc *sc = device_get_softc(dev); 541 struct ifnet *ifp = &sc->arpcom.ac_if; 542 543 lwkt_serialize_enter(ifp->if_serializer); 544 nfe_stop(sc); 545 lwkt_serialize_exit(ifp->if_serializer); 546 } 547 548 static int 549 nfe_suspend(device_t dev) 550 { 551 struct nfe_softc *sc = device_get_softc(dev); 552 struct ifnet *ifp = &sc->arpcom.ac_if; 553 554 lwkt_serialize_enter(ifp->if_serializer); 555 nfe_stop(sc); 556 lwkt_serialize_exit(ifp->if_serializer); 557 558 return 0; 559 } 560 561 static int 562 nfe_resume(device_t dev) 563 { 564 struct nfe_softc *sc = device_get_softc(dev); 565 struct ifnet *ifp = &sc->arpcom.ac_if; 566 567 lwkt_serialize_enter(ifp->if_serializer); 568 if (ifp->if_flags & IFF_UP) { 569 nfe_init(sc); 570 if (ifp->if_flags & IFF_RUNNING) 571 ifp->if_start(ifp); 572 } 573 lwkt_serialize_exit(ifp->if_serializer); 574 575 return 0; 576 } 577 578 static void 579 nfe_miibus_statchg(device_t dev) 580 { 581 struct nfe_softc *sc = device_get_softc(dev); 582 struct mii_data *mii = device_get_softc(sc->sc_miibus); 583 uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET; 584 585 phy = NFE_READ(sc, NFE_PHY_IFACE); 586 phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T); 587 588 seed = NFE_READ(sc, NFE_RNDSEED); 589 seed &= ~NFE_SEED_MASK; 590 591 if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) { 592 phy |= NFE_PHY_HDX; /* half-duplex */ 593 misc |= NFE_MISC1_HDX; 594 } 595 596 switch (IFM_SUBTYPE(mii->mii_media_active)) { 597 case IFM_1000_T: /* full-duplex only */ 598 link |= NFE_MEDIA_1000T; 599 seed |= NFE_SEED_1000T; 600 phy |= NFE_PHY_1000T; 601 break; 602 case IFM_100_TX: 603 link |= NFE_MEDIA_100TX; 604 seed |= NFE_SEED_100TX; 605 phy |= NFE_PHY_100TX; 606 break; 607 case IFM_10_T: 608 link |= NFE_MEDIA_10T; 609 seed |= NFE_SEED_10T; 610 break; 611 } 612 613 NFE_WRITE(sc, NFE_RNDSEED, seed); /* XXX: gigabit NICs only? 
*/ 614 615 NFE_WRITE(sc, NFE_PHY_IFACE, phy); 616 NFE_WRITE(sc, NFE_MISC1, misc); 617 NFE_WRITE(sc, NFE_LINKSPEED, link); 618 } 619 620 static int 621 nfe_miibus_readreg(device_t dev, int phy, int reg) 622 { 623 struct nfe_softc *sc = device_get_softc(dev); 624 uint32_t val; 625 int ntries; 626 627 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 628 629 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) { 630 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY); 631 DELAY(100); 632 } 633 634 NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg); 635 636 for (ntries = 0; ntries < 1000; ntries++) { 637 DELAY(100); 638 if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY)) 639 break; 640 } 641 if (ntries == 1000) { 642 DPRINTFN(sc, 2, "timeout waiting for PHY %s\n", ""); 643 return 0; 644 } 645 646 if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) { 647 DPRINTFN(sc, 2, "could not read PHY %s\n", ""); 648 return 0; 649 } 650 651 val = NFE_READ(sc, NFE_PHY_DATA); 652 if (val != 0xffffffff && val != 0) 653 sc->mii_phyaddr = phy; 654 655 DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val); 656 657 return val; 658 } 659 660 static void 661 nfe_miibus_writereg(device_t dev, int phy, int reg, int val) 662 { 663 struct nfe_softc *sc = device_get_softc(dev); 664 uint32_t ctl; 665 int ntries; 666 667 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 668 669 if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) { 670 NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY); 671 DELAY(100); 672 } 673 674 NFE_WRITE(sc, NFE_PHY_DATA, val); 675 ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg; 676 NFE_WRITE(sc, NFE_PHY_CTL, ctl); 677 678 for (ntries = 0; ntries < 1000; ntries++) { 679 DELAY(100); 680 if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY)) 681 break; 682 } 683 684 #ifdef NFE_DEBUG 685 if (ntries == 1000) 686 DPRINTFN(sc, 2, "could not write to PHY %s\n", ""); 687 #endif 688 } 689 690 #ifdef DEVICE_POLLING 691 692 static void 693 nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 694 { 695 struct nfe_softc *sc = ifp->if_softc; 696 697 switch(cmd) { 698 case POLL_REGISTER: 699 /* Disable interrupts */ 700 NFE_WRITE(sc, NFE_IRQ_MASK, 0); 701 break; 702 case POLL_DEREGISTER: 703 /* enable interrupts */ 704 NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED); 705 break; 706 case POLL_AND_CHECK_STATUS: 707 /* fall through */ 708 case POLL_ONLY: 709 if (ifp->if_flags & IFF_RUNNING) { 710 nfe_rxeof(sc); 711 nfe_txeof(sc); 712 } 713 break; 714 } 715 } 716 717 #endif 718 719 static void 720 nfe_intr(void *arg) 721 { 722 struct nfe_softc *sc = arg; 723 struct ifnet *ifp = &sc->arpcom.ac_if; 724 uint32_t r; 725 726 r = NFE_READ(sc, NFE_IRQ_STATUS); 727 if (r == 0) 728 return; /* not for us */ 729 NFE_WRITE(sc, NFE_IRQ_STATUS, r); 730 731 DPRINTFN(sc, 5, "%s: interrupt register %x\n", __func__, r); 732 733 if (r & NFE_IRQ_LINK) { 734 NFE_READ(sc, NFE_PHY_STATUS); 735 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 736 DPRINTF(sc, "link state changed %s\n", ""); 737 } 738 739 if (ifp->if_flags & IFF_RUNNING) { 740 /* check Rx ring */ 741 nfe_rxeof(sc); 742 743 /* check Tx ring */ 744 nfe_txeof(sc); 745 } 746 } 747 748 static int 749 nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr) 750 { 751 struct nfe_softc *sc = ifp->if_softc; 752 struct ifreq *ifr = (struct ifreq *)data; 753 struct mii_data *mii; 754 int error = 0, mask; 755 756 switch (cmd) { 757 case SIOCSIFMTU: 758 if (((sc->sc_flags & NFE_JUMBO_SUP) && 759 ifr->ifr_mtu > NFE_JUMBO_MTU) || 760 ((sc->sc_flags & NFE_JUMBO_SUP) == 0 && 761 ifr->ifr_mtu > ETHERMTU)) { 762 return EINVAL; 763 } 
else if (ifp->if_mtu != ifr->ifr_mtu) { 764 ifp->if_mtu = ifr->ifr_mtu; 765 nfe_init(sc); 766 } 767 break; 768 case SIOCSIFFLAGS: 769 if (ifp->if_flags & IFF_UP) { 770 /* 771 * If only the PROMISC or ALLMULTI flag changes, then 772 * don't do a full re-init of the chip, just update 773 * the Rx filter. 774 */ 775 if ((ifp->if_flags & IFF_RUNNING) && 776 ((ifp->if_flags ^ sc->sc_if_flags) & 777 (IFF_ALLMULTI | IFF_PROMISC)) != 0) { 778 nfe_setmulti(sc); 779 } else { 780 if (!(ifp->if_flags & IFF_RUNNING)) 781 nfe_init(sc); 782 } 783 } else { 784 if (ifp->if_flags & IFF_RUNNING) 785 nfe_stop(sc); 786 } 787 sc->sc_if_flags = ifp->if_flags; 788 break; 789 case SIOCADDMULTI: 790 case SIOCDELMULTI: 791 if (ifp->if_flags & IFF_RUNNING) 792 nfe_setmulti(sc); 793 break; 794 case SIOCSIFMEDIA: 795 case SIOCGIFMEDIA: 796 mii = device_get_softc(sc->sc_miibus); 797 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); 798 break; 799 case SIOCSIFCAP: 800 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 801 if (mask & IFCAP_HWCSUM) { 802 if (IFCAP_HWCSUM & ifp->if_capenable) 803 ifp->if_capenable &= ~IFCAP_HWCSUM; 804 else 805 ifp->if_capenable |= IFCAP_HWCSUM; 806 } 807 break; 808 default: 809 error = ether_ioctl(ifp, cmd, data); 810 break; 811 } 812 return error; 813 } 814 815 static void 816 nfe_rxeof(struct nfe_softc *sc) 817 { 818 struct ifnet *ifp = &sc->arpcom.ac_if; 819 struct nfe_rx_ring *ring = &sc->rxq; 820 int reap; 821 822 reap = 0; 823 bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_POSTREAD); 824 825 for (;;) { 826 struct nfe_rx_data *data = &ring->data[ring->cur]; 827 struct mbuf *m; 828 uint16_t flags; 829 int len, error; 830 831 if (sc->sc_flags & NFE_40BIT_ADDR) { 832 struct nfe_desc64 *desc64 = &ring->desc64[ring->cur]; 833 834 flags = le16toh(desc64->flags); 835 len = le16toh(desc64->length) & 0x3fff; 836 } else { 837 struct nfe_desc32 *desc32 = &ring->desc32[ring->cur]; 838 839 flags = le16toh(desc32->flags); 840 len = le16toh(desc32->length) & 0x3fff; 841 } 842 843 if (flags & NFE_RX_READY) 844 break; 845 846 reap = 1; 847 848 if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) { 849 if (!(flags & NFE_RX_VALID_V1)) 850 goto skip; 851 852 if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) { 853 flags &= ~NFE_RX_ERROR; 854 len--; /* fix buffer length */ 855 } 856 } else { 857 if (!(flags & NFE_RX_VALID_V2)) 858 goto skip; 859 860 if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) { 861 flags &= ~NFE_RX_ERROR; 862 len--; /* fix buffer length */ 863 } 864 } 865 866 if (flags & NFE_RX_ERROR) { 867 ifp->if_ierrors++; 868 goto skip; 869 } 870 871 m = data->m; 872 873 if (sc->sc_flags & NFE_USE_JUMBO) 874 error = nfe_newbuf_jumbo(sc, ring, ring->cur, 0); 875 else 876 error = nfe_newbuf_std(sc, ring, ring->cur, 0); 877 if (error) { 878 ifp->if_ierrors++; 879 goto skip; 880 } 881 882 /* finalize mbuf */ 883 m->m_pkthdr.len = m->m_len = len; 884 m->m_pkthdr.rcvif = ifp; 885 886 #ifdef notyet 887 if (sc->sc_flags & NFE_HW_CSUM) { 888 if (flags & NFE_RX_IP_CSUMOK) 889 m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK; 890 if (flags & NFE_RX_UDP_CSUMOK) 891 m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK; 892 if (flags & NFE_RX_TCP_CSUMOK) 893 m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK; 894 } 895 #elif defined(NFE_CSUM) 896 if ((sc->sc_flags & NFE_HW_CSUM) && (flags & NFE_RX_CSUMOK)) 897 m->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK; 898 #endif 899 900 ifp->if_ipackets++; 901 ifp->if_input(ifp, m); 902 skip: 903 nfe_set_ready_rxdesc(sc, ring, ring->cur); 904 sc->rxq.cur = (sc->rxq.cur + 1) % 
nfe_rx_ring_count; 905 } 906 907 if (reap) 908 bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE); 909 } 910 911 static void 912 nfe_txeof(struct nfe_softc *sc) 913 { 914 struct ifnet *ifp = &sc->arpcom.ac_if; 915 struct nfe_tx_ring *ring = &sc->txq; 916 struct nfe_tx_data *data = NULL; 917 918 bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_POSTREAD); 919 while (ring->next != ring->cur) { 920 uint16_t flags; 921 922 if (sc->sc_flags & NFE_40BIT_ADDR) 923 flags = le16toh(ring->desc64[ring->next].flags); 924 else 925 flags = le16toh(ring->desc32[ring->next].flags); 926 927 if (flags & NFE_TX_VALID) 928 break; 929 930 data = &ring->data[ring->next]; 931 932 if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) { 933 if (!(flags & NFE_TX_LASTFRAG_V1) && data->m == NULL) 934 goto skip; 935 936 if ((flags & NFE_TX_ERROR_V1) != 0) { 937 if_printf(ifp, "tx v1 error 0x%4b\n", flags, 938 NFE_V1_TXERR); 939 ifp->if_oerrors++; 940 } else { 941 ifp->if_opackets++; 942 } 943 } else { 944 if (!(flags & NFE_TX_LASTFRAG_V2) && data->m == NULL) 945 goto skip; 946 947 if ((flags & NFE_TX_ERROR_V2) != 0) { 948 if_printf(ifp, "tx v2 error 0x%4b\n", flags, 949 NFE_V2_TXERR); 950 ifp->if_oerrors++; 951 } else { 952 ifp->if_opackets++; 953 } 954 } 955 956 if (data->m == NULL) { /* should not get there */ 957 if_printf(ifp, 958 "last fragment bit w/o associated mbuf!\n"); 959 goto skip; 960 } 961 962 /* last fragment of the mbuf chain transmitted */ 963 bus_dmamap_sync(ring->data_tag, data->map, 964 BUS_DMASYNC_POSTWRITE); 965 bus_dmamap_unload(ring->data_tag, data->map); 966 m_freem(data->m); 967 data->m = NULL; 968 969 ifp->if_timer = 0; 970 skip: 971 ring->queued--; 972 KKASSERT(ring->queued >= 0); 973 ring->next = (ring->next + 1) % NFE_TX_RING_COUNT; 974 } 975 976 if (data != NULL) { /* at least one slot freed */ 977 ifp->if_flags &= ~IFF_OACTIVE; 978 ifp->if_start(ifp); 979 } 980 } 981 982 static int 983 nfe_encap(struct nfe_softc *sc, struct nfe_tx_ring *ring, struct mbuf *m0) 984 { 985 struct nfe_dma_ctx ctx; 986 bus_dma_segment_t segs[NFE_MAX_SCATTER]; 987 struct nfe_tx_data *data, *data_map; 988 bus_dmamap_t map; 989 struct nfe_desc64 *desc64 = NULL; 990 struct nfe_desc32 *desc32 = NULL; 991 uint16_t flags = 0; 992 uint32_t vtag = 0; 993 int error, i, j; 994 995 data = &ring->data[ring->cur]; 996 map = data->map; 997 data_map = data; /* Remember who owns the DMA map */ 998 999 ctx.nsegs = NFE_MAX_SCATTER; 1000 ctx.segs = segs; 1001 error = bus_dmamap_load_mbuf(ring->data_tag, map, m0, 1002 nfe_buf_dma_addr, &ctx, BUS_DMA_NOWAIT); 1003 if (error && error != EFBIG) { 1004 if_printf(&sc->arpcom.ac_if, "could not map TX mbuf\n"); 1005 goto back; 1006 } 1007 1008 if (error) { /* error == EFBIG */ 1009 struct mbuf *m_new; 1010 1011 m_new = m_defrag(m0, MB_DONTWAIT); 1012 if (m_new == NULL) { 1013 if_printf(&sc->arpcom.ac_if, 1014 "could not defrag TX mbuf\n"); 1015 error = ENOBUFS; 1016 goto back; 1017 } else { 1018 m0 = m_new; 1019 } 1020 1021 ctx.nsegs = NFE_MAX_SCATTER; 1022 ctx.segs = segs; 1023 error = bus_dmamap_load_mbuf(ring->data_tag, map, m0, 1024 nfe_buf_dma_addr, &ctx, 1025 BUS_DMA_NOWAIT); 1026 if (error) { 1027 if_printf(&sc->arpcom.ac_if, 1028 "could not map defraged TX mbuf\n"); 1029 goto back; 1030 } 1031 } 1032 1033 error = 0; 1034 1035 if (ring->queued + ctx.nsegs >= NFE_TX_RING_COUNT - 1) { 1036 bus_dmamap_unload(ring->data_tag, map); 1037 error = ENOBUFS; 1038 goto back; 1039 } 1040 1041 /* setup h/w VLAN tagging */ 1042 if ((m0->m_flags & (M_PROTO1 | M_PKTHDR)) == 
(M_PROTO1 | M_PKTHDR) && 1043 m0->m_pkthdr.rcvif != NULL && 1044 m0->m_pkthdr.rcvif->if_type == IFT_L2VLAN) { 1045 struct ifvlan *ifv = m0->m_pkthdr.rcvif->if_softc; 1046 1047 if (ifv != NULL) 1048 vtag = NFE_TX_VTAG | htons(ifv->ifv_tag); 1049 } 1050 1051 #ifdef NFE_CSUM 1052 if (m0->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT) 1053 flags |= NFE_TX_IP_CSUM; 1054 if (m0->m_pkthdr.csum_flags & (M_TCPV4_CSUM_OUT | M_UDPV4_CSUM_OUT)) 1055 flags |= NFE_TX_TCP_CSUM; 1056 #endif 1057 1058 /* 1059 * XXX urm. somebody is unaware of how hardware works. You 1060 * absolutely CANNOT set NFE_TX_VALID on the next descriptor in 1061 * the ring until the entire chain is actually *VALID*. Otherwise 1062 * the hardware may encounter a partially initialized chain that 1063 * is marked as being ready to go when it in fact is not ready to 1064 * go. 1065 */ 1066 1067 for (i = 0; i < ctx.nsegs; i++) { 1068 j = (ring->cur + i) % NFE_TX_RING_COUNT; 1069 data = &ring->data[j]; 1070 1071 if (sc->sc_flags & NFE_40BIT_ADDR) { 1072 desc64 = &ring->desc64[j]; 1073 #if defined(__LP64__) 1074 desc64->physaddr[0] = 1075 htole32(segs[i].ds_addr >> 32); 1076 #endif 1077 desc64->physaddr[1] = 1078 htole32(segs[i].ds_addr & 0xffffffff); 1079 desc64->length = htole16(segs[i].ds_len - 1); 1080 desc64->vtag = htole32(vtag); 1081 desc64->flags = htole16(flags); 1082 } else { 1083 desc32 = &ring->desc32[j]; 1084 desc32->physaddr = htole32(segs[i].ds_addr); 1085 desc32->length = htole16(segs[i].ds_len - 1); 1086 desc32->flags = htole16(flags); 1087 } 1088 1089 /* csum flags and vtag belong to the first fragment only */ 1090 flags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_CSUM); 1091 vtag = 0; 1092 1093 ring->queued++; 1094 KKASSERT(ring->queued <= NFE_TX_RING_COUNT); 1095 } 1096 1097 /* the whole mbuf chain has been DMA mapped, fix last descriptor */ 1098 if (sc->sc_flags & NFE_40BIT_ADDR) { 1099 desc64->flags |= htole16(NFE_TX_LASTFRAG_V2); 1100 } else { 1101 if (sc->sc_flags & NFE_JUMBO_SUP) 1102 flags = NFE_TX_LASTFRAG_V2; 1103 else 1104 flags = NFE_TX_LASTFRAG_V1; 1105 desc32->flags |= htole16(flags); 1106 } 1107 1108 /* 1109 * Set NFE_TX_VALID backwards so the hardware doesn't see the 1110 * whole mess until the first descriptor in the map is flagged. 1111 */ 1112 for (i = ctx.nsegs - 1; i >= 0; --i) { 1113 j = (ring->cur + i) % NFE_TX_RING_COUNT; 1114 if (sc->sc_flags & NFE_40BIT_ADDR) { 1115 desc64 = &ring->desc64[j]; 1116 desc64->flags |= htole16(NFE_TX_VALID); 1117 } else { 1118 desc32 = &ring->desc32[j]; 1119 desc32->flags |= htole16(NFE_TX_VALID); 1120 } 1121 } 1122 ring->cur = (ring->cur + ctx.nsegs) % NFE_TX_RING_COUNT; 1123 1124 /* Exchange DMA map */ 1125 data_map->map = data->map; 1126 data->map = map; 1127 data->m = m0; 1128 1129 bus_dmamap_sync(ring->data_tag, map, BUS_DMASYNC_PREWRITE); 1130 back: 1131 if (error) 1132 m_freem(m0); 1133 return error; 1134 } 1135 1136 static void 1137 nfe_start(struct ifnet *ifp) 1138 { 1139 struct nfe_softc *sc = ifp->if_softc; 1140 struct nfe_tx_ring *ring = &sc->txq; 1141 int count = 0; 1142 struct mbuf *m0; 1143 1144 if (ifp->if_flags & IFF_OACTIVE) 1145 return; 1146 1147 if (ifq_is_empty(&ifp->if_snd)) 1148 return; 1149 1150 for (;;) { 1151 m0 = ifq_dequeue(&ifp->if_snd, NULL); 1152 if (m0 == NULL) 1153 break; 1154 1155 BPF_MTAP(ifp, m0); 1156 1157 if (nfe_encap(sc, ring, m0) != 0) { 1158 ifp->if_flags |= IFF_OACTIVE; 1159 break; 1160 } 1161 ++count; 1162 1163 /* 1164 * NOTE: 1165 * `m0' may be freed in nfe_encap(), so 1166 * it should not be touched any more. 
1167 */ 1168 } 1169 if (count == 0) /* nothing sent */ 1170 return; 1171 1172 /* Sync TX descriptor ring */ 1173 bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE); 1174 1175 /* Kick Tx */ 1176 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl); 1177 1178 /* 1179 * Set a timeout in case the chip goes out to lunch. 1180 */ 1181 ifp->if_timer = 5; 1182 } 1183 1184 static void 1185 nfe_watchdog(struct ifnet *ifp) 1186 { 1187 struct nfe_softc *sc = ifp->if_softc; 1188 1189 if (ifp->if_flags & IFF_RUNNING) { 1190 if_printf(ifp, "watchdog timeout - lost interrupt recovered\n"); 1191 nfe_txeof(sc); 1192 return; 1193 } 1194 1195 if_printf(ifp, "watchdog timeout\n"); 1196 1197 nfe_init(ifp->if_softc); 1198 1199 ifp->if_oerrors++; 1200 1201 if (!ifq_is_empty(&ifp->if_snd)) 1202 ifp->if_start(ifp); 1203 } 1204 1205 static void 1206 nfe_init(void *xsc) 1207 { 1208 struct nfe_softc *sc = xsc; 1209 struct ifnet *ifp = &sc->arpcom.ac_if; 1210 uint32_t tmp; 1211 int error; 1212 1213 nfe_stop(sc); 1214 1215 /* 1216 * NOTE: 1217 * Switching between jumbo frames and normal frames should 1218 * be done _after_ nfe_stop() but _before_ nfe_init_rx_ring(). 1219 */ 1220 if (ifp->if_mtu > ETHERMTU) { 1221 sc->sc_flags |= NFE_USE_JUMBO; 1222 sc->rxq.bufsz = NFE_JBYTES; 1223 if (bootverbose) 1224 if_printf(ifp, "use jumbo frames\n"); 1225 } else { 1226 sc->sc_flags &= ~NFE_USE_JUMBO; 1227 sc->rxq.bufsz = MCLBYTES; 1228 if (bootverbose) 1229 if_printf(ifp, "use non-jumbo frames\n"); 1230 } 1231 1232 error = nfe_init_tx_ring(sc, &sc->txq); 1233 if (error) { 1234 nfe_stop(sc); 1235 return; 1236 } 1237 1238 error = nfe_init_rx_ring(sc, &sc->rxq); 1239 if (error) { 1240 nfe_stop(sc); 1241 return; 1242 } 1243 1244 NFE_WRITE(sc, NFE_TX_UNK, 0); 1245 NFE_WRITE(sc, NFE_STATUS, 0); 1246 1247 sc->rxtxctl = NFE_RXTX_BIT2; 1248 if (sc->sc_flags & NFE_40BIT_ADDR) 1249 sc->rxtxctl |= NFE_RXTX_V3MAGIC; 1250 else if (sc->sc_flags & NFE_JUMBO_SUP) 1251 sc->rxtxctl |= NFE_RXTX_V2MAGIC; 1252 #ifdef NFE_CSUM 1253 if (sc->sc_flags & NFE_HW_CSUM) 1254 sc->rxtxctl |= NFE_RXTX_RXCSUM; 1255 #endif 1256 1257 /* 1258 * Although the adapter is capable of stripping VLAN tags from received 1259 * frames (NFE_RXTX_VTAG_STRIP), we do not enable this functionality on 1260 * purpose. This will be done in software by our network stack. 
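	 * Enabling the stripping bit would additionally require the RX path
	 * to recover the stripped tag from the RX descriptor (presumably the
	 * vtag field of struct nfe_desc64) and hand it to the stack, which
	 * nfe_rxeof() does not do.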
	 */
	if (sc->sc_flags & NFE_HW_VLAN)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT;

	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

	if (sc->sc_flags & NFE_HW_VLAN)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->arpcom.ac_enaddr);

	/* tell MAC where rings are in memory */
#ifdef __LP64__
	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
#ifdef __LP64__
	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);

	NFE_WRITE(sc, NFE_RING_SIZE,
		  (nfe_rx_ring_count - 1) << 16 |
		  (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);

	/* force MAC to wakeup */
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
	DELAY(10);
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);

	/*
	 * NFE_IMTIMER generates a periodic interrupt via NFE_IRQ_TIMER.
	 * It is unclear how wide the timer is.  Base programming does
	 * not seem to affect NFE_IRQ_TX_DONE or NFE_IRQ_RX_DONE so
	 * we don't get any interrupt moderation.  TX moderation is
	 * possible by using the timer interrupt instead of TX_DONE.
	 *
	 * It is unclear whether there are other bits that can be
	 * set to make the NFE device actually do interrupt moderation
	 * on the RX side.
	 *
	 * For now set a 128uS interval as a placemark, but don't use
	 * the timer.
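	 *
	 * (A rough sketch of timer-based TX moderation, not done here:
	 * leave NFE_IRQ_TX_DONE out of the value written to NFE_IRQ_MASK
	 * and reap completed descriptors with nfe_txeof() from the
	 * periodic NFE_IRQ_TIMER interrupt instead.)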
1313 */ 1314 NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT); 1315 1316 NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC); 1317 NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC); 1318 NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC); 1319 1320 /* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */ 1321 NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC); 1322 1323 NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC); 1324 NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC); 1325 1326 sc->rxtxctl &= ~NFE_RXTX_BIT2; 1327 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl); 1328 DELAY(10); 1329 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl); 1330 1331 /* set Rx filter */ 1332 nfe_setmulti(sc); 1333 1334 nfe_ifmedia_upd(ifp); 1335 1336 /* enable Rx */ 1337 NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START); 1338 1339 /* enable Tx */ 1340 NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START); 1341 1342 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 1343 1344 #ifdef DEVICE_POLLING 1345 if ((ifp->if_flags & IFF_POLLING) == 0) 1346 #endif 1347 /* enable interrupts */ 1348 NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED); 1349 1350 callout_reset(&sc->sc_tick_ch, hz, nfe_tick, sc); 1351 1352 ifp->if_flags |= IFF_RUNNING; 1353 ifp->if_flags &= ~IFF_OACTIVE; 1354 } 1355 1356 static void 1357 nfe_stop(struct nfe_softc *sc) 1358 { 1359 struct ifnet *ifp = &sc->arpcom.ac_if; 1360 1361 callout_stop(&sc->sc_tick_ch); 1362 1363 ifp->if_timer = 0; 1364 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1365 1366 /* Abort Tx */ 1367 NFE_WRITE(sc, NFE_TX_CTL, 0); 1368 1369 /* Disable Rx */ 1370 NFE_WRITE(sc, NFE_RX_CTL, 0); 1371 1372 /* Disable interrupts */ 1373 NFE_WRITE(sc, NFE_IRQ_MASK, 0); 1374 1375 /* Reset Tx and Rx rings */ 1376 nfe_reset_tx_ring(sc, &sc->txq); 1377 nfe_reset_rx_ring(sc, &sc->rxq); 1378 } 1379 1380 static int 1381 nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1382 { 1383 int i, j, error, descsize; 1384 void **desc; 1385 1386 if (sc->sc_flags & NFE_40BIT_ADDR) { 1387 desc = (void **)&ring->desc64; 1388 descsize = sizeof(struct nfe_desc64); 1389 } else { 1390 desc = (void **)&ring->desc32; 1391 descsize = sizeof(struct nfe_desc32); 1392 } 1393 1394 ring->jbuf = kmalloc(sizeof(struct nfe_jbuf) * NFE_JPOOL_COUNT, 1395 M_DEVBUF, M_WAITOK | M_ZERO); 1396 ring->data = kmalloc(sizeof(struct nfe_rx_data) * nfe_rx_ring_count, 1397 M_DEVBUF, M_WAITOK | M_ZERO); 1398 1399 ring->bufsz = MCLBYTES; 1400 ring->cur = ring->next = 0; 1401 1402 error = bus_dma_tag_create(NULL, PAGE_SIZE, 0, 1403 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, 1404 NULL, NULL, 1405 nfe_rx_ring_count * descsize, 1, 1406 nfe_rx_ring_count * descsize, 1407 0, &ring->tag); 1408 if (error) { 1409 if_printf(&sc->arpcom.ac_if, 1410 "could not create desc RX DMA tag\n"); 1411 return error; 1412 } 1413 1414 error = bus_dmamem_alloc(ring->tag, desc, BUS_DMA_WAITOK | BUS_DMA_ZERO, 1415 &ring->map); 1416 if (error) { 1417 if_printf(&sc->arpcom.ac_if, 1418 "could not allocate RX desc DMA memory\n"); 1419 bus_dma_tag_destroy(ring->tag); 1420 ring->tag = NULL; 1421 return error; 1422 } 1423 1424 error = bus_dmamap_load(ring->tag, ring->map, *desc, 1425 nfe_rx_ring_count * descsize, 1426 nfe_ring_dma_addr, &ring->physaddr, 1427 BUS_DMA_WAITOK); 1428 if (error) { 1429 if_printf(&sc->arpcom.ac_if, 1430 "could not load RX desc DMA map\n"); 1431 bus_dmamem_free(ring->tag, *desc, ring->map); 1432 bus_dma_tag_destroy(ring->tag); 1433 ring->tag = NULL; 1434 return error; 1435 } 1436 1437 if (sc->sc_flags & NFE_JUMBO_SUP) { 1438 error = nfe_jpool_alloc(sc, ring); 1439 if (error) { 1440 
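			/*
			 * Jumbo buffer pool setup failed; the descriptor
			 * ring resources allocated above are released later
			 * by nfe_free_rx_ring() on the caller's error path.
			 */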
			if_printf(&sc->arpcom.ac_if,
				  "could not allocate jumbo frames\n");
			return error;
		}
	}

	error = bus_dma_tag_create(NULL, 1, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   MCLBYTES, 1, MCLBYTES,
				   0, &ring->data_tag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create RX mbuf DMA tag\n");
		return error;
	}

	/* Create a spare RX mbuf DMA map */
	error = bus_dmamap_create(ring->data_tag, 0, &ring->data_tmpmap);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create spare RX mbuf DMA map\n");
		bus_dma_tag_destroy(ring->data_tag);
		ring->data_tag = NULL;
		return error;
	}

	for (i = 0; i < nfe_rx_ring_count; i++) {
		error = bus_dmamap_create(ring->data_tag, 0,
					  &ring->data[i].map);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not create %dth RX mbuf DMA map\n", i);
			goto fail;
		}
	}
	return 0;
fail:
	for (j = 0; j < i; ++j)
		bus_dmamap_destroy(ring->data_tag, ring->data[j].map);
	bus_dmamap_destroy(ring->data_tag, ring->data_tmpmap);
	bus_dma_tag_destroy(ring->data_tag);
	ring->data_tag = NULL;
	return error;
}

static void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < nfe_rx_ring_count; i++) {
		struct nfe_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			if ((sc->sc_flags & NFE_USE_JUMBO) == 0)
				bus_dmamap_unload(ring->data_tag, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

static int
nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < nfe_rx_ring_count; ++i) {
		int error;

		/* XXX should use a function pointer */
		if (sc->sc_flags & NFE_USE_JUMBO)
			error = nfe_newbuf_jumbo(sc, ring, i, 1);
		else
			error = nfe_newbuf_std(sc, ring, i, 1);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not allocate RX buffer\n");
			return error;
		}

		nfe_set_ready_rxdesc(sc, ring, i);
	}
	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);

	return 0;
}

static void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	if (ring->data_tag != NULL) {
		struct nfe_rx_data *data;
		int i;

		for (i = 0; i < nfe_rx_ring_count; i++) {
			data = &ring->data[i];

			if (data->m != NULL) {
				bus_dmamap_unload(ring->data_tag, data->map);
				m_freem(data->m);
			}
			bus_dmamap_destroy(ring->data_tag, data->map);
		}
		bus_dmamap_destroy(ring->data_tag, ring->data_tmpmap);
		bus_dma_tag_destroy(ring->data_tag);
	}

	nfe_jpool_free(sc, ring);

	if (ring->jbuf != NULL)
		kfree(ring->jbuf, M_DEVBUF);
	if (ring->data != NULL)
		kfree(ring->data, M_DEVBUF);

	if (ring->tag != NULL) {
		void *desc;

		if (sc->sc_flags & NFE_40BIT_ADDR)
			desc = ring->desc64;
		else
			desc = ring->desc32;

		bus_dmamap_unload(ring->tag, ring->map);
		bus_dmamem_free(ring->tag, desc, ring->map);
		bus_dma_tag_destroy(ring->tag);
	}
}

static struct nfe_jbuf *
nfe_jalloc(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct nfe_jbuf
*jbuf; 1578 1579 lwkt_serialize_enter(&sc->sc_jbuf_serializer); 1580 1581 jbuf = SLIST_FIRST(&sc->rxq.jfreelist); 1582 if (jbuf != NULL) { 1583 SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext); 1584 jbuf->inuse = 1; 1585 } else { 1586 if_printf(ifp, "no free jumbo buffer\n"); 1587 } 1588 1589 lwkt_serialize_exit(&sc->sc_jbuf_serializer); 1590 1591 return jbuf; 1592 } 1593 1594 static void 1595 nfe_jfree(void *arg) 1596 { 1597 struct nfe_jbuf *jbuf = arg; 1598 struct nfe_softc *sc = jbuf->sc; 1599 struct nfe_rx_ring *ring = jbuf->ring; 1600 1601 if (&ring->jbuf[jbuf->slot] != jbuf) 1602 panic("%s: free wrong jumbo buffer\n", __func__); 1603 else if (jbuf->inuse == 0) 1604 panic("%s: jumbo buffer already freed\n", __func__); 1605 1606 lwkt_serialize_enter(&sc->sc_jbuf_serializer); 1607 atomic_subtract_int(&jbuf->inuse, 1); 1608 if (jbuf->inuse == 0) 1609 SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext); 1610 lwkt_serialize_exit(&sc->sc_jbuf_serializer); 1611 } 1612 1613 static void 1614 nfe_jref(void *arg) 1615 { 1616 struct nfe_jbuf *jbuf = arg; 1617 struct nfe_rx_ring *ring = jbuf->ring; 1618 1619 if (&ring->jbuf[jbuf->slot] != jbuf) 1620 panic("%s: ref wrong jumbo buffer\n", __func__); 1621 else if (jbuf->inuse == 0) 1622 panic("%s: jumbo buffer already freed\n", __func__); 1623 1624 atomic_add_int(&jbuf->inuse, 1); 1625 } 1626 1627 static int 1628 nfe_jpool_alloc(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1629 { 1630 struct nfe_jbuf *jbuf; 1631 bus_addr_t physaddr; 1632 caddr_t buf; 1633 int i, error; 1634 1635 /* 1636 * Allocate a big chunk of DMA'able memory. 1637 */ 1638 error = bus_dma_tag_create(NULL, PAGE_SIZE, 0, 1639 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, 1640 NULL, NULL, 1641 NFE_JPOOL_SIZE, 1, NFE_JPOOL_SIZE, 1642 0, &ring->jtag); 1643 if (error) { 1644 if_printf(&sc->arpcom.ac_if, 1645 "could not create jumbo DMA tag\n"); 1646 return error; 1647 } 1648 1649 error = bus_dmamem_alloc(ring->jtag, (void **)&ring->jpool, 1650 BUS_DMA_WAITOK, &ring->jmap); 1651 if (error) { 1652 if_printf(&sc->arpcom.ac_if, 1653 "could not allocate jumbo DMA memory\n"); 1654 bus_dma_tag_destroy(ring->jtag); 1655 ring->jtag = NULL; 1656 return error; 1657 } 1658 1659 error = bus_dmamap_load(ring->jtag, ring->jmap, ring->jpool, 1660 NFE_JPOOL_SIZE, nfe_ring_dma_addr, &physaddr, 1661 BUS_DMA_WAITOK); 1662 if (error) { 1663 if_printf(&sc->arpcom.ac_if, 1664 "could not load jumbo DMA map\n"); 1665 bus_dmamem_free(ring->jtag, ring->jpool, ring->jmap); 1666 bus_dma_tag_destroy(ring->jtag); 1667 ring->jtag = NULL; 1668 return error; 1669 } 1670 1671 /* ..and split it into 9KB chunks */ 1672 SLIST_INIT(&ring->jfreelist); 1673 1674 buf = ring->jpool; 1675 for (i = 0; i < NFE_JPOOL_COUNT; i++) { 1676 jbuf = &ring->jbuf[i]; 1677 1678 jbuf->sc = sc; 1679 jbuf->ring = ring; 1680 jbuf->inuse = 0; 1681 jbuf->slot = i; 1682 jbuf->buf = buf; 1683 jbuf->physaddr = physaddr; 1684 1685 SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext); 1686 1687 buf += NFE_JBYTES; 1688 physaddr += NFE_JBYTES; 1689 } 1690 1691 return 0; 1692 } 1693 1694 static void 1695 nfe_jpool_free(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1696 { 1697 if (ring->jtag != NULL) { 1698 bus_dmamap_unload(ring->jtag, ring->jmap); 1699 bus_dmamem_free(ring->jtag, ring->jpool, ring->jmap); 1700 bus_dma_tag_destroy(ring->jtag); 1701 } 1702 } 1703 1704 static int 1705 nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring) 1706 { 1707 int i, j, error, descsize; 1708 void **desc; 1709 1710 if (sc->sc_flags & NFE_40BIT_ADDR) { 1711 desc = 
(void **)&ring->desc64;
		descsize = sizeof(struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof(struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   NFE_TX_RING_COUNT * descsize, 1,
				   NFE_TX_RING_COUNT * descsize,
				   0, &ring->tag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create TX desc DMA tag\n");
		return error;
	}

	error = bus_dmamem_alloc(ring->tag, desc, BUS_DMA_WAITOK | BUS_DMA_ZERO,
				 &ring->map);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not allocate TX desc DMA memory\n");
		bus_dma_tag_destroy(ring->tag);
		ring->tag = NULL;
		return error;
	}

	error = bus_dmamap_load(ring->tag, ring->map, *desc,
				NFE_TX_RING_COUNT * descsize,
				nfe_ring_dma_addr, &ring->physaddr,
				BUS_DMA_WAITOK);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not load TX desc DMA map\n");
		bus_dmamem_free(ring->tag, *desc, ring->map);
		bus_dma_tag_destroy(ring->tag);
		ring->tag = NULL;
		return error;
	}

	error = bus_dma_tag_create(NULL, PAGE_SIZE, 0,
				   BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
				   NULL, NULL,
				   NFE_JBYTES * NFE_MAX_SCATTER,
				   NFE_MAX_SCATTER, NFE_JBYTES,
				   0, &ring->data_tag);
	if (error) {
		if_printf(&sc->arpcom.ac_if,
			  "could not create TX buf DMA tag\n");
		return error;
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(ring->data_tag, 0,
					  &ring->data[i].map);
		if (error) {
			if_printf(&sc->arpcom.ac_if,
				  "could not create %dth TX buf DMA map\n", i);
			goto fail;
		}
	}

	return 0;
fail:
	for (j = 0; j < i; ++j)
		bus_dmamap_destroy(ring->data_tag, ring->data[j].map);
	bus_dma_tag_destroy(ring->data_tag);
	ring->data_tag = NULL;
	return error;
}

static void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		struct nfe_tx_data *data = &ring->data[i];

		if (sc->sc_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		if (data->m != NULL) {
			bus_dmamap_sync(ring->data_tag, data->map,
					BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(ring->data_tag, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	bus_dmamap_sync(ring->tag, ring->map, BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

static int
nfe_init_tx_ring(struct nfe_softc *sc __unused,
		 struct nfe_tx_ring *ring __unused)
{
	return 0;
}

static void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	if (ring->data_tag != NULL) {
		struct nfe_tx_data *data;
		int i;

		for (i = 0; i < NFE_TX_RING_COUNT; ++i) {
			data = &ring->data[i];

			if (data->m != NULL) {
				bus_dmamap_unload(ring->data_tag, data->map);
				m_freem(data->m);
			}
			bus_dmamap_destroy(ring->data_tag, data->map);
		}

		bus_dma_tag_destroy(ring->data_tag);
	}

	if (ring->tag != NULL) {
		void *desc;

		if (sc->sc_flags & NFE_40BIT_ADDR)
			desc = ring->desc64;
		else
			desc = ring->desc32;

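		/* Tear down the TX descriptor ring DMA resources. */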
bus_dmamap_unload(ring->tag, ring->map); 1850 bus_dmamem_free(ring->tag, desc, ring->map); 1851 bus_dma_tag_destroy(ring->tag); 1852 } 1853 } 1854 1855 static int 1856 nfe_ifmedia_upd(struct ifnet *ifp) 1857 { 1858 struct nfe_softc *sc = ifp->if_softc; 1859 struct mii_data *mii = device_get_softc(sc->sc_miibus); 1860 1861 if (mii->mii_instance != 0) { 1862 struct mii_softc *miisc; 1863 1864 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 1865 mii_phy_reset(miisc); 1866 } 1867 mii_mediachg(mii); 1868 1869 return 0; 1870 } 1871 1872 static void 1873 nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 1874 { 1875 struct nfe_softc *sc = ifp->if_softc; 1876 struct mii_data *mii = device_get_softc(sc->sc_miibus); 1877 1878 mii_pollstat(mii); 1879 ifmr->ifm_status = mii->mii_media_status; 1880 ifmr->ifm_active = mii->mii_media_active; 1881 } 1882 1883 static void 1884 nfe_setmulti(struct nfe_softc *sc) 1885 { 1886 struct ifnet *ifp = &sc->arpcom.ac_if; 1887 struct ifmultiaddr *ifma; 1888 uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN]; 1889 uint32_t filter = NFE_RXFILTER_MAGIC; 1890 int i; 1891 1892 if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) { 1893 bzero(addr, ETHER_ADDR_LEN); 1894 bzero(mask, ETHER_ADDR_LEN); 1895 goto done; 1896 } 1897 1898 bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN); 1899 bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN); 1900 1901 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1902 caddr_t maddr; 1903 1904 if (ifma->ifma_addr->sa_family != AF_LINK) 1905 continue; 1906 1907 maddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr); 1908 for (i = 0; i < ETHER_ADDR_LEN; i++) { 1909 addr[i] &= maddr[i]; 1910 mask[i] &= ~maddr[i]; 1911 } 1912 } 1913 1914 for (i = 0; i < ETHER_ADDR_LEN; i++) 1915 mask[i] |= addr[i]; 1916 1917 done: 1918 addr[0] |= 0x01; /* make sure multicast bit is set */ 1919 1920 NFE_WRITE(sc, NFE_MULTIADDR_HI, 1921 addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]); 1922 NFE_WRITE(sc, NFE_MULTIADDR_LO, 1923 addr[5] << 8 | addr[4]); 1924 NFE_WRITE(sc, NFE_MULTIMASK_HI, 1925 mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]); 1926 NFE_WRITE(sc, NFE_MULTIMASK_LO, 1927 mask[5] << 8 | mask[4]); 1928 1929 filter |= (ifp->if_flags & IFF_PROMISC) ? 
		  NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

static void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	tmp = NFE_READ(sc, NFE_MACADDR_LO);
	addr[0] = (tmp >> 8) & 0xff;
	addr[1] = (tmp & 0xff);

	tmp = NFE_READ(sc, NFE_MACADDR_HI);
	addr[2] = (tmp >> 24) & 0xff;
	addr[3] = (tmp >> 16) & 0xff;
	addr[4] = (tmp >> 8) & 0xff;
	addr[5] = (tmp & 0xff);
}

static void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
		  addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
		  addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

static void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii = device_get_softc(sc->sc_miibus);

	lwkt_serialize_enter(ifp->if_serializer);

	mii_tick(mii);
	callout_reset(&sc->sc_tick_ch, hz, nfe_tick, sc);

	lwkt_serialize_exit(ifp->if_serializer);
}

static void
nfe_ring_dma_addr(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	if (error)
		return;

	KASSERT(nseg == 1, ("too many segments, should be 1\n"));

	*((uint32_t *)arg) = seg->ds_addr;
}

static void
nfe_buf_dma_addr(void *arg, bus_dma_segment_t *segs, int nsegs,
		 bus_size_t mapsz __unused, int error)
{
	struct nfe_dma_ctx *ctx = arg;
	int i;

	if (error)
		return;

	KASSERT(nsegs <= ctx->nsegs,
		("too many segments(%d), should be <= %d\n",
		 nsegs, ctx->nsegs));

	ctx->nsegs = nsegs;
	for (i = 0; i < nsegs; ++i)
		ctx->segs[i] = segs[i];
}

static int
nfe_newbuf_std(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
	       int wait)
{
	struct nfe_rx_data *data = &ring->data[idx];
	struct nfe_dma_ctx ctx;
	bus_dma_segment_t seg;
	bus_dmamap_t map;
	struct mbuf *m;
	int error;

	m = m_getcl(wait ? MB_WAIT : MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return ENOBUFS;
	m->m_len = m->m_pkthdr.len = MCLBYTES;

	ctx.nsegs = 1;
	ctx.segs = &seg;
	error = bus_dmamap_load_mbuf(ring->data_tag, ring->data_tmpmap,
				     m, nfe_buf_dma_addr, &ctx,
				     wait ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		if_printf(&sc->arpcom.ac_if, "could not map RX mbuf %d\n",
			  error);
		return error;
	}

	/* Unload originally mapped mbuf */
	bus_dmamap_unload(ring->data_tag, data->map);

	/* Swap this DMA map with tmp DMA map */
	map = data->map;
	data->map = ring->data_tmpmap;
	ring->data_tmpmap = map;

	/* Caller is assumed to have collected the old mbuf */
	data->m = m;

	nfe_set_paddr_rxdesc(sc, ring, idx, seg.ds_addr);

	bus_dmamap_sync(ring->data_tag, data->map, BUS_DMASYNC_PREREAD);
	return 0;
}

static int
nfe_newbuf_jumbo(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx,
		 int wait)
{
	struct nfe_rx_data *data = &ring->data[idx];
	struct nfe_jbuf *jbuf;
	struct mbuf *m;

	MGETHDR(m, wait ?
MB_WAIT : MB_DONTWAIT, MT_DATA); 2056 if (m == NULL) 2057 return ENOBUFS; 2058 2059 jbuf = nfe_jalloc(sc); 2060 if (jbuf == NULL) { 2061 m_freem(m); 2062 if_printf(&sc->arpcom.ac_if, "jumbo allocation failed " 2063 "-- packet dropped!\n"); 2064 return ENOBUFS; 2065 } 2066 2067 m->m_ext.ext_arg = jbuf; 2068 m->m_ext.ext_buf = jbuf->buf; 2069 m->m_ext.ext_free = nfe_jfree; 2070 m->m_ext.ext_ref = nfe_jref; 2071 m->m_ext.ext_size = NFE_JBYTES; 2072 2073 m->m_data = m->m_ext.ext_buf; 2074 m->m_flags |= M_EXT; 2075 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; 2076 2077 /* Caller is assumed to have collected the old mbuf */ 2078 data->m = m; 2079 2080 nfe_set_paddr_rxdesc(sc, ring, idx, jbuf->physaddr); 2081 2082 bus_dmamap_sync(ring->jtag, ring->jmap, BUS_DMASYNC_PREREAD); 2083 return 0; 2084 } 2085 2086 static void 2087 nfe_set_paddr_rxdesc(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx, 2088 bus_addr_t physaddr) 2089 { 2090 if (sc->sc_flags & NFE_40BIT_ADDR) { 2091 struct nfe_desc64 *desc64 = &ring->desc64[idx]; 2092 2093 #if defined(__LP64__) 2094 desc64->physaddr[0] = htole32(physaddr >> 32); 2095 #endif 2096 desc64->physaddr[1] = htole32(physaddr & 0xffffffff); 2097 } else { 2098 struct nfe_desc32 *desc32 = &ring->desc32[idx]; 2099 2100 desc32->physaddr = htole32(physaddr); 2101 } 2102 } 2103 2104 static void 2105 nfe_set_ready_rxdesc(struct nfe_softc *sc, struct nfe_rx_ring *ring, int idx) 2106 { 2107 if (sc->sc_flags & NFE_40BIT_ADDR) { 2108 struct nfe_desc64 *desc64 = &ring->desc64[idx]; 2109 2110 desc64->length = htole16(ring->bufsz); 2111 desc64->flags = htole16(NFE_RX_READY); 2112 } else { 2113 struct nfe_desc32 *desc32 = &ring->desc32[idx]; 2114 2115 desc32->length = htole16(ring->bufsz); 2116 desc32->flags = htole16(NFE_RX_READY); 2117 } 2118 } 2119