/*	$NetBSD: if_nfe.c,v 1.48 2010/01/08 19:56:52 dyoung Exp $	*/
/*	$OpenBSD: if_nfe.c,v 1.77 2008/02/05 16:52:50 brad Exp $	*/

/*-
 * Copyright (c) 2006, 2007 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_nfe.c,v 1.48 2010/01/08 19:56:52 dyoung Exp $");

#include "opt_inet.h"
#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/callout.h>
#include <sys/socket.h>

#include <sys/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>
#include <net/if_arp.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_inarp.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_nfereg.h>
#include <dev/pci/if_nfevar.h>

static int nfe_ifflags_cb(struct ethercom *);

int	nfe_match(device_t, cfdata_t, void *);
void	nfe_attach(device_t, device_t, void *);
void	nfe_power(int, void *);
void	nfe_miibus_statchg(device_t);
int	nfe_miibus_readreg(device_t, int, int);
void	nfe_miibus_writereg(device_t, int, int, int);
int	nfe_intr(void *);
int	nfe_ioctl(struct ifnet *, u_long, void *);
void	nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_txdesc32_rsync(struct nfe_softc *, int, int, int);
void	nfe_txdesc64_rsync(struct nfe_softc *, int, int, int);
void	nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_rxeof(struct nfe_softc *);
void	nfe_txeof(struct nfe_softc *);
int	nfe_encap(struct nfe_softc *, struct mbuf *);
void	nfe_start(struct ifnet *);
void	nfe_watchdog(struct ifnet *);
int	nfe_init(struct ifnet *);
void	nfe_stop(struct ifnet *, int);
struct nfe_jbuf *nfe_jalloc(struct nfe_softc *, int);
void	nfe_jfree(struct mbuf *, void *, size_t, void *);
int	nfe_jpool_alloc(struct nfe_softc *);
void	nfe_jpool_free(struct nfe_softc *);
int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_setmulti(struct nfe_softc *);
void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
void	nfe_tick(void *);
void	nfe_poweron(device_t);
bool	nfe_resume(device_t, pmf_qual_t);

CFATTACH_DECL_NEW(nfe, sizeof(struct nfe_softc), nfe_match, nfe_attach,
    NULL, NULL);

/* #define NFE_NO_JUMBO */

#ifdef NFE_DEBUG
int nfedebug = 0;
#define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

/* deal with naming differences */

#define PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 \
	PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1
#define PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 \
	PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2
#define PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 \
	PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN

#define PCI_PRODUCT_NVIDIA_CK804_LAN1 \
	PCI_PRODUCT_NVIDIA_NFORCE4_LAN1
#define PCI_PRODUCT_NVIDIA_CK804_LAN2 \
	PCI_PRODUCT_NVIDIA_NFORCE4_LAN2

#define PCI_PRODUCT_NVIDIA_MCP51_LAN1 \
	PCI_PRODUCT_NVIDIA_NFORCE430_LAN1
#define PCI_PRODUCT_NVIDIA_MCP51_LAN2 \
	PCI_PRODUCT_NVIDIA_NFORCE430_LAN2

#ifdef _LP64
#define __LP64__ 1
#endif

const struct nfe_product {
	pci_vendor_id_t vendor;
	pci_product_id_t product;
} nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4 }
};

int
nfe_match(device_t dev, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;
	const struct nfe_product *np;
	int i;

	for (i = 0; i < __arraycount(nfe_devices); i++) {
		np = &nfe_devices[i];
		if (PCI_VENDOR(pa->pa_id) == np->vendor &&
		    PCI_PRODUCT(pa->pa_id) == np->product)
			return 1;
	}
	return 0;
}

void
nfe_attach(device_t parent, device_t self, void *aux)
{
	struct nfe_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	bus_size_t memsize;
	pcireg_t memtype;
	char devinfo[256];
	int mii_flags = 0;

	sc->sc_dev = self;
	pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo, sizeof(devinfo));
	aprint_normal(": %s (rev. 0x%02x)\n", devinfo, PCI_REVISION(pa->pa_class));

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NFE_PCI_BA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		if (pci_mapreg_map(pa, NFE_PCI_BA, memtype, 0, &sc->sc_memt,
		    &sc->sc_memh, NULL, &memsize) == 0)
			break;
		/* FALLTHROUGH */
	default:
		aprint_error_dev(self, "could not map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		aprint_error_dev(self, "could not map interrupt\n");
		goto fail;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "could not establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		goto fail;
	}
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	sc->sc_dmat = pa->pa_dmat;

	sc->sc_flags = 0;

	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_CORRECT_MACADDR |
		    NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		mii_flags = MIIF_DOPAUSE;
		break;
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_HW_VLAN | NFE_PWR_MGMT;
		break;
	}

	nfe_poweron(self);

#ifndef NFE_NO_JUMBO
	/* enable jumbo frames for adapters that support it */
	if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->sc_flags |= NFE_USE_JUMBO;
#endif

	/* Check for reversed ethernet address */
	if ((NFE_READ(sc, NFE_TX_UNK) & NFE_MAC_ADDR_INORDER) != 0)
		sc->sc_flags |= NFE_CORRECT_MACADDR;

	nfe_get_macaddr(sc, sc->sc_enaddr);
	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(sc->sc_enaddr));

	/*
	 * Allocate Tx and Rx rings.
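	 * The Tx ring is set up first; if the Rx ring (created after the
	 * rxq mutex below) cannot be allocated, the Tx ring is freed again
	 * before bailing out.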
	 */
	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
		aprint_error_dev(self, "could not allocate Tx ring\n");
		goto fail;
	}

	mutex_init(&sc->rxq.mtx, MUTEX_DEFAULT, IPL_NET);

	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
		aprint_error_dev(self, "could not allocate Rx ring\n");
		nfe_free_tx_ring(sc, &sc->txq);
		goto fail;
	}

	ifp = &sc->sc_ethercom.ec_if;
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_stop = nfe_stop;
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifp->if_baudrate = IF_Gbps(1);
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);

	if (sc->sc_flags & NFE_USE_JUMBO)
		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;

#if NVLAN > 0
	if (sc->sc_flags & NFE_HW_VLAN)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU;
#endif
	if (sc->sc_flags & NFE_HW_CSUM) {
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
	}

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
	sc->sc_mii.mii_statchg = nfe_miibus_statchg;

	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
	    ether_mediastatus);

	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, mii_flags);

	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		aprint_error_dev(self, "no PHY found!\n");
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);
	ether_set_ifflags_cb(&sc->sc_ethercom, nfe_ifflags_cb);

	callout_init(&sc->sc_tick_ch, 0);
	callout_setfunc(&sc->sc_tick_ch, nfe_tick, sc);

	if (pmf_device_register(self, NULL, nfe_resume))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	return;

fail:
	if (sc->sc_ih != NULL) {
		pci_intr_disestablish(pc, sc->sc_ih);
		sc->sc_ih = NULL;
	}
	if (memsize)
		bus_space_unmap(sc->sc_memt, sc->sc_memh, memsize);
}

void
nfe_miibus_statchg(device_t dev)
{
	struct nfe_softc *sc = device_private(dev);
	struct mii_data *mii = &sc->sc_mii;
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}

int
nfe_miibus_readreg(device_t dev, int phy, int reg)
{
	struct nfe_softc *sc = device_private(dev);
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(2, ("%s: timeout waiting for PHY\n",
		    device_xname(sc->sc_dev)));
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(2, ("%s: could not read PHY\n",
		    device_xname(sc->sc_dev)));
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(2, ("%s: mii read phy %d reg 0x%x ret 0x%x\n",
	    device_xname(sc->sc_dev), phy, reg, val));

	return val;
}

void
nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = device_private(dev);
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
#ifdef NFE_DEBUG
	if (nfedebug >= 2 && ntries == 1000)
		printf("could not write to PHY\n");
#endif
}

int
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t r;
	int handled;

	if ((ifp->if_flags & IFF_UP) == 0)
		return 0;

	handled = 0;

	for (;;) {
		r = NFE_READ(sc, NFE_IRQ_STATUS);
		if ((r & NFE_IRQ_WANTED) == 0)
			break;

		NFE_WRITE(sc, NFE_IRQ_STATUS, r);
		handled = 1;
		DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));

		if ((r & (NFE_IRQ_RXERR|NFE_IRQ_RX_NOBUF|NFE_IRQ_RX)) != 0) {
			/* check Rx ring */
			nfe_rxeof(sc);
		}
		if ((r & (NFE_IRQ_TXERR|NFE_IRQ_TXERR2|NFE_IRQ_TX_DONE)) != 0) {
			/* check Tx ring */
			nfe_txeof(sc);
		}
		if ((r & NFE_IRQ_LINK) != 0) {
			NFE_READ(sc, NFE_PHY_STATUS);
			NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
			DPRINTF(("%s: link state changed\n",
			    device_xname(sc->sc_dev)));
		}
	}

	if (handled && !IF_IS_EMPTY(&ifp->if_snd))
		nfe_start(ifp);

	return handled;
}

static int
nfe_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct nfe_softc *sc = ifp->if_softc;
	int change = ifp->if_flags ^ sc->sc_if_flags;

	/*
	 * If only the PROMISC flag changes, then
	 * don't do a full re-init of the chip, just update
	 * the Rx filter.
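	 * Any other flag change (besides IFF_CANTCHANGE and IFF_DEBUG)
	 * is reported as ENETRESET so that the caller reinitializes the
	 * interface.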
	 */
	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
		return ENETRESET;
	else if ((change & IFF_PROMISC) != 0)
		nfe_setmulti(sc);

	return 0;
}

int
nfe_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCINITIFADDR:
		ifp->if_flags |= IFF_UP;
		nfe_init(ifp);
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			arp_ifinit(ifp, ifa);
			break;
#endif
		default:
			break;
		}
		break;
	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;

		error = 0;

		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING)
			nfe_setmulti(sc);
		break;
	}
	sc->sc_if_flags = ifp->if_flags;

	splx(s);

	return error;
}

void
nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (char *)desc32 - (char *)sc->txq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (char *)desc64 - (char *)sc->txq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

void
nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (char *)&sc->txq.desc32[start] - (char *)sc->txq.desc32,
		    (char *)&sc->txq.desc32[end] -
		    (char *)&sc->txq.desc32[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (char *)&sc->txq.desc32[start] - (char *)sc->txq.desc32,
	    (char *)&sc->txq.desc32[NFE_TX_RING_COUNT] -
	    (char *)&sc->txq.desc32[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (char *)&sc->txq.desc32[end] - (char *)sc->txq.desc32, ops);
}

void
nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (char *)&sc->txq.desc64[start] - (char *)sc->txq.desc64,
		    (char *)&sc->txq.desc64[end] -
		    (char *)&sc->txq.desc64[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (char *)&sc->txq.desc64[start] - (char *)sc->txq.desc64,
	    (char *)&sc->txq.desc64[NFE_TX_RING_COUNT] -
	    (char *)&sc->txq.desc64[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (char *)&sc->txq.desc64[end] - (char *)sc->txq.desc64, ops);
}

void
nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (char *)desc32 - (char *)sc->rxq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (char *)desc64 - (char *)sc->rxq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	struct mbuf *m, *mnew;
	bus_addr_t physaddr;
	uint16_t flags;
	int error, len, i;

	desc32 = NULL;
	desc64 = NULL;
	for (i = sc->rxq.cur;; i = NFE_RX_NEXTDESC(i)) {
		data = &sc->rxq.data[i];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
			nfe_rxdesc64_sync(sc, desc64,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			flags = le16toh(desc64->flags);
			len = le16toh(desc64->length) & 0x3fff;
		} else {
			desc32 = &sc->rxq.desc32[i];
			nfe_rxdesc32_sync(sc, desc32,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			flags = le16toh(desc32->flags);
			len = le16toh(desc32->length) & 0x3fff;
		}

		if ((flags & NFE_RX_READY) != 0)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if ((flags & NFE_RX_VALID_V1) == 0)
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if ((flags & NFE_RX_VALID_V2) == 0)
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * Try to allocate a new mbuf for this ring element and load
		 * it before processing the current mbuf. If the ring element
		 * cannot be loaded, drop the received packet and reuse the
		 * old mbuf. In the unlikely case that the old mbuf can't be
		 * reloaded either, explicitly panic.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			physaddr =
			    sc->rxq.jbuf[sc->rxq.jbufmap[i]].physaddr;
			if ((jbuf = nfe_jalloc(sc, i)) == NULL) {
				if (len > MCLBYTES) {
					m_freem(mnew);
					ifp->if_ierrors++;
					goto skip1;
				}
				MCLGET(mnew, M_DONTWAIT);
				if ((mnew->m_flags & M_EXT) == 0) {
					m_freem(mnew);
					ifp->if_ierrors++;
					goto skip1;
				}

				(void)memcpy(mtod(mnew, void *),
				    mtod(data->m, const void *), len);
				m = mnew;
				goto mbufcopied;
			} else {
				MEXTADD(mnew, jbuf->buf, NFE_JBYTES, 0, nfe_jfree, sc);
				bus_dmamap_sync(sc->sc_dmat, sc->rxq.jmap,
				    mtod(data->m, char *) - (char *)sc->rxq.jpool,
				    NFE_JBYTES, BUS_DMASYNC_POSTREAD);

				physaddr = jbuf->physaddr;
			}
		} else {
			MCLGET(mnew, M_DONTWAIT);
			if ((mnew->m_flags & M_EXT) == 0) {
				m_freem(mnew);
				ifp->if_ierrors++;
				goto skip;
			}

			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(mnew, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				m_freem(mnew);

				/* try to reload the old mbuf */
				error = bus_dmamap_load(sc->sc_dmat, data->map,
				    mtod(data->m, void *), MCLBYTES, NULL,
				    BUS_DMA_READ | BUS_DMA_NOWAIT);
				if (error != 0) {
					/* very unlikely that it will fail.. */
					panic("%s: could not load old rx mbuf",
					    device_xname(sc->sc_dev));
				}
				ifp->if_ierrors++;
				goto skip;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		/*
		 * New mbuf successfully loaded, update Rx ring and continue
		 * processing.
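		 * The old mbuf is handed up the stack while the fresh one is
		 * stored in this ring slot; the descriptor's mapping address
		 * is refreshed at the skip1 label below.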
		 */
		m = data->m;
		data->m = mnew;

mbufcopied:
		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

		if ((sc->sc_flags & NFE_HW_CSUM) != 0) {
			/*
			 * XXX
			 * no way to check M_CSUM_IPv4_BAD or non-IPv4 packets?
			 */
			if (flags & NFE_RX_IP_CSUMOK) {
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
				DPRINTFN(3, ("%s: ip4csum-rx ok\n",
				    device_xname(sc->sc_dev)));
			}
			/*
			 * XXX
			 * no way to check M_CSUM_TCP_UDP_BAD or
			 * other protocols?
			 */
			if (flags & NFE_RX_UDP_CSUMOK) {
				m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
				DPRINTFN(3, ("%s: udp4csum-rx ok\n",
				    device_xname(sc->sc_dev)));
			} else if (flags & NFE_RX_TCP_CSUMOK) {
				m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
				DPRINTFN(3, ("%s: tcp4csum-rx ok\n",
				    device_xname(sc->sc_dev)));
			}
		}
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		ifp->if_ipackets++;
		(*ifp->if_input)(ifp, m);

skip1:
		/* update mapping address in h/w descriptor */
		if (sc->sc_flags & NFE_40BIT_ADDR) {
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
		} else {
			desc32->physaddr = htole32(physaddr);
		}

skip:
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);

			nfe_rxdesc64_sync(sc, desc64,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		} else {
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);

			nfe_rxdesc32_sync(sc, desc32,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		}
	}
	/* update current RX pointer */
	sc->rxq.cur = i;
}

void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data = NULL;
	int i;
	uint16_t flags;
	char buf[128];

	for (i = sc->txq.next;
	    sc->txq.queued > 0;
	    i = NFE_TX_NEXTDESC(i), sc->txq.queued--) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[i];
			nfe_txdesc64_sync(sc, desc64,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			flags = le16toh(desc64->flags);
		} else {
			desc32 = &sc->txq.desc32[i];
			nfe_txdesc32_sync(sc, desc32,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			flags = le16toh(desc32->flags);
		}

		if ((flags & NFE_TX_VALID) != 0)
			break;

		data = &sc->txq.data[i];

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if ((flags & NFE_TX_LASTFRAG_V1) == 0 &&
			    data->m == NULL)
				continue;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				snprintb(buf, sizeof(buf), NFE_V1_TXERR, flags);
				aprint_error_dev(sc->sc_dev, "tx v1 error %s\n",
				    buf);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		} else {
			if ((flags & NFE_TX_LASTFRAG_V2) == 0 &&
			    data->m == NULL)
				continue;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				snprintb(buf, sizeof(buf), NFE_V2_TXERR, flags);
				aprint_error_dev(sc->sc_dev, "tx v2 error %s\n",
				    buf);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		}

		if (data->m == NULL) {	/* should not get there */
			aprint_error_dev(sc->sc_dev,
			    "last fragment bit w/o associated mbuf!\n");
			continue;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(sc->sc_dmat, data->active, 0,
		    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->active);
		m_freem(data->m);
		data->m = NULL;
	}

	sc->txq.next = i;

	if (sc->txq.queued < NFE_TX_RING_COUNT) {
		/* at least one slot freed */
		ifp->if_flags &= ~IFF_OACTIVE;
	}

	if (sc->txq.queued == 0) {
		/* all queued packets are sent */
		ifp->if_timer = 0;
	}
}

int
nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	bus_dmamap_t map;
	uint16_t flags, csumflags;
#if NVLAN > 0
	struct m_tag *mtag;
	uint32_t vtag = 0;
#endif
	int error, i, first;

	desc32 = NULL;
	desc64 = NULL;
	data = NULL;

	flags = 0;
	csumflags = 0;
	first = sc->txq.cur;

	map = sc->txq.data[first].map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not map mbuf (error %d)\n",
		    error);
		return error;
	}

	if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

#if NVLAN > 0
	/* setup h/w VLAN tagging */
	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL)
		vtag = NFE_TX_VTAG | VLAN_TAG_VALUE(mtag);
#endif
	if ((sc->sc_flags & NFE_HW_CSUM) != 0) {
		if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4)
			csumflags |= NFE_TX_IP_CSUM;
		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_UDPv4))
			csumflags |= NFE_TX_TCP_UDP_CSUM;
	}

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->txq.data[sc->txq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.cur];
#if defined(__LP64__)
			desc64->physaddr[0] =
			    htole32(map->dm_segs[i].ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(map->dm_segs[i].ds_addr & 0xffffffff);
			desc64->length = htole16(map->dm_segs[i].ds_len - 1);
			desc64->flags = htole16(flags);
			desc64->vtag = 0;
		} else {
			desc32 = &sc->txq.desc32[sc->txq.cur];

			desc32->physaddr = htole32(map->dm_segs[i].ds_addr);
			desc32->length = htole16(map->dm_segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		/*
		 * Setting of the valid bit in the first descriptor is
		 * deferred until the whole chain is fully setup.
		 */
		flags |= NFE_TX_VALID;

		sc->txq.queued++;
		sc->txq.cur = NFE_TX_NEXTDESC(sc->txq.cur);
	}

	/* the whole mbuf chain has been setup */
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		/* fix last descriptor */
		flags |= NFE_TX_LASTFRAG_V2;
		desc64->flags = htole16(flags);

		/* Checksum flags and vtag belong to the first fragment only. */
#if NVLAN > 0
		sc->txq.desc64[first].vtag = htole32(vtag);
#endif
		sc->txq.desc64[first].flags |= htole16(csumflags);

		/* finally, set the valid bit in the first descriptor */
		sc->txq.desc64[first].flags |= htole16(NFE_TX_VALID);
	} else {
		/* fix last descriptor */
		if (sc->sc_flags & NFE_JUMBO_SUP)
			flags |= NFE_TX_LASTFRAG_V2;
		else
			flags |= NFE_TX_LASTFRAG_V1;
		desc32->flags = htole16(flags);

		/* Checksum flags belong to the first fragment only. */
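		/*
		 * As in the 64-bit path above, the first descriptor is made
		 * valid only after the rest of the chain has been written,
		 * so the chip never starts on a half-built chain.
		 */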
		sc->txq.desc32[first].flags |= htole16(csumflags);

		/* finally, set the valid bit in the first descriptor */
		sc->txq.desc32[first].flags |= htole16(NFE_TX_VALID);
	}

	data->m = m0;
	data->active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}

void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	int old = sc->txq.queued;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (nfe_encap(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* packet put in h/w queue, remove from s/w queue */
		IFQ_DEQUEUE(&ifp->if_snd, m0);

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m0);
#endif
	}

	if (sc->txq.queued != old) {
		/* packets are queued */
		if (sc->sc_flags & NFE_40BIT_ADDR)
			nfe_txdesc64_rsync(sc, old, sc->txq.cur,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		else
			nfe_txdesc32_rsync(sc, old, sc->txq.cur,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		/* kick Tx */
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		ifp->if_timer = 5;
	}
}

void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	aprint_error_dev(sc->sc_dev, "watchdog timeout\n");

	ifp->if_flags &= ~IFF_RUNNING;
	nfe_init(ifp);

	ifp->if_oerrors++;
}

int
nfe_init(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	uint32_t tmp;
	int rc = 0, s;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	nfe_stop(ifp, 0);

	NFE_WRITE(sc, NFE_TX_UNK, 0);
	NFE_WRITE(sc, NFE_STATUS, 0);

	sc->rxtxctl = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->rxtxctl |= NFE_RXTX_V2MAGIC;
	if (sc->sc_flags & NFE_HW_CSUM)
		sc->rxtxctl |= NFE_RXTX_RXCSUM;
#if NVLAN > 0
	/*
	 * Although the adapter is capable of stripping VLAN tags from received
	 * frames (NFE_RXTX_VTAG_STRIP), we do not enable this functionality on
	 * purpose. This will be done in software by our network stack.
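	 * Only NFE_RXTX_VTAG_INSERT is enabled below, so outgoing tags are
	 * inserted by the hardware while incoming tags are left in the frame
	 * for the stack to parse.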
	 */
	if (sc->sc_flags & NFE_HW_VLAN)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT;
#endif
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

#if NVLAN
	if (sc->sc_flags & NFE_HW_VLAN)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
#endif

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->sc_enaddr);

	/* tell MAC where rings are in memory */
#ifdef __LP64__
	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
#ifdef __LP64__
	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);

	NFE_WRITE(sc, NFE_RING_SIZE,
	    (NFE_RX_RING_COUNT - 1) << 16 |
	    (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);

	/* force MAC to wakeup */
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
	DELAY(10);
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);

	s = splnet();
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);
	nfe_intr(sc); /* XXX clear IRQ status registers */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
	splx(s);

#if 1
	/* configure interrupts coalescing/mitigation */
	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
#else
	/* no interrupt mitigation: one interrupt per packet */
	NFE_WRITE(sc, NFE_IMTIMER, 970);
#endif

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_ENABLE);

	sc->rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

	/* set Rx filter */
	nfe_setmulti(sc);

	if ((rc = ether_mediachange(ifp)) != 0)
		goto out;

	nfe_tick(sc);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	callout_schedule(&sc->sc_tick_ch, hz);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

out:
	return rc;
}

void
nfe_stop(struct ifnet *ifp, int disable)
{
	struct nfe_softc *sc = ifp->if_softc;

	callout_stop(&sc->sc_tick_ch);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	mii_down(&sc->sc_mii);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	/* disable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	/* reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}

int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	void **desc;
	bus_addr_t physaddr;
	int i, nsegs, error, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->cur = ring->next = 0;
	ring->bufsz = MCLBYTES;

	error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1,
	    NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		ring->map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_RX_RING_COUNT * descsize, (void **)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map desc DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not load desc DMA map\n");
		goto fail;
	}

	memset(*desc, 0, NFE_RX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	if (sc->sc_flags & NFE_USE_JUMBO) {
		ring->bufsz = NFE_JBYTES;
		if ((error = nfe_jpool_alloc(sc)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate jumbo frames\n");
			goto fail;
		}
	}

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
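	 * Each slot gets either a buffer from the jumbo pool or a standard
	 * mbuf cluster with its own DMA map, and its descriptor is marked
	 * NFE_RX_READY so the chip may fill it.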
	 */
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &sc->rxq.data[i];

		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
		if (data->m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate rx mbuf\n");
			error = ENOMEM;
			goto fail;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc, i)) == NULL) {
				aprint_error_dev(sc->sc_dev,
				    "could not allocate jumbo buffer\n");
				goto fail;
			}
			MEXTADD(data->m, jbuf->buf, NFE_JBYTES, 0, nfe_jfree,
			    sc);

			physaddr = jbuf->physaddr;
		} else {
			error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->map);
			if (error != 0) {
				aprint_error_dev(sc->sc_dev,
				    "could not create DMA map\n");
				data->map = NULL;
				goto fail;
			}
			MCLGET(data->m, M_DONTWAIT);
			if (!(data->m->m_flags & M_EXT)) {
				aprint_error_dev(sc->sc_dev,
				    "could not allocate mbuf cluster\n");
				error = ENOMEM;
				goto fail;
			}

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(data->m, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				aprint_error_dev(sc->sc_dev,
				    "could not load rx buf DMA map");
				goto fail;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);
		} else {
			desc32 = &sc->rxq.desc32[i];
			desc32->physaddr = htole32(physaddr);
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	nfe_free_rx_ring(sc, ring);
	return error;
}

void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			ring->desc64[i].length = htole16(ring->bufsz);
			ring->desc64[i].flags = htole16(NFE_RX_READY);
		} else {
			ring->desc32[i].length = htole16(ring->bufsz);
			ring->desc32[i].flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (void *)desc,
		    NFE_RX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			bus_dmamap_destroy(sc->sc_dmat, data->map);
		}
		if (data->m != NULL)
			m_freem(data->m);
	}
}

struct nfe_jbuf *
nfe_jalloc(struct nfe_softc *sc, int i)
{
	struct nfe_jbuf *jbuf;

	mutex_enter(&sc->rxq.mtx);
	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
	if (jbuf != NULL)
		SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
	mutex_exit(&sc->rxq.mtx);
	if (jbuf == NULL)
		return NULL;
	sc->rxq.jbufmap[i] =
	    ((char *)jbuf->buf - (char *)sc->rxq.jpool) / NFE_JBYTES;
	return jbuf;
}

/*
 * This is called automatically by the network stack when the mbuf is freed.
 * Caution must be taken that the NIC might be reset by the time the mbuf is
 * freed.
 */
void
nfe_jfree(struct mbuf *m, void *buf, size_t size, void *arg)
{
	struct nfe_softc *sc = arg;
	struct nfe_jbuf *jbuf;
	int i;

	/* find the jbuf from the base pointer */
	i = ((char *)buf - (char *)sc->rxq.jpool) / NFE_JBYTES;
	if (i < 0 || i >= NFE_JPOOL_COUNT) {
		aprint_error_dev(sc->sc_dev,
		    "request to free a buffer (%p) not managed by us\n", buf);
		return;
	}
	jbuf = &sc->rxq.jbuf[i];

	/* ..and put it back in the free list */
	mutex_enter(&sc->rxq.mtx);
	SLIST_INSERT_HEAD(&sc->rxq.jfreelist, jbuf, jnext);
	mutex_exit(&sc->rxq.mtx);

	if (m != NULL)
		pool_cache_put(mb_cache, m);
}

int
nfe_jpool_alloc(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;
	struct nfe_jbuf *jbuf;
	bus_addr_t physaddr;
	char *buf;
	int i, nsegs, error;

	/*
	 * Allocate a big chunk of DMA'able memory.
	 */
	error = bus_dmamap_create(sc->sc_dmat, NFE_JPOOL_SIZE, 1,
	    NFE_JPOOL_SIZE, 0, BUS_DMA_NOWAIT, &ring->jmap);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create jumbo DMA map\n");
		ring->jmap = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_JPOOL_SIZE, PAGE_SIZE, 0,
	    &ring->jseg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate jumbo DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->jseg, nsegs, NFE_JPOOL_SIZE,
	    &ring->jpool, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map jumbo DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->jmap, ring->jpool,
	    NFE_JPOOL_SIZE, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not load jumbo DMA map\n");
		goto fail;
	}

	/* ..and split it into 9KB chunks */
	SLIST_INIT(&ring->jfreelist);

	buf = ring->jpool;
	physaddr = ring->jmap->dm_segs[0].ds_addr;
	for (i = 0; i < NFE_JPOOL_COUNT; i++) {
		jbuf = &ring->jbuf[i];

		jbuf->buf = buf;
		jbuf->physaddr = physaddr;

		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);

		buf += NFE_JBYTES;
		physaddr += NFE_JBYTES;
	}

	return 0;

fail:	nfe_jpool_free(sc);
	return error;
}

void
nfe_jpool_free(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;

	if (ring->jmap != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->jmap, 0,
		    ring->jmap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->jmap);
		bus_dmamap_destroy(sc->sc_dmat, ring->jmap);
	}
	if (ring->jpool != NULL) {
		bus_dmamem_unmap(sc->sc_dmat, ring->jpool, NFE_JPOOL_SIZE);
		bus_dmamem_free(sc->sc_dmat, &ring->jseg, 1);
	}
}

int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, nsegs, error;
	void **desc;
	int descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);

	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		ring->map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_TX_RING_COUNT * descsize, (void **)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map desc DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not load desc DMA map\n");
		goto fail;
	}

	memset(*desc, 0, NFE_TX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, NFE_JBYTES,
		    NFE_MAX_SCATTER, NFE_JBYTES, 0, BUS_DMA_NOWAIT,
		    &ring->data[i].map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create DMA map\n");
			ring->data[i].map = NULL;
			goto fail;
		}
	}

	return 0;

fail:	nfe_free_tx_ring(sc, ring);
	return error;
}

void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (void *)desc,
		    NFE_TX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
		}
	}

	/* ..and now actually destroy the DMA mappings */
	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];
		if (data->map == NULL)
			continue;
		bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

void
nfe_setmulti(struct nfe_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &ec->ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		memset(addr, 0, ETHER_ADDR_LEN);
		memset(mask, 0, ETHER_ADDR_LEN);
		goto done;
	}

	memcpy(addr, etherbroadcastaddr, ETHER_ADDR_LEN);
	memcpy(mask, etherbroadcastaddr, ETHER_ADDR_LEN);

	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			ifp->if_flags |= IFF_ALLMULTI;
			memset(addr, 0, ETHER_ADDR_LEN);
			memset(mask, 0, ETHER_ADDR_LEN);
			goto done;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &= enm->enm_addrlo[i];
			mask[i] &= ~enm->enm_addrlo[i];
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
	    mask[5] << 8 | mask[4]);

	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	if ((sc->sc_flags & NFE_CORRECT_MACADDR) != 0) {
		tmp = NFE_READ(sc, NFE_MACADDR_HI);
		addr[0] = (tmp & 0xff);
		addr[1] = (tmp >> 8) & 0xff;
		addr[2] = (tmp >> 16) & 0xff;
		addr[3] = (tmp >> 24) & 0xff;

		tmp = NFE_READ(sc, NFE_MACADDR_LO);
		addr[4] = (tmp & 0xff);
		addr[5] = (tmp >> 8) & 0xff;

	} else {
		tmp = NFE_READ(sc, NFE_MACADDR_LO);
		addr[0] = (tmp >> 8) & 0xff;
		addr[1] = (tmp & 0xff);

		tmp = NFE_READ(sc, NFE_MACADDR_HI);
		addr[2] = (tmp >> 24) & 0xff;
		addr[3] = (tmp >> 16) & 0xff;
		addr[4] = (tmp >> 8) & 0xff;
		addr[5] = (tmp & 0xff);
	}
}

void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	callout_schedule(&sc->sc_tick_ch, hz);
}

void
nfe_poweron(device_t self)
{
	struct nfe_softc *sc = device_private(self);

	if ((sc->sc_flags & NFE_PWR_MGMT) != 0) {
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
		NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
		DELAY(100);
		NFE_WRITE(sc, NFE_MAC_RESET, 0);
		DELAY(100);
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
		NFE_WRITE(sc, NFE_PWR2_CTL,
		    NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_WAKEUP_MASK);
	}
}

bool
nfe_resume(device_t dv, pmf_qual_t qual)
{
	nfe_poweron(dv);

	return true;
}