1 /* $NetBSD: if_nfe.c,v 1.38 2008/12/16 22:35:33 christos Exp $ */ 2 /* $OpenBSD: if_nfe.c,v 1.77 2008/02/05 16:52:50 brad Exp $ */ 3 4 /*- 5 * Copyright (c) 2006, 2007 Damien Bergamini <damien.bergamini@free.fr> 6 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org> 7 * 8 * Permission to use, copy, modify, and distribute this software for any 9 * purpose with or without fee is hereby granted, provided that the above 10 * copyright notice and this permission notice appear in all copies. 11 * 12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
19 */ 20 21 /* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */ 22 23 #include <sys/cdefs.h> 24 __KERNEL_RCSID(0, "$NetBSD: if_nfe.c,v 1.38 2008/12/16 22:35:33 christos Exp $"); 25 26 #include "opt_inet.h" 27 #include "bpfilter.h" 28 #include "vlan.h" 29 30 #include <sys/param.h> 31 #include <sys/endian.h> 32 #include <sys/systm.h> 33 #include <sys/types.h> 34 #include <sys/sockio.h> 35 #include <sys/mbuf.h> 36 #include <sys/mutex.h> 37 #include <sys/queue.h> 38 #include <sys/kernel.h> 39 #include <sys/device.h> 40 #include <sys/callout.h> 41 #include <sys/socket.h> 42 43 #include <sys/bus.h> 44 45 #include <net/if.h> 46 #include <net/if_dl.h> 47 #include <net/if_media.h> 48 #include <net/if_ether.h> 49 #include <net/if_arp.h> 50 51 #ifdef INET 52 #include <netinet/in.h> 53 #include <netinet/in_systm.h> 54 #include <netinet/in_var.h> 55 #include <netinet/ip.h> 56 #include <netinet/if_inarp.h> 57 #endif 58 59 #if NVLAN > 0 60 #include <net/if_types.h> 61 #endif 62 63 #if NBPFILTER > 0 64 #include <net/bpf.h> 65 #endif 66 67 #include <dev/mii/mii.h> 68 #include <dev/mii/miivar.h> 69 70 #include <dev/pci/pcireg.h> 71 #include <dev/pci/pcivar.h> 72 #include <dev/pci/pcidevs.h> 73 74 #include <dev/pci/if_nfereg.h> 75 #include <dev/pci/if_nfevar.h> 76 77 static int nfe_ifflags_cb(struct ethercom *); 78 79 int nfe_match(device_t, cfdata_t, void *); 80 void nfe_attach(device_t, device_t, void *); 81 void nfe_power(int, void *); 82 void nfe_miibus_statchg(device_t); 83 int nfe_miibus_readreg(device_t, int, int); 84 void nfe_miibus_writereg(device_t, int, int, int); 85 int nfe_intr(void *); 86 int nfe_ioctl(struct ifnet *, u_long, void *); 87 void nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int); 88 void nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int); 89 void nfe_txdesc32_rsync(struct nfe_softc *, int, int, int); 90 void nfe_txdesc64_rsync(struct nfe_softc *, int, int, int); 91 void nfe_rxdesc32_sync(struct nfe_softc *, 
struct nfe_desc32 *, int); 92 void nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int); 93 void nfe_rxeof(struct nfe_softc *); 94 void nfe_txeof(struct nfe_softc *); 95 int nfe_encap(struct nfe_softc *, struct mbuf *); 96 void nfe_start(struct ifnet *); 97 void nfe_watchdog(struct ifnet *); 98 int nfe_init(struct ifnet *); 99 void nfe_stop(struct ifnet *, int); 100 struct nfe_jbuf *nfe_jalloc(struct nfe_softc *, int); 101 void nfe_jfree(struct mbuf *, void *, size_t, void *); 102 int nfe_jpool_alloc(struct nfe_softc *); 103 void nfe_jpool_free(struct nfe_softc *); 104 int nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); 105 void nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); 106 void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); 107 int nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); 108 void nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); 109 void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); 110 void nfe_setmulti(struct nfe_softc *); 111 void nfe_get_macaddr(struct nfe_softc *, uint8_t *); 112 void nfe_set_macaddr(struct nfe_softc *, const uint8_t *); 113 void nfe_tick(void *); 114 void nfe_poweron(device_t); 115 bool nfe_resume(device_t PMF_FN_PROTO); 116 117 CFATTACH_DECL_NEW(nfe, sizeof(struct nfe_softc), nfe_match, nfe_attach, 118 NULL, NULL); 119 120 /* #define NFE_NO_JUMBO */ 121 122 #ifdef NFE_DEBUG 123 int nfedebug = 0; 124 #define DPRINTF(x) do { if (nfedebug) printf x; } while (0) 125 #define DPRINTFN(n,x) do { if (nfedebug >= (n)) printf x; } while (0) 126 #else 127 #define DPRINTF(x) 128 #define DPRINTFN(n,x) 129 #endif 130 131 /* deal with naming differences */ 132 133 #define PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 \ 134 PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1 135 #define PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 \ 136 PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2 137 #define PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 \ 138 PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN 139 140 #define 
PCI_PRODUCT_NVIDIA_CK804_LAN1 \
	PCI_PRODUCT_NVIDIA_NFORCE4_LAN1
#define PCI_PRODUCT_NVIDIA_CK804_LAN2 \
	PCI_PRODUCT_NVIDIA_NFORCE4_LAN2

#define PCI_PRODUCT_NVIDIA_MCP51_LAN1 \
	PCI_PRODUCT_NVIDIA_NFORCE430_LAN1
#define PCI_PRODUCT_NVIDIA_MCP51_LAN2 \
	PCI_PRODUCT_NVIDIA_NFORCE430_LAN2

/*
 * NOTE(review): this defines __LP64__, an identifier normally owned by
 * the compiler, from NetBSD's _LP64.  The 64-bit descriptor-address
 * code later in this file tests #if defined(__LP64__), so it cannot be
 * removed without changing those sites too — confirm before cleaning up.
 */
#ifdef _LP64
#define __LP64__ 1
#endif

/*
 * PCI vendor/product pairs this driver attaches to; consulted by
 * nfe_match() below.
 */
const struct nfe_product {
	pci_vendor_id_t vendor;
	pci_product_id_t product;
} nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4 }
};

/*
 * nfe_match:
 *
 *	Autoconfiguration match routine.  Returns 1 iff the device's
 *	PCI vendor/product pair appears in nfe_devices[].
 */
int
nfe_match(device_t dev, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;
	const struct nfe_product *np;
	int i;

	for (i = 0; i < sizeof(nfe_devices) / sizeof(nfe_devices[0]); i++) {
		np = &nfe_devices[i];
		if (PCI_VENDOR(pa->pa_id) == np->vendor &&
		    PCI_PRODUCT(pa->pa_id) == np->product)
			return 1;
	}
	return 0;
}

/*
 * nfe_attach:
 *
 *	Autoconfiguration attach routine.  Maps the register BAR,
 *	establishes the interrupt, derives per-chip feature flags from
 *	the PCI product ID, allocates the Tx/Rx rings, and attaches the
 *	network interface and MII.  On failure it prints an error and
 *	returns, undoing what was done so far.
 */
void
nfe_attach(device_t parent, device_t self, void *aux)
{
	struct nfe_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	bus_size_t memsize;
	pcireg_t memtype;
	char devinfo[256];

	sc->sc_dev = self;
	pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo, sizeof(devinfo));
	aprint_normal(": %s (rev. 0x%02x)\n", devinfo,
	    PCI_REVISION(pa->pa_class));

	/* Map the memory BAR; both 32-bit and 64-bit BARs are accepted. */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NFE_PCI_BA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		if (pci_mapreg_map(pa, NFE_PCI_BA, memtype, 0, &sc->sc_memt,
		    &sc->sc_memh, NULL, &memsize) == 0)
			break;
		/* FALLTHROUGH */
	default:
		aprint_error_dev(self, "could not map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		aprint_error_dev(self, "could not map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "could not establish interrupt");
		if (intrstr != NULL)
			aprint_normal(" at %s", intrstr);
		aprint_normal("\n");
		return;
	}
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	sc->sc_dmat = pa->pa_dmat;

	sc->sc_flags = 0;

	/*
	 * Per-chip feature flags: jumbo frames, 40-bit DMA addressing
	 * (64-bit descriptor format), h/w checksum, h/w VLAN tagging,
	 * power management, and byte-swapped MAC address, keyed off
	 * the PCI product ID.
	 */
	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_CORRECT_MACADDR |
		    NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_HW_VLAN | NFE_PWR_MGMT;
		break;
	}

	/* Bring the chip out of power-down before touching more registers. */
	nfe_poweron(self);

#ifndef NFE_NO_JUMBO
	/* enable jumbo frames for adapters that support it */
	if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->sc_flags |= NFE_USE_JUMBO;
#endif

	/* Check for reversed ethernet address */
	if ((NFE_READ(sc, NFE_TX_UNK) & NFE_MAC_ADDR_INORDER) != 0)
		sc->sc_flags |= NFE_CORRECT_MACADDR;

	nfe_get_macaddr(sc, sc->sc_enaddr);
	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(sc->sc_enaddr));

	/*
	 * Allocate Tx and Rx rings.
	 */
	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
		aprint_error_dev(self, "could not allocate Tx ring\n");
		return;
	}

	mutex_init(&sc->rxq.mtx, MUTEX_DEFAULT, IPL_NET);

	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
		aprint_error_dev(self, "could not allocate Rx ring\n");
		nfe_free_tx_ring(sc, &sc->txq);
		return;
	}

	/* Initialize the struct ifnet and hook up our entry points. */
	ifp = &sc->sc_ethercom.ec_if;
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_stop = nfe_stop;
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifp->if_baudrate = IF_Gbps(1);
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);

	if (sc->sc_flags & NFE_USE_JUMBO)
		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;

#if NVLAN > 0
	if (sc->sc_flags & NFE_HW_VLAN)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU;
#endif
	/* Advertise IPv4/TCP/UDP checksum offload when the chip has it. */
	if (sc->sc_flags & NFE_HW_CSUM) {
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
	}

	/* Attach the MII bus; fall back to manual media if no PHY found. */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
	sc->sc_mii.mii_statchg = nfe_miibus_statchg;

	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
	    ether_mediastatus);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		aprint_error_dev(self, "no PHY found!\n");
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);
	ether_set_ifflags_cb(&sc->sc_ethercom, nfe_ifflags_cb);

	/* Periodic MII tick; armed from nfe_init (presumably) — see nfe_tick. */
	callout_init(&sc->sc_tick_ch, 0);
	callout_setfunc(&sc->sc_tick_ch, nfe_tick, sc);

	if (!pmf_device_register(self, NULL, nfe_resume))
		aprint_error_dev(self, "couldn't establish power handler\n");
	else
		pmf_class_network_register(self, ifp);
}

/*
 * nfe_miibus_statchg:
 *
 *	MII status-change callback.  Propagates the currently
 *	negotiated media (speed and duplex from mii_media_active) into
 *	the MAC's PHY interface, random-seed, misc and link-speed
 *	registers.
 */
void
nfe_miibus_statchg(device_t dev)
{
	struct nfe_softc *sc = device_private(dev);
	struct mii_data *mii = &sc->sc_mii;
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	/* Clear the duplex/speed bits; they are rebuilt below. */
	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}

/*
 * nfe_miibus_readreg:
 *
 *	Read a PHY register through the chip's PHY control interface.
 *	Busy-waits (up to 1000 * 100us) for the access to complete;
 *	returns 0 on timeout or if the chip flags a PHY error.  As a
 *	side effect, records the PHY address in sc->mii_phyaddr when a
 *	plausible (non-zero, non-all-ones) value is read back.
 */
int
nfe_miibus_readreg(device_t dev, int phy, int reg)
{
	struct nfe_softc *sc = device_private(dev);
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* If a previous access is still in flight, cancel it. */
	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(2, ("%s: timeout waiting for PHY\n",
		    device_xname(sc->sc_dev)));
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(2, ("%s: could not read PHY\n",
		    device_xname(sc->sc_dev)));
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(2, ("%s: mii read phy %d reg 0x%x ret 0x%x\n",
	    device_xname(sc->sc_dev), phy, reg, val));

	return val;
}

/*
 * nfe_miibus_writereg:
 *
 *	Write a PHY register through the chip's PHY control interface.
 *	Busy-waits for completion; a timeout is only reported under
 *	NFE_DEBUG (the write is otherwise fire-and-forget).
 */
void
nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = device_private(dev);
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* If a previous access is still in flight, cancel it. */
	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
#ifdef NFE_DEBUG
	if (nfedebug >= 2 && ntries == 1000)
		printf("could not write to PHY\n");
#endif
}

/*
 * nfe_intr:
 *
 *	Interrupt handler.  Masks chip interrupts, drains the IRQ
 *	status register, dispatching to the Rx/Tx completion routines,
 *	then re-enables interrupts and kicks the transmit queue.
 *	Returns non-zero iff an interrupt was serviced.
 */
int
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t r;
	int handled;

	/* Ignore stray interrupts while the interface is down. */
	if ((ifp->if_flags & IFF_UP) == 0)
		return 0;

	handled = 0;

	/* Mask all chip interrupts while the status bits are processed. */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	for (;;) {
		r = NFE_READ(sc, NFE_IRQ_STATUS);
		if ((r & NFE_IRQ_WANTED) == 0)
			break;

		/* Ack the bits we are about to service. */
		NFE_WRITE(sc, NFE_IRQ_STATUS, r);
		handled = 1;
		DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));

		if ((r & (NFE_IRQ_RXERR|NFE_IRQ_RX_NOBUF|NFE_IRQ_RX)) != 0) {
			/* check Rx ring */
			nfe_rxeof(sc);
		}
		if ((r & (NFE_IRQ_TXERR|NFE_IRQ_TXERR2|NFE_IRQ_TX_DONE)) != 0) {
			/* check Tx ring */
			nfe_txeof(sc);
		}
		if ((r & NFE_IRQ_LINK) != 0) {
			NFE_READ(sc, NFE_PHY_STATUS);
			NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
			DPRINTF(("%s: link state changed\n",
			    device_xname(sc->sc_dev)));
		}
	}

	/* Re-enable the interrupts we care about. */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	/* Tx completion may have freed descriptors; restart output. */
	if (handled && !IF_IS_EMPTY(&ifp->if_snd))
		nfe_start(ifp);

	return handled;
}

/*
 * nfe_ifflags_cb:
 *
 *	ethercom if_flags-change callback.  Returns ENETRESET (forcing
 *	a full reinit) for any flag change other than PROMISC/DEBUG;
 *	a pure PROMISC toggle only reprograms the Rx filter.
 */
static int
nfe_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct nfe_softc *sc = ifp->if_softc;
	int change = ifp->if_flags ^ sc->sc_if_flags;

	/*
	 * If only the PROMISC flag changes, then
	 * don't do a full re-init of the chip, just update
	 * the Rx filter.
	 */
	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
		return ENETRESET;
	else if ((change & IFF_PROMISC) != 0)
		nfe_setmulti(sc);

	return 0;
}

/*
 * nfe_ioctl:
 *
 *	Interface ioctl entry point.  Handles SIOCINITIFADDR directly
 *	(bring the interface up, then ARP-init for INET); everything
 *	else is passed to ether_ioctl(), with ENETRESET from the
 *	multicast ioctls translated into an Rx filter update.  Runs at
 *	splnet and records if_flags for nfe_ifflags_cb().
 */
int
nfe_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCINITIFADDR:
		ifp->if_flags |= IFF_UP;
		nfe_init(ifp);
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			arp_ifinit(ifp, ifa);
			break;
#endif
		default:
			break;
		}
		break;
	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;

		error = 0;

		/* Only a running interface needs its filter reloaded. */
		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING)
			nfe_setmulti(sc);
		break;
	}
	sc->sc_if_flags = ifp->if_flags;

	splx(s);

	return error;
}

/*
 * nfe_txdesc32_sync:
 *
 *	bus_dmamap_sync() a single 32-bit Tx descriptor; the offset
 *	into the ring map is computed from the descriptor's address.
 */
void
nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (char *)desc32 - (char *)sc->txq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

/*
 * nfe_txdesc64_sync:
 *
 *	As nfe_txdesc32_sync(), for the 64-bit descriptor format.
 */
void
nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (char *)desc64 - (char *)sc->txq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

/*
 * nfe_txdesc32_rsync:
 *
 *	Sync a range [start, end) of 32-bit Tx descriptors.  When the
 *	range wraps (end <= start) it is synced in two pieces: start to
 *	the end of the ring, then the beginning of the ring to 'end'.
 */
void
nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (char *)&sc->txq.desc32[start] - (char *)sc->txq.desc32,
		    (char *)&sc->txq.desc32[end] -
		    (char *)&sc->txq.desc32[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (char *)&sc->txq.desc32[start] - (char *)sc->txq.desc32,
	    (char *)&sc->txq.desc32[NFE_TX_RING_COUNT] -
	    (char *)&sc->txq.desc32[start], ops);
670 /* sync from start of ring to 'end' */ 671 bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0, 672 (char *)&sc->txq.desc32[end] - (char *)sc->txq.desc32, ops); 673 } 674 675 void 676 nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops) 677 { 678 if (end > start) { 679 bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 680 (char *)&sc->txq.desc64[start] - (char *)sc->txq.desc64, 681 (char *)&sc->txq.desc64[end] - 682 (char *)&sc->txq.desc64[start], ops); 683 return; 684 } 685 /* sync from 'start' to end of ring */ 686 bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 687 (char *)&sc->txq.desc64[start] - (char *)sc->txq.desc64, 688 (char *)&sc->txq.desc64[NFE_TX_RING_COUNT] - 689 (char *)&sc->txq.desc64[start], ops); 690 691 /* sync from start of ring to 'end' */ 692 bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0, 693 (char *)&sc->txq.desc64[end] - (char *)sc->txq.desc64, ops); 694 } 695 696 void 697 nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops) 698 { 699 bus_dmamap_sync(sc->sc_dmat, sc->rxq.map, 700 (char *)desc32 - (char *)sc->rxq.desc32, 701 sizeof (struct nfe_desc32), ops); 702 } 703 704 void 705 nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops) 706 { 707 bus_dmamap_sync(sc->sc_dmat, sc->rxq.map, 708 (char *)desc64 - (char *)sc->rxq.desc64, 709 sizeof (struct nfe_desc64), ops); 710 } 711 712 void 713 nfe_rxeof(struct nfe_softc *sc) 714 { 715 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 716 struct nfe_desc32 *desc32; 717 struct nfe_desc64 *desc64; 718 struct nfe_rx_data *data; 719 struct nfe_jbuf *jbuf; 720 struct mbuf *m, *mnew; 721 bus_addr_t physaddr; 722 uint16_t flags; 723 int error, len, i; 724 725 desc32 = NULL; 726 desc64 = NULL; 727 for (i = sc->rxq.cur;; i = NFE_RX_NEXTDESC(i)) { 728 data = &sc->rxq.data[i]; 729 730 if (sc->sc_flags & NFE_40BIT_ADDR) { 731 desc64 = &sc->rxq.desc64[i]; 732 nfe_rxdesc64_sync(sc, desc64, 733 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 734 735 flags = le16toh(desc64->flags); 
			len = le16toh(desc64->length) & 0x3fff;
		} else {
			desc32 = &sc->rxq.desc32[i];
			nfe_rxdesc32_sync(sc, desc32,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			flags = le16toh(desc32->flags);
			len = le16toh(desc32->length) & 0x3fff;
		}

		/* Descriptor still owned by the chip: nothing more done. */
		if ((flags & NFE_RX_READY) != 0)
			break;

		/* Validity/error checks differ between v1 and v2 chips. */
		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if ((flags & NFE_RX_VALID_V1) == 0)
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if ((flags & NFE_RX_VALID_V2) == 0)
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * Try to allocate a new mbuf for this ring element and load
		 * it before processing the current mbuf. If the ring element
		 * cannot be loaded, drop the received packet and reuse the
		 * old mbuf. In the unlikely case that the old mbuf can't be
		 * reloaded either, explicitly panic.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			/* Keep the current jumbo buffer's address in case
			 * no replacement can be allocated below. */
			physaddr =
			    sc->rxq.jbuf[sc->rxq.jbufmap[i]].physaddr;
			if ((jbuf = nfe_jalloc(sc, i)) == NULL) {
				/* No spare jumbo buffer: copy out into a
				 * regular cluster instead, reusing the old
				 * jumbo buffer for the ring slot. */
				if (len > MCLBYTES) {
					m_freem(mnew);
					ifp->if_ierrors++;
					goto skip1;
				}
				MCLGET(mnew, M_DONTWAIT);
				if ((mnew->m_flags & M_EXT) == 0) {
					m_freem(mnew);
					ifp->if_ierrors++;
					goto skip1;
				}

				(void)memcpy(mtod(mnew, void *),
				    mtod(data->m, const void *), len);
				m = mnew;
				goto mbufcopied;
			} else {
				MEXTADD(mnew, jbuf->buf, NFE_JBYTES, 0, nfe_jfree, sc);
				bus_dmamap_sync(sc->sc_dmat, sc->rxq.jmap,
				    mtod(data->m, char *) - (char *)sc->rxq.jpool,
				    NFE_JBYTES, BUS_DMASYNC_POSTREAD);

				physaddr = jbuf->physaddr;
			}
		} else {
			MCLGET(mnew, M_DONTWAIT);
			if ((mnew->m_flags & M_EXT) == 0) {
				m_freem(mnew);
				ifp->if_ierrors++;
				goto skip;
			}

			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(mnew, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				m_freem(mnew);

				/* try to reload the old mbuf */
				error = bus_dmamap_load(sc->sc_dmat, data->map,
				    mtod(data->m, void *), MCLBYTES, NULL,
				    BUS_DMA_READ | BUS_DMA_NOWAIT);
				if (error != 0) {
					/* very unlikely that it will fail.. */
					panic("%s: could not load old rx mbuf",
					    device_xname(sc->sc_dev));
				}
				ifp->if_ierrors++;
				goto skip;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		/*
		 * New mbuf successfully loaded, update Rx ring and continue
		 * processing.
		 */
		m = data->m;
		data->m = mnew;

mbufcopied:
		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

		/* Translate h/w checksum status bits into mbuf csum flags. */
		if ((sc->sc_flags & NFE_HW_CSUM) != 0) {
			/*
			 * XXX
			 * no way to check M_CSUM_IPv4_BAD or non-IPv4 packets?
			 */
			if (flags & NFE_RX_IP_CSUMOK) {
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
				DPRINTFN(3, ("%s: ip4csum-rx ok\n",
				    device_xname(sc->sc_dev)));
			}
			/*
			 * XXX
			 * no way to check M_CSUM_TCP_UDP_BAD or
			 * other protocols?
			 */
			if (flags & NFE_RX_UDP_CSUMOK) {
				m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
				DPRINTFN(3, ("%s: udp4csum-rx ok\n",
				    device_xname(sc->sc_dev)));
			} else if (flags & NFE_RX_TCP_CSUMOK) {
				m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
				DPRINTFN(3, ("%s: tcp4csum-rx ok\n",
				    device_xname(sc->sc_dev)));
			}
		}
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		ifp->if_ipackets++;
		(*ifp->if_input)(ifp, m);

skip1:
		/* update mapping address in h/w descriptor */
		if (sc->sc_flags & NFE_40BIT_ADDR) {
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
		} else {
			desc32->physaddr = htole32(physaddr);
		}

skip:
		/* Hand the descriptor back to the chip. */
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);

			nfe_rxdesc64_sync(sc, desc64,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		} else {
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);

			nfe_rxdesc32_sync(sc, desc32,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		}
	}
	/* update current RX pointer */
	sc->rxq.cur = i;
}

/*
 * nfe_txeof:
 *
 *	Tx completion: walk the Tx ring from txq.next, reclaiming
 *	descriptors the chip has finished with (NFE_TX_VALID clear),
 *	counting errors/packets on last-fragment descriptors, unloading
 *	DMA maps and freeing the transmitted mbuf chains.  Clears
 *	IFF_OACTIVE once a slot is free and the watchdog timer when the
 *	ring is empty.
 */
void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data = NULL;
	int i;
	uint16_t flags;
	char buf[128];		/* snprintb() scratch for error bits */

	for (i = sc->txq.next;
	    sc->txq.queued > 0;
	    i = NFE_TX_NEXTDESC(i), sc->txq.queued--) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[i];
			nfe_txdesc64_sync(sc, desc64,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			flags = le16toh(desc64->flags);
		} else {
			desc32 = &sc->txq.desc32[i];
			nfe_txdesc32_sync(sc, desc32,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			flags = le16toh(desc32->flags);
		}

		/* Descriptor still owned by the chip: stop reclaiming. */
		if ((flags & NFE_TX_VALID) != 0)
			break;

		data = &sc->txq.data[i];

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			/* Only last-fragment descriptors carry status. */
			if ((flags & NFE_TX_LASTFRAG_V1) == 0 &&
			    data->m == NULL)
				continue;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				snprintb(buf, sizeof(buf), NFE_V1_TXERR, flags);
				aprint_error_dev(sc->sc_dev, "tx v1 error %s\n",
				    buf);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		} else {
			if ((flags & NFE_TX_LASTFRAG_V2) == 0 &&
			    data->m == NULL)
				continue;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				snprintb(buf, sizeof(buf), NFE_V2_TXERR, flags);
				aprint_error_dev(sc->sc_dev, "tx v2 error %s\n",
				    buf);
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		}

		if (data->m == NULL) {	/* should not get there */
			aprint_error_dev(sc->sc_dev,
			    "last fragment bit w/o associated mbuf!\n");
			continue;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(sc->sc_dmat, data->active, 0,
		    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->active);
		m_freem(data->m);
		data->m = NULL;
	}

	sc->txq.next = i;

	if (sc->txq.queued < NFE_TX_RING_COUNT) {
		/* at least one slot freed */
		ifp->if_flags &= ~IFF_OACTIVE;
	}

	if (sc->txq.queued == 0) {
		/* all queued packets are sent */
		ifp->if_timer = 0;
	}
}
int 1007 nfe_encap(struct nfe_softc *sc, struct mbuf *m0) 1008 { 1009 struct nfe_desc32 *desc32; 1010 struct nfe_desc64 *desc64; 1011 struct nfe_tx_data *data; 1012 bus_dmamap_t map; 1013 uint16_t flags, csumflags; 1014 #if NVLAN > 0 1015 struct m_tag *mtag; 1016 uint32_t vtag = 0; 1017 #endif 1018 int error, i, first; 1019 1020 desc32 = NULL; 1021 desc64 = NULL; 1022 data = NULL; 1023 1024 flags = 0; 1025 csumflags = 0; 1026 first = sc->txq.cur; 1027 1028 map = sc->txq.data[first].map; 1029 1030 error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT); 1031 if (error != 0) { 1032 aprint_error_dev(sc->sc_dev, "could not map mbuf (error %d)\n", 1033 error); 1034 return error; 1035 } 1036 1037 if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) { 1038 bus_dmamap_unload(sc->sc_dmat, map); 1039 return ENOBUFS; 1040 } 1041 1042 #if NVLAN > 0 1043 /* setup h/w VLAN tagging */ 1044 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) 1045 vtag = NFE_TX_VTAG | VLAN_TAG_VALUE(mtag); 1046 #endif 1047 if ((sc->sc_flags & NFE_HW_CSUM) != 0) { 1048 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) 1049 csumflags |= NFE_TX_IP_CSUM; 1050 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_UDPv4)) 1051 csumflags |= NFE_TX_TCP_UDP_CSUM; 1052 } 1053 1054 for (i = 0; i < map->dm_nsegs; i++) { 1055 data = &sc->txq.data[sc->txq.cur]; 1056 1057 if (sc->sc_flags & NFE_40BIT_ADDR) { 1058 desc64 = &sc->txq.desc64[sc->txq.cur]; 1059 #if defined(__LP64__) 1060 desc64->physaddr[0] = 1061 htole32(map->dm_segs[i].ds_addr >> 32); 1062 #endif 1063 desc64->physaddr[1] = 1064 htole32(map->dm_segs[i].ds_addr & 0xffffffff); 1065 desc64->length = htole16(map->dm_segs[i].ds_len - 1); 1066 desc64->flags = htole16(flags); 1067 desc64->vtag = 0; 1068 } else { 1069 desc32 = &sc->txq.desc32[sc->txq.cur]; 1070 1071 desc32->physaddr = htole32(map->dm_segs[i].ds_addr); 1072 desc32->length = htole16(map->dm_segs[i].ds_len - 1); 1073 desc32->flags = htole16(flags); 1074 } 1075 1076 /* 1077 * 
Setting of the valid bit in the first descriptor is 1078 * deferred until the whole chain is fully setup. 1079 */ 1080 flags |= NFE_TX_VALID; 1081 1082 sc->txq.queued++; 1083 sc->txq.cur = NFE_TX_NEXTDESC(sc->txq.cur); 1084 } 1085 1086 /* the whole mbuf chain has been setup */ 1087 if (sc->sc_flags & NFE_40BIT_ADDR) { 1088 /* fix last descriptor */ 1089 flags |= NFE_TX_LASTFRAG_V2; 1090 desc64->flags = htole16(flags); 1091 1092 /* Checksum flags and vtag belong to the first fragment only. */ 1093 #if NVLAN > 0 1094 sc->txq.desc64[first].vtag = htole32(vtag); 1095 #endif 1096 sc->txq.desc64[first].flags |= htole16(csumflags); 1097 1098 /* finally, set the valid bit in the first descriptor */ 1099 sc->txq.desc64[first].flags |= htole16(NFE_TX_VALID); 1100 } else { 1101 /* fix last descriptor */ 1102 if (sc->sc_flags & NFE_JUMBO_SUP) 1103 flags |= NFE_TX_LASTFRAG_V2; 1104 else 1105 flags |= NFE_TX_LASTFRAG_V1; 1106 desc32->flags = htole16(flags); 1107 1108 /* Checksum flags belong to the first fragment only. 
*/ 1109 sc->txq.desc32[first].flags |= htole16(csumflags); 1110 1111 /* finally, set the valid bit in the first descriptor */ 1112 sc->txq.desc32[first].flags |= htole16(NFE_TX_VALID); 1113 } 1114 1115 data->m = m0; 1116 data->active = map; 1117 1118 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1119 BUS_DMASYNC_PREWRITE); 1120 1121 return 0; 1122 } 1123 1124 void 1125 nfe_start(struct ifnet *ifp) 1126 { 1127 struct nfe_softc *sc = ifp->if_softc; 1128 int old = sc->txq.queued; 1129 struct mbuf *m0; 1130 1131 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 1132 return; 1133 1134 for (;;) { 1135 IFQ_POLL(&ifp->if_snd, m0); 1136 if (m0 == NULL) 1137 break; 1138 1139 if (nfe_encap(sc, m0) != 0) { 1140 ifp->if_flags |= IFF_OACTIVE; 1141 break; 1142 } 1143 1144 /* packet put in h/w queue, remove from s/w queue */ 1145 IFQ_DEQUEUE(&ifp->if_snd, m0); 1146 1147 #if NBPFILTER > 0 1148 if (ifp->if_bpf != NULL) 1149 bpf_mtap(ifp->if_bpf, m0); 1150 #endif 1151 } 1152 1153 if (sc->txq.queued != old) { 1154 /* packets are queued */ 1155 if (sc->sc_flags & NFE_40BIT_ADDR) 1156 nfe_txdesc64_rsync(sc, old, sc->txq.cur, 1157 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1158 else 1159 nfe_txdesc32_rsync(sc, old, sc->txq.cur, 1160 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1161 /* kick Tx */ 1162 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl); 1163 1164 /* 1165 * Set a timeout in case the chip goes out to lunch. 
1166 */ 1167 ifp->if_timer = 5; 1168 } 1169 } 1170 1171 void 1172 nfe_watchdog(struct ifnet *ifp) 1173 { 1174 struct nfe_softc *sc = ifp->if_softc; 1175 1176 aprint_error_dev(sc->sc_dev, "watchdog timeout\n"); 1177 1178 ifp->if_flags &= ~IFF_RUNNING; 1179 nfe_init(ifp); 1180 1181 ifp->if_oerrors++; 1182 } 1183 1184 int 1185 nfe_init(struct ifnet *ifp) 1186 { 1187 struct nfe_softc *sc = ifp->if_softc; 1188 uint32_t tmp; 1189 int rc = 0, s; 1190 1191 if (ifp->if_flags & IFF_RUNNING) 1192 return 0; 1193 1194 nfe_stop(ifp, 0); 1195 1196 NFE_WRITE(sc, NFE_TX_UNK, 0); 1197 NFE_WRITE(sc, NFE_STATUS, 0); 1198 1199 sc->rxtxctl = NFE_RXTX_BIT2; 1200 if (sc->sc_flags & NFE_40BIT_ADDR) 1201 sc->rxtxctl |= NFE_RXTX_V3MAGIC; 1202 else if (sc->sc_flags & NFE_JUMBO_SUP) 1203 sc->rxtxctl |= NFE_RXTX_V2MAGIC; 1204 if (sc->sc_flags & NFE_HW_CSUM) 1205 sc->rxtxctl |= NFE_RXTX_RXCSUM; 1206 #if NVLAN > 0 1207 /* 1208 * Although the adapter is capable of stripping VLAN tags from received 1209 * frames (NFE_RXTX_VTAG_STRIP), we do not enable this functionality on 1210 * purpose. This will be done in software by our network stack. 
1211 */ 1212 if (sc->sc_flags & NFE_HW_VLAN) 1213 sc->rxtxctl |= NFE_RXTX_VTAG_INSERT; 1214 #endif 1215 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl); 1216 DELAY(10); 1217 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl); 1218 1219 #if NVLAN 1220 if (sc->sc_flags & NFE_HW_VLAN) 1221 NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE); 1222 #endif 1223 1224 NFE_WRITE(sc, NFE_SETUP_R6, 0); 1225 1226 /* set MAC address */ 1227 nfe_set_macaddr(sc, sc->sc_enaddr); 1228 1229 /* tell MAC where rings are in memory */ 1230 #ifdef __LP64__ 1231 NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32); 1232 #endif 1233 NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff); 1234 #ifdef __LP64__ 1235 NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32); 1236 #endif 1237 NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff); 1238 1239 NFE_WRITE(sc, NFE_RING_SIZE, 1240 (NFE_RX_RING_COUNT - 1) << 16 | 1241 (NFE_TX_RING_COUNT - 1)); 1242 1243 NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz); 1244 1245 /* force MAC to wakeup */ 1246 tmp = NFE_READ(sc, NFE_PWR_STATE); 1247 NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP); 1248 DELAY(10); 1249 tmp = NFE_READ(sc, NFE_PWR_STATE); 1250 NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID); 1251 1252 s = splnet(); 1253 nfe_intr(sc); /* XXX clear IRQ status registers */ 1254 splx(s); 1255 1256 #if 1 1257 /* configure interrupts coalescing/mitigation */ 1258 NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT); 1259 #else 1260 /* no interrupt mitigation: one interrupt per packet */ 1261 NFE_WRITE(sc, NFE_IMTIMER, 970); 1262 #endif 1263 1264 NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC); 1265 NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC); 1266 NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC); 1267 1268 /* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */ 1269 NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC); 1270 1271 NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC); 1272 NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_ENABLE); 
1273 1274 sc->rxtxctl &= ~NFE_RXTX_BIT2; 1275 NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl); 1276 DELAY(10); 1277 NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl); 1278 1279 /* set Rx filter */ 1280 nfe_setmulti(sc); 1281 1282 if ((rc = ether_mediachange(ifp)) != 0) 1283 goto out; 1284 1285 nfe_tick(sc); 1286 1287 /* enable Rx */ 1288 NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START); 1289 1290 /* enable Tx */ 1291 NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START); 1292 1293 NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); 1294 1295 /* enable interrupts */ 1296 NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED); 1297 1298 callout_schedule(&sc->sc_tick_ch, hz); 1299 1300 ifp->if_flags |= IFF_RUNNING; 1301 ifp->if_flags &= ~IFF_OACTIVE; 1302 1303 out: 1304 return rc; 1305 } 1306 1307 void 1308 nfe_stop(struct ifnet *ifp, int disable) 1309 { 1310 struct nfe_softc *sc = ifp->if_softc; 1311 1312 callout_stop(&sc->sc_tick_ch); 1313 1314 ifp->if_timer = 0; 1315 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1316 1317 mii_down(&sc->sc_mii); 1318 1319 /* abort Tx */ 1320 NFE_WRITE(sc, NFE_TX_CTL, 0); 1321 1322 /* disable Rx */ 1323 NFE_WRITE(sc, NFE_RX_CTL, 0); 1324 1325 /* disable interrupts */ 1326 NFE_WRITE(sc, NFE_IRQ_MASK, 0); 1327 1328 /* reset Tx and Rx rings */ 1329 nfe_reset_tx_ring(sc, &sc->txq); 1330 nfe_reset_rx_ring(sc, &sc->rxq); 1331 } 1332 1333 int 1334 nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1335 { 1336 struct nfe_desc32 *desc32; 1337 struct nfe_desc64 *desc64; 1338 struct nfe_rx_data *data; 1339 struct nfe_jbuf *jbuf; 1340 void **desc; 1341 bus_addr_t physaddr; 1342 int i, nsegs, error, descsize; 1343 1344 if (sc->sc_flags & NFE_40BIT_ADDR) { 1345 desc = (void **)&ring->desc64; 1346 descsize = sizeof (struct nfe_desc64); 1347 } else { 1348 desc = (void **)&ring->desc32; 1349 descsize = sizeof (struct nfe_desc32); 1350 } 1351 1352 ring->cur = ring->next = 0; 1353 ring->bufsz = MCLBYTES; 1354 1355 error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1, 
1356 NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map); 1357 if (error != 0) { 1358 aprint_error_dev(sc->sc_dev, 1359 "could not create desc DMA map\n"); 1360 goto fail; 1361 } 1362 1363 error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1364 PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT); 1365 if (error != 0) { 1366 aprint_error_dev(sc->sc_dev, 1367 "could not allocate DMA memory\n"); 1368 goto fail; 1369 } 1370 1371 error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs, 1372 NFE_RX_RING_COUNT * descsize, (void **)desc, BUS_DMA_NOWAIT); 1373 if (error != 0) { 1374 aprint_error_dev(sc->sc_dev, 1375 "could not map desc DMA memory\n"); 1376 goto fail; 1377 } 1378 1379 error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc, 1380 NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT); 1381 if (error != 0) { 1382 aprint_error_dev(sc->sc_dev, "could not load desc DMA map\n"); 1383 goto fail; 1384 } 1385 1386 bzero(*desc, NFE_RX_RING_COUNT * descsize); 1387 ring->physaddr = ring->map->dm_segs[0].ds_addr; 1388 1389 if (sc->sc_flags & NFE_USE_JUMBO) { 1390 ring->bufsz = NFE_JBYTES; 1391 if ((error = nfe_jpool_alloc(sc)) != 0) { 1392 aprint_error_dev(sc->sc_dev, 1393 "could not allocate jumbo frames\n"); 1394 goto fail; 1395 } 1396 } 1397 1398 /* 1399 * Pre-allocate Rx buffers and populate Rx ring. 
1400 */ 1401 for (i = 0; i < NFE_RX_RING_COUNT; i++) { 1402 data = &sc->rxq.data[i]; 1403 1404 MGETHDR(data->m, M_DONTWAIT, MT_DATA); 1405 if (data->m == NULL) { 1406 aprint_error_dev(sc->sc_dev, 1407 "could not allocate rx mbuf\n"); 1408 error = ENOMEM; 1409 goto fail; 1410 } 1411 1412 if (sc->sc_flags & NFE_USE_JUMBO) { 1413 if ((jbuf = nfe_jalloc(sc, i)) == NULL) { 1414 aprint_error_dev(sc->sc_dev, 1415 "could not allocate jumbo buffer\n"); 1416 goto fail; 1417 } 1418 MEXTADD(data->m, jbuf->buf, NFE_JBYTES, 0, nfe_jfree, 1419 sc); 1420 1421 physaddr = jbuf->physaddr; 1422 } else { 1423 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, 1424 MCLBYTES, 0, BUS_DMA_NOWAIT, &data->map); 1425 if (error != 0) { 1426 aprint_error_dev(sc->sc_dev, 1427 "could not create DMA map\n"); 1428 goto fail; 1429 } 1430 MCLGET(data->m, M_DONTWAIT); 1431 if (!(data->m->m_flags & M_EXT)) { 1432 aprint_error_dev(sc->sc_dev, 1433 "could not allocate mbuf cluster\n"); 1434 error = ENOMEM; 1435 goto fail; 1436 } 1437 1438 error = bus_dmamap_load(sc->sc_dmat, data->map, 1439 mtod(data->m, void *), MCLBYTES, NULL, 1440 BUS_DMA_READ | BUS_DMA_NOWAIT); 1441 if (error != 0) { 1442 aprint_error_dev(sc->sc_dev, 1443 "could not load rx buf DMA map"); 1444 goto fail; 1445 } 1446 physaddr = data->map->dm_segs[0].ds_addr; 1447 } 1448 1449 if (sc->sc_flags & NFE_40BIT_ADDR) { 1450 desc64 = &sc->rxq.desc64[i]; 1451 #if defined(__LP64__) 1452 desc64->physaddr[0] = htole32(physaddr >> 32); 1453 #endif 1454 desc64->physaddr[1] = htole32(physaddr & 0xffffffff); 1455 desc64->length = htole16(sc->rxq.bufsz); 1456 desc64->flags = htole16(NFE_RX_READY); 1457 } else { 1458 desc32 = &sc->rxq.desc32[i]; 1459 desc32->physaddr = htole32(physaddr); 1460 desc32->length = htole16(sc->rxq.bufsz); 1461 desc32->flags = htole16(NFE_RX_READY); 1462 } 1463 } 1464 1465 bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize, 1466 BUS_DMASYNC_PREWRITE); 1467 1468 return 0; 1469 1470 fail: nfe_free_rx_ring(sc, 
ring); 1471 return error; 1472 } 1473 1474 void 1475 nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1476 { 1477 int i; 1478 1479 for (i = 0; i < NFE_RX_RING_COUNT; i++) { 1480 if (sc->sc_flags & NFE_40BIT_ADDR) { 1481 ring->desc64[i].length = htole16(ring->bufsz); 1482 ring->desc64[i].flags = htole16(NFE_RX_READY); 1483 } else { 1484 ring->desc32[i].length = htole16(ring->bufsz); 1485 ring->desc32[i].flags = htole16(NFE_RX_READY); 1486 } 1487 } 1488 1489 bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize, 1490 BUS_DMASYNC_PREWRITE); 1491 1492 ring->cur = ring->next = 0; 1493 } 1494 1495 void 1496 nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) 1497 { 1498 struct nfe_rx_data *data; 1499 void *desc; 1500 int i, descsize; 1501 1502 if (sc->sc_flags & NFE_40BIT_ADDR) { 1503 desc = ring->desc64; 1504 descsize = sizeof (struct nfe_desc64); 1505 } else { 1506 desc = ring->desc32; 1507 descsize = sizeof (struct nfe_desc32); 1508 } 1509 1510 if (desc != NULL) { 1511 bus_dmamap_sync(sc->sc_dmat, ring->map, 0, 1512 ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1513 bus_dmamap_unload(sc->sc_dmat, ring->map); 1514 bus_dmamem_unmap(sc->sc_dmat, (void *)desc, 1515 NFE_RX_RING_COUNT * descsize); 1516 bus_dmamem_free(sc->sc_dmat, &ring->seg, 1); 1517 } 1518 1519 for (i = 0; i < NFE_RX_RING_COUNT; i++) { 1520 data = &ring->data[i]; 1521 1522 if (data->map != NULL) { 1523 bus_dmamap_sync(sc->sc_dmat, data->map, 0, 1524 data->map->dm_mapsize, BUS_DMASYNC_POSTREAD); 1525 bus_dmamap_unload(sc->sc_dmat, data->map); 1526 bus_dmamap_destroy(sc->sc_dmat, data->map); 1527 } 1528 if (data->m != NULL) 1529 m_freem(data->m); 1530 } 1531 } 1532 1533 struct nfe_jbuf * 1534 nfe_jalloc(struct nfe_softc *sc, int i) 1535 { 1536 struct nfe_jbuf *jbuf; 1537 1538 mutex_enter(&sc->rxq.mtx); 1539 jbuf = SLIST_FIRST(&sc->rxq.jfreelist); 1540 if (jbuf != NULL) 1541 SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext); 1542 mutex_exit(&sc->rxq.mtx); 1543 if (jbuf == 
NULL) 1544 return NULL; 1545 sc->rxq.jbufmap[i] = 1546 ((char *)jbuf->buf - (char *)sc->rxq.jpool) / NFE_JBYTES; 1547 return jbuf; 1548 } 1549 1550 /* 1551 * This is called automatically by the network stack when the mbuf is freed. 1552 * Caution must be taken that the NIC might be reset by the time the mbuf is 1553 * freed. 1554 */ 1555 void 1556 nfe_jfree(struct mbuf *m, void *buf, size_t size, void *arg) 1557 { 1558 struct nfe_softc *sc = arg; 1559 struct nfe_jbuf *jbuf; 1560 int i; 1561 1562 /* find the jbuf from the base pointer */ 1563 i = ((char *)buf - (char *)sc->rxq.jpool) / NFE_JBYTES; 1564 if (i < 0 || i >= NFE_JPOOL_COUNT) { 1565 aprint_error_dev(sc->sc_dev, 1566 "request to free a buffer (%p) not managed by us\n", buf); 1567 return; 1568 } 1569 jbuf = &sc->rxq.jbuf[i]; 1570 1571 /* ..and put it back in the free list */ 1572 mutex_enter(&sc->rxq.mtx); 1573 SLIST_INSERT_HEAD(&sc->rxq.jfreelist, jbuf, jnext); 1574 mutex_exit(&sc->rxq.mtx); 1575 1576 if (m != NULL) 1577 pool_cache_put(mb_cache, m); 1578 } 1579 1580 int 1581 nfe_jpool_alloc(struct nfe_softc *sc) 1582 { 1583 struct nfe_rx_ring *ring = &sc->rxq; 1584 struct nfe_jbuf *jbuf; 1585 bus_addr_t physaddr; 1586 char *buf; 1587 int i, nsegs, error; 1588 1589 /* 1590 * Allocate a big chunk of DMA'able memory. 
1591 */ 1592 error = bus_dmamap_create(sc->sc_dmat, NFE_JPOOL_SIZE, 1, 1593 NFE_JPOOL_SIZE, 0, BUS_DMA_NOWAIT, &ring->jmap); 1594 if (error != 0) { 1595 aprint_error_dev(sc->sc_dev, 1596 "could not create jumbo DMA map\n"); 1597 goto fail; 1598 } 1599 1600 error = bus_dmamem_alloc(sc->sc_dmat, NFE_JPOOL_SIZE, PAGE_SIZE, 0, 1601 &ring->jseg, 1, &nsegs, BUS_DMA_NOWAIT); 1602 if (error != 0) { 1603 aprint_error_dev(sc->sc_dev, 1604 "could not allocate jumbo DMA memory\n"); 1605 goto fail; 1606 } 1607 1608 error = bus_dmamem_map(sc->sc_dmat, &ring->jseg, nsegs, NFE_JPOOL_SIZE, 1609 &ring->jpool, BUS_DMA_NOWAIT); 1610 if (error != 0) { 1611 aprint_error_dev(sc->sc_dev, 1612 "could not map jumbo DMA memory\n"); 1613 goto fail; 1614 } 1615 1616 error = bus_dmamap_load(sc->sc_dmat, ring->jmap, ring->jpool, 1617 NFE_JPOOL_SIZE, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT); 1618 if (error != 0) { 1619 aprint_error_dev(sc->sc_dev, 1620 "could not load jumbo DMA map\n"); 1621 goto fail; 1622 } 1623 1624 /* ..and split it into 9KB chunks */ 1625 SLIST_INIT(&ring->jfreelist); 1626 1627 buf = ring->jpool; 1628 physaddr = ring->jmap->dm_segs[0].ds_addr; 1629 for (i = 0; i < NFE_JPOOL_COUNT; i++) { 1630 jbuf = &ring->jbuf[i]; 1631 1632 jbuf->buf = buf; 1633 jbuf->physaddr = physaddr; 1634 1635 SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext); 1636 1637 buf += NFE_JBYTES; 1638 physaddr += NFE_JBYTES; 1639 } 1640 1641 return 0; 1642 1643 fail: nfe_jpool_free(sc); 1644 return error; 1645 } 1646 1647 void 1648 nfe_jpool_free(struct nfe_softc *sc) 1649 { 1650 struct nfe_rx_ring *ring = &sc->rxq; 1651 1652 if (ring->jmap != NULL) { 1653 bus_dmamap_sync(sc->sc_dmat, ring->jmap, 0, 1654 ring->jmap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1655 bus_dmamap_unload(sc->sc_dmat, ring->jmap); 1656 bus_dmamap_destroy(sc->sc_dmat, ring->jmap); 1657 } 1658 if (ring->jpool != NULL) { 1659 bus_dmamem_unmap(sc->sc_dmat, ring->jpool, NFE_JPOOL_SIZE); 1660 bus_dmamem_free(sc->sc_dmat, &ring->jseg, 1); 1661 } 1662 } 
/*
 * Allocate the Tx descriptor ring (32- or 64-bit layout depending on
 * NFE_40BIT_ADDR) and create one DMA map per slot for outgoing mbuf
 * chains.  On failure the partial ring is torn down and an errno
 * returned.
 */
int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, nsegs, error;
	void **desc;
	int descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);

	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_TX_RING_COUNT * descsize, (void **)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map desc DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not load desc DMA map\n");
		goto fail;
	}

	bzero(*desc, NFE_TX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	/* One map per slot, sized for jumbo frames split across segments. */
	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, NFE_JBYTES,
		    NFE_MAX_SCATTER, NFE_JBYTES, 0, BUS_DMA_NOWAIT,
		    &ring->data[i].map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create DMA map\n");
			goto fail;
		}
	}

	return 0;

fail:	nfe_free_tx_ring(sc, ring);
	return error;
}

/*
 * Reclaim every in-flight Tx buffer, clear all descriptor flags and
 * rewind the ring indices (used by nfe_stop()).
 */
void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		data = &ring->data[i];

		if (data->m != NULL) {
			/* data->active is the map nfe_encap() loaded. */
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

/*
 * Release the Tx descriptor ring DMA resources, free any pending
 * mbufs, then destroy every per-slot DMA map.  Safe on a partially
 * constructed ring (used by the nfe_alloc_tx_ring() failure path).
 */
void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (void *)desc,
		    NFE_TX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
		}
	}

	/* ..and now actually destroy the DMA mappings */
	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];
		if (data->map == NULL)
			continue;
		bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

/*
 * Program the hardware Rx filter: compute an address/mask pair covering
 * all joined multicast groups (falling back to promiscuous-multicast
 * all-zero addr/mask for IFF_ALLMULTI/IFF_PROMISC or address ranges)
 * and select promiscuous vs. unicast-to-me mode.
 */
void
nfe_setmulti(struct nfe_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &ec->ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
		goto done;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		/* A range of addresses cannot be expressed; accept all. */
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			ifp->if_flags |= IFF_ALLMULTI;
			bzero(addr, ETHER_ADDR_LEN);
			bzero(mask, ETHER_ADDR_LEN);
			goto done;
		}
		/* addr keeps the common 1-bits, mask the common 0-bits. */
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &= enm->enm_addrlo[i];
			mask[i] &= ~enm->enm_addrlo[i];
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
	    mask[5] << 8 | mask[4]);

	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

/*
 * Read the station address from the MACADDR registers.  Chips without
 * NFE_CORRECT_MACADDR store it byte-reversed, hence the two layouts.
 */
void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	if ((sc->sc_flags & NFE_CORRECT_MACADDR) != 0) {
		tmp = NFE_READ(sc, NFE_MACADDR_HI);
		addr[0] = (tmp & 0xff);
		addr[1] = (tmp >> 8) & 0xff;
		addr[2] = (tmp >> 16) & 0xff;
		addr[3] = (tmp >> 24) & 0xff;

		tmp = NFE_READ(sc, NFE_MACADDR_LO);
		addr[4] = (tmp & 0xff);
		addr[5] = (tmp >> 8) & 0xff;

	} else {
		/* older chips: address is stored in reversed byte order */
		tmp = NFE_READ(sc, NFE_MACADDR_LO);
		addr[0] = (tmp >> 8) & 0xff;
		addr[1] = (tmp & 0xff);

		tmp = NFE_READ(sc, NFE_MACADDR_HI);
		addr[2] = (tmp >> 24) & 0xff;
		addr[3] = (tmp >> 16) & 0xff;
		addr[4] = (tmp >> 8) & 0xff;
		addr[5] = (tmp & 0xff);
	}
}

/*
 * Program the station address into the MACADDR registers (layout used
 * by nfe_init(); inverse of the NFE_CORRECT_MACADDR read path above).
 */
void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

/*
 * Once-a-second callout: poll the PHY via MII and reschedule.
 */
void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	callout_schedule(&sc->sc_tick_ch, hz);
}

/*
 * Bring the MAC out of any low-power state on chips flagged
 * NFE_PWR_MGMT: reset the MAC and clear the wakeup bits in PWR2_CTL.
 */
void
nfe_poweron(device_t self)
{
	struct nfe_softc *sc = device_private(self);

	if ((sc->sc_flags & NFE_PWR_MGMT) != 0) {
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
		NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
		DELAY(100);
		NFE_WRITE(sc, NFE_MAC_RESET, 0);
		DELAY(100);
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
		NFE_WRITE(sc, NFE_PWR2_CTL,
		    NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_WAKEUP_MASK);
	}
}

/*
 * pmf resume hook: re-apply the power-on sequence after suspend.
 */
bool
nfe_resume(device_t dv PMF_FN_ARGS)
{
	nfe_poweron(dv);

	return true;
}