/*	$NetBSD: if_nfe.c,v 1.34 2008/04/20 08:57:37 cube Exp $	*/
/*	$OpenBSD: if_nfe.c,v 1.77 2008/02/05 16:52:50 brad Exp $	*/

/*-
 * Copyright (c) 2006, 2007 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_nfe.c,v 1.34 2008/04/20 08:57:37 cube Exp $");

#include "opt_inet.h"
#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/callout.h>
#include <sys/socket.h>

#include <sys/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>
#include <net/if_arp.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_inarp.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_nfereg.h>
#include <dev/pci/if_nfevar.h>

int	nfe_match(device_t, cfdata_t, void *);
void	nfe_attach(device_t, device_t, void *);
void	nfe_power(int, void *);
void	nfe_miibus_statchg(device_t);
int	nfe_miibus_readreg(device_t, int, int);
void	nfe_miibus_writereg(device_t, int, int, int);
int	nfe_intr(void *);
int	nfe_ioctl(struct ifnet *, u_long, void *);
void	nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_txdesc32_rsync(struct nfe_softc *, int, int, int);
void	nfe_txdesc64_rsync(struct nfe_softc *, int, int, int);
void	nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_rxeof(struct nfe_softc *);
void	nfe_txeof(struct nfe_softc *);
int	nfe_encap(struct nfe_softc *, struct mbuf *);
void	nfe_start(struct ifnet *);
void	nfe_watchdog(struct ifnet *);
int	nfe_init(struct ifnet *);
void	nfe_stop(struct ifnet *, int);
struct nfe_jbuf *nfe_jalloc(struct nfe_softc *, int);
void	nfe_jfree(struct mbuf *, void *, size_t, void *);
int	nfe_jpool_alloc(struct nfe_softc *);
void	nfe_jpool_free(struct nfe_softc *);
int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_setmulti(struct nfe_softc *);
void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
void	nfe_tick(void *);

CFATTACH_DECL_NEW(nfe, sizeof(struct nfe_softc), nfe_match, nfe_attach,
    NULL, NULL);

/* #define NFE_NO_JUMBO */

#ifdef NFE_DEBUG
int nfedebug = 0;
#define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
#define DPRINTFN(n,x)	do { if (nfedebug >= (n)) printf x; } while (0)
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

/* deal with naming differences */

#define	PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 \
	PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1
#define	PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 \
	PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2
#define	PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 \
	PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN

#define	PCI_PRODUCT_NVIDIA_CK804_LAN1 \
	PCI_PRODUCT_NVIDIA_NFORCE4_LAN1
#define	PCI_PRODUCT_NVIDIA_CK804_LAN2 \
	PCI_PRODUCT_NVIDIA_NFORCE4_LAN2

#define	PCI_PRODUCT_NVIDIA_MCP51_LAN1 \
	PCI_PRODUCT_NVIDIA_NFORCE430_LAN1
#define	PCI_PRODUCT_NVIDIA_MCP51_LAN2 \
	PCI_PRODUCT_NVIDIA_NFORCE430_LAN2

#ifdef _LP64
#define	__LP64__ 1
#endif

const struct nfe_product {
	pci_vendor_id_t		vendor;
	pci_product_id_t	product;
} nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4 }
};
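
/*
 * Match any of the NVIDIA Ethernet functions listed in nfe_devices[] above.
 */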
int
nfe_match(device_t dev, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;
	const struct nfe_product *np;
	int i;

	for (i = 0; i < sizeof(nfe_devices) / sizeof(nfe_devices[0]); i++) {
		np = &nfe_devices[i];
		if (PCI_VENDOR(pa->pa_id) == np->vendor &&
		    PCI_PRODUCT(pa->pa_id) == np->product)
			return 1;
	}
	return 0;
}

void
nfe_attach(device_t parent, device_t self, void *aux)
{
	struct nfe_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	bus_size_t memsize;
	pcireg_t memtype;
	char devinfo[256];

	sc->sc_dev = self;
	pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo, sizeof(devinfo));
	aprint_normal(": %s (rev. 0x%02x)\n", devinfo, PCI_REVISION(pa->pa_class));

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NFE_PCI_BA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		if (pci_mapreg_map(pa, NFE_PCI_BA, memtype, 0, &sc->sc_memt,
		    &sc->sc_memh, NULL, &memsize) == 0)
			break;
		/* FALLTHROUGH */
	default:
		aprint_error_dev(self, "could not map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		aprint_error_dev(self, "could not map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, nfe_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "could not establish interrupt");
		if (intrstr != NULL)
			aprint_normal(" at %s", intrstr);
		aprint_normal("\n");
		return;
	}
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	sc->sc_dmat = pa->pa_dmat;

	sc->sc_flags = 0;

	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_CORRECT_MACADDR |
		    NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_HW_VLAN | NFE_PWR_MGMT;
		break;
	}

	if ((sc->sc_flags & NFE_PWR_MGMT) != 0) {
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
		NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
		DELAY(100);
		NFE_WRITE(sc, NFE_MAC_RESET, 0);
		DELAY(100);
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
		NFE_WRITE(sc, NFE_PWR2_CTL,
		    NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_WAKEUP_MASK);
	}

#ifndef NFE_NO_JUMBO
	/* enable jumbo frames for adapters that support it */
	if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->sc_flags |= NFE_USE_JUMBO;
#endif

	/* Check for reversed ethernet address */
	if ((NFE_READ(sc, NFE_TX_UNK) & NFE_MAC_ADDR_INORDER) != 0)
		sc->sc_flags |= NFE_CORRECT_MACADDR;

	nfe_get_macaddr(sc, sc->sc_enaddr);
	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(sc->sc_enaddr));

	/*
	 * Allocate Tx and Rx rings.
	 */
	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
		aprint_error_dev(self, "could not allocate Tx ring\n");
		return;
	}

	mutex_init(&sc->rxq.mtx, MUTEX_SPIN, IPL_NET);

	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
		aprint_error_dev(self, "could not allocate Rx ring\n");
		nfe_free_tx_ring(sc, &sc->txq);
		return;
	}

	ifp = &sc->sc_ethercom.ec_if;
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_stop = nfe_stop;
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifp->if_baudrate = IF_Gbps(1);
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);

#ifdef notyet
	if (sc->sc_flags & NFE_USE_JUMBO)
		ifp->if_hardmtu = NFE_JUMBO_MTU;
#endif

#if NVLAN > 0
	if (sc->sc_flags & NFE_HW_VLAN)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU;
#endif
	if (sc->sc_flags & NFE_HW_CSUM) {
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
	}

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = nfe_miibus_readreg;
	sc->sc_mii.mii_writereg = nfe_miibus_writereg;
	sc->sc_mii.mii_statchg = nfe_miibus_statchg;

	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
	    ether_mediastatus);
	mii_attach(self, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		aprint_error_dev(self, "no PHY found!\n");
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

	callout_init(&sc->sc_tick_ch, 0);
	callout_setfunc(&sc->sc_tick_ch, nfe_tick, sc);

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");
	else
		pmf_class_network_register(self, ifp);
}

void
nfe_miibus_statchg(device_t dev)
{
	struct nfe_softc *sc = device_private(dev);
	struct mii_data *mii = &sc->sc_mii;
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_HDX) {
		phy |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}

int
nfe_miibus_readreg(device_t dev, int phy, int reg)
{
	struct nfe_softc *sc = device_private(dev);
	uint32_t val;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(2, ("%s: timeout waiting for PHY\n",
		    device_xname(sc->sc_dev)));
		return 0;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(2, ("%s: could not read PHY\n",
		    device_xname(sc->sc_dev)));
		return 0;
	}

	val = NFE_READ(sc, NFE_PHY_DATA);
	if (val != 0xffffffff && val != 0)
		sc->mii_phyaddr = phy;

	DPRINTFN(2, ("%s: mii read phy %d reg 0x%x ret 0x%x\n",
	    device_xname(sc->sc_dev), phy, reg, val));

	return val;
}

void
nfe_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct nfe_softc *sc = device_private(dev);
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
#ifdef NFE_DEBUG
	if (nfedebug >= 2 && ntries == 1000)
		printf("could not write to PHY\n");
#endif
}

int
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t r;
	int handled;

	if ((ifp->if_flags & IFF_UP) == 0)
		return 0;

	handled = 0;

	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	for (;;) {
		r = NFE_READ(sc, NFE_IRQ_STATUS);
		if ((r & NFE_IRQ_WANTED) == 0)
			break;

		NFE_WRITE(sc, NFE_IRQ_STATUS, r);
		handled = 1;
		DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));

		if ((r & (NFE_IRQ_RXERR|NFE_IRQ_RX_NOBUF|NFE_IRQ_RX)) != 0) {
			/* check Rx ring */
			nfe_rxeof(sc);
		}
		if ((r & (NFE_IRQ_TXERR|NFE_IRQ_TXERR2|NFE_IRQ_TX_DONE)) != 0) {
			/* check Tx ring */
			nfe_txeof(sc);
		}
		if ((r & NFE_IRQ_LINK) != 0) {
			NFE_READ(sc, NFE_PHY_STATUS);
			NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
			DPRINTF(("%s: link state changed\n",
			    device_xname(sc->sc_dev)));
		}
	}

	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	if (handled && !IF_IS_EMPTY(&ifp->if_snd))
		nfe_start(ifp);

	return handled;
}

int
nfe_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		nfe_init(ifp);
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			arp_ifinit(ifp, ifa);
			break;
#endif
		default:
			break;
		}
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN ||
		    ((sc->sc_flags & NFE_USE_JUMBO) &&
		    ifr->ifr_mtu > ETHERMTU_JUMBO) ||
		    (!(sc->sc_flags & NFE_USE_JUMBO) &&
		    ifr->ifr_mtu > ETHERMTU))
			error = EINVAL;
		else if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET)
			error = 0;
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the PROMISC or ALLMULTI flag changes, then
			 * don't do a full re-init of the chip, just update
			 * the Rx filter.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->sc_if_flags) &
			     (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
				nfe_setmulti(sc);
			} else
				nfe_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nfe_stop(ifp, 1);
		}
		sc->sc_if_flags = ifp->if_flags;
		break;
	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;

		error = 0;

		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING)
			nfe_setmulti(sc);
		break;
	}

	splx(s);

	return error;
}

void
nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (char *)desc32 - (char *)sc->txq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (char *)desc64 - (char *)sc->txq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

void
nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (char *)&sc->txq.desc32[start] - (char *)sc->txq.desc32,
		    (char *)&sc->txq.desc32[end] -
		    (char *)&sc->txq.desc32[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (char *)&sc->txq.desc32[start] - (char *)sc->txq.desc32,
	    (char *)&sc->txq.desc32[NFE_TX_RING_COUNT] -
	    (char *)&sc->txq.desc32[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (char *)&sc->txq.desc32[end] - (char *)sc->txq.desc32, ops);
}

void
nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (char *)&sc->txq.desc64[start] - (char *)sc->txq.desc64,
		    (char *)&sc->txq.desc64[end] -
		    (char *)&sc->txq.desc64[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (char *)&sc->txq.desc64[start] - (char *)sc->txq.desc64,
	    (char *)&sc->txq.desc64[NFE_TX_RING_COUNT] -
	    (char *)&sc->txq.desc64[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (char *)&sc->txq.desc64[end] - (char *)sc->txq.desc64, ops);
}

void
nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (char *)desc32 - (char *)sc->rxq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (char *)desc64 - (char *)sc->rxq.desc64,
	    sizeof (struct nfe_desc64), ops);
}
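
/*
 * Drain completed descriptors from the Rx ring: hand each received frame to
 * the network stack and recycle (or replace) the Rx buffer behind it.
 */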
void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	struct mbuf *m, *mnew;
	bus_addr_t physaddr;
	uint16_t flags;
	int error, len, i;

	desc32 = NULL;
	desc64 = NULL;
	for (i = sc->rxq.cur;; i = NFE_RX_NEXTDESC(i)) {
		data = &sc->rxq.data[i];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
			nfe_rxdesc64_sync(sc, desc64,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			flags = le16toh(desc64->flags);
			len = le16toh(desc64->length) & 0x3fff;
		} else {
			desc32 = &sc->rxq.desc32[i];
			nfe_rxdesc32_sync(sc, desc32,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			flags = le16toh(desc32->flags);
			len = le16toh(desc32->length) & 0x3fff;
		}

		if ((flags & NFE_RX_READY) != 0)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if ((flags & NFE_RX_VALID_V1) == 0)
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if ((flags & NFE_RX_VALID_V2) == 0)
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			ifp->if_ierrors++;
			goto skip;
		}

		/*
		 * Try to allocate a new mbuf for this ring element and load
		 * it before processing the current mbuf. If the ring element
		 * cannot be loaded, drop the received packet and reuse the
		 * old mbuf. In the unlikely case that the old mbuf can't be
		 * reloaded either, explicitly panic.
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			physaddr =
			    sc->rxq.jbuf[sc->rxq.jbufmap[i]].physaddr;
			if ((jbuf = nfe_jalloc(sc, i)) == NULL) {
				if (len > MCLBYTES) {
					m_freem(mnew);
					ifp->if_ierrors++;
					goto skip1;
				}
				MCLGET(mnew, M_DONTWAIT);
				if ((mnew->m_flags & M_EXT) == 0) {
					m_freem(mnew);
					ifp->if_ierrors++;
					goto skip1;
				}

				(void)memcpy(mtod(mnew, void *),
				    mtod(data->m, const void *), len);
				m = mnew;
				goto mbufcopied;
			} else {
				MEXTADD(mnew, jbuf->buf, NFE_JBYTES, 0, nfe_jfree, sc);
				bus_dmamap_sync(sc->sc_dmat, sc->rxq.jmap,
				    mtod(data->m, char *) - (char *)sc->rxq.jpool,
				    NFE_JBYTES, BUS_DMASYNC_POSTREAD);

				physaddr = jbuf->physaddr;
			}
		} else {
			MCLGET(mnew, M_DONTWAIT);
			if ((mnew->m_flags & M_EXT) == 0) {
				m_freem(mnew);
				ifp->if_ierrors++;
				goto skip;
			}

			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(mnew, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				m_freem(mnew);

				/* try to reload the old mbuf */
				error = bus_dmamap_load(sc->sc_dmat, data->map,
				    mtod(data->m, void *), MCLBYTES, NULL,
				    BUS_DMA_READ | BUS_DMA_NOWAIT);
				if (error != 0) {
					/* very unlikely that it will fail.. */
					panic("%s: could not load old rx mbuf",
					    device_xname(sc->sc_dev));
				}
				ifp->if_ierrors++;
				goto skip;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		/*
		 * New mbuf successfully loaded, update Rx ring and continue
		 * processing.
		 */
		m = data->m;
		data->m = mnew;

mbufcopied:
		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m->m_pkthdr.rcvif = ifp;

		if ((sc->sc_flags & NFE_HW_CSUM) != 0) {
			/*
			 * XXX
			 * no way to check M_CSUM_IPv4_BAD or non-IPv4 packets?
			 */
			if (flags & NFE_RX_IP_CSUMOK) {
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
				DPRINTFN(3, ("%s: ip4csum-rx ok\n",
				    device_xname(sc->sc_dev)));
			}
			/*
			 * XXX
			 * no way to check M_CSUM_TCP_UDP_BAD or
			 * other protocols?
			 */
			if (flags & NFE_RX_UDP_CSUMOK) {
				m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
				DPRINTFN(3, ("%s: udp4csum-rx ok\n",
				    device_xname(sc->sc_dev)));
			} else if (flags & NFE_RX_TCP_CSUMOK) {
				m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
				DPRINTFN(3, ("%s: tcp4csum-rx ok\n",
				    device_xname(sc->sc_dev)));
			}
		}
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		ifp->if_ipackets++;
		(*ifp->if_input)(ifp, m);

skip1:
		/* update mapping address in h/w descriptor */
		if (sc->sc_flags & NFE_40BIT_ADDR) {
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
		} else {
			desc32->physaddr = htole32(physaddr);
		}

skip:
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);

			nfe_rxdesc64_sync(sc, desc64,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		} else {
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);

			nfe_rxdesc32_sync(sc, desc32,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		}
	}
	/* update current RX pointer */
	sc->rxq.cur = i;
}

void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data = NULL;
	int i;
	uint16_t flags;
	char buf[128];

	for (i = sc->txq.next;
	    sc->txq.queued > 0;
	    i = NFE_TX_NEXTDESC(i), sc->txq.queued--) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[i];
			nfe_txdesc64_sync(sc, desc64,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			flags = le16toh(desc64->flags);
		} else {
			desc32 = &sc->txq.desc32[i];
			nfe_txdesc32_sync(sc, desc32,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

			flags = le16toh(desc32->flags);
		}

		if ((flags & NFE_TX_VALID) != 0)
			break;

		data = &sc->txq.data[i];

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if ((flags & NFE_TX_LASTFRAG_V1) == 0 &&
			    data->m == NULL)
				continue;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				aprint_error_dev(sc->sc_dev, "tx v1 error %s\n",
				    bitmask_snprintf(flags, NFE_V1_TXERR,
				    buf, sizeof(buf)));
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		} else {
			if ((flags & NFE_TX_LASTFRAG_V2) == 0 &&
			    data->m == NULL)
				continue;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				aprint_error_dev(sc->sc_dev, "tx v2 error %s\n",
				    bitmask_snprintf(flags, NFE_V2_TXERR,
				    buf, sizeof(buf)));
				ifp->if_oerrors++;
			} else
				ifp->if_opackets++;
		}

		if (data->m == NULL) {	/* should not get there */
			aprint_error_dev(sc->sc_dev,
			    "last fragment bit w/o associated mbuf!\n");
			continue;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(sc->sc_dmat, data->active, 0,
		    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->active);
		m_freem(data->m);
		data->m = NULL;
	}

	sc->txq.next = i;

	if (sc->txq.queued < NFE_TX_RING_COUNT) {
		/* at least one slot freed */
		ifp->if_flags &= ~IFF_OACTIVE;
	}

	if (sc->txq.queued == 0) {
		/* all queued packets are sent */
		ifp->if_timer = 0;
	}
}

int
nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	bus_dmamap_t map;
	uint16_t flags, csumflags;
#if NVLAN > 0
	struct m_tag *mtag;
	uint32_t vtag = 0;
#endif
	int error, i, first;

	desc32 = NULL;
	desc64 = NULL;
	data = NULL;

	flags = 0;
	csumflags = 0;
	first = sc->txq.cur;

	map = sc->txq.data[first].map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not map mbuf (error %d)\n",
		    error);
		return error;
	}

	if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

#if NVLAN > 0
	/* setup h/w VLAN tagging */
	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL)
		vtag = NFE_TX_VTAG | VLAN_TAG_VALUE(mtag);
#endif
	if ((sc->sc_flags & NFE_HW_CSUM) != 0) {
		if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4)
			csumflags |= NFE_TX_IP_CSUM;
		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_UDPv4))
			csumflags |= NFE_TX_TCP_UDP_CSUM;
	}

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->txq.data[sc->txq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.cur];
#if defined(__LP64__)
			desc64->physaddr[0] =
			    htole32(map->dm_segs[i].ds_addr >> 32);
#endif
			desc64->physaddr[1] =
			    htole32(map->dm_segs[i].ds_addr & 0xffffffff);
			desc64->length = htole16(map->dm_segs[i].ds_len - 1);
			desc64->flags = htole16(flags);
			desc64->vtag = 0;
		} else {
			desc32 = &sc->txq.desc32[sc->txq.cur];

			desc32->physaddr = htole32(map->dm_segs[i].ds_addr);
			desc32->length = htole16(map->dm_segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		/*
		 * Setting of the valid bit in the first descriptor is
		 * deferred until the whole chain is fully setup.
		 */
		flags |= NFE_TX_VALID;

		sc->txq.queued++;
		sc->txq.cur = NFE_TX_NEXTDESC(sc->txq.cur);
	}

	/* the whole mbuf chain has been setup */
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		/* fix last descriptor */
		flags |= NFE_TX_LASTFRAG_V2;
		desc64->flags = htole16(flags);

		/* Checksum flags and vtag belong to the first fragment only. */
#if NVLAN > 0
		sc->txq.desc64[first].vtag = htole32(vtag);
#endif
		sc->txq.desc64[first].flags |= htole16(csumflags);

		/* finally, set the valid bit in the first descriptor */
		sc->txq.desc64[first].flags |= htole16(NFE_TX_VALID);
	} else {
		/* fix last descriptor */
		if (sc->sc_flags & NFE_JUMBO_SUP)
			flags |= NFE_TX_LASTFRAG_V2;
		else
			flags |= NFE_TX_LASTFRAG_V1;
		desc32->flags = htole16(flags);

		/* Checksum flags belong to the first fragment only. */
		sc->txq.desc32[first].flags |= htole16(csumflags);

		/* finally, set the valid bit in the first descriptor */
		sc->txq.desc32[first].flags |= htole16(NFE_TX_VALID);
	}

	data->m = m0;
	data->active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}

void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	int old = sc->txq.queued;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (nfe_encap(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* packet put in h/w queue, remove from s/w queue */
		IFQ_DEQUEUE(&ifp->if_snd, m0);

#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m0);
#endif
	}

	if (sc->txq.queued != old) {
		/* packets are queued */
		if (sc->sc_flags & NFE_40BIT_ADDR)
			nfe_txdesc64_rsync(sc, old, sc->txq.cur,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		else
			nfe_txdesc32_rsync(sc, old, sc->txq.cur,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		/* kick Tx */
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		ifp->if_timer = 5;
	}
}

void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	aprint_error_dev(sc->sc_dev, "watchdog timeout\n");

	ifp->if_flags &= ~IFF_RUNNING;
	nfe_init(ifp);

	ifp->if_oerrors++;
}

int
nfe_init(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	uint32_t tmp;
	int rc = 0, s;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	nfe_stop(ifp, 0);

	NFE_WRITE(sc, NFE_TX_UNK, 0);
	NFE_WRITE(sc, NFE_STATUS, 0);

	sc->rxtxctl = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->rxtxctl |= NFE_RXTX_V2MAGIC;
	if (sc->sc_flags & NFE_HW_CSUM)
		sc->rxtxctl |= NFE_RXTX_RXCSUM;
#if NVLAN > 0
	/*
	 * Although the adapter is capable of stripping VLAN tags from received
	 * frames (NFE_RXTX_VTAG_STRIP), we do not enable this functionality on
	 * purpose. This will be done in software by our network stack.
	 */
	if (sc->sc_flags & NFE_HW_VLAN)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT;
#endif
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

#if NVLAN
	if (sc->sc_flags & NFE_HW_VLAN)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
#endif

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->sc_enaddr);

	/* tell MAC where rings are in memory */
#ifdef __LP64__
	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, sc->rxq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
#ifdef __LP64__
	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, sc->txq.physaddr >> 32);
#endif
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);

	NFE_WRITE(sc, NFE_RING_SIZE,
	    (NFE_RX_RING_COUNT - 1) << 16 |
	    (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);

	/* force MAC to wakeup */
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
	DELAY(10);
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);

	s = splnet();
	nfe_intr(sc); /* XXX clear IRQ status registers */
	splx(s);

#if 1
	/* configure interrupts coalescing/mitigation */
	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
#else
	/* no interrupt mitigation: one interrupt per packet */
	NFE_WRITE(sc, NFE_IMTIMER, 970);
#endif

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_ENABLE);

	sc->rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

	/* set Rx filter */
	nfe_setmulti(sc);

	if ((rc = ether_mediachange(ifp)) != 0)
		goto out;

	nfe_tick(sc);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	callout_schedule(&sc->sc_tick_ch, hz);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

out:
	return rc;
}

void
nfe_stop(struct ifnet *ifp, int disable)
{
	struct nfe_softc *sc = ifp->if_softc;

	callout_stop(&sc->sc_tick_ch);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	mii_down(&sc->sc_mii);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	/* disable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	/* reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}
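
/*
 * Allocate the Rx ring: DMA-safe descriptor memory plus one receive buffer
 * (mbuf cluster or jumbo buffer) per descriptor.
 */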
int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	void **desc;
	bus_addr_t physaddr;
	int i, nsegs, error, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->cur = ring->next = 0;
	ring->bufsz = MCLBYTES;

	error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1,
	    NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_RX_RING_COUNT * descsize, (void **)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map desc DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not load desc DMA map\n");
		goto fail;
	}

	bzero(*desc, NFE_RX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	if (sc->sc_flags & NFE_USE_JUMBO) {
		ring->bufsz = NFE_JBYTES;
		if ((error = nfe_jpool_alloc(sc)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate jumbo frames\n");
			goto fail;
		}
	}

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
	 */
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &sc->rxq.data[i];

		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
		if (data->m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate rx mbuf\n");
			error = ENOMEM;
			goto fail;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc, i)) == NULL) {
				aprint_error_dev(sc->sc_dev,
				    "could not allocate jumbo buffer\n");
				goto fail;
			}
			MEXTADD(data->m, jbuf->buf, NFE_JBYTES, 0, nfe_jfree,
			    sc);

			physaddr = jbuf->physaddr;
		} else {
			error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->map);
			if (error != 0) {
				aprint_error_dev(sc->sc_dev,
				    "could not create DMA map\n");
				goto fail;
			}
			MCLGET(data->m, M_DONTWAIT);
			if (!(data->m->m_flags & M_EXT)) {
				aprint_error_dev(sc->sc_dev,
				    "could not allocate mbuf cluster\n");
				error = ENOMEM;
				goto fail;
			}

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(data->m, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				aprint_error_dev(sc->sc_dev,
				    "could not load rx buf DMA map");
				goto fail;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
#if defined(__LP64__)
			desc64->physaddr[0] = htole32(physaddr >> 32);
#endif
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);
		} else {
			desc32 = &sc->rxq.desc32[i];
			desc32->physaddr = htole32(physaddr);
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	nfe_free_rx_ring(sc, ring);
	return error;
}

void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			ring->desc64[i].length = htole16(ring->bufsz);
			ring->desc64[i].flags = htole16(NFE_RX_READY);
		} else {
			ring->desc32[i].length = htole16(ring->bufsz);
			ring->desc32[i].flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (void *)desc,
		    NFE_RX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			bus_dmamap_destroy(sc->sc_dmat, data->map);
		}
		if (data->m != NULL)
			m_freem(data->m);
	}
}
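
/*
 * Take a buffer from the jumbo free list and remember, in rxq.jbufmap[],
 * which jumbo buffer now backs Rx slot 'i'.
 */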
struct nfe_jbuf *
nfe_jalloc(struct nfe_softc *sc, int i)
{
	struct nfe_jbuf *jbuf;

	mutex_enter(&sc->rxq.mtx);
	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
	if (jbuf != NULL)
		SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
	mutex_exit(&sc->rxq.mtx);
	if (jbuf == NULL)
		return NULL;
	sc->rxq.jbufmap[i] =
	    ((char *)jbuf->buf - (char *)sc->rxq.jpool) / NFE_JBYTES;
	return jbuf;
}

/*
 * This is called automatically by the network stack when the mbuf is freed.
 * Caution must be taken that the NIC might be reset by the time the mbuf is
 * freed.
 */
void
nfe_jfree(struct mbuf *m, void *buf, size_t size, void *arg)
{
	struct nfe_softc *sc = arg;
	struct nfe_jbuf *jbuf;
	int i;

	/* find the jbuf from the base pointer */
	i = ((char *)buf - (char *)sc->rxq.jpool) / NFE_JBYTES;
	if (i < 0 || i >= NFE_JPOOL_COUNT) {
		aprint_error_dev(sc->sc_dev,
		    "request to free a buffer (%p) not managed by us\n", buf);
		return;
	}
	jbuf = &sc->rxq.jbuf[i];

	/* ..and put it back in the free list */
	mutex_enter(&sc->rxq.mtx);
	SLIST_INSERT_HEAD(&sc->rxq.jfreelist, jbuf, jnext);
	mutex_exit(&sc->rxq.mtx);

	if (m != NULL)
		pool_cache_put(mb_cache, m);
}

int
nfe_jpool_alloc(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;
	struct nfe_jbuf *jbuf;
	bus_addr_t physaddr;
	char *buf;
	int i, nsegs, error;

	/*
	 * Allocate a big chunk of DMA'able memory.
	 */
	error = bus_dmamap_create(sc->sc_dmat, NFE_JPOOL_SIZE, 1,
	    NFE_JPOOL_SIZE, 0, BUS_DMA_NOWAIT, &ring->jmap);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create jumbo DMA map\n");
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_JPOOL_SIZE, PAGE_SIZE, 0,
	    &ring->jseg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate jumbo DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->jseg, nsegs, NFE_JPOOL_SIZE,
	    &ring->jpool, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map jumbo DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->jmap, ring->jpool,
	    NFE_JPOOL_SIZE, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not load jumbo DMA map\n");
		goto fail;
	}

	/* ..and split it into 9KB chunks */
	SLIST_INIT(&ring->jfreelist);

	buf = ring->jpool;
	physaddr = ring->jmap->dm_segs[0].ds_addr;
	for (i = 0; i < NFE_JPOOL_COUNT; i++) {
		jbuf = &ring->jbuf[i];

		jbuf->buf = buf;
		jbuf->physaddr = physaddr;

		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);

		buf += NFE_JBYTES;
		physaddr += NFE_JBYTES;
	}

	return 0;

fail:	nfe_jpool_free(sc);
	return error;
}

void
nfe_jpool_free(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;

	if (ring->jmap != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->jmap, 0,
		    ring->jmap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->jmap);
		bus_dmamap_destroy(sc->sc_dmat, ring->jmap);
	}
	if (ring->jpool != NULL) {
		bus_dmamem_unmap(sc->sc_dmat, ring->jpool, NFE_JPOOL_SIZE);
		bus_dmamem_free(sc->sc_dmat, &ring->jseg, 1);
	}
}
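
/*
 * Allocate the Tx ring: DMA-safe descriptor memory and one DMA map per slot
 * for mapping outgoing mbuf chains.
 */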
int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, nsegs, error;
	void **desc;
	int descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);

	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_TX_RING_COUNT * descsize, (void **)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map desc DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not load desc DMA map\n");
		goto fail;
	}

	bzero(*desc, NFE_TX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, NFE_JBYTES,
		    NFE_MAX_SCATTER, NFE_JBYTES, 0, BUS_DMA_NOWAIT,
		    &ring->data[i].map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create DMA map\n");
			goto fail;
		}
	}

	return 0;

fail:	nfe_free_tx_ring(sc, ring);
	return error;
}

void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (void *)desc,
		    NFE_TX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
		}
	}

	/* ..and now actually destroy the DMA mappings */
	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];
		if (data->map == NULL)
			continue;
		bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

void
nfe_setmulti(struct nfe_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &ec->ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		bzero(addr, ETHER_ADDR_LEN);
		bzero(mask, ETHER_ADDR_LEN);
		goto done;
	}

	bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN);
	bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN);

	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			ifp->if_flags |= IFF_ALLMULTI;
			bzero(addr, ETHER_ADDR_LEN);
			bzero(mask, ETHER_ADDR_LEN);
			goto done;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &= enm->enm_addrlo[i];
			mask[i] &= ~enm->enm_addrlo[i];
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
	    mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
	    mask[5] << 8 | mask[4]);

	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	if ((sc->sc_flags & NFE_CORRECT_MACADDR) != 0) {
		tmp = NFE_READ(sc, NFE_MACADDR_HI);
		addr[0] = (tmp & 0xff);
		addr[1] = (tmp >> 8) & 0xff;
		addr[2] = (tmp >> 16) & 0xff;
		addr[3] = (tmp >> 24) & 0xff;

		tmp = NFE_READ(sc, NFE_MACADDR_LO);
		addr[4] = (tmp & 0xff);
		addr[5] = (tmp >> 8) & 0xff;

	} else {
		tmp = NFE_READ(sc, NFE_MACADDR_LO);
		addr[0] = (tmp >> 8) & 0xff;
		addr[1] = (tmp & 0xff);

		tmp = NFE_READ(sc, NFE_MACADDR_HI);
		addr[2] = (tmp >> 24) & 0xff;
		addr[3] = (tmp >> 16) & 0xff;
		addr[4] = (tmp >> 8) & 0xff;
		addr[5] = (tmp & 0xff);
	}
}

void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
	    addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	callout_schedule(&sc->sc_tick_ch, hz);
}