/*	$NetBSD: if_nfe.c,v 1.78 2020/03/13 05:10:39 msaitoh Exp $	*/
/*	$OpenBSD: if_nfe.c,v 1.77 2008/02/05 16:52:50 brad Exp $	*/

/*-
 * Copyright (c) 2006, 2007 Damien Bergamini <damien.bergamini@free.fr>
 * Copyright (c) 2005, 2006 Jonathan Gray <jsg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_nfe.c,v 1.78 2020/03/13 05:10:39 msaitoh Exp $");

#include "opt_inet.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/callout.h>
#include <sys/socket.h>

#include <sys/bus.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>
#include <net/if_arp.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_inarp.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#endif

#include <net/bpf.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_nfereg.h>
#include <dev/pci/if_nfevar.h>

static int nfe_ifflags_cb(struct ethercom *);

int	nfe_match(device_t, cfdata_t, void *);
void	nfe_attach(device_t, device_t, void *);
int	nfe_detach(device_t, int);
void	nfe_power(int, void *);
void	nfe_miibus_statchg(struct ifnet *);
int	nfe_miibus_readreg(device_t, int, int, uint16_t *);
int	nfe_miibus_writereg(device_t, int, int, uint16_t);
int	nfe_intr(void *);
int	nfe_ioctl(struct ifnet *, u_long, void *);
void	nfe_txdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_txdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_txdesc32_rsync(struct nfe_softc *, int, int, int);
void	nfe_txdesc64_rsync(struct nfe_softc *, int, int, int);
void	nfe_rxdesc32_sync(struct nfe_softc *, struct nfe_desc32 *, int);
void	nfe_rxdesc64_sync(struct nfe_softc *, struct nfe_desc64 *, int);
void	nfe_rxeof(struct nfe_softc *);
void	nfe_txeof(struct nfe_softc *);
int	nfe_encap(struct nfe_softc *, struct mbuf *);
void	nfe_start(struct ifnet *);
void	nfe_watchdog(struct ifnet *);
int	nfe_init(struct ifnet *);
void	nfe_stop(struct ifnet *, int);
struct nfe_jbuf *nfe_jalloc(struct nfe_softc *, int);
void	nfe_jfree(struct mbuf *, void *, size_t, void *);
int	nfe_jpool_alloc(struct nfe_softc *);
void	nfe_jpool_free(struct nfe_softc *);
int	nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_reset_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
void	nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *);
int	nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_reset_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *);
void	nfe_setmulti(struct nfe_softc *);
void	nfe_get_macaddr(struct nfe_softc *, uint8_t *);
void	nfe_set_macaddr(struct nfe_softc *, const uint8_t *);
void	nfe_tick(void *);
void	nfe_poweron(device_t);
bool	nfe_resume(device_t, const pmf_qual_t *);

CFATTACH_DECL_NEW(nfe, sizeof(struct nfe_softc),
    nfe_match, nfe_attach, nfe_detach, NULL);

/* #define NFE_NO_JUMBO */

#ifdef NFE_DEBUG
int nfedebug = 0;
#define DPRINTF(x)	do { if (nfedebug) printf x; } while (0)
#define DPRINTFN(n, x)	do { if (nfedebug >= (n)) printf x; } while (0)
#else
#define DPRINTF(x)
#define DPRINTFN(n, x)
#endif

/* deal with naming differences */

#define PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 \
	PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1
#define PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 \
	PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2
#define PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 \
	PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN

#define PCI_PRODUCT_NVIDIA_CK804_LAN1 \
	PCI_PRODUCT_NVIDIA_NFORCE4_LAN1
#define PCI_PRODUCT_NVIDIA_CK804_LAN2 \
	PCI_PRODUCT_NVIDIA_NFORCE4_LAN2

#define PCI_PRODUCT_NVIDIA_MCP51_LAN1 \
	PCI_PRODUCT_NVIDIA_NFORCE430_LAN1
#define PCI_PRODUCT_NVIDIA_MCP51_LAN2 \
	PCI_PRODUCT_NVIDIA_NFORCE430_LAN2

const struct nfe_product {
	pci_vendor_id_t		vendor;
	pci_product_id_t	product;
} nfe_devices[] = {
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN5 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_CK804_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP51_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3 },
	{ PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4 }
};

int
nfe_match(device_t dev, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;
	const struct nfe_product *np;
	int i;

	for (i = 0; i < __arraycount(nfe_devices); i++) {
		np = &nfe_devices[i];
		if (PCI_VENDOR(pa->pa_id) == np->vendor &&
		    PCI_PRODUCT(pa->pa_id) == np->product)
			return 1;
	}
	return 0;
}

void
nfe_attach(device_t parent, device_t self, void *aux)
{
	struct nfe_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	struct ifnet *ifp;
	struct mii_data * const mii = &sc->sc_mii;
	pcireg_t memtype, csr;
	int mii_flags = 0;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc_dev = self;
	sc->sc_pc = pa->pa_pc;
	pci_aprint_devinfo(pa, NULL);

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, NFE_PCI_BA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		if (pci_mapreg_map(pa, NFE_PCI_BA, memtype, 0, &sc->sc_memt,
		    &sc->sc_memh, NULL, &sc->sc_mems) == 0)
			break;
		/* FALLTHROUGH */
	default:
		aprint_error_dev(self, "could not map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		aprint_error_dev(self, "could not map interrupt\n");
		goto fail;
	}

	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
	sc->sc_ih = pci_intr_establish_xname(pc, ih, IPL_NET, nfe_intr, sc,
	    device_xname(self));
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "could not establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		goto fail;
	}
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	csr |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, csr);

	sc->sc_flags = 0;

	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4:
	case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP51_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP51_LAN2:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP61_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP61_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP67_LAN4:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP73_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_CORRECT_MACADDR |
		    NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP77_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP77_LAN4:
		sc->sc_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_MCP79_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP79_LAN4:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		break;
	case PCI_PRODUCT_NVIDIA_CK804_LAN1:
	case PCI_PRODUCT_NVIDIA_CK804_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP04_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM;
		break;
	case PCI_PRODUCT_NVIDIA_MCP65_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN2:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN3:
	case PCI_PRODUCT_NVIDIA_MCP65_LAN4:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR |
		    NFE_CORRECT_MACADDR | NFE_PWR_MGMT;
		mii_flags = MIIF_DOPAUSE;
		break;
	case PCI_PRODUCT_NVIDIA_MCP55_LAN1:
	case PCI_PRODUCT_NVIDIA_MCP55_LAN2:
		sc->sc_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM |
		    NFE_HW_VLAN | NFE_PWR_MGMT;
		break;
	}

	if (pci_dma64_available(pa) && (sc->sc_flags & NFE_40BIT_ADDR) != 0) {
		if (bus_dmatag_subregion(pa->pa_dmat64,
		    0,
		    (bus_addr_t)(1ULL << 40),
		    &sc->sc_dmat,
		    BUS_DMA_WAITOK) != 0) {
			aprint_error_dev(self,
			    "unable to create 40-bit DMA tag\n");
			sc->sc_dmat = pa->pa_dmat64;
		} else
			sc->sc_dmat_needs_free = true;
	} else
		sc->sc_dmat = pa->pa_dmat;

	nfe_poweron(self);

#ifndef NFE_NO_JUMBO
	/* enable jumbo frames for adapters that support it */
	if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->sc_flags |= NFE_USE_JUMBO;
#endif

	/* Check for reversed ethernet address */
	if ((NFE_READ(sc, NFE_TX_UNK) & NFE_MAC_ADDR_INORDER) != 0)
		sc->sc_flags |= NFE_CORRECT_MACADDR;

	nfe_get_macaddr(sc, sc->sc_enaddr);
	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(sc->sc_enaddr));

	/*
	 * Allocate Tx and Rx rings.
	 */
	if (nfe_alloc_tx_ring(sc, &sc->txq) != 0) {
		aprint_error_dev(self, "could not allocate Tx ring\n");
		goto fail;
	}

	mutex_init(&sc->rxq.mtx, MUTEX_DEFAULT, IPL_NET);

	if (nfe_alloc_rx_ring(sc, &sc->rxq) != 0) {
		aprint_error_dev(self, "could not allocate Rx ring\n");
		nfe_free_tx_ring(sc, &sc->txq);
		goto fail;
	}

	ifp = &sc->sc_ethercom.ec_if;
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nfe_ioctl;
	ifp->if_start = nfe_start;
	ifp->if_stop = nfe_stop;
	ifp->if_watchdog = nfe_watchdog;
	ifp->if_init = nfe_init;
	ifp->if_baudrate = IF_Gbps(1);
	IFQ_SET_MAXLEN(&ifp->if_snd, NFE_IFQ_MAXLEN);
	IFQ_SET_READY(&ifp->if_snd);
	strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);

	if (sc->sc_flags & NFE_USE_JUMBO)
		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;

#if NVLAN > 0
	if (sc->sc_flags & NFE_HW_VLAN) {
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU;
		sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING;
	}
#endif
	if (sc->sc_flags & NFE_HW_CSUM) {
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
	}

	mii->mii_ifp = ifp;
	mii->mii_readreg = nfe_miibus_readreg;
	mii->mii_writereg = nfe_miibus_writereg;
	mii->mii_statchg = nfe_miibus_statchg;

	sc->sc_ethercom.ec_mii = mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus);

	mii_attach(self, mii, 0xffffffff, MII_PHY_ANY, 0, mii_flags);

	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		aprint_error_dev(self, "no PHY found!\n");
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_MANUAL, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, sc->sc_enaddr);
	ether_set_ifflags_cb(&sc->sc_ethercom, nfe_ifflags_cb);

	callout_init(&sc->sc_tick_ch, 0);
	callout_setfunc(&sc->sc_tick_ch, nfe_tick, sc);

	if (pmf_device_register(self, NULL, nfe_resume))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	return;

fail:
	if (sc->sc_ih != NULL) {
		pci_intr_disestablish(pc, sc->sc_ih);
		sc->sc_ih = NULL;
	}
	if (sc->sc_mems != 0) {
		bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
		sc->sc_mems = 0;
	}
}

int
nfe_detach(device_t self, int flags)
{
	struct nfe_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int s;

	s = splnet();

	nfe_stop(ifp, 1);

	pmf_device_deregister(self);
	callout_destroy(&sc->sc_tick_ch);
	ether_ifdetach(ifp);
	if_detach(ifp);
	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
	ifmedia_fini(&sc->sc_mii.mii_media);

	nfe_free_rx_ring(sc, &sc->rxq);
	mutex_destroy(&sc->rxq.mtx);
	nfe_free_tx_ring(sc, &sc->txq);

	if (sc->sc_dmat_needs_free)
		bus_dmatag_destroy(sc->sc_dmat);

	if (sc->sc_ih != NULL) {
		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
		sc->sc_ih = NULL;
	}

	if ((sc->sc_flags & NFE_CORRECT_MACADDR) != 0) {
		nfe_set_macaddr(sc, sc->sc_enaddr);
	} else {
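		/*
		 * nfe_get_macaddr() read the address from these registers
		 * in reversed byte order on this class of chip; write it
		 * back the same way so the registers are left as found.
		 */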
		NFE_WRITE(sc, NFE_MACADDR_LO,
		    sc->sc_enaddr[0] << 8 | sc->sc_enaddr[1]);
		NFE_WRITE(sc, NFE_MACADDR_HI,
		    sc->sc_enaddr[2] << 24 | sc->sc_enaddr[3] << 16 |
		    sc->sc_enaddr[4] << 8 | sc->sc_enaddr[5]);
	}

	if (sc->sc_mems != 0) {
		bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
		sc->sc_mems = 0;
	}

	splx(s);

	return 0;
}

void
nfe_miibus_statchg(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET;

	phy = NFE_READ(sc, NFE_PHY_IFACE);
	phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T);

	seed = NFE_READ(sc, NFE_RNDSEED);
	seed &= ~NFE_SEED_MASK;

	if ((mii->mii_media_active & IFM_HDX) != 0) {
		phy |= NFE_PHY_HDX;	/* half-duplex */
		misc |= NFE_MISC1_HDX;
	}

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:	/* full-duplex only */
		link |= NFE_MEDIA_1000T;
		seed |= NFE_SEED_1000T;
		phy |= NFE_PHY_1000T;
		break;
	case IFM_100_TX:
		link |= NFE_MEDIA_100TX;
		seed |= NFE_SEED_100TX;
		phy |= NFE_PHY_100TX;
		break;
	case IFM_10_T:
		link |= NFE_MEDIA_10T;
		seed |= NFE_SEED_10T;
		break;
	}

	NFE_WRITE(sc, NFE_RNDSEED, seed);	/* XXX: gigabit NICs only? */

	NFE_WRITE(sc, NFE_PHY_IFACE, phy);
	NFE_WRITE(sc, NFE_MISC1, misc);
	NFE_WRITE(sc, NFE_LINKSPEED, link);
}

int
nfe_miibus_readreg(device_t dev, int phy, int reg, uint16_t *val)
{
	struct nfe_softc *sc = device_private(dev);
	uint32_t data;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
		DPRINTFN(2, ("%s: timeout waiting for PHY read (%d, %d)\n",
		    device_xname(sc->sc_dev), phy, reg));
		return ETIMEDOUT;
	}

	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(2, ("%s: could not read PHY (%d, %d)\n",
		    device_xname(sc->sc_dev), phy, reg));
		return -1;
	}

	data = NFE_READ(sc, NFE_PHY_DATA);
	sc->mii_phyaddr = phy;

	DPRINTFN(2, ("%s: mii read phy %d reg 0x%x data 0x%x\n",
	    device_xname(sc->sc_dev), phy, reg, data));

	*val = data & 0x0000ffff;
	return 0;
}

int
nfe_miibus_writereg(device_t dev, int phy, int reg, uint16_t val)
{
	struct nfe_softc *sc = device_private(dev);
	uint32_t ctl;
	int ntries;

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) {
		NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY);
		DELAY(100);
	}

	NFE_WRITE(sc, NFE_PHY_DATA, val);
	ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg;
	NFE_WRITE(sc, NFE_PHY_CTL, ctl);

	for (ntries = 0; ntries < 1000; ntries++) {
		DELAY(100);
		if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY))
			break;
	}
	if (ntries == 1000) {
#ifdef NFE_DEBUG
		if (nfedebug >= 2)
			printf("timeout waiting for PHY write (%d, %d)\n",
			    phy, reg);
#endif
		return ETIMEDOUT;
	}
	if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) {
		DPRINTFN(2, ("%s: could not write PHY (%d, %d)\n",
		    device_xname(sc->sc_dev), phy, reg));
		return -1;
	}
	return 0;
}

int
nfe_intr(void *arg)
{
	struct nfe_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t r;
	int handled;

	if ((ifp->if_flags & IFF_UP) == 0)
		return 0;

	handled = 0;

	for (;;) {
		r = NFE_READ(sc, NFE_IRQ_STATUS);
		if ((r & NFE_IRQ_WANTED) == 0)
			break;

		NFE_WRITE(sc, NFE_IRQ_STATUS, r);
		handled = 1;
		DPRINTFN(5, ("nfe_intr: interrupt register %x\n", r));

		if ((r & (NFE_IRQ_RXERR | NFE_IRQ_RX_NOBUF | NFE_IRQ_RX)) != 0) {
			/* check Rx ring */
			nfe_rxeof(sc);
		}
		if ((r & (NFE_IRQ_TXERR | NFE_IRQ_TXERR2 | NFE_IRQ_TX_DONE)) != 0) {
			/* check Tx ring */
			nfe_txeof(sc);
		}
		if ((r & NFE_IRQ_LINK) != 0) {
			NFE_READ(sc, NFE_PHY_STATUS);
			NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);
			DPRINTF(("%s: link state changed\n",
			    device_xname(sc->sc_dev)));
		}
	}

	if (handled)
		if_schedule_deferred_start(ifp);

	return handled;
}

static int
nfe_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct nfe_softc *sc = ifp->if_softc;
	u_short change = ifp->if_flags ^ sc->sc_if_flags;

	/*
	 * If only the PROMISC flag changes, then
	 * don't do a full re-init of the chip, just update
	 * the Rx filter.
	 */
	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0)
		return ENETRESET;
	else if ((change & IFF_PROMISC) != 0)
		nfe_setmulti(sc);

	return 0;
}

int
nfe_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct nfe_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCINITIFADDR:
		ifp->if_flags |= IFF_UP;
		nfe_init(ifp);
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			arp_ifinit(ifp, ifa);
			break;
#endif
		default:
			break;
		}
		break;
	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;

		error = 0;

		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING)
			nfe_setmulti(sc);
		break;
	}
	sc->sc_if_flags = ifp->if_flags;

	splx(s);

	return error;
}

void
nfe_txdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (char *)desc32 - (char *)sc->txq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_txdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (char *)desc64 - (char *)sc->txq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

void
nfe_txdesc32_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (char *)&sc->txq.desc32[start] - (char *)sc->txq.desc32,
		    (char *)&sc->txq.desc32[end] -
		    (char *)&sc->txq.desc32[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (char *)&sc->txq.desc32[start] - (char *)sc->txq.desc32,
	    (char *)&sc->txq.desc32[NFE_TX_RING_COUNT] -
	    (char *)&sc->txq.desc32[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (char *)&sc->txq.desc32[end] - (char *)sc->txq.desc32, ops);
}

void
nfe_txdesc64_rsync(struct nfe_softc *sc, int start, int end, int ops)
{
	if (end > start) {
		bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
		    (char *)&sc->txq.desc64[start] - (char *)sc->txq.desc64,
		    (char *)&sc->txq.desc64[end] -
		    (char *)&sc->txq.desc64[start], ops);
		return;
	}
	/* sync from 'start' to end of ring */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map,
	    (char *)&sc->txq.desc64[start] - (char *)sc->txq.desc64,
	    (char *)&sc->txq.desc64[NFE_TX_RING_COUNT] -
	    (char *)&sc->txq.desc64[start], ops);

	/* sync from start of ring to 'end' */
	bus_dmamap_sync(sc->sc_dmat, sc->txq.map, 0,
	    (char *)&sc->txq.desc64[end] - (char *)sc->txq.desc64, ops);
}

void
nfe_rxdesc32_sync(struct nfe_softc *sc, struct nfe_desc32 *desc32, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (char *)desc32 - (char *)sc->rxq.desc32,
	    sizeof (struct nfe_desc32), ops);
}

void
nfe_rxdesc64_sync(struct nfe_softc *sc, struct nfe_desc64 *desc64, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.map,
	    (char *)desc64 - (char *)sc->rxq.desc64,
	    sizeof (struct nfe_desc64), ops);
}

void
nfe_rxeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	struct mbuf *m, *mnew;
	bus_addr_t physaddr;
	uint16_t flags;
	int error, len, i;

	desc32 = NULL;
	desc64 = NULL;
	for (i = sc->rxq.cur;; i = NFE_RX_NEXTDESC(i)) {
		data = &sc->rxq.data[i];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
			nfe_rxdesc64_sync(sc, desc64,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

			flags = le16toh(desc64->flags);
			len = le16toh(desc64->length) & 0x3fff;
		} else {
			desc32 = &sc->rxq.desc32[i];
			nfe_rxdesc32_sync(sc, desc32,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

			flags = le16toh(desc32->flags);
			len = le16toh(desc32->length) & 0x3fff;
		}

		if ((flags & NFE_RX_READY) != 0)
			break;

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if ((flags & NFE_RX_VALID_V1) == 0)
				goto skip;

			if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		} else {
			if ((flags & NFE_RX_VALID_V2) == 0)
				goto skip;

			if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) {
				flags &= ~NFE_RX_ERROR;
				len--;	/* fix buffer length */
			}
		}

		if (flags & NFE_RX_ERROR) {
			if_statinc(ifp, if_ierrors);
			goto skip;
		}

		/*
		 * Try to allocate a new mbuf for this ring element and load
		 * it before processing the current mbuf. If the ring element
		 * cannot be loaded, drop the received packet and reuse the
		 * old mbuf. In the unlikely case that the old mbuf can't be
		 * reloaded either, explicitly panic.
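		 *
		 * In the jumbo case, if no jumbo buffer is free, the frame
		 * is instead copied into a regular cluster when it fits.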
		 */
		MGETHDR(mnew, M_DONTWAIT, MT_DATA);
		if (mnew == NULL) {
			if_statinc(ifp, if_ierrors);
			goto skip;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			physaddr =
			    sc->rxq.jbuf[sc->rxq.jbufmap[i]].physaddr;
			if ((jbuf = nfe_jalloc(sc, i)) == NULL) {
				if (len > MCLBYTES) {
					m_freem(mnew);
					if_statinc(ifp, if_ierrors);
					goto skip1;
				}
				MCLGET(mnew, M_DONTWAIT);
				if ((mnew->m_flags & M_EXT) == 0) {
					m_freem(mnew);
					if_statinc(ifp, if_ierrors);
					goto skip1;
				}

				(void)memcpy(mtod(mnew, void *),
				    mtod(data->m, const void *), len);
				m = mnew;
				goto mbufcopied;
			} else {
				MEXTADD(mnew, jbuf->buf, NFE_JBYTES, 0,
				    nfe_jfree, sc);
				bus_dmamap_sync(sc->sc_dmat, sc->rxq.jmap,
				    mtod(data->m, char *) - (char *)sc->rxq.jpool,
				    NFE_JBYTES, BUS_DMASYNC_POSTREAD);

				physaddr = jbuf->physaddr;
			}
		} else {
			MCLGET(mnew, M_DONTWAIT);
			if ((mnew->m_flags & M_EXT) == 0) {
				m_freem(mnew);
				if_statinc(ifp, if_ierrors);
				goto skip;
			}

			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(mnew, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				m_freem(mnew);

				/* try to reload the old mbuf */
				error = bus_dmamap_load(sc->sc_dmat, data->map,
				    mtod(data->m, void *), MCLBYTES, NULL,
				    BUS_DMA_READ | BUS_DMA_NOWAIT);
				if (error != 0) {
					/* very unlikely that it will fail.. */
					panic("%s: could not load old rx mbuf",
					    device_xname(sc->sc_dev));
				}
				if_statinc(ifp, if_ierrors);
				goto skip;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		/*
		 * New mbuf successfully loaded, update Rx ring and continue
		 * processing.
		 */
		m = data->m;
		data->m = mnew;

mbufcopied:
		/* finalize mbuf */
		m->m_pkthdr.len = m->m_len = len;
		m_set_rcvif(m, ifp);

		if ((sc->sc_flags & NFE_HW_CSUM) != 0) {
			/*
			 * XXX
			 * no way to check M_CSUM_IPv4_BAD or non-IPv4 packets?
			 */
			if (flags & NFE_RX_IP_CSUMOK) {
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
				DPRINTFN(3, ("%s: ip4csum-rx ok\n",
				    device_xname(sc->sc_dev)));
			}
			/*
			 * XXX
			 * no way to check M_CSUM_TCP_UDP_BAD or
			 * other protocols?
			 */
			if (flags & NFE_RX_UDP_CSUMOK) {
				m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
				DPRINTFN(3, ("%s: udp4csum-rx ok\n",
				    device_xname(sc->sc_dev)));
			} else if (flags & NFE_RX_TCP_CSUMOK) {
				m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
				DPRINTFN(3, ("%s: tcp4csum-rx ok\n",
				    device_xname(sc->sc_dev)));
			}
		}
		if_percpuq_enqueue(ifp->if_percpuq, m);

skip1:
		/* update mapping address in h/w descriptor */
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64->physaddr[0] =
			    htole32(((uint64_t)physaddr) >> 32);
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
		} else {
			desc32->physaddr = htole32(physaddr);
		}

skip:
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);

			nfe_rxdesc64_sync(sc, desc64,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		} else {
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);

			nfe_rxdesc32_sync(sc, desc32,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		}
	}
	/* update current RX pointer */
	sc->rxq.cur = i;
}

void
nfe_txeof(struct nfe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data = NULL;
	int i;
	uint16_t flags;
	char buf[128];

	for (i = sc->txq.next;
	    sc->txq.queued > 0;
	    i = NFE_TX_NEXTDESC(i), sc->txq.queued--) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[i];
			nfe_txdesc64_sync(sc, desc64,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

			flags = le16toh(desc64->flags);
		} else {
			desc32 = &sc->txq.desc32[i];
			nfe_txdesc32_sync(sc, desc32,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

			flags = le16toh(desc32->flags);
		}

		if ((flags & NFE_TX_VALID) != 0)
			break;

		data = &sc->txq.data[i];

		if ((sc->sc_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) {
			if ((flags & NFE_TX_LASTFRAG_V1) == 0 &&
			    data->m == NULL)
				continue;

			if ((flags & NFE_TX_ERROR_V1) != 0) {
				snprintb(buf, sizeof(buf), NFE_V1_TXERR, flags);
				aprint_error_dev(sc->sc_dev, "tx v1 error %s\n",
				    buf);
				if_statinc(ifp, if_oerrors);
			} else
				if_statinc(ifp, if_opackets);
		} else {
			if ((flags & NFE_TX_LASTFRAG_V2) == 0 &&
			    data->m == NULL)
				continue;

			if ((flags & NFE_TX_ERROR_V2) != 0) {
				snprintb(buf, sizeof(buf), NFE_V2_TXERR, flags);
				aprint_error_dev(sc->sc_dev, "tx v2 error %s\n",
				    buf);
				if_statinc(ifp, if_oerrors);
			} else
				if_statinc(ifp, if_opackets);
		}

		if (data->m == NULL) {	/* should not get there */
			aprint_error_dev(sc->sc_dev,
			    "last fragment bit w/o associated mbuf!\n");
			continue;
		}

		/* last fragment of the mbuf chain transmitted */
		bus_dmamap_sync(sc->sc_dmat, data->active, 0,
		    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->active);
		m_freem(data->m);
		data->m = NULL;
	}

	sc->txq.next = i;

	if (sc->txq.queued < NFE_TX_RING_COUNT) {
		/* at least one slot freed */
		ifp->if_flags &= ~IFF_OACTIVE;
	}

	if (sc->txq.queued == 0) {
		/* all queued packets are sent */
		ifp->if_timer = 0;
	}
}

int
nfe_encap(struct nfe_softc *sc, struct mbuf *m0)
{
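	/*
	 * Map the mbuf chain and fill one Tx descriptor per DMA segment.
	 * The NFE_TX_VALID bit of the first descriptor is set last, once
	 * the rest of the chain is in place, so the chip never sees a
	 * partially built chain.
	 */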
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_tx_data *data;
	bus_dmamap_t map;
	uint16_t flags, csumflags;
#if NVLAN > 0
	uint32_t vtag = 0;
#endif
	int error, i, first;

	desc32 = NULL;
	desc64 = NULL;
	data = NULL;

	flags = 0;
	csumflags = 0;
	first = sc->txq.cur;

	map = sc->txq.data[first].map;

	error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m0, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not map mbuf (error %d)\n",
		    error);
		return error;
	}

	if (sc->txq.queued + map->dm_nsegs >= NFE_TX_RING_COUNT - 1) {
		bus_dmamap_unload(sc->sc_dmat, map);
		return ENOBUFS;
	}

#if NVLAN > 0
	/* setup h/w VLAN tagging */
	if (vlan_has_tag(m0))
		vtag = NFE_TX_VTAG | vlan_get_tag(m0);
#endif
	if ((sc->sc_flags & NFE_HW_CSUM) != 0) {
		if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4)
			csumflags |= NFE_TX_IP_CSUM;
		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_UDPv4))
			csumflags |= NFE_TX_TCP_UDP_CSUM;
	}

	for (i = 0; i < map->dm_nsegs; i++) {
		data = &sc->txq.data[sc->txq.cur];

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->txq.desc64[sc->txq.cur];
			desc64->physaddr[0] =
			    htole32(((uint64_t)map->dm_segs[i].ds_addr) >> 32);
			desc64->physaddr[1] =
			    htole32(map->dm_segs[i].ds_addr & 0xffffffff);
			desc64->length = htole16(map->dm_segs[i].ds_len - 1);
			desc64->flags = htole16(flags);
			desc64->vtag = 0;
		} else {
			desc32 = &sc->txq.desc32[sc->txq.cur];

			desc32->physaddr = htole32(map->dm_segs[i].ds_addr);
			desc32->length = htole16(map->dm_segs[i].ds_len - 1);
			desc32->flags = htole16(flags);
		}

		/*
		 * Setting of the valid bit in the first descriptor is
		 * deferred until the whole chain is fully setup.
		 */
		flags |= NFE_TX_VALID;

		sc->txq.queued++;
		sc->txq.cur = NFE_TX_NEXTDESC(sc->txq.cur);
	}

	/* the whole mbuf chain has been setup */
	if (sc->sc_flags & NFE_40BIT_ADDR) {
		/* fix last descriptor */
		flags |= NFE_TX_LASTFRAG_V2;
		desc64->flags = htole16(flags);

		/* Checksum flags and vtag belong to the first fragment only. */
#if NVLAN > 0
		sc->txq.desc64[first].vtag = htole32(vtag);
#endif
		sc->txq.desc64[first].flags |= htole16(csumflags);

		/* finally, set the valid bit in the first descriptor */
		sc->txq.desc64[first].flags |= htole16(NFE_TX_VALID);
	} else {
		/* fix last descriptor */
		if (sc->sc_flags & NFE_JUMBO_SUP)
			flags |= NFE_TX_LASTFRAG_V2;
		else
			flags |= NFE_TX_LASTFRAG_V1;
		desc32->flags = htole16(flags);

		/* Checksum flags belong to the first fragment only. */
		sc->txq.desc32[first].flags |= htole16(csumflags);

		/* finally, set the valid bit in the first descriptor */
		sc->txq.desc32[first].flags |= htole16(NFE_TX_VALID);
	}

	data->m = m0;
	data->active = map;

	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;
}

void
nfe_start(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	int old = sc->txq.queued;
	struct mbuf *m0;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (nfe_encap(sc, m0) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* packet put in h/w queue, remove from s/w queue */
		IFQ_DEQUEUE(&ifp->if_snd, m0);

		bpf_mtap(ifp, m0, BPF_D_OUT);
	}

	if (sc->txq.queued != old) {
		/* packets are queued */
		if (sc->sc_flags & NFE_40BIT_ADDR)
			nfe_txdesc64_rsync(sc, old, sc->txq.cur,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		else
			nfe_txdesc32_rsync(sc, old, sc->txq.cur,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		/* kick Tx */
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl);

		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		ifp->if_timer = 5;
	}
}

void
nfe_watchdog(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;

	aprint_error_dev(sc->sc_dev, "watchdog timeout\n");

	ifp->if_flags &= ~IFF_RUNNING;
	nfe_init(ifp);

	if_statinc(ifp, if_oerrors);
}

int
nfe_init(struct ifnet *ifp)
{
	struct nfe_softc *sc = ifp->if_softc;
	uint32_t tmp;
	int rc = 0, s;

	if (ifp->if_flags & IFF_RUNNING)
		return 0;

	nfe_stop(ifp, 0);

	NFE_WRITE(sc, NFE_TX_UNK, 0);
	NFE_WRITE(sc, NFE_STATUS, 0);

	sc->rxtxctl = NFE_RXTX_BIT2;
	if (sc->sc_flags & NFE_40BIT_ADDR)
		sc->rxtxctl |= NFE_RXTX_V3MAGIC;
	else if (sc->sc_flags & NFE_JUMBO_SUP)
		sc->rxtxctl |= NFE_RXTX_V2MAGIC;
	if (sc->sc_flags & NFE_HW_CSUM)
		sc->rxtxctl |= NFE_RXTX_RXCSUM;
#if NVLAN > 0
	/*
	 * Although the adapter is capable of stripping VLAN tags from received
	 * frames (NFE_RXTX_VTAG_STRIP), we do not enable this functionality on
	 * purpose. This will be done in software by our network stack.
	 */
	if (sc->sc_flags & NFE_HW_VLAN)
		sc->rxtxctl |= NFE_RXTX_VTAG_INSERT;
#endif
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);

#if NVLAN
	if (sc->sc_flags & NFE_HW_VLAN)
		NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE);
#endif

	NFE_WRITE(sc, NFE_SETUP_R6, 0);

	/* set MAC address */
	nfe_set_macaddr(sc, sc->sc_enaddr);

	/* tell MAC where rings are in memory */
	NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, ((uint64_t)sc->rxq.physaddr) >> 32);
	NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, sc->rxq.physaddr & 0xffffffff);
	NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, ((uint64_t)sc->txq.physaddr) >> 32);
	NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, sc->txq.physaddr & 0xffffffff);

	NFE_WRITE(sc, NFE_RING_SIZE,
	    (NFE_RX_RING_COUNT - 1) << 16 |
	    (NFE_TX_RING_COUNT - 1));

	NFE_WRITE(sc, NFE_RXBUFSZ, sc->rxq.bufsz);

	/* force MAC to wakeup */
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_WAKEUP);
	DELAY(10);
	tmp = NFE_READ(sc, NFE_PWR_STATE);
	NFE_WRITE(sc, NFE_PWR_STATE, tmp | NFE_PWR_VALID);

	s = splnet();
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);
	nfe_intr(sc); /* XXX clear IRQ status registers */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);
	splx(s);

#if 1
	/* configure interrupts coalescing/mitigation */
	NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT);
#else
	/* no interrupt mitigation: one interrupt per packet */
	NFE_WRITE(sc, NFE_IMTIMER, 970);
#endif

	NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC);
	NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC);

	/* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */
	NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC);

	NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC);
	NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_ENABLE);

	sc->rxtxctl &= ~NFE_RXTX_BIT2;
	NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl);
	DELAY(10);
	NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl);

	/* set Rx filter */
	nfe_setmulti(sc);

	if ((rc = ether_mediachange(ifp)) != 0)
		goto out;

	nfe_tick(sc);

	/* enable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START);

	/* enable Tx */
	NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START);

	NFE_WRITE(sc, NFE_PHY_STATUS, 0xf);

	/* enable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED);

	callout_schedule(&sc->sc_tick_ch, hz);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

out:
	return rc;
}

void
nfe_stop(struct ifnet *ifp, int disable)
{
	struct nfe_softc *sc = ifp->if_softc;

	callout_stop(&sc->sc_tick_ch);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	mii_down(&sc->sc_mii);

	/* abort Tx */
	NFE_WRITE(sc, NFE_TX_CTL, 0);

	/* disable Rx */
	NFE_WRITE(sc, NFE_RX_CTL, 0);

	/* disable interrupts */
	NFE_WRITE(sc, NFE_IRQ_MASK, 0);

	/* reset Tx and Rx rings */
	nfe_reset_tx_ring(sc, &sc->txq);
	nfe_reset_rx_ring(sc, &sc->rxq);
}

int
nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_desc32 *desc32;
	struct nfe_desc64 *desc64;
	struct nfe_rx_data *data;
	struct nfe_jbuf *jbuf;
	void **desc;
	bus_addr_t physaddr;
	int i, nsegs, error, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->cur = ring->next = 0;
	ring->bufsz = MCLBYTES;

	error = bus_dmamap_create(sc->sc_dmat, NFE_RX_RING_COUNT * descsize, 1,
	    NFE_RX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		ring->map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_RX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_RX_RING_COUNT * descsize, (void **)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map desc DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_RX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not load desc DMA map\n");
		goto fail;
	}

	memset(*desc, 0, NFE_RX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	if (sc->sc_flags & NFE_USE_JUMBO) {
		ring->bufsz = NFE_JBYTES;
		if ((error = nfe_jpool_alloc(sc)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate jumbo frames\n");
			goto fail;
		}
	}

	/*
	 * Pre-allocate Rx buffers and populate Rx ring.
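	 * With NFE_USE_JUMBO each buffer comes from the shared jumbo pool;
	 * otherwise a standard mbuf cluster with its own DMA map is used.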
	 */
	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &sc->rxq.data[i];

		MGETHDR(data->m, M_DONTWAIT, MT_DATA);
		if (data->m == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate rx mbuf\n");
			error = ENOMEM;
			goto fail;
		}

		if (sc->sc_flags & NFE_USE_JUMBO) {
			if ((jbuf = nfe_jalloc(sc, i)) == NULL) {
				aprint_error_dev(sc->sc_dev,
				    "could not allocate jumbo buffer\n");
				goto fail;
			}
			MEXTADD(data->m, jbuf->buf, NFE_JBYTES, 0, nfe_jfree,
			    sc);

			physaddr = jbuf->physaddr;
		} else {
			error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, BUS_DMA_NOWAIT, &data->map);
			if (error != 0) {
				aprint_error_dev(sc->sc_dev,
				    "could not create DMA map\n");
				data->map = NULL;
				goto fail;
			}
			MCLGET(data->m, M_DONTWAIT);
			if (!(data->m->m_flags & M_EXT)) {
				aprint_error_dev(sc->sc_dev,
				    "could not allocate mbuf cluster\n");
				error = ENOMEM;
				goto fail;
			}

			error = bus_dmamap_load(sc->sc_dmat, data->map,
			    mtod(data->m, void *), MCLBYTES, NULL,
			    BUS_DMA_READ | BUS_DMA_NOWAIT);
			if (error != 0) {
				aprint_error_dev(sc->sc_dev,
				    "could not load rx buf DMA map");
				goto fail;
			}
			physaddr = data->map->dm_segs[0].ds_addr;
		}

		if (sc->sc_flags & NFE_40BIT_ADDR) {
			desc64 = &sc->rxq.desc64[i];
			desc64->physaddr[0] =
			    htole32(((uint64_t)physaddr) >> 32);
			desc64->physaddr[1] = htole32(physaddr & 0xffffffff);
			desc64->length = htole16(sc->rxq.bufsz);
			desc64->flags = htole16(NFE_RX_READY);
		} else {
			desc32 = &sc->rxq.desc32[i];
			desc32->physaddr = htole32(physaddr);
			desc32->length = htole16(sc->rxq.bufsz);
			desc32->flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	return 0;

fail:	nfe_free_rx_ring(sc, ring);
	return error;
}

void
nfe_reset_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	int i;

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR) {
			ring->desc64[i].length = htole16(ring->bufsz);
			ring->desc64[i].flags = htole16(NFE_RX_READY);
		} else {
			ring->desc32[i].length = htole16(ring->bufsz);
			ring->desc32[i].flags = htole16(NFE_RX_READY);
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->cur = ring->next = 0;
}

void
nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring)
{
	struct nfe_rx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (void *)desc,
		    NFE_RX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_RX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->map != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			bus_dmamap_destroy(sc->sc_dmat, data->map);
		}
		if (data->m != NULL)
			m_freem(data->m);
	}

	nfe_jpool_free(sc);
}

struct nfe_jbuf *
nfe_jalloc(struct nfe_softc *sc, int i)
{
	struct nfe_jbuf *jbuf;

	mutex_enter(&sc->rxq.mtx);
	jbuf = SLIST_FIRST(&sc->rxq.jfreelist);
	if (jbuf != NULL)
		SLIST_REMOVE_HEAD(&sc->rxq.jfreelist, jnext);
	mutex_exit(&sc->rxq.mtx);
	if (jbuf == NULL)
		return NULL;
	sc->rxq.jbufmap[i] =
	    ((char *)jbuf->buf - (char *)sc->rxq.jpool) / NFE_JBYTES;
	return jbuf;
}

/*
 * This is called automatically by the network stack when the mbuf is freed.
 * Caution must be taken that the NIC might be reset by the time the mbuf is
 * freed.
 */
void
nfe_jfree(struct mbuf *m, void *buf, size_t size, void *arg)
{
	struct nfe_softc *sc = arg;
	struct nfe_jbuf *jbuf;
	int i;

	/* find the jbuf from the base pointer */
	i = ((char *)buf - (char *)sc->rxq.jpool) / NFE_JBYTES;
	if (i < 0 || i >= NFE_JPOOL_COUNT) {
		aprint_error_dev(sc->sc_dev,
		    "request to free a buffer (%p) not managed by us\n", buf);
		return;
	}
	jbuf = &sc->rxq.jbuf[i];

	/* ..and put it back in the free list */
	mutex_enter(&sc->rxq.mtx);
	SLIST_INSERT_HEAD(&sc->rxq.jfreelist, jbuf, jnext);
	mutex_exit(&sc->rxq.mtx);

	if (m != NULL)
		pool_cache_put(mb_cache, m);
}

int
nfe_jpool_alloc(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;
	struct nfe_jbuf *jbuf;
	bus_addr_t physaddr;
	char *buf;
	int i, nsegs, error;

	/*
	 * Allocate a big chunk of DMA'able memory.
	 */
	error = bus_dmamap_create(sc->sc_dmat, NFE_JPOOL_SIZE, 1,
	    NFE_JPOOL_SIZE, 0, BUS_DMA_NOWAIT, &ring->jmap);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create jumbo DMA map\n");
		ring->jmap = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_JPOOL_SIZE, PAGE_SIZE, 0,
	    &ring->jseg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate jumbo DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->jseg, nsegs, NFE_JPOOL_SIZE,
	    &ring->jpool, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map jumbo DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->jmap, ring->jpool,
	    NFE_JPOOL_SIZE, NULL, BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not load jumbo DMA map\n");
		goto fail;
	}

	/* ..and split it into 9KB chunks */
	SLIST_INIT(&ring->jfreelist);

	buf = ring->jpool;
	physaddr = ring->jmap->dm_segs[0].ds_addr;
	for (i = 0; i < NFE_JPOOL_COUNT; i++) {
		jbuf = &ring->jbuf[i];

		jbuf->buf = buf;
		jbuf->physaddr = physaddr;

		SLIST_INSERT_HEAD(&ring->jfreelist, jbuf, jnext);

		buf += NFE_JBYTES;
		physaddr += NFE_JBYTES;
	}

	return 0;

fail:	nfe_jpool_free(sc);
	return error;
}

void
nfe_jpool_free(struct nfe_softc *sc)
{
	struct nfe_rx_ring *ring = &sc->rxq;

	if (ring->jmap != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->jmap, 0,
		    ring->jmap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
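		/* tear down the DMA map covering the whole jumbo pool */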
		bus_dmamap_unload(sc->sc_dmat, ring->jmap);
		bus_dmamap_destroy(sc->sc_dmat, ring->jmap);
		ring->jmap = NULL;
	}
	if (ring->jpool != NULL) {
		bus_dmamem_unmap(sc->sc_dmat, ring->jpool, NFE_JPOOL_SIZE);
		bus_dmamem_free(sc->sc_dmat, &ring->jseg, 1);
		ring->jpool = NULL;
	}
}

int
nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	int i, nsegs, error;
	void **desc;
	int descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = (void **)&ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = (void **)&ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	ring->queued = 0;
	ring->cur = ring->next = 0;

	error = bus_dmamap_create(sc->sc_dmat, NFE_TX_RING_COUNT * descsize, 1,
	    NFE_TX_RING_COUNT * descsize, 0, BUS_DMA_NOWAIT, &ring->map);

	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not create desc DMA map\n");
		ring->map = NULL;
		goto fail;
	}

	error = bus_dmamem_alloc(sc->sc_dmat, NFE_TX_RING_COUNT * descsize,
	    PAGE_SIZE, 0, &ring->seg, 1, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate DMA memory\n");
		goto fail;
	}

	error = bus_dmamem_map(sc->sc_dmat, &ring->seg, nsegs,
	    NFE_TX_RING_COUNT * descsize, (void **)desc, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not map desc DMA memory\n");
		goto fail;
	}

	error = bus_dmamap_load(sc->sc_dmat, ring->map, *desc,
	    NFE_TX_RING_COUNT * descsize, NULL, BUS_DMA_NOWAIT);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev, "could not load desc DMA map\n");
		goto fail;
	}

	memset(*desc, 0, NFE_TX_RING_COUNT * descsize);
	ring->physaddr = ring->map->dm_segs[0].ds_addr;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		error = bus_dmamap_create(sc->sc_dmat, NFE_JBYTES,
		    NFE_MAX_SCATTER, NFE_JBYTES, 0, BUS_DMA_NOWAIT,
		    &ring->data[i].map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create DMA map\n");
			ring->data[i].map = NULL;
			goto fail;
		}
	}

	return 0;

fail:	nfe_free_tx_ring(sc, ring);
	return error;
}

void
nfe_reset_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	int i;

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		if (sc->sc_flags & NFE_40BIT_ADDR)
			ring->desc64[i].flags = 0;
		else
			ring->desc32[i].flags = 0;

		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
			data->m = NULL;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, ring->map, 0, ring->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	ring->queued = 0;
	ring->cur = ring->next = 0;
}

void
nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring)
{
	struct nfe_tx_data *data;
	void *desc;
	int i, descsize;

	if (sc->sc_flags & NFE_40BIT_ADDR) {
		desc = ring->desc64;
		descsize = sizeof (struct nfe_desc64);
	} else {
		desc = ring->desc32;
		descsize = sizeof (struct nfe_desc32);
	}

	if (desc != NULL) {
		bus_dmamap_sync(sc->sc_dmat, ring->map, 0,
		    ring->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ring->map);
		bus_dmamem_unmap(sc->sc_dmat, (void *)desc,
		    NFE_TX_RING_COUNT * descsize);
		bus_dmamem_free(sc->sc_dmat, &ring->seg, 1);
	}

	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->active, 0,
			    data->active->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->active);
			m_freem(data->m);
		}
	}

	/* ..and now actually destroy the DMA mappings */
	for (i = 0; i < NFE_TX_RING_COUNT; i++) {
		data = &ring->data[i];
		if (data->map == NULL)
			continue;
		bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}

void
nfe_setmulti(struct nfe_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &ec->ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN];
	uint32_t filter = NFE_RXFILTER_MAGIC;
	int i;

	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		memset(addr, 0, ETHER_ADDR_LEN);
		memset(mask, 0, ETHER_ADDR_LEN);
		goto done;
	}

	memcpy(addr, etherbroadcastaddr, ETHER_ADDR_LEN);
	memcpy(mask, etherbroadcastaddr, ETHER_ADDR_LEN);

	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			ifp->if_flags |= IFF_ALLMULTI;
			memset(addr, 0, ETHER_ADDR_LEN);
			memset(mask, 0, ETHER_ADDR_LEN);
			ETHER_UNLOCK(ec);
			goto done;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			addr[i] &= enm->enm_addrlo[i];
			mask[i] &= ~enm->enm_addrlo[i];
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mask[i] |= addr[i];

done:
	addr[0] |= 0x01;	/* make sure multicast bit is set */

	NFE_WRITE(sc, NFE_MULTIADDR_HI,
	    (uint32_t)addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
	NFE_WRITE(sc, NFE_MULTIADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MULTIMASK_HI,
	    (uint32_t)mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]);
	NFE_WRITE(sc, NFE_MULTIMASK_LO,
	    mask[5] << 8 | mask[4]);

	filter |= (ifp->if_flags & IFF_PROMISC) ? NFE_PROMISC : NFE_U2M;
	NFE_WRITE(sc, NFE_RXFILTER, filter);
}

void
nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr)
{
	uint32_t tmp;

	if ((sc->sc_flags & NFE_CORRECT_MACADDR) != 0) {
		tmp = NFE_READ(sc, NFE_MACADDR_HI);
		addr[0] = (tmp & 0xff);
		addr[1] = (tmp >> 8) & 0xff;
		addr[2] = (tmp >> 16) & 0xff;
		addr[3] = (tmp >> 24) & 0xff;

		tmp = NFE_READ(sc, NFE_MACADDR_LO);
		addr[4] = (tmp & 0xff);
		addr[5] = (tmp >> 8) & 0xff;

	} else {
		tmp = NFE_READ(sc, NFE_MACADDR_LO);
		addr[0] = (tmp >> 8) & 0xff;
		addr[1] = (tmp & 0xff);

		tmp = NFE_READ(sc, NFE_MACADDR_HI);
		addr[2] = (tmp >> 24) & 0xff;
		addr[3] = (tmp >> 16) & 0xff;
		addr[4] = (tmp >> 8) & 0xff;
		addr[5] = (tmp & 0xff);
	}
}

void
nfe_set_macaddr(struct nfe_softc *sc, const uint8_t *addr)
{
	NFE_WRITE(sc, NFE_MACADDR_LO,
	    addr[5] << 8 | addr[4]);
	NFE_WRITE(sc, NFE_MACADDR_HI,
	    (uint32_t)addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]);
}

void
nfe_tick(void *arg)
{
	struct nfe_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	callout_schedule(&sc->sc_tick_ch, hz);
}

void
nfe_poweron(device_t self)
{
	struct nfe_softc *sc = device_private(self);

	if ((sc->sc_flags & NFE_PWR_MGMT) != 0) {
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2);
		NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC);
		DELAY(100);
		NFE_WRITE(sc, NFE_MAC_RESET, 0);
		DELAY(100);
		NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2);
		NFE_WRITE(sc, NFE_PWR2_CTL,
		    NFE_READ(sc, NFE_PWR2_CTL) & ~NFE_PWR2_WAKEUP_MASK);
	}
}

bool
nfe_resume(device_t dv, const pmf_qual_t *qual)
{
	nfe_poweron(dv);

	return true;
}