/*	$NetBSD: ixp425_if_npe.c,v 1.50 2021/12/31 14:25:22 riastradh Exp $ */

/*-
 * Copyright (c) 2006 Sam Leffler.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#if 0
__FBSDID("$FreeBSD: src/sys/arm/xscale/ixp425/if_npe.c,v 1.1 2006/11/19 23:55:23 sam Exp $");
#endif
__KERNEL_RCSID(0, "$NetBSD: ixp425_if_npe.c,v 1.50 2021/12/31 14:25:22 riastradh Exp $");

/*
 * Intel XScale NPE Ethernet driver.
 *
 * This driver handles the two ports present on the IXP425.
 * Packet processing is done by the Network Processing Engines
 * (NPE's) that work together with a MAC and PHY. The MAC
 * is also mapped to the XScale cpu; the PHY is accessed via
 * the MAC. NPE-XScale communication happens through h/w
 * queues managed by the Q Manager block.
 *
 * The code here replaces the ethAcc, ethMii, and ethDB classes
 * in the Intel Access Library (IAL) and the OS-specific driver.
 *
 * XXX add vlan support
 * XXX NPE-C port doesn't work yet
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/endian.h>
#include <sys/ioctl.h>
#include <sys/syslog.h>
#include <sys/bus.h>
#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>
#include <net/bpf.h>

#include <arm/xscale/ixp425reg.h>
#include <arm/xscale/ixp425var.h>
#include <arm/xscale/ixp425_qmgr.h>
#include <arm/xscale/ixp425_npevar.h>
#include <arm/xscale/ixp425_if_npereg.h>

#include <dev/mii/miivar.h>

#include "locators.h"

struct npebuf {
	struct npebuf	*ix_next;	/* chain to next buffer */
	void		*ix_m;		/* backpointer to mbuf */
	bus_dmamap_t	ix_map;		/* bus dma map for associated data */
	struct npehwbuf	*ix_hw;		/* associated h/w block */
	uint32_t	ix_neaddr;	/* phys address of ix_hw */
};

struct npedma {
	const char*	name;
	int		nbuf;		/* # npebuf's allocated */
	bus_dmamap_t	m_map;
	struct npehwbuf	*hwbuf;		/* NPE h/w buffers */
	bus_dmamap_t	buf_map;
	bus_addr_t	buf_phys;	/* phys addr of buffers */
	struct npebuf	*buf;		/* s/w buffers (1-1 w/ h/w) */
};

struct npe_softc {
	device_t	sc_dev;
	struct ethercom	sc_ethercom;
	uint8_t		sc_enaddr[ETHER_ADDR_LEN];
	struct mii_data	sc_mii;
	bus_space_tag_t	sc_iot;
	bus_dma_tag_t	sc_dt;
	bus_space_handle_t sc_ioh;	/* MAC register window */
	bus_space_handle_t sc_miih;	/* MII register window */
	struct ixpnpe_softc *sc_npe;	/* NPE support */
	int		sc_unit;
	int		sc_phy;
	struct callout	sc_tick_ch;	/* Tick callout */
	struct npedma	txdma;
	struct npebuf	*tx_free;	/* list of free tx buffers */
	struct npedma	rxdma;
	int		rx_qid;		/* rx qid */
	int		rx_freeqid;	/* rx free buffers qid */
	int		tx_qid;		/* tx qid */
	int		tx_doneqid;	/* tx completed qid */
	struct npestats	*sc_stats;
	bus_dmamap_t	sc_stats_map;
	bus_addr_t	sc_stats_phys;	/* phys addr of sc_stats */
	u_short		sc_if_flags;	/* keep last if_flags */
	krndsource_t	rnd_source;	/* random source */
};

/*
 * Per-unit static configuration for IXP425.  The tx and
 * rx free Q id's are fixed by the NPE microcode.  The
 * rx Q id's are programmed to be separate to simplify
 * multi-port processing.  It may be better to handle
 * all traffic through one Q (as done by the Intel drivers).
 *
 * Note that the PHY's are accessible only from MAC A
 * on the IXP425.  This and other platform-specific
 * assumptions probably need to be handled through hints.
 */
static const struct {
	const char	*desc;		/* device description */
	int		npeid;		/* NPE assignment */
	int		macport;	/* Port number of the MAC */
	uint32_t	imageid;	/* NPE firmware image id */
	uint32_t	regbase;
	int		regsize;
	uint32_t	miibase;
	int		miisize;
	uint8_t		rx_qid;
	uint8_t		rx_freeqid;
	uint8_t		tx_qid;
	uint8_t		tx_doneqid;
} npeconfig[NPE_PORTS_MAX] = {
	{ .desc		= "IXP NPE-B",
	  .npeid	= NPE_B,
	  .macport	= 0x10,
	  .imageid	= IXP425_NPE_B_IMAGEID,
	  .regbase	= IXP425_MAC_A_HWBASE,
	  .regsize	= IXP425_MAC_A_SIZE,
	  .miibase	= IXP425_MAC_A_HWBASE,
	  .miisize	= IXP425_MAC_A_SIZE,
	  .rx_qid	= 4,
	  .rx_freeqid	= 27,
	  .tx_qid	= 24,
	  .tx_doneqid	= 31
	},
	{ .desc		= "IXP NPE-C",
	  .npeid	= NPE_C,
	  .macport	= 0x20,
	  .imageid	= IXP425_NPE_C_IMAGEID,
	  .regbase	= IXP425_MAC_B_HWBASE,
	  .regsize	= IXP425_MAC_B_SIZE,
	  .miibase	= IXP425_MAC_A_HWBASE,
	  .miisize	= IXP425_MAC_A_SIZE,
	  .rx_qid	= 12,
	  .rx_freeqid	= 28,
	  .tx_qid	= 25,
	  .tx_doneqid	= 31
	},
};
static struct npe_softc *npes[NPE_MAX];	/* NB: indexed by npeid */

static __inline uint32_t
RD4(struct npe_softc *sc, bus_size_t off)
{
	return bus_space_read_4(sc->sc_iot, sc->sc_ioh, off);
}

static __inline void
WR4(struct npe_softc *sc, bus_size_t off, uint32_t val)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
}

static int	npe_activate(struct npe_softc *);
#if 0
static void	npe_deactivate(struct npe_softc *);
#endif
static void	npe_setmac(struct npe_softc *, const u_char *);
static void	npe_getmac(struct npe_softc *);
static void	npe_txdone(int, void *);
static int	npe_rxbuf_init(struct npe_softc *, struct npebuf *,
			struct mbuf *);
static void	npe_rxdone(int, void *);
static void	npeinit_macreg(struct npe_softc *);
static int	npeinit(struct ifnet *);
static void	npeinit_resetcb(void *);
static void	npeinit_locked(void *);
static void	npestart(struct ifnet *);
static void	npestop(struct ifnet *, int);
static void	npewatchdog(struct ifnet *);
static int	npeioctl(struct ifnet *, u_long, void *);

static int	npe_setrxqosentry(struct npe_softc *, int, int, int);
static int	npe_updatestats(struct npe_softc *);
#if 0
static int	npe_getstats(struct npe_softc *);
static uint32_t	npe_getimageid(struct npe_softc *);
static int	npe_setloopback(struct npe_softc *, int);
#endif

static int	npe_miibus_readreg(device_t, int, int, uint16_t *);
static int	npe_miibus_writereg(device_t, int, int, uint16_t);
static void	npe_miibus_statchg(struct ifnet *);

static int npe_debug;
#define DPRINTF(sc, fmt, ...) do {			\
	if (npe_debug) printf(fmt, __VA_ARGS__);	\
} while (0)
#define DPRINTFn(n, sc, fmt, ...) do {			\
	if (npe_debug >= n) printf(fmt, __VA_ARGS__);	\
} while (0)

#define	NPE_TXBUF	128
#define	NPE_RXBUF	64

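/*
 * Pack a 6-byte Ethernet address into a 64-bit integer (most significant
 * octet first) so multicast address ranges can be compared numerically
 * in npe_rxdone().
 */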
#define MAC2UINT64(addr)	(((uint64_t)addr[0] << 40)	\
	    + ((uint64_t)addr[1] << 32)	\
	    + ((uint64_t)addr[2] << 24)	\
	    + ((uint64_t)addr[3] << 16)	\
	    + ((uint64_t)addr[4] << 8)	\
	    + (uint64_t)addr[5])

/* NB: all tx done processing goes through one queue */
static int tx_doneqid = -1;

void (*npe_getmac_md)(int, uint8_t *);

static int	npe_match(device_t, cfdata_t, void *);
static void	npe_attach(device_t, device_t, void *);

CFATTACH_DECL_NEW(npe, sizeof(struct npe_softc),
    npe_match, npe_attach, NULL, NULL);

static int
npe_match(device_t parent, cfdata_t cf, void *arg)
{
	struct ixpnpe_attach_args *na = arg;

	return (na->na_unit == NPE_B || na->na_unit == NPE_C);
}

static void
npe_attach(device_t parent, device_t self, void *arg)
{
	struct npe_softc *sc = device_private(self);
	struct ixpnpe_softc *isc = device_private(parent);
	struct ixpnpe_attach_args *na = arg;
	struct ifnet *ifp;
	struct mii_data * const mii = &sc->sc_mii;

	aprint_naive("\n");
	aprint_normal(": Ethernet co-processor\n");

	sc->sc_dev = self;
	sc->sc_iot = na->na_iot;
	sc->sc_dt = na->na_dt;
	sc->sc_npe = na->na_npe;
	sc->sc_unit = (na->na_unit == NPE_B) ? 0 : 1;
	sc->sc_phy = na->na_phy;

	memset(&sc->sc_ethercom, 0, sizeof(sc->sc_ethercom));
	memset(mii, 0, sizeof(*mii));

	callout_init(&sc->sc_tick_ch, 0);

	if (npe_activate(sc)) {
		aprint_error_dev(sc->sc_dev,
		    "Failed to activate NPE (missing microcode?)\n");
		return;
	}

	npe_getmac(sc);
	npeinit_macreg(sc);

	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(sc->sc_enaddr));

	ifp = &sc->sc_ethercom.ec_if;
	mii->mii_ifp = ifp;
	mii->mii_readreg = npe_miibus_readreg;
	mii->mii_writereg = npe_miibus_writereg;
	mii->mii_statchg = npe_miibus_statchg;
	sc->sc_ethercom.ec_mii = mii;

	ifmedia_init(&mii->mii_media, IFM_IMASK, ether_mediachange,
	    ether_mediastatus);

	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_DOPAUSE);
	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
	} else
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);

	ifp->if_softc = sc;
	strcpy(ifp->if_xname, device_xname(sc->sc_dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = npestart;
	ifp->if_ioctl = npeioctl;
	ifp->if_watchdog = npewatchdog;
	ifp->if_init = npeinit;
	ifp->if_stop = npestop;
	IFQ_SET_READY(&ifp->if_snd);

	/* VLAN capable */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, sc->sc_enaddr);
	rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev),
	    RND_TYPE_NET, RND_FLAG_DEFAULT);

	/* callback function to reset MAC */
	isc->macresetcbfunc = npeinit_resetcb;
	isc->macresetcbarg = sc;
}

/*
 * Compute and install the multicast filter.
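 * The MAC address filter is a single (address, mask) register pair;
 * for a multicast list we compute the tightest pair that matches every
 * listed address (bits on which all addresses agree are compared, the
 * rest are wildcarded), so some unwanted multicast frames may still be
 * accepted and are weeded out in npe_rxdone().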
 */
static void
npe_setmcast(struct npe_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &ec->ec_if;
	uint8_t mask[ETHER_ADDR_LEN], addr[ETHER_ADDR_LEN];
	uint32_t reg;
	uint32_t msg[2];
	int i;

	/* Always use the address filter.  Is this the right place to enable it? */
	reg = RD4(sc, NPE_MAC_RX_CNTRL1);
	WR4(sc, NPE_MAC_RX_CNTRL1, reg | NPE_RX_CNTRL1_ADDR_FLTR_EN);

	if (ifp->if_flags & IFF_PROMISC) {
		memset(mask, 0, ETHER_ADDR_LEN);
		memset(addr, 0, ETHER_ADDR_LEN);
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		static const uint8_t allmulti[ETHER_ADDR_LEN] =
		    { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
 all_multi:
		memcpy(mask, allmulti, ETHER_ADDR_LEN);
		memcpy(addr, allmulti, ETHER_ADDR_LEN);
	} else {
		uint8_t clr[ETHER_ADDR_LEN], set[ETHER_ADDR_LEN];
		struct ether_multistep step;
		struct ether_multi *enm;

		memset(clr, 0, ETHER_ADDR_LEN);
		memset(set, 0xff, ETHER_ADDR_LEN);

		ETHER_LOCK(ec);
		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN)) {
				ifp->if_flags |= IFF_ALLMULTI;
				ETHER_UNLOCK(ec);
				goto all_multi;
			}

			for (i = 0; i < ETHER_ADDR_LEN; i++) {
				clr[i] |= enm->enm_addrlo[i];
				set[i] &= enm->enm_addrlo[i];
			}

			ETHER_NEXT_MULTI(step, enm);
		}
		ETHER_UNLOCK(ec);

		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			mask[i] = set[i] | ~clr[i];
			addr[i] = set[i];
		}
	}

	/*
	 * Write the mask and address registers.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		WR4(sc, NPE_MAC_ADDR_MASK(i), mask[i]);
		WR4(sc, NPE_MAC_ADDR(i), addr[i]);
	}

	msg[0] = NPE_ADDRESSFILTERCONFIG << NPE_MAC_MSGID_SHL
	    | (npeconfig[sc->sc_unit].macport << NPE_MAC_PORTID_SHL);
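	/*
	 * NB: msg[1] packs, from the high byte down: the promiscuous
	 * flag, the low octet of our unicast address, and the low
	 * octets of the filter address and mask computed above.
	 */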
	msg[1] = ((ifp->if_flags & IFF_PROMISC) ? 1 : 0) << 24
	    | ((RD4(sc, NPE_MAC_UNI_ADDR_6) & 0xff) << 16)
	    | (addr[5] << 8) | mask[5];
	ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg);
}

static int
npe_dma_setup(struct npe_softc *sc, struct npedma *dma,
    const char *name, int nbuf, int maxseg)
{
	bus_dma_segment_t seg;
	int rseg, error, i;
	void *hwbuf;
	size_t size;

	memset(dma, 0, sizeof(*dma));

	dma->name = name;
	dma->nbuf = nbuf;

	size = nbuf * sizeof(struct npehwbuf);

	/* XXX COHERENT for now */
	error = bus_dmamem_alloc(sc->sc_dt, size, sizeof(uint32_t), 0, &seg,
	    1, &rseg, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "unable to %s for %s %s buffers, error %u\n",
		    "allocate memory", dma->name, "h/w", error);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dt, &seg, 1, size, &hwbuf,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_NOCACHE);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "unable to %s for %s %s buffers, error %u\n",
		    "map memory", dma->name, "h/w", error);
 free_dmamem:
		bus_dmamem_free(sc->sc_dt, &seg, rseg);
		return error;
	}
	dma->hwbuf = (void *)hwbuf;

	error = bus_dmamap_create(sc->sc_dt, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &dma->buf_map);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "unable to %s for %s %s buffers, error %u\n",
		    "create map", dma->name, "h/w", error);
 unmap_dmamem:
		dma->hwbuf = NULL;
		bus_dmamem_unmap(sc->sc_dt, hwbuf, size);
		goto free_dmamem;
	}

	error = bus_dmamap_load(sc->sc_dt, dma->buf_map, hwbuf, size, NULL,
	    BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "unable to %s for %s %s buffers, error %u\n",
		    "load map", dma->name, "h/w", error);
		bus_dmamap_destroy(sc->sc_dt, dma->buf_map);
		goto unmap_dmamem;
	}

	/* XXX M_TEMP */
	dma->buf = malloc(nbuf * sizeof(struct npebuf), M_TEMP,
	    M_WAITOK | M_ZERO);
	dma->buf_phys = dma->buf_map->dm_segs[0].ds_addr;
	for (i = 0; i < dma->nbuf; i++) {
		struct npebuf *npe = &dma->buf[i];
		struct npehwbuf *hw = &dma->hwbuf[i];

		/* Calculate offset to shared area */
		npe->ix_neaddr = dma->buf_phys +
		    ((uintptr_t)hw - (uintptr_t)dma->hwbuf);
		KASSERT((npe->ix_neaddr & 0x1f) == 0);
		error = bus_dmamap_create(sc->sc_dt, MCLBYTES, maxseg,
		    MCLBYTES, 0, 0, &npe->ix_map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to %s for %s buffer %u, error %u\n",
			    "create dmamap", dma->name, i, error);
			/* XXXSCW: Free up maps... */
			return error;
		}
		npe->ix_hw = hw;
	}
	bus_dmamap_sync(sc->sc_dt, dma->buf_map, 0, dma->buf_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);
	return 0;
}

#if 0
static void
npe_dma_destroy(struct npe_softc *sc, struct npedma *dma)
{
	int i;

	/* XXXSCW: Clean this up */

	if (dma->hwbuf != NULL) {
		for (i = 0; i < dma->nbuf; i++) {
			struct npebuf *npe = &dma->buf[i];
			bus_dmamap_destroy(sc->sc_dt, npe->ix_map);
		}
		bus_dmamap_unload(sc->sc_dt, dma->buf_map);
		bus_dmamem_free(sc->sc_dt, (void *)dma->hwbuf, dma->buf_map);
		bus_dmamap_destroy(sc->sc_dt, dma->buf_map);
	}
	if (dma->buf != NULL)
		free(dma->buf, M_TEMP);
	memset(dma, 0, sizeof(*dma));
}
#endif

static int
npe_activate(struct npe_softc *sc)
{
	bus_dma_segment_t seg;
	int unit = sc->sc_unit;
	int error, i, rseg;
	void *statbuf;

	/* load NPE firmware and start it running */
	error = ixpnpe_init(sc->sc_npe, "npe_fw", npeconfig[unit].imageid);
	if (error != 0)
		return error;

	if (bus_space_map(sc->sc_iot, npeconfig[unit].regbase,
	    npeconfig[unit].regsize, 0, &sc->sc_ioh)) {
		aprint_error_dev(sc->sc_dev, "Cannot map registers 0x%x:0x%x\n",
		    npeconfig[unit].regbase, npeconfig[unit].regsize);
		return ENOMEM;
	}

	if (npeconfig[unit].miibase != npeconfig[unit].regbase) {
		/*
		 * The PHY's are only accessible from one MAC (it appears)
		 * so for the other MAC's set up an additional mapping for
		 * frobbing the PHY registers.
		 */
		if (bus_space_map(sc->sc_iot, npeconfig[unit].miibase,
		    npeconfig[unit].miisize, 0, &sc->sc_miih)) {
			aprint_error_dev(sc->sc_dev,
			    "Cannot map MII registers 0x%x:0x%x\n",
			    npeconfig[unit].miibase, npeconfig[unit].miisize);
			return ENOMEM;
		}
	} else
		sc->sc_miih = sc->sc_ioh;
	error = npe_dma_setup(sc, &sc->txdma, "tx", NPE_TXBUF, NPE_MAXSEG);
	if (error != 0)
		return error;
	error = npe_dma_setup(sc, &sc->rxdma, "rx", NPE_RXBUF, 1);
	if (error != 0)
		return error;

	/* setup statistics block */
	error = bus_dmamem_alloc(sc->sc_dt, sizeof(struct npestats),
	    sizeof(uint32_t), 0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "unable to %s for %s, error %u\n",
		    "allocate memory", "stats block", error);
		return error;
	}

	error = bus_dmamem_map(sc->sc_dt, &seg, 1, sizeof(struct npestats),
	    &statbuf, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "unable to %s for %s, error %u\n",
		    "map memory", "stats block", error);
		return error;
	}
	sc->sc_stats = (void *)statbuf;

	error = bus_dmamap_create(sc->sc_dt, sizeof(struct npestats), 1,
	    sizeof(struct npestats), 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &sc->sc_stats_map);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "unable to %s for %s, error %u\n",
		    "create map", "stats block", error);
		return error;
	}

	error = bus_dmamap_load(sc->sc_dt, sc->sc_stats_map, sc->sc_stats,
	    sizeof(struct npestats), NULL, BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "unable to %s for %s, error %u\n",
		    "load map", "stats block", error);
		return error;
	}
	sc->sc_stats_phys = sc->sc_stats_map->dm_segs[0].ds_addr;

	/* XXX disable half-bridge LEARNING+FILTERING feature */

	/*
	 * Setup h/w rx/tx queues.  There are four q's:
	 *	rx		inbound q of rx'd frames
	 *	rx_free		pool of ixpbuf's for receiving frames
	 *	tx		outbound q of frames to send
	 *	tx_done		q of tx frames that have been processed
	 *
	 * The NPE handles the actual tx/rx process and the q manager
	 * handles the queues.  The driver just writes entries to the
	 * q manager mailboxes and gets callbacks when there are rx'd
	 * frames to process or tx'd frames to reap.  These callbacks
	 * are controlled by the q configurations; e.g. we get a
	 * callback when tx_done has 2 or more frames to process and
	 * when the rx q has at least one frame.  These settings can be
	 * changed at the time the q is configured.
	 */
	sc->rx_qid = npeconfig[unit].rx_qid;
	ixpqmgr_qconfig(sc->rx_qid, NPE_RXBUF, 0, 1,
	    IX_QMGR_Q_SOURCE_ID_NOT_E, npe_rxdone, sc);
	sc->rx_freeqid = npeconfig[unit].rx_freeqid;
	ixpqmgr_qconfig(sc->rx_freeqid, NPE_RXBUF, 0, NPE_RXBUF/2, 0, NULL, sc);
	/* tell the NPE to direct all traffic to rx_qid */
#if 0
	for (i = 0; i < 8; i++)
#else
	printf("%s: remember to fix rx q setup\n", device_xname(sc->sc_dev));
	for (i = 0; i < 4; i++)
#endif
		npe_setrxqosentry(sc, i, 0, sc->rx_qid);

	sc->tx_qid = npeconfig[unit].tx_qid;
	sc->tx_doneqid = npeconfig[unit].tx_doneqid;
	ixpqmgr_qconfig(sc->tx_qid, NPE_TXBUF, 0, NPE_TXBUF, 0, NULL, sc);
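	/*
	 * NB: the NPE microcode uses a single tx done q for all ports,
	 * so it is configured only once (for the first unit to attach);
	 * npe_txdone() demuxes completed buffers by NPE id.
	 */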
	if (tx_doneqid == -1) {
		ixpqmgr_qconfig(sc->tx_doneqid, NPE_TXBUF, 0, 2,
		    IX_QMGR_Q_SOURCE_ID_NOT_E, npe_txdone, sc);
		tx_doneqid = sc->tx_doneqid;
	}

	KASSERT(npes[npeconfig[unit].npeid] == NULL);
	npes[npeconfig[unit].npeid] = sc;

	return 0;
}

#if 0
static void
npe_deactivate(struct npe_softc *sc)
{
	int unit = sc->sc_unit;

	npes[npeconfig[unit].npeid] = NULL;

	/* XXX disable q's */
	if (sc->sc_npe != NULL)
		ixpnpe_stop(sc->sc_npe);
	if (sc->sc_stats != NULL) {
		bus_dmamap_unload(sc->sc_stats_tag, sc->sc_stats_map);
		bus_dmamem_free(sc->sc_stats_tag, sc->sc_stats,
		    sc->sc_stats_map);
		bus_dmamap_destroy(sc->sc_stats_tag, sc->sc_stats_map);
	}
	if (sc->sc_stats_tag != NULL)
		bus_dma_tag_destroy(sc->sc_stats_tag);
	npe_dma_destroy(sc, &sc->txdma);
	npe_dma_destroy(sc, &sc->rxdma);
	bus_generic_detach(sc->sc_dev);
	XXX ifmedia_fini somewhere
	if (sc->sc_mii)
		device_delete_child(sc->sc_dev, sc->sc_mii);
#if 0
	/* XXX sc_ioh and sc_miih */
	if (sc->mem_res)
		bus_release_resource(dev, SYS_RES_IOPORT,
		    rman_get_rid(sc->mem_res), sc->mem_res);
	sc->mem_res = 0;
#endif
}
#endif

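/*
 * Fold the NPE-maintained statistics block into the interface counters.
 * The block holds 32-bit big-endian counters accumulated since the last
 * NPE_RESETSTATS request (see npe_updatestats), so the values are simply
 * added to the running totals.
 */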
static void
npe_addstats(struct npe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct npestats *ns = sc->sc_stats;

	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
	if_statadd_ref(nsr, if_oerrors,
	    be32toh(ns->dot3StatsInternalMacTransmitErrors)
	    + be32toh(ns->dot3StatsCarrierSenseErrors)
	    + be32toh(ns->TxVLANIdFilterDiscards)
	    );
	if_statadd_ref(nsr, if_ierrors,
	    be32toh(ns->dot3StatsFCSErrors)
	    + be32toh(ns->dot3StatsInternalMacReceiveErrors)
	    + be32toh(ns->RxOverrunDiscards)
	    + be32toh(ns->RxUnderflowEntryDiscards)
	    );
	if_statadd_ref(nsr, if_collisions,
	    be32toh(ns->dot3StatsSingleCollisionFrames)
	    + be32toh(ns->dot3StatsMultipleCollisionFrames)
	    );
	IF_STAT_PUTREF(ifp);
}

static void
npe_tick(void *xsc)
{
#define	ACK	(NPE_RESETSTATS << NPE_MAC_MSGID_SHL)
	struct npe_softc *sc = xsc;
	uint32_t msg[2];

	/*
	 * NB: to avoid sleeping with the softc lock held we
	 * split the NPE msg processing into two parts.  The
	 * request for statistics is sent w/o waiting for a
	 * reply and then on the next tick we retrieve the
	 * results.  This works because npe_tick is the only
	 * code that talks via the mailboxes (except at setup).
	 * This likely can be handled better.
	 */
	if (ixpnpe_recvmsg(sc->sc_npe, msg) == 0 && msg[0] == ACK) {
		bus_dmamap_sync(sc->sc_dt, sc->sc_stats_map, 0,
		    sizeof(struct npestats), BUS_DMASYNC_POSTREAD);
		npe_addstats(sc);
	}
	npe_updatestats(sc);
	mii_tick(&sc->sc_mii);

	/* Schedule next poll */
	callout_reset(&sc->sc_tick_ch, hz, npe_tick, sc);
#undef ACK
}

static void
npe_setmac(struct npe_softc *sc, const u_char *eaddr)
{

	WR4(sc, NPE_MAC_UNI_ADDR_1, eaddr[0]);
	WR4(sc, NPE_MAC_UNI_ADDR_2, eaddr[1]);
	WR4(sc, NPE_MAC_UNI_ADDR_3, eaddr[2]);
	WR4(sc, NPE_MAC_UNI_ADDR_4, eaddr[3]);
	WR4(sc, NPE_MAC_UNI_ADDR_5, eaddr[4]);
	WR4(sc, NPE_MAC_UNI_ADDR_6, eaddr[5]);
}

static void
npe_getmac(struct npe_softc *sc)
{
	uint8_t *eaddr = sc->sc_enaddr;

	if (npe_getmac_md != NULL) {
		(*npe_getmac_md)(device_unit(sc->sc_dev), eaddr);
	} else {
		/*
		 * On some systems the unicast address appears to be
		 * loaded from EEPROM on reset.
		 */
		eaddr[0] = RD4(sc, NPE_MAC_UNI_ADDR_1) & 0xff;
		eaddr[1] = RD4(sc, NPE_MAC_UNI_ADDR_2) & 0xff;
		eaddr[2] = RD4(sc, NPE_MAC_UNI_ADDR_3) & 0xff;
		eaddr[3] = RD4(sc, NPE_MAC_UNI_ADDR_4) & 0xff;
		eaddr[4] = RD4(sc, NPE_MAC_UNI_ADDR_5) & 0xff;
		eaddr[5] = RD4(sc, NPE_MAC_UNI_ADDR_6) & 0xff;
	}
}

struct txdone {
	struct npebuf *head;
	struct npebuf **tail;
	int count;
};

static __inline void
npe_txdone_finish(struct npe_softc *sc, const struct txdone *td)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	*td->tail = sc->tx_free;
	sc->tx_free = td->head;
	/*
	 * We're no longer busy, so clear the busy flag and call the
	 * start routine to xmit more packets.
	 */
	if_statadd(ifp, if_opackets, td->count);
	ifp->if_flags &= ~IFF_OACTIVE;
	ifp->if_timer = 0;
	if_schedule_deferred_start(ifp);
}

/*
 * Q manager callback on tx done queue.  Reap mbufs
 * and return tx buffers to the free list.  Finally
 * restart output.  Note the microcode has only one
 * txdone q wired into it so we must use the NPE ID
 * returned with each npehwbuf to decide where to
 * send buffers.
 */
static void
npe_txdone(int qid, void *arg)
{
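/*
 * P2V maps the NPE buffer physical address returned by the q manager
 * back to the corresponding software npebuf; this works because the
 * h/w buffers were allocated as one contiguous block (see npe_dma_setup).
 */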
#define	P2V(a, dma) \
	&(dma)->buf[((a) - (dma)->buf_phys) / sizeof(struct npehwbuf)]
	struct npe_softc *sc;
	struct npebuf *npe;
	struct txdone *td, q[NPE_MAX];
	uint32_t entry;

	/* XXX no NPE-A support */
	q[NPE_B].tail = &q[NPE_B].head; q[NPE_B].count = 0;
	q[NPE_C].tail = &q[NPE_C].head; q[NPE_C].count = 0;
	/* XXX max # at a time? */
	while (ixpqmgr_qread(qid, &entry) == 0) {
		sc = npes[NPE_QM_Q_NPE(entry)];
		DPRINTF(sc, "%s: entry 0x%x NPE %u port %u\n",
		    __func__, entry, NPE_QM_Q_NPE(entry), NPE_QM_Q_PORT(entry));
		rnd_add_uint32(&sc->rnd_source, entry);

		npe = P2V(NPE_QM_Q_ADDR(entry), &sc->txdma);
		m_freem(npe->ix_m);
		npe->ix_m = NULL;

		td = &q[NPE_QM_Q_NPE(entry)];
		*td->tail = npe;
		td->tail = &npe->ix_next;
		td->count++;
	}

	if (q[NPE_B].count)
		npe_txdone_finish(npes[NPE_B], &q[NPE_B]);
	if (q[NPE_C].count)
		npe_txdone_finish(npes[NPE_C], &q[NPE_C]);
#undef P2V
}

static __inline struct mbuf *
npe_getcl(void)
{
	struct mbuf *m;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m != NULL) {
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			m = NULL;
		}
	}
	return m;
}

static int
npe_rxbuf_init(struct npe_softc *sc, struct npebuf *npe, struct mbuf *m)
{
	struct npehwbuf *hw;
	int error;

	if (m == NULL) {
		m = npe_getcl();
		if (m == NULL)
			return ENOBUFS;
	}
	KASSERT(m->m_ext.ext_size >= (NPE_FRAME_SIZE_DEFAULT + ETHER_ALIGN));
	m->m_pkthdr.len = m->m_len = NPE_FRAME_SIZE_DEFAULT;
	/* backload payload and align ip hdr */
	m->m_data = m->m_ext.ext_buf + (m->m_ext.ext_size
	    - (NPE_FRAME_SIZE_DEFAULT + ETHER_ALIGN));
	error = bus_dmamap_load_mbuf(sc->sc_dt, npe->ix_map, m,
	    BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error != 0) {
		m_freem(m);
		return error;
	}
	hw = npe->ix_hw;
	hw->ix_ne[0].data = htobe32(npe->ix_map->dm_segs[0].ds_addr);
	/* NB: NPE requires length be a multiple of 64 */
	/* NB: buffer length is shifted in word */
	hw->ix_ne[0].len = htobe32(npe->ix_map->dm_segs[0].ds_len << 16);
	hw->ix_ne[0].next = 0;
	npe->ix_m = m;
	/* Flush the memory in the mbuf */
	bus_dmamap_sync(sc->sc_dt, npe->ix_map, 0, npe->ix_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);
	return 0;
}

/*
 * RX q processing for a specific NPE.  Claim entries
 * from the hardware queue and pass the frames up the
 * stack.  Pass the rx buffers to the free list.
 */
static void
npe_rxdone(int qid, void *arg)
{
#define	P2V(a, dma) \
	&(dma)->buf[((a) - (dma)->buf_phys) / sizeof(struct npehwbuf)]
	struct npe_softc *sc = arg;
	struct npedma *dma = &sc->rxdma;
	uint32_t entry;

	while (ixpqmgr_qread(qid, &entry) == 0) {
		struct npebuf *npe = P2V(NPE_QM_Q_ADDR(entry), dma);
		struct mbuf *m;

		DPRINTF(sc, "%s: entry 0x%x neaddr 0x%x ne_len 0x%x\n",
		    __func__, entry, npe->ix_neaddr, npe->ix_hw->ix_ne[0].len);
		rnd_add_uint32(&sc->rnd_source, entry);
		/*
		 * Allocate a new mbuf to replenish the rx buffer.
		 * If doing so fails we drop the rx'd frame so we
		 * can reuse the previous mbuf.  When we're able to
		 * allocate a new mbuf dispatch the mbuf w/ rx'd
		 * data up the stack and replace it with the newly
		 * allocated one.
		 */
		m = npe_getcl();
		if (m != NULL) {
			struct mbuf *mrx = npe->ix_m;
			struct npehwbuf *hw = npe->ix_hw;
			struct ifnet *ifp = &sc->sc_ethercom.ec_if;

			/* Flush mbuf memory for rx'd data */
			bus_dmamap_sync(sc->sc_dt, npe->ix_map, 0,
			    npe->ix_map->dm_mapsize, BUS_DMASYNC_POSTREAD);

			/* XXX flush hw buffer; works now 'cuz coherent */
			/* set m_len etc. per rx frame size */
			mrx->m_len = be32toh(hw->ix_ne[0].len) & 0xffff;
			mrx->m_pkthdr.len = mrx->m_len;
			m_set_rcvif(mrx, ifp);
			/* Don't add M_HASFCS.  See below */

#if 1
			if (mrx->m_pkthdr.len < sizeof(struct ether_header)) {
				log(LOG_INFO, "%s: too short frame (len=%d)\n",
				    device_xname(sc->sc_dev),
				    mrx->m_pkthdr.len);
				/* Back out "newly allocated" mbuf. */
				m_freem(m);
				if_statinc(ifp, if_ierrors);
				goto fail;
			}
			if ((ifp->if_flags & IFF_PROMISC) == 0) {
				struct ether_header *eh;

				/*
				 * Workaround for "Non-Intel XScale Technology
				 * Errata" No. 29: a packet addressed to
				 * AA:BB:CC:DD:EE:xF matches the filter (both
				 * unicast and multicast).
				 */
				eh = mtod(mrx, struct ether_header *);
				if (ETHER_IS_MULTICAST(eh->ether_dhost) == 0) {
					/* Unicast */

					if (sc->sc_enaddr[5] != eh->ether_dhost[5]) {
						/* Discard it */
#if 0
						printf("discard it\n");
#endif
						/*
						 * Back out "newly allocated"
						 * mbuf.
						 */
						m_freem(m);
						goto fail;
					}
				} else if (memcmp(eh->ether_dhost,
				    etherbroadcastaddr, 6) == 0) {
					/* Always accept broadcast packets */
				} else {
					struct ethercom *ec = &sc->sc_ethercom;
					struct ether_multi *enm;
					struct ether_multistep step;
					int match = 0;

					/* Multicast */

					ETHER_LOCK(ec);
					ETHER_FIRST_MULTI(step, ec, enm);
					while (enm != NULL) {
						uint64_t lowint, highint, dest;

						lowint = MAC2UINT64(enm->enm_addrlo);
						highint = MAC2UINT64(enm->enm_addrhi);
						dest = MAC2UINT64(eh->ether_dhost);
#if 0
						printf("%llx\n", lowint);
						printf("%llx\n", dest);
						printf("%llx\n", highint);
#endif
						if ((lowint <= dest) && (dest <= highint)) {
							match = 1;
							break;
						}
						ETHER_NEXT_MULTI(step, enm);
					}
					ETHER_UNLOCK(ec);

					if (match == 0) {
						/* Discard it */
#if 0
						printf("discard it(M)\n");
#endif
						/*
						 * Back out "newly allocated"
						 * mbuf.
						 */
						m_freem(m);
						goto fail;
					}
				}
			}
			if (mrx->m_pkthdr.len > NPE_FRAME_SIZE_DEFAULT) {
				log(LOG_INFO, "%s: oversized frame (len=%d)\n",
				    device_xname(sc->sc_dev), mrx->m_pkthdr.len);
				/* Back out "newly allocated" mbuf. */
				m_freem(m);
				if_statinc(ifp, if_ierrors);
				goto fail;
			}
#endif

			/*
			 * Trim the FCS: this driver configures the MAC to
			 * pass the CRC/FCS up with each rx'd frame (see
			 * NPE_RX_CNTRL1_CRC_EN in npeinit_macreg), so we
			 * always trim it here rather than set M_HASFCS.
			 */
			m_adj(mrx, -ETHER_CRC_LEN);

			/*
			 * Tap off here if there is a bpf listener.
			 */

			if_percpuq_enqueue(ifp->if_percpuq, mrx);
		} else {
 fail:
			/* discard frame and re-use mbuf */
			m = npe->ix_m;
		}
		if (npe_rxbuf_init(sc, npe, m) == 0) {
			/* return npe buf to rx free list */
			ixpqmgr_qwrite(sc->rx_freeqid, npe->ix_neaddr);
		} else {
			/* XXX should not happen */
		}
	}
#undef P2V
}

static void
npe_startxmit(struct npe_softc *sc)
{
	struct npedma *dma = &sc->txdma;
	int i;

	sc->tx_free = NULL;
	for (i = 0; i < dma->nbuf; i++) {
		struct npebuf *npe = &dma->buf[i];
		if (npe->ix_m != NULL) {
			/* NB: should not happen */
			printf("%s: %s: free mbuf at entry %u\n",
			    device_xname(sc->sc_dev), __func__, i);
			m_freem(npe->ix_m);
		}
		npe->ix_m = NULL;
		npe->ix_next = sc->tx_free;
		sc->tx_free = npe;
	}
}

static void
npe_startrecv(struct npe_softc *sc)
{
	struct npedma *dma = &sc->rxdma;
	struct npebuf *npe;
	int i;

	for (i = 0; i < dma->nbuf; i++) {
		npe = &dma->buf[i];
		npe_rxbuf_init(sc, npe, npe->ix_m);
		/* Set npe buf on rx free list */
		ixpqmgr_qwrite(sc->rx_freeqid, npe->ix_neaddr);
	}
}

static void
npeinit_macreg(struct npe_softc *sc)
{

	/*
	 * Reset MAC core.
	 */
	WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_RESET);
	DELAY(NPE_MAC_RESET_DELAY);
	/* Configure MAC to generate MDC clock */
	WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_MDC_EN);

	/* Disable transmitter and receiver in the MAC */
	WR4(sc, NPE_MAC_RX_CNTRL1,
	    RD4(sc, NPE_MAC_RX_CNTRL1) &~ NPE_RX_CNTRL1_RX_EN);
	WR4(sc, NPE_MAC_TX_CNTRL1,
	    RD4(sc, NPE_MAC_TX_CNTRL1) &~ NPE_TX_CNTRL1_TX_EN);

	/*
	 * Set the MAC core registers.
	 */
	WR4(sc, NPE_MAC_INT_CLK_THRESH, 0x1);	/* clock ratio: for ixp4xx */
	WR4(sc, NPE_MAC_TX_CNTRL2, 0xf);	/* max retries */
	WR4(sc, NPE_MAC_RANDOM_SEED, 0x8);	/* LFSR back-off seed */
	/* Thresholds determined by NPE firmware FS */
	WR4(sc, NPE_MAC_THRESH_P_EMPTY, 0x12);
	WR4(sc, NPE_MAC_THRESH_P_FULL, 0x30);
	WR4(sc, NPE_MAC_BUF_SIZE_TX, NPE_MAC_BUF_SIZE_TX_DEFAULT);
						/* tx fifo threshold (bytes) */
	WR4(sc, NPE_MAC_TX_DEFER, 0x15);	/* for single deferral */
	WR4(sc, NPE_MAC_RX_DEFER, 0x16);	/* deferral on inter-frame gap */
	WR4(sc, NPE_MAC_TX_TWO_DEFER_1, 0x8);	/* for 2-part deferral */
	WR4(sc, NPE_MAC_TX_TWO_DEFER_2, 0x7);	/* for 2-part deferral */
	WR4(sc, NPE_MAC_SLOT_TIME, NPE_MAC_SLOT_TIME_MII_DEFAULT);
						/* assumes MII mode */
	WR4(sc, NPE_MAC_TX_CNTRL1,
	    NPE_TX_CNTRL1_RETRY		/* retry failed xmits */
	    | NPE_TX_CNTRL1_FCS_EN	/* append FCS */
	    | NPE_TX_CNTRL1_2DEFER	/* 2-part deferral */
	    | NPE_TX_CNTRL1_PAD_EN);	/* pad runt frames */
	/* XXX pad strip? */
	WR4(sc, NPE_MAC_RX_CNTRL1,
	    NPE_RX_CNTRL1_CRC_EN	/* include CRC/FCS */
	    | NPE_RX_CNTRL1_PAUSE_EN);	/* ena pause frame handling */
	WR4(sc, NPE_MAC_RX_CNTRL2, 0);
}

static void
npeinit_resetcb(void *xsc)
{
	struct npe_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t msg[2];

	if_statinc(ifp, if_oerrors);
	npeinit_locked(sc);

	msg[0] = NPE_NOTIFYMACRECOVERYDONE << NPE_MAC_MSGID_SHL
	    | (npeconfig[sc->sc_unit].macport << NPE_MAC_PORTID_SHL);
	msg[1] = 0;
	ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg);
}

/*
 * Reset and initialize the chip
 */
static void
npeinit_locked(void *xsc)
{
	struct npe_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	/* Cancel any pending I/O. */
	npestop(ifp, 0);

	/* Reset the chip to a known state. */
	npeinit_macreg(sc);
	npe_setmac(sc, CLLADDR(ifp->if_sadl));
	ether_mediachange(ifp);
	npe_setmcast(sc);

	npe_startxmit(sc);
	npe_startrecv(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ifp->if_timer = 0;		/* just in case */

	/* Enable transmitter and receiver in the MAC */
	WR4(sc, NPE_MAC_RX_CNTRL1,
	    RD4(sc, NPE_MAC_RX_CNTRL1) | NPE_RX_CNTRL1_RX_EN);
	WR4(sc, NPE_MAC_TX_CNTRL1,
	    RD4(sc, NPE_MAC_TX_CNTRL1) | NPE_TX_CNTRL1_TX_EN);

	callout_reset(&sc->sc_tick_ch, hz, npe_tick, sc);
}

static int
npeinit(struct ifnet *ifp)
{
	struct npe_softc *sc = ifp->if_softc;
	int s;

	s = splnet();
	npeinit_locked(sc);
	splx(s);

	return 0;
}

/*
 * Defragment an mbuf chain by copying its contents into a single
 * mbuf (with a cluster if the packet does not fit in an ordinary
 * mbuf).  If allocation fails NULL is returned and the original
 * chain is left untouched.
 */
static __inline struct mbuf *
npe_defrag(struct mbuf *m0)
{
	struct mbuf *m;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return NULL;
	m_copy_pkthdr(m, m0);

	if ((m->m_len = m0->m_pkthdr.len) > MHLEN) {
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			return NULL;
		}
	}

	m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
	m_freem(m0);

	return m;
}

/*
 * Dequeue packets and place on the h/w transmit queue.
 */
static void
npestart(struct ifnet *ifp)
{
	struct npe_softc *sc = ifp->if_softc;
	struct npebuf *npe;
	struct npehwbuf *hw;
	struct mbuf *m, *n;
	bus_dma_segment_t *segs;
	int nseg, len, error, i;
	uint32_t next;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	while (sc->tx_free != NULL) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;
		npe = sc->tx_free;
		error = bus_dmamap_load_mbuf(sc->sc_dt, npe->ix_map, m,
		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
		if (error == EFBIG) {
			n = npe_defrag(m);
			if (n == NULL) {
				printf("%s: %s: too many fragments\n",
				    device_xname(sc->sc_dev), __func__);
				m_freem(m);
				return;	/* XXX? */
			}
			m = n;
			error = bus_dmamap_load_mbuf(sc->sc_dt, npe->ix_map,
			    m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
		}
		if (error != 0) {
			printf("%s: %s: error %u\n",
			    device_xname(sc->sc_dev), __func__, error);
			m_freem(m);
			return;	/* XXX? */
		}
		sc->tx_free = npe->ix_next;

		/*
		 * Tap off here if there is a bpf listener.
		 */
		bpf_mtap(ifp, m, BPF_D_OUT);

		bus_dmamap_sync(sc->sc_dt, npe->ix_map, 0,
		    npe->ix_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

		npe->ix_m = m;
		hw = npe->ix_hw;
		len = m->m_pkthdr.len;
		nseg = npe->ix_map->dm_nsegs;
		segs = npe->ix_map->dm_segs;
		next = npe->ix_neaddr + sizeof(hw->ix_ne[0]);
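		/*
		 * Build the h/w descriptor chain: each entry carries the
		 * segment address, the segment length in the high 16 bits
		 * of ix_ne[].len (with the total frame length in the low
		 * 16 bits of the first entry only), and a link to the
		 * next entry; the last link is zeroed below.
		 */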
		for (i = 0; i < nseg; i++) {
			hw->ix_ne[i].data = htobe32(segs[i].ds_addr);
			hw->ix_ne[i].len = htobe32((segs[i].ds_len<<16) | len);
			hw->ix_ne[i].next = htobe32(next);

			len = 0;	/* zero for segments > 1 */
			next += sizeof(hw->ix_ne[0]);
		}
		hw->ix_ne[i-1].next = 0;	/* zero last in chain */
		/* XXX flush descriptor instead of using uncached memory */

		DPRINTF(sc, "%s: qwrite(%u, 0x%x) ne_data %x ne_len 0x%x\n",
		    __func__, sc->tx_qid, npe->ix_neaddr,
		    hw->ix_ne[0].data, hw->ix_ne[0].len);
		/* stick it on the tx q */
		/* XXX add vlan priority */
		ixpqmgr_qwrite(sc->tx_qid, npe->ix_neaddr);

		ifp->if_timer = 5;
	}
	if (sc->tx_free == NULL)
		ifp->if_flags |= IFF_OACTIVE;
}

static void
npe_stopxmit(struct npe_softc *sc)
{
	struct npedma *dma = &sc->txdma;
	int i;

	/* XXX qmgr */
	for (i = 0; i < dma->nbuf; i++) {
		struct npebuf *npe = &dma->buf[i];

		if (npe->ix_m != NULL) {
			bus_dmamap_unload(sc->sc_dt, npe->ix_map);
			m_freem(npe->ix_m);
			npe->ix_m = NULL;
		}
	}
}

static void
npe_stoprecv(struct npe_softc *sc)
{
	struct npedma *dma = &sc->rxdma;
	int i;

	/* XXX qmgr */
	for (i = 0; i < dma->nbuf; i++) {
		struct npebuf *npe = &dma->buf[i];

		if (npe->ix_m != NULL) {
			bus_dmamap_unload(sc->sc_dt, npe->ix_map);
			m_freem(npe->ix_m);
			npe->ix_m = NULL;
		}
	}
}

/*
 * Turn off interrupts, and stop the nic.
 */
void
npestop(struct ifnet *ifp, int disable)
{
	struct npe_softc *sc = ifp->if_softc;

	/* Disable transmitter and receiver in the MAC */
	WR4(sc, NPE_MAC_RX_CNTRL1,
	    RD4(sc, NPE_MAC_RX_CNTRL1) &~ NPE_RX_CNTRL1_RX_EN);
	WR4(sc, NPE_MAC_TX_CNTRL1,
	    RD4(sc, NPE_MAC_TX_CNTRL1) &~ NPE_TX_CNTRL1_TX_EN);

	callout_stop(&sc->sc_tick_ch);

	npe_stopxmit(sc);
	npe_stoprecv(sc);
	/* XXX go into loopback & drain q's? */
	/* XXX but beware of disabling tx above */

	/*
	 * The MAC core rx/tx disable may leave the MAC hardware in an
	 * unpredictable state.  A hw reset is executed before resetting
	 * all the MAC parameters to a known value.
	 */
	WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_RESET);
	DELAY(NPE_MAC_RESET_DELAY);
	WR4(sc, NPE_MAC_INT_CLK_THRESH, NPE_MAC_INT_CLK_THRESH_DEFAULT);
	WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_MDC_EN);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}

void
npewatchdog(struct ifnet *ifp)
{
	struct npe_softc *sc = ifp->if_softc;
	int s;

	aprint_error_dev(sc->sc_dev, "device timeout\n");
	s = splnet();
	if_statinc(ifp, if_oerrors);
	npeinit_locked(sc);
	splx(s);
}

static int
npeioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct npe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
#if 0 /* not yet */
		/* Flow control requires full-duplex mode. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
		    (ifr->ifr_media & IFM_FDX) == 0)
			ifr->ifr_media &= ~IFM_ETH_FMASK;
		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				/* We can do both TXPAUSE and RXPAUSE. */
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
		}
#endif
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	case SIOCSIFFLAGS:
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_RUNNING) {
			/*
			 * If interface is marked down and it is running,
			 * then stop and disable it.
			 */
			if_stop(ifp, 1);
		} else if ((ifp->if_flags & (IFF_UP |IFF_RUNNING)) == IFF_UP) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			error = if_init(ifp);
		} else if ((ifp->if_flags & IFF_UP) != 0) {
			u_short diff;

			/* Up (AND RUNNING). */

			diff = (ifp->if_flags ^ sc->sc_if_flags)
			    & (IFF_PROMISC | IFF_ALLMULTI);
			if ((diff & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
				/*
				 * If the only difference between the last
				 * and the new flags is IFF_PROMISC or
				 * IFF_ALLMULTI, just set the multicast
				 * filter (don't reset, to avoid dropping
				 * the link).
				 */
				npe_setmcast(sc);
			} else {
				/*
				 * Reset the interface to pick up changes in
				 * any other flags that affect the hardware
				 * state.
				 */
				error = if_init(ifp);
			}
		}
		sc->sc_if_flags = ifp->if_flags;
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			npe_setmcast(sc);
			error = 0;
		}
	}

	npestart(ifp);

	splx(s);
	return error;
}

/*
 * Setup a traffic class -> rx queue mapping.
 */
static int
npe_setrxqosentry(struct npe_softc *sc, int classix, int trafclass, int qid)
{
	int npeid = npeconfig[sc->sc_unit].npeid;
	uint32_t msg[2];

	msg[0] = (NPE_SETRXQOSENTRY << NPE_MAC_MSGID_SHL) | (npeid << 20)
	    | classix;
	msg[1] = (trafclass << 24) | (1 << 23) | (qid << 16) | (qid << 4);
	return ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg);
}

/*
 * Update and reset the statistics in the NPE.
 */
static int
npe_updatestats(struct npe_softc *sc)
{
	uint32_t msg[2];

	msg[0] = NPE_RESETSTATS << NPE_MAC_MSGID_SHL;
	msg[1] = sc->sc_stats_phys;	/* physical address of stat block */
	return ixpnpe_sendmsg(sc->sc_npe, msg);		/* NB: no recv */
}

#if 0
/*
 * Get the current statistics block.
 */
static int
npe_getstats(struct npe_softc *sc)
{
	uint32_t msg[2];

	msg[0] = NPE_GETSTATS << NPE_MAC_MSGID_SHL;
	msg[1] = sc->sc_stats_phys;	/* physical address of stat block */
	return ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg);
}

/*
 * Query the image id of the loaded firmware.
 */
static uint32_t
npe_getimageid(struct npe_softc *sc)
{
	uint32_t msg[2];

	msg[0] = NPE_GETSTATUS << NPE_MAC_MSGID_SHL;
	msg[1] = 0;
	return ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg) == 0 ? msg[1] : 0;
}

/*
 * Enable/disable loopback.
 */
static int
npe_setloopback(struct npe_softc *sc, int ena)
{
	uint32_t msg[2];

	msg[0] = (NPE_SETLOOPBACK << NPE_MAC_MSGID_SHL) | (ena != 0);
	msg[1] = 0;
	return ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg);
}
#endif

/*
 * MII bus support routines.
 *
 * NB: ixp425 has one PHY per NPE
 */
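/*
 * The 32-bit MDIO command/status words are spread across four consecutive
 * 32-bit MAC registers, one byte per register; the helpers below gather
 * and scatter the bytes.
 */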
static uint32_t
npe_mii_mdio_read(struct npe_softc *sc, int reg)
{
#define	MII_RD4(sc, reg)	bus_space_read_4(sc->sc_iot, sc->sc_miih, reg)
	uint32_t v;

	/* NB: registers are known to be sequential */
	v = (MII_RD4(sc, reg+0) & 0xff) << 0;
	v |= (MII_RD4(sc, reg+4) & 0xff) << 8;
	v |= (MII_RD4(sc, reg+8) & 0xff) << 16;
	v |= (MII_RD4(sc, reg+12) & 0xff) << 24;
	return v;
#undef MII_RD4
}

static void
npe_mii_mdio_write(struct npe_softc *sc, int reg, uint32_t cmd)
{
#define	MII_WR4(sc, reg, v) \
	bus_space_write_4(sc->sc_iot, sc->sc_miih, reg, v)

	/* NB: registers are known to be sequential */
	MII_WR4(sc, reg+0, cmd & 0xff);
	MII_WR4(sc, reg+4, (cmd >> 8) & 0xff);
	MII_WR4(sc, reg+8, (cmd >> 16) & 0xff);
	MII_WR4(sc, reg+12, (cmd >> 24) & 0xff);
#undef MII_WR4
}

static int
npe_mii_mdio_wait(struct npe_softc *sc)
{
#define	MAXTRIES	100	/* XXX */
	uint32_t v;
	int i;

	for (i = 0; i < MAXTRIES; i++) {
		v = npe_mii_mdio_read(sc, NPE_MAC_MDIO_CMD);
		if ((v & NPE_MII_GO) == 0)
			return 0;
	}
	return ETIMEDOUT;
#undef MAXTRIES
}

static int
npe_miibus_readreg(device_t self, int phy, int reg, uint16_t *val)
{
	struct npe_softc *sc = device_private(self);
	uint32_t v;

	if (sc->sc_phy > IXPNPECF_PHY_DEFAULT && phy != sc->sc_phy)
		return -1;
	v = (phy << NPE_MII_ADDR_SHL) | (reg << NPE_MII_REG_SHL)
	    | NPE_MII_GO;
	npe_mii_mdio_write(sc, NPE_MAC_MDIO_CMD, v);
	if (npe_mii_mdio_wait(sc) == 0)
		v = npe_mii_mdio_read(sc, NPE_MAC_MDIO_STS);
	else
		v = 0xffff | NPE_MII_READ_FAIL;

	if ((v & NPE_MII_READ_FAIL) != 0)
		return -1;

	*val = v & 0xffff;
	return 0;
}

static int
npe_miibus_writereg(device_t self, int phy, int reg, uint16_t val)
{
	struct npe_softc *sc = device_private(self);
	uint32_t v;

	if (sc->sc_phy > IXPNPECF_PHY_DEFAULT && phy != sc->sc_phy)
		return -1;
	v = (phy << NPE_MII_ADDR_SHL) | (reg << NPE_MII_REG_SHL)
	    | val | NPE_MII_WRITE
	    | NPE_MII_GO;
	npe_mii_mdio_write(sc, NPE_MAC_MDIO_CMD, v);

	return npe_mii_mdio_wait(sc);
}

static void
npe_miibus_statchg(struct ifnet *ifp)
{
	struct npe_softc *sc = ifp->if_softc;
	uint32_t tx1, rx1;
	uint32_t randoff;

	/* Sync MAC duplex state */
	tx1 = RD4(sc, NPE_MAC_TX_CNTRL1);
	rx1 = RD4(sc, NPE_MAC_RX_CNTRL1);
	if (sc->sc_mii.mii_media_active & IFM_FDX) {
		WR4(sc, NPE_MAC_SLOT_TIME, NPE_MAC_SLOT_TIME_MII_DEFAULT);
		tx1 &= ~NPE_TX_CNTRL1_DUPLEX;
		rx1 |= NPE_RX_CNTRL1_PAUSE_EN;
	} else {
		struct timeval now;
		getmicrotime(&now);
		randoff = (RD4(sc, NPE_MAC_UNI_ADDR_6) ^ now.tv_usec)
		    & 0x7f;
		WR4(sc, NPE_MAC_SLOT_TIME, NPE_MAC_SLOT_TIME_MII_DEFAULT
		    + randoff);
		tx1 |= NPE_TX_CNTRL1_DUPLEX;
		rx1 &= ~NPE_RX_CNTRL1_PAUSE_EN;
	}
	WR4(sc, NPE_MAC_RX_CNTRL1, rx1);
	WR4(sc, NPE_MAC_TX_CNTRL1, tx1);
}