1 /* $NetBSD: ixp425_if_npe.c,v 1.17 2009/03/12 13:15:13 msaitoh Exp $ */ 2 3 /*- 4 * Copyright (c) 2006 Sam Leffler. All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 */ 26 27 #include <sys/cdefs.h> 28 #if 0 29 __FBSDID("$FreeBSD: src/sys/arm/xscale/ixp425/if_npe.c,v 1.1 2006/11/19 23:55:23 sam Exp $"); 30 #endif 31 __KERNEL_RCSID(0, "$NetBSD: ixp425_if_npe.c,v 1.17 2009/03/12 13:15:13 msaitoh Exp $"); 32 33 /* 34 * Intel XScale NPE Ethernet driver. 35 * 36 * This driver handles the two ports present on the IXP425. 37 * Packet processing is done by the Network Processing Engines 38 * (NPE's) that work together with a MAC and PHY. The MAC 39 * is also mapped to the XScale cpu; the PHY is accessed via 40 * the MAC. NPE-XScale communication happens through h/w 41 * queues managed by the Q Manager block. 42 * 43 * The code here replaces the ethAcc, ethMii, and ethDB classes 44 * in the Intel Access Library (IAL) and the OS-specific driver. 
45 * 46 * XXX add vlan support 47 * XXX NPE-C port doesn't work yet 48 */ 49 50 #include "bpfilter.h" 51 #include "rnd.h" 52 53 #include <sys/param.h> 54 #include <sys/systm.h> 55 #include <sys/kernel.h> 56 #include <sys/device.h> 57 #include <sys/callout.h> 58 #include <sys/mbuf.h> 59 #include <sys/malloc.h> 60 #include <sys/socket.h> 61 #include <sys/endian.h> 62 #include <sys/ioctl.h> 63 #include <sys/syslog.h> 64 65 #include <machine/bus.h> 66 67 #include <net/if.h> 68 #include <net/if_dl.h> 69 #include <net/if_media.h> 70 #include <net/if_ether.h> 71 72 #if NBPFILTER > 0 73 #include <net/bpf.h> 74 #endif 75 76 #if NRND > 0 77 #include <sys/rnd.h> 78 #endif 79 80 #include <arm/xscale/ixp425reg.h> 81 #include <arm/xscale/ixp425var.h> 82 #include <arm/xscale/ixp425_qmgr.h> 83 #include <arm/xscale/ixp425_npevar.h> 84 #include <arm/xscale/ixp425_if_npereg.h> 85 86 #include <dev/mii/miivar.h> 87 88 #include "locators.h" 89 90 struct npebuf { 91 struct npebuf *ix_next; /* chain to next buffer */ 92 void *ix_m; /* backpointer to mbuf */ 93 bus_dmamap_t ix_map; /* bus dma map for associated data */ 94 struct npehwbuf *ix_hw; /* associated h/w block */ 95 uint32_t ix_neaddr; /* phys address of ix_hw */ 96 }; 97 98 struct npedma { 99 const char* name; 100 int nbuf; /* # npebuf's allocated */ 101 bus_dmamap_t m_map; 102 struct npehwbuf *hwbuf; /* NPE h/w buffers */ 103 bus_dmamap_t buf_map; 104 bus_addr_t buf_phys; /* phys addr of buffers */ 105 struct npebuf *buf; /* s/w buffers (1-1 w/ h/w) */ 106 }; 107 108 struct npe_softc { 109 struct device sc_dev; 110 struct ethercom sc_ethercom; 111 uint8_t sc_enaddr[ETHER_ADDR_LEN]; 112 struct mii_data sc_mii; 113 bus_space_tag_t sc_iot; 114 bus_dma_tag_t sc_dt; 115 bus_space_handle_t sc_ioh; /* MAC register window */ 116 bus_space_handle_t sc_miih; /* MII register window */ 117 struct ixpnpe_softc *sc_npe; /* NPE support */ 118 int sc_unit; 119 int sc_phy; 120 struct callout sc_tick_ch; /* Tick callout */ 121 struct npedma txdma; 122 struct npebuf *tx_free; /* list of free tx buffers */ 123 struct npedma rxdma; 124 int rx_qid; /* rx qid */ 125 int rx_freeqid; /* rx free buffers qid */ 126 int tx_qid; /* tx qid */ 127 int tx_doneqid; /* tx completed qid */ 128 struct npestats *sc_stats; 129 bus_dmamap_t sc_stats_map; 130 bus_addr_t sc_stats_phys; /* phys addr of sc_stats */ 131 int sc_if_flags; /* keep last if_flags */ 132 #if NRND > 0 133 rndsource_element_t rnd_source; /* random source */ 134 #endif 135 }; 136 137 /* 138 * Per-unit static configuration for IXP425. The tx and 139 * rx free Q id's are fixed by the NPE microcode. The 140 * rx Q id's are programmed to be separate to simplify 141 * multi-port processing. It may be better to handle 142 * all traffic through one Q (as done by the Intel drivers). 143 * 144 * Note that the PHY's are accessible only from MAC A 145 * on the IXP425. This and other platform-specific 146 * assumptions probably need to be handled through hints. 
147 */ 148 static const struct { 149 const char *desc; /* device description */ 150 int npeid; /* NPE assignment */ 151 int macport; /* Port number of the MAC */ 152 uint32_t imageid; /* NPE firmware image id */ 153 uint32_t regbase; 154 int regsize; 155 uint32_t miibase; 156 int miisize; 157 uint8_t rx_qid; 158 uint8_t rx_freeqid; 159 uint8_t tx_qid; 160 uint8_t tx_doneqid; 161 } npeconfig[NPE_PORTS_MAX] = { 162 { .desc = "IXP NPE-B", 163 .npeid = NPE_B, 164 .macport = 0x10, 165 .imageid = IXP425_NPE_B_IMAGEID, 166 .regbase = IXP425_MAC_A_HWBASE, 167 .regsize = IXP425_MAC_A_SIZE, 168 .miibase = IXP425_MAC_A_HWBASE, 169 .miisize = IXP425_MAC_A_SIZE, 170 .rx_qid = 4, 171 .rx_freeqid = 27, 172 .tx_qid = 24, 173 .tx_doneqid = 31 174 }, 175 { .desc = "IXP NPE-C", 176 .npeid = NPE_C, 177 .macport = 0x20, 178 .imageid = IXP425_NPE_C_IMAGEID, 179 .regbase = IXP425_MAC_B_HWBASE, 180 .regsize = IXP425_MAC_B_SIZE, 181 .miibase = IXP425_MAC_A_HWBASE, 182 .miisize = IXP425_MAC_A_SIZE, 183 .rx_qid = 12, 184 .rx_freeqid = 28, 185 .tx_qid = 25, 186 .tx_doneqid = 31 187 }, 188 }; 189 static struct npe_softc *npes[NPE_MAX]; /* NB: indexed by npeid */ 190 191 static __inline uint32_t 192 RD4(struct npe_softc *sc, bus_size_t off) 193 { 194 return bus_space_read_4(sc->sc_iot, sc->sc_ioh, off); 195 } 196 197 static __inline void 198 WR4(struct npe_softc *sc, bus_size_t off, uint32_t val) 199 { 200 bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val); 201 } 202 203 static int npe_activate(struct npe_softc *); 204 #if 0 205 static void npe_deactivate(struct npe_softc *); 206 #endif 207 static void npe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr); 208 static void npe_setmac(struct npe_softc *sc, const u_char *eaddr); 209 static void npe_getmac(struct npe_softc *sc); 210 static void npe_txdone(int qid, void *arg); 211 static int npe_rxbuf_init(struct npe_softc *, struct npebuf *, 212 struct mbuf *); 213 static void npe_rxdone(int qid, void *arg); 214 static void npeinit_macreg(struct npe_softc *); 215 static int npeinit(struct ifnet *); 216 static void npeinit_resetcb(void *); 217 static void npeinit_locked(void *); 218 static void npestart(struct ifnet *); 219 static void npestop(struct ifnet *, int); 220 static void npewatchdog(struct ifnet *); 221 static int npeioctl(struct ifnet * ifp, u_long, void *); 222 223 static int npe_setrxqosentry(struct npe_softc *, int classix, 224 int trafclass, int qid); 225 static int npe_updatestats(struct npe_softc *); 226 #if 0 227 static int npe_getstats(struct npe_softc *); 228 static uint32_t npe_getimageid(struct npe_softc *); 229 static int npe_setloopback(struct npe_softc *, int ena); 230 #endif 231 232 static int npe_miibus_readreg(struct device *, int, int); 233 static void npe_miibus_writereg(struct device *, int, int, int); 234 static void npe_miibus_statchg(struct device *); 235 236 static int npe_debug; 237 #define DPRINTF(sc, fmt, ...) do { \ 238 if (npe_debug) printf(fmt, __VA_ARGS__); \ 239 } while (0) 240 #define DPRINTFn(n, sc, fmt, ...) 
do { \ 241 if (npe_debug >= n) printf(fmt, __VA_ARGS__); \ 242 } while (0) 243 244 #define NPE_TXBUF 128 245 #define NPE_RXBUF 64 246 247 #ifndef ETHER_ALIGN 248 #define ETHER_ALIGN 2 /* XXX: Ditch this */ 249 #endif 250 251 #define MAC2UINT64(addr) (((uint64_t)addr[0] << 40) \ 252 + ((uint64_t)addr[1] << 32) \ 253 + ((uint64_t)addr[2] << 24) \ 254 + ((uint64_t)addr[3] << 16) \ 255 + ((uint64_t)addr[4] << 8) \ 256 + (uint64_t)addr[5]) 257 258 /* NB: all tx done processing goes through one queue */ 259 static int tx_doneqid = -1; 260 261 void (*npe_getmac_md)(int, uint8_t *); 262 263 static int npe_match(struct device *, struct cfdata *, void *); 264 static void npe_attach(struct device *, struct device *, void *); 265 266 CFATTACH_DECL(npe, sizeof(struct npe_softc), 267 npe_match, npe_attach, NULL, NULL); 268 269 static int 270 npe_match(struct device *parent, struct cfdata *cf, void *arg) 271 { 272 struct ixpnpe_attach_args *na = arg; 273 274 return (na->na_unit == NPE_B || na->na_unit == NPE_C); 275 } 276 277 static void 278 npe_attach(struct device *parent, struct device *self, void *arg) 279 { 280 struct npe_softc *sc = (void *)self; 281 struct ixpnpe_attach_args *na = arg; 282 struct ixpnpe_softc *isc = (struct ixpnpe_softc *)parent; 283 struct ifnet *ifp; 284 285 aprint_naive("\n"); 286 aprint_normal(": Ethernet co-processor\n"); 287 288 sc->sc_iot = na->na_iot; 289 sc->sc_dt = na->na_dt; 290 sc->sc_npe = na->na_npe; 291 sc->sc_unit = (na->na_unit == NPE_B) ? 0 : 1; 292 sc->sc_phy = na->na_phy; 293 294 memset(&sc->sc_ethercom, 0, sizeof(sc->sc_ethercom)); 295 memset(&sc->sc_mii, 0, sizeof(sc->sc_mii)); 296 297 callout_init(&sc->sc_tick_ch, 0); 298 299 if (npe_activate(sc)) { 300 aprint_error("%s: Failed to activate NPE (missing " 301 "microcode?)\n", sc->sc_dev.dv_xname); 302 return; 303 } 304 305 npe_getmac(sc); 306 npeinit_macreg(sc); 307 308 aprint_normal("%s: Ethernet address %s\n", sc->sc_dev.dv_xname, 309 ether_sprintf(sc->sc_enaddr)); 310 311 ifp = &sc->sc_ethercom.ec_if; 312 sc->sc_mii.mii_ifp = ifp; 313 sc->sc_mii.mii_readreg = npe_miibus_readreg; 314 sc->sc_mii.mii_writereg = npe_miibus_writereg; 315 sc->sc_mii.mii_statchg = npe_miibus_statchg; 316 sc->sc_ethercom.ec_mii = &sc->sc_mii; 317 318 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, ether_mediachange, 319 npe_ifmedia_status); 320 321 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 322 MII_OFFSET_ANY, MIIF_DOPAUSE); 323 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) { 324 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL); 325 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE); 326 } else 327 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); 328 329 ifp->if_softc = sc; 330 strcpy(ifp->if_xname, sc->sc_dev.dv_xname); 331 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 332 ifp->if_start = npestart; 333 ifp->if_ioctl = npeioctl; 334 ifp->if_watchdog = npewatchdog; 335 ifp->if_init = npeinit; 336 ifp->if_stop = npestop; 337 IFQ_SET_READY(&ifp->if_snd); 338 339 /* VLAN capable */ 340 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU; 341 342 if_attach(ifp); 343 ether_ifattach(ifp, sc->sc_enaddr); 344 #if NRND > 0 345 rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname, 346 RND_TYPE_NET, 0); 347 #endif 348 349 /* callback function to reset MAC */ 350 isc->macresetcbfunc = npeinit_resetcb; 351 isc->macresetcbarg = sc; 352 } 353 354 /* 355 * Compute and install the multicast filter. 
356 */ 357 static void 358 npe_setmcast(struct npe_softc *sc) 359 { 360 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 361 uint8_t mask[ETHER_ADDR_LEN], addr[ETHER_ADDR_LEN]; 362 uint32_t reg; 363 uint32_t msg[2]; 364 int i; 365 366 /* Always use filter. Is here a correct position? */ 367 reg = RD4(sc, NPE_MAC_RX_CNTRL1); 368 WR4(sc, NPE_MAC_RX_CNTRL1, reg | NPE_RX_CNTRL1_ADDR_FLTR_EN); 369 370 if (ifp->if_flags & IFF_PROMISC) { 371 memset(mask, 0, ETHER_ADDR_LEN); 372 memset(addr, 0, ETHER_ADDR_LEN); 373 } else if (ifp->if_flags & IFF_ALLMULTI) { 374 static const uint8_t allmulti[ETHER_ADDR_LEN] = 375 { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 }; 376 all_multi: 377 memcpy(mask, allmulti, ETHER_ADDR_LEN); 378 memcpy(addr, allmulti, ETHER_ADDR_LEN); 379 } else { 380 uint8_t clr[ETHER_ADDR_LEN], set[ETHER_ADDR_LEN]; 381 struct ether_multistep step; 382 struct ether_multi *enm; 383 384 memset(clr, 0, ETHER_ADDR_LEN); 385 memset(set, 0xff, ETHER_ADDR_LEN); 386 387 ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm); 388 while (enm != NULL) { 389 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 390 ifp->if_flags |= IFF_ALLMULTI; 391 goto all_multi; 392 } 393 394 for (i = 0; i < ETHER_ADDR_LEN; i++) { 395 clr[i] |= enm->enm_addrlo[i]; 396 set[i] &= enm->enm_addrlo[i]; 397 } 398 399 ETHER_NEXT_MULTI(step, enm); 400 } 401 402 for (i = 0; i < ETHER_ADDR_LEN; i++) { 403 mask[i] = set[i] | ~clr[i]; 404 addr[i] = set[i]; 405 } 406 } 407 408 /* 409 * Write the mask and address registers. 410 */ 411 for (i = 0; i < ETHER_ADDR_LEN; i++) { 412 WR4(sc, NPE_MAC_ADDR_MASK(i), mask[i]); 413 WR4(sc, NPE_MAC_ADDR(i), addr[i]); 414 } 415 416 msg[0] = NPE_ADDRESSFILTERCONFIG << NPE_MAC_MSGID_SHL 417 | (npeconfig[sc->sc_unit].macport << NPE_MAC_PORTID_SHL); 418 msg[1] = ((ifp->if_flags & IFF_PROMISC) ? 
1 : 0) << 24 419 | ((RD4(sc, NPE_MAC_UNI_ADDR_6) & 0xff) << 16) 420 | (addr[5] << 8) | mask[5]; 421 ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg); 422 } 423 424 static int 425 npe_dma_setup(struct npe_softc *sc, struct npedma *dma, 426 const char *name, int nbuf, int maxseg) 427 { 428 bus_dma_segment_t seg; 429 int rseg, error, i; 430 void *hwbuf; 431 size_t size; 432 433 memset(dma, 0, sizeof(*dma)); 434 435 dma->name = name; 436 dma->nbuf = nbuf; 437 438 size = nbuf * sizeof(struct npehwbuf); 439 440 /* XXX COHERENT for now */ 441 error = bus_dmamem_alloc(sc->sc_dt, size, sizeof(uint32_t), 0, &seg, 442 1, &rseg, BUS_DMA_NOWAIT); 443 if (error) { 444 printf("%s: unable to allocate memory for %s h/w buffers, " 445 "error %u\n", sc->sc_dev.dv_xname, dma->name, error); 446 } 447 448 error = bus_dmamem_map(sc->sc_dt, &seg, 1, size, &hwbuf, 449 BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_NOCACHE); 450 if (error) { 451 printf("%s: unable to map memory for %s h/w buffers, " 452 "error %u\n", sc->sc_dev.dv_xname, dma->name, error); 453 free_dmamem: 454 bus_dmamem_free(sc->sc_dt, &seg, rseg); 455 return error; 456 } 457 dma->hwbuf = (void *)hwbuf; 458 459 error = bus_dmamap_create(sc->sc_dt, size, 1, size, 0, 460 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &dma->buf_map); 461 if (error) { 462 printf("%s: unable to create map for %s h/w buffers, " 463 "error %u\n", sc->sc_dev.dv_xname, dma->name, error); 464 unmap_dmamem: 465 dma->hwbuf = NULL; 466 bus_dmamem_unmap(sc->sc_dt, hwbuf, size); 467 goto free_dmamem; 468 } 469 470 error = bus_dmamap_load(sc->sc_dt, dma->buf_map, hwbuf, size, NULL, 471 BUS_DMA_NOWAIT); 472 if (error) { 473 printf("%s: unable to load map for %s h/w buffers, " 474 "error %u\n", sc->sc_dev.dv_xname, dma->name, error); 475 destroy_dmamap: 476 bus_dmamap_destroy(sc->sc_dt, dma->buf_map); 477 goto unmap_dmamem; 478 } 479 480 /* XXX M_TEMP */ 481 dma->buf = malloc(nbuf * sizeof(struct npebuf), M_TEMP, M_NOWAIT | M_ZERO); 482 if (dma->buf == NULL) { 483 printf("%s: unable to allocate memory for %s s/w buffers\n", 484 sc->sc_dev.dv_xname, dma->name); 485 bus_dmamap_unload(sc->sc_dt, dma->buf_map); 486 error = ENOMEM; 487 goto destroy_dmamap; 488 } 489 490 dma->buf_phys = dma->buf_map->dm_segs[0].ds_addr; 491 for (i = 0; i < dma->nbuf; i++) { 492 struct npebuf *npe = &dma->buf[i]; 493 struct npehwbuf *hw = &dma->hwbuf[i]; 494 495 /* calculate offset to shared area */ 496 npe->ix_neaddr = dma->buf_phys + 497 ((uintptr_t)hw - (uintptr_t)dma->hwbuf); 498 KASSERT((npe->ix_neaddr & 0x1f) == 0); 499 error = bus_dmamap_create(sc->sc_dt, MCLBYTES, maxseg, 500 MCLBYTES, 0, 0, &npe->ix_map); 501 if (error != 0) { 502 printf("%s: unable to create dmamap for %s buffer %u, " 503 "error %u\n", sc->sc_dev.dv_xname, dma->name, i, 504 error); 505 /* XXXSCW: Free up maps... 
*/ 506 return error; 507 } 508 npe->ix_hw = hw; 509 } 510 bus_dmamap_sync(sc->sc_dt, dma->buf_map, 0, dma->buf_map->dm_mapsize, 511 BUS_DMASYNC_PREWRITE); 512 return 0; 513 } 514 515 #if 0 516 static void 517 npe_dma_destroy(struct npe_softc *sc, struct npedma *dma) 518 { 519 int i; 520 521 /* XXXSCW: Clean this up */ 522 523 if (dma->hwbuf != NULL) { 524 for (i = 0; i < dma->nbuf; i++) { 525 struct npebuf *npe = &dma->buf[i]; 526 bus_dmamap_destroy(sc->sc_dt, npe->ix_map); 527 } 528 bus_dmamap_unload(sc->sc_dt, dma->buf_map); 529 bus_dmamem_free(sc->sc_dt, (void *)dma->hwbuf, dma->buf_map); 530 bus_dmamap_destroy(sc->sc_dt, dma->buf_map); 531 } 532 if (dma->buf != NULL) 533 free(dma->buf, M_TEMP); 534 memset(dma, 0, sizeof(*dma)); 535 } 536 #endif 537 538 static int 539 npe_activate(struct npe_softc *sc) 540 { 541 bus_dma_segment_t seg; 542 int unit = sc->sc_unit; 543 int error, i, rseg; 544 void *statbuf; 545 546 /* load NPE firmware and start it running */ 547 error = ixpnpe_init(sc->sc_npe, "npe_fw", npeconfig[unit].imageid); 548 if (error != 0) 549 return error; 550 551 if (bus_space_map(sc->sc_iot, npeconfig[unit].regbase, 552 npeconfig[unit].regsize, 0, &sc->sc_ioh)) { 553 printf("%s: Cannot map registers 0x%x:0x%x\n", 554 sc->sc_dev.dv_xname, npeconfig[unit].regbase, 555 npeconfig[unit].regsize); 556 return ENOMEM; 557 } 558 559 if (npeconfig[unit].miibase != npeconfig[unit].regbase) { 560 /* 561 * The PHY's are only accessible from one MAC (it appears) 562 * so for other MAC's setup an additional mapping for 563 * frobbing the PHY registers. 564 */ 565 if (bus_space_map(sc->sc_iot, npeconfig[unit].miibase, 566 npeconfig[unit].miisize, 0, &sc->sc_miih)) { 567 printf("%s: Cannot map MII registers 0x%x:0x%x\n", 568 sc->sc_dev.dv_xname, npeconfig[unit].miibase, 569 npeconfig[unit].miisize); 570 return ENOMEM; 571 } 572 } else 573 sc->sc_miih = sc->sc_ioh; 574 error = npe_dma_setup(sc, &sc->txdma, "tx", NPE_TXBUF, NPE_MAXSEG); 575 if (error != 0) 576 return error; 577 error = npe_dma_setup(sc, &sc->rxdma, "rx", NPE_RXBUF, 1); 578 if (error != 0) 579 return error; 580 581 /* setup statistics block */ 582 error = bus_dmamem_alloc(sc->sc_dt, sizeof(struct npestats), 583 sizeof(uint32_t), 0, &seg, 1, &rseg, BUS_DMA_NOWAIT); 584 if (error) { 585 printf("%s: unable to allocate memory for stats block, " 586 "error %u\n", sc->sc_dev.dv_xname, error); 587 return error; 588 } 589 590 error = bus_dmamem_map(sc->sc_dt, &seg, 1, sizeof(struct npestats), 591 &statbuf, BUS_DMA_NOWAIT); 592 if (error) { 593 printf("%s: unable to map memory for stats block, " 594 "error %u\n", sc->sc_dev.dv_xname, error); 595 return error; 596 } 597 sc->sc_stats = (void *)statbuf; 598 599 error = bus_dmamap_create(sc->sc_dt, sizeof(struct npestats), 1, 600 sizeof(struct npestats), 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 601 &sc->sc_stats_map); 602 if (error) { 603 printf("%s: unable to create map for stats block, " 604 "error %u\n", sc->sc_dev.dv_xname, error); 605 return error; 606 } 607 608 if (bus_dmamap_load(sc->sc_dt, sc->sc_stats_map, sc->sc_stats, 609 sizeof(struct npestats), NULL, BUS_DMA_NOWAIT) != 0) { 610 printf("%s: unable to load memory for stats block, error %u\n", 611 sc->sc_dev.dv_xname, error); 612 return error; 613 } 614 sc->sc_stats_phys = sc->sc_stats_map->dm_segs[0].ds_addr; 615 616 /* XXX disable half-bridge LEARNING+FILTERING feature */ 617 618 /* 619 * Setup h/w rx/tx queues. 
There are four q's:
 *	rx	inbound q of rx'd frames
 *	rx_free	pool of ixpbuf's for receiving frames
 *	tx	outbound q of frames to send
 *	tx_done	q of tx frames that have been processed
 *
 * The NPE handles the actual tx/rx process and the q manager
 * handles the queues.  The driver just writes entries to the
 * q manager mailboxes and gets callbacks when there are rx'd
 * frames to process or tx'd frames to reap.  These callbacks
 * are controlled by the q configurations; e.g. we get a
 * callback when tx_done has 2 or more frames to process and
 * when the rx q has at least one frame.  These settings can be
 * changed at the time the q is configured.
 */
	sc->rx_qid = npeconfig[unit].rx_qid;
	ixpqmgr_qconfig(sc->rx_qid, NPE_RXBUF, 0, 1,
	    IX_QMGR_Q_SOURCE_ID_NOT_E, npe_rxdone, sc);
	sc->rx_freeqid = npeconfig[unit].rx_freeqid;
	ixpqmgr_qconfig(sc->rx_freeqid, NPE_RXBUF, 0, NPE_RXBUF/2, 0, NULL, sc);
	/* tell the NPE to direct all traffic to rx_qid */
#if 0
	for (i = 0; i < 8; i++)
#else
	printf("%s: remember to fix rx q setup\n", sc->sc_dev.dv_xname);
	for (i = 0; i < 4; i++)
#endif
		npe_setrxqosentry(sc, i, 0, sc->rx_qid);

	sc->tx_qid = npeconfig[unit].tx_qid;
	sc->tx_doneqid = npeconfig[unit].tx_doneqid;
	ixpqmgr_qconfig(sc->tx_qid, NPE_TXBUF, 0, NPE_TXBUF, 0, NULL, sc);
	if (tx_doneqid == -1) {
		ixpqmgr_qconfig(sc->tx_doneqid, NPE_TXBUF, 0, 2,
		    IX_QMGR_Q_SOURCE_ID_NOT_E, npe_txdone, sc);
		tx_doneqid = sc->tx_doneqid;
	}

	KASSERT(npes[npeconfig[unit].npeid] == NULL);
	npes[npeconfig[unit].npeid] = sc;

	return 0;
}

#if 0
static void
npe_deactivate(struct npe_softc *sc)
{
	int unit = sc->sc_unit;

	npes[npeconfig[unit].npeid] = NULL;

	/* XXX disable q's */
	if (sc->sc_npe != NULL)
		ixpnpe_stop(sc->sc_npe);
	if (sc->sc_stats != NULL) {
		bus_dmamap_unload(sc->sc_stats_tag, sc->sc_stats_map);
		bus_dmamem_free(sc->sc_stats_tag, sc->sc_stats,
		    sc->sc_stats_map);
		bus_dmamap_destroy(sc->sc_stats_tag, sc->sc_stats_map);
	}
	if (sc->sc_stats_tag != NULL)
		bus_dma_tag_destroy(sc->sc_stats_tag);
	npe_dma_destroy(sc, &sc->txdma);
	npe_dma_destroy(sc, &sc->rxdma);
	bus_generic_detach(sc->sc_dev);
	if (sc->sc_mii)
		device_delete_child(sc->sc_dev, sc->sc_mii);
#if 0
	/* XXX sc_ioh and sc_miih */
	if (sc->mem_res)
		bus_release_resource(dev, SYS_RES_IOPORT,
		    rman_get_rid(sc->mem_res), sc->mem_res);
	sc->mem_res = 0;
#endif
}
#endif

/*
 * Notify the world which media we're using.
699 */ 700 static void 701 npe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr) 702 { 703 struct npe_softc *sc = ifp->if_softc; 704 705 mii_pollstat(&sc->sc_mii); 706 707 ifmr->ifm_active = sc->sc_mii.mii_media_active; 708 ifmr->ifm_status = sc->sc_mii.mii_media_status; 709 } 710 711 static void 712 npe_addstats(struct npe_softc *sc) 713 { 714 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 715 struct npestats *ns = sc->sc_stats; 716 717 ifp->if_oerrors += 718 be32toh(ns->dot3StatsInternalMacTransmitErrors) 719 + be32toh(ns->dot3StatsCarrierSenseErrors) 720 + be32toh(ns->TxVLANIdFilterDiscards) 721 ; 722 ifp->if_ierrors += be32toh(ns->dot3StatsFCSErrors) 723 + be32toh(ns->dot3StatsInternalMacReceiveErrors) 724 + be32toh(ns->RxOverrunDiscards) 725 + be32toh(ns->RxUnderflowEntryDiscards) 726 ; 727 ifp->if_collisions += 728 be32toh(ns->dot3StatsSingleCollisionFrames) 729 + be32toh(ns->dot3StatsMultipleCollisionFrames) 730 ; 731 } 732 733 static void 734 npe_tick(void *xsc) 735 { 736 #define ACK (NPE_RESETSTATS << NPE_MAC_MSGID_SHL) 737 struct npe_softc *sc = xsc; 738 uint32_t msg[2]; 739 740 /* 741 * NB: to avoid sleeping with the softc lock held we 742 * split the NPE msg processing into two parts. The 743 * request for statistics is sent w/o waiting for a 744 * reply and then on the next tick we retrieve the 745 * results. This works because npe_tick is the only 746 * code that talks via the mailbox's (except at setup). 747 * This likely can be handled better. 748 */ 749 if (ixpnpe_recvmsg(sc->sc_npe, msg) == 0 && msg[0] == ACK) { 750 bus_dmamap_sync(sc->sc_dt, sc->sc_stats_map, 0, 751 sizeof(struct npestats), BUS_DMASYNC_POSTREAD); 752 npe_addstats(sc); 753 } 754 npe_updatestats(sc); 755 mii_tick(&sc->sc_mii); 756 757 /* schedule next poll */ 758 callout_reset(&sc->sc_tick_ch, hz, npe_tick, sc); 759 #undef ACK 760 } 761 762 static void 763 npe_setmac(struct npe_softc *sc, const u_char *eaddr) 764 { 765 766 WR4(sc, NPE_MAC_UNI_ADDR_1, eaddr[0]); 767 WR4(sc, NPE_MAC_UNI_ADDR_2, eaddr[1]); 768 WR4(sc, NPE_MAC_UNI_ADDR_3, eaddr[2]); 769 WR4(sc, NPE_MAC_UNI_ADDR_4, eaddr[3]); 770 WR4(sc, NPE_MAC_UNI_ADDR_5, eaddr[4]); 771 WR4(sc, NPE_MAC_UNI_ADDR_6, eaddr[5]); 772 } 773 774 static void 775 npe_getmac(struct npe_softc *sc) 776 { 777 uint8_t *eaddr = sc->sc_enaddr; 778 779 if (npe_getmac_md != NULL) { 780 (*npe_getmac_md)(sc->sc_dev.dv_unit, eaddr); 781 } else { 782 /* 783 * Some system's unicast address appears to be loaded from 784 * EEPROM on reset 785 */ 786 eaddr[0] = RD4(sc, NPE_MAC_UNI_ADDR_1) & 0xff; 787 eaddr[1] = RD4(sc, NPE_MAC_UNI_ADDR_2) & 0xff; 788 eaddr[2] = RD4(sc, NPE_MAC_UNI_ADDR_3) & 0xff; 789 eaddr[3] = RD4(sc, NPE_MAC_UNI_ADDR_4) & 0xff; 790 eaddr[4] = RD4(sc, NPE_MAC_UNI_ADDR_5) & 0xff; 791 eaddr[5] = RD4(sc, NPE_MAC_UNI_ADDR_6) & 0xff; 792 } 793 } 794 795 struct txdone { 796 struct npebuf *head; 797 struct npebuf **tail; 798 int count; 799 }; 800 801 static __inline void 802 npe_txdone_finish(struct npe_softc *sc, const struct txdone *td) 803 { 804 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 805 806 *td->tail = sc->tx_free; 807 sc->tx_free = td->head; 808 /* 809 * We're no longer busy, so clear the busy flag and call the 810 * start routine to xmit more packets. 811 */ 812 ifp->if_opackets += td->count; 813 ifp->if_flags &= ~IFF_OACTIVE; 814 ifp->if_timer = 0; 815 npestart(ifp); 816 } 817 818 /* 819 * Q manager callback on tx done queue. Reap mbufs 820 * and return tx buffers to the free list. Finally 821 * restart output. 
Note the microcode has only one 822 * txdone q wired into it so we must use the NPE ID 823 * returned with each npehwbuf to decide where to 824 * send buffers. 825 */ 826 static void 827 npe_txdone(int qid, void *arg) 828 { 829 #define P2V(a, dma) \ 830 &(dma)->buf[((a) - (dma)->buf_phys) / sizeof(struct npehwbuf)] 831 struct npe_softc *sc; 832 struct npebuf *npe; 833 struct txdone *td, q[NPE_MAX]; 834 uint32_t entry; 835 836 /* XXX no NPE-A support */ 837 q[NPE_B].tail = &q[NPE_B].head; q[NPE_B].count = 0; 838 q[NPE_C].tail = &q[NPE_C].head; q[NPE_C].count = 0; 839 /* XXX max # at a time? */ 840 while (ixpqmgr_qread(qid, &entry) == 0) { 841 sc = npes[NPE_QM_Q_NPE(entry)]; 842 DPRINTF(sc, "%s: entry 0x%x NPE %u port %u\n", 843 __func__, entry, NPE_QM_Q_NPE(entry), NPE_QM_Q_PORT(entry)); 844 #if NRND > 0 845 if (RND_ENABLED(&sc->rnd_source)) 846 rnd_add_uint32(&sc->rnd_source, entry); 847 #endif 848 849 npe = P2V(NPE_QM_Q_ADDR(entry), &sc->txdma); 850 m_freem(npe->ix_m); 851 npe->ix_m = NULL; 852 853 td = &q[NPE_QM_Q_NPE(entry)]; 854 *td->tail = npe; 855 td->tail = &npe->ix_next; 856 td->count++; 857 } 858 859 if (q[NPE_B].count) 860 npe_txdone_finish(npes[NPE_B], &q[NPE_B]); 861 if (q[NPE_C].count) 862 npe_txdone_finish(npes[NPE_C], &q[NPE_C]); 863 #undef P2V 864 } 865 866 static __inline struct mbuf * 867 npe_getcl(void) 868 { 869 struct mbuf *m; 870 871 MGETHDR(m, M_DONTWAIT, MT_DATA); 872 if (m != NULL) { 873 MCLGET(m, M_DONTWAIT); 874 if ((m->m_flags & M_EXT) == 0) { 875 m_freem(m); 876 m = NULL; 877 } 878 } 879 return (m); 880 } 881 882 static int 883 npe_rxbuf_init(struct npe_softc *sc, struct npebuf *npe, struct mbuf *m) 884 { 885 struct npehwbuf *hw; 886 int error; 887 888 if (m == NULL) { 889 m = npe_getcl(); 890 if (m == NULL) 891 return ENOBUFS; 892 } 893 KASSERT(m->m_ext.ext_size >= (NPE_FRAME_SIZE_DEFAULT + ETHER_ALIGN)); 894 m->m_pkthdr.len = m->m_len = NPE_FRAME_SIZE_DEFAULT; 895 /* backload payload and align ip hdr */ 896 m->m_data = m->m_ext.ext_buf + (m->m_ext.ext_size 897 - (NPE_FRAME_SIZE_DEFAULT + ETHER_ALIGN)); 898 error = bus_dmamap_load_mbuf(sc->sc_dt, npe->ix_map, m, 899 BUS_DMA_READ|BUS_DMA_NOWAIT); 900 if (error != 0) { 901 m_freem(m); 902 return error; 903 } 904 hw = npe->ix_hw; 905 hw->ix_ne[0].data = htobe32(npe->ix_map->dm_segs[0].ds_addr); 906 /* NB: NPE requires length be a multiple of 64 */ 907 /* NB: buffer length is shifted in word */ 908 hw->ix_ne[0].len = htobe32(npe->ix_map->dm_segs[0].ds_len << 16); 909 hw->ix_ne[0].next = 0; 910 npe->ix_m = m; 911 /* Flush the memory in the mbuf */ 912 bus_dmamap_sync(sc->sc_dt, npe->ix_map, 0, npe->ix_map->dm_mapsize, 913 BUS_DMASYNC_PREREAD); 914 return 0; 915 } 916 917 /* 918 * RX q processing for a specific NPE. Claim entries 919 * from the hardware queue and pass the frames up the 920 * stack. Pass the rx buffers to the free list. 921 */ 922 static void 923 npe_rxdone(int qid, void *arg) 924 { 925 #define P2V(a, dma) \ 926 &(dma)->buf[((a) - (dma)->buf_phys) / sizeof(struct npehwbuf)] 927 struct npe_softc *sc = arg; 928 struct npedma *dma = &sc->rxdma; 929 uint32_t entry; 930 931 while (ixpqmgr_qread(qid, &entry) == 0) { 932 struct npebuf *npe = P2V(NPE_QM_Q_ADDR(entry), dma); 933 struct mbuf *m; 934 935 DPRINTF(sc, "%s: entry 0x%x neaddr 0x%x ne_len 0x%x\n", 936 __func__, entry, npe->ix_neaddr, npe->ix_hw->ix_ne[0].len); 937 #if NRND > 0 938 if (RND_ENABLED(&sc->rnd_source)) 939 rnd_add_uint32(&sc->rnd_source, entry); 940 #endif 941 /* 942 * Allocate a new mbuf to replenish the rx buffer. 
943 * If doing so fails we drop the rx'd frame so we 944 * can reuse the previous mbuf. When we're able to 945 * allocate a new mbuf dispatch the mbuf w/ rx'd 946 * data up the stack and replace it with the newly 947 * allocated one. 948 */ 949 m = npe_getcl(); 950 if (m != NULL) { 951 struct mbuf *mrx = npe->ix_m; 952 struct npehwbuf *hw = npe->ix_hw; 953 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 954 955 /* Flush mbuf memory for rx'd data */ 956 bus_dmamap_sync(sc->sc_dt, npe->ix_map, 0, 957 npe->ix_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 958 959 /* XXX flush hw buffer; works now 'cuz coherent */ 960 /* set m_len etc. per rx frame size */ 961 mrx->m_len = be32toh(hw->ix_ne[0].len) & 0xffff; 962 mrx->m_pkthdr.len = mrx->m_len; 963 mrx->m_pkthdr.rcvif = ifp; 964 /* Don't add M_HASFCS. See below */ 965 966 #if 1 967 if (mrx->m_pkthdr.len < sizeof(struct ether_header)) { 968 log(LOG_INFO, "%s: too short frame (len=%d)\n", 969 sc->sc_dev.dv_xname, mrx->m_pkthdr.len); 970 /* Back out "newly allocated" mbuf. */ 971 m_freem(m); 972 ifp->if_ierrors++; 973 goto fail; 974 } 975 if ((ifp->if_flags & IFF_PROMISC) == 0) { 976 struct ether_header *eh; 977 978 /* 979 * Workaround for "Non-Intel XScale Technology 980 * Eratta" No. 29. AA:BB:CC:DD:EE:xF's packet 981 * matches the filter (both unicast and 982 * multicast). 983 */ 984 eh = mtod(mrx, struct ether_header *); 985 if (ETHER_IS_MULTICAST(eh->ether_dhost) == 0) { 986 /* unicast */ 987 988 if (sc->sc_enaddr[5] != eh->ether_dhost[5]) { 989 /* discard it */ 990 #if 0 991 printf("discard it\n"); 992 #endif 993 /* 994 * Back out "newly allocated" 995 * mbuf. 996 */ 997 m_freem(m); 998 goto fail; 999 } 1000 } else if (memcmp(eh->ether_dhost, 1001 etherbroadcastaddr, 6) == 0) { 1002 /* Always accept broadcast packet*/ 1003 } else { 1004 struct ethercom *ec = &sc->sc_ethercom; 1005 struct ether_multi *enm; 1006 struct ether_multistep step; 1007 int match = 0; 1008 1009 /* multicast */ 1010 1011 ETHER_FIRST_MULTI(step, ec, enm); 1012 while (enm != NULL) { 1013 uint64_t lowint, highint, dest; 1014 1015 lowint = MAC2UINT64(enm->enm_addrlo); 1016 highint = MAC2UINT64(enm->enm_addrhi); 1017 dest = MAC2UINT64(eh->ether_dhost); 1018 #if 0 1019 printf("%llx\n", lowint); 1020 printf("%llx\n", dest); 1021 printf("%llx\n", highint); 1022 #endif 1023 if ((lowint <= dest) && (dest <= highint)) { 1024 match = 1; 1025 break; 1026 } 1027 ETHER_NEXT_MULTI(step, enm); 1028 } 1029 if (match == 0) { 1030 /* discard it */ 1031 #if 0 1032 printf("discard it(M)\n"); 1033 #endif 1034 /* 1035 * Back out "newly allocated" 1036 * mbuf. 1037 */ 1038 m_freem(m); 1039 goto fail; 1040 } 1041 } 1042 } 1043 if (mrx->m_pkthdr.len > NPE_FRAME_SIZE_DEFAULT) { 1044 log(LOG_INFO, "%s: oversized frame (len=%d)\n", 1045 sc->sc_dev.dv_xname, mrx->m_pkthdr.len); 1046 /* Back out "newly allocated" mbuf. */ 1047 m_freem(m); 1048 ifp->if_ierrors++; 1049 goto fail; 1050 } 1051 #endif 1052 1053 /* 1054 * Trim FCS! 1055 * NPE always adds the FCS by this driver's setting, 1056 * so we always trim it here and not add M_HASFCS. 1057 */ 1058 m_adj(mrx, -ETHER_CRC_LEN); 1059 1060 ifp->if_ipackets++; 1061 #if NBPFILTER > 0 1062 /* 1063 * Tap off here if there is a bpf listener. 
		 */
		if (__predict_false(ifp->if_bpf))
			bpf_mtap(ifp->if_bpf, mrx);
#endif
		ifp->if_input(ifp, mrx);
	} else {
fail:
		/* discard frame and re-use mbuf */
		m = npe->ix_m;
	}
	if (npe_rxbuf_init(sc, npe, m) == 0) {
		/* return npe buf to rx free list */
		ixpqmgr_qwrite(sc->rx_freeqid, npe->ix_neaddr);
	} else {
		/* XXX should not happen */
	}
	}
#undef P2V
}

static void
npe_startxmit(struct npe_softc *sc)
{
	struct npedma *dma = &sc->txdma;
	int i;

	sc->tx_free = NULL;
	for (i = 0; i < dma->nbuf; i++) {
		struct npebuf *npe = &dma->buf[i];
		if (npe->ix_m != NULL) {
			/* NB: should not happen */
			printf("%s: %s: free mbuf at entry %u\n",
			    sc->sc_dev.dv_xname, __func__, i);
			m_freem(npe->ix_m);
		}
		npe->ix_m = NULL;
		npe->ix_next = sc->tx_free;
		sc->tx_free = npe;
	}
}

static void
npe_startrecv(struct npe_softc *sc)
{
	struct npedma *dma = &sc->rxdma;
	struct npebuf *npe;
	int i;

	for (i = 0; i < dma->nbuf; i++) {
		npe = &dma->buf[i];
		npe_rxbuf_init(sc, npe, npe->ix_m);
		/* set npe buf on rx free list */
		ixpqmgr_qwrite(sc->rx_freeqid, npe->ix_neaddr);
	}
}

static void
npeinit_macreg(struct npe_softc *sc)
{

	/*
	 * Reset MAC core.
	 */
	WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_RESET);
	DELAY(NPE_MAC_RESET_DELAY);
	/* configure MAC to generate MDC clock */
	WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_MDC_EN);

	/* disable transmitter and receiver in the MAC */
	WR4(sc, NPE_MAC_RX_CNTRL1,
	    RD4(sc, NPE_MAC_RX_CNTRL1) &~ NPE_RX_CNTRL1_RX_EN);
	WR4(sc, NPE_MAC_TX_CNTRL1,
	    RD4(sc, NPE_MAC_TX_CNTRL1) &~ NPE_TX_CNTRL1_TX_EN);

	/*
	 * Set the MAC core registers.
	 */
	WR4(sc, NPE_MAC_INT_CLK_THRESH, 0x1);	/* clock ratio: for ixp4xx */
	WR4(sc, NPE_MAC_TX_CNTRL2,	0xf);	/* max retries */
	WR4(sc, NPE_MAC_RANDOM_SEED,	0x8);	/* LFSR back-off seed */
	/* thresholds determined by NPE firmware FS */
	WR4(sc, NPE_MAC_THRESH_P_EMPTY,	0x12);
	WR4(sc, NPE_MAC_THRESH_P_FULL,	0x30);
	WR4(sc, NPE_MAC_BUF_SIZE_TX, NPE_MAC_BUF_SIZE_TX_DEFAULT);
						/* tx fifo threshold (bytes) */
	WR4(sc, NPE_MAC_TX_DEFER,	0x15);	/* for single deferral */
	WR4(sc, NPE_MAC_RX_DEFER,	0x16);	/* deferral on inter-frame gap */
	WR4(sc, NPE_MAC_TX_TWO_DEFER_1,	0x8);	/* for 2-part deferral */
	WR4(sc, NPE_MAC_TX_TWO_DEFER_2,	0x7);	/* for 2-part deferral */
	WR4(sc, NPE_MAC_SLOT_TIME, NPE_MAC_SLOT_TIME_MII_DEFAULT);
						/* assumes MII mode */
	WR4(sc, NPE_MAC_TX_CNTRL1,
	    NPE_TX_CNTRL1_RETRY		/* retry failed xmits */
	    | NPE_TX_CNTRL1_FCS_EN	/* append FCS */
	    | NPE_TX_CNTRL1_2DEFER	/* 2-part deferral */
	    | NPE_TX_CNTRL1_PAD_EN);	/* pad runt frames */
	/* XXX pad strip?
*/ 1161 WR4(sc, NPE_MAC_RX_CNTRL1, 1162 NPE_RX_CNTRL1_CRC_EN /* include CRC/FCS */ 1163 | NPE_RX_CNTRL1_PAUSE_EN); /* ena pause frame handling */ 1164 WR4(sc, NPE_MAC_RX_CNTRL2, 0); 1165 } 1166 1167 static void 1168 npeinit_resetcb(void *xsc) 1169 { 1170 struct npe_softc *sc = xsc; 1171 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1172 uint32_t msg[2]; 1173 1174 ifp->if_oerrors++; 1175 npeinit_locked(sc); 1176 1177 msg[0] = NPE_NOTIFYMACRECOVERYDONE << NPE_MAC_MSGID_SHL 1178 | (npeconfig[sc->sc_unit].macport << NPE_MAC_PORTID_SHL); 1179 msg[1] = 0; 1180 ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg); 1181 } 1182 1183 /* 1184 * Reset and initialize the chip 1185 */ 1186 static void 1187 npeinit_locked(void *xsc) 1188 { 1189 struct npe_softc *sc = xsc; 1190 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1191 1192 /* Cancel any pending I/O. */ 1193 npestop(ifp, 0); 1194 1195 /* Reset the chip to a known state. */ 1196 npeinit_macreg(sc); 1197 npe_setmac(sc, CLLADDR(ifp->if_sadl)); 1198 ether_mediachange(ifp); 1199 npe_setmcast(sc); 1200 1201 npe_startxmit(sc); 1202 npe_startrecv(sc); 1203 1204 ifp->if_flags |= IFF_RUNNING; 1205 ifp->if_flags &= ~IFF_OACTIVE; 1206 ifp->if_timer = 0; /* just in case */ 1207 1208 /* enable transmitter and reciver in the MAC */ 1209 WR4(sc, NPE_MAC_RX_CNTRL1, 1210 RD4(sc, NPE_MAC_RX_CNTRL1) | NPE_RX_CNTRL1_RX_EN); 1211 WR4(sc, NPE_MAC_TX_CNTRL1, 1212 RD4(sc, NPE_MAC_TX_CNTRL1) | NPE_TX_CNTRL1_TX_EN); 1213 1214 callout_reset(&sc->sc_tick_ch, hz, npe_tick, sc); 1215 } 1216 1217 static int 1218 npeinit(struct ifnet *ifp) 1219 { 1220 struct npe_softc *sc = ifp->if_softc; 1221 int s; 1222 1223 s = splnet(); 1224 npeinit_locked(sc); 1225 splx(s); 1226 1227 return (0); 1228 } 1229 1230 /* 1231 * Defragment an mbuf chain, returning at most maxfrags separate 1232 * mbufs+clusters. If this is not possible NULL is returned and 1233 * the original mbuf chain is left in it's present (potentially 1234 * modified) state. We use two techniques: collapsing consecutive 1235 * mbufs and replacing consecutive mbufs by a cluster. 1236 */ 1237 static __inline struct mbuf * 1238 npe_defrag(struct mbuf *m0) 1239 { 1240 struct mbuf *m; 1241 1242 MGETHDR(m, M_DONTWAIT, MT_DATA); 1243 if (m == NULL) 1244 return (NULL); 1245 M_COPY_PKTHDR(m, m0); 1246 1247 if ((m->m_len = m0->m_pkthdr.len) > MHLEN) { 1248 MCLGET(m, M_DONTWAIT); 1249 if ((m->m_flags & M_EXT) == 0) { 1250 m_freem(m); 1251 return (NULL); 1252 } 1253 } 1254 1255 m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *)); 1256 m_freem(m0); 1257 1258 return (m); 1259 } 1260 1261 /* 1262 * Dequeue packets and place on the h/w transmit queue. 1263 */ 1264 static void 1265 npestart(struct ifnet *ifp) 1266 { 1267 struct npe_softc *sc = ifp->if_softc; 1268 struct npebuf *npe; 1269 struct npehwbuf *hw; 1270 struct mbuf *m, *n; 1271 bus_dma_segment_t *segs; 1272 int nseg, len, error, i; 1273 uint32_t next; 1274 1275 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING) 1276 return; 1277 1278 while (sc->tx_free != NULL) { 1279 IFQ_DEQUEUE(&ifp->if_snd, m); 1280 if (m == NULL) 1281 break; 1282 npe = sc->tx_free; 1283 error = bus_dmamap_load_mbuf(sc->sc_dt, npe->ix_map, m, 1284 BUS_DMA_WRITE|BUS_DMA_NOWAIT); 1285 if (error == EFBIG) { 1286 n = npe_defrag(m); 1287 if (n == NULL) { 1288 printf("%s: %s: too many fragments\n", 1289 sc->sc_dev.dv_xname, __func__); 1290 m_freem(m); 1291 return; /* XXX? 
*/ 1292 } 1293 m = n; 1294 error = bus_dmamap_load_mbuf(sc->sc_dt, npe->ix_map, 1295 m, BUS_DMA_WRITE|BUS_DMA_NOWAIT); 1296 } 1297 if (error != 0) { 1298 printf("%s: %s: error %u\n", 1299 sc->sc_dev.dv_xname, __func__, error); 1300 m_freem(m); 1301 return; /* XXX? */ 1302 } 1303 sc->tx_free = npe->ix_next; 1304 1305 #if NBPFILTER > 0 1306 /* 1307 * Tap off here if there is a bpf listener. 1308 */ 1309 if (__predict_false(ifp->if_bpf)) 1310 bpf_mtap(ifp->if_bpf, m); 1311 #endif 1312 1313 bus_dmamap_sync(sc->sc_dt, npe->ix_map, 0, 1314 npe->ix_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 1315 1316 npe->ix_m = m; 1317 hw = npe->ix_hw; 1318 len = m->m_pkthdr.len; 1319 nseg = npe->ix_map->dm_nsegs; 1320 segs = npe->ix_map->dm_segs; 1321 next = npe->ix_neaddr + sizeof(hw->ix_ne[0]); 1322 for (i = 0; i < nseg; i++) { 1323 hw->ix_ne[i].data = htobe32(segs[i].ds_addr); 1324 hw->ix_ne[i].len = htobe32((segs[i].ds_len<<16) | len); 1325 hw->ix_ne[i].next = htobe32(next); 1326 1327 len = 0; /* zero for segments > 1 */ 1328 next += sizeof(hw->ix_ne[0]); 1329 } 1330 hw->ix_ne[i-1].next = 0; /* zero last in chain */ 1331 /* XXX flush descriptor instead of using uncached memory */ 1332 1333 DPRINTF(sc, "%s: qwrite(%u, 0x%x) ne_data %x ne_len 0x%x\n", 1334 __func__, sc->tx_qid, npe->ix_neaddr, 1335 hw->ix_ne[0].data, hw->ix_ne[0].len); 1336 /* stick it on the tx q */ 1337 /* XXX add vlan priority */ 1338 ixpqmgr_qwrite(sc->tx_qid, npe->ix_neaddr); 1339 1340 ifp->if_timer = 5; 1341 } 1342 if (sc->tx_free == NULL) 1343 ifp->if_flags |= IFF_OACTIVE; 1344 } 1345 1346 static void 1347 npe_stopxmit(struct npe_softc *sc) 1348 { 1349 struct npedma *dma = &sc->txdma; 1350 int i; 1351 1352 /* XXX qmgr */ 1353 for (i = 0; i < dma->nbuf; i++) { 1354 struct npebuf *npe = &dma->buf[i]; 1355 1356 if (npe->ix_m != NULL) { 1357 bus_dmamap_unload(sc->sc_dt, npe->ix_map); 1358 m_freem(npe->ix_m); 1359 npe->ix_m = NULL; 1360 } 1361 } 1362 } 1363 1364 static void 1365 npe_stoprecv(struct npe_softc *sc) 1366 { 1367 struct npedma *dma = &sc->rxdma; 1368 int i; 1369 1370 /* XXX qmgr */ 1371 for (i = 0; i < dma->nbuf; i++) { 1372 struct npebuf *npe = &dma->buf[i]; 1373 1374 if (npe->ix_m != NULL) { 1375 bus_dmamap_unload(sc->sc_dt, npe->ix_map); 1376 m_freem(npe->ix_m); 1377 npe->ix_m = NULL; 1378 } 1379 } 1380 } 1381 1382 /* 1383 * Turn off interrupts, and stop the nic. 1384 */ 1385 void 1386 npestop(struct ifnet *ifp, int disable) 1387 { 1388 struct npe_softc *sc = ifp->if_softc; 1389 1390 /* disable transmitter and reciver in the MAC */ 1391 WR4(sc, NPE_MAC_RX_CNTRL1, 1392 RD4(sc, NPE_MAC_RX_CNTRL1) &~ NPE_RX_CNTRL1_RX_EN); 1393 WR4(sc, NPE_MAC_TX_CNTRL1, 1394 RD4(sc, NPE_MAC_TX_CNTRL1) &~ NPE_TX_CNTRL1_TX_EN); 1395 1396 callout_stop(&sc->sc_tick_ch); 1397 1398 npe_stopxmit(sc); 1399 npe_stoprecv(sc); 1400 /* XXX go into loopback & drain q's? */ 1401 /* XXX but beware of disabling tx above */ 1402 1403 /* 1404 * The MAC core rx/tx disable may leave the MAC hardware in an 1405 * unpredictable state. A hw reset is executed before resetting 1406 * all the MAC parameters to a known value. 
	 */
	WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_RESET);
	DELAY(NPE_MAC_RESET_DELAY);
	WR4(sc, NPE_MAC_INT_CLK_THRESH, NPE_MAC_INT_CLK_THRESH_DEFAULT);
	WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_MDC_EN);

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}

void
npewatchdog(struct ifnet *ifp)
{
	struct npe_softc *sc = ifp->if_softc;
	int s;

	printf("%s: device timeout\n", sc->sc_dev.dv_xname);
	s = splnet();
	ifp->if_oerrors++;
	npeinit_locked(sc);
	splx(s);
}

static int
npeioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct npe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
#if 0 /* not yet */
		/* Flow control requires full-duplex mode. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
		    (ifr->ifr_media & IFM_FDX) == 0)
			ifr->ifr_media &= ~IFM_ETH_FMASK;
		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				/* We can do both TXPAUSE and RXPAUSE. */
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
		}
#endif
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	case SIOCSIFFLAGS:
		if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) == IFF_RUNNING) {
			/*
			 * If interface is marked down and it is running,
			 * then stop and disable it.
			 */
			(*ifp->if_stop)(ifp, 1);
		} else if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) == IFF_UP) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			error = (*ifp->if_init)(ifp);
		} else if ((ifp->if_flags & IFF_UP) != 0) {
			int diff;

			/* Up (AND RUNNING). */

			diff = (ifp->if_flags ^ sc->sc_if_flags)
			    & (IFF_PROMISC|IFF_ALLMULTI);
			if ((diff & (IFF_PROMISC|IFF_ALLMULTI)) != 0) {
				/*
				 * If the only difference between the last
				 * and new flags is IFF_PROMISC or
				 * IFF_ALLMULTI, just update the multicast
				 * filter (don't reset the interface, to
				 * avoid dropping the link).
				 */
				npe_setmcast(sc);
			} else {
				/*
				 * Reset the interface to pick up changes in
				 * any other flags that affect the hardware
				 * state.
				 */
				error = (*ifp->if_init)(ifp);
			}
		}
		sc->sc_if_flags = ifp->if_flags;
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			npe_setmcast(sc);
			error = 0;
		}
	}

	npestart(ifp);

	splx(s);
	return error;
}

/*
 * Set up a traffic class -> rx queue mapping.
 */
static int
npe_setrxqosentry(struct npe_softc *sc, int classix, int trafclass, int qid)
{
	int npeid = npeconfig[sc->sc_unit].npeid;
	uint32_t msg[2];

	msg[0] = (NPE_SETRXQOSENTRY << NPE_MAC_MSGID_SHL) | (npeid << 20)
	    | classix;
	msg[1] = (trafclass << 24) | (1 << 23) | (qid << 16) | (qid << 4);
	return ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg);
}

/*
 * Update and reset the statistics in the NPE.
1532 */ 1533 static int 1534 npe_updatestats(struct npe_softc *sc) 1535 { 1536 uint32_t msg[2]; 1537 1538 msg[0] = NPE_RESETSTATS << NPE_MAC_MSGID_SHL; 1539 msg[1] = sc->sc_stats_phys; /* physical address of stat block */ 1540 return ixpnpe_sendmsg(sc->sc_npe, msg); /* NB: no recv */ 1541 } 1542 1543 #if 0 1544 /* 1545 * Get the current statistics block. 1546 */ 1547 static int 1548 npe_getstats(struct npe_softc *sc) 1549 { 1550 uint32_t msg[2]; 1551 1552 msg[0] = NPE_GETSTATS << NPE_MAC_MSGID_SHL; 1553 msg[1] = sc->sc_stats_phys; /* physical address of stat block */ 1554 return ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg); 1555 } 1556 1557 /* 1558 * Query the image id of the loaded firmware. 1559 */ 1560 static uint32_t 1561 npe_getimageid(struct npe_softc *sc) 1562 { 1563 uint32_t msg[2]; 1564 1565 msg[0] = NPE_GETSTATUS << NPE_MAC_MSGID_SHL; 1566 msg[1] = 0; 1567 return ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg) == 0 ? msg[1] : 0; 1568 } 1569 1570 /* 1571 * Enable/disable loopback. 1572 */ 1573 static int 1574 npe_setloopback(struct npe_softc *sc, int ena) 1575 { 1576 uint32_t msg[2]; 1577 1578 msg[0] = (NPE_SETLOOPBACK << NPE_MAC_MSGID_SHL) | (ena != 0); 1579 msg[1] = 0; 1580 return ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg); 1581 } 1582 #endif 1583 1584 /* 1585 * MII bus support routines. 1586 * 1587 * NB: ixp425 has one PHY per NPE 1588 */ 1589 static uint32_t 1590 npe_mii_mdio_read(struct npe_softc *sc, int reg) 1591 { 1592 #define MII_RD4(sc, reg) bus_space_read_4(sc->sc_iot, sc->sc_miih, reg) 1593 uint32_t v; 1594 1595 /* NB: registers are known to be sequential */ 1596 v = (MII_RD4(sc, reg+0) & 0xff) << 0; 1597 v |= (MII_RD4(sc, reg+4) & 0xff) << 8; 1598 v |= (MII_RD4(sc, reg+8) & 0xff) << 16; 1599 v |= (MII_RD4(sc, reg+12) & 0xff) << 24; 1600 return v; 1601 #undef MII_RD4 1602 } 1603 1604 static void 1605 npe_mii_mdio_write(struct npe_softc *sc, int reg, uint32_t cmd) 1606 { 1607 #define MII_WR4(sc, reg, v) \ 1608 bus_space_write_4(sc->sc_iot, sc->sc_miih, reg, v) 1609 1610 /* NB: registers are known to be sequential */ 1611 MII_WR4(sc, reg+0, cmd & 0xff); 1612 MII_WR4(sc, reg+4, (cmd >> 8) & 0xff); 1613 MII_WR4(sc, reg+8, (cmd >> 16) & 0xff); 1614 MII_WR4(sc, reg+12, (cmd >> 24) & 0xff); 1615 #undef MII_WR4 1616 } 1617 1618 static int 1619 npe_mii_mdio_wait(struct npe_softc *sc) 1620 { 1621 #define MAXTRIES 100 /* XXX */ 1622 uint32_t v; 1623 int i; 1624 1625 for (i = 0; i < MAXTRIES; i++) { 1626 v = npe_mii_mdio_read(sc, NPE_MAC_MDIO_CMD); 1627 if ((v & NPE_MII_GO) == 0) 1628 return 1; 1629 } 1630 return 0; /* NB: timeout */ 1631 #undef MAXTRIES 1632 } 1633 1634 static int 1635 npe_miibus_readreg(struct device *self, int phy, int reg) 1636 { 1637 struct npe_softc *sc = (void *)self; 1638 uint32_t v; 1639 1640 if (sc->sc_phy > IXPNPECF_PHY_DEFAULT && phy != sc->sc_phy) 1641 return 0xffff; 1642 v = (phy << NPE_MII_ADDR_SHL) | (reg << NPE_MII_REG_SHL) 1643 | NPE_MII_GO; 1644 npe_mii_mdio_write(sc, NPE_MAC_MDIO_CMD, v); 1645 if (npe_mii_mdio_wait(sc)) 1646 v = npe_mii_mdio_read(sc, NPE_MAC_MDIO_STS); 1647 else 1648 v = 0xffff | NPE_MII_READ_FAIL; 1649 return (v & NPE_MII_READ_FAIL) ? 
	    0xffff : (v & 0xffff);
}

static void
npe_miibus_writereg(struct device *self, int phy, int reg, int data)
{
	struct npe_softc *sc = (void *)self;
	uint32_t v;

	if (sc->sc_phy > IXPNPECF_PHY_DEFAULT && phy != sc->sc_phy)
		return;
	v = (phy << NPE_MII_ADDR_SHL) | (reg << NPE_MII_REG_SHL)
	    | data | NPE_MII_WRITE
	    | NPE_MII_GO;
	npe_mii_mdio_write(sc, NPE_MAC_MDIO_CMD, v);
	/* XXX complain about timeout */
	(void) npe_mii_mdio_wait(sc);
}

static void
npe_miibus_statchg(struct device *self)
{
	struct npe_softc *sc = (void *)self;
	uint32_t tx1, rx1;
	uint32_t randoff;

	/* sync MAC duplex state */
	tx1 = RD4(sc, NPE_MAC_TX_CNTRL1);
	rx1 = RD4(sc, NPE_MAC_RX_CNTRL1);
	if (sc->sc_mii.mii_media_active & IFM_FDX) {
		WR4(sc, NPE_MAC_SLOT_TIME, NPE_MAC_SLOT_TIME_MII_DEFAULT);
		tx1 &= ~NPE_TX_CNTRL1_DUPLEX;
		rx1 |= NPE_RX_CNTRL1_PAUSE_EN;
	} else {
		struct timeval now;
		getmicrotime(&now);
		randoff = (RD4(sc, NPE_MAC_UNI_ADDR_6) ^ now.tv_usec)
		    & 0x7f;
		WR4(sc, NPE_MAC_SLOT_TIME, NPE_MAC_SLOT_TIME_MII_DEFAULT
		    + randoff);
		tx1 |= NPE_TX_CNTRL1_DUPLEX;
		rx1 &= ~NPE_RX_CNTRL1_PAUSE_EN;
	}
	WR4(sc, NPE_MAC_RX_CNTRL1, rx1);
	WR4(sc, NPE_MAC_TX_CNTRL1, tx1);
}
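
/*
 * Example: supplying MAC addresses from board-specific code.
 *
 * The npe_getmac_md hook declared above lets platform code hand the
 * driver a MAC address per unit when the MAC's unicast address registers
 * have not been pre-loaded (e.g. by a bootloader).  The sketch below is
 * illustrative only and is kept disabled; the board_read_macaddr()
 * helper and the idea of fetching the address from board storage are
 * assumptions, not part of this driver.
 */
#if 0
/* hypothetical helper provided elsewhere by board support code */
extern int board_read_macaddr(int unit, uint8_t eaddr[ETHER_ADDR_LEN]);

static void
examplemac_getmac(int unit, uint8_t *eaddr)
{
	if (board_read_macaddr(unit, eaddr) != 0) {
		/* fall back to a locally administered placeholder */
		eaddr[0] = 0x02;
		eaddr[1] = eaddr[2] = eaddr[3] = eaddr[4] = 0x00;
		eaddr[5] = unit;
	}
}

/* board attach code would register the hook before npe(4) attaches */
void
examplemac_setup(void)
{
	npe_getmac_md = examplemac_getmac;
}
#endif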