1 /* $NetBSD: ixp425_if_npe.c,v 1.25 2014/03/20 06:48:54 skrll Exp $ */ 2 3 /*- 4 * Copyright (c) 2006 Sam Leffler. All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 */ 26 27 #include <sys/cdefs.h> 28 #if 0 29 __FBSDID("$FreeBSD: src/sys/arm/xscale/ixp425/if_npe.c,v 1.1 2006/11/19 23:55:23 sam Exp $"); 30 #endif 31 __KERNEL_RCSID(0, "$NetBSD: ixp425_if_npe.c,v 1.25 2014/03/20 06:48:54 skrll Exp $"); 32 33 /* 34 * Intel XScale NPE Ethernet driver. 35 * 36 * This driver handles the two ports present on the IXP425. 37 * Packet processing is done by the Network Processing Engines 38 * (NPE's) that work together with a MAC and PHY. The MAC 39 * is also mapped to the XScale cpu; the PHY is accessed via 40 * the MAC. NPE-XScale communication happens through h/w 41 * queues managed by the Q Manager block. 42 * 43 * The code here replaces the ethAcc, ethMii, and ethDB classes 44 * in the Intel Access Library (IAL) and the OS-specific driver. 
45 * 46 * XXX add vlan support 47 * XXX NPE-C port doesn't work yet 48 */ 49 50 #include <sys/param.h> 51 #include <sys/systm.h> 52 #include <sys/kernel.h> 53 #include <sys/device.h> 54 #include <sys/callout.h> 55 #include <sys/mbuf.h> 56 #include <sys/malloc.h> 57 #include <sys/socket.h> 58 #include <sys/endian.h> 59 #include <sys/ioctl.h> 60 #include <sys/syslog.h> 61 62 #include <sys/bus.h> 63 64 #include <net/if.h> 65 #include <net/if_dl.h> 66 #include <net/if_media.h> 67 #include <net/if_ether.h> 68 69 #include <net/bpf.h> 70 71 #include <sys/rnd.h> 72 73 #include <arm/xscale/ixp425reg.h> 74 #include <arm/xscale/ixp425var.h> 75 #include <arm/xscale/ixp425_qmgr.h> 76 #include <arm/xscale/ixp425_npevar.h> 77 #include <arm/xscale/ixp425_if_npereg.h> 78 79 #include <dev/mii/miivar.h> 80 81 #include "locators.h" 82 83 struct npebuf { 84 struct npebuf *ix_next; /* chain to next buffer */ 85 void *ix_m; /* backpointer to mbuf */ 86 bus_dmamap_t ix_map; /* bus dma map for associated data */ 87 struct npehwbuf *ix_hw; /* associated h/w block */ 88 uint32_t ix_neaddr; /* phys address of ix_hw */ 89 }; 90 91 struct npedma { 92 const char* name; 93 int nbuf; /* # npebuf's allocated */ 94 bus_dmamap_t m_map; 95 struct npehwbuf *hwbuf; /* NPE h/w buffers */ 96 bus_dmamap_t buf_map; 97 bus_addr_t buf_phys; /* phys addr of buffers */ 98 struct npebuf *buf; /* s/w buffers (1-1 w/ h/w) */ 99 }; 100 101 struct npe_softc { 102 device_t sc_dev; 103 struct ethercom sc_ethercom; 104 uint8_t sc_enaddr[ETHER_ADDR_LEN]; 105 struct mii_data sc_mii; 106 bus_space_tag_t sc_iot; 107 bus_dma_tag_t sc_dt; 108 bus_space_handle_t sc_ioh; /* MAC register window */ 109 bus_space_handle_t sc_miih; /* MII register window */ 110 struct ixpnpe_softc *sc_npe; /* NPE support */ 111 int sc_unit; 112 int sc_phy; 113 struct callout sc_tick_ch; /* Tick callout */ 114 struct npedma txdma; 115 struct npebuf *tx_free; /* list of free tx buffers */ 116 struct npedma rxdma; 117 int rx_qid; /* rx qid */ 118 int rx_freeqid; /* rx free buffers qid */ 119 int tx_qid; /* tx qid */ 120 int tx_doneqid; /* tx completed qid */ 121 struct npestats *sc_stats; 122 bus_dmamap_t sc_stats_map; 123 bus_addr_t sc_stats_phys; /* phys addr of sc_stats */ 124 int sc_if_flags; /* keep last if_flags */ 125 krndsource_t rnd_source; /* random source */ 126 }; 127 128 /* 129 * Per-unit static configuration for IXP425. The tx and 130 * rx free Q id's are fixed by the NPE microcode. The 131 * rx Q id's are programmed to be separate to simplify 132 * multi-port processing. It may be better to handle 133 * all traffic through one Q (as done by the Intel drivers). 134 * 135 * Note that the PHY's are accessible only from MAC A 136 * on the IXP425. This and other platform-specific 137 * assumptions probably need to be handled through hints. 
138 */ 139 static const struct { 140 const char *desc; /* device description */ 141 int npeid; /* NPE assignment */ 142 int macport; /* Port number of the MAC */ 143 uint32_t imageid; /* NPE firmware image id */ 144 uint32_t regbase; 145 int regsize; 146 uint32_t miibase; 147 int miisize; 148 uint8_t rx_qid; 149 uint8_t rx_freeqid; 150 uint8_t tx_qid; 151 uint8_t tx_doneqid; 152 } npeconfig[NPE_PORTS_MAX] = { 153 { .desc = "IXP NPE-B", 154 .npeid = NPE_B, 155 .macport = 0x10, 156 .imageid = IXP425_NPE_B_IMAGEID, 157 .regbase = IXP425_MAC_A_HWBASE, 158 .regsize = IXP425_MAC_A_SIZE, 159 .miibase = IXP425_MAC_A_HWBASE, 160 .miisize = IXP425_MAC_A_SIZE, 161 .rx_qid = 4, 162 .rx_freeqid = 27, 163 .tx_qid = 24, 164 .tx_doneqid = 31 165 }, 166 { .desc = "IXP NPE-C", 167 .npeid = NPE_C, 168 .macport = 0x20, 169 .imageid = IXP425_NPE_C_IMAGEID, 170 .regbase = IXP425_MAC_B_HWBASE, 171 .regsize = IXP425_MAC_B_SIZE, 172 .miibase = IXP425_MAC_A_HWBASE, 173 .miisize = IXP425_MAC_A_SIZE, 174 .rx_qid = 12, 175 .rx_freeqid = 28, 176 .tx_qid = 25, 177 .tx_doneqid = 31 178 }, 179 }; 180 static struct npe_softc *npes[NPE_MAX]; /* NB: indexed by npeid */ 181 182 static __inline uint32_t 183 RD4(struct npe_softc *sc, bus_size_t off) 184 { 185 return bus_space_read_4(sc->sc_iot, sc->sc_ioh, off); 186 } 187 188 static __inline void 189 WR4(struct npe_softc *sc, bus_size_t off, uint32_t val) 190 { 191 bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val); 192 } 193 194 static int npe_activate(struct npe_softc *); 195 #if 0 196 static void npe_deactivate(struct npe_softc *); 197 #endif 198 static void npe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr); 199 static void npe_setmac(struct npe_softc *sc, const u_char *eaddr); 200 static void npe_getmac(struct npe_softc *sc); 201 static void npe_txdone(int qid, void *arg); 202 static int npe_rxbuf_init(struct npe_softc *, struct npebuf *, 203 struct mbuf *); 204 static void npe_rxdone(int qid, void *arg); 205 static void npeinit_macreg(struct npe_softc *); 206 static int npeinit(struct ifnet *); 207 static void npeinit_resetcb(void *); 208 static void npeinit_locked(void *); 209 static void npestart(struct ifnet *); 210 static void npestop(struct ifnet *, int); 211 static void npewatchdog(struct ifnet *); 212 static int npeioctl(struct ifnet * ifp, u_long, void *); 213 214 static int npe_setrxqosentry(struct npe_softc *, int classix, 215 int trafclass, int qid); 216 static int npe_updatestats(struct npe_softc *); 217 #if 0 218 static int npe_getstats(struct npe_softc *); 219 static uint32_t npe_getimageid(struct npe_softc *); 220 static int npe_setloopback(struct npe_softc *, int ena); 221 #endif 222 223 static int npe_miibus_readreg(device_t, int, int); 224 static void npe_miibus_writereg(device_t, int, int, int); 225 static void npe_miibus_statchg(struct ifnet *); 226 227 static int npe_debug; 228 #define DPRINTF(sc, fmt, ...) do { \ 229 if (npe_debug) printf(fmt, __VA_ARGS__); \ 230 } while (0) 231 #define DPRINTFn(n, sc, fmt, ...) 
do { \ 232 if (npe_debug >= n) printf(fmt, __VA_ARGS__); \ 233 } while (0) 234 235 #define NPE_TXBUF 128 236 #define NPE_RXBUF 64 237 238 #ifndef ETHER_ALIGN 239 #define ETHER_ALIGN 2 /* XXX: Ditch this */ 240 #endif 241 242 #define MAC2UINT64(addr) (((uint64_t)addr[0] << 40) \ 243 + ((uint64_t)addr[1] << 32) \ 244 + ((uint64_t)addr[2] << 24) \ 245 + ((uint64_t)addr[3] << 16) \ 246 + ((uint64_t)addr[4] << 8) \ 247 + (uint64_t)addr[5]) 248 249 /* NB: all tx done processing goes through one queue */ 250 static int tx_doneqid = -1; 251 252 void (*npe_getmac_md)(int, uint8_t *); 253 254 static int npe_match(device_t, cfdata_t, void *); 255 static void npe_attach(device_t, device_t, void *); 256 257 CFATTACH_DECL_NEW(npe, sizeof(struct npe_softc), 258 npe_match, npe_attach, NULL, NULL); 259 260 static int 261 npe_match(device_t parent, cfdata_t cf, void *arg) 262 { 263 struct ixpnpe_attach_args *na = arg; 264 265 return (na->na_unit == NPE_B || na->na_unit == NPE_C); 266 } 267 268 static void 269 npe_attach(device_t parent, device_t self, void *arg) 270 { 271 struct npe_softc *sc = device_private(self); 272 struct ixpnpe_softc *isc = device_private(parent); 273 struct ixpnpe_attach_args *na = arg; 274 struct ifnet *ifp; 275 276 aprint_naive("\n"); 277 aprint_normal(": Ethernet co-processor\n"); 278 279 sc->sc_dev = self; 280 sc->sc_iot = na->na_iot; 281 sc->sc_dt = na->na_dt; 282 sc->sc_npe = na->na_npe; 283 sc->sc_unit = (na->na_unit == NPE_B) ? 0 : 1; 284 sc->sc_phy = na->na_phy; 285 286 memset(&sc->sc_ethercom, 0, sizeof(sc->sc_ethercom)); 287 memset(&sc->sc_mii, 0, sizeof(sc->sc_mii)); 288 289 callout_init(&sc->sc_tick_ch, 0); 290 291 if (npe_activate(sc)) { 292 aprint_error_dev(sc->sc_dev, 293 "Failed to activate NPE (missing microcode?)\n"); 294 return; 295 } 296 297 npe_getmac(sc); 298 npeinit_macreg(sc); 299 300 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n", 301 ether_sprintf(sc->sc_enaddr)); 302 303 ifp = &sc->sc_ethercom.ec_if; 304 sc->sc_mii.mii_ifp = ifp; 305 sc->sc_mii.mii_readreg = npe_miibus_readreg; 306 sc->sc_mii.mii_writereg = npe_miibus_writereg; 307 sc->sc_mii.mii_statchg = npe_miibus_statchg; 308 sc->sc_ethercom.ec_mii = &sc->sc_mii; 309 310 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, ether_mediachange, 311 npe_ifmedia_status); 312 313 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 314 MII_OFFSET_ANY, MIIF_DOPAUSE); 315 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) { 316 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL); 317 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE); 318 } else 319 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); 320 321 ifp->if_softc = sc; 322 strcpy(ifp->if_xname, device_xname(sc->sc_dev)); 323 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 324 ifp->if_start = npestart; 325 ifp->if_ioctl = npeioctl; 326 ifp->if_watchdog = npewatchdog; 327 ifp->if_init = npeinit; 328 ifp->if_stop = npestop; 329 IFQ_SET_READY(&ifp->if_snd); 330 331 /* VLAN capable */ 332 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU; 333 334 if_attach(ifp); 335 ether_ifattach(ifp, sc->sc_enaddr); 336 rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev), 337 RND_TYPE_NET, 0); 338 339 /* callback function to reset MAC */ 340 isc->macresetcbfunc = npeinit_resetcb; 341 isc->macresetcbarg = sc; 342 } 343 344 /* 345 * Compute and install the multicast filter. 
346 */ 347 static void 348 npe_setmcast(struct npe_softc *sc) 349 { 350 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 351 uint8_t mask[ETHER_ADDR_LEN], addr[ETHER_ADDR_LEN]; 352 uint32_t reg; 353 uint32_t msg[2]; 354 int i; 355 356 /* Always use filter. Is here a correct position? */ 357 reg = RD4(sc, NPE_MAC_RX_CNTRL1); 358 WR4(sc, NPE_MAC_RX_CNTRL1, reg | NPE_RX_CNTRL1_ADDR_FLTR_EN); 359 360 if (ifp->if_flags & IFF_PROMISC) { 361 memset(mask, 0, ETHER_ADDR_LEN); 362 memset(addr, 0, ETHER_ADDR_LEN); 363 } else if (ifp->if_flags & IFF_ALLMULTI) { 364 static const uint8_t allmulti[ETHER_ADDR_LEN] = 365 { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 }; 366 all_multi: 367 memcpy(mask, allmulti, ETHER_ADDR_LEN); 368 memcpy(addr, allmulti, ETHER_ADDR_LEN); 369 } else { 370 uint8_t clr[ETHER_ADDR_LEN], set[ETHER_ADDR_LEN]; 371 struct ether_multistep step; 372 struct ether_multi *enm; 373 374 memset(clr, 0, ETHER_ADDR_LEN); 375 memset(set, 0xff, ETHER_ADDR_LEN); 376 377 ETHER_FIRST_MULTI(step, &sc->sc_ethercom, enm); 378 while (enm != NULL) { 379 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 380 ifp->if_flags |= IFF_ALLMULTI; 381 goto all_multi; 382 } 383 384 for (i = 0; i < ETHER_ADDR_LEN; i++) { 385 clr[i] |= enm->enm_addrlo[i]; 386 set[i] &= enm->enm_addrlo[i]; 387 } 388 389 ETHER_NEXT_MULTI(step, enm); 390 } 391 392 for (i = 0; i < ETHER_ADDR_LEN; i++) { 393 mask[i] = set[i] | ~clr[i]; 394 addr[i] = set[i]; 395 } 396 } 397 398 /* 399 * Write the mask and address registers. 400 */ 401 for (i = 0; i < ETHER_ADDR_LEN; i++) { 402 WR4(sc, NPE_MAC_ADDR_MASK(i), mask[i]); 403 WR4(sc, NPE_MAC_ADDR(i), addr[i]); 404 } 405 406 msg[0] = NPE_ADDRESSFILTERCONFIG << NPE_MAC_MSGID_SHL 407 | (npeconfig[sc->sc_unit].macport << NPE_MAC_PORTID_SHL); 408 msg[1] = ((ifp->if_flags & IFF_PROMISC) ? 
1 : 0) << 24 409 | ((RD4(sc, NPE_MAC_UNI_ADDR_6) & 0xff) << 16) 410 | (addr[5] << 8) | mask[5]; 411 ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg); 412 } 413 414 static int 415 npe_dma_setup(struct npe_softc *sc, struct npedma *dma, 416 const char *name, int nbuf, int maxseg) 417 { 418 bus_dma_segment_t seg; 419 int rseg, error, i; 420 void *hwbuf; 421 size_t size; 422 423 memset(dma, 0, sizeof(*dma)); 424 425 dma->name = name; 426 dma->nbuf = nbuf; 427 428 size = nbuf * sizeof(struct npehwbuf); 429 430 /* XXX COHERENT for now */ 431 error = bus_dmamem_alloc(sc->sc_dt, size, sizeof(uint32_t), 0, &seg, 432 1, &rseg, BUS_DMA_NOWAIT); 433 if (error) { 434 aprint_error_dev(sc->sc_dev, 435 "unable to %s for %s %s buffers, error %u\n", 436 "allocate memory", dma->name, "h/w", error); 437 } 438 439 error = bus_dmamem_map(sc->sc_dt, &seg, 1, size, &hwbuf, 440 BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_NOCACHE); 441 if (error) { 442 aprint_error_dev(sc->sc_dev, 443 "unable to %s for %s %s buffers, error %u\n", 444 "map memory", dma->name, "h/w", error); 445 free_dmamem: 446 bus_dmamem_free(sc->sc_dt, &seg, rseg); 447 return error; 448 } 449 dma->hwbuf = (void *)hwbuf; 450 451 error = bus_dmamap_create(sc->sc_dt, size, 1, size, 0, 452 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &dma->buf_map); 453 if (error) { 454 aprint_error_dev(sc->sc_dev, 455 "unable to %s for %s %s buffers, error %u\n", 456 "create map", dma->name, "h/w", error); 457 unmap_dmamem: 458 dma->hwbuf = NULL; 459 bus_dmamem_unmap(sc->sc_dt, hwbuf, size); 460 goto free_dmamem; 461 } 462 463 error = bus_dmamap_load(sc->sc_dt, dma->buf_map, hwbuf, size, NULL, 464 BUS_DMA_NOWAIT); 465 if (error) { 466 aprint_error_dev(sc->sc_dev, 467 "unable to %s for %s %s buffers, error %u\n", 468 "load map", dma->name, "h/w", error); 469 destroy_dmamap: 470 bus_dmamap_destroy(sc->sc_dt, dma->buf_map); 471 goto unmap_dmamem; 472 } 473 474 /* XXX M_TEMP */ 475 dma->buf = malloc(nbuf * sizeof(struct npebuf), M_TEMP, M_NOWAIT | M_ZERO); 476 if (dma->buf == NULL) { 477 aprint_error_dev(sc->sc_dev, 478 "unable to %s for %s %s buffers, error %u\n", 479 "allocate memory", dma->name, "h/w", error); 480 bus_dmamap_unload(sc->sc_dt, dma->buf_map); 481 error = ENOMEM; 482 goto destroy_dmamap; 483 } 484 485 dma->buf_phys = dma->buf_map->dm_segs[0].ds_addr; 486 for (i = 0; i < dma->nbuf; i++) { 487 struct npebuf *npe = &dma->buf[i]; 488 struct npehwbuf *hw = &dma->hwbuf[i]; 489 490 /* calculate offset to shared area */ 491 npe->ix_neaddr = dma->buf_phys + 492 ((uintptr_t)hw - (uintptr_t)dma->hwbuf); 493 KASSERT((npe->ix_neaddr & 0x1f) == 0); 494 error = bus_dmamap_create(sc->sc_dt, MCLBYTES, maxseg, 495 MCLBYTES, 0, 0, &npe->ix_map); 496 if (error != 0) { 497 aprint_error_dev(sc->sc_dev, 498 "unable to %s for %s buffer %u, error %u\n", 499 "create dmamap", dma->name, i, error); 500 /* XXXSCW: Free up maps... 
*/ 501 return error; 502 } 503 npe->ix_hw = hw; 504 } 505 bus_dmamap_sync(sc->sc_dt, dma->buf_map, 0, dma->buf_map->dm_mapsize, 506 BUS_DMASYNC_PREWRITE); 507 return 0; 508 } 509 510 #if 0 511 static void 512 npe_dma_destroy(struct npe_softc *sc, struct npedma *dma) 513 { 514 int i; 515 516 /* XXXSCW: Clean this up */ 517 518 if (dma->hwbuf != NULL) { 519 for (i = 0; i < dma->nbuf; i++) { 520 struct npebuf *npe = &dma->buf[i]; 521 bus_dmamap_destroy(sc->sc_dt, npe->ix_map); 522 } 523 bus_dmamap_unload(sc->sc_dt, dma->buf_map); 524 bus_dmamem_free(sc->sc_dt, (void *)dma->hwbuf, dma->buf_map); 525 bus_dmamap_destroy(sc->sc_dt, dma->buf_map); 526 } 527 if (dma->buf != NULL) 528 free(dma->buf, M_TEMP); 529 memset(dma, 0, sizeof(*dma)); 530 } 531 #endif 532 533 static int 534 npe_activate(struct npe_softc *sc) 535 { 536 bus_dma_segment_t seg; 537 int unit = sc->sc_unit; 538 int error, i, rseg; 539 void *statbuf; 540 541 /* load NPE firmware and start it running */ 542 error = ixpnpe_init(sc->sc_npe, "npe_fw", npeconfig[unit].imageid); 543 if (error != 0) 544 return error; 545 546 if (bus_space_map(sc->sc_iot, npeconfig[unit].regbase, 547 npeconfig[unit].regsize, 0, &sc->sc_ioh)) { 548 aprint_error_dev(sc->sc_dev, "Cannot map registers 0x%x:0x%x\n", 549 npeconfig[unit].regbase, npeconfig[unit].regsize); 550 return ENOMEM; 551 } 552 553 if (npeconfig[unit].miibase != npeconfig[unit].regbase) { 554 /* 555 * The PHY's are only accessible from one MAC (it appears) 556 * so for other MAC's setup an additional mapping for 557 * frobbing the PHY registers. 558 */ 559 if (bus_space_map(sc->sc_iot, npeconfig[unit].miibase, 560 npeconfig[unit].miisize, 0, &sc->sc_miih)) { 561 aprint_error_dev(sc->sc_dev, 562 "Cannot map MII registers 0x%x:0x%x\n", 563 npeconfig[unit].miibase, npeconfig[unit].miisize); 564 return ENOMEM; 565 } 566 } else 567 sc->sc_miih = sc->sc_ioh; 568 error = npe_dma_setup(sc, &sc->txdma, "tx", NPE_TXBUF, NPE_MAXSEG); 569 if (error != 0) 570 return error; 571 error = npe_dma_setup(sc, &sc->rxdma, "rx", NPE_RXBUF, 1); 572 if (error != 0) 573 return error; 574 575 /* setup statistics block */ 576 error = bus_dmamem_alloc(sc->sc_dt, sizeof(struct npestats), 577 sizeof(uint32_t), 0, &seg, 1, &rseg, BUS_DMA_NOWAIT); 578 if (error) { 579 aprint_error_dev(sc->sc_dev, 580 "unable to %s for %s, error %u\n", 581 "allocate memory", "stats block", error); 582 return error; 583 } 584 585 error = bus_dmamem_map(sc->sc_dt, &seg, 1, sizeof(struct npestats), 586 &statbuf, BUS_DMA_NOWAIT); 587 if (error) { 588 aprint_error_dev(sc->sc_dev, 589 "unable to %s for %s, error %u\n", 590 "map memory", "stats block", error); 591 return error; 592 } 593 sc->sc_stats = (void *)statbuf; 594 595 error = bus_dmamap_create(sc->sc_dt, sizeof(struct npestats), 1, 596 sizeof(struct npestats), 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 597 &sc->sc_stats_map); 598 if (error) { 599 aprint_error_dev(sc->sc_dev, 600 "unable to %s for %s, error %u\n", 601 "create map", "stats block", error); 602 return error; 603 } 604 605 if (bus_dmamap_load(sc->sc_dt, sc->sc_stats_map, sc->sc_stats, 606 sizeof(struct npestats), NULL, BUS_DMA_NOWAIT) != 0) { 607 aprint_error_dev(sc->sc_dev, 608 "unable to %s for %s, error %u\n", 609 "load map", "stats block", error); 610 return error; 611 } 612 sc->sc_stats_phys = sc->sc_stats_map->dm_segs[0].ds_addr; 613 614 /* XXX disable half-bridge LEARNING+FILTERING feature */ 615 616 /* 617 * Setup h/w rx/tx queues. 
There are four q's: 618 * rx inbound q of rx'd frames 619 * rx_free pool of ixpbuf's for receiving frames 620 * tx outbound q of frames to send 621 * tx_done q of tx frames that have been processed 622 * 623 * The NPE handles the actual tx/rx process and the q manager 624 * handles the queues. The driver just writes entries to the 625 * q manager mailboxes and gets callbacks when there are rx'd 626 * frames to process or tx'd frames to reap. These callbacks 627 * are controlled by the q configurations; e.g. we get a 628 * callback when tx_done has 2 or more frames to process and 629 * when the rx q has at least one frame. These settings can 630 * be changed at the time the q is configured. 631 */ 632 sc->rx_qid = npeconfig[unit].rx_qid; 633 ixpqmgr_qconfig(sc->rx_qid, NPE_RXBUF, 0, 1, 634 IX_QMGR_Q_SOURCE_ID_NOT_E, npe_rxdone, sc); 635 sc->rx_freeqid = npeconfig[unit].rx_freeqid; 636 ixpqmgr_qconfig(sc->rx_freeqid, NPE_RXBUF, 0, NPE_RXBUF/2, 0, NULL, sc); 637 /* tell the NPE to direct all traffic to rx_qid */ 638 #if 0 639 for (i = 0; i < 8; i++) 640 #else 641 printf("%s: remember to fix rx q setup\n", device_xname(sc->sc_dev)); 642 for (i = 0; i < 4; i++) 643 #endif 644 npe_setrxqosentry(sc, i, 0, sc->rx_qid); 645 646 sc->tx_qid = npeconfig[unit].tx_qid; 647 sc->tx_doneqid = npeconfig[unit].tx_doneqid; 648 ixpqmgr_qconfig(sc->tx_qid, NPE_TXBUF, 0, NPE_TXBUF, 0, NULL, sc); 649 if (tx_doneqid == -1) { 650 ixpqmgr_qconfig(sc->tx_doneqid, NPE_TXBUF, 0, 2, 651 IX_QMGR_Q_SOURCE_ID_NOT_E, npe_txdone, sc); 652 tx_doneqid = sc->tx_doneqid; 653 } 654 655 KASSERT(npes[npeconfig[unit].npeid] == NULL); 656 npes[npeconfig[unit].npeid] = sc; 657 658 return 0; 659 } 660 661 #if 0 662 static void 663 npe_deactivate(struct npe_softc *sc) 664 { 665 int unit = sc->sc_unit; 666 667 npes[npeconfig[unit].npeid] = NULL; 668 669 /* XXX disable q's */ 670 if (sc->sc_npe != NULL) 671 ixpnpe_stop(sc->sc_npe); 672 if (sc->sc_stats != NULL) { 673 bus_dmamap_unload(sc->sc_stats_tag, sc->sc_stats_map); 674 bus_dmamem_free(sc->sc_stats_tag, sc->sc_stats, 675 sc->sc_stats_map); 676 bus_dmamap_destroy(sc->sc_stats_tag, sc->sc_stats_map); 677 } 678 if (sc->sc_stats_tag != NULL) 679 bus_dma_tag_destroy(sc->sc_stats_tag); 680 npe_dma_destroy(sc, &sc->txdma); 681 npe_dma_destroy(sc, &sc->rxdma); 682 bus_generic_detach(sc->sc_dev); 683 if (sc->sc_mii) 684 device_delete_child(sc->sc_dev, sc->sc_mii); 685 #if 0 686 /* XXX sc_ioh and sc_miih */ 687 if (sc->mem_res) 688 bus_release_resource(dev, SYS_RES_IOPORT, 689 rman_get_rid(sc->mem_res), sc->mem_res); 690 sc->mem_res = 0; 691 #endif 692 } 693 #endif 694 695 /* 696 * Notify the world which media we're using.
697 */ 698 static void 699 npe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr) 700 { 701 struct npe_softc *sc = ifp->if_softc; 702 703 mii_pollstat(&sc->sc_mii); 704 705 ifmr->ifm_active = sc->sc_mii.mii_media_active; 706 ifmr->ifm_status = sc->sc_mii.mii_media_status; 707 } 708 709 static void 710 npe_addstats(struct npe_softc *sc) 711 { 712 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 713 struct npestats *ns = sc->sc_stats; 714 715 ifp->if_oerrors += 716 be32toh(ns->dot3StatsInternalMacTransmitErrors) 717 + be32toh(ns->dot3StatsCarrierSenseErrors) 718 + be32toh(ns->TxVLANIdFilterDiscards) 719 ; 720 ifp->if_ierrors += be32toh(ns->dot3StatsFCSErrors) 721 + be32toh(ns->dot3StatsInternalMacReceiveErrors) 722 + be32toh(ns->RxOverrunDiscards) 723 + be32toh(ns->RxUnderflowEntryDiscards) 724 ; 725 ifp->if_collisions += 726 be32toh(ns->dot3StatsSingleCollisionFrames) 727 + be32toh(ns->dot3StatsMultipleCollisionFrames) 728 ; 729 } 730 731 static void 732 npe_tick(void *xsc) 733 { 734 #define ACK (NPE_RESETSTATS << NPE_MAC_MSGID_SHL) 735 struct npe_softc *sc = xsc; 736 uint32_t msg[2]; 737 738 /* 739 * NB: to avoid sleeping with the softc lock held we 740 * split the NPE msg processing into two parts. The 741 * request for statistics is sent w/o waiting for a 742 * reply and then on the next tick we retrieve the 743 * results. This works because npe_tick is the only 744 * code that talks via the mailbox's (except at setup). 745 * This likely can be handled better. 746 */ 747 if (ixpnpe_recvmsg(sc->sc_npe, msg) == 0 && msg[0] == ACK) { 748 bus_dmamap_sync(sc->sc_dt, sc->sc_stats_map, 0, 749 sizeof(struct npestats), BUS_DMASYNC_POSTREAD); 750 npe_addstats(sc); 751 } 752 npe_updatestats(sc); 753 mii_tick(&sc->sc_mii); 754 755 /* schedule next poll */ 756 callout_reset(&sc->sc_tick_ch, hz, npe_tick, sc); 757 #undef ACK 758 } 759 760 static void 761 npe_setmac(struct npe_softc *sc, const u_char *eaddr) 762 { 763 764 WR4(sc, NPE_MAC_UNI_ADDR_1, eaddr[0]); 765 WR4(sc, NPE_MAC_UNI_ADDR_2, eaddr[1]); 766 WR4(sc, NPE_MAC_UNI_ADDR_3, eaddr[2]); 767 WR4(sc, NPE_MAC_UNI_ADDR_4, eaddr[3]); 768 WR4(sc, NPE_MAC_UNI_ADDR_5, eaddr[4]); 769 WR4(sc, NPE_MAC_UNI_ADDR_6, eaddr[5]); 770 } 771 772 static void 773 npe_getmac(struct npe_softc *sc) 774 { 775 uint8_t *eaddr = sc->sc_enaddr; 776 777 if (npe_getmac_md != NULL) { 778 (*npe_getmac_md)(device_unit(sc->sc_dev), eaddr); 779 } else { 780 /* 781 * Some system's unicast address appears to be loaded from 782 * EEPROM on reset 783 */ 784 eaddr[0] = RD4(sc, NPE_MAC_UNI_ADDR_1) & 0xff; 785 eaddr[1] = RD4(sc, NPE_MAC_UNI_ADDR_2) & 0xff; 786 eaddr[2] = RD4(sc, NPE_MAC_UNI_ADDR_3) & 0xff; 787 eaddr[3] = RD4(sc, NPE_MAC_UNI_ADDR_4) & 0xff; 788 eaddr[4] = RD4(sc, NPE_MAC_UNI_ADDR_5) & 0xff; 789 eaddr[5] = RD4(sc, NPE_MAC_UNI_ADDR_6) & 0xff; 790 } 791 } 792 793 struct txdone { 794 struct npebuf *head; 795 struct npebuf **tail; 796 int count; 797 }; 798 799 static __inline void 800 npe_txdone_finish(struct npe_softc *sc, const struct txdone *td) 801 { 802 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 803 804 *td->tail = sc->tx_free; 805 sc->tx_free = td->head; 806 /* 807 * We're no longer busy, so clear the busy flag and call the 808 * start routine to xmit more packets. 809 */ 810 ifp->if_opackets += td->count; 811 ifp->if_flags &= ~IFF_OACTIVE; 812 ifp->if_timer = 0; 813 npestart(ifp); 814 } 815 816 /* 817 * Q manager callback on tx done queue. Reap mbufs 818 * and return tx buffers to the free list. Finally 819 * restart output. 
Note the microcode has only one 820 * txdone q wired into it so we must use the NPE ID 821 * returned with each npehwbuf to decide where to 822 * send buffers. 823 */ 824 static void 825 npe_txdone(int qid, void *arg) 826 { 827 #define P2V(a, dma) \ 828 &(dma)->buf[((a) - (dma)->buf_phys) / sizeof(struct npehwbuf)] 829 struct npe_softc *sc; 830 struct npebuf *npe; 831 struct txdone *td, q[NPE_MAX]; 832 uint32_t entry; 833 834 /* XXX no NPE-A support */ 835 q[NPE_B].tail = &q[NPE_B].head; q[NPE_B].count = 0; 836 q[NPE_C].tail = &q[NPE_C].head; q[NPE_C].count = 0; 837 /* XXX max # at a time? */ 838 while (ixpqmgr_qread(qid, &entry) == 0) { 839 sc = npes[NPE_QM_Q_NPE(entry)]; 840 DPRINTF(sc, "%s: entry 0x%x NPE %u port %u\n", 841 __func__, entry, NPE_QM_Q_NPE(entry), NPE_QM_Q_PORT(entry)); 842 rnd_add_uint32(&sc->rnd_source, entry); 843 844 npe = P2V(NPE_QM_Q_ADDR(entry), &sc->txdma); 845 m_freem(npe->ix_m); 846 npe->ix_m = NULL; 847 848 td = &q[NPE_QM_Q_NPE(entry)]; 849 *td->tail = npe; 850 td->tail = &npe->ix_next; 851 td->count++; 852 } 853 854 if (q[NPE_B].count) 855 npe_txdone_finish(npes[NPE_B], &q[NPE_B]); 856 if (q[NPE_C].count) 857 npe_txdone_finish(npes[NPE_C], &q[NPE_C]); 858 #undef P2V 859 } 860 861 static __inline struct mbuf * 862 npe_getcl(void) 863 { 864 struct mbuf *m; 865 866 MGETHDR(m, M_DONTWAIT, MT_DATA); 867 if (m != NULL) { 868 MCLGET(m, M_DONTWAIT); 869 if ((m->m_flags & M_EXT) == 0) { 870 m_freem(m); 871 m = NULL; 872 } 873 } 874 return (m); 875 } 876 877 static int 878 npe_rxbuf_init(struct npe_softc *sc, struct npebuf *npe, struct mbuf *m) 879 { 880 struct npehwbuf *hw; 881 int error; 882 883 if (m == NULL) { 884 m = npe_getcl(); 885 if (m == NULL) 886 return ENOBUFS; 887 } 888 KASSERT(m->m_ext.ext_size >= (NPE_FRAME_SIZE_DEFAULT + ETHER_ALIGN)); 889 m->m_pkthdr.len = m->m_len = NPE_FRAME_SIZE_DEFAULT; 890 /* backload payload and align ip hdr */ 891 m->m_data = m->m_ext.ext_buf + (m->m_ext.ext_size 892 - (NPE_FRAME_SIZE_DEFAULT + ETHER_ALIGN)); 893 error = bus_dmamap_load_mbuf(sc->sc_dt, npe->ix_map, m, 894 BUS_DMA_READ|BUS_DMA_NOWAIT); 895 if (error != 0) { 896 m_freem(m); 897 return error; 898 } 899 hw = npe->ix_hw; 900 hw->ix_ne[0].data = htobe32(npe->ix_map->dm_segs[0].ds_addr); 901 /* NB: NPE requires length be a multiple of 64 */ 902 /* NB: buffer length is shifted in word */ 903 hw->ix_ne[0].len = htobe32(npe->ix_map->dm_segs[0].ds_len << 16); 904 hw->ix_ne[0].next = 0; 905 npe->ix_m = m; 906 /* Flush the memory in the mbuf */ 907 bus_dmamap_sync(sc->sc_dt, npe->ix_map, 0, npe->ix_map->dm_mapsize, 908 BUS_DMASYNC_PREREAD); 909 return 0; 910 } 911 912 /* 913 * RX q processing for a specific NPE. Claim entries 914 * from the hardware queue and pass the frames up the 915 * stack. Pass the rx buffers to the free list. 916 */ 917 static void 918 npe_rxdone(int qid, void *arg) 919 { 920 #define P2V(a, dma) \ 921 &(dma)->buf[((a) - (dma)->buf_phys) / sizeof(struct npehwbuf)] 922 struct npe_softc *sc = arg; 923 struct npedma *dma = &sc->rxdma; 924 uint32_t entry; 925 926 while (ixpqmgr_qread(qid, &entry) == 0) { 927 struct npebuf *npe = P2V(NPE_QM_Q_ADDR(entry), dma); 928 struct mbuf *m; 929 930 DPRINTF(sc, "%s: entry 0x%x neaddr 0x%x ne_len 0x%x\n", 931 __func__, entry, npe->ix_neaddr, npe->ix_hw->ix_ne[0].len); 932 rnd_add_uint32(&sc->rnd_source, entry); 933 /* 934 * Allocate a new mbuf to replenish the rx buffer. 935 * If doing so fails we drop the rx'd frame so we 936 * can reuse the previous mbuf. 
When we're able to 937 * allocate a new mbuf dispatch the mbuf w/ rx'd 938 * data up the stack and replace it with the newly 939 * allocated one. 940 */ 941 m = npe_getcl(); 942 if (m != NULL) { 943 struct mbuf *mrx = npe->ix_m; 944 struct npehwbuf *hw = npe->ix_hw; 945 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 946 947 /* Flush mbuf memory for rx'd data */ 948 bus_dmamap_sync(sc->sc_dt, npe->ix_map, 0, 949 npe->ix_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 950 951 /* XXX flush hw buffer; works now 'cuz coherent */ 952 /* set m_len etc. per rx frame size */ 953 mrx->m_len = be32toh(hw->ix_ne[0].len) & 0xffff; 954 mrx->m_pkthdr.len = mrx->m_len; 955 mrx->m_pkthdr.rcvif = ifp; 956 /* Don't add M_HASFCS. See below */ 957 958 #if 1 959 if (mrx->m_pkthdr.len < sizeof(struct ether_header)) { 960 log(LOG_INFO, "%s: too short frame (len=%d)\n", 961 device_xname(sc->sc_dev), mrx->m_pkthdr.len); 962 /* Back out "newly allocated" mbuf. */ 963 m_freem(m); 964 ifp->if_ierrors++; 965 goto fail; 966 } 967 if ((ifp->if_flags & IFF_PROMISC) == 0) { 968 struct ether_header *eh; 969 970 /* 971 * Workaround for "Non-Intel XScale Technology 972 * Errata" No. 29. AA:BB:CC:DD:EE:xF's packet 973 * matches the filter (both unicast and 974 * multicast). 975 */ 976 eh = mtod(mrx, struct ether_header *); 977 if (ETHER_IS_MULTICAST(eh->ether_dhost) == 0) { 978 /* unicast */ 979 980 if (sc->sc_enaddr[5] != eh->ether_dhost[5]) { 981 /* discard it */ 982 #if 0 983 printf("discard it\n"); 984 #endif 985 /* 986 * Back out "newly allocated" 987 * mbuf. 988 */ 989 m_freem(m); 990 goto fail; 991 } 992 } else if (memcmp(eh->ether_dhost, 993 etherbroadcastaddr, 6) == 0) { 994 /* Always accept broadcast packets */ 995 } else { 996 struct ethercom *ec = &sc->sc_ethercom; 997 struct ether_multi *enm; 998 struct ether_multistep step; 999 int match = 0; 1000 1001 /* multicast */ 1002 1003 ETHER_FIRST_MULTI(step, ec, enm); 1004 while (enm != NULL) { 1005 uint64_t lowint, highint, dest; 1006 1007 lowint = MAC2UINT64(enm->enm_addrlo); 1008 highint = MAC2UINT64(enm->enm_addrhi); 1009 dest = MAC2UINT64(eh->ether_dhost); 1010 #if 0 1011 printf("%llx\n", lowint); 1012 printf("%llx\n", dest); 1013 printf("%llx\n", highint); 1014 #endif 1015 if ((lowint <= dest) && (dest <= highint)) { 1016 match = 1; 1017 break; 1018 } 1019 ETHER_NEXT_MULTI(step, enm); 1020 } 1021 if (match == 0) { 1022 /* discard it */ 1023 #if 0 1024 printf("discard it(M)\n"); 1025 #endif 1026 /* 1027 * Back out "newly allocated" 1028 * mbuf. 1029 */ 1030 m_freem(m); 1031 goto fail; 1032 } 1033 } 1034 } 1035 if (mrx->m_pkthdr.len > NPE_FRAME_SIZE_DEFAULT) { 1036 log(LOG_INFO, "%s: oversized frame (len=%d)\n", 1037 device_xname(sc->sc_dev), mrx->m_pkthdr.len); 1038 /* Back out "newly allocated" mbuf. */ 1039 m_freem(m); 1040 ifp->if_ierrors++; 1041 goto fail; 1042 } 1043 #endif 1044 1045 /* 1046 * Trim FCS! 1047 * The NPE passes the FCS up because of this driver's MAC setting, 1048 * so we always trim it here rather than add M_HASFCS. 1049 */ 1050 m_adj(mrx, -ETHER_CRC_LEN); 1051 1052 ifp->if_ipackets++; 1053 /* 1054 * Tap off here if there is a bpf listener.
1055 */ 1056 bpf_mtap(ifp, mrx); 1057 ifp->if_input(ifp, mrx); 1058 } else { 1059 fail: 1060 /* discard frame and re-use mbuf */ 1061 m = npe->ix_m; 1062 } 1063 if (npe_rxbuf_init(sc, npe, m) == 0) { 1064 /* return npe buf to rx free list */ 1065 ixpqmgr_qwrite(sc->rx_freeqid, npe->ix_neaddr); 1066 } else { 1067 /* XXX should not happen */ 1068 } 1069 } 1070 #undef P2V 1071 } 1072 1073 static void 1074 npe_startxmit(struct npe_softc *sc) 1075 { 1076 struct npedma *dma = &sc->txdma; 1077 int i; 1078 1079 sc->tx_free = NULL; 1080 for (i = 0; i < dma->nbuf; i++) { 1081 struct npebuf *npe = &dma->buf[i]; 1082 if (npe->ix_m != NULL) { 1083 /* NB: should not happen */ 1084 printf("%s: %s: free mbuf at entry %u\n", 1085 device_xname(sc->sc_dev), __func__, i); 1086 m_freem(npe->ix_m); 1087 } 1088 npe->ix_m = NULL; 1089 npe->ix_next = sc->tx_free; 1090 sc->tx_free = npe; 1091 } 1092 } 1093 1094 static void 1095 npe_startrecv(struct npe_softc *sc) 1096 { 1097 struct npedma *dma = &sc->rxdma; 1098 struct npebuf *npe; 1099 int i; 1100 1101 for (i = 0; i < dma->nbuf; i++) { 1102 npe = &dma->buf[i]; 1103 npe_rxbuf_init(sc, npe, npe->ix_m); 1104 /* set npe buf on rx free list */ 1105 ixpqmgr_qwrite(sc->rx_freeqid, npe->ix_neaddr); 1106 } 1107 } 1108 1109 static void 1110 npeinit_macreg(struct npe_softc *sc) 1111 { 1112 1113 /* 1114 * Reset MAC core. 1115 */ 1116 WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_RESET); 1117 DELAY(NPE_MAC_RESET_DELAY); 1118 /* configure MAC to generate MDC clock */ 1119 WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_MDC_EN); 1120 1121 /* disable transmitter and receiver in the MAC */ 1122 WR4(sc, NPE_MAC_RX_CNTRL1, 1123 RD4(sc, NPE_MAC_RX_CNTRL1) &~ NPE_RX_CNTRL1_RX_EN); 1124 WR4(sc, NPE_MAC_TX_CNTRL1, 1125 RD4(sc, NPE_MAC_TX_CNTRL1) &~ NPE_TX_CNTRL1_TX_EN); 1126 1127 /* 1128 * Set the MAC core registers. 1129 */ 1130 WR4(sc, NPE_MAC_INT_CLK_THRESH, 0x1); /* clock ratio: for ixp4xx */ 1131 WR4(sc, NPE_MAC_TX_CNTRL2, 0xf); /* max retries */ 1132 WR4(sc, NPE_MAC_RANDOM_SEED, 0x8); /* LFSR back-off seed */ 1133 /* thresholds determined by NPE firmware FS */ 1134 WR4(sc, NPE_MAC_THRESH_P_EMPTY, 0x12); 1135 WR4(sc, NPE_MAC_THRESH_P_FULL, 0x30); 1136 WR4(sc, NPE_MAC_BUF_SIZE_TX, NPE_MAC_BUF_SIZE_TX_DEFAULT); 1137 /* tx fifo threshold (bytes) */ 1138 WR4(sc, NPE_MAC_TX_DEFER, 0x15); /* for single deferral */ 1139 WR4(sc, NPE_MAC_RX_DEFER, 0x16); /* deferral on inter-frame gap */ 1140 WR4(sc, NPE_MAC_TX_TWO_DEFER_1, 0x8); /* for 2-part deferral */ 1141 WR4(sc, NPE_MAC_TX_TWO_DEFER_2, 0x7); /* for 2-part deferral */ 1142 WR4(sc, NPE_MAC_SLOT_TIME, NPE_MAC_SLOT_TIME_MII_DEFAULT); 1143 /* assumes MII mode */ 1144 WR4(sc, NPE_MAC_TX_CNTRL1, 1145 NPE_TX_CNTRL1_RETRY /* retry failed xmits */ 1146 | NPE_TX_CNTRL1_FCS_EN /* append FCS */ 1147 | NPE_TX_CNTRL1_2DEFER /* 2-part deferral */ 1148 | NPE_TX_CNTRL1_PAD_EN); /* pad runt frames */ 1149 /* XXX pad strip? */
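/* NB: NPE_RX_CNTRL1_CRC_EN below makes the MAC hand received frames up with the FCS still attached; npe_rxdone() trims those 4 bytes (m_adj(mrx, -ETHER_CRC_LEN)) rather than setting M_HASFCS on the mbuf. */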
1150 WR4(sc, NPE_MAC_RX_CNTRL1, 1151 NPE_RX_CNTRL1_CRC_EN /* include CRC/FCS */ 1152 | NPE_RX_CNTRL1_PAUSE_EN); /* ena pause frame handling */ 1153 WR4(sc, NPE_MAC_RX_CNTRL2, 0); 1154 } 1155 1156 static void 1157 npeinit_resetcb(void *xsc) 1158 { 1159 struct npe_softc *sc = xsc; 1160 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1161 uint32_t msg[2]; 1162 1163 ifp->if_oerrors++; 1164 npeinit_locked(sc); 1165 1166 msg[0] = NPE_NOTIFYMACRECOVERYDONE << NPE_MAC_MSGID_SHL 1167 | (npeconfig[sc->sc_unit].macport << NPE_MAC_PORTID_SHL); 1168 msg[1] = 0; 1169 ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg); 1170 } 1171 1172 /* 1173 * Reset and initialize the chip 1174 */ 1175 static void 1176 npeinit_locked(void *xsc) 1177 { 1178 struct npe_softc *sc = xsc; 1179 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1180 1181 /* Cancel any pending I/O. */ 1182 npestop(ifp, 0); 1183 1184 /* Reset the chip to a known state. */ 1185 npeinit_macreg(sc); 1186 npe_setmac(sc, CLLADDR(ifp->if_sadl)); 1187 ether_mediachange(ifp); 1188 npe_setmcast(sc); 1189 1190 npe_startxmit(sc); 1191 npe_startrecv(sc); 1192 1193 ifp->if_flags |= IFF_RUNNING; 1194 ifp->if_flags &= ~IFF_OACTIVE; 1195 ifp->if_timer = 0; /* just in case */ 1196 1197 /* enable transmitter and receiver in the MAC */ 1198 WR4(sc, NPE_MAC_RX_CNTRL1, 1199 RD4(sc, NPE_MAC_RX_CNTRL1) | NPE_RX_CNTRL1_RX_EN); 1200 WR4(sc, NPE_MAC_TX_CNTRL1, 1201 RD4(sc, NPE_MAC_TX_CNTRL1) | NPE_TX_CNTRL1_TX_EN); 1202 1203 callout_reset(&sc->sc_tick_ch, hz, npe_tick, sc); 1204 } 1205 1206 static int 1207 npeinit(struct ifnet *ifp) 1208 { 1209 struct npe_softc *sc = ifp->if_softc; 1210 int s; 1211 1212 s = splnet(); 1213 npeinit_locked(sc); 1214 splx(s); 1215 1216 return (0); 1217 } 1218 1219 /* 1220 * Defragment an mbuf chain by copying the whole chain into a 1221 * single mbuf. If this is not possible NULL is returned and 1222 * the original mbuf chain is left in its present (potentially 1223 * modified) state. A header mbuf is used, with a cluster added 1224 * when the data does not fit in the header mbuf alone. 1225 */ 1226 static __inline struct mbuf * 1227 npe_defrag(struct mbuf *m0) 1228 { 1229 struct mbuf *m; 1230 1231 MGETHDR(m, M_DONTWAIT, MT_DATA); 1232 if (m == NULL) 1233 return (NULL); 1234 M_COPY_PKTHDR(m, m0); 1235 1236 if ((m->m_len = m0->m_pkthdr.len) > MHLEN) { 1237 MCLGET(m, M_DONTWAIT); 1238 if ((m->m_flags & M_EXT) == 0) { 1239 m_freem(m); 1240 return (NULL); 1241 } 1242 } 1243 1244 m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *)); 1245 m_freem(m0); 1246 1247 return (m); 1248 } 1249
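/* NB: npestart() below falls back to npe_defrag() when bus_dmamap_load_mbuf() returns EFBIG, i.e. when a chain has more segments than the tx dma map (created with NPE_MAXSEG) will accept; since npe_defrag() copies everything into one mbuf/cluster, this assumes the frame fits in a single cluster. */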
1250 /* 1251 * Dequeue packets and place on the h/w transmit queue. 1252 */ 1253 static void 1254 npestart(struct ifnet *ifp) 1255 { 1256 struct npe_softc *sc = ifp->if_softc; 1257 struct npebuf *npe; 1258 struct npehwbuf *hw; 1259 struct mbuf *m, *n; 1260 bus_dma_segment_t *segs; 1261 int nseg, len, error, i; 1262 uint32_t next; 1263 1264 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING) 1265 return; 1266 1267 while (sc->tx_free != NULL) { 1268 IFQ_DEQUEUE(&ifp->if_snd, m); 1269 if (m == NULL) 1270 break; 1271 npe = sc->tx_free; 1272 error = bus_dmamap_load_mbuf(sc->sc_dt, npe->ix_map, m, 1273 BUS_DMA_WRITE|BUS_DMA_NOWAIT); 1274 if (error == EFBIG) { 1275 n = npe_defrag(m); 1276 if (n == NULL) { 1277 printf("%s: %s: too many fragments\n", 1278 device_xname(sc->sc_dev), __func__); 1279 m_freem(m); 1280 return; /* XXX? */ 1281 } 1282 m = n; 1283 error = bus_dmamap_load_mbuf(sc->sc_dt, npe->ix_map, 1284 m, BUS_DMA_WRITE|BUS_DMA_NOWAIT); 1285 } 1286 if (error != 0) { 1287 printf("%s: %s: error %u\n", 1288 device_xname(sc->sc_dev), __func__, error); 1289 m_freem(m); 1290 return; /* XXX? */ 1291 } 1292 sc->tx_free = npe->ix_next; 1293 1294 /* 1295 * Tap off here if there is a bpf listener. 1296 */ 1297 bpf_mtap(ifp, m); 1298 1299 bus_dmamap_sync(sc->sc_dt, npe->ix_map, 0, 1300 npe->ix_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 1301 1302 npe->ix_m = m; 1303 hw = npe->ix_hw; 1304 len = m->m_pkthdr.len; 1305 nseg = npe->ix_map->dm_nsegs; 1306 segs = npe->ix_map->dm_segs; 1307 next = npe->ix_neaddr + sizeof(hw->ix_ne[0]); 1308 for (i = 0; i < nseg; i++) { 1309 hw->ix_ne[i].data = htobe32(segs[i].ds_addr); 1310 hw->ix_ne[i].len = htobe32((segs[i].ds_len<<16) | len); 1311 hw->ix_ne[i].next = htobe32(next); 1312 1313 len = 0; /* zero for segments > 1 */ 1314 next += sizeof(hw->ix_ne[0]); 1315 } 1316 hw->ix_ne[i-1].next = 0; /* zero last in chain */ 1317 /* XXX flush descriptor instead of using uncached memory */ 1318 1319 DPRINTF(sc, "%s: qwrite(%u, 0x%x) ne_data %x ne_len 0x%x\n", 1320 __func__, sc->tx_qid, npe->ix_neaddr, 1321 hw->ix_ne[0].data, hw->ix_ne[0].len); 1322 /* stick it on the tx q */ 1323 /* XXX add vlan priority */ 1324 ixpqmgr_qwrite(sc->tx_qid, npe->ix_neaddr); 1325 1326 ifp->if_timer = 5; 1327 } 1328 if (sc->tx_free == NULL) 1329 ifp->if_flags |= IFF_OACTIVE; 1330 } 1331 1332 static void 1333 npe_stopxmit(struct npe_softc *sc) 1334 { 1335 struct npedma *dma = &sc->txdma; 1336 int i; 1337 1338 /* XXX qmgr */ 1339 for (i = 0; i < dma->nbuf; i++) { 1340 struct npebuf *npe = &dma->buf[i]; 1341 1342 if (npe->ix_m != NULL) { 1343 bus_dmamap_unload(sc->sc_dt, npe->ix_map); 1344 m_freem(npe->ix_m); 1345 npe->ix_m = NULL; 1346 } 1347 } 1348 } 1349 1350 static void 1351 npe_stoprecv(struct npe_softc *sc) 1352 { 1353 struct npedma *dma = &sc->rxdma; 1354 int i; 1355 1356 /* XXX qmgr */ 1357 for (i = 0; i < dma->nbuf; i++) { 1358 struct npebuf *npe = &dma->buf[i]; 1359 1360 if (npe->ix_m != NULL) { 1361 bus_dmamap_unload(sc->sc_dt, npe->ix_map); 1362 m_freem(npe->ix_m); 1363 npe->ix_m = NULL; 1364 } 1365 } 1366 } 1367 1368 /* 1369 * Turn off interrupts, and stop the nic. 1370 */ 1371 void 1372 npestop(struct ifnet *ifp, int disable) 1373 { 1374 struct npe_softc *sc = ifp->if_softc; 1375 1376 /* disable transmitter and receiver in the MAC */ 1377 WR4(sc, NPE_MAC_RX_CNTRL1, 1378 RD4(sc, NPE_MAC_RX_CNTRL1) &~ NPE_RX_CNTRL1_RX_EN); 1379 WR4(sc, NPE_MAC_TX_CNTRL1, 1380 RD4(sc, NPE_MAC_TX_CNTRL1) &~ NPE_TX_CNTRL1_TX_EN); 1381 1382 callout_stop(&sc->sc_tick_ch); 1383 1384 npe_stopxmit(sc); 1385 npe_stoprecv(sc); 1386 /* XXX go into loopback & drain q's? */ 1387 /* XXX but beware of disabling tx above */ 1388 1389 /* 1390 * The MAC core rx/tx disable may leave the MAC hardware in an 1391 * unpredictable state. A hw reset is executed before resetting 1392 * all the MAC parameters to a known value.
1393 */ 1394 WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_RESET); 1395 DELAY(NPE_MAC_RESET_DELAY); 1396 WR4(sc, NPE_MAC_INT_CLK_THRESH, NPE_MAC_INT_CLK_THRESH_DEFAULT); 1397 WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_MDC_EN); 1398 1399 ifp->if_timer = 0; 1400 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1401 } 1402 1403 void 1404 npewatchdog(struct ifnet *ifp) 1405 { 1406 struct npe_softc *sc = ifp->if_softc; 1407 int s; 1408 1409 aprint_error_dev(sc->sc_dev, "device timeout\n"); 1410 s = splnet(); 1411 ifp->if_oerrors++; 1412 npeinit_locked(sc); 1413 splx(s); 1414 } 1415 1416 static int 1417 npeioctl(struct ifnet *ifp, u_long cmd, void *data) 1418 { 1419 struct npe_softc *sc = ifp->if_softc; 1420 struct ifreq *ifr = (struct ifreq *) data; 1421 int s, error = 0; 1422 1423 s = splnet(); 1424 1425 switch (cmd) { 1426 case SIOCSIFMEDIA: 1427 case SIOCGIFMEDIA: 1428 #if 0 /* not yet */ 1429 /* Flow control requires full-duplex mode. */ 1430 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO || 1431 (ifr->ifr_media & IFM_FDX) == 0) 1432 ifr->ifr_media &= ~IFM_ETH_FMASK; 1433 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) { 1434 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) { 1435 /* We can do both TXPAUSE and RXPAUSE. */ 1436 ifr->ifr_media |= 1437 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; 1438 } 1439 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK; 1440 } 1441 #endif 1442 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd); 1443 break; 1444 case SIOCSIFFLAGS: 1445 if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) == IFF_RUNNING) { 1446 /* 1447 * If interface is marked down and it is running, 1448 * then stop and disable it. 1449 */ 1450 (*ifp->if_stop)(ifp, 1); 1451 } else if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) == IFF_UP) { 1452 /* 1453 * If interface is marked up and it is stopped, then 1454 * start it. 1455 */ 1456 error = (*ifp->if_init)(ifp); 1457 } else if ((ifp->if_flags & IFF_UP) != 0) { 1458 int diff; 1459 1460 /* Up (AND RUNNING). */ 1461 1462 diff = (ifp->if_flags ^ sc->sc_if_flags) 1463 & (IFF_PROMISC|IFF_ALLMULTI); 1464 if ((diff & (IFF_PROMISC|IFF_ALLMULTI)) != 0) { 1465 /* 1466 * If the only difference between the last and 1467 * new flags is IFF_PROMISC or IFF_ALLMULTI, 1468 * just set the multicast filter (avoid a full 1469 * reset, which would take the link down). 1470 */ 1471 npe_setmcast(sc); 1472 } else { 1473 /* 1474 * Reset the interface to pick up changes in 1475 * any other flags that affect the hardware 1476 * state. 1477 */ 1478 error = (*ifp->if_init)(ifp); 1479 } 1480 } 1481 sc->sc_if_flags = ifp->if_flags; 1482 break; 1483 default: 1484 error = ether_ioctl(ifp, cmd, data); 1485 if (error == ENETRESET) { 1486 /* 1487 * Multicast list has changed; set the hardware filter 1488 * accordingly. 1489 */ 1490 npe_setmcast(sc); 1491 error = 0; 1492 } 1493 } 1494 1495 npestart(ifp); 1496 1497 splx(s); 1498 return error; 1499 } 1500 1501 /* 1502 * Setup a traffic class -> rx queue mapping. 1503 */ 1504 static int 1505 npe_setrxqosentry(struct npe_softc *sc, int classix, int trafclass, int qid) 1506 { 1507 int npeid = npeconfig[sc->sc_unit].npeid; 1508 uint32_t msg[2]; 1509 1510 msg[0] = (NPE_SETRXQOSENTRY << NPE_MAC_MSGID_SHL) | (npeid << 20) 1511 | classix; 1512 msg[1] = (trafclass << 24) | (1 << 23) | (qid << 16) | (qid << 4); 1513 return ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg); 1514 } 1515 1516 /* 1517 * Update and reset the statistics in the NPE.
1518 */ 1519 static int 1520 npe_updatestats(struct npe_softc *sc) 1521 { 1522 uint32_t msg[2]; 1523 1524 msg[0] = NPE_RESETSTATS << NPE_MAC_MSGID_SHL; 1525 msg[1] = sc->sc_stats_phys; /* physical address of stat block */ 1526 return ixpnpe_sendmsg(sc->sc_npe, msg); /* NB: no recv */ 1527 } 1528 1529 #if 0 1530 /* 1531 * Get the current statistics block. 1532 */ 1533 static int 1534 npe_getstats(struct npe_softc *sc) 1535 { 1536 uint32_t msg[2]; 1537 1538 msg[0] = NPE_GETSTATS << NPE_MAC_MSGID_SHL; 1539 msg[1] = sc->sc_stats_phys; /* physical address of stat block */ 1540 return ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg); 1541 } 1542 1543 /* 1544 * Query the image id of the loaded firmware. 1545 */ 1546 static uint32_t 1547 npe_getimageid(struct npe_softc *sc) 1548 { 1549 uint32_t msg[2]; 1550 1551 msg[0] = NPE_GETSTATUS << NPE_MAC_MSGID_SHL; 1552 msg[1] = 0; 1553 return ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg) == 0 ? msg[1] : 0; 1554 } 1555 1556 /* 1557 * Enable/disable loopback. 1558 */ 1559 static int 1560 npe_setloopback(struct npe_softc *sc, int ena) 1561 { 1562 uint32_t msg[2]; 1563 1564 msg[0] = (NPE_SETLOOPBACK << NPE_MAC_MSGID_SHL) | (ena != 0); 1565 msg[1] = 0; 1566 return ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg); 1567 } 1568 #endif 1569 1570 /* 1571 * MII bus support routines. 1572 * 1573 * NB: ixp425 has one PHY per NPE 1574 */ 1575 static uint32_t 1576 npe_mii_mdio_read(struct npe_softc *sc, int reg) 1577 { 1578 #define MII_RD4(sc, reg) bus_space_read_4(sc->sc_iot, sc->sc_miih, reg) 1579 uint32_t v; 1580 1581 /* NB: registers are known to be sequential */ 1582 v = (MII_RD4(sc, reg+0) & 0xff) << 0; 1583 v |= (MII_RD4(sc, reg+4) & 0xff) << 8; 1584 v |= (MII_RD4(sc, reg+8) & 0xff) << 16; 1585 v |= (MII_RD4(sc, reg+12) & 0xff) << 24; 1586 return v; 1587 #undef MII_RD4 1588 } 1589 1590 static void 1591 npe_mii_mdio_write(struct npe_softc *sc, int reg, uint32_t cmd) 1592 { 1593 #define MII_WR4(sc, reg, v) \ 1594 bus_space_write_4(sc->sc_iot, sc->sc_miih, reg, v) 1595 1596 /* NB: registers are known to be sequential */ 1597 MII_WR4(sc, reg+0, cmd & 0xff); 1598 MII_WR4(sc, reg+4, (cmd >> 8) & 0xff); 1599 MII_WR4(sc, reg+8, (cmd >> 16) & 0xff); 1600 MII_WR4(sc, reg+12, (cmd >> 24) & 0xff); 1601 #undef MII_WR4 1602 } 1603 1604 static int 1605 npe_mii_mdio_wait(struct npe_softc *sc) 1606 { 1607 #define MAXTRIES 100 /* XXX */ 1608 uint32_t v; 1609 int i; 1610 1611 for (i = 0; i < MAXTRIES; i++) { 1612 v = npe_mii_mdio_read(sc, NPE_MAC_MDIO_CMD); 1613 if ((v & NPE_MII_GO) == 0) 1614 return 1; 1615 } 1616 return 0; /* NB: timeout */ 1617 #undef MAXTRIES 1618 } 1619 1620 static int 1621 npe_miibus_readreg(device_t self, int phy, int reg) 1622 { 1623 struct npe_softc *sc = device_private(self); 1624 uint32_t v; 1625 1626 if (sc->sc_phy > IXPNPECF_PHY_DEFAULT && phy != sc->sc_phy) 1627 return 0xffff; 1628 v = (phy << NPE_MII_ADDR_SHL) | (reg << NPE_MII_REG_SHL) 1629 | NPE_MII_GO; 1630 npe_mii_mdio_write(sc, NPE_MAC_MDIO_CMD, v); 1631 if (npe_mii_mdio_wait(sc)) 1632 v = npe_mii_mdio_read(sc, NPE_MAC_MDIO_STS); 1633 else 1634 v = 0xffff | NPE_MII_READ_FAIL; 1635 return (v & NPE_MII_READ_FAIL) ? 
0xffff : (v & 0xffff); 1636 #undef MAXTRIES 1637 } 1638 1639 static void 1640 npe_miibus_writereg(device_t self, int phy, int reg, int data) 1641 { 1642 struct npe_softc *sc = device_private(self); 1643 uint32_t v; 1644 1645 if (sc->sc_phy > IXPNPECF_PHY_DEFAULT && phy != sc->sc_phy) 1646 return; 1647 v = (phy << NPE_MII_ADDR_SHL) | (reg << NPE_MII_REG_SHL) 1648 | data | NPE_MII_WRITE 1649 | NPE_MII_GO; 1650 npe_mii_mdio_write(sc, NPE_MAC_MDIO_CMD, v); 1651 /* XXX complain about timeout */ 1652 (void) npe_mii_mdio_wait(sc); 1653 } 1654 1655 static void 1656 npe_miibus_statchg(struct ifnet *ifp) 1657 { 1658 struct npe_softc *sc = ifp->if_softc; 1659 uint32_t tx1, rx1; 1660 uint32_t randoff; 1661 1662 /* sync MAC duplex state */ 1663 tx1 = RD4(sc, NPE_MAC_TX_CNTRL1); 1664 rx1 = RD4(sc, NPE_MAC_RX_CNTRL1); 1665 if (sc->sc_mii.mii_media_active & IFM_FDX) { 1666 WR4(sc, NPE_MAC_SLOT_TIME, NPE_MAC_SLOT_TIME_MII_DEFAULT); 1667 tx1 &= ~NPE_TX_CNTRL1_DUPLEX; 1668 rx1 |= NPE_RX_CNTRL1_PAUSE_EN; 1669 } else { 1670 struct timeval now; 1671 getmicrotime(&now); 1672 randoff = (RD4(sc, NPE_MAC_UNI_ADDR_6) ^ now.tv_usec) 1673 & 0x7f; 1674 WR4(sc, NPE_MAC_SLOT_TIME, NPE_MAC_SLOT_TIME_MII_DEFAULT 1675 + randoff); 1676 tx1 |= NPE_TX_CNTRL1_DUPLEX; 1677 rx1 &= ~NPE_RX_CNTRL1_PAUSE_EN; 1678 } 1679 WR4(sc, NPE_MAC_RX_CNTRL1, rx1); 1680 WR4(sc, NPE_MAC_TX_CNTRL1, tx1); 1681 } 1682