/*	$NetBSD: epe.c,v 1.13 2007/10/17 19:53:40 garbled Exp $	*/

/*
 * Copyright (c) 2004 Jesse Off
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: epe.c,v 1.13 2007/10/17 19:53:40 garbled Exp $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ioctl.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/time.h>
#include <sys/device.h>
#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <arm/cpufunc.h>

#include <arm/ep93xx/epsocvar.h>
#include <arm/ep93xx/ep93xxvar.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_inarp.h>
#endif

#ifdef NS
#include <netns/ns.h>
#include <netns/ns_if.h>
#endif

#include "bpfilter.h"
#if NBPFILTER > 0
#include <net/bpf.h>
#include <net/bpfdesc.h>
#endif

#ifdef IPKDB_EP93XX
#include <ipkdb/ipkdb.h>
#endif

#include <arm/ep93xx/ep93xxreg.h>
#include <arm/ep93xx/epereg.h>
#include <arm/ep93xx/epevar.h>

#define DEFAULT_MDCDIV	32

/* Force the fast, direct-mapped register access path below. */
#ifndef EPE_FAST
#define EPE_FAST
#endif

#ifndef EPE_FAST
#define EPE_READ(x) \
	bus_space_read_4(sc->sc_iot, sc->sc_ioh, (EPE_ ## x))
#define EPE_WRITE(x, y) \
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, (EPE_ ## x), (y))
#define CTRLPAGE_DMASYNC(x, y, z) \
	bus_dmamap_sync(sc->sc_dmat, sc->ctrlpage_dmamap, (x), (y), (z))
#else
#define EPE_READ(x) *(volatile u_int32_t *) \
	(EP93XX_AHB_VBASE + EP93XX_AHB_EPE + (EPE_ ## x))
#define EPE_WRITE(x, y) *(volatile u_int32_t *) \
	(EP93XX_AHB_VBASE + EP93XX_AHB_EPE + (EPE_ ## x)) = (y)
#define CTRLPAGE_DMASYNC(x, y, z)
#endif /* !EPE_FAST */
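
/*
 * Note: with EPE_FAST forced on above, register accesses bypass bus_space
 * and go straight through the statically mapped AHB window, and the
 * control-page DMA syncs become no-ops.  This relies on the AHB being
 * mapped at EP93XX_AHB_VBASE and on the control page being mapped
 * cache-coherent (BUS_DMA_COHERENT, below).  For illustration,
 * EPE_READ(SelfCtl) expands to roughly:
 *
 *	*(volatile u_int32_t *)
 *	    (EP93XX_AHB_VBASE + EP93XX_AHB_EPE + EPE_SelfCtl)
 */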

static int	epe_match(struct device *, struct cfdata *, void *);
static void	epe_attach(struct device *, struct device *, void *);
static void	epe_init(struct epe_softc *);
static int	epe_intr(void *arg);
static int	epe_gctx(struct epe_softc *);
static int	epe_mediachange(struct ifnet *);
static void	epe_mediastatus(struct ifnet *, struct ifmediareq *);
int		epe_mii_readreg(struct device *, int, int);
void		epe_mii_writereg(struct device *, int, int, int);
void		epe_statchg(struct device *);
void		epe_tick(void *);
static int	epe_ifioctl(struct ifnet *, u_long, void *);
static void	epe_ifstart(struct ifnet *);
static void	epe_ifwatchdog(struct ifnet *);
static int	epe_ifinit(struct ifnet *);
static void	epe_ifstop(struct ifnet *, int);
static void	epe_setaddr(struct ifnet *);

CFATTACH_DECL(epe, sizeof(struct epe_softc),
    epe_match, epe_attach, NULL, NULL);

static int
epe_match(struct device *parent, struct cfdata *match, void *aux)
{
	return 2;
}

static void
epe_attach(struct device *parent, struct device *self, void *aux)
{
	struct epe_softc *sc;
	struct epsoc_attach_args *sa;
	prop_data_t enaddr;

	printf("\n");
	sc = (struct epe_softc *)self;
	sa = aux;
	sc->sc_iot = sa->sa_iot;
	sc->sc_intr = sa->sa_intr;
	sc->sc_dmat = sa->sa_dmat;

	if (bus_space_map(sa->sa_iot, sa->sa_addr, sa->sa_size,
	    0, &sc->sc_ioh))
		panic("%s: Cannot map registers", self->dv_xname);

	/* Fetch the Ethernet address from property if set. */
	enaddr = prop_dictionary_get(device_properties(self), "mac-addr");
	if (enaddr != NULL) {
		KASSERT(prop_object_type(enaddr) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(enaddr) == ETHER_ADDR_LEN);
		memcpy(sc->sc_enaddr, prop_data_data_nocopy(enaddr),
		    ETHER_ADDR_LEN);
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, EPE_AFP, 0);
		bus_space_write_region_1(sc->sc_iot, sc->sc_ioh, EPE_IndAd,
		    sc->sc_enaddr, ETHER_ADDR_LEN);
	}

	ep93xx_intr_establish(sc->sc_intr, IPL_NET, epe_intr, sc);
	epe_init(sc);
}

static int
epe_gctx(struct epe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	u_int32_t *cur, ndq = 0;

	/* Handle transmit completions */
	cur = (u_int32_t *)(EPE_READ(TXStsQCurAdd) -
	    sc->ctrlpage_dsaddr + (char *)sc->ctrlpage);

	if (sc->TXStsQ_cur != cur) {
		CTRLPAGE_DMASYNC(TX_QLEN * 2 * sizeof(u_int32_t),
		    TX_QLEN * sizeof(u_int32_t), BUS_DMASYNC_PREREAD);
	} else {
		return 0;
	}

	do {
		u_int32_t tbi = *sc->TXStsQ_cur & 0x7fff;
		struct mbuf *m = sc->txq[tbi].m;

		if ((*sc->TXStsQ_cur & TXStsQ_TxWE) == 0) {
			ifp->if_oerrors++;
		}
		bus_dmamap_unload(sc->sc_dmat, sc->txq[tbi].m_dmamap);
		m_freem(m);
		do {
			sc->txq[tbi].m = NULL;
			ndq++;
			tbi = (tbi + 1) % TX_QLEN;
		} while (sc->txq[tbi].m == m);

		ifp->if_opackets++;
		sc->TXStsQ_cur++;
		if (sc->TXStsQ_cur >= sc->TXStsQ + TX_QLEN) {
			sc->TXStsQ_cur = sc->TXStsQ;
		}
	} while (sc->TXStsQ_cur != cur);

	sc->TXDQ_avail += ndq;
	if (ifp->if_flags & IFF_OACTIVE) {
		ifp->if_flags &= ~IFF_OACTIVE;
		/* Disable end-of-tx-chain interrupt */
		EPE_WRITE(IntEn, IntEn_REOFIE);
	}
	return ndq;
}
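
/*
 * Interrupt service: drain the RX status queue, hand completed frames to
 * the stack, replace their mbuf clusters and re-enqueue the descriptors,
 * then reap TX completions.  Each RX status entry is two words, decoded
 * by the loop below as:
 *
 *	word 0: RXStsQ_RWE|RXStsQ_RFP|RXStsQ_EOB set => frame received OK
 *	word 1: bits 30..16 = buffer index, bits 15..0 = frame length
 */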
static int
epe_intr(void *arg)
{
	struct epe_softc *sc = (struct epe_softc *)arg;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	u_int32_t ndq = 0, irq, *cur;

	irq = EPE_READ(IntStsC);
begin:
	cur = (u_int32_t *)(EPE_READ(RXStsQCurAdd) -
	    sc->ctrlpage_dsaddr + (char *)sc->ctrlpage);
	CTRLPAGE_DMASYNC(TX_QLEN * 3 * sizeof(u_int32_t),
	    RX_QLEN * 4 * sizeof(u_int32_t),
	    BUS_DMASYNC_PREREAD);
	while (sc->RXStsQ_cur != cur) {
		if ((sc->RXStsQ_cur[0] & (RXStsQ_RWE|RXStsQ_RFP|RXStsQ_EOB)) ==
		    (RXStsQ_RWE|RXStsQ_RFP|RXStsQ_EOB)) {
			u_int32_t bi = (sc->RXStsQ_cur[1] >> 16) & 0x7fff;
			u_int32_t fl = sc->RXStsQ_cur[1] & 0xffff;
			struct mbuf *m;

			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m != NULL)
				MCLGET(m, M_DONTWAIT);
			if (m != NULL && (m->m_flags & M_EXT)) {
				bus_dmamap_unload(sc->sc_dmat,
				    sc->rxq[bi].m_dmamap);
				sc->rxq[bi].m->m_pkthdr.rcvif = ifp;
				sc->rxq[bi].m->m_pkthdr.len =
				    sc->rxq[bi].m->m_len = fl;
#if NBPFILTER > 0
				if (ifp->if_bpf)
					bpf_mtap(ifp->if_bpf, sc->rxq[bi].m);
#endif /* NBPFILTER > 0 */
				(*ifp->if_input)(ifp, sc->rxq[bi].m);
				sc->rxq[bi].m = m;
				bus_dmamap_load(sc->sc_dmat,
				    sc->rxq[bi].m_dmamap,
				    m->m_ext.ext_buf, MCLBYTES,
				    NULL, BUS_DMA_NOWAIT);
				sc->RXDQ[bi * 2] =
				    sc->rxq[bi].m_dmamap->dm_segs[0].ds_addr;
			} else {
				/* Drop packets until we can get replacement
				 * empty mbufs for the RXDQ.
				 */
				if (m != NULL) {
					m_freem(m);
				}
				ifp->if_ierrors++;
			}
		} else {
			ifp->if_ierrors++;
		}

		ndq++;

		sc->RXStsQ_cur += 2;
		if (sc->RXStsQ_cur >= sc->RXStsQ + (RX_QLEN * 2)) {
			sc->RXStsQ_cur = sc->RXStsQ;
		}
	}

	if (ndq > 0) {
		ifp->if_ipackets += ndq;
		CTRLPAGE_DMASYNC(TX_QLEN * 3 * sizeof(u_int32_t),
		    RX_QLEN * 4 * sizeof(u_int32_t),
		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
		EPE_WRITE(RXStsEnq, ndq);
		EPE_WRITE(RXDEnq, ndq);
		ndq = 0;
	}

	if (epe_gctx(sc) > 0 && IFQ_IS_EMPTY(&ifp->if_snd) == 0) {
		epe_ifstart(ifp);
	}

	irq = EPE_READ(IntStsC);
	if ((irq & (IntSts_RxSQ|IntSts_ECI)) != 0)
		goto begin;

	return (1);
}

static void
epe_init(struct epe_softc *sc)
{
	bus_dma_segment_t segs;
	char *addr;
	int rsegs, err, i;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	int mdcdiv = DEFAULT_MDCDIV;

	callout_init(&sc->epe_tick_ch, 0);

	/* Select primary Individual Address in Address Filter Pointer */
	EPE_WRITE(AFP, 0);
	/* Read ethernet MAC, should already be set by bootrom */
	bus_space_read_region_1(sc->sc_iot, sc->sc_ioh, EPE_IndAd,
	    sc->sc_enaddr, ETHER_ADDR_LEN);
	printf("%s: MAC address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(sc->sc_enaddr));

	/* Soft Reset the MAC */
	EPE_WRITE(SelfCtl, SelfCtl_RESET);
	while (EPE_READ(SelfCtl) & SelfCtl_RESET)
		;

	/* suggested magic initialization values from datasheet */
	EPE_WRITE(RXBufThrshld, 0x800040);
	EPE_WRITE(TXBufThrshld, 0x200010);
	EPE_WRITE(RXStsThrshld, 0x40002);
	EPE_WRITE(TXStsThrshld, 0x40002);
	EPE_WRITE(RXDThrshld, 0x40002);
	EPE_WRITE(TXDThrshld, 0x40002);
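
/*
 * Layout of the control page built below (a single PAGE_SIZE, DMA-coherent
 * allocation holding all four queues, in this order):
 *
 *	TXDQ	TX descriptor queue	TX_QLEN entries x 2 words
 *	TXStsQ	TX status queue		TX_QLEN entries x 1 word
 *	RXDQ	RX descriptor queue	RX_QLEN entries x 2 words
 *	RXStsQ	RX status queue		RX_QLEN entries x 2 words
 */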

	/* Allocate a page of memory for descriptor and status queues */
	err = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, 0, PAGE_SIZE,
	    &segs, 1, &rsegs, BUS_DMA_WAITOK);
	if (err == 0) {
		err = bus_dmamem_map(sc->sc_dmat, &segs, 1, PAGE_SIZE,
		    &sc->ctrlpage, (BUS_DMA_WAITOK|BUS_DMA_COHERENT));
	}
	if (err == 0) {
		err = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE,
		    0, BUS_DMA_WAITOK, &sc->ctrlpage_dmamap);
	}
	if (err == 0) {
		err = bus_dmamap_load(sc->sc_dmat, sc->ctrlpage_dmamap,
		    sc->ctrlpage, PAGE_SIZE, NULL, BUS_DMA_WAITOK);
	}
	if (err != 0) {
		panic("%s: Cannot get DMA memory", sc->sc_dev.dv_xname);
	}
	sc->ctrlpage_dsaddr = sc->ctrlpage_dmamap->dm_segs[0].ds_addr;
	bzero(sc->ctrlpage, PAGE_SIZE);

	/* Set up pointers to start of each queue in kernel addr space.
	 * Each descriptor or RX status queue entry uses 2 words; the TX
	 * status queue uses 1 word per entry.
	 */
	sc->TXDQ = (u_int32_t *)sc->ctrlpage;
	sc->TXDQ_cur = sc->TXDQ;
	sc->TXDQ_avail = TX_QLEN - 1;
	sc->TXStsQ = &sc->TXDQ[TX_QLEN * 2];
	sc->TXStsQ_cur = sc->TXStsQ;
	sc->RXDQ = &sc->TXStsQ[TX_QLEN];
	sc->RXStsQ = &sc->RXDQ[RX_QLEN * 2];
	sc->RXStsQ_cur = sc->RXStsQ;

	/* Program each queue's start addr, cur addr, and len registers
	 * with the physical addresses.
	 */
	addr = (char *)sc->ctrlpage_dmamap->dm_segs[0].ds_addr;
	EPE_WRITE(TXDQBAdd, (u_int32_t)addr);
	EPE_WRITE(TXDQCurAdd, (u_int32_t)addr);
	EPE_WRITE(TXDQBLen, TX_QLEN * 2 * sizeof(u_int32_t));

	addr += (sc->TXStsQ - sc->TXDQ) * sizeof(u_int32_t);
	EPE_WRITE(TXStsQBAdd, (u_int32_t)addr);
	EPE_WRITE(TXStsQCurAdd, (u_int32_t)addr);
	EPE_WRITE(TXStsQBLen, TX_QLEN * sizeof(u_int32_t));

	addr += (sc->RXDQ - sc->TXStsQ) * sizeof(u_int32_t);
	EPE_WRITE(RXDQBAdd, (u_int32_t)addr);
	EPE_WRITE(RXDCurAdd, (u_int32_t)addr);
	EPE_WRITE(RXDQBLen, RX_QLEN * 2 * sizeof(u_int32_t));

	addr += (sc->RXStsQ - sc->RXDQ) * sizeof(u_int32_t);
	EPE_WRITE(RXStsQBAdd, (u_int32_t)addr);
	EPE_WRITE(RXStsQCurAdd, (u_int32_t)addr);
	EPE_WRITE(RXStsQBLen, RX_QLEN * 2 * sizeof(u_int32_t));

	/* Populate the RXDQ with mbufs */
	for (i = 0; i < RX_QLEN; i++) {
		struct mbuf *m;

		bus_dmamap_create(sc->sc_dmat, MCLBYTES, TX_QLEN/4, MCLBYTES,
		    0, BUS_DMA_WAITOK, &sc->rxq[i].m_dmamap);
		MGETHDR(m, M_WAIT, MT_DATA);
		MCLGET(m, M_WAIT);
		sc->rxq[i].m = m;
		bus_dmamap_load(sc->sc_dmat, sc->rxq[i].m_dmamap,
		    m->m_ext.ext_buf, MCLBYTES, NULL,
		    BUS_DMA_WAITOK);

		sc->RXDQ[i * 2] = sc->rxq[i].m_dmamap->dm_segs[0].ds_addr;
		sc->RXDQ[i * 2 + 1] = (i << 16) | MCLBYTES;
		bus_dmamap_sync(sc->sc_dmat, sc->rxq[i].m_dmamap, 0,
		    MCLBYTES, BUS_DMASYNC_PREREAD);
	}

	for (i = 0; i < TX_QLEN; i++) {
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
		    (BUS_DMA_WAITOK|BUS_DMA_ALLOCNOW),
		    &sc->txq[i].m_dmamap);
		sc->txq[i].m = NULL;
		sc->TXDQ[i * 2 + 1] = (i << 16);
	}
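
/*
 * Each RX descriptor initialized above pairs a cluster's bus address with
 * a control word: buffer index in the upper half, buffer length in the
 * lower.  For example, queue entry 3 ends up as, in effect:
 *
 *	sc->RXDQ[6] = <bus address of cluster 3>;
 *	sc->RXDQ[7] = (3 << 16) | MCLBYTES;
 */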

	/* Divide HCLK by 32 for MDC clock */
	if (device_cfdata(&sc->sc_dev)->cf_flags)
		mdcdiv = device_cfdata(&sc->sc_dev)->cf_flags;
	EPE_WRITE(SelfCtl, (SelfCtl_MDCDIV(mdcdiv)|SelfCtl_PSPRS));

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = epe_mii_readreg;
	sc->sc_mii.mii_writereg = epe_mii_writereg;
	sc->sc_mii.mii_statchg = epe_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, epe_mediachange,
	    epe_mediastatus);
	mii_attach((struct device *)sc, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	EPE_WRITE(BMCtl, BMCtl_RxEn|BMCtl_TxEn);
	EPE_WRITE(IntEn, IntEn_REOFIE);
	/* maximum valid max frame length */
	EPE_WRITE(MaxFrmLen, (0x7ff << 16)|MHLEN);
	/* wait for receiver ready */
	while ((EPE_READ(BMSts) & BMSts_RxAct) == 0)
		;
	/* enqueue the entries in RXStsQ and RXDQ */
	CTRLPAGE_DMASYNC(0, sc->ctrlpage_dmamap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
	EPE_WRITE(RXDEnq, RX_QLEN - 1);
	EPE_WRITE(RXStsEnq, RX_QLEN - 1);

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;

	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_flags = IFF_BROADCAST|IFF_SIMPLEX|IFF_NOTRAILERS|IFF_MULTICAST;
	ifp->if_ioctl = epe_ifioctl;
	ifp->if_start = epe_ifstart;
	ifp->if_watchdog = epe_ifwatchdog;
	ifp->if_init = epe_ifinit;
	ifp->if_stop = epe_ifstop;
	ifp->if_timer = 0;
	ifp->if_softc = sc;
	IFQ_SET_READY(&ifp->if_snd);
	if_attach(ifp);
	ether_ifattach(ifp, (sc)->sc_enaddr);
}

static int
epe_mediachange(struct ifnet *ifp)
{
	if (ifp->if_flags & IFF_UP)
		epe_ifinit(ifp);
	return (0);
}

static void
epe_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct epe_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
}

int
epe_mii_readreg(struct device *self, int phy, int reg)
{
	u_int32_t d, v;
	struct epe_softc *sc;

	sc = (struct epe_softc *)self;
	d = EPE_READ(SelfCtl);
	EPE_WRITE(SelfCtl, d & ~SelfCtl_PSPRS);	/* no preamble suppress */
	EPE_WRITE(MIICmd, (MIICmd_READ | (phy << 5) | reg));
	while (EPE_READ(MIISts) & MIISts_BUSY)
		;
	v = EPE_READ(MIIData);
	EPE_WRITE(SelfCtl, d);			/* restore old value */
	return v;
}

void
epe_mii_writereg(struct device *self, int phy, int reg, int val)
{
	struct epe_softc *sc;
	u_int32_t d;

	sc = (struct epe_softc *)self;
	d = EPE_READ(SelfCtl);
	EPE_WRITE(SelfCtl, d & ~SelfCtl_PSPRS);	/* no preamble suppress */
	EPE_WRITE(MIIData, val);
	EPE_WRITE(MIICmd, (MIICmd_WRITE | (phy << 5) | reg));
	while (EPE_READ(MIISts) & MIISts_BUSY)
		;
	EPE_WRITE(SelfCtl, d);			/* restore old value */
}
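
/*
 * The MII command word used above packs the PHY address into bits 9..5
 * and the register number into bits 4..0.  Reading, say, the BMSR
 * (register 1) of the PHY at address 1 therefore issues
 * MIICmd_READ | (1 << 5) | 1 and spins on MIISts_BUSY until MIIData
 * holds the result.
 */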

void
epe_statchg(struct device *self)
{
	struct epe_softc *sc = (struct epe_softc *)self;
	u_int32_t reg;

	/*
	 * We must keep the MAC and the PHY in sync as
	 * to the status of full-duplex!
	 */
	reg = EPE_READ(TestCtl);
	if (sc->sc_mii.mii_media_active & IFM_FDX)
		reg |= TestCtl_MFDX;
	else
		reg &= ~TestCtl_MFDX;
	EPE_WRITE(TestCtl, reg);
}

void
epe_tick(void *arg)
{
	struct epe_softc *sc = (struct epe_softc *)arg;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	int s;
	u_int32_t misses;

	ifp->if_collisions += EPE_READ(TXCollCnt);
	/* These misses are ok, they will happen if the RAM/CPU can't keep up */
	misses = EPE_READ(RXMissCnt);
	if (misses > 0)
		printf("%s: %d rx misses\n", sc->sc_dev.dv_xname, misses);

	s = splnet();
	if (epe_gctx(sc) > 0 && IFQ_IS_EMPTY(&ifp->if_snd) == 0) {
		epe_ifstart(ifp);
	}
	splx(s);

	mii_tick(&sc->sc_mii);
	callout_reset(&sc->epe_tick_ch, hz, epe_tick, sc);
}

static int
epe_ifioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct epe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error;

	s = splnet();
	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING)
				epe_setaddr(ifp);
			error = 0;
		}
	}
	splx(s);
	return error;
}
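
/*
 * Transmit path.  Each TX descriptor is two words: the segment's bus
 * address, then a control word with the segment length in the low bits,
 * the buffer index in bits 30..16 and, on the final segment only, what
 * appears to be an end-of-frame flag in bit 31.  The MAC wants 32-bit
 * aligned buffers, so unaligned or overly fragmented chains are first
 * copied into fresh aligned storage.
 */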
static void
epe_ifstart(struct ifnet *ifp)
{
	struct epe_softc *sc = (struct epe_softc *)ifp->if_softc;
	struct mbuf *m;
	bus_dma_segment_t *segs;
	int s, bi, err, nsegs, ndq;

	s = splnet();
start:
	ndq = 0;
	if (sc->TXDQ_avail == 0) {
		if (epe_gctx(sc) == 0) {
			/* Enable End-Of-TX-Chain interrupt */
			EPE_WRITE(IntEn, IntEn_REOFIE|IntEn_ECIE);
			ifp->if_flags |= IFF_OACTIVE;
			ifp->if_timer = 10;
			splx(s);
			return;
		}
	}

	bi = sc->TXDQ_cur - sc->TXDQ;

	IFQ_POLL(&ifp->if_snd, m);
	if (m == NULL) {
		splx(s);
		return;
	}
more:
	if ((err = bus_dmamap_load_mbuf(sc->sc_dmat, sc->txq[bi].m_dmamap, m,
	    BUS_DMA_NOWAIT)) ||
	    sc->txq[bi].m_dmamap->dm_segs[0].ds_addr & 0x3 ||
	    sc->txq[bi].m_dmamap->dm_nsegs > (sc->TXDQ_avail - ndq)) {
		/* Copy entire mbuf chain to new and 32-bit aligned storage */
		struct mbuf *mn;

		if (err == 0)
			bus_dmamap_unload(sc->sc_dmat, sc->txq[bi].m_dmamap);

		MGETHDR(mn, M_DONTWAIT, MT_DATA);
		if (mn == NULL)
			goto stop;
		if (m->m_pkthdr.len > (MHLEN & (~0x3))) {
			MCLGET(mn, M_DONTWAIT);
			if ((mn->m_flags & M_EXT) == 0) {
				m_freem(mn);
				goto stop;
			}
		}
		mn->m_data = (void *)(((u_int32_t)mn->m_data + 0x3) & (~0x3));
		m_copydata(m, 0, m->m_pkthdr.len, mtod(mn, void *));
		mn->m_pkthdr.len = mn->m_len = m->m_pkthdr.len;
		IFQ_DEQUEUE(&ifp->if_snd, m);
		m_freem(m);
		m = mn;
		bus_dmamap_load_mbuf(sc->sc_dmat, sc->txq[bi].m_dmamap, m,
		    BUS_DMA_NOWAIT);
	} else {
		IFQ_DEQUEUE(&ifp->if_snd, m);
	}

#if NBPFILTER > 0
	if (ifp->if_bpf)
		bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

	nsegs = sc->txq[bi].m_dmamap->dm_nsegs;
	segs = sc->txq[bi].m_dmamap->dm_segs;
	bus_dmamap_sync(sc->sc_dmat, sc->txq[bi].m_dmamap, 0,
	    sc->txq[bi].m_dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* XXX: This driver hasn't been tested w/nsegs > 1 */
	while (nsegs > 0) {
		nsegs--;
		sc->txq[bi].m = m;
		sc->TXDQ[bi * 2] = segs->ds_addr;
		if (nsegs == 0)
			sc->TXDQ[bi * 2 + 1] = segs->ds_len | (bi << 16) |
			    (1U << 31);
		else
			sc->TXDQ[bi * 2 + 1] = segs->ds_len | (bi << 16);
		segs++;
		bi = (bi + 1) % TX_QLEN;
		ndq++;
	}

	/*
	 * Enqueue another.  Don't do more than half the available
	 * descriptors before telling the MAC about them
	 */
	if ((sc->TXDQ_avail - ndq) > 0 && ndq < TX_QLEN / 2) {
		IFQ_POLL(&ifp->if_snd, m);
		if (m != NULL) {
			goto more;
		}
	}
stop:
	if (ndq > 0) {
		sc->TXDQ_avail -= ndq;
		sc->TXDQ_cur = &sc->TXDQ[bi];
		CTRLPAGE_DMASYNC(0, TX_QLEN * 2 * sizeof(u_int32_t),
		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
		EPE_WRITE(TXDEnq, ndq);
	}

	if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
		goto start;

	splx(s);
	return;
}

static void
epe_ifwatchdog(struct ifnet *ifp)
{
	struct epe_softc *sc = (struct epe_softc *)ifp->if_softc;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;
	printf("%s: device timeout, BMCtl = 0x%08x, BMSts = 0x%08x\n",
	    sc->sc_dev.dv_xname, EPE_READ(BMCtl), EPE_READ(BMSts));
}

static int
epe_ifinit(struct ifnet *ifp)
{
	struct epe_softc *sc = ifp->if_softc;
	int s = splnet();

	callout_stop(&sc->epe_tick_ch);
	EPE_WRITE(RXCtl, RXCtl_IA0|RXCtl_BA|RXCtl_RCRCA|RXCtl_SRxON);
	EPE_WRITE(TXCtl, TXCtl_STxON);
	EPE_WRITE(GIIntMsk, GIIntMsk_INT);	/* start interrupting */
	mii_mediachg(&sc->sc_mii);
	callout_reset(&sc->epe_tick_ch, hz, epe_tick, sc);
	ifp->if_flags |= IFF_RUNNING;
	splx(s);
	return 0;
}

static void
epe_ifstop(struct ifnet *ifp, int disable)
{
	struct epe_softc *sc = ifp->if_softc;

	EPE_WRITE(RXCtl, 0);
	EPE_WRITE(TXCtl, 0);
	EPE_WRITE(GIIntMsk, 0);
	callout_stop(&sc->epe_tick_ch);

	/* Down the MII. */
	mii_down(&sc->sc_mii);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
	sc->sc_mii.mii_media_status &= ~IFM_ACTIVE;
}
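
/*
 * Receive filter setup.  The MAC offers perfect-match filter slots
 * selected via the Address Filter Pointer (AFP 0 holds the station
 * address; AFP 2 and 3 hold up to two multicast addresses) plus a
 * 64-bit multicast hash table (AFP 7) indexed by the top 6 bits of the
 * little-endian CRC32 of the destination address, i.e. for an address
 * addr:
 *
 *	h = ether_crc32_le(addr, ETHER_ADDR_LEN) >> 26;
 *	hashes[h / 32] |= (1 << (h % 32));
 */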
static void
epe_setaddr(struct ifnet *ifp)
{
	struct epe_softc *sc = ifp->if_softc;
	struct ethercom *ac = &sc->sc_ec;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int8_t ias[2][ETHER_ADDR_LEN];
	u_int32_t h, nma = 0, hashes[2] = { 0, 0 };
	u_int32_t rxctl = EPE_READ(RXCtl);

	/* disable receiver temporarily */
	EPE_WRITE(RXCtl, rxctl & ~RXCtl_SRxON);

	rxctl &= ~(RXCtl_MA|RXCtl_PA|RXCtl_IA2|RXCtl_IA3);

	if (ifp->if_flags & IFF_PROMISC) {
		rxctl |= RXCtl_PA;
	}

	ifp->if_flags &= ~IFF_ALLMULTI;

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			rxctl &= ~(RXCtl_IA2|RXCtl_IA3);
			rxctl |= RXCtl_MA;
			hashes[0] = 0xffffffffUL;
			hashes[1] = 0xffffffffUL;
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}

		if (nma < 2) {
			/* We can program 2 perfect address filters for mcast */
			memcpy(ias[nma], enm->enm_addrlo, ETHER_ADDR_LEN);
			rxctl |= (1 << (nma + 2));
		} else {
			/*
			 * XXX: Datasheet is not very clear here, I'm not sure
			 * if I'm doing this right.  --joff
			 */
			h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);

			/* Just want the 6 most-significant bits. */
			h = h >> 26;

			hashes[h / 32] |= (1 << (h % 32));
			rxctl |= RXCtl_MA;
		}
		ETHER_NEXT_MULTI(step, enm);
		nma++;
	}

	EPE_WRITE(AFP, 0);
	bus_space_write_region_1(sc->sc_iot, sc->sc_ioh, EPE_IndAd,
	    sc->sc_enaddr, ETHER_ADDR_LEN);
	if (rxctl & RXCtl_IA2) {
		EPE_WRITE(AFP, 2);
		bus_space_write_region_1(sc->sc_iot, sc->sc_ioh, EPE_IndAd,
		    ias[0], ETHER_ADDR_LEN);
	}
	if (rxctl & RXCtl_IA3) {
		EPE_WRITE(AFP, 3);
		bus_space_write_region_1(sc->sc_iot, sc->sc_ioh, EPE_IndAd,
		    ias[1], ETHER_ADDR_LEN);
	}
	/* Program the hash table if any bit is set (was `&&', which would
	 * skip the update when only one of the two words is nonzero). */
	if (hashes[0] != 0 || hashes[1] != 0) {
		EPE_WRITE(AFP, 7);
		EPE_WRITE(HashTbl, hashes[0]);
		EPE_WRITE(HashTbl + 4, hashes[1]);
	}
	EPE_WRITE(RXCtl, rxctl);
}