/*	$NetBSD: epe.c,v 1.48 2020/02/19 02:51:54 thorpej Exp $	*/

/*
 * Copyright (c) 2004 Jesse Off
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: epe.c,v 1.48 2020/02/19 02:51:54 thorpej Exp $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ioctl.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/time.h>
#include <sys/device.h>
#include <uvm/uvm_extern.h>

#include <sys/bus.h>
#include <machine/intr.h>

#include <arm/cpufunc.h>

#include <arm/ep93xx/epsocvar.h>
#include <arm/ep93xx/ep93xxvar.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/if_ether.h>
#include <net/bpf.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_inarp.h>
#endif

#include <arm/ep93xx/ep93xxreg.h>
#include <arm/ep93xx/epereg.h>
#include <arm/ep93xx/epevar.h>

#define DEFAULT_MDCDIV	32

#ifndef EPE_FAST
#define EPE_FAST
#endif

#ifndef EPE_FAST
#define EPE_READ(x) \
	bus_space_read_4(sc->sc_iot, sc->sc_ioh, (EPE_ ## x))
#define EPE_WRITE(x, y) \
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, (EPE_ ## x), (y))
#define CTRLPAGE_DMASYNC(x, y, z) \
	bus_dmamap_sync(sc->sc_dmat, sc->ctrlpage_dmamap, (x), (y), (z))
#else
#define EPE_READ(x) *(volatile uint32_t *) \
	(EP93XX_AHB_VBASE + EP93XX_AHB_EPE + (EPE_ ## x))
#define EPE_WRITE(x, y) *(volatile uint32_t *) \
	(EP93XX_AHB_VBASE + EP93XX_AHB_EPE + (EPE_ ## x)) = (y)
#define CTRLPAGE_DMASYNC(x, y, z)
#endif /* ! EPE_FAST */
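
/*
 * Note: the #ifndef/#define block above forces EPE_FAST on unconditionally,
 * so the bus_space(9) accessors in the first branch are effectively dead
 * code kept for reference.  In the EPE_FAST case the MAC registers are
 * accessed through the statically mapped AHB window, and since the control
 * page is mapped BUS_DMA_COHERENT, CTRLPAGE_DMASYNC() reduces to a no-op.
 */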

static int epe_match(device_t, cfdata_t, void *);
static void epe_attach(device_t, device_t, void *);
static void epe_init(struct epe_softc *);
static int epe_intr(void *arg);
static int epe_gctx(struct epe_softc *);
int epe_mii_readreg(device_t, int, int, uint16_t *);
int epe_mii_writereg(device_t, int, int, uint16_t);
void epe_statchg(struct ifnet *);
void epe_tick(void *);
static int epe_ifioctl(struct ifnet *, u_long, void *);
static void epe_ifstart(struct ifnet *);
static void epe_ifwatchdog(struct ifnet *);
static int epe_ifinit(struct ifnet *);
static void epe_ifstop(struct ifnet *, int);
static void epe_setaddr(struct ifnet *);

CFATTACH_DECL_NEW(epe, sizeof(struct epe_softc),
    epe_match, epe_attach, NULL, NULL);

static int
epe_match(device_t parent, cfdata_t match, void *aux)
{
	return 2;
}

static void
epe_attach(device_t parent, device_t self, void *aux)
{
	struct epe_softc *sc = device_private(self);
	struct epsoc_attach_args *sa;
	prop_data_t enaddr;

	aprint_normal("\n");
	sa = aux;
	sc->sc_dev = self;
	sc->sc_iot = sa->sa_iot;
	sc->sc_intr = sa->sa_intr;
	sc->sc_dmat = sa->sa_dmat;

	if (bus_space_map(sa->sa_iot, sa->sa_addr, sa->sa_size,
	    0, &sc->sc_ioh))
		panic("%s: Cannot map registers", device_xname(self));

	/* Fetch the Ethernet address from property if set. */
	enaddr = prop_dictionary_get(device_properties(self), "mac-address");
	if (enaddr != NULL) {
		KASSERT(prop_object_type(enaddr) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(enaddr) == ETHER_ADDR_LEN);
		memcpy(sc->sc_enaddr, prop_data_data_nocopy(enaddr),
		    ETHER_ADDR_LEN);
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, EPE_AFP, 0);
		bus_space_write_region_1(sc->sc_iot, sc->sc_ioh, EPE_IndAd,
		    sc->sc_enaddr, ETHER_ADDR_LEN);
	}

	ep93xx_intr_establish(sc->sc_intr, IPL_NET, epe_intr, sc);
	epe_init(sc);
}

static int
epe_gctx(struct epe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint32_t *cur, ndq = 0;

	/* Handle transmit completions */
	cur = (uint32_t *)(EPE_READ(TXStsQCurAdd) -
	    sc->ctrlpage_dsaddr + (char *)sc->ctrlpage);

	if (sc->TXStsQ_cur != cur) {
		CTRLPAGE_DMASYNC(TX_QLEN * 2 * sizeof(uint32_t),
		    TX_QLEN * sizeof(uint32_t), BUS_DMASYNC_PREREAD);
	} else
		return 0;

	do {
		uint32_t tbi = *sc->TXStsQ_cur & 0x7fff;
		struct mbuf *m = sc->txq[tbi].m;

		if ((*sc->TXStsQ_cur & TXStsQ_TxWE) == 0)
			if_statinc(ifp, if_oerrors);

		bus_dmamap_unload(sc->sc_dmat, sc->txq[tbi].m_dmamap);
		m_freem(m);
		do {
			sc->txq[tbi].m = NULL;
			ndq++;
			tbi = (tbi + 1) % TX_QLEN;
		} while (sc->txq[tbi].m == m);

		if_statinc(ifp, if_opackets);
		sc->TXStsQ_cur++;
		if (sc->TXStsQ_cur >= sc->TXStsQ + TX_QLEN) {
			sc->TXStsQ_cur = sc->TXStsQ;
		}
	} while (sc->TXStsQ_cur != cur);

	sc->TXDQ_avail += ndq;
	if (ifp->if_flags & IFF_OACTIVE) {
		ifp->if_flags &= ~IFF_OACTIVE;
		/* Disable end-of-tx-chain interrupt */
		EPE_WRITE(IntEn, IntEn_REOFIE);
	}
	return ndq;
}
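
/*
 * Both epe_gctx() above and epe_intr() below recover a CPU pointer from the
 * hardware's current-descriptor register the same way: the register holds a
 * bus address inside the control page, so subtracting the page's bus address
 * (ctrlpage_dsaddr) and adding its kernel virtual address (ctrlpage) yields
 * the position the MAC has advanced to, e.g.:
 *
 *	cur = (uint32_t *)(EPE_READ(RXStsQCurAdd) -
 *	    sc->ctrlpage_dsaddr + (char *)sc->ctrlpage);
 */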

static int
epe_intr(void *arg)
{
	struct epe_softc *sc = (struct epe_softc *)arg;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	uint32_t ndq = 0, irq, *cur;

	irq = EPE_READ(IntStsC);
begin:
	cur = (uint32_t *)(EPE_READ(RXStsQCurAdd) -
	    sc->ctrlpage_dsaddr + (char *)sc->ctrlpage);
	CTRLPAGE_DMASYNC(TX_QLEN * 3 * sizeof(uint32_t),
	    RX_QLEN * 4 * sizeof(uint32_t),
	    BUS_DMASYNC_PREREAD);
	while (sc->RXStsQ_cur != cur) {
		if ((sc->RXStsQ_cur[0] & (RXStsQ_RWE | RXStsQ_RFP | RXStsQ_EOB))
		    == (RXStsQ_RWE | RXStsQ_RFP | RXStsQ_EOB)) {
			uint32_t bi = (sc->RXStsQ_cur[1] >> 16) & 0x7fff;
			uint32_t fl = sc->RXStsQ_cur[1] & 0xffff;
			struct mbuf *m;

			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m != NULL)
				MCLGET(m, M_DONTWAIT);
			if (m != NULL && (m->m_flags & M_EXT)) {
				bus_dmamap_unload(sc->sc_dmat,
				    sc->rxq[bi].m_dmamap);
				m_set_rcvif(sc->rxq[bi].m, ifp);
				sc->rxq[bi].m->m_pkthdr.len =
				    sc->rxq[bi].m->m_len = fl;
				if_percpuq_enqueue(ifp->if_percpuq,
				    sc->rxq[bi].m);
				sc->rxq[bi].m = m;
				bus_dmamap_load(sc->sc_dmat,
				    sc->rxq[bi].m_dmamap,
				    m->m_ext.ext_buf, MCLBYTES,
				    NULL, BUS_DMA_NOWAIT);
				sc->RXDQ[bi * 2] =
				    sc->rxq[bi].m_dmamap->dm_segs[0].ds_addr;
			} else {
				/* Drop packets until we can get replacement
				 * empty mbufs for the RXDQ.
				 */
				if (m != NULL)
					m_freem(m);

				if_statinc(ifp, if_ierrors);
			}
		} else
			if_statinc(ifp, if_ierrors);

		ndq++;

		sc->RXStsQ_cur += 2;
		if (sc->RXStsQ_cur >= sc->RXStsQ + (RX_QLEN * 2))
			sc->RXStsQ_cur = sc->RXStsQ;
	}

	if (ndq > 0) {
		CTRLPAGE_DMASYNC(TX_QLEN * 3 * sizeof(uint32_t),
		    RX_QLEN * 4 * sizeof(uint32_t),
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
		EPE_WRITE(RXStsEnq, ndq);
		EPE_WRITE(RXDEnq, ndq);
		ndq = 0;
	}

	if (epe_gctx(sc) > 0 && IFQ_IS_EMPTY(&ifp->if_snd) == 0) {
		if_schedule_deferred_start(ifp);
	}

	irq = EPE_READ(IntStsC);
	if ((irq & (IntSts_RxSQ | IntSts_ECI)) != 0)
		goto begin;

	return 1;
}


static void
epe_init(struct epe_softc *sc)
{
	bus_dma_segment_t segs;
	char *addr;
	int rsegs, err, i;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	struct mii_data *mii = &sc->sc_mii;
	int mdcdiv = DEFAULT_MDCDIV;

	callout_init(&sc->epe_tick_ch, 0);

	/* Select primary Individual Address in Address Filter Pointer */
	EPE_WRITE(AFP, 0);
	/* Read the Ethernet MAC address; it should already have been set
	 * by the boot ROM. */
	bus_space_read_region_1(sc->sc_iot, sc->sc_ioh, EPE_IndAd,
	    sc->sc_enaddr, ETHER_ADDR_LEN);
	aprint_normal_dev(sc->sc_dev, "MAC address %s\n",
	    ether_sprintf(sc->sc_enaddr));

	/* Soft Reset the MAC */
	EPE_WRITE(SelfCtl, SelfCtl_RESET);
	while (EPE_READ(SelfCtl) & SelfCtl_RESET)
		;

	/* Suggested magic initialization values from the datasheet */
	EPE_WRITE(RXBufThrshld, 0x800040);
	EPE_WRITE(TXBufThrshld, 0x200010);
	EPE_WRITE(RXStsThrshld, 0x40002);
	EPE_WRITE(TXStsThrshld, 0x40002);
	EPE_WRITE(RXDThrshld, 0x40002);
	EPE_WRITE(TXDThrshld, 0x40002);

	/* Allocate a page of memory for descriptor and status queues */
	err = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, 0, PAGE_SIZE,
	    &segs, 1, &rsegs, BUS_DMA_WAITOK);
	if (err == 0) {
		err = bus_dmamem_map(sc->sc_dmat, &segs, 1, PAGE_SIZE,
		    &sc->ctrlpage, (BUS_DMA_WAITOK | BUS_DMA_COHERENT));
	}
	if (err == 0) {
		err = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE,
		    0, BUS_DMA_WAITOK, &sc->ctrlpage_dmamap);
	}
	if (err == 0) {
		err = bus_dmamap_load(sc->sc_dmat, sc->ctrlpage_dmamap,
		    sc->ctrlpage, PAGE_SIZE, NULL, BUS_DMA_WAITOK);
	}
	if (err != 0) {
		panic("%s: Cannot get DMA memory", device_xname(sc->sc_dev));
	}
	sc->ctrlpage_dsaddr = sc->ctrlpage_dmamap->dm_segs[0].ds_addr;
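
	/*
	 * Layout of the control page as established below (a summary of
	 * the code, not taken from the datasheet; offsets in 32-bit words):
	 *
	 *	TXDQ:	words [0, TX_QLEN*2)		TX descriptors, 2 words each
	 *	TXStsQ:	words [TX_QLEN*2, TX_QLEN*3)	TX status, 1 word each
	 *	RXDQ:	next RX_QLEN*2 words		RX descriptors, 2 words each
	 *	RXStsQ:	next RX_QLEN*2 words		RX status, 2 words each
	 */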
	memset(sc->ctrlpage, 0, PAGE_SIZE);

	/* Set up pointers to start of each queue in kernel addr space.
	 * Each TX descriptor, RX descriptor and RX status entry uses two
	 * words; each TX status entry uses one.
	 */
	sc->TXDQ = (uint32_t *)sc->ctrlpage;
	sc->TXDQ_cur = sc->TXDQ;
	sc->TXDQ_avail = TX_QLEN - 1;
	sc->TXStsQ = &sc->TXDQ[TX_QLEN * 2];
	sc->TXStsQ_cur = sc->TXStsQ;
	sc->RXDQ = &sc->TXStsQ[TX_QLEN];
	sc->RXStsQ = &sc->RXDQ[RX_QLEN * 2];
	sc->RXStsQ_cur = sc->RXStsQ;

	/* Program each queue's start addr, cur addr, and len registers
	 * with the physical addresses.
	 */
	addr = (char *)sc->ctrlpage_dmamap->dm_segs[0].ds_addr;
	EPE_WRITE(TXDQBAdd, (uint32_t)addr);
	EPE_WRITE(TXDQCurAdd, (uint32_t)addr);
	EPE_WRITE(TXDQBLen, TX_QLEN * 2 * sizeof(uint32_t));

	addr += (sc->TXStsQ - sc->TXDQ) * sizeof(uint32_t);
	EPE_WRITE(TXStsQBAdd, (uint32_t)addr);
	EPE_WRITE(TXStsQCurAdd, (uint32_t)addr);
	EPE_WRITE(TXStsQBLen, TX_QLEN * sizeof(uint32_t));

	addr += (sc->RXDQ - sc->TXStsQ) * sizeof(uint32_t);
	EPE_WRITE(RXDQBAdd, (uint32_t)addr);
	EPE_WRITE(RXDCurAdd, (uint32_t)addr);
	EPE_WRITE(RXDQBLen, RX_QLEN * 2 * sizeof(uint32_t));

	addr += (sc->RXStsQ - sc->RXDQ) * sizeof(uint32_t);
	EPE_WRITE(RXStsQBAdd, (uint32_t)addr);
	EPE_WRITE(RXStsQCurAdd, (uint32_t)addr);
	EPE_WRITE(RXStsQBLen, RX_QLEN * 2 * sizeof(uint32_t));

	/* Populate the RXDQ with mbufs */
	for (i = 0; i < RX_QLEN; i++) {
		struct mbuf *m;

		bus_dmamap_create(sc->sc_dmat, MCLBYTES, TX_QLEN/4, MCLBYTES,
		    0, BUS_DMA_WAITOK, &sc->rxq[i].m_dmamap);
		MGETHDR(m, M_WAIT, MT_DATA);
		MCLGET(m, M_WAIT);
		sc->rxq[i].m = m;
		bus_dmamap_load(sc->sc_dmat, sc->rxq[i].m_dmamap,
		    m->m_ext.ext_buf, MCLBYTES, NULL, BUS_DMA_WAITOK);

		sc->RXDQ[i * 2] = sc->rxq[i].m_dmamap->dm_segs[0].ds_addr;
		sc->RXDQ[i * 2 + 1] = (i << 16) | MCLBYTES;
		bus_dmamap_sync(sc->sc_dmat, sc->rxq[i].m_dmamap, 0,
		    MCLBYTES, BUS_DMASYNC_PREREAD);
	}

	for (i = 0; i < TX_QLEN; i++) {
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
		    (BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW),
		    &sc->txq[i].m_dmamap);
		sc->txq[i].m = NULL;
		sc->TXDQ[i * 2 + 1] = (i << 16);
	}

	/* Divide HCLK by 32 for the MDC clock, unless a divisor was
	 * supplied via cf_flags. */
	if (device_cfdata(sc->sc_dev)->cf_flags)
		mdcdiv = device_cfdata(sc->sc_dev)->cf_flags;
	EPE_WRITE(SelfCtl, (SelfCtl_MDCDIV(mdcdiv) | SelfCtl_PSPRS));

	mii->mii_ifp = ifp;
	mii->mii_readreg = epe_mii_readreg;
	mii->mii_writereg = epe_mii_writereg;
	mii->mii_statchg = epe_statchg;
	sc->sc_ec.ec_mii = mii;
	ifmedia_init(&mii->mii_media, IFM_IMASK, ether_mediachange,
	    ether_mediastatus);
	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);

	EPE_WRITE(BMCtl, BMCtl_RxEn | BMCtl_TxEn);
	EPE_WRITE(IntEn, IntEn_REOFIE);
	/* maximum valid max frame length */
	EPE_WRITE(MaxFrmLen, (0x7ff << 16) | MHLEN);
	/* wait for receiver ready */
	while ((EPE_READ(BMSts) & BMSts_RxAct) == 0)
		continue;
	/* enqueue the entries in RXStsQ and RXDQ */
	CTRLPAGE_DMASYNC(0, sc->ctrlpage_dmamap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	EPE_WRITE(RXDEnq, RX_QLEN - 1);
	EPE_WRITE(RXStsEnq, RX_QLEN - 1);
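
	/*
	 * Only RX_QLEN - 1 entries are handed to the MAC, and TXDQ_avail
	 * above likewise starts at TX_QLEN - 1: one slot is apparently held
	 * in reserve so that a completely full ring cannot be confused with
	 * an empty one when the hardware and software pointers coincide.
	 */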

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;

	strcpy(ifp->if_xname, device_xname(sc->sc_dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = epe_ifioctl;
	ifp->if_start = epe_ifstart;
	ifp->if_watchdog = epe_ifwatchdog;
	ifp->if_init = epe_ifinit;
	ifp->if_stop = epe_ifstop;
	ifp->if_timer = 0;
	ifp->if_softc = sc;
	IFQ_SET_READY(&ifp->if_snd);
	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, sc->sc_enaddr);
}

int
epe_mii_readreg(device_t self, int phy, int reg, uint16_t *val)
{
	uint32_t d;

	d = EPE_READ(SelfCtl);
	EPE_WRITE(SelfCtl, d & ~SelfCtl_PSPRS); /* no preamble suppress */
	EPE_WRITE(MIICmd, (MIICmd_READ | (phy << 5) | reg));
	while (EPE_READ(MIISts) & MIISts_BUSY)
		;
	*val = EPE_READ(MIIData) & 0xffff;
	EPE_WRITE(SelfCtl, d); /* restore old value */
	return 0;
}

int
epe_mii_writereg(device_t self, int phy, int reg, uint16_t val)
{
	uint32_t d;

	d = EPE_READ(SelfCtl);
	EPE_WRITE(SelfCtl, d & ~SelfCtl_PSPRS); /* no preamble suppress */
	EPE_WRITE(MIIData, val);
	EPE_WRITE(MIICmd, (MIICmd_WRITE | (phy << 5) | reg));
	while (EPE_READ(MIISts) & MIISts_BUSY)
		;
	EPE_WRITE(SelfCtl, d); /* restore old value */

	return 0;
}

void
epe_statchg(struct ifnet *ifp)
{
	struct epe_softc *sc = ifp->if_softc;
	uint32_t reg;

	/*
	 * We must keep the MAC and the PHY in sync as
	 * to the status of full-duplex!
	 */
	reg = EPE_READ(TestCtl);
	if (sc->sc_mii.mii_media_active & IFM_FDX)
		reg |= TestCtl_MFDX;
	else
		reg &= ~TestCtl_MFDX;
	EPE_WRITE(TestCtl, reg);
}

void
epe_tick(void *arg)
{
	struct epe_softc *sc = (struct epe_softc *)arg;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	int s;
	uint32_t misses;

	if_statadd(ifp, if_collisions, EPE_READ(TXCollCnt));
	/* These misses are OK; they will happen if the RAM/CPU can't keep up */
	misses = EPE_READ(RXMissCnt);
	if (misses > 0)
		printf("%s: %u rx misses\n", device_xname(sc->sc_dev), misses);

	s = splnet();
	if (epe_gctx(sc) > 0 && IFQ_IS_EMPTY(&ifp->if_snd) == 0) {
		epe_ifstart(ifp);
	}
	splx(s);

	mii_tick(&sc->sc_mii);
	callout_reset(&sc->epe_tick_ch, hz, epe_tick, sc);
}


static int
epe_ifioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	int s, error;

	s = splnet();
	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			epe_setaddr(ifp);
		error = 0;
	}
	splx(s);
	return error;
}

static void
epe_ifstart(struct ifnet *ifp)
{
	struct epe_softc *sc = (struct epe_softc *)ifp->if_softc;
	struct mbuf *m;
	bus_dma_segment_t *segs;
	int s, bi, err, nsegs, ndq;

	s = splnet();
start:
	ndq = 0;
	if (sc->TXDQ_avail == 0) {
		if (epe_gctx(sc) == 0) {
			/* Enable End-Of-TX-Chain interrupt */
			EPE_WRITE(IntEn, IntEn_REOFIE | IntEn_ECIE);
			ifp->if_flags |= IFF_OACTIVE;
			ifp->if_timer = 10;
			splx(s);
			return;
		}
	}

	bi = sc->TXDQ_cur - sc->TXDQ;

	IFQ_POLL(&ifp->if_snd, m);
	if (m == NULL) {
		splx(s);
		return;
	}
more:
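	/*
	 * Three cases force a copy into fresh storage: the DMA load of the
	 * mbuf chain failed, the first segment is not 4-byte aligned
	 * (presumably a MAC alignment requirement), or the chain needs more
	 * TX descriptors than are currently free.
	 */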
	if ((err = bus_dmamap_load_mbuf(sc->sc_dmat, sc->txq[bi].m_dmamap, m,
	    BUS_DMA_NOWAIT)) ||
	    sc->txq[bi].m_dmamap->dm_segs[0].ds_addr & 0x3 ||
	    sc->txq[bi].m_dmamap->dm_nsegs > (sc->TXDQ_avail - ndq)) {
		/* Copy entire mbuf chain to new and 32-bit aligned storage */
		struct mbuf *mn;

		if (err == 0)
			bus_dmamap_unload(sc->sc_dmat, sc->txq[bi].m_dmamap);

		MGETHDR(mn, M_DONTWAIT, MT_DATA);
		if (mn == NULL)
			goto stop;
		if (m->m_pkthdr.len > (MHLEN & (~0x3))) {
			MCLGET(mn, M_DONTWAIT);
			if ((mn->m_flags & M_EXT) == 0) {
				m_freem(mn);
				goto stop;
			}
		}
		mn->m_data = (void *)(((uint32_t)mn->m_data + 0x3) & (~0x3));
		m_copydata(m, 0, m->m_pkthdr.len, mtod(mn, void *));
		mn->m_pkthdr.len = mn->m_len = m->m_pkthdr.len;
		IFQ_DEQUEUE(&ifp->if_snd, m);
		m_freem(m);
		m = mn;
		bus_dmamap_load_mbuf(sc->sc_dmat, sc->txq[bi].m_dmamap, m,
		    BUS_DMA_NOWAIT);
	} else {
		IFQ_DEQUEUE(&ifp->if_snd, m);
	}

	bpf_mtap(ifp, m, BPF_D_OUT);

	nsegs = sc->txq[bi].m_dmamap->dm_nsegs;
	segs = sc->txq[bi].m_dmamap->dm_segs;
	bus_dmamap_sync(sc->sc_dmat, sc->txq[bi].m_dmamap, 0,
	    sc->txq[bi].m_dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
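
	/*
	 * Each two-word TX descriptor written below is, as read from this
	 * code (not checked against the datasheet): word 0 holds the
	 * segment's bus address; word 1 holds the segment length in the low
	 * bits, the buffer index in bits 16-30, and an end-of-frame flag in
	 * bit 31 on the last segment of the chain.
	 */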
	/* XXX: This driver hasn't been tested w/nsegs > 1 */
	while (nsegs > 0) {
		nsegs--;
		sc->txq[bi].m = m;
		sc->TXDQ[bi * 2] = segs->ds_addr;
		if (nsegs == 0)
			sc->TXDQ[bi * 2 + 1] = segs->ds_len | (bi << 16) |
			    (1U << 31);
		else
			sc->TXDQ[bi * 2 + 1] = segs->ds_len | (bi << 16);
		segs++;
		bi = (bi + 1) % TX_QLEN;
		ndq++;
	}

	/*
	 * Enqueue another. Don't do more than half the available
	 * descriptors before telling the MAC about them.
	 */
	if ((sc->TXDQ_avail - ndq) > 0 && ndq < TX_QLEN / 2) {
		IFQ_POLL(&ifp->if_snd, m);
		if (m != NULL)
			goto more;
	}
stop:
	if (ndq > 0) {
		sc->TXDQ_avail -= ndq;
		sc->TXDQ_cur = &sc->TXDQ[bi];
		CTRLPAGE_DMASYNC(0, TX_QLEN * 2 * sizeof(uint32_t),
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
		EPE_WRITE(TXDEnq, ndq);
	}

	if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
		goto start;

	splx(s);
	return;
}

static void
epe_ifwatchdog(struct ifnet *ifp)
{
	struct epe_softc *sc = (struct epe_softc *)ifp->if_softc;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;
	printf("%s: device timeout, BMCtl = 0x%08x, BMSts = 0x%08x\n",
	    device_xname(sc->sc_dev), EPE_READ(BMCtl), EPE_READ(BMSts));
}

static int
epe_ifinit(struct ifnet *ifp)
{
	struct epe_softc *sc = ifp->if_softc;
	int rc, s = splnet();

	callout_stop(&sc->epe_tick_ch);
	EPE_WRITE(RXCtl, RXCtl_IA0 | RXCtl_BA | RXCtl_RCRCA | RXCtl_SRxON);
	EPE_WRITE(TXCtl, TXCtl_STxON);
	EPE_WRITE(GIIntMsk, GIIntMsk_INT); /* start interrupting */

	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
		rc = 0;
	else if (rc != 0)
		goto out;

	callout_reset(&sc->epe_tick_ch, hz, epe_tick, sc);
	ifp->if_flags |= IFF_RUNNING;
out:
	splx(s);
	return rc;
}

static void
epe_ifstop(struct ifnet *ifp, int disable)
{
	struct epe_softc *sc = ifp->if_softc;

	EPE_WRITE(RXCtl, 0);
	EPE_WRITE(TXCtl, 0);
	EPE_WRITE(GIIntMsk, 0);
	callout_stop(&sc->epe_tick_ch);

	/* Down the MII. */
	mii_down(&sc->sc_mii);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
	sc->sc_mii.mii_media_status &= ~IFM_ACTIVE;
}

static void
epe_setaddr(struct ifnet *ifp)
{
	struct epe_softc *sc = ifp->if_softc;
	struct ethercom *ec = &sc->sc_ec;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t ias[2][ETHER_ADDR_LEN];
	uint32_t h, nma = 0, hashes[2] = { 0, 0 };
	uint32_t rxctl = EPE_READ(RXCtl);

	/* disable receiver temporarily */
	EPE_WRITE(RXCtl, rxctl & ~RXCtl_SRxON);

	rxctl &= ~(RXCtl_MA | RXCtl_PA | RXCtl_IA2 | RXCtl_IA3);

	if (ifp->if_flags & IFF_PROMISC)
		rxctl |= RXCtl_PA;

	ifp->if_flags &= ~IFF_ALLMULTI;

	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range. (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			rxctl &= ~(RXCtl_IA2 | RXCtl_IA3);
			rxctl |= RXCtl_MA;
			hashes[0] = 0xffffffffUL;
			hashes[1] = 0xffffffffUL;
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}

		if (nma < 2) {
			/* We can program 2 perfect address filters for mcast */
			memcpy(ias[nma], enm->enm_addrlo, ETHER_ADDR_LEN);
			rxctl |= (1 << (nma + 2));
		} else {
			/*
			 * XXX: Datasheet is not very clear here, I'm not sure
			 * if I'm doing this right. --joff
			 */
			h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);

			/* Just want the 6 most-significant bits. */
			h = h >> 26;

			hashes[h / 32] |= (1 << (h % 32));
			rxctl |= RXCtl_MA;
		}
		ETHER_NEXT_MULTI(step, enm);
		nma++;
	}
	ETHER_UNLOCK(ec);

	EPE_WRITE(AFP, 0);
	bus_space_write_region_1(sc->sc_iot, sc->sc_ioh, EPE_IndAd,
	    sc->sc_enaddr, ETHER_ADDR_LEN);
	if (rxctl & RXCtl_IA2) {
		EPE_WRITE(AFP, 2);
		bus_space_write_region_1(sc->sc_iot, sc->sc_ioh, EPE_IndAd,
		    ias[0], ETHER_ADDR_LEN);
	}
	if (rxctl & RXCtl_IA3) {
		EPE_WRITE(AFP, 3);
		bus_space_write_region_1(sc->sc_iot, sc->sc_ioh, EPE_IndAd,
		    ias[1], ETHER_ADDR_LEN);
	}
	/* Program the hash table whenever any hash bit is set */
	if (hashes[0] != 0 || hashes[1] != 0) {
		EPE_WRITE(AFP, 7);
		EPE_WRITE(HashTbl, hashes[0]);
		EPE_WRITE(HashTbl + 4, hashes[1]);
	}
	EPE_WRITE(RXCtl, rxctl);
}