/*	$NetBSD: at91emac.c,v 1.32 2020/02/19 02:51:54 thorpej Exp $	*/

/*
 * Copyright (c) 2007 Embedtronics Oy
 * All rights reserved.
 *
 * Based on arch/arm/ep93xx/epe.c
 *
 * Copyright (c) 2004 Jesse Off
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: at91emac.c,v 1.32 2020/02/19 02:51:54 thorpej Exp $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ioctl.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/time.h>
#include <sys/device.h>
#include <uvm/uvm_extern.h>

#include <sys/bus.h>
#include <machine/intr.h>

#include <arm/cpufunc.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/if_ether.h>
#include <net/bpf.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_inarp.h>
#endif

#include <arm/at91/at91var.h>
#include <arm/at91/at91emacreg.h>
#include <arm/at91/at91emacvar.h>

#define DEFAULT_MDCDIV	32

#ifndef EMAC_FAST
#define EMAC_FAST
#endif

#ifndef EMAC_FAST
#define EMAC_READ(x) \
	bus_space_read_4(sc->sc_iot, sc->sc_ioh, (EPE_ ## x))
#define EMAC_WRITE(x, y) \
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, (EPE_ ## x), (y))
#else
#define EMAC_READ(x) ETHREG(x)
#define EMAC_WRITE(x, y) ETHREG(x) = (y)
#endif /* ! EMAC_FAST */

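/*
 * Register access: EMAC_FAST is forced on above, so EMAC_READ()/EMAC_WRITE()
 * expand to direct ETHREG() register accesses rather than going through
 * bus_space(9) on sc->sc_iot/sc->sc_ioh.
 */
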
static int emac_match(device_t, cfdata_t, void *);
static void emac_attach(device_t, device_t, void *);
static void emac_init(struct emac_softc *);
static int emac_intr(void* arg);
static int emac_gctx(struct emac_softc *);
int emac_mii_readreg (device_t, int, int, uint16_t *);
int emac_mii_writereg (device_t, int, int, uint16_t);
void emac_statchg (struct ifnet *);
void emac_tick (void *);
static int emac_ifioctl (struct ifnet *, u_long, void *);
static void emac_ifstart (struct ifnet *);
static void emac_ifwatchdog (struct ifnet *);
static int emac_ifinit (struct ifnet *);
static void emac_ifstop (struct ifnet *, int);
static void emac_setaddr (struct ifnet *);

CFATTACH_DECL_NEW(at91emac, sizeof(struct emac_softc),
    emac_match, emac_attach, NULL, NULL);

#ifdef EMAC_DEBUG
int emac_debug = EMAC_DEBUG;
#define DPRINTFN(n, fmt)	if (emac_debug >= (n)) printf fmt
#else
#define DPRINTFN(n, fmt)
#endif

static int
emac_match(device_t parent, cfdata_t match, void *aux)
{
	if (strcmp(match->cf_name, "at91emac") == 0)
		return 2;
	return 0;
}

static void
emac_attach(device_t parent, device_t self, void *aux)
{
	struct emac_softc *sc = device_private(self);
	struct at91bus_attach_args *sa = aux;
	prop_data_t enaddr;
	uint32_t u;

	printf("\n");
	sc->sc_dev = self;
	sc->sc_iot = sa->sa_iot;
	sc->sc_pid = sa->sa_pid;
	sc->sc_dmat = sa->sa_dmat;

	if (bus_space_map(sa->sa_iot, sa->sa_addr, sa->sa_size, 0, &sc->sc_ioh))
		panic("%s: Cannot map registers", device_xname(self));

	/* enable peripheral clock */
	at91_peripheral_clock(sc->sc_pid, 1);

	/* configure emac: */
	EMAC_WRITE(ETH_CTL, 0);		// disable everything
	EMAC_WRITE(ETH_IDR, -1);	// disable interrupts
	EMAC_WRITE(ETH_RBQP, 0);	// clear receive
	EMAC_WRITE(ETH_CFG,
	    ETH_CFG_CLK_32 | ETH_CFG_SPD | ETH_CFG_FD | ETH_CFG_BIG);
	EMAC_WRITE(ETH_TCR, 0);		// send nothing
	//(void)EMAC_READ(ETH_ISR);
	u = EMAC_READ(ETH_TSR);
	EMAC_WRITE(ETH_TSR, (u & (ETH_TSR_UND | ETH_TSR_COMP | ETH_TSR_BNQ
	    | ETH_TSR_IDLE | ETH_TSR_RLE
	    | ETH_TSR_COL | ETH_TSR_OVR)));
	u = EMAC_READ(ETH_RSR);
	EMAC_WRITE(ETH_RSR, (u & (ETH_RSR_OVR | ETH_RSR_REC | ETH_RSR_BNA)));

	/* Fetch the Ethernet address from property if set. */
	enaddr = prop_dictionary_get(device_properties(self), "mac-address");

	if (enaddr != NULL) {
		KASSERT(prop_object_type(enaddr) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(enaddr) == ETHER_ADDR_LEN);
		memcpy(sc->sc_enaddr, prop_data_data_nocopy(enaddr),
		    ETHER_ADDR_LEN);
	} else {
		static const uint8_t hardcoded[ETHER_ADDR_LEN] = {
			0x00, 0x0d, 0x10, 0x81, 0x0c, 0x94
		};
		memcpy(sc->sc_enaddr, hardcoded, ETHER_ADDR_LEN);
	}

	at91_intr_establish(sc->sc_pid, IPL_NET, INTR_HIGH_LEVEL, emac_intr,
	    sc);
	emac_init(sc);
}

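/*
 * emac_gctx: garbage-collect the transmit queue.  Frees the mbufs of frames
 * the controller has finished with (tracked via txqi/txqc) and clears
 * IFF_OACTIVE.  Returns 0 if the transmitter cannot take another frame
 * (ETH_TSR_BNQ clear), non-zero otherwise.
 */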
static int
emac_gctx(struct emac_softc *sc)
{
	struct ifnet * ifp = &sc->sc_ec.ec_if;
	uint32_t tsr;

	tsr = EMAC_READ(ETH_TSR);
	if (!(tsr & ETH_TSR_BNQ)) {
		// no space left
		return 0;
	}

	// free sent frames
	while (sc->txqc > (tsr & ETH_TSR_IDLE ? 0 : 1)) {
		int i = sc->txqi % TX_QLEN;
		bus_dmamap_sync(sc->sc_dmat, sc->txq[i].m_dmamap, 0,
		    sc->txq[i].m->m_pkthdr.len, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->txq[i].m_dmamap);
		m_freem(sc->txq[i].m);
		DPRINTFN(2,("%s: freed idx #%i mbuf %p (txqc=%i)\n",
		    __FUNCTION__, i, sc->txq[i].m, sc->txqc));
		sc->txq[i].m = NULL;
		sc->txqi = (i + 1) % TX_QLEN;
		sc->txqc--;
	}

	// mark we're free
	if (ifp->if_flags & IFF_OACTIVE) {
		ifp->if_flags &= ~IFF_OACTIVE;
		/* Disable transmit-buffer-free interrupt */
		/*EMAC_WRITE(ETH_IDR, ETH_ISR_TBRE);*/
	}

	return 1;
}

static int
emac_intr(void *arg)
{
	struct emac_softc *sc = (struct emac_softc *)arg;
	struct ifnet * ifp = &sc->sc_ec.ec_if;
	uint32_t imr, isr, ctl;
	int bi;

	imr = ~EMAC_READ(ETH_IMR);
	if (!(imr & (ETH_ISR_RCOM | ETH_ISR_TBRE | ETH_ISR_TIDLE
	    | ETH_ISR_RBNA | ETH_ISR_ROVR))) {
		// interrupt not enabled, can't be us
		return 0;
	}

	isr = EMAC_READ(ETH_ISR) & imr;
#ifdef EMAC_DEBUG
	uint32_t rsr =
#endif
	EMAC_READ(ETH_RSR);		// get receive status register

	DPRINTFN(2, ("%s: isr=0x%08X rsr=0x%08X imr=0x%08X\n", __FUNCTION__,
	    isr, rsr, imr));

	if (isr & ETH_ISR_RBNA) {	// out of receive buffers
		EMAC_WRITE(ETH_RSR, ETH_RSR_BNA);	// clear interrupt
		ctl = EMAC_READ(ETH_CTL);	// get current control register value
		EMAC_WRITE(ETH_CTL, ctl & ~ETH_CTL_RE);	// disable receiver
		EMAC_WRITE(ETH_RSR, ETH_RSR_BNA);	// clear BNA bit
		EMAC_WRITE(ETH_CTL, ctl | ETH_CTL_RE);	// re-enable receiver
		if_statinc(ifp, if_ierrors);
		if_statinc(ifp, if_ipackets);
		DPRINTFN(1,("%s: out of receive buffers\n", __FUNCTION__));
	}
	if (isr & ETH_ISR_ROVR) {
		EMAC_WRITE(ETH_RSR, ETH_RSR_OVR);	// clear interrupt
		if_statinc(ifp, if_ierrors);
		if_statinc(ifp, if_ipackets);
		DPRINTFN(1,("%s: receive overrun\n", __FUNCTION__));
	}

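	/*
	 * Receive-complete handling: walk the RX descriptor ring, hand every
	 * descriptor the EMAC has marked used up the stack, and replace its
	 * mbuf with a fresh cluster before giving the descriptor back to the
	 * hardware.
	 */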
	if (isr & ETH_ISR_RCOM) {	// packet has been received!
		uint32_t nfo;
		// @@@ if memory is NOT coherent, then we're in trouble @@@@
//		bus_dmamap_sync(sc->sc_dmat, sc->rbqpage_dmamap, 0, sc->rbqlen, BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
//		printf("## RDSC[%i].ADDR=0x%08X\n", sc->rxqi % RX_QLEN, sc->RDSC[sc->rxqi % RX_QLEN].Addr);
		DPRINTFN(2,("#2 RDSC[%i].INFO=0x%08X\n", sc->rxqi % RX_QLEN,
		    sc->RDSC[sc->rxqi % RX_QLEN].Info));
		while (sc->RDSC[(bi = sc->rxqi % RX_QLEN)].Addr & ETH_RDSC_F_USED) {
			int fl;
			struct mbuf *m;

			nfo = sc->RDSC[bi].Info;
			fl = (nfo & ETH_RDSC_I_LEN) - 4;
			DPRINTFN(2,("## nfo=0x%08X\n", nfo));

			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m != NULL) MCLGET(m, M_DONTWAIT);
			if (m != NULL && (m->m_flags & M_EXT)) {
				bus_dmamap_sync(sc->sc_dmat,
				    sc->rxq[bi].m_dmamap, 0,
				    MCLBYTES, BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(sc->sc_dmat,
				    sc->rxq[bi].m_dmamap);
				m_set_rcvif(sc->rxq[bi].m, ifp);
				sc->rxq[bi].m->m_pkthdr.len =
				    sc->rxq[bi].m->m_len = fl;
				DPRINTFN(2,("received %u bytes packet\n", fl));
				if_percpuq_enqueue(ifp->if_percpuq, sc->rxq[bi].m);
				if (mtod(m, intptr_t) & 3) {
					m_adj(m, mtod(m, intptr_t) & 3);
				}
				sc->rxq[bi].m = m;
				bus_dmamap_load(sc->sc_dmat,
				    sc->rxq[bi].m_dmamap,
				    m->m_ext.ext_buf, MCLBYTES,
				    NULL, BUS_DMA_NOWAIT);
				bus_dmamap_sync(sc->sc_dmat,
				    sc->rxq[bi].m_dmamap, 0,
				    MCLBYTES, BUS_DMASYNC_PREREAD);
				sc->RDSC[bi].Info = 0;
				sc->RDSC[bi].Addr =
				    sc->rxq[bi].m_dmamap->dm_segs[0].ds_addr
				    | (bi == (RX_QLEN-1) ? ETH_RDSC_F_WRAP : 0);
			} else {
				/* Drop packets until we can get replacement
				 * empty mbufs for the RXDQ.
				 */
				if (m != NULL) {
					m_freem(m);
				}
				if_statinc(ifp, if_ierrors);
			}
			sc->rxqi++;
		}
//		bus_dmamap_sync(sc->sc_dmat, sc->rbqpage_dmamap, 0, sc->rbqlen, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	if (emac_gctx(sc) > 0)
		if_schedule_deferred_start(ifp);
#if 0 // reloop
	irq = EMAC_READ(IntStsC);
	if ((irq & (IntSts_RxSQ | IntSts_ECI)) != 0)
		goto begin;
#endif

	return (1);
}


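/*
 * emac_init: one-time controller setup called from attach.  Quiesces the
 * EMAC, programs the station address, allocates and populates the receive
 * descriptor ring, creates the transmit DMA maps, attaches the MII/PHY and
 * registers the network interface.
 */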
static void
emac_init(struct emac_softc *sc)
{
	bus_dma_segment_t segs;
	void *addr;
	int rsegs, err, i;
	struct ifnet * ifp = &sc->sc_ec.ec_if;
	struct mii_data * const mii = &sc->sc_mii;
	uint32_t u;
#if 0
	int mdcdiv = DEFAULT_MDCDIV;
#endif

	callout_init(&sc->emac_tick_ch, 0);

	// ok...
	EMAC_WRITE(ETH_CTL, ETH_CTL_MPE);	// disable everything
	EMAC_WRITE(ETH_IDR, -1);		// disable interrupts
	EMAC_WRITE(ETH_RBQP, 0);		// clear receive
	EMAC_WRITE(ETH_CFG,
	    ETH_CFG_CLK_32 | ETH_CFG_SPD | ETH_CFG_FD | ETH_CFG_BIG);
	EMAC_WRITE(ETH_TCR, 0);			// send nothing
	// (void)EMAC_READ(ETH_ISR);
	u = EMAC_READ(ETH_TSR);
	EMAC_WRITE(ETH_TSR, (u & (ETH_TSR_UND | ETH_TSR_COMP | ETH_TSR_BNQ
	    | ETH_TSR_IDLE | ETH_TSR_RLE
	    | ETH_TSR_COL | ETH_TSR_OVR)));
	u = EMAC_READ(ETH_RSR);
	EMAC_WRITE(ETH_RSR, (u & (ETH_RSR_OVR | ETH_RSR_REC | ETH_RSR_BNA)));

	/* configure EMAC */
	EMAC_WRITE(ETH_CFG,
	    ETH_CFG_CLK_32 | ETH_CFG_SPD | ETH_CFG_FD | ETH_CFG_BIG);
	EMAC_WRITE(ETH_CTL, ETH_CTL_MPE);
#if 0
	if (device_cfdata(sc->sc_dev)->cf_flags)
		mdcdiv = device_cfdata(sc->sc_dev)->cf_flags;
#endif
	/* set ethernet address */
	EMAC_WRITE(ETH_SA1L, (sc->sc_enaddr[3] << 24)
	    | (sc->sc_enaddr[2] << 16) | (sc->sc_enaddr[1] << 8)
	    | (sc->sc_enaddr[0]));
	EMAC_WRITE(ETH_SA1H, (sc->sc_enaddr[5] << 8)
	    | (sc->sc_enaddr[4]));
	EMAC_WRITE(ETH_SA2L, 0);
	EMAC_WRITE(ETH_SA2H, 0);
	EMAC_WRITE(ETH_SA3L, 0);
	EMAC_WRITE(ETH_SA3H, 0);
	EMAC_WRITE(ETH_SA4L, 0);
	EMAC_WRITE(ETH_SA4H, 0);

	/* Allocate a page of memory for receive queue descriptors */
	sc->rbqlen = (ETH_RDSC_SIZE * (RX_QLEN + 1) * 2 + PAGE_SIZE - 1) / PAGE_SIZE;
	sc->rbqlen *= PAGE_SIZE;
	DPRINTFN(1,("%s: rbqlen=%i\n", __FUNCTION__, sc->rbqlen));

	err = bus_dmamem_alloc(sc->sc_dmat, sc->rbqlen, 0,
	    MAX(16384, PAGE_SIZE),	// see EMAC errata why forced to 16384 byte boundary
	    &segs, 1, &rsegs, BUS_DMA_WAITOK);
	if (err == 0) {
		DPRINTFN(1,("%s: -> bus_dmamem_map\n", __FUNCTION__));
		err = bus_dmamem_map(sc->sc_dmat, &segs, 1, sc->rbqlen,
		    &sc->rbqpage, (BUS_DMA_WAITOK | BUS_DMA_COHERENT));
	}
	if (err == 0) {
		DPRINTFN(1,("%s: -> bus_dmamap_create\n", __FUNCTION__));
		err = bus_dmamap_create(sc->sc_dmat, sc->rbqlen, 1,
		    sc->rbqlen, MAX(16384, PAGE_SIZE), BUS_DMA_WAITOK,
		    &sc->rbqpage_dmamap);
	}
	if (err == 0) {
		DPRINTFN(1,("%s: -> bus_dmamap_load\n", __FUNCTION__));
		err = bus_dmamap_load(sc->sc_dmat, sc->rbqpage_dmamap,
		    sc->rbqpage, sc->rbqlen, NULL, BUS_DMA_WAITOK);
	}
	if (err != 0) {
		panic("%s: Cannot get DMA memory", device_xname(sc->sc_dev));
	}
	sc->rbqpage_dsaddr = sc->rbqpage_dmamap->dm_segs[0].ds_addr;

	memset(sc->rbqpage, 0, sc->rbqlen);

	/* Set up pointers to start of each queue in kernel addr space.
	 * Each descriptor queue or status queue entry uses 2 words
	 */
	sc->RDSC = (void*)sc->rbqpage;

	/* Populate the RXQ with mbufs */
	sc->rxqi = 0;
	for (i = 0; i < RX_QLEN; i++) {
		struct mbuf *m;

		err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    PAGE_SIZE, BUS_DMA_WAITOK, &sc->rxq[i].m_dmamap);
		if (err)
			panic("%s: dmamap_create failed: %i\n",
			    __FUNCTION__, err);

		MGETHDR(m, M_WAIT, MT_DATA);
		MCLGET(m, M_WAIT);
		sc->rxq[i].m = m;
		if (mtod(m, intptr_t) & 3) {
			m_adj(m, mtod(m, intptr_t) & 3);
		}
		err = bus_dmamap_load(sc->sc_dmat, sc->rxq[i].m_dmamap,
		    m->m_ext.ext_buf, MCLBYTES, NULL,
		    BUS_DMA_WAITOK);
		if (err)
			panic("%s: dmamap_load failed: %i\n",
			    __FUNCTION__, err);

		sc->RDSC[i].Addr = sc->rxq[i].m_dmamap->dm_segs[0].ds_addr
		    | (i == (RX_QLEN-1) ? ETH_RDSC_F_WRAP : 0);
		sc->RDSC[i].Info = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->rxq[i].m_dmamap, 0,
		    MCLBYTES, BUS_DMASYNC_PREREAD);
	}

	/* prepare transmit queue */
	for (i = 0; i < TX_QLEN; i++) {
		err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
		    (BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW),
		    &sc->txq[i].m_dmamap);
		if (err)
			panic("ARGH #1");
		sc->txq[i].m = NULL;
	}

	/* Program each queue's start addr, cur addr, and len registers
	 * with the physical addresses.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->rbqpage_dmamap, 0, sc->rbqlen,
	    BUS_DMASYNC_PREREAD);
	addr = (void *)sc->rbqpage_dmamap->dm_segs[0].ds_addr;
	EMAC_WRITE(ETH_RBQP, (uint32_t)addr);

	/* Divide HCLK by 32 for MDC clock */
	mii->mii_ifp = ifp;
	mii->mii_readreg = emac_mii_readreg;
	mii->mii_writereg = emac_mii_writereg;
	mii->mii_statchg = emac_statchg;
	sc->sc_ec.ec_mii = mii;
	ifmedia_init(&mii->mii_media, IFM_IMASK, ether_mediachange,
	    ether_mediastatus);
	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);

	// enable / disable interrupts

#if 0
	// enable / disable interrupts
	EMAC_WRITE(ETH_IDR, -1);
	EMAC_WRITE(ETH_IER, ETH_ISR_RCOM | ETH_ISR_TBRE | ETH_ISR_TIDLE
	    | ETH_ISR_RBNA | ETH_ISR_ROVR);
	// (void)EMAC_READ(ETH_ISR); // why

	// enable transmitter / receiver
	EMAC_WRITE(ETH_CTL, ETH_CTL_TE | ETH_CTL_RE | ETH_CTL_ISR
	    | ETH_CTL_CSR | ETH_CTL_MPE);
#endif
	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;

	strcpy(ifp->if_xname, device_xname(sc->sc_dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = emac_ifioctl;
	ifp->if_start = emac_ifstart;
	ifp->if_watchdog = emac_ifwatchdog;
	ifp->if_init = emac_ifinit;
	ifp->if_stop = emac_ifstop;
	ifp->if_timer = 0;
	ifp->if_softc = sc;
	IFQ_SET_READY(&ifp->if_snd);
	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, (sc)->sc_enaddr);
}

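/*
 * PHY register access goes through the ETH_MAN register: a single management
 * frame carrying the PHY and register numbers (and, for writes, the data) is
 * written, then the driver spins until ETH_SR_IDLE indicates the operation
 * has completed; for reads the result is taken from ETH_MAN afterwards.
 */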
int
emac_mii_readreg(device_t self, int phy, int reg, uint16_t *val)
{
#ifndef EMAC_FAST
	struct emac_softc *sc = device_private(self);
#endif

	EMAC_WRITE(ETH_MAN, (ETH_MAN_HIGH | ETH_MAN_RW_RD
	    | ((phy << ETH_MAN_PHYA_SHIFT) & ETH_MAN_PHYA)
	    | ((reg << ETH_MAN_REGA_SHIFT) & ETH_MAN_REGA)
	    | ETH_MAN_CODE_IEEE802_3));
	while (!(EMAC_READ(ETH_SR) & ETH_SR_IDLE))
		;
	*val = EMAC_READ(ETH_MAN) & ETH_MAN_DATA;

	return 0;
}

int
emac_mii_writereg(device_t self, int phy, int reg, uint16_t val)
{
#ifndef EMAC_FAST
	struct emac_softc *sc = device_private(self);
#endif

	EMAC_WRITE(ETH_MAN, (ETH_MAN_HIGH | ETH_MAN_RW_WR
	    | ((phy << ETH_MAN_PHYA_SHIFT) & ETH_MAN_PHYA)
	    | ((reg << ETH_MAN_REGA_SHIFT) & ETH_MAN_REGA)
	    | ETH_MAN_CODE_IEEE802_3
	    | (val & ETH_MAN_DATA)));
	while (!(EMAC_READ(ETH_SR) & ETH_SR_IDLE))
		;

	return 0;
}

void
emac_statchg(struct ifnet *ifp)
{
	struct emac_softc *sc = ifp->if_softc;
	uint32_t reg;

	/*
	 * We must keep the MAC and the PHY in sync as
	 * to the status of full-duplex!
	 */
	reg = EMAC_READ(ETH_CFG);
	if (sc->sc_mii.mii_media_active & IFM_FDX)
		reg |= ETH_CFG_FD;
	else
		reg &= ~ETH_CFG_FD;
	EMAC_WRITE(ETH_CFG, reg);
}

void
emac_tick(void *arg)
{
	struct emac_softc* sc = (struct emac_softc *)arg;
	struct ifnet * ifp = &sc->sc_ec.ec_if;
	int s;
	uint32_t misses;

	if_statadd(ifp, if_collisions, EMAC_READ(ETH_SCOL) + EMAC_READ(ETH_MCOL));
	/* These misses are ok, they will happen if the RAM/CPU can't keep up */
	misses = EMAC_READ(ETH_DRFC);
	if (misses > 0)
		printf("%s: %d rx misses\n", device_xname(sc->sc_dev), misses);

	s = splnet();
	if (emac_gctx(sc) > 0 && IFQ_IS_EMPTY(&ifp->if_snd) == 0) {
		emac_ifstart(ifp);
	}
	splx(s);

	mii_tick(&sc->sc_mii);
	callout_reset(&sc->emac_tick_ch, hz, emac_tick, sc);
}


static int
emac_ifioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	int s, error;

	s = splnet();
	switch (cmd) {
	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING)
				emac_setaddr(ifp);
			error = 0;
		}
	}
	splx(s);
	return error;
}

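/*
 * emac_ifstart: queue frames for transmission.  Each outgoing mbuf chain is
 * coalesced into a single, word-aligned DMA segment (the EMAC takes one
 * buffer per frame via ETH_TAR/ETH_TCR); when no transmit slot is free the
 * TBRE interrupt is enabled and IFF_OACTIVE is set until emac_gctx()
 * reclaims a slot.
 */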
static void
emac_ifstart(struct ifnet *ifp)
{
	struct emac_softc *sc = (struct emac_softc *)ifp->if_softc;
	struct mbuf *m;
	bus_dma_segment_t *segs;
	int s, bi, err, nsegs;

	s = splnet();
start:
	if (emac_gctx(sc) == 0) {
		/* Enable transmit-buffer-free interrupt */
		EMAC_WRITE(ETH_IER, ETH_ISR_TBRE);
		ifp->if_flags |= IFF_OACTIVE;
		ifp->if_timer = 10;
		splx(s);
		return;
	}

	ifp->if_timer = 0;

	IFQ_POLL(&ifp->if_snd, m);
	if (m == NULL) {
		splx(s);
		return;
	}
//more:
	bi = (sc->txqi + sc->txqc) % TX_QLEN;
	if ((err = bus_dmamap_load_mbuf(sc->sc_dmat, sc->txq[bi].m_dmamap, m,
	    BUS_DMA_NOWAIT)) ||
	    sc->txq[bi].m_dmamap->dm_segs[0].ds_addr & 0x3 ||
	    sc->txq[bi].m_dmamap->dm_nsegs > 1) {
		/* Copy entire mbuf chain to new single */
		struct mbuf *mn;

		if (err == 0)
			bus_dmamap_unload(sc->sc_dmat, sc->txq[bi].m_dmamap);

		MGETHDR(mn, M_DONTWAIT, MT_DATA);
		if (mn == NULL) goto stop;
		if (m->m_pkthdr.len > MHLEN) {
			MCLGET(mn, M_DONTWAIT);
			if ((mn->m_flags & M_EXT) == 0) {
				m_freem(mn);
				goto stop;
			}
		}
		m_copydata(m, 0, m->m_pkthdr.len, mtod(mn, void *));
		mn->m_pkthdr.len = mn->m_len = m->m_pkthdr.len;
		IFQ_DEQUEUE(&ifp->if_snd, m);
		m_freem(m);
		m = mn;
		bus_dmamap_load_mbuf(sc->sc_dmat, sc->txq[bi].m_dmamap, m,
		    BUS_DMA_NOWAIT);
	} else {
		IFQ_DEQUEUE(&ifp->if_snd, m);
	}

	bpf_mtap(ifp, m, BPF_D_OUT);

	nsegs = sc->txq[bi].m_dmamap->dm_nsegs;
	segs = sc->txq[bi].m_dmamap->dm_segs;
	if (nsegs > 1) {
		panic("#### ARGH #2");
	}

	sc->txq[bi].m = m;
	sc->txqc++;

	DPRINTFN(2,("%s: start sending idx #%i mbuf %p (txqc=%i, phys %p), len=%u\n",
	    __FUNCTION__, bi, sc->txq[bi].m, sc->txqc, (void*)segs->ds_addr,
	    (unsigned)m->m_pkthdr.len));
#ifdef DIAGNOSTIC
	if (sc->txqc > TX_QLEN) {
		panic("%s: txqc %i > %i", __FUNCTION__, sc->txqc, TX_QLEN);
	}
#endif

	bus_dmamap_sync(sc->sc_dmat, sc->txq[bi].m_dmamap, 0,
	    sc->txq[bi].m_dmamap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	EMAC_WRITE(ETH_TAR, segs->ds_addr);
	EMAC_WRITE(ETH_TCR, m->m_pkthdr.len);
	if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
		goto start;
stop:

	splx(s);
	return;
}

static void
emac_ifwatchdog(struct ifnet *ifp)
{
	struct emac_softc *sc = (struct emac_softc *)ifp->if_softc;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;
	printf("%s: device timeout, CTL = 0x%08x, CFG = 0x%08x\n",
	    device_xname(sc->sc_dev), EMAC_READ(ETH_CTL), EMAC_READ(ETH_CFG));
}

static int
emac_ifinit(struct ifnet *ifp)
{
	struct emac_softc *sc = ifp->if_softc;
	int s = splnet();

	callout_stop(&sc->emac_tick_ch);

	// enable interrupts
	EMAC_WRITE(ETH_IDR, -1);
	EMAC_WRITE(ETH_IER, ETH_ISR_RCOM | ETH_ISR_TBRE | ETH_ISR_TIDLE
	    | ETH_ISR_RBNA | ETH_ISR_ROVR);

	// enable transmitter / receiver
	EMAC_WRITE(ETH_CTL, ETH_CTL_TE | ETH_CTL_RE | ETH_CTL_ISR
	    | ETH_CTL_CSR | ETH_CTL_MPE);

	mii_mediachg(&sc->sc_mii);
	callout_reset(&sc->emac_tick_ch, hz, emac_tick, sc);
	ifp->if_flags |= IFF_RUNNING;
	splx(s);
	return 0;
}

static void
emac_ifstop(struct ifnet *ifp, int disable)
{
//	uint32_t u;
	struct emac_softc *sc = ifp->if_softc;

#if 0
	EMAC_WRITE(ETH_CTL, ETH_CTL_MPE);	// disable everything
	EMAC_WRITE(ETH_IDR, -1);		// disable interrupts
//	EMAC_WRITE(ETH_RBQP, 0);		// clear receive
	EMAC_WRITE(ETH_CFG,
	    ETH_CFG_CLK_32 | ETH_CFG_SPD | ETH_CFG_FD | ETH_CFG_BIG);
	EMAC_WRITE(ETH_TCR, 0);			// send nothing
	// (void)EMAC_READ(ETH_ISR);
	u = EMAC_READ(ETH_TSR);
	EMAC_WRITE(ETH_TSR, (u & (ETH_TSR_UND | ETH_TSR_COMP | ETH_TSR_BNQ
	    | ETH_TSR_IDLE | ETH_TSR_RLE
	    | ETH_TSR_COL | ETH_TSR_OVR)));
	u = EMAC_READ(ETH_RSR);
	EMAC_WRITE(ETH_RSR, (u & (ETH_RSR_OVR | ETH_RSR_REC | ETH_RSR_BNA)));
#endif
	callout_stop(&sc->emac_tick_ch);

	/* Down the MII. */
	mii_down(&sc->sc_mii);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
	sc->sc_mii.mii_media_status &= ~IFM_ACTIVE;
}

static void
emac_setaddr(struct ifnet *ifp)
{
	struct emac_softc *sc = ifp->if_softc;
	struct ethercom *ec = &sc->sc_ec;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t ias[3][ETHER_ADDR_LEN];
	uint32_t h, nma = 0, hashes[2] = { 0, 0 };
	uint32_t ctl = EMAC_READ(ETH_CTL);
	uint32_t cfg = EMAC_READ(ETH_CFG);

	/* disable receiver temporarily */
	EMAC_WRITE(ETH_CTL, ctl & ~ETH_CTL_RE);

	cfg &= ~(ETH_CFG_MTI | ETH_CFG_UNI | ETH_CFG_CAF);

	if (ifp->if_flags & IFF_PROMISC) {
		cfg |= ETH_CFG_CAF;
	} else {
		cfg &= ~ETH_CFG_CAF;
	}

	// ETH_CFG_BIG?

	ifp->if_flags &= ~IFF_ALLMULTI;

	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			cfg |= ETH_CFG_CAF;
			hashes[0] = 0xffffffffUL;
			hashes[1] = 0xffffffffUL;
			ifp->if_flags |= IFF_ALLMULTI;
			nma = 0;
			break;
		}

		if (nma < 3) {
			/* We can program 3 perfect address filters for mcast */
			memcpy(ias[nma], enm->enm_addrlo, ETHER_ADDR_LEN);
		} else {
			/*
			 * XXX: Datasheet is not very clear here, I'm not sure
			 * if I'm doing this right.  --joff
			 */
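			/*
			 * Index a 64-bit hash table with the upper bits of
			 * the little-endian CRC of the address; the table is
			 * written to ETH_HSH/ETH_HSL below.
			 */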
--joff 816 */ 817 h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN); 818 819 /* Just want the 6 most-significant bits. */ 820 h = h >> 26; 821 822 hashes[ h / 32 ] |= (1 << (h % 32)); 823 cfg |= ETH_CFG_MTI; 824 } 825 ETHER_NEXT_MULTI(step, enm); 826 nma++; 827 } 828 ETHER_UNLOCK(ec); 829 830 // program... 831 DPRINTFN(1,("%s: en0 %02x:%02x:%02x:%02x:%02x:%02x\n", __FUNCTION__, 832 sc->sc_enaddr[0], sc->sc_enaddr[1], sc->sc_enaddr[2], 833 sc->sc_enaddr[3], sc->sc_enaddr[4], sc->sc_enaddr[5])); 834 EMAC_WRITE(ETH_SA1L, (sc->sc_enaddr[3] << 24) 835 | (sc->sc_enaddr[2] << 16) | (sc->sc_enaddr[1] << 8) 836 | (sc->sc_enaddr[0])); 837 EMAC_WRITE(ETH_SA1H, (sc->sc_enaddr[5] << 8) 838 | (sc->sc_enaddr[4])); 839 if (nma > 1) { 840 DPRINTFN(1,("%s: en1 %02x:%02x:%02x:%02x:%02x:%02x\n", 841 __FUNCTION__, 842 ias[0][0], ias[0][1], ias[0][2], 843 ias[0][3], ias[0][4], ias[0][5])); 844 EMAC_WRITE(ETH_SA2L, (ias[0][3] << 24) 845 | (ias[0][2] << 16) | (ias[0][1] << 8) 846 | (ias[0][0])); 847 EMAC_WRITE(ETH_SA2H, (ias[0][4] << 8) 848 | (ias[0][5])); 849 } 850 if (nma > 2) { 851 DPRINTFN(1,("%s: en2 %02x:%02x:%02x:%02x:%02x:%02x\n", 852 __FUNCTION__, 853 ias[1][0], ias[1][1], ias[1][2], 854 ias[1][3], ias[1][4], ias[1][5])); 855 EMAC_WRITE(ETH_SA3L, (ias[1][3] << 24) 856 | (ias[1][2] << 16) | (ias[1][1] << 8) 857 | (ias[1][0])); 858 EMAC_WRITE(ETH_SA3H, (ias[1][4] << 8) 859 | (ias[1][5])); 860 } 861 if (nma > 3) { 862 DPRINTFN(1,("%s: en3 %02x:%02x:%02x:%02x:%02x:%02x\n", 863 __FUNCTION__, 864 ias[2][0], ias[2][1], ias[2][2], 865 ias[2][3], ias[2][4], ias[2][5])); 866 EMAC_WRITE(ETH_SA3L, (ias[2][3] << 24) 867 | (ias[2][2] << 16) | (ias[2][1] << 8) 868 | (ias[2][0])); 869 EMAC_WRITE(ETH_SA3H, (ias[2][4] << 8) 870 | (ias[2][5])); 871 } 872 EMAC_WRITE(ETH_HSH, hashes[0]); 873 EMAC_WRITE(ETH_HSL, hashes[1]); 874 EMAC_WRITE(ETH_CFG, cfg); 875 EMAC_WRITE(ETH_CTL, ctl | ETH_CTL_RE); 876 } 877