1 /* $NetBSD: if_cemac.c,v 1.14 2018/07/15 05:16:44 maxv Exp $ */ 2 3 /* 4 * Copyright (c) 2015 Genetec Corporation. All rights reserved. 5 * Written by Hashimoto Kenichi for Genetec Corporation. 6 * 7 * Based on arch/arm/at91/at91emac.c 8 * 9 * Copyright (c) 2007 Embedtronics Oy 10 * All rights reserved. 11 * 12 * Copyright (c) 2004 Jesse Off 13 * All rights reserved. 14 * 15 * Redistribution and use in source and binary forms, with or without 16 * modification, are permitted provided that the following conditions 17 * are met: 18 * 1. Redistributions of source code must retain the above copyright 19 * notice, this list of conditions and the following disclaimer. 20 * 2. Redistributions in binary form must reproduce the above copyright 21 * notice, this list of conditions and the following disclaimer in the 22 * documentation and/or other materials provided with the distribution. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 26 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * POSSIBILITY OF SUCH DAMAGE. 
 */

/*
 * Cadence EMAC/GEM ethernet controller IP driver
 * used by arm/at91, arm/zynq SoC
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_cemac.c,v 1.14 2018/07/15 05:16:44 maxv Exp $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ioctl.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/time.h>
#include <sys/device.h>
#include <uvm/uvm_extern.h>

#include <sys/bus.h>
#include <machine/intr.h>

#include <arm/cpufunc.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/if_ether.h>
#include <net/bpf.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_inarp.h>
#endif

#include <dev/cadence/cemacreg.h>
#include <dev/cadence/if_cemacvar.h>

/* Default MDC clock divisor (HCLK/32); see commented-out cf_flags override. */
#define DEFAULT_MDCDIV	32

/* Register accessors; every use site must have 'sc' in scope. */
#define CEMAC_READ(x) \
	bus_space_read_4(sc->sc_iot, sc->sc_ioh, (x))
#define CEMAC_WRITE(x, y) \
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, (x), (y))
/*
 * Write a register whose offset differs between the GEM (gigabit) and
 * classic EMAC layouts: selects GEM_<x> or ETH_<x> at run time based on
 * the CEMAC_FLAG_GEM softc flag.
 */
#define CEMAC_GEM_WRITE(x, y)						\
	do {								\
		if (ISSET(sc->cemac_flags, CEMAC_FLAG_GEM))		\
			bus_space_write_4(sc->sc_iot, sc->sc_ioh, (GEM_##x), (y)); \
		else							\
			bus_space_write_4(sc->sc_iot, sc->sc_ioh, (ETH_##x), (y)); \
	} while(0)

/* Receive / transmit descriptor ring sizes. */
#define RX_QLEN 64
#define TX_QLEN	2		/* I'm very sorry but that's where we can get */

/* Per-descriptor bookkeeping: the mbuf in flight and its DMA map. */
struct cemac_qmeta {
	struct mbuf 	*m;
	bus_dmamap_t	m_dmamap;
};

/* Per-instance driver state. */
struct cemac_softc {
	device_t		sc_dev;
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_dma_tag_t		sc_dmat;
	uint8_t			sc_enaddr[ETHER_ADDR_LEN];
	struct ethercom		sc_ethercom;
	mii_data_t		sc_mii;

	/* Receive descriptor queue page (DMA-coherent). */
	void			*rbqpage;
	unsigned		rbqlen;		/* bytes */
	bus_addr_t		rbqpage_dsaddr;	/* device (physical) address */
	bus_dmamap_t		rbqpage_dmamap;
	/* Transmit descriptor queue page (DMA-coherent). */
	void			*tbqpage;
	unsigned		tbqlen;		/* bytes */
	bus_addr_t		tbqpage_dsaddr;
	bus_dmamap_t		tbqpage_dmamap;

	volatile struct eth_dsc *RDSC;		/* RX descriptor ring (in rbqpage) */
	int			rxqi;		/* next RX index (mod RX_QLEN) */
	struct cemac_qmeta	rxq[RX_QLEN];
	volatile struct eth_dsc *TDSC;		/* TX descriptor ring (in tbqpage) */
	int			txqi, txqc;	/* TX head index and in-flight count */
	struct cemac_qmeta	txq[TX_QLEN];
	callout_t		cemac_tick_ch;	/* 1 Hz statistics/MII tick */

	int			cemac_flags;	/* CEMAC_FLAG_* (e.g. GEM variant) */
};

static void	cemac_init(struct cemac_softc *);
static int	cemac_gctx(struct cemac_softc *);
static int	cemac_mediachange(struct ifnet *);
static void	cemac_mediastatus(struct ifnet *, struct ifmediareq *);
static int	cemac_mii_readreg(device_t, int, int);
static void	cemac_mii_writereg(device_t, int, int, int);
static void	cemac_statchg(struct ifnet *);
static void	cemac_tick(void *);
static int	cemac_ifioctl(struct ifnet *, u_long, void *);
static void	cemac_ifstart(struct ifnet *);
static void	cemac_ifwatchdog(struct ifnet *);
static int	cemac_ifinit(struct ifnet *);
static void	cemac_ifstop(struct ifnet *, int);
static void	cemac_setaddr(struct ifnet *);

#ifdef	CEMAC_DEBUG
int cemac_debug = CEMAC_DEBUG;
#define	DPRINTFN(n,fmt)	if (cemac_debug >= (n)) printf fmt
#else
#define	DPRINTFN(n,fmt)
#endif

/* cemac_match/cemac_attach are provided by the bus-specific front end. */
CFATTACH_DECL_NEW(cemac, sizeof(struct cemac_softc),
    cemac_match, cemac_attach, NULL, NULL);

/*
 * Common match helper for bus front ends: accept only config entries
 * named "cemac".
 */
int
cemac_match_common(device_t parent, cfdata_t match, void *aux)
{
	if (strcmp(match->cf_name, "cemac") == 0)
		return 1;
	return 0;
}

/*
 * Common attach helper: record bus resources, quiesce the controller,
 * pick up the MAC address from the "mac-address" device property (or a
 * hard-coded fallback), then finish setup in cemac_init().
 */
void
cemac_attach_common(device_t self, bus_space_tag_t iot,
    bus_space_handle_t ioh, bus_dma_tag_t dmat, int flags)
{
	struct cemac_softc *sc = device_private(self);
	prop_data_t enaddr;
	uint32_t u;

	sc->sc_dev = self;
	sc->sc_ioh = ioh;
	sc->sc_iot = iot;
	sc->sc_dmat = dmat;
	sc->cemac_flags = flags;

	aprint_naive("\n");
	if (ISSET(sc->cemac_flags, CEMAC_FLAG_GEM))
		aprint_normal(": Cadence Gigabit Ethernet Controller\n");
	else
		aprint_normal(": Cadence Ethernet Controller\n");

	/* configure emac: */
	CEMAC_WRITE(ETH_CTL, 0);		// disable everything
	CEMAC_WRITE(ETH_IDR, -1);		// disable interrupts
	CEMAC_WRITE(ETH_RBQP, 0);		// clear receive
	CEMAC_WRITE(ETH_TBQP, 0);		// clear transmit
	if (ISSET(sc->cemac_flags, CEMAC_FLAG_GEM))
		CEMAC_WRITE(ETH_CFG,
		    GEM_CFG_CLK_64 | GEM_CFG_GEN | ETH_CFG_SPD | ETH_CFG_FD);
	else
		CEMAC_WRITE(ETH_CFG,
		    ETH_CFG_CLK_32 | ETH_CFG_SPD | ETH_CFG_FD | ETH_CFG_BIG);
	//CEMAC_WRITE(ETH_TCR, 0);		// send nothing
	//(void)CEMAC_READ(ETH_ISR);
	/* Clear any stale transmit/receive status (write-1-to-clear). */
	u = CEMAC_READ(ETH_TSR);
	CEMAC_WRITE(ETH_TSR, (u & (ETH_TSR_UND | ETH_TSR_COMP | ETH_TSR_BNQ
		| ETH_TSR_IDLE | ETH_TSR_RLE
		| ETH_TSR_COL|ETH_TSR_OVR)));
	u = CEMAC_READ(ETH_RSR);
	CEMAC_WRITE(ETH_RSR, (u & (ETH_RSR_OVR|ETH_RSR_REC|ETH_RSR_BNA)));

	/* Fetch the Ethernet address from property if set. */
	enaddr = prop_dictionary_get(device_properties(self), "mac-address");

	if (enaddr != NULL) {
		KASSERT(prop_object_type(enaddr) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(enaddr) == ETHER_ADDR_LEN);
		memcpy(sc->sc_enaddr, prop_data_data_nocopy(enaddr),
		    ETHER_ADDR_LEN);
	} else {
		/* No property: fall back to a fixed, hard-coded address. */
		static const uint8_t hardcoded[ETHER_ADDR_LEN] = {
			0x00, 0x0d, 0x10, 0x81, 0x0c, 0x94
		};
		memcpy(sc->sc_enaddr, hardcoded, ETHER_ADDR_LEN);
	}

	cemac_init(sc);
}

/*
 * Reap completed transmit descriptors ("garbage-collect TX").
 * Returns non-zero when the hardware can accept another frame.
 */
static int
cemac_gctx(struct cemac_softc *sc)
{
	struct ifnet * ifp = &sc->sc_ethercom.ec_if;
	uint32_t tsr;

	tsr = CEMAC_READ(ETH_TSR);
	if (!ISSET(sc->cemac_flags, CEMAC_FLAG_GEM)) {
		// no space left
		if (!(tsr & ETH_TSR_BNQ))
			return 0;
	} else {
		if (tsr & GEM_TSR_TXGO)
			return 0;
	}
	/* Acknowledge the status bits we just sampled (write-1-to-clear). */
	CEMAC_WRITE(ETH_TSR, tsr);

	// free sent frames
	while (sc->txqc > (ISSET(sc->cemac_flags, CEMAC_FLAG_GEM) ? 0 :
	    (tsr & ETH_TSR_IDLE ?
0 : 1))) { 245 int bi = sc->txqi % TX_QLEN; 246 247 DPRINTFN(3,("%s: TDSC[%i].Addr 0x%08x\n", 248 __FUNCTION__, bi, sc->TDSC[bi].Addr)); 249 DPRINTFN(3,("%s: TDSC[%i].Info 0x%08x\n", 250 __FUNCTION__, bi, sc->TDSC[bi].Info)); 251 252 bus_dmamap_sync(sc->sc_dmat, sc->txq[bi].m_dmamap, 0, 253 sc->txq[bi].m->m_pkthdr.len, BUS_DMASYNC_POSTWRITE); 254 bus_dmamap_unload(sc->sc_dmat, sc->txq[bi].m_dmamap); 255 m_freem(sc->txq[bi].m); 256 DPRINTFN(2,("%s: freed idx #%i mbuf %p (txqc=%i)\n", 257 __FUNCTION__, bi, sc->txq[bi].m, sc->txqc)); 258 sc->txq[bi].m = NULL; 259 sc->txqi = (bi + 1) % TX_QLEN; 260 sc->txqc--; 261 } 262 263 // mark we're free 264 if (ifp->if_flags & IFF_OACTIVE) { 265 ifp->if_flags &= ~IFF_OACTIVE; 266 /* Disable transmit-buffer-free interrupt */ 267 /*CEMAC_WRITE(ETH_IDR, ETH_ISR_TBRE);*/ 268 } 269 270 return 1; 271 } 272 273 int 274 cemac_intr(void *arg) 275 { 276 struct cemac_softc *sc = (struct cemac_softc *)arg; 277 struct ifnet * ifp = &sc->sc_ethercom.ec_if; 278 uint32_t imr, isr, ctl; 279 #ifdef CEMAC_DEBUG 280 uint32_t rsr; 281 #endif 282 int bi; 283 284 imr = ~CEMAC_READ(ETH_IMR); 285 if (!(imr & (ETH_ISR_RCOM|ETH_ISR_TBRE|ETH_ISR_TIDLE|ETH_ISR_RBNA|ETH_ISR_ROVR|ETH_ISR_TCOM))) { 286 // interrupt not enabled, can't be us 287 return 0; 288 } 289 290 isr = CEMAC_READ(ETH_ISR); 291 CEMAC_WRITE(ETH_ISR, isr); 292 isr &= imr; 293 #ifdef CEMAC_DEBUG 294 rsr = CEMAC_READ(ETH_RSR); // get receive status register 295 #endif 296 DPRINTFN(2, ("%s: isr=0x%08X rsr=0x%08X imr=0x%08X\n", __FUNCTION__, isr, rsr, imr)); 297 298 if (isr & ETH_ISR_RBNA) { // out of receive buffers 299 CEMAC_WRITE(ETH_RSR, ETH_RSR_BNA); // clear interrupt 300 ctl = CEMAC_READ(ETH_CTL); // get current control register value 301 CEMAC_WRITE(ETH_CTL, ctl & ~ETH_CTL_RE); // disable receiver 302 CEMAC_WRITE(ETH_RSR, ETH_RSR_BNA); // clear BNA bit 303 CEMAC_WRITE(ETH_CTL, ctl | ETH_CTL_RE); // re-enable receiver 304 ifp->if_ierrors++; 305 ifp->if_ipackets++; 306 DPRINTFN(1,("%s: out 
of receive buffers\n", __FUNCTION__)); 307 } 308 if (isr & ETH_ISR_ROVR) { 309 CEMAC_WRITE(ETH_RSR, ETH_RSR_OVR); // clear interrupt 310 ifp->if_ierrors++; 311 ifp->if_ipackets++; 312 DPRINTFN(1,("%s: receive overrun\n", __FUNCTION__)); 313 } 314 315 if (isr & ETH_ISR_RCOM) { // packet has been received! 316 uint32_t nfo; 317 DPRINTFN(2,("#2 RDSC[%i].INFO=0x%08X\n", sc->rxqi % RX_QLEN, sc->RDSC[sc->rxqi % RX_QLEN].Info)); 318 while (sc->RDSC[(bi = sc->rxqi % RX_QLEN)].Addr & ETH_RDSC_F_USED) { 319 int fl, csum; 320 struct mbuf *m; 321 322 nfo = sc->RDSC[bi].Info; 323 fl = (nfo & ETH_RDSC_I_LEN) - 4; 324 DPRINTFN(2,("## nfo=0x%08X\n", nfo)); 325 326 MGETHDR(m, M_DONTWAIT, MT_DATA); 327 if (m != NULL) MCLGET(m, M_DONTWAIT); 328 if (m != NULL && (m->m_flags & M_EXT)) { 329 bus_dmamap_sync(sc->sc_dmat, sc->rxq[bi].m_dmamap, 0, 330 MCLBYTES, BUS_DMASYNC_POSTREAD); 331 bus_dmamap_unload(sc->sc_dmat, 332 sc->rxq[bi].m_dmamap); 333 m_set_rcvif(sc->rxq[bi].m, ifp); 334 sc->rxq[bi].m->m_pkthdr.len = 335 sc->rxq[bi].m->m_len = fl; 336 switch (nfo & ETH_RDSC_I_CHKSUM) { 337 case ETH_RDSC_I_CHKSUM_IP: 338 csum = M_CSUM_IPv4; 339 break; 340 case ETH_RDSC_I_CHKSUM_UDP: 341 csum = M_CSUM_IPv4 | M_CSUM_UDPv4 | 342 M_CSUM_UDPv6; 343 break; 344 case ETH_RDSC_I_CHKSUM_TCP: 345 csum = M_CSUM_IPv4 | M_CSUM_TCPv4 | 346 M_CSUM_TCPv6; 347 break; 348 default: 349 csum = 0; 350 break; 351 } 352 sc->rxq[bi].m->m_pkthdr.csum_flags = csum; 353 DPRINTFN(2,("received %u bytes packet\n", fl)); 354 if_percpuq_enqueue(ifp->if_percpuq, 355 sc->rxq[bi].m); 356 if (mtod(m, intptr_t) & 3) 357 m_adj(m, mtod(m, intptr_t) & 3); 358 sc->rxq[bi].m = m; 359 bus_dmamap_load(sc->sc_dmat, 360 sc->rxq[bi].m_dmamap, 361 m->m_ext.ext_buf, MCLBYTES, 362 NULL, BUS_DMA_NOWAIT); 363 bus_dmamap_sync(sc->sc_dmat, sc->rxq[bi].m_dmamap, 0, 364 MCLBYTES, BUS_DMASYNC_PREREAD); 365 sc->RDSC[bi].Info = 0; 366 sc->RDSC[bi].Addr = 367 sc->rxq[bi].m_dmamap->dm_segs[0].ds_addr 368 | (bi == (RX_QLEN-1) ? 
ETH_RDSC_F_WRAP : 0); 369 } else { 370 /* Drop packets until we can get replacement 371 * empty mbufs for the RXDQ. 372 */ 373 if (m != NULL) 374 m_freem(m); 375 ifp->if_ierrors++; 376 } 377 sc->rxqi++; 378 } 379 } 380 381 if (cemac_gctx(sc) > 0) 382 if_schedule_deferred_start(ifp); 383 #if 0 // reloop 384 irq = CEMAC_READ(IntStsC); 385 if ((irq & (IntSts_RxSQ|IntSts_ECI)) != 0) 386 goto begin; 387 #endif 388 389 return (1); 390 } 391 392 393 static void 394 cemac_init(struct cemac_softc *sc) 395 { 396 bus_dma_segment_t segs; 397 int rsegs, err, i; 398 struct ifnet * ifp = &sc->sc_ethercom.ec_if; 399 uint32_t u; 400 #if 0 401 int mdcdiv = DEFAULT_MDCDIV; 402 #endif 403 404 callout_init(&sc->cemac_tick_ch, 0); 405 406 // ok... 407 CEMAC_WRITE(ETH_CTL, ETH_CTL_MPE); // disable everything 408 CEMAC_WRITE(ETH_IDR, -1); // disable interrupts 409 CEMAC_WRITE(ETH_RBQP, 0); // clear receive 410 CEMAC_WRITE(ETH_TBQP, 0); // clear transmit 411 if (ISSET(sc->cemac_flags, CEMAC_FLAG_GEM)) 412 CEMAC_WRITE(ETH_CFG, 413 GEM_CFG_CLK_64 | ETH_CFG_SPD | ETH_CFG_FD | ETH_CFG_BIG); 414 else 415 CEMAC_WRITE(ETH_CFG, 416 ETH_CFG_CLK_32 | ETH_CFG_SPD | ETH_CFG_FD | ETH_CFG_BIG); 417 if (ISSET(sc->cemac_flags, CEMAC_FLAG_GEM)) { 418 CEMAC_WRITE(GEM_DMA_CFG, 419 __SHIFTIN((MCLBYTES + 63) / 64, GEM_DMA_CFG_RX_BUF_SIZE) | 420 __SHIFTIN(3, GEM_DMA_CFG_RX_PKTBUF_MEMSZ_SEL) | 421 GEM_DMA_CFG_TX_PKTBUF_MEMSZ_SEL | 422 __SHIFTIN(16, GEM_DMA_CFG_AHB_FIXED_BURST_LEN) | 423 GEM_DMA_CFG_DISC_WHEN_NO_AHB); 424 } 425 // CEMAC_WRITE(ETH_TCR, 0); // send nothing 426 // (void)CEMAC_READ(ETH_ISR); 427 u = CEMAC_READ(ETH_TSR); 428 CEMAC_WRITE(ETH_TSR, (u & (ETH_TSR_UND | ETH_TSR_COMP | ETH_TSR_BNQ 429 | ETH_TSR_IDLE | ETH_TSR_RLE 430 | ETH_TSR_COL|ETH_TSR_OVR))); 431 u = CEMAC_READ(ETH_RSR); 432 CEMAC_WRITE(ETH_RSR, (u & (ETH_RSR_OVR|ETH_RSR_REC|ETH_RSR_BNA))); 433 434 #if 0 435 if (device_cfdata(sc->sc_dev)->cf_flags) 436 mdcdiv = device_cfdata(sc->sc_dev)->cf_flags; 437 #endif 438 /* set ethernet address 
 */
	/* Program specific-address filter 1 with our MAC; clear 2-4. */
	CEMAC_GEM_WRITE(SA1L, (sc->sc_enaddr[3] << 24)
	    | (sc->sc_enaddr[2] << 16) | (sc->sc_enaddr[1] << 8)
	    | (sc->sc_enaddr[0]));
	CEMAC_GEM_WRITE(SA1H, (sc->sc_enaddr[5] << 8)
	    | (sc->sc_enaddr[4]));
	CEMAC_GEM_WRITE(SA2L, 0);
	CEMAC_GEM_WRITE(SA2H, 0);
	CEMAC_GEM_WRITE(SA3L, 0);
	CEMAC_GEM_WRITE(SA3H, 0);
	CEMAC_GEM_WRITE(SA4L, 0);
	CEMAC_GEM_WRITE(SA4H, 0);

	/* Allocate a page of memory for receive queue descriptors */
	sc->rbqlen = (ETH_DSC_SIZE * (RX_QLEN + 1) * 2 + PAGE_SIZE - 1) / PAGE_SIZE;
	sc->rbqlen *= PAGE_SIZE;
	DPRINTFN(1,("%s: rbqlen=%i\n", __FUNCTION__, sc->rbqlen));

	err = bus_dmamem_alloc(sc->sc_dmat, sc->rbqlen, 0,
	    MAX(16384, PAGE_SIZE),	// see EMAC errata why forced to 16384 byte boundary
	    &segs, 1, &rsegs, BUS_DMA_WAITOK);
	if (err == 0) {
		DPRINTFN(1,("%s: -> bus_dmamem_map\n", __FUNCTION__));
		err = bus_dmamem_map(sc->sc_dmat, &segs, 1, sc->rbqlen,
		    &sc->rbqpage, (BUS_DMA_WAITOK|BUS_DMA_COHERENT));
	}
	if (err == 0) {
		DPRINTFN(1,("%s: -> bus_dmamap_create\n", __FUNCTION__));
		err = bus_dmamap_create(sc->sc_dmat, sc->rbqlen, 1,
		    sc->rbqlen, MAX(16384, PAGE_SIZE), BUS_DMA_WAITOK,
		    &sc->rbqpage_dmamap);
	}
	if (err == 0) {
		DPRINTFN(1,("%s: -> bus_dmamap_load\n", __FUNCTION__));
		err = bus_dmamap_load(sc->sc_dmat, sc->rbqpage_dmamap,
		    sc->rbqpage, sc->rbqlen, NULL, BUS_DMA_WAITOK);
	}
	if (err != 0)
		panic("%s: Cannot get DMA memory", device_xname(sc->sc_dev));

	sc->rbqpage_dsaddr = sc->rbqpage_dmamap->dm_segs[0].ds_addr;
	memset(sc->rbqpage, 0, sc->rbqlen);

	/* Allocate a page of memory for transmit queue descriptors */
	sc->tbqlen = (ETH_DSC_SIZE * (TX_QLEN + 1) * 2 + PAGE_SIZE - 1) / PAGE_SIZE;
	sc->tbqlen *= PAGE_SIZE;
	DPRINTFN(1,("%s: tbqlen=%i\n", __FUNCTION__, sc->tbqlen));

	err = bus_dmamem_alloc(sc->sc_dmat, sc->tbqlen, 0,
	    MAX(16384, PAGE_SIZE),	// see EMAC errata why forced to 16384 byte boundary
	    &segs, 1, &rsegs, BUS_DMA_WAITOK);
	if (err == 0) {
		DPRINTFN(1,("%s: -> bus_dmamem_map\n", __FUNCTION__));
		err = bus_dmamem_map(sc->sc_dmat, &segs, 1, sc->tbqlen,
		    &sc->tbqpage, (BUS_DMA_WAITOK|BUS_DMA_COHERENT));
	}
	if (err == 0) {
		DPRINTFN(1,("%s: -> bus_dmamap_create\n", __FUNCTION__));
		err = bus_dmamap_create(sc->sc_dmat, sc->tbqlen, 1,
		    sc->tbqlen, MAX(16384, PAGE_SIZE), BUS_DMA_WAITOK,
		    &sc->tbqpage_dmamap);
	}
	if (err == 0) {
		DPRINTFN(1,("%s: -> bus_dmamap_load\n", __FUNCTION__));
		err = bus_dmamap_load(sc->sc_dmat, sc->tbqpage_dmamap,
		    sc->tbqpage, sc->tbqlen, NULL, BUS_DMA_WAITOK);
	}
	if (err != 0)
		panic("%s: Cannot get DMA memory", device_xname(sc->sc_dev));

	sc->tbqpage_dsaddr = sc->tbqpage_dmamap->dm_segs[0].ds_addr;
	memset(sc->tbqpage, 0, sc->tbqlen);

	/* Set up pointers to start of each queue in kernel addr space.
	 * Each descriptor queue or status queue entry uses 2 words
	 */
	sc->RDSC = (void *)sc->rbqpage;
	sc->TDSC = (void *)sc->tbqpage;

	/* init TX queue */
	for (i = 0; i < TX_QLEN; i++) {
		sc->TDSC[i].Addr = 0;
		/* Mark every TX slot software-owned; last one wraps. */
		sc->TDSC[i].Info = ETH_TDSC_I_USED |
		    (i == (TX_QLEN - 1) ? ETH_TDSC_I_WRAP : 0);
	}

	/* Populate the RXQ with mbufs */
	sc->rxqi = 0;
	for(i = 0; i < RX_QLEN; i++) {
		struct mbuf *m;

		err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, PAGE_SIZE,
		    BUS_DMA_WAITOK, &sc->rxq[i].m_dmamap);
		if (err) {
			panic("%s: dmamap_create failed: %i\n", __FUNCTION__, err);
		}
		MGETHDR(m, M_WAIT, MT_DATA);
		MCLGET(m, M_WAIT);
		sc->rxq[i].m = m;
		/* Word-align the data pointer. */
		if (mtod(m, intptr_t) & 3) {
			m_adj(m, mtod(m, intptr_t) & 3);
		}
		err = bus_dmamap_load(sc->sc_dmat, sc->rxq[i].m_dmamap,
		    m->m_ext.ext_buf, MCLBYTES, NULL,
		    BUS_DMA_WAITOK);
		if (err) {
			panic("%s: dmamap_load failed: %i\n", __FUNCTION__, err);
		}
		/* Hand the buffer to the hardware; last descriptor wraps. */
		sc->RDSC[i].Addr = sc->rxq[i].m_dmamap->dm_segs[0].ds_addr
		    | (i == (RX_QLEN-1) ? ETH_RDSC_F_WRAP : 0);
		sc->RDSC[i].Info = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->rxq[i].m_dmamap, 0,
		    MCLBYTES, BUS_DMASYNC_PREREAD);
	}

	/* prepare transmit queue */
	for (i = 0; i < TX_QLEN; i++) {
		err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
		    (BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW),
		    &sc->txq[i].m_dmamap);
		if (err)
			panic("ARGH #1");
		sc->txq[i].m = NULL;
	}

	/* Program each queue's start addr, cur addr, and len registers
	 * with the physical addresses.
	 */
	CEMAC_WRITE(ETH_RBQP, (uint32_t)sc->rbqpage_dsaddr);
	CEMAC_WRITE(ETH_TBQP, (uint32_t)sc->tbqpage_dsaddr);

	/* Divide HCLK by 32 for MDC clock */
	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = cemac_mii_readreg;
	sc->sc_mii.mii_writereg = cemac_mii_writereg;
	sc->sc_mii.mii_statchg = cemac_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, cemac_mediachange,
	    cemac_mediastatus);
	mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

#if 0
	// enable / disable interrupts
	CEMAC_WRITE(ETH_IDR, -1);
	CEMAC_WRITE(ETH_IER, ETH_ISR_RCOM | ETH_ISR_TBRE | ETH_ISR_TIDLE
	    | ETH_ISR_RBNA | ETH_ISR_ROVR | ETH_ISR_TCOM);
//	(void)CEMAC_READ(ETH_ISR); // why

	// enable transmitter / receiver
	CEMAC_WRITE(ETH_CTL, ETH_CTL_TE | ETH_CTL_RE | ETH_CTL_ISR
	    | ETH_CTL_CSR | ETH_CTL_MPE);
#endif
	/*
	 * We can support hardware checksumming.
	 */
	ifp->if_capabilities |=
	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
	    IFCAP_CSUM_TCPv6_Tx | IFCAP_CSUM_TCPv6_Rx |
	    IFCAP_CSUM_UDPv6_Tx | IFCAP_CSUM_UDPv6_Rx;

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/* Register the network interface with the stack. */
	strcpy(ifp->if_xname, device_xname(sc->sc_dev));
	ifp->if_flags = IFF_BROADCAST|IFF_SIMPLEX|IFF_NOTRAILERS|IFF_MULTICAST;
	ifp->if_ioctl = cemac_ifioctl;
	ifp->if_start = cemac_ifstart;
	ifp->if_watchdog = cemac_ifwatchdog;
	ifp->if_init = cemac_ifinit;
	ifp->if_stop = cemac_ifstop;
	ifp->if_timer = 0;
	ifp->if_softc = sc;
	IFQ_SET_READY(&ifp->if_snd);
	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, (sc)->sc_enaddr);
}

/* ifmedia change callback: re-initialize if the interface is up. */
static int
cemac_mediachange(struct ifnet *ifp)
{
	if (ifp->if_flags & IFF_UP)
		cemac_ifinit(ifp);
	return (0);
}

/* ifmedia status callback: report link status/media from the PHY. */
static void
cemac_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct cemac_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
}


/*
 * Read a PHY register via the MAC's PHY maintenance interface.
 * Busy-waits for the shift operation to complete.
 */
static int
cemac_mii_readreg(device_t self, int phy, int reg)
{
	struct cemac_softc *sc;

	sc = device_private(self);

	CEMAC_WRITE(ETH_MAN, (ETH_MAN_HIGH | ETH_MAN_RW_RD
		| ((phy << ETH_MAN_PHYA_SHIFT) & ETH_MAN_PHYA)
		| ((reg << ETH_MAN_REGA_SHIFT) & ETH_MAN_REGA)
		| ETH_MAN_CODE_IEEE802_3));
	/* Spin until the maintenance unit goes idle. */
	while (!(CEMAC_READ(ETH_SR) & ETH_SR_IDLE));

	return (CEMAC_READ(ETH_MAN) & ETH_MAN_DATA);
}

/*
 * Write a PHY register via the MAC's PHY maintenance interface.
 * Busy-waits for the shift operation to complete.
 */
static void
cemac_mii_writereg(device_t self, int phy, int reg, int val)
{
	struct cemac_softc *sc;

	sc = device_private(self);

	CEMAC_WRITE(ETH_MAN, (ETH_MAN_HIGH | ETH_MAN_RW_WR
		| ((phy << ETH_MAN_PHYA_SHIFT) & ETH_MAN_PHYA)
		| ((reg << ETH_MAN_REGA_SHIFT) & ETH_MAN_REGA)
		| ETH_MAN_CODE_IEEE802_3
		| (val & ETH_MAN_DATA)));
	/* Spin until the maintenance unit goes idle. */
	while (!(CEMAC_READ(ETH_SR) & ETH_SR_IDLE)) ;
}


/*
 * MII status-change callback: mirror the PHY's negotiated duplex and
 * speed into the MAC configuration register.
 */
static void
cemac_statchg(struct ifnet *ifp)
{
	struct cemac_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t reg;

	/*
	 * We must keep the MAC and the PHY in sync as
	 * to the status of full-duplex!
	 */
	reg = CEMAC_READ(ETH_CFG);
	reg &= ~ETH_CFG_FD;
	if (sc->sc_mii.mii_media_active & IFM_FDX)
		reg |= ETH_CFG_FD;

	reg &= ~ETH_CFG_SPD;
	if (ISSET(sc->cemac_flags, CEMAC_FLAG_GEM))
		reg &= ~GEM_CFG_GEN;	/* drop gigabit until re-selected */
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		break;
	case IFM_100_TX:
		reg |= ETH_CFG_SPD;
		break;
	case IFM_1000_T:
		reg |= ETH_CFG_SPD | GEM_CFG_GEN;
		break;
	default:
		break;
	}
	CEMAC_WRITE(ETH_CFG, reg);
}

/*
 * 1 Hz callout: harvest collision/miss statistics, reap finished TX
 * descriptors, restart the send queue if needed, and tick the MII.
 */
static void
cemac_tick(void *arg)
{
	struct cemac_softc* sc = (struct cemac_softc *)arg;
	struct ifnet * ifp = &sc->sc_ethercom.ec_if;
	int s;

	if (ISSET(sc->cemac_flags, CEMAC_FLAG_GEM))
		ifp->if_collisions += CEMAC_READ(GEM_SCOL) + CEMAC_READ(GEM_MCOL);
	else
		ifp->if_collisions += CEMAC_READ(ETH_SCOL) + CEMAC_READ(ETH_MCOL);

	/* These misses are ok, they will happen if the RAM/CPU can't keep up */
	if (!ISSET(sc->cemac_flags, CEMAC_FLAG_GEM)) {
		uint32_t misses = CEMAC_READ(ETH_DRFC);
		if (misses > 0)
			aprint_normal_ifnet(ifp, "%d rx misses\n", misses);
	}

	s = splnet();
	if (cemac_gctx(sc) > 0 && IFQ_IS_EMPTY(&ifp->if_snd) == 0)
		cemac_ifstart(ifp);
	splx(s);

	mii_tick(&sc->sc_mii);
	callout_reset(&sc->cemac_tick_ch, hz, cemac_tick, sc);
}


/*
 * ioctl handler: media ioctls go to ifmedia; everything else to
 * ether_ioctl(), re-initializing or reprogramming the address filter
 * when it reports ENETRESET.
 */
static int
cemac_ifioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct cemac_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error;

	s = splnet();
	switch(cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error != ENETRESET)
			break;
		error = 0;

		if (cmd == SIOCSIFCAP) {
			/* Capability change needs a full re-init. */
			error = (*ifp->if_init)(ifp);
		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/* Multicast list changed: update hardware filter. */
			cemac_setaddr(ifp);
		}
	}
	splx(s);
	return error;
}

/*
 * Transmit start routine: drain if_snd into the TX descriptor ring.
 * Frames that map to multiple or misaligned DMA segments are first
 * coalesced into a single fresh mbuf (the hardware takes one buffer).
 */
static void
cemac_ifstart(struct ifnet *ifp)
{
	struct cemac_softc *sc = (struct cemac_softc *)ifp->if_softc;
	struct mbuf *m;
	bus_dma_segment_t *segs;
	int s, bi, err, nsegs;

	s = splnet();
start:
	if (cemac_gctx(sc) == 0) {
		/* Enable transmit-buffer-free interrupt */
		CEMAC_WRITE(ETH_IER, ETH_ISR_TBRE);
		ifp->if_flags |= IFF_OACTIVE;
		ifp->if_timer = 10;
		splx(s);
		return;
	}

	ifp->if_timer = 0;

	IFQ_POLL(&ifp->if_snd, m);
	if (m == NULL) {
		splx(s);
		return;
	}

	bi = (sc->txqi + sc->txqc) % TX_QLEN;
	if ((err = bus_dmamap_load_mbuf(sc->sc_dmat, sc->txq[bi].m_dmamap, m,
		BUS_DMA_NOWAIT)) ||
		sc->txq[bi].m_dmamap->dm_segs[0].ds_addr & 0x3 ||
		sc->txq[bi].m_dmamap->dm_nsegs > 1) {
		/* Copy entire mbuf chain to new single */
		struct mbuf *mn;

		if (err == 0)
			bus_dmamap_unload(sc->sc_dmat, sc->txq[bi].m_dmamap);

		MGETHDR(mn, M_DONTWAIT, MT_DATA);
		if (mn == NULL) goto stop;
		if (m->m_pkthdr.len > MHLEN) {
			MCLGET(mn, M_DONTWAIT);
			if ((mn->m_flags & M_EXT) == 0) {
				m_freem(mn);
				goto stop;
			}
		}
		m_copydata(m, 0, m->m_pkthdr.len, mtod(mn, void *));
		mn->m_pkthdr.len = mn->m_len = m->m_pkthdr.len;
		IFQ_DEQUEUE(&ifp->if_snd, m);
		m_freem(m);
		m = mn;
		bus_dmamap_load_mbuf(sc->sc_dmat, sc->txq[bi].m_dmamap, m,
		    BUS_DMA_NOWAIT);
	} else {
		IFQ_DEQUEUE(&ifp->if_snd, m);
	}

	bpf_mtap(ifp, m, BPF_D_OUT);

	nsegs = sc->txq[bi].m_dmamap->dm_nsegs;
	segs = sc->txq[bi].m_dmamap->dm_segs;
	if (nsegs > 1)
		panic("#### ARGH #2");

	sc->txq[bi].m = m;
	sc->txqc++;

	DPRINTFN(2,("%s: start sending idx #%i mbuf %p (txqc=%i, phys %p), len=%u\n",
	    __FUNCTION__,
	    bi, sc->txq[bi].m, sc->txqc, (void*)segs->ds_addr,
	    (unsigned)m->m_pkthdr.len));
#ifdef	DIAGNOSTIC
	if (sc->txqc > TX_QLEN)
		panic("%s: txqc %i > %i", __FUNCTION__, sc->txqc, TX_QLEN);
#endif

	/* Flush the frame to memory before handing it to the DMA engine. */
	bus_dmamap_sync(sc->sc_dmat, sc->txq[bi].m_dmamap, 0,
	    sc->txq[bi].m_dmamap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	if (ISSET(sc->cemac_flags, CEMAC_FLAG_GEM)) {
		/* GEM: fill in the descriptor and kick the TX engine. */
		sc->TDSC[bi].Addr = segs->ds_addr;
		sc->TDSC[bi].Info = __SHIFTIN(m->m_pkthdr.len, ETH_TDSC_I_LEN) |
		    ETH_TDSC_I_LAST_BUF | (bi == (TX_QLEN - 1) ? ETH_TDSC_I_WRAP : 0);

		DPRINTFN(3,("%s: TDSC[%i].Addr 0x%08x\n",
		    __FUNCTION__, bi, sc->TDSC[bi].Addr));
		DPRINTFN(3,("%s: TDSC[%i].Info 0x%08x\n",
		    __FUNCTION__, bi, sc->TDSC[bi].Info));

		uint32_t ctl = CEMAC_READ(ETH_CTL) | GEM_CTL_STARTTX;
		CEMAC_WRITE(ETH_CTL, ctl);
		DPRINTFN(3,("%s: ETH_CTL 0x%08x\n", __FUNCTION__, CEMAC_READ(ETH_CTL)));
	} else {
		/* Classic EMAC: program address/length registers directly. */
		CEMAC_WRITE(ETH_TAR, segs->ds_addr);
		CEMAC_WRITE(ETH_TCR, m->m_pkthdr.len);
	}
	if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
		goto start;
stop:

	splx(s);
	return;
}

/* Watchdog: a queued transmit did not complete within if_timer seconds. */
static void
cemac_ifwatchdog(struct ifnet *ifp)
{
	struct cemac_softc *sc = (struct cemac_softc *)ifp->if_softc;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;
	aprint_error_ifnet(ifp, "device timeout, CTL = 0x%08x, CFG = 0x%08x\n",
	    CEMAC_READ(ETH_CTL), CEMAC_READ(ETH_CFG));
}

/*
 * Bring the interface up: enable checksum offload as configured (GEM
 * only), unmask interrupts, enable TX/RX and start the periodic tick.
 */
static int
cemac_ifinit(struct ifnet *ifp)
{
	struct cemac_softc *sc = ifp->if_softc;
	uint32_t dma, cfg;
	int s = splnet();

	callout_stop(&sc->cemac_tick_ch);

	if (ISSET(sc->cemac_flags, CEMAC_FLAG_GEM)) {

		if (ifp->if_capenable &
		    (IFCAP_CSUM_IPv4_Tx |
			IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_UDPv4_Tx |
			IFCAP_CSUM_TCPv6_Tx | IFCAP_CSUM_UDPv6_Tx)) {
			/* Enable transmit checksum generation in the DMA engine. */
			dma = CEMAC_READ(GEM_DMA_CFG);
			dma |= GEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN;
			CEMAC_WRITE(GEM_DMA_CFG, dma);
		}
		if (ifp->if_capenable &
		    (IFCAP_CSUM_IPv4_Rx |
			IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
			IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx)) {
			/* Enable receive checksum verification. */
			cfg = CEMAC_READ(ETH_CFG);
			cfg |= GEM_CFG_RX_CHKSUM_OFFLD_EN;
			CEMAC_WRITE(ETH_CFG, cfg);
		}
	}

	// enable interrupts
	CEMAC_WRITE(ETH_IDR, -1);
	CEMAC_WRITE(ETH_IER, ETH_ISR_RCOM | ETH_ISR_TBRE | ETH_ISR_TIDLE
	    | ETH_ISR_RBNA | ETH_ISR_ROVR | ETH_ISR_TCOM);

	// enable transmitter / receiver
	CEMAC_WRITE(ETH_CTL, ETH_CTL_TE | ETH_CTL_RE | ETH_CTL_ISR
	    | ETH_CTL_CSR | ETH_CTL_MPE);

	mii_mediachg(&sc->sc_mii);
	callout_reset(&sc->cemac_tick_ch, hz, cemac_tick, sc);
	ifp->if_flags |= IFF_RUNNING;
	splx(s);
	return 0;
}

/*
 * Bring the interface down: stop the tick and the MII; the hardware
 * quiesce sequence is currently disabled (#if 0).
 */
static void
cemac_ifstop(struct ifnet *ifp, int disable)
{
//	uint32_t u;
	struct cemac_softc *sc = ifp->if_softc;

#if 0
	CEMAC_WRITE(ETH_CTL, ETH_CTL_MPE);	// disable everything
	CEMAC_WRITE(ETH_IDR, -1);		// disable interrupts
//	CEMAC_WRITE(ETH_RBQP, 0);		// clear receive
	if (ISSET(sc->cemac_flags, CEMAC_FLAG_GEM))
		CEMAC_WRITE(ETH_CFG,
		    GEM_CFG_CLK_64 | ETH_CFG_SPD | ETH_CFG_FD | ETH_CFG_BIG);
	else
		CEMAC_WRITE(ETH_CFG,
		    ETH_CFG_CLK_32 | ETH_CFG_SPD | ETH_CFG_FD | ETH_CFG_BIG);
//	CEMAC_WRITE(ETH_TCR, 0);		// send nothing
//	(void)CEMAC_READ(ETH_ISR);
	u = CEMAC_READ(ETH_TSR);
	CEMAC_WRITE(ETH_TSR, (u & (ETH_TSR_UND | ETH_TSR_COMP | ETH_TSR_BNQ
	    | ETH_TSR_IDLE | ETH_TSR_RLE
	    | ETH_TSR_COL|ETH_TSR_OVR)));
	u = CEMAC_READ(ETH_RSR);
	CEMAC_WRITE(ETH_RSR, (u & (ETH_RSR_OVR|ETH_RSR_REC|ETH_RSR_BNA)));
#endif
	callout_stop(&sc->cemac_tick_ch);

	/* Down the MII. */
	mii_down(&sc->sc_mii);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
	sc->sc_mii.mii_media_status &= ~IFM_ACTIVE;
}

/*
 * Program the receive address filters: our unicast address, up to three
 * perfect multicast matches, the multicast hash, and promiscuous mode.
 * The receiver is disabled while the filters are rewritten.
 */
static void
cemac_setaddr(struct ifnet *ifp)
{
	struct cemac_softc *sc = ifp->if_softc;
	struct ethercom *ac = &sc->sc_ethercom;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint8_t ias[3][ETHER_ADDR_LEN];		/* perfect mcast filters */
	uint32_t h, nma = 0, hashes[2] = { 0, 0 };
	uint32_t ctl = CEMAC_READ(ETH_CTL);
	uint32_t cfg = CEMAC_READ(ETH_CFG);

	/* disable receiver temporarily */
	CEMAC_WRITE(ETH_CTL, ctl & ~ETH_CTL_RE);

	/* NOTE(review): ETH_CFG_UNI appears twice in this mask — harmless
	 * but redundant; one occurrence could be dropped. */
	cfg &= ~(ETH_CFG_MTI | ETH_CFG_UNI | ETH_CFG_CAF | ETH_CFG_UNI);

	if (ifp->if_flags & IFF_PROMISC) {
		cfg |=  ETH_CFG_CAF;
	} else {
		cfg &= ~ETH_CFG_CAF;
	}

	// ETH_CFG_BIG?

	ifp->if_flags &= ~IFF_ALLMULTI;

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range. (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			cfg |= ETH_CFG_MTI;
			hashes[0] = 0xffffffffUL;
			hashes[1] = 0xffffffffUL;
			ifp->if_flags |= IFF_ALLMULTI;
			nma = 0;
			break;
		}

		if (nma < 3) {
			/* We can program 3 perfect address filters for mcast */
			memcpy(ias[nma], enm->enm_addrlo, ETHER_ADDR_LEN);
		} else {
			/*
			 * XXX: Datasheet is not very clear here, I'm not sure
			 * if I'm doing this right.  --joff
			 */
			h = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);

			/* Just want the 6 most-significant bits. */
			h = h >> 26;
#if 0
			hashes[h / 32] |=  (1 << (h % 32));
#else
			hashes[0] = 0xffffffffUL;
			hashes[1] = 0xffffffffUL;
#endif
			cfg |= ETH_CFG_MTI;
		}
		ETHER_NEXT_MULTI(step, enm);
		nma++;
	}

	// program...
	DPRINTFN(1,("%s: en0 %02x:%02x:%02x:%02x:%02x:%02x\n", __FUNCTION__,
	    sc->sc_enaddr[0], sc->sc_enaddr[1], sc->sc_enaddr[2],
	    sc->sc_enaddr[3], sc->sc_enaddr[4], sc->sc_enaddr[5]));
	CEMAC_GEM_WRITE(SA1L, (sc->sc_enaddr[3] << 24)
	    | (sc->sc_enaddr[2] << 16) | (sc->sc_enaddr[1] << 8)
	    | (sc->sc_enaddr[0]));
	CEMAC_GEM_WRITE(SA1H, (sc->sc_enaddr[5] << 8)
	    | (sc->sc_enaddr[4]));
	if (nma > 0) {
		DPRINTFN(1,("%s: en1 %02x:%02x:%02x:%02x:%02x:%02x\n", __FUNCTION__,
		    ias[0][0], ias[0][1], ias[0][2],
		    ias[0][3], ias[0][4], ias[0][5]));
		CEMAC_WRITE(ETH_SA2L, (ias[0][3] << 24)
		    | (ias[0][2] << 16) | (ias[0][1] << 8)
		    | (ias[0][0]));
		CEMAC_WRITE(ETH_SA2H, (ias[0][4] << 8)
		    | (ias[0][5]));
	}
	if (nma > 1) {
		DPRINTFN(1,("%s: en2 %02x:%02x:%02x:%02x:%02x:%02x\n", __FUNCTION__,
		    ias[1][0], ias[1][1], ias[1][2],
		    ias[1][3], ias[1][4], ias[1][5]));
		CEMAC_WRITE(ETH_SA3L, (ias[1][3] << 24)
		    | (ias[1][2] << 16) | (ias[1][1] << 8)
		    | (ias[1][0]));
		CEMAC_WRITE(ETH_SA3H, (ias[1][4] << 8)
		    | (ias[1][5]));
	}
	if (nma > 2) {
		DPRINTFN(1,("%s: en3 %02x:%02x:%02x:%02x:%02x:%02x\n", __FUNCTION__,
		    ias[2][0], ias[2][1], ias[2][2],
		    ias[2][3], ias[2][4], ias[2][5]));
		CEMAC_WRITE(ETH_SA4L, (ias[2][3] << 24)
		    | (ias[2][2] << 16) | (ias[2][1] << 8)
		    | (ias[2][0]));
		CEMAC_WRITE(ETH_SA4H, (ias[2][4] << 8)
		    | (ias[2][5]));
	}
	/* NOTE(review): hashes[0] goes to HSH and hashes[1] to HSL here —
	 * confirm against the datasheet that the high/low halves are not
	 * swapped (moot while both halves are set identically above). */
	CEMAC_GEM_WRITE(HSH, hashes[0]);
	CEMAC_GEM_WRITE(HSL, hashes[1]);
	CEMAC_WRITE(ETH_CFG, cfg);
	/* re-enable receiver */
	CEMAC_WRITE(ETH_CTL, ctl | ETH_CTL_RE);
}