/*	$NetBSD: dp83932.c,v 1.34 2010/04/05 07:19:34 joerg Exp $	*/

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the National Semiconductor DP83932
 * Systems-Oriented Network Interface Controller (SONIC).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dp83932.c,v 1.34 2010/04/05 07:19:34 joerg Exp $");


#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>

#include <uvm/uvm_extern.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <sys/bus.h>
#include <sys/intr.h>

#include <dev/ic/dp83932reg.h>
#include <dev/ic/dp83932var.h>

static void	sonic_start(struct ifnet *);
static void	sonic_watchdog(struct ifnet *);
static int	sonic_ioctl(struct ifnet *, u_long, void *);
static int	sonic_init(struct ifnet *);
static void	sonic_stop(struct ifnet *, int);

static bool	sonic_shutdown(device_t, int);

static void	sonic_reset(struct sonic_softc *);
static void	sonic_rxdrain(struct sonic_softc *);
static int	sonic_add_rxbuf(struct sonic_softc *, int);
static void	sonic_set_filter(struct sonic_softc *);

static uint16_t	sonic_txintr(struct sonic_softc *);
static void	sonic_rxintr(struct sonic_softc *);

int	sonic_copy_small = 0;

#define	ETHER_PAD_LEN	(ETHER_MIN_LEN - ETHER_CRC_LEN)

/*
 * sonic_attach:
 *
 *	Attach a SONIC interface to the system.
 */
void
sonic_attach(struct sonic_softc *sc, const uint8_t *enaddr)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int i, rseg, error;
	bus_dma_segment_t seg;
	size_t cdatasize;
	uint8_t *nullbuf;

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if (sc->sc_32bit)
		cdatasize = sizeof(struct sonic_control_data32);
	else
		cdatasize = sizeof(struct sonic_control_data16);

	if ((error = bus_dmamem_alloc(sc->sc_dmat, cdatasize + ETHER_PAD_LEN,
	     PAGE_SIZE, (64 * 1024), &seg, 1, &rseg,
	     BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n", error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    cdatasize + ETHER_PAD_LEN, (void **) &sc->sc_cdata16,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}
	nullbuf = (uint8_t *)sc->sc_cdata16 + cdatasize;
	memset(nullbuf, 0, ETHER_PAD_LEN);

	if ((error = bus_dmamap_create(sc->sc_dmat,
	     cdatasize, 1, cdatasize, 0, BUS_DMA_NOWAIT,
	     &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	     sc->sc_cdata16, cdatasize, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n", error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < SONIC_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		     SONIC_NTXFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		     &sc->sc_txsoft[i].ds_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < SONIC_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		     MCLBYTES, 0, BUS_DMA_NOWAIT,
		     &sc->sc_rxsoft[i].ds_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].ds_mbuf = NULL;
	}

	/*
	 * create and map the pad buffer
	 */
	if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_PAD_LEN, 1,
	    ETHER_PAD_LEN, 0, BUS_DMA_NOWAIT, &sc->sc_nulldmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create pad buffer DMA map, error = %d\n", error);
		goto fail_5;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_nulldmamap,
	    nullbuf, ETHER_PAD_LEN, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load pad buffer DMA map, error = %d\n", error);
		goto fail_6;
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_nulldmamap, 0, ETHER_PAD_LEN,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Reset the chip to a known state.
	 */
	sonic_reset(sc);

	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(enaddr));

	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = sonic_ioctl;
	ifp->if_start = sonic_start;
	ifp->if_watchdog = sonic_watchdog;
	ifp->if_init = sonic_init;
	ifp->if_stop = sonic_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	if (pmf_device_register1(sc->sc_dev, NULL, NULL, sonic_shutdown))
		pmf_class_network_register(sc->sc_dev, ifp);
	else
		aprint_error_dev(sc->sc_dev,
		    "couldn't establish power handler\n");

	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_nulldmamap);
 fail_5:
	for (i = 0; i < SONIC_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].ds_dmamap);
	}
 fail_4:
	for (i = 0; i < SONIC_NTXDESC; i++) {
		if (sc->sc_txsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].ds_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_cdata16, cdatasize);
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * sonic_shutdown:
 *
 *	Make sure the interface is stopped at reboot.
 */
bool
sonic_shutdown(device_t self, int howto)
{
	struct sonic_softc *sc = device_private(self);

	sonic_stop(&sc->sc_ethercom.ec_if, 1);

	return true;
}

/*
 * sonic_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
void
sonic_start(struct ifnet *ifp)
{
	struct sonic_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct sonic_tda16 *tda16;
	struct sonic_tda32 *tda32;
	struct sonic_descsoft *ds;
	bus_dmamap_t dmamap;
	int error, olasttx, nexttx, opending, totlen, olseg;
	int seg = 0;	/* XXX: gcc */

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous txpending and the current "last txdesc
	 * used" index.
	 */
	opending = sc->sc_txpending;
	olasttx = sc->sc_txlast;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.  Leave one at the end for sanity's sake.
	 */
	while (sc->sc_txpending < (SONIC_NTXDESC - 1)) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		/*
		 * Get the next available transmit descriptor.
		 */
		nexttx = SONIC_NEXTTX(sc->sc_txlast);
		ds = &sc->sc_txsoft[nexttx];
		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of frags, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.
		 */
		if ((error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT)) != 0 ||
		    (m0->m_pkthdr.len < ETHER_PAD_LEN &&
		    dmamap->dm_nsegs == SONIC_NTXFRAGS)) {
			if (error == 0)
				bus_dmamap_unload(sc->sc_dmat, dmamap);
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    device_xname(sc->sc_dev));
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n",
					    device_xname(sc->sc_dev));
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", device_xname(sc->sc_dev),
				    error);
				m_freem(m);
				break;
			}
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/*
		 * Initialize the transmit descriptor.
		 */
		totlen = 0;
		if (sc->sc_32bit) {
			tda32 = &sc->sc_tda32[nexttx];
			for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
				tda32->tda_frags[seg].frag_ptr1 =
				    htosonic32(sc,
				    (dmamap->dm_segs[seg].ds_addr >> 16) &
				    0xffff);
				tda32->tda_frags[seg].frag_ptr0 =
				    htosonic32(sc,
				    dmamap->dm_segs[seg].ds_addr & 0xffff);
				tda32->tda_frags[seg].frag_size =
				    htosonic32(sc, dmamap->dm_segs[seg].ds_len);
				totlen += dmamap->dm_segs[seg].ds_len;
			}
			if (totlen < ETHER_PAD_LEN) {
				tda32->tda_frags[seg].frag_ptr1 =
				    htosonic32(sc,
				    (sc->sc_nulldma >> 16) & 0xffff);
				tda32->tda_frags[seg].frag_ptr0 =
				    htosonic32(sc, sc->sc_nulldma & 0xffff);
				tda32->tda_frags[seg].frag_size =
				    htosonic32(sc, ETHER_PAD_LEN - totlen);
				totlen = ETHER_PAD_LEN;
				seg++;
			}

			tda32->tda_status = 0;
			tda32->tda_pktconfig = 0;
			tda32->tda_pktsize = htosonic32(sc, totlen);
			tda32->tda_fragcnt = htosonic32(sc, seg);

			/* Link it up. */
			tda32->tda_frags[seg].frag_ptr0 =
			    htosonic32(sc, SONIC_CDTXADDR32(sc,
			    SONIC_NEXTTX(nexttx)) & 0xffff);

			/* Sync the Tx descriptor. */
			SONIC_CDTXSYNC32(sc, nexttx,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		} else {
			tda16 = &sc->sc_tda16[nexttx];
			for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
				tda16->tda_frags[seg].frag_ptr1 =
				    htosonic16(sc,
				    (dmamap->dm_segs[seg].ds_addr >> 16) &
				    0xffff);
				tda16->tda_frags[seg].frag_ptr0 =
				    htosonic16(sc,
				    dmamap->dm_segs[seg].ds_addr & 0xffff);
				tda16->tda_frags[seg].frag_size =
				    htosonic16(sc, dmamap->dm_segs[seg].ds_len);
				totlen += dmamap->dm_segs[seg].ds_len;
			}
			if (totlen < ETHER_PAD_LEN) {
				tda16->tda_frags[seg].frag_ptr1 =
				    htosonic16(sc,
				    (sc->sc_nulldma >> 16) & 0xffff);
				tda16->tda_frags[seg].frag_ptr0 =
				    htosonic16(sc, sc->sc_nulldma & 0xffff);
				tda16->tda_frags[seg].frag_size =
				    htosonic16(sc, ETHER_PAD_LEN - totlen);
				totlen = ETHER_PAD_LEN;
				seg++;
			}

			tda16->tda_status = 0;
			tda16->tda_pktconfig = 0;
			tda16->tda_pktsize = htosonic16(sc, totlen);
			tda16->tda_fragcnt = htosonic16(sc, seg);

			/* Link it up. */
			tda16->tda_frags[seg].frag_ptr0 =
			    htosonic16(sc, SONIC_CDTXADDR16(sc,
			    SONIC_NEXTTX(nexttx)) & 0xffff);

			/* Sync the Tx descriptor. */
			SONIC_CDTXSYNC16(sc, nexttx,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		}

		/* Advance the Tx pointer. */
		sc->sc_txpending++;
		sc->sc_txlast = nexttx;

		/*
		 * Pass the packet to any BPF listeners.
		 */
		bpf_mtap(ifp, m0);
	}

	if (sc->sc_txpending == (SONIC_NTXDESC - 1)) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txpending != opending) {
		/*
		 * We enqueued packets.  If the transmitter was idle,
		 * reset the txdirty pointer.
		 */
		if (opending == 0)
			sc->sc_txdirty = SONIC_NEXTTX(olasttx);

		/*
		 * Stop the SONIC on the last packet we've set up,
		 * and clear end-of-list on the descriptor previous
		 * to our new chain.
		 *
		 * NOTE: our `seg' variable should still be valid!
		 */
		if (sc->sc_32bit) {
			olseg =
			    sonic32toh(sc, sc->sc_tda32[olasttx].tda_fragcnt);
			sc->sc_tda32[sc->sc_txlast].tda_frags[seg].frag_ptr0 |=
			    htosonic32(sc, TDA_LINK_EOL);
			SONIC_CDTXSYNC32(sc, sc->sc_txlast,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			sc->sc_tda32[olasttx].tda_frags[olseg].frag_ptr0 &=
			    htosonic32(sc, ~TDA_LINK_EOL);
			SONIC_CDTXSYNC32(sc, olasttx,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		} else {
			olseg =
			    sonic16toh(sc, sc->sc_tda16[olasttx].tda_fragcnt);
			sc->sc_tda16[sc->sc_txlast].tda_frags[seg].frag_ptr0 |=
			    htosonic16(sc, TDA_LINK_EOL);
			SONIC_CDTXSYNC16(sc, sc->sc_txlast,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			sc->sc_tda16[olasttx].tda_frags[olseg].frag_ptr0 &=
			    htosonic16(sc, ~TDA_LINK_EOL);
			SONIC_CDTXSYNC16(sc, olasttx,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		}

		/* Start the transmitter. */
		CSR_WRITE(sc, SONIC_CR, CR_TXP);

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * sonic_watchdog:	[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
void
sonic_watchdog(struct ifnet *ifp)
{
	struct sonic_softc *sc = ifp->if_softc;

	printf("%s: device timeout\n", device_xname(sc->sc_dev));
	ifp->if_oerrors++;

	(void)sonic_init(ifp);
}

/*
 * sonic_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
int
sonic_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	int s, error;

	s = splnet();

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		/*
		 * Multicast list has changed; set the hardware
		 * filter accordingly.
		 */
		if (ifp->if_flags & IFF_RUNNING)
			(void)sonic_init(ifp);
		error = 0;
	}

	splx(s);
	return error;
}

/*
 * sonic_intr:
 *
 *	Interrupt service routine.
 */
int
sonic_intr(void *arg)
{
	struct sonic_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint16_t isr;
	int handled = 0, wantinit;

	for (wantinit = 0; wantinit == 0;) {
		isr = CSR_READ(sc, SONIC_ISR) & sc->sc_imr;
		if (isr == 0)
			break;
		CSR_WRITE(sc, SONIC_ISR, isr);	/* ACK */

		handled = 1;

		if (isr & IMR_PRX)
			sonic_rxintr(sc);

		if (isr & (IMR_PTX|IMR_TXER)) {
			if (sonic_txintr(sc) & TCR_FU) {
				printf("%s: transmit FIFO underrun\n",
				    device_xname(sc->sc_dev));
				wantinit = 1;
			}
		}

		if (isr & (IMR_RFO|IMR_RBA|IMR_RBE|IMR_RDE)) {
#define	PRINTERR(bit, str)						\
			if (isr & (bit))				\
				printf("%s: %s\n",device_xname(sc->sc_dev), str)
			PRINTERR(IMR_RFO, "receive FIFO overrun");
			PRINTERR(IMR_RBA, "receive buffer exceeded");
			PRINTERR(IMR_RBE, "receive buffers exhausted");
			PRINTERR(IMR_RDE, "receive descriptors exhausted");
			wantinit = 1;
		}
	}

	if (handled) {
		if (wantinit)
			(void)sonic_init(ifp);
		sonic_start(ifp);
	}

	return handled;
}

/*
 * sonic_txintr:
 *
 *	Helper; handle transmit complete interrupts.
 */
uint16_t
sonic_txintr(struct sonic_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sonic_descsoft *ds;
	struct sonic_tda32 *tda32;
	struct sonic_tda16 *tda16;
	uint16_t status, totstat = 0;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	for (i = sc->sc_txdirty; sc->sc_txpending != 0;
	     i = SONIC_NEXTTX(i), sc->sc_txpending--) {
		ds = &sc->sc_txsoft[i];

		if (sc->sc_32bit) {
			SONIC_CDTXSYNC32(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			tda32 = &sc->sc_tda32[i];
			status = sonic32toh(sc, tda32->tda_status);
			SONIC_CDTXSYNC32(sc, i, BUS_DMASYNC_PREREAD);
		} else {
			SONIC_CDTXSYNC16(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			tda16 = &sc->sc_tda16[i];
			status = sonic16toh(sc, tda16->tda_status);
			SONIC_CDTXSYNC16(sc, i, BUS_DMASYNC_PREREAD);
		}

		if ((status & ~(TCR_EXDIS|TCR_CRCI|TCR_POWC|TCR_PINT)) == 0)
			break;

		totstat |= status;

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
		m_freem(ds->ds_mbuf);
		ds->ds_mbuf = NULL;

		/*
		 * Check for errors and collisions.
		 */
		if (status & TCR_PTX)
			ifp->if_opackets++;
		else
			ifp->if_oerrors++;
		ifp->if_collisions += TDA_STATUS_NCOL(status);
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txdirty = i;

	/*
	 * Cancel the watchdog timer if there are no pending
	 * transmissions.
	 */
	if (sc->sc_txpending == 0)
		ifp->if_timer = 0;

	return totstat;
}

/*
 * sonic_rxintr:
 *
 *	Helper; handle receive interrupts.
 */
void
sonic_rxintr(struct sonic_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sonic_descsoft *ds;
	struct sonic_rda32 *rda32;
	struct sonic_rda16 *rda16;
	struct mbuf *m;
	int i, len;
	uint16_t status, bytecount, ptr0, ptr1, seqno;

	for (i = sc->sc_rxptr;; i = SONIC_NEXTRX(i)) {
		ds = &sc->sc_rxsoft[i];

		if (sc->sc_32bit) {
			SONIC_CDRXSYNC32(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			rda32 = &sc->sc_rda32[i];
			SONIC_CDRXSYNC32(sc, i, BUS_DMASYNC_PREREAD);
			if (rda32->rda_inuse != 0)
				break;
			status = sonic32toh(sc, rda32->rda_status);
			bytecount = sonic32toh(sc, rda32->rda_bytecount);
			ptr0 = sonic32toh(sc, rda32->rda_pkt_ptr0);
			ptr1 = sonic32toh(sc, rda32->rda_pkt_ptr1);
			seqno = sonic32toh(sc, rda32->rda_seqno);
		} else {
			SONIC_CDRXSYNC16(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			rda16 = &sc->sc_rda16[i];
			SONIC_CDRXSYNC16(sc, i, BUS_DMASYNC_PREREAD);
			if (rda16->rda_inuse != 0)
				break;
			status = sonic16toh(sc, rda16->rda_status);
			bytecount = sonic16toh(sc, rda16->rda_bytecount);
			ptr0 = sonic16toh(sc, rda16->rda_pkt_ptr0);
			ptr1 = sonic16toh(sc, rda16->rda_pkt_ptr1);
			seqno = sonic16toh(sc, rda16->rda_seqno);
		}

		/*
		 * Make absolutely sure this is the only packet
		 * in this receive buffer.  Our entire Rx buffer
		 * management scheme depends on this, and if the
		 * SONIC didn't follow our rule, it means we've
		 * misconfigured it.
		 */
		KASSERT(status & RCR_LPKT);

		/*
		 * Make sure the packet arrived OK.  If an error occurred,
		 * update stats and reset the descriptor.  The buffer will
		 * be reused the next time the descriptor comes up in the
		 * ring.
		 */
		if ((status & RCR_PRX) == 0) {
			if (status & RCR_FAER)
				printf("%s: Rx frame alignment error\n",
				    device_xname(sc->sc_dev));
			else if (status & RCR_CRCR)
				printf("%s: Rx CRC error\n",
				    device_xname(sc->sc_dev));
			ifp->if_ierrors++;
			SONIC_INIT_RXDESC(sc, i);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * The SONIC includes the CRC with every packet.
		 */
		len = bytecount - ETHER_CRC_LEN;

		/*
		 * Ok, if the chip is in 32-bit mode, then receive
		 * buffers must be aligned to 32-bit boundaries,
		 * which means the payload is misaligned.  In this
		 * case, we must allocate a new mbuf, and copy the
		 * packet into it, scooted forward 2 bytes to ensure
		 * proper alignment.
		 *
		 * Note, in 16-bit mode, we can configure the SONIC
		 * to do what we want, and we have.
		 */
#ifndef __NO_STRICT_ALIGNMENT
		if (sc->sc_32bit) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				goto dropit;
			if (len > (MHLEN - 2)) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0)
					goto dropit;
			}
			m->m_data += 2;
			/*
			 * Note that we use a cluster for incoming frames,
			 * so the buffer is virtually contiguous.
			 */
			memcpy(mtod(m, void *), mtod(ds->ds_mbuf, void *),
			    len);
			SONIC_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
		} else
#endif /* ! __NO_STRICT_ALIGNMENT */
		/*
		 * If the packet is small enough to fit in a single
		 * header mbuf, allocate one and copy the data into
		 * it.  This greatly reduces memory consumption when
		 * we receive lots of small packets.
		 */
		if (sonic_copy_small != 0 && len <= (MHLEN - 2)) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				goto dropit;
			m->m_data += 2;
			/*
			 * Note that we use a cluster for incoming frames,
			 * so the buffer is virtually contiguous.
			 */
			memcpy(mtod(m, void *), mtod(ds->ds_mbuf, void *),
			    len);
			SONIC_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
		} else {
			m = ds->ds_mbuf;
			if (sonic_add_rxbuf(sc, i) != 0) {
 dropit:
				ifp->if_ierrors++;
				SONIC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}
		}

		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

		/*
		 * Pass this up to any BPF listeners.
		 */
		bpf_mtap(ifp, m);

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;
	CSR_WRITE(sc, SONIC_RWR, SONIC_CDRRADDR(sc, SONIC_PREVRX(i)));
}

/*
 * sonic_reset:
 *
 *	Perform a soft reset on the SONIC.
 */
void
sonic_reset(struct sonic_softc *sc)
{

	/* stop TX, RX and timer, and ensure RST is clear */
	CSR_WRITE(sc, SONIC_CR, CR_STP | CR_RXDIS | CR_HTX);
	delay(1000);

	CSR_WRITE(sc, SONIC_CR, CR_RST);
	delay(1000);

	/* clear all interrupts */
	CSR_WRITE(sc, SONIC_IMR, 0);
	CSR_WRITE(sc, SONIC_ISR, IMR_ALL);

	CSR_WRITE(sc, SONIC_CR, 0);
	delay(1000);
}

/*
 * sonic_init:		[ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 */
int
sonic_init(struct ifnet *ifp)
{
	struct sonic_softc *sc = ifp->if_softc;
	struct sonic_descsoft *ds;
	int i, error = 0;
	uint16_t reg;

	/*
	 * Cancel any pending I/O.
	 */
	sonic_stop(ifp, 0);

	/*
	 * Reset the SONIC to a known state.
	 */
	sonic_reset(sc);

	/*
	 * Bring the SONIC into reset state, and program the DCR.
	 *
	 * Note: We don't bother optimizing the transmit and receive
	 * thresholds, here.  TFT/RFT values should be set in MD attachments.
	 */
	reg = sc->sc_dcr;
	if (sc->sc_32bit)
		reg |= DCR_DW;
	CSR_WRITE(sc, SONIC_CR, CR_RST);
	CSR_WRITE(sc, SONIC_DCR, reg);
	CSR_WRITE(sc, SONIC_DCR2, sc->sc_dcr2);
	CSR_WRITE(sc, SONIC_CR, 0);

	/*
	 * Initialize the transmit descriptors.
	 */
	if (sc->sc_32bit) {
		for (i = 0; i < SONIC_NTXDESC; i++) {
			memset(&sc->sc_tda32[i], 0, sizeof(struct sonic_tda32));
			SONIC_CDTXSYNC32(sc, i,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		}
	} else {
		for (i = 0; i < SONIC_NTXDESC; i++) {
			memset(&sc->sc_tda16[i], 0, sizeof(struct sonic_tda16));
			SONIC_CDTXSYNC16(sc, i,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		}
	}
	sc->sc_txpending = 0;
	sc->sc_txdirty = 0;
	sc->sc_txlast = SONIC_NTXDESC - 1;

	/*
	 * Initialize the receive descriptor ring.
	 */
	for (i = 0; i < SONIC_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf == NULL) {
			if ((error = sonic_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map Rx "
				    "buffer %d, error = %d\n",
				    device_xname(sc->sc_dev), i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				sonic_rxdrain(sc);
				goto out;
			}
		} else
			SONIC_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	/* Give the transmit ring to the SONIC. */
	CSR_WRITE(sc, SONIC_UTDAR, (SONIC_CDTXADDR(sc, 0) >> 16) & 0xffff);
	CSR_WRITE(sc, SONIC_CTDAR, SONIC_CDTXADDR(sc, 0) & 0xffff);

	/* Give the receive descriptor ring to the SONIC. */
	CSR_WRITE(sc, SONIC_URDAR, (SONIC_CDRXADDR(sc, 0) >> 16) & 0xffff);
	CSR_WRITE(sc, SONIC_CRDAR, SONIC_CDRXADDR(sc, 0) & 0xffff);

	/* Give the receive buffer ring to the SONIC. */
	CSR_WRITE(sc, SONIC_URRAR, (SONIC_CDRRADDR(sc, 0) >> 16) & 0xffff);
	CSR_WRITE(sc, SONIC_RSAR, SONIC_CDRRADDR(sc, 0) & 0xffff);
	if (sc->sc_32bit)
		CSR_WRITE(sc, SONIC_REAR,
		    (SONIC_CDRRADDR(sc, SONIC_NRXDESC - 1) +
		    sizeof(struct sonic_rra32)) & 0xffff);
	else
		CSR_WRITE(sc, SONIC_REAR,
		    (SONIC_CDRRADDR(sc, SONIC_NRXDESC - 1) +
		    sizeof(struct sonic_rra16)) & 0xffff);
	CSR_WRITE(sc, SONIC_RRR, SONIC_CDRRADDR(sc, 0) & 0xffff);
	CSR_WRITE(sc, SONIC_RWR, SONIC_CDRRADDR(sc, SONIC_NRXDESC - 1));

	/*
	 * Set the End-Of-Buffer counter such that only one packet
	 * will be placed into each buffer we provide.  Note we are
	 * following the recommendation of section 3.4.4 of the manual
	 * here, and have "lengthened" the receive buffers accordingly.
	 */
	if (sc->sc_32bit)
		CSR_WRITE(sc, SONIC_EOBC, (ETHER_MAX_LEN + 2) / 2);
	else
		CSR_WRITE(sc, SONIC_EOBC, (ETHER_MAX_LEN / 2));

	/* Reset the receive sequence counter. */
	CSR_WRITE(sc, SONIC_RSC, 0);

	/* Clear the tally registers. */
	CSR_WRITE(sc, SONIC_CRCETC, 0xffff);
	CSR_WRITE(sc, SONIC_FAET, 0xffff);
	CSR_WRITE(sc, SONIC_MPT, 0xffff);

	/* Set the receive filter. */
	sonic_set_filter(sc);

	/*
	 * Set the interrupt mask register.
	 */
	sc->sc_imr = IMR_RFO | IMR_RBA | IMR_RBE | IMR_RDE |
	    IMR_TXER | IMR_PTX | IMR_PRX;
	CSR_WRITE(sc, SONIC_IMR, sc->sc_imr);

	/*
	 * Start the receive process in motion.  Note, we don't
	 * start the transmit process until we actually try to
	 * transmit packets.
	 */
	CSR_WRITE(sc, SONIC_CR, CR_RXEN | CR_RRRA);

	/*
	 * ...all done!
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error)
		printf("%s: interface not running\n", device_xname(sc->sc_dev));
	return error;
}

/*
 * sonic_rxdrain:
 *
 *	Drain the receive queue.
 */
void
sonic_rxdrain(struct sonic_softc *sc)
{
	struct sonic_descsoft *ds;
	int i;

	for (i = 0; i < SONIC_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}
}

/*
 * sonic_stop:		[ifnet interface function]
 *
 *	Stop transmission on the interface.
 */
void
sonic_stop(struct ifnet *ifp, int disable)
{
	struct sonic_softc *sc = ifp->if_softc;
	struct sonic_descsoft *ds;
	int i;

	/*
	 * Disable interrupts.
	 */
	CSR_WRITE(sc, SONIC_IMR, 0);

	/*
	 * Stop the transmitter, receiver, and timer.
	 */
	CSR_WRITE(sc, SONIC_CR, CR_HTX|CR_RXDIS|CR_STP);
	for (i = 0; i < 1000; i++) {
		if ((CSR_READ(sc, SONIC_CR) & (CR_TXP|CR_RXEN|CR_ST)) == 0)
			break;
		delay(2);
	}
	if ((CSR_READ(sc, SONIC_CR) & (CR_TXP|CR_RXEN|CR_ST)) != 0)
		printf("%s: SONIC failed to stop\n", device_xname(sc->sc_dev));

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < SONIC_NTXDESC; i++) {
		ds = &sc->sc_txsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	if (disable)
		sonic_rxdrain(sc);
}

/*
 * sonic_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
int
sonic_add_rxbuf(struct sonic_softc *sc, int idx)
{
	struct sonic_descsoft *ds = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	if (ds->ds_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);

	ds->ds_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    device_xname(sc->sc_dev), idx, error);
		panic("sonic_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	SONIC_INIT_RXDESC(sc, idx);

	return 0;
}

static void
sonic_set_camentry(struct sonic_softc *sc, int entry, const uint8_t *enaddr)
{

	if (sc->sc_32bit) {
		struct sonic_cda32 *cda = &sc->sc_cda32[entry];

		cda->cda_entry = htosonic32(sc, entry);
		cda->cda_addr0 = htosonic32(sc, enaddr[0] | (enaddr[1] << 8));
		cda->cda_addr1 = htosonic32(sc, enaddr[2] | (enaddr[3] << 8));
		cda->cda_addr2 = htosonic32(sc, enaddr[4] | (enaddr[5] << 8));
	} else {
		struct sonic_cda16 *cda = &sc->sc_cda16[entry];

		cda->cda_entry = htosonic16(sc, entry);
		cda->cda_addr0 = htosonic16(sc, enaddr[0] | (enaddr[1] << 8));
		cda->cda_addr1 = htosonic16(sc, enaddr[2] | (enaddr[3] << 8));
		cda->cda_addr2 = htosonic16(sc, enaddr[4] | (enaddr[5] << 8));
	}
}

/*
 * sonic_set_filter:
 *
 *	Set the SONIC receive filter.
 */
void
sonic_set_filter(struct sonic_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	int i, entry = 0;
	uint16_t camvalid = 0;
	uint16_t rcr = 0;

	if (ifp->if_flags & IFF_BROADCAST)
		rcr |= RCR_BRD;

	if (ifp->if_flags & IFF_PROMISC) {
		rcr |= RCR_PRO;
		goto allmulti;
	}

	/* Put our station address in the first CAM slot. */
	sonic_set_camentry(sc, entry, CLLADDR(ifp->if_sadl));
	camvalid |= (1U << entry);
	entry++;

	/* Add the multicast addresses to the CAM. */
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * The only way to do this on the SONIC is to enable
			 * reception of all multicast packets.
			 */
			goto allmulti;
		}

		if (entry == SONIC_NCAMENT) {
			/*
			 * Out of CAM slots.  Have to enable reception
			 * of all multicast addresses.
			 */
			goto allmulti;
		}

		sonic_set_camentry(sc, entry, enm->enm_addrlo);
		camvalid |= (1U << entry);
		entry++;

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto setit;

 allmulti:
	/* Use only the first CAM slot (station address). */
	camvalid = 0x0001;
	entry = 1;
	rcr |= RCR_AMC;

 setit:
	/* set mask for the CAM Enable register */
	if (sc->sc_32bit) {
		if (entry == SONIC_NCAMENT)
			sc->sc_cdaenable32 = htosonic32(sc, camvalid);
		else
			sc->sc_cda32[entry].cda_entry =
			    htosonic32(sc, camvalid);
	} else {
		if (entry == SONIC_NCAMENT)
			sc->sc_cdaenable16 = htosonic16(sc, camvalid);
		else
			sc->sc_cda16[entry].cda_entry =
			    htosonic16(sc, camvalid);
	}

	/* Load the CAM. */
	SONIC_CDCAMSYNC(sc, BUS_DMASYNC_PREWRITE);
	CSR_WRITE(sc, SONIC_CDP, SONIC_CDCAMADDR(sc) & 0xffff);
	CSR_WRITE(sc, SONIC_CDC, entry);
	CSR_WRITE(sc, SONIC_CR, CR_LCAM);
	for (i = 0; i < 10000; i++) {
		if ((CSR_READ(sc, SONIC_CR) & CR_LCAM) == 0)
			break;
		delay(2);
	}
	if (CSR_READ(sc, SONIC_CR) & CR_LCAM)
		printf("%s: CAM load failed\n", device_xname(sc->sc_dev));
	SONIC_CDCAMSYNC(sc, BUS_DMASYNC_POSTWRITE);

	/* Set the receive control register. */
	CSR_WRITE(sc, SONIC_RCR, rcr);
}