1 /* $NetBSD: dp83932.c,v 1.33 2010/01/19 22:06:24 pooka Exp $ */ 2 3 /*- 4 * Copyright (c) 2001 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Jason R. Thorpe. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 */ 31 32 /* 33 * Device driver for the National Semiconductor DP83932 34 * Systems-Oriented Network Interface Controller (SONIC). 35 */ 36 37 #include <sys/cdefs.h> 38 __KERNEL_RCSID(0, "$NetBSD: dp83932.c,v 1.33 2010/01/19 22:06:24 pooka Exp $"); 39 40 41 #include <sys/param.h> 42 #include <sys/systm.h> 43 #include <sys/mbuf.h> 44 #include <sys/malloc.h> 45 #include <sys/kernel.h> 46 #include <sys/socket.h> 47 #include <sys/ioctl.h> 48 #include <sys/errno.h> 49 #include <sys/device.h> 50 51 #include <uvm/uvm_extern.h> 52 53 #include <net/if.h> 54 #include <net/if_dl.h> 55 #include <net/if_ether.h> 56 57 #include <net/bpf.h> 58 59 #include <sys/bus.h> 60 #include <sys/intr.h> 61 62 #include <dev/ic/dp83932reg.h> 63 #include <dev/ic/dp83932var.h> 64 65 static void sonic_start(struct ifnet *); 66 static void sonic_watchdog(struct ifnet *); 67 static int sonic_ioctl(struct ifnet *, u_long, void *); 68 static int sonic_init(struct ifnet *); 69 static void sonic_stop(struct ifnet *, int); 70 71 static bool sonic_shutdown(device_t, int); 72 73 static void sonic_reset(struct sonic_softc *); 74 static void sonic_rxdrain(struct sonic_softc *); 75 static int sonic_add_rxbuf(struct sonic_softc *, int); 76 static void sonic_set_filter(struct sonic_softc *); 77 78 static uint16_t sonic_txintr(struct sonic_softc *); 79 static void sonic_rxintr(struct sonic_softc *); 80 81 int sonic_copy_small = 0; 82 83 #define ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN) 84 85 /* 86 * sonic_attach: 87 * 88 * Attach a SONIC interface to the system. 89 */ 90 void 91 sonic_attach(struct sonic_softc *sc, const uint8_t *enaddr) 92 { 93 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 94 int i, rseg, error; 95 bus_dma_segment_t seg; 96 size_t cdatasize; 97 uint8_t *nullbuf; 98 99 /* 100 * Allocate the control data structures, and create and load the 101 * DMA map for it. 102 */ 103 if (sc->sc_32bit) 104 cdatasize = sizeof(struct sonic_control_data32); 105 else 106 cdatasize = sizeof(struct sonic_control_data16); 107 108 if ((error = bus_dmamem_alloc(sc->sc_dmat, cdatasize + ETHER_PAD_LEN, 109 PAGE_SIZE, (64 * 1024), &seg, 1, &rseg, 110 BUS_DMA_NOWAIT)) != 0) { 111 aprint_error_dev(sc->sc_dev, 112 "unable to allocate control data, error = %d\n", error); 113 goto fail_0; 114 } 115 116 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, 117 cdatasize + ETHER_PAD_LEN, (void **) &sc->sc_cdata16, 118 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) { 119 aprint_error_dev(sc->sc_dev, 120 "unable to map control data, error = %d\n", error); 121 goto fail_1; 122 } 123 nullbuf = (uint8_t *)sc->sc_cdata16 + cdatasize; 124 memset(nullbuf, 0, ETHER_PAD_LEN); 125 126 if ((error = bus_dmamap_create(sc->sc_dmat, 127 cdatasize, 1, cdatasize, 0, BUS_DMA_NOWAIT, 128 &sc->sc_cddmamap)) != 0) { 129 aprint_error_dev(sc->sc_dev, 130 "unable to create control data DMA map, error = %d\n", 131 error); 132 goto fail_2; 133 } 134 135 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap, 136 sc->sc_cdata16, cdatasize, NULL, BUS_DMA_NOWAIT)) != 0) { 137 aprint_error_dev(sc->sc_dev, 138 "unable to load control data DMA map, error = %d\n", error); 139 goto fail_3; 140 } 141 142 /* 143 * Create the transmit buffer DMA maps. 144 */ 145 for (i = 0; i < SONIC_NTXDESC; i++) { 146 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 147 SONIC_NTXFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT, 148 &sc->sc_txsoft[i].ds_dmamap)) != 0) { 149 aprint_error_dev(sc->sc_dev, 150 "unable to create tx DMA map %d, error = %d\n", 151 i, error); 152 goto fail_4; 153 } 154 } 155 156 /* 157 * Create the receive buffer DMA maps. 158 */ 159 for (i = 0; i < SONIC_NRXDESC; i++) { 160 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, 161 MCLBYTES, 0, BUS_DMA_NOWAIT, 162 &sc->sc_rxsoft[i].ds_dmamap)) != 0) { 163 aprint_error_dev(sc->sc_dev, 164 "unable to create rx DMA map %d, error = %d\n", 165 i, error); 166 goto fail_5; 167 } 168 sc->sc_rxsoft[i].ds_mbuf = NULL; 169 } 170 171 /* 172 * create and map the pad buffer 173 */ 174 if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_PAD_LEN, 1, 175 ETHER_PAD_LEN, 0, BUS_DMA_NOWAIT, &sc->sc_nulldmamap)) != 0) { 176 aprint_error_dev(sc->sc_dev, 177 "unable to create pad buffer DMA map, error = %d\n", error); 178 goto fail_5; 179 } 180 181 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_nulldmamap, 182 nullbuf, ETHER_PAD_LEN, NULL, BUS_DMA_NOWAIT)) != 0) { 183 aprint_error_dev(sc->sc_dev, 184 "unable to load pad buffer DMA map, error = %d\n", error); 185 goto fail_6; 186 } 187 bus_dmamap_sync(sc->sc_dmat, sc->sc_nulldmamap, 0, ETHER_PAD_LEN, 188 BUS_DMASYNC_PREWRITE); 189 190 /* 191 * Reset the chip to a known state. 192 */ 193 sonic_reset(sc); 194 195 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n", 196 ether_sprintf(enaddr)); 197 198 strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ); 199 ifp->if_softc = sc; 200 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 201 ifp->if_ioctl = sonic_ioctl; 202 ifp->if_start = sonic_start; 203 ifp->if_watchdog = sonic_watchdog; 204 ifp->if_init = sonic_init; 205 ifp->if_stop = sonic_stop; 206 IFQ_SET_READY(&ifp->if_snd); 207 208 /* 209 * We can support 802.1Q VLAN-sized frames. 210 */ 211 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU; 212 213 /* 214 * Attach the interface. 215 */ 216 if_attach(ifp); 217 ether_ifattach(ifp, enaddr); 218 219 /* 220 * Make sure the interface is shutdown during reboot. 221 */ 222 if (pmf_device_register1(sc->sc_dev, NULL, NULL, sonic_shutdown)) 223 pmf_class_network_register(sc->sc_dev, ifp); 224 else 225 aprint_error_dev(sc->sc_dev, 226 "couldn't establish power handler\n"); 227 228 return; 229 230 /* 231 * Free any resources we've allocated during the failed attach 232 * attempt. Do this in reverse order and fall through. 233 */ 234 fail_6: 235 bus_dmamap_destroy(sc->sc_dmat, sc->sc_nulldmamap); 236 fail_5: 237 for (i = 0; i < SONIC_NRXDESC; i++) { 238 if (sc->sc_rxsoft[i].ds_dmamap != NULL) 239 bus_dmamap_destroy(sc->sc_dmat, 240 sc->sc_rxsoft[i].ds_dmamap); 241 } 242 fail_4: 243 for (i = 0; i < SONIC_NTXDESC; i++) { 244 if (sc->sc_txsoft[i].ds_dmamap != NULL) 245 bus_dmamap_destroy(sc->sc_dmat, 246 sc->sc_txsoft[i].ds_dmamap); 247 } 248 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap); 249 fail_3: 250 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap); 251 fail_2: 252 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_cdata16, cdatasize); 253 fail_1: 254 bus_dmamem_free(sc->sc_dmat, &seg, rseg); 255 fail_0: 256 return; 257 } 258 259 /* 260 * sonic_shutdown: 261 * 262 * Make sure the interface is stopped at reboot. 263 */ 264 bool 265 sonic_shutdown(device_t self, int howto) 266 { 267 struct sonic_softc *sc = device_private(self); 268 269 sonic_stop(&sc->sc_ethercom.ec_if, 1); 270 271 return true; 272 } 273 274 /* 275 * sonic_start: [ifnet interface function] 276 * 277 * Start packet transmission on the interface. 278 */ 279 void 280 sonic_start(struct ifnet *ifp) 281 { 282 struct sonic_softc *sc = ifp->if_softc; 283 struct mbuf *m0, *m; 284 struct sonic_tda16 *tda16; 285 struct sonic_tda32 *tda32; 286 struct sonic_descsoft *ds; 287 bus_dmamap_t dmamap; 288 int error, olasttx, nexttx, opending, totlen, olseg; 289 int seg = 0; /* XXX: gcc */ 290 291 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING) 292 return; 293 294 /* 295 * Remember the previous txpending and the current "last txdesc 296 * used" index. 297 */ 298 opending = sc->sc_txpending; 299 olasttx = sc->sc_txlast; 300 301 /* 302 * Loop through the send queue, setting up transmit descriptors 303 * until we drain the queue, or use up all available transmit 304 * descriptors. Leave one at the end for sanity's sake. 305 */ 306 while (sc->sc_txpending < (SONIC_NTXDESC - 1)) { 307 /* 308 * Grab a packet off the queue. 309 */ 310 IFQ_POLL(&ifp->if_snd, m0); 311 if (m0 == NULL) 312 break; 313 m = NULL; 314 315 /* 316 * Get the next available transmit descriptor. 317 */ 318 nexttx = SONIC_NEXTTX(sc->sc_txlast); 319 ds = &sc->sc_txsoft[nexttx]; 320 dmamap = ds->ds_dmamap; 321 322 /* 323 * Load the DMA map. If this fails, the packet either 324 * didn't fit in the allotted number of frags, or we were 325 * short on resources. In this case, we'll copy and try 326 * again. 327 */ 328 if ((error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0, 329 BUS_DMA_WRITE|BUS_DMA_NOWAIT)) != 0 || 330 (m0->m_pkthdr.len < ETHER_PAD_LEN && 331 dmamap->dm_nsegs == SONIC_NTXFRAGS)) { 332 if (error == 0) 333 bus_dmamap_unload(sc->sc_dmat, dmamap); 334 MGETHDR(m, M_DONTWAIT, MT_DATA); 335 if (m == NULL) { 336 printf("%s: unable to allocate Tx mbuf\n", 337 device_xname(sc->sc_dev)); 338 break; 339 } 340 if (m0->m_pkthdr.len > MHLEN) { 341 MCLGET(m, M_DONTWAIT); 342 if ((m->m_flags & M_EXT) == 0) { 343 printf("%s: unable to allocate Tx " 344 "cluster\n", 345 device_xname(sc->sc_dev)); 346 m_freem(m); 347 break; 348 } 349 } 350 m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *)); 351 m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len; 352 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, 353 m, BUS_DMA_WRITE|BUS_DMA_NOWAIT); 354 if (error) { 355 printf("%s: unable to load Tx buffer, " 356 "error = %d\n", device_xname(sc->sc_dev), 357 error); 358 m_freem(m); 359 break; 360 } 361 } 362 IFQ_DEQUEUE(&ifp->if_snd, m0); 363 if (m != NULL) { 364 m_freem(m0); 365 m0 = m; 366 } 367 368 /* 369 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. 370 */ 371 372 /* Sync the DMA map. */ 373 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize, 374 BUS_DMASYNC_PREWRITE); 375 376 /* 377 * Store a pointer to the packet so we can free it later. 378 */ 379 ds->ds_mbuf = m0; 380 381 /* 382 * Initialize the transmit descriptor. 383 */ 384 totlen = 0; 385 if (sc->sc_32bit) { 386 tda32 = &sc->sc_tda32[nexttx]; 387 for (seg = 0; seg < dmamap->dm_nsegs; seg++) { 388 tda32->tda_frags[seg].frag_ptr1 = 389 htosonic32(sc, 390 (dmamap->dm_segs[seg].ds_addr >> 16) & 391 0xffff); 392 tda32->tda_frags[seg].frag_ptr0 = 393 htosonic32(sc, 394 dmamap->dm_segs[seg].ds_addr & 0xffff); 395 tda32->tda_frags[seg].frag_size = 396 htosonic32(sc, dmamap->dm_segs[seg].ds_len); 397 totlen += dmamap->dm_segs[seg].ds_len; 398 } 399 if (totlen < ETHER_PAD_LEN) { 400 tda32->tda_frags[seg].frag_ptr1 = 401 htosonic32(sc, 402 (sc->sc_nulldma >> 16) & 0xffff); 403 tda32->tda_frags[seg].frag_ptr0 = 404 htosonic32(sc, sc->sc_nulldma & 0xffff); 405 tda32->tda_frags[seg].frag_size = 406 htosonic32(sc, ETHER_PAD_LEN - totlen); 407 totlen = ETHER_PAD_LEN; 408 seg++; 409 } 410 411 tda32->tda_status = 0; 412 tda32->tda_pktconfig = 0; 413 tda32->tda_pktsize = htosonic32(sc, totlen); 414 tda32->tda_fragcnt = htosonic32(sc, seg); 415 416 /* Link it up. */ 417 tda32->tda_frags[seg].frag_ptr0 = 418 htosonic32(sc, SONIC_CDTXADDR32(sc, 419 SONIC_NEXTTX(nexttx)) & 0xffff); 420 421 /* Sync the Tx descriptor. */ 422 SONIC_CDTXSYNC32(sc, nexttx, 423 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 424 } else { 425 tda16 = &sc->sc_tda16[nexttx]; 426 for (seg = 0; seg < dmamap->dm_nsegs; seg++) { 427 tda16->tda_frags[seg].frag_ptr1 = 428 htosonic16(sc, 429 (dmamap->dm_segs[seg].ds_addr >> 16) & 430 0xffff); 431 tda16->tda_frags[seg].frag_ptr0 = 432 htosonic16(sc, 433 dmamap->dm_segs[seg].ds_addr & 0xffff); 434 tda16->tda_frags[seg].frag_size = 435 htosonic16(sc, dmamap->dm_segs[seg].ds_len); 436 totlen += dmamap->dm_segs[seg].ds_len; 437 } 438 if (totlen < ETHER_PAD_LEN) { 439 tda16->tda_frags[seg].frag_ptr1 = 440 htosonic16(sc, 441 (sc->sc_nulldma >> 16) & 0xffff); 442 tda16->tda_frags[seg].frag_ptr0 = 443 htosonic16(sc, sc->sc_nulldma & 0xffff); 444 tda16->tda_frags[seg].frag_size = 445 htosonic16(sc, ETHER_PAD_LEN - totlen); 446 totlen = ETHER_PAD_LEN; 447 seg++; 448 } 449 450 tda16->tda_status = 0; 451 tda16->tda_pktconfig = 0; 452 tda16->tda_pktsize = htosonic16(sc, totlen); 453 tda16->tda_fragcnt = htosonic16(sc, seg); 454 455 /* Link it up. */ 456 tda16->tda_frags[seg].frag_ptr0 = 457 htosonic16(sc, SONIC_CDTXADDR16(sc, 458 SONIC_NEXTTX(nexttx)) & 0xffff); 459 460 /* Sync the Tx descriptor. */ 461 SONIC_CDTXSYNC16(sc, nexttx, 462 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 463 } 464 465 /* Advance the Tx pointer. */ 466 sc->sc_txpending++; 467 sc->sc_txlast = nexttx; 468 469 /* 470 * Pass the packet to any BPF listeners. 471 */ 472 if (ifp->if_bpf) 473 bpf_ops->bpf_mtap(ifp->if_bpf, m0); 474 } 475 476 if (sc->sc_txpending == (SONIC_NTXDESC - 1)) { 477 /* No more slots left; notify upper layer. */ 478 ifp->if_flags |= IFF_OACTIVE; 479 } 480 481 if (sc->sc_txpending != opending) { 482 /* 483 * We enqueued packets. If the transmitter was idle, 484 * reset the txdirty pointer. 485 */ 486 if (opending == 0) 487 sc->sc_txdirty = SONIC_NEXTTX(olasttx); 488 489 /* 490 * Stop the SONIC on the last packet we've set up, 491 * and clear end-of-list on the descriptor previous 492 * to our new chain. 493 * 494 * NOTE: our `seg' variable should still be valid! 495 */ 496 if (sc->sc_32bit) { 497 olseg = 498 sonic32toh(sc, sc->sc_tda32[olasttx].tda_fragcnt); 499 sc->sc_tda32[sc->sc_txlast].tda_frags[seg].frag_ptr0 |= 500 htosonic32(sc, TDA_LINK_EOL); 501 SONIC_CDTXSYNC32(sc, sc->sc_txlast, 502 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 503 sc->sc_tda32[olasttx].tda_frags[olseg].frag_ptr0 &= 504 htosonic32(sc, ~TDA_LINK_EOL); 505 SONIC_CDTXSYNC32(sc, olasttx, 506 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 507 } else { 508 olseg = 509 sonic16toh(sc, sc->sc_tda16[olasttx].tda_fragcnt); 510 sc->sc_tda16[sc->sc_txlast].tda_frags[seg].frag_ptr0 |= 511 htosonic16(sc, TDA_LINK_EOL); 512 SONIC_CDTXSYNC16(sc, sc->sc_txlast, 513 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 514 sc->sc_tda16[olasttx].tda_frags[olseg].frag_ptr0 &= 515 htosonic16(sc, ~TDA_LINK_EOL); 516 SONIC_CDTXSYNC16(sc, olasttx, 517 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 518 } 519 520 /* Start the transmitter. */ 521 CSR_WRITE(sc, SONIC_CR, CR_TXP); 522 523 /* Set a watchdog timer in case the chip flakes out. */ 524 ifp->if_timer = 5; 525 } 526 } 527 528 /* 529 * sonic_watchdog: [ifnet interface function] 530 * 531 * Watchdog timer handler. 532 */ 533 void 534 sonic_watchdog(struct ifnet *ifp) 535 { 536 struct sonic_softc *sc = ifp->if_softc; 537 538 printf("%s: device timeout\n", device_xname(sc->sc_dev)); 539 ifp->if_oerrors++; 540 541 (void)sonic_init(ifp); 542 } 543 544 /* 545 * sonic_ioctl: [ifnet interface function] 546 * 547 * Handle control requests from the operator. 548 */ 549 int 550 sonic_ioctl(struct ifnet *ifp, u_long cmd, void *data) 551 { 552 int s, error; 553 554 s = splnet(); 555 556 error = ether_ioctl(ifp, cmd, data); 557 if (error == ENETRESET) { 558 /* 559 * Multicast list has changed; set the hardware 560 * filter accordingly. 561 */ 562 if (ifp->if_flags & IFF_RUNNING) 563 (void)sonic_init(ifp); 564 error = 0; 565 } 566 567 splx(s); 568 return error; 569 } 570 571 /* 572 * sonic_intr: 573 * 574 * Interrupt service routine. 575 */ 576 int 577 sonic_intr(void *arg) 578 { 579 struct sonic_softc *sc = arg; 580 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 581 uint16_t isr; 582 int handled = 0, wantinit; 583 584 for (wantinit = 0; wantinit == 0;) { 585 isr = CSR_READ(sc, SONIC_ISR) & sc->sc_imr; 586 if (isr == 0) 587 break; 588 CSR_WRITE(sc, SONIC_ISR, isr); /* ACK */ 589 590 handled = 1; 591 592 if (isr & IMR_PRX) 593 sonic_rxintr(sc); 594 595 if (isr & (IMR_PTX|IMR_TXER)) { 596 if (sonic_txintr(sc) & TCR_FU) { 597 printf("%s: transmit FIFO underrun\n", 598 device_xname(sc->sc_dev)); 599 wantinit = 1; 600 } 601 } 602 603 if (isr & (IMR_RFO|IMR_RBA|IMR_RBE|IMR_RDE)) { 604 #define PRINTERR(bit, str) \ 605 if (isr & (bit)) \ 606 printf("%s: %s\n",device_xname(sc->sc_dev), str) 607 PRINTERR(IMR_RFO, "receive FIFO overrun"); 608 PRINTERR(IMR_RBA, "receive buffer exceeded"); 609 PRINTERR(IMR_RBE, "receive buffers exhausted"); 610 PRINTERR(IMR_RDE, "receive descriptors exhausted"); 611 wantinit = 1; 612 } 613 } 614 615 if (handled) { 616 if (wantinit) 617 (void)sonic_init(ifp); 618 sonic_start(ifp); 619 } 620 621 return handled; 622 } 623 624 /* 625 * sonic_txintr: 626 * 627 * Helper; handle transmit complete interrupts. 628 */ 629 uint16_t 630 sonic_txintr(struct sonic_softc *sc) 631 { 632 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 633 struct sonic_descsoft *ds; 634 struct sonic_tda32 *tda32; 635 struct sonic_tda16 *tda16; 636 uint16_t status, totstat = 0; 637 int i; 638 639 ifp->if_flags &= ~IFF_OACTIVE; 640 641 for (i = sc->sc_txdirty; sc->sc_txpending != 0; 642 i = SONIC_NEXTTX(i), sc->sc_txpending--) { 643 ds = &sc->sc_txsoft[i]; 644 645 if (sc->sc_32bit) { 646 SONIC_CDTXSYNC32(sc, i, 647 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 648 tda32 = &sc->sc_tda32[i]; 649 status = sonic32toh(sc, tda32->tda_status); 650 SONIC_CDTXSYNC32(sc, i, BUS_DMASYNC_PREREAD); 651 } else { 652 SONIC_CDTXSYNC16(sc, i, 653 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 654 tda16 = &sc->sc_tda16[i]; 655 status = sonic16toh(sc, tda16->tda_status); 656 SONIC_CDTXSYNC16(sc, i, BUS_DMASYNC_PREREAD); 657 } 658 659 if ((status & ~(TCR_EXDIS|TCR_CRCI|TCR_POWC|TCR_PINT)) == 0) 660 break; 661 662 totstat |= status; 663 664 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0, 665 ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 666 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap); 667 m_freem(ds->ds_mbuf); 668 ds->ds_mbuf = NULL; 669 670 /* 671 * Check for errors and collisions. 672 */ 673 if (status & TCR_PTX) 674 ifp->if_opackets++; 675 else 676 ifp->if_oerrors++; 677 ifp->if_collisions += TDA_STATUS_NCOL(status); 678 } 679 680 /* Update the dirty transmit buffer pointer. */ 681 sc->sc_txdirty = i; 682 683 /* 684 * Cancel the watchdog timer if there are no pending 685 * transmissions. 686 */ 687 if (sc->sc_txpending == 0) 688 ifp->if_timer = 0; 689 690 return totstat; 691 } 692 693 /* 694 * sonic_rxintr: 695 * 696 * Helper; handle receive interrupts. 697 */ 698 void 699 sonic_rxintr(struct sonic_softc *sc) 700 { 701 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 702 struct sonic_descsoft *ds; 703 struct sonic_rda32 *rda32; 704 struct sonic_rda16 *rda16; 705 struct mbuf *m; 706 int i, len; 707 uint16_t status, bytecount, ptr0, ptr1, seqno; 708 709 for (i = sc->sc_rxptr;; i = SONIC_NEXTRX(i)) { 710 ds = &sc->sc_rxsoft[i]; 711 712 if (sc->sc_32bit) { 713 SONIC_CDRXSYNC32(sc, i, 714 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 715 rda32 = &sc->sc_rda32[i]; 716 SONIC_CDRXSYNC32(sc, i, BUS_DMASYNC_PREREAD); 717 if (rda32->rda_inuse != 0) 718 break; 719 status = sonic32toh(sc, rda32->rda_status); 720 bytecount = sonic32toh(sc, rda32->rda_bytecount); 721 ptr0 = sonic32toh(sc, rda32->rda_pkt_ptr0); 722 ptr1 = sonic32toh(sc, rda32->rda_pkt_ptr1); 723 seqno = sonic32toh(sc, rda32->rda_seqno); 724 } else { 725 SONIC_CDRXSYNC16(sc, i, 726 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 727 rda16 = &sc->sc_rda16[i]; 728 SONIC_CDRXSYNC16(sc, i, BUS_DMASYNC_PREREAD); 729 if (rda16->rda_inuse != 0) 730 break; 731 status = sonic16toh(sc, rda16->rda_status); 732 bytecount = sonic16toh(sc, rda16->rda_bytecount); 733 ptr0 = sonic16toh(sc, rda16->rda_pkt_ptr0); 734 ptr1 = sonic16toh(sc, rda16->rda_pkt_ptr1); 735 seqno = sonic16toh(sc, rda16->rda_seqno); 736 } 737 738 /* 739 * Make absolutely sure this is the only packet 740 * in this receive buffer. Our entire Rx buffer 741 * management scheme depends on this, and if the 742 * SONIC didn't follow our rule, it means we've 743 * misconfigured it. 744 */ 745 KASSERT(status & RCR_LPKT); 746 747 /* 748 * Make sure the packet arrived OK. If an error occurred, 749 * update stats and reset the descriptor. The buffer will 750 * be reused the next time the descriptor comes up in the 751 * ring. 752 */ 753 if ((status & RCR_PRX) == 0) { 754 if (status & RCR_FAER) 755 printf("%s: Rx frame alignment error\n", 756 device_xname(sc->sc_dev)); 757 else if (status & RCR_CRCR) 758 printf("%s: Rx CRC error\n", 759 device_xname(sc->sc_dev)); 760 ifp->if_ierrors++; 761 SONIC_INIT_RXDESC(sc, i); 762 continue; 763 } 764 765 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0, 766 ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 767 768 /* 769 * The SONIC includes the CRC with every packet. 770 */ 771 len = bytecount - ETHER_CRC_LEN; 772 773 /* 774 * Ok, if the chip is in 32-bit mode, then receive 775 * buffers must be aligned to 32-bit boundaries, 776 * which means the payload is misaligned. In this 777 * case, we must allocate a new mbuf, and copy the 778 * packet into it, scooted forward 2 bytes to ensure 779 * proper alignment. 780 * 781 * Note, in 16-bit mode, we can configure the SONIC 782 * to do what we want, and we have. 783 */ 784 #ifndef __NO_STRICT_ALIGNMENT 785 if (sc->sc_32bit) { 786 MGETHDR(m, M_DONTWAIT, MT_DATA); 787 if (m == NULL) 788 goto dropit; 789 if (len > (MHLEN - 2)) { 790 MCLGET(m, M_DONTWAIT); 791 if ((m->m_flags & M_EXT) == 0) 792 goto dropit; 793 } 794 m->m_data += 2; 795 /* 796 * Note that we use a cluster for incoming frames, 797 * so the buffer is virtually contiguous. 798 */ 799 memcpy(mtod(m, void *), mtod(ds->ds_mbuf, void *), 800 len); 801 SONIC_INIT_RXDESC(sc, i); 802 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0, 803 ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 804 } else 805 #endif /* ! __NO_STRICT_ALIGNMENT */ 806 /* 807 * If the packet is small enough to fit in a single 808 * header mbuf, allocate one and copy the data into 809 * it. This greatly reduces memory consumption when 810 * we receive lots of small packets. 811 */ 812 if (sonic_copy_small != 0 && len <= (MHLEN - 2)) { 813 MGETHDR(m, M_DONTWAIT, MT_DATA); 814 if (m == NULL) 815 goto dropit; 816 m->m_data += 2; 817 /* 818 * Note that we use a cluster for incoming frames, 819 * so the buffer is virtually contiguous. 820 */ 821 memcpy(mtod(m, void *), mtod(ds->ds_mbuf, void *), 822 len); 823 SONIC_INIT_RXDESC(sc, i); 824 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0, 825 ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 826 } else { 827 m = ds->ds_mbuf; 828 if (sonic_add_rxbuf(sc, i) != 0) { 829 dropit: 830 ifp->if_ierrors++; 831 SONIC_INIT_RXDESC(sc, i); 832 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0, 833 ds->ds_dmamap->dm_mapsize, 834 BUS_DMASYNC_PREREAD); 835 continue; 836 } 837 } 838 839 ifp->if_ipackets++; 840 m->m_pkthdr.rcvif = ifp; 841 m->m_pkthdr.len = m->m_len = len; 842 843 /* 844 * Pass this up to any BPF listeners. 845 */ 846 if (ifp->if_bpf) 847 bpf_ops->bpf_mtap(ifp->if_bpf, m); 848 849 /* Pass it on. */ 850 (*ifp->if_input)(ifp, m); 851 } 852 853 /* Update the receive pointer. */ 854 sc->sc_rxptr = i; 855 CSR_WRITE(sc, SONIC_RWR, SONIC_CDRRADDR(sc, SONIC_PREVRX(i))); 856 } 857 858 /* 859 * sonic_reset: 860 * 861 * Perform a soft reset on the SONIC. 862 */ 863 void 864 sonic_reset(struct sonic_softc *sc) 865 { 866 867 /* stop TX, RX and timer, and ensure RST is clear */ 868 CSR_WRITE(sc, SONIC_CR, CR_STP | CR_RXDIS | CR_HTX); 869 delay(1000); 870 871 CSR_WRITE(sc, SONIC_CR, CR_RST); 872 delay(1000); 873 874 /* clear all interrupts */ 875 CSR_WRITE(sc, SONIC_IMR, 0); 876 CSR_WRITE(sc, SONIC_ISR, IMR_ALL); 877 878 CSR_WRITE(sc, SONIC_CR, 0); 879 delay(1000); 880 } 881 882 /* 883 * sonic_init: [ifnet interface function] 884 * 885 * Initialize the interface. Must be called at splnet(). 886 */ 887 int 888 sonic_init(struct ifnet *ifp) 889 { 890 struct sonic_softc *sc = ifp->if_softc; 891 struct sonic_descsoft *ds; 892 int i, error = 0; 893 uint16_t reg; 894 895 /* 896 * Cancel any pending I/O. 897 */ 898 sonic_stop(ifp, 0); 899 900 /* 901 * Reset the SONIC to a known state. 902 */ 903 sonic_reset(sc); 904 905 /* 906 * Bring the SONIC into reset state, and program the DCR. 907 * 908 * Note: We don't bother optimizing the transmit and receive 909 * thresholds, here. TFT/RFT values should be set in MD attachments. 910 */ 911 reg = sc->sc_dcr; 912 if (sc->sc_32bit) 913 reg |= DCR_DW; 914 CSR_WRITE(sc, SONIC_CR, CR_RST); 915 CSR_WRITE(sc, SONIC_DCR, reg); 916 CSR_WRITE(sc, SONIC_DCR2, sc->sc_dcr2); 917 CSR_WRITE(sc, SONIC_CR, 0); 918 919 /* 920 * Initialize the transmit descriptors. 921 */ 922 if (sc->sc_32bit) { 923 for (i = 0; i < SONIC_NTXDESC; i++) { 924 memset(&sc->sc_tda32[i], 0, sizeof(struct sonic_tda32)); 925 SONIC_CDTXSYNC32(sc, i, 926 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 927 } 928 } else { 929 for (i = 0; i < SONIC_NTXDESC; i++) { 930 memset(&sc->sc_tda16[i], 0, sizeof(struct sonic_tda16)); 931 SONIC_CDTXSYNC16(sc, i, 932 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 933 } 934 } 935 sc->sc_txpending = 0; 936 sc->sc_txdirty = 0; 937 sc->sc_txlast = SONIC_NTXDESC - 1; 938 939 /* 940 * Initialize the receive descriptor ring. 941 */ 942 for (i = 0; i < SONIC_NRXDESC; i++) { 943 ds = &sc->sc_rxsoft[i]; 944 if (ds->ds_mbuf == NULL) { 945 if ((error = sonic_add_rxbuf(sc, i)) != 0) { 946 printf("%s: unable to allocate or map Rx " 947 "buffer %d, error = %d\n", 948 device_xname(sc->sc_dev), i, error); 949 /* 950 * XXX Should attempt to run with fewer receive 951 * XXX buffers instead of just failing. 952 */ 953 sonic_rxdrain(sc); 954 goto out; 955 } 956 } else 957 SONIC_INIT_RXDESC(sc, i); 958 } 959 sc->sc_rxptr = 0; 960 961 /* Give the transmit ring to the SONIC. */ 962 CSR_WRITE(sc, SONIC_UTDAR, (SONIC_CDTXADDR(sc, 0) >> 16) & 0xffff); 963 CSR_WRITE(sc, SONIC_CTDAR, SONIC_CDTXADDR(sc, 0) & 0xffff); 964 965 /* Give the receive descriptor ring to the SONIC. */ 966 CSR_WRITE(sc, SONIC_URDAR, (SONIC_CDRXADDR(sc, 0) >> 16) & 0xffff); 967 CSR_WRITE(sc, SONIC_CRDAR, SONIC_CDRXADDR(sc, 0) & 0xffff); 968 969 /* Give the receive buffer ring to the SONIC. */ 970 CSR_WRITE(sc, SONIC_URRAR, (SONIC_CDRRADDR(sc, 0) >> 16) & 0xffff); 971 CSR_WRITE(sc, SONIC_RSAR, SONIC_CDRRADDR(sc, 0) & 0xffff); 972 if (sc->sc_32bit) 973 CSR_WRITE(sc, SONIC_REAR, 974 (SONIC_CDRRADDR(sc, SONIC_NRXDESC - 1) + 975 sizeof(struct sonic_rra32)) & 0xffff); 976 else 977 CSR_WRITE(sc, SONIC_REAR, 978 (SONIC_CDRRADDR(sc, SONIC_NRXDESC - 1) + 979 sizeof(struct sonic_rra16)) & 0xffff); 980 CSR_WRITE(sc, SONIC_RRR, SONIC_CDRRADDR(sc, 0) & 0xffff); 981 CSR_WRITE(sc, SONIC_RWR, SONIC_CDRRADDR(sc, SONIC_NRXDESC - 1)); 982 983 /* 984 * Set the End-Of-Buffer counter such that only one packet 985 * will be placed into each buffer we provide. Note we are 986 * following the recommendation of section 3.4.4 of the manual 987 * here, and have "lengthened" the receive buffers accordingly. 988 */ 989 if (sc->sc_32bit) 990 CSR_WRITE(sc, SONIC_EOBC, (ETHER_MAX_LEN + 2) / 2); 991 else 992 CSR_WRITE(sc, SONIC_EOBC, (ETHER_MAX_LEN / 2)); 993 994 /* Reset the receive sequence counter. */ 995 CSR_WRITE(sc, SONIC_RSC, 0); 996 997 /* Clear the tally registers. */ 998 CSR_WRITE(sc, SONIC_CRCETC, 0xffff); 999 CSR_WRITE(sc, SONIC_FAET, 0xffff); 1000 CSR_WRITE(sc, SONIC_MPT, 0xffff); 1001 1002 /* Set the receive filter. */ 1003 sonic_set_filter(sc); 1004 1005 /* 1006 * Set the interrupt mask register. 1007 */ 1008 sc->sc_imr = IMR_RFO | IMR_RBA | IMR_RBE | IMR_RDE | 1009 IMR_TXER | IMR_PTX | IMR_PRX; 1010 CSR_WRITE(sc, SONIC_IMR, sc->sc_imr); 1011 1012 /* 1013 * Start the receive process in motion. Note, we don't 1014 * start the transmit process until we actually try to 1015 * transmit packets. 1016 */ 1017 CSR_WRITE(sc, SONIC_CR, CR_RXEN | CR_RRRA); 1018 1019 /* 1020 * ...all done! 1021 */ 1022 ifp->if_flags |= IFF_RUNNING; 1023 ifp->if_flags &= ~IFF_OACTIVE; 1024 1025 out: 1026 if (error) 1027 printf("%s: interface not running\n", device_xname(sc->sc_dev)); 1028 return error; 1029 } 1030 1031 /* 1032 * sonic_rxdrain: 1033 * 1034 * Drain the receive queue. 1035 */ 1036 void 1037 sonic_rxdrain(struct sonic_softc *sc) 1038 { 1039 struct sonic_descsoft *ds; 1040 int i; 1041 1042 for (i = 0; i < SONIC_NRXDESC; i++) { 1043 ds = &sc->sc_rxsoft[i]; 1044 if (ds->ds_mbuf != NULL) { 1045 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap); 1046 m_freem(ds->ds_mbuf); 1047 ds->ds_mbuf = NULL; 1048 } 1049 } 1050 } 1051 1052 /* 1053 * sonic_stop: [ifnet interface function] 1054 * 1055 * Stop transmission on the interface. 1056 */ 1057 void 1058 sonic_stop(struct ifnet *ifp, int disable) 1059 { 1060 struct sonic_softc *sc = ifp->if_softc; 1061 struct sonic_descsoft *ds; 1062 int i; 1063 1064 /* 1065 * Disable interrupts. 1066 */ 1067 CSR_WRITE(sc, SONIC_IMR, 0); 1068 1069 /* 1070 * Stop the transmitter, receiver, and timer. 1071 */ 1072 CSR_WRITE(sc, SONIC_CR, CR_HTX|CR_RXDIS|CR_STP); 1073 for (i = 0; i < 1000; i++) { 1074 if ((CSR_READ(sc, SONIC_CR) & (CR_TXP|CR_RXEN|CR_ST)) == 0) 1075 break; 1076 delay(2); 1077 } 1078 if ((CSR_READ(sc, SONIC_CR) & (CR_TXP|CR_RXEN|CR_ST)) != 0) 1079 printf("%s: SONIC failed to stop\n", device_xname(sc->sc_dev)); 1080 1081 /* 1082 * Release any queued transmit buffers. 1083 */ 1084 for (i = 0; i < SONIC_NTXDESC; i++) { 1085 ds = &sc->sc_txsoft[i]; 1086 if (ds->ds_mbuf != NULL) { 1087 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap); 1088 m_freem(ds->ds_mbuf); 1089 ds->ds_mbuf = NULL; 1090 } 1091 } 1092 1093 /* 1094 * Mark the interface down and cancel the watchdog timer. 1095 */ 1096 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1097 ifp->if_timer = 0; 1098 1099 if (disable) 1100 sonic_rxdrain(sc); 1101 } 1102 1103 /* 1104 * sonic_add_rxbuf: 1105 * 1106 * Add a receive buffer to the indicated descriptor. 1107 */ 1108 int 1109 sonic_add_rxbuf(struct sonic_softc *sc, int idx) 1110 { 1111 struct sonic_descsoft *ds = &sc->sc_rxsoft[idx]; 1112 struct mbuf *m; 1113 int error; 1114 1115 MGETHDR(m, M_DONTWAIT, MT_DATA); 1116 if (m == NULL) 1117 return ENOBUFS; 1118 1119 MCLGET(m, M_DONTWAIT); 1120 if ((m->m_flags & M_EXT) == 0) { 1121 m_freem(m); 1122 return ENOBUFS; 1123 } 1124 1125 if (ds->ds_mbuf != NULL) 1126 bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap); 1127 1128 ds->ds_mbuf = m; 1129 1130 error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap, 1131 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, 1132 BUS_DMA_READ|BUS_DMA_NOWAIT); 1133 if (error) { 1134 printf("%s: can't load rx DMA map %d, error = %d\n", 1135 device_xname(sc->sc_dev), idx, error); 1136 panic("sonic_add_rxbuf"); /* XXX */ 1137 } 1138 1139 bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0, 1140 ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 1141 1142 SONIC_INIT_RXDESC(sc, idx); 1143 1144 return 0; 1145 } 1146 1147 static void 1148 sonic_set_camentry(struct sonic_softc *sc, int entry, const uint8_t *enaddr) 1149 { 1150 1151 if (sc->sc_32bit) { 1152 struct sonic_cda32 *cda = &sc->sc_cda32[entry]; 1153 1154 cda->cda_entry = htosonic32(sc, entry); 1155 cda->cda_addr0 = htosonic32(sc, enaddr[0] | (enaddr[1] << 8)); 1156 cda->cda_addr1 = htosonic32(sc, enaddr[2] | (enaddr[3] << 8)); 1157 cda->cda_addr2 = htosonic32(sc, enaddr[4] | (enaddr[5] << 8)); 1158 } else { 1159 struct sonic_cda16 *cda = &sc->sc_cda16[entry]; 1160 1161 cda->cda_entry = htosonic16(sc, entry); 1162 cda->cda_addr0 = htosonic16(sc, enaddr[0] | (enaddr[1] << 8)); 1163 cda->cda_addr1 = htosonic16(sc, enaddr[2] | (enaddr[3] << 8)); 1164 cda->cda_addr2 = htosonic16(sc, enaddr[4] | (enaddr[5] << 8)); 1165 } 1166 } 1167 1168 /* 1169 * sonic_set_filter: 1170 * 1171 * Set the SONIC receive filter. 1172 */ 1173 void 1174 sonic_set_filter(struct sonic_softc *sc) 1175 { 1176 struct ethercom *ec = &sc->sc_ethercom; 1177 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1178 struct ether_multi *enm; 1179 struct ether_multistep step; 1180 int i, entry = 0; 1181 uint16_t camvalid = 0; 1182 uint16_t rcr = 0; 1183 1184 if (ifp->if_flags & IFF_BROADCAST) 1185 rcr |= RCR_BRD; 1186 1187 if (ifp->if_flags & IFF_PROMISC) { 1188 rcr |= RCR_PRO; 1189 goto allmulti; 1190 } 1191 1192 /* Put our station address in the first CAM slot. */ 1193 sonic_set_camentry(sc, entry, CLLADDR(ifp->if_sadl)); 1194 camvalid |= (1U << entry); 1195 entry++; 1196 1197 /* Add the multicast addresses to the CAM. */ 1198 ETHER_FIRST_MULTI(step, ec, enm); 1199 while (enm != NULL) { 1200 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 1201 /* 1202 * We must listen to a range of multicast addresses. 1203 * The only way to do this on the SONIC is to enable 1204 * reception of all multicast packets. 1205 */ 1206 goto allmulti; 1207 } 1208 1209 if (entry == SONIC_NCAMENT) { 1210 /* 1211 * Out of CAM slots. Have to enable reception 1212 * of all multicast addresses. 1213 */ 1214 goto allmulti; 1215 } 1216 1217 sonic_set_camentry(sc, entry, enm->enm_addrlo); 1218 camvalid |= (1U << entry); 1219 entry++; 1220 1221 ETHER_NEXT_MULTI(step, enm); 1222 } 1223 1224 ifp->if_flags &= ~IFF_ALLMULTI; 1225 goto setit; 1226 1227 allmulti: 1228 /* Use only the first CAM slot (station address). */ 1229 camvalid = 0x0001; 1230 entry = 1; 1231 rcr |= RCR_AMC; 1232 1233 setit: 1234 /* set mask for the CAM Enable register */ 1235 if (sc->sc_32bit) { 1236 if (entry == SONIC_NCAMENT) 1237 sc->sc_cdaenable32 = htosonic32(sc, camvalid); 1238 else 1239 sc->sc_cda32[entry].cda_entry = 1240 htosonic32(sc, camvalid); 1241 } else { 1242 if (entry == SONIC_NCAMENT) 1243 sc->sc_cdaenable16 = htosonic16(sc, camvalid); 1244 else 1245 sc->sc_cda16[entry].cda_entry = 1246 htosonic16(sc, camvalid); 1247 } 1248 1249 /* Load the CAM. */ 1250 SONIC_CDCAMSYNC(sc, BUS_DMASYNC_PREWRITE); 1251 CSR_WRITE(sc, SONIC_CDP, SONIC_CDCAMADDR(sc) & 0xffff); 1252 CSR_WRITE(sc, SONIC_CDC, entry); 1253 CSR_WRITE(sc, SONIC_CR, CR_LCAM); 1254 for (i = 0; i < 10000; i++) { 1255 if ((CSR_READ(sc, SONIC_CR) & CR_LCAM) == 0) 1256 break; 1257 delay(2); 1258 } 1259 if (CSR_READ(sc, SONIC_CR) & CR_LCAM) 1260 printf("%s: CAM load failed\n", device_xname(sc->sc_dev)); 1261 SONIC_CDCAMSYNC(sc, BUS_DMASYNC_POSTWRITE); 1262 1263 /* Set the receive control register. */ 1264 CSR_WRITE(sc, SONIC_RCR, rcr); 1265 } 1266