/*	$NetBSD: dp83932.c,v 1.42 2018/06/26 06:48:00 msaitoh Exp $	*/

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the National Semiconductor DP83932
 * Systems-Oriented Network Interface Controller (SONIC).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dp83932.c,v 1.42 2018/06/26 06:48:00 msaitoh Exp $");


#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <sys/bus.h>
#include <sys/intr.h>

#include <dev/ic/dp83932reg.h>
#include <dev/ic/dp83932var.h>

static void	sonic_start(struct ifnet *);
static void	sonic_watchdog(struct ifnet *);
static int	sonic_ioctl(struct ifnet *, u_long, void *);
static int	sonic_init(struct ifnet *);
static void	sonic_stop(struct ifnet *, int);

static bool	sonic_shutdown(device_t, int);

static void	sonic_reset(struct sonic_softc *);
static void	sonic_rxdrain(struct sonic_softc *);
static int	sonic_add_rxbuf(struct sonic_softc *, int);
static void	sonic_set_filter(struct sonic_softc *);

static uint16_t	sonic_txintr(struct sonic_softc *);
static void	sonic_rxintr(struct sonic_softc *);

int	sonic_copy_small = 0;

#define	ETHER_PAD_LEN	(ETHER_MIN_LEN - ETHER_CRC_LEN)

/*
 * sonic_attach:
 *
 *	Attach a SONIC interface to the system.
 */
void
sonic_attach(struct sonic_softc *sc, const uint8_t *enaddr)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int i, rseg, error;
	bus_dma_segment_t seg;
	size_t cdatasize;
	uint8_t *nullbuf;

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
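	 *
	 * The descriptor area layout (and hence its size) differs between
	 * the 16-bit and 32-bit variants of the chip, so the appropriate
	 * control-data structure is selected below.  A zero-filled pad
	 * buffer of ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN, i.e.
	 * 60) bytes is carved out of the tail of the same allocation;
	 * sonic_start() chains it onto short frames to pad them out to
	 * the minimum Ethernet length.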
	 */
	if (sc->sc_32bit)
		cdatasize = sizeof(struct sonic_control_data32);
	else
		cdatasize = sizeof(struct sonic_control_data16);

	if ((error = bus_dmamem_alloc(sc->sc_dmat, cdatasize + ETHER_PAD_LEN,
	     PAGE_SIZE, (64 * 1024), &seg, 1, &rseg,
	     BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n", error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    cdatasize + ETHER_PAD_LEN, (void **) &sc->sc_cdata16,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}
	nullbuf = (uint8_t *)sc->sc_cdata16 + cdatasize;
	memset(nullbuf, 0, ETHER_PAD_LEN);

	if ((error = bus_dmamap_create(sc->sc_dmat,
	     cdatasize, 1, cdatasize, 0, BUS_DMA_NOWAIT,
	     &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	     sc->sc_cdata16, cdatasize, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n", error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < SONIC_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		     SONIC_NTXFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		     &sc->sc_txsoft[i].ds_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < SONIC_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		     MCLBYTES, 0, BUS_DMA_NOWAIT,
		     &sc->sc_rxsoft[i].ds_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].ds_mbuf = NULL;
	}

	/*
	 * create and map the pad buffer
	 */
	if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_PAD_LEN, 1,
	    ETHER_PAD_LEN, 0, BUS_DMA_NOWAIT, &sc->sc_nulldmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create pad buffer DMA map, error = %d\n", error);
		goto fail_5;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_nulldmamap,
	    nullbuf, ETHER_PAD_LEN, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load pad buffer DMA map, error = %d\n", error);
		goto fail_6;
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_nulldmamap, 0, ETHER_PAD_LEN,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Reset the chip to a known state.
	 */
	sonic_reset(sc);

	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(enaddr));

	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = sonic_ioctl;
	ifp->if_start = sonic_start;
	ifp->if_watchdog = sonic_watchdog;
	ifp->if_init = sonic_init;
	ifp->if_stop = sonic_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Attach the interface.
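	 *
	 * if_deferred_start_init() registers a deferred if_start handler,
	 * which lets sonic_intr() restart the transmit queue with
	 * if_schedule_deferred_start() instead of calling sonic_start()
	 * directly from interrupt context.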
	 */
	if_attach(ifp);
	if_deferred_start_init(ifp, NULL);
	ether_ifattach(ifp, enaddr);

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	if (pmf_device_register1(sc->sc_dev, NULL, NULL, sonic_shutdown))
		pmf_class_network_register(sc->sc_dev, ifp);
	else
		aprint_error_dev(sc->sc_dev,
		    "couldn't establish power handler\n");

	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_nulldmamap);
 fail_5:
	for (i = 0; i < SONIC_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].ds_dmamap);
	}
 fail_4:
	for (i = 0; i < SONIC_NTXDESC; i++) {
		if (sc->sc_txsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].ds_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_cdata16, cdatasize);
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * sonic_shutdown:
 *
 *	Make sure the interface is stopped at reboot.
 */
bool
sonic_shutdown(device_t self, int howto)
{
	struct sonic_softc *sc = device_private(self);

	sonic_stop(&sc->sc_ethercom.ec_if, 1);

	return true;
}

/*
 * sonic_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
void
sonic_start(struct ifnet *ifp)
{
	struct sonic_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct sonic_tda16 *tda16;
	struct sonic_tda32 *tda32;
	struct sonic_descsoft *ds;
	bus_dmamap_t dmamap;
	int error, olasttx, nexttx, opending, totlen, olseg;
	int seg = 0;	/* XXX: gcc */

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous txpending and the current "last txdesc
	 * used" index.
	 */
	opending = sc->sc_txpending;
	olasttx = sc->sc_txlast;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.  Leave one at the end for sanity's sake.
	 */
	while (sc->sc_txpending < (SONIC_NTXDESC - 1)) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		/*
		 * Get the next available transmit descriptor.
		 */
		nexttx = SONIC_NEXTTX(sc->sc_txlast);
		ds = &sc->sc_txsoft[nexttx];
		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of frags, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.
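		 *
		 * We also take the copy path when a packet shorter than
		 * ETHER_PAD_LEN already uses all SONIC_NTXFRAGS fragments,
		 * since the pad fragment appended further below would not
		 * fit in the descriptor otherwise.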
		 */
		if ((error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		     BUS_DMA_WRITE|BUS_DMA_NOWAIT)) != 0 ||
		    (m0->m_pkthdr.len < ETHER_PAD_LEN &&
		     dmamap->dm_nsegs == SONIC_NTXFRAGS)) {
			if (error == 0)
				bus_dmamap_unload(sc->sc_dmat, dmamap);
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    device_xname(sc->sc_dev));
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n",
					    device_xname(sc->sc_dev));
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", device_xname(sc->sc_dev),
				    error);
				m_freem(m);
				break;
			}
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/*
		 * Initialize the transmit descriptor.
		 */
		totlen = 0;
		if (sc->sc_32bit) {
			tda32 = &sc->sc_tda32[nexttx];
			for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
				tda32->tda_frags[seg].frag_ptr1 =
				    htosonic32(sc,
				    (dmamap->dm_segs[seg].ds_addr >> 16) &
				    0xffff);
				tda32->tda_frags[seg].frag_ptr0 =
				    htosonic32(sc,
				    dmamap->dm_segs[seg].ds_addr & 0xffff);
				tda32->tda_frags[seg].frag_size =
				    htosonic32(sc, dmamap->dm_segs[seg].ds_len);
				totlen += dmamap->dm_segs[seg].ds_len;
			}
			if (totlen < ETHER_PAD_LEN) {
				tda32->tda_frags[seg].frag_ptr1 =
				    htosonic32(sc,
				    (sc->sc_nulldma >> 16) & 0xffff);
				tda32->tda_frags[seg].frag_ptr0 =
				    htosonic32(sc, sc->sc_nulldma & 0xffff);
				tda32->tda_frags[seg].frag_size =
				    htosonic32(sc, ETHER_PAD_LEN - totlen);
				totlen = ETHER_PAD_LEN;
				seg++;
			}

			tda32->tda_status = 0;
			tda32->tda_pktconfig = 0;
			tda32->tda_pktsize = htosonic32(sc, totlen);
			tda32->tda_fragcnt = htosonic32(sc, seg);

			/* Link it up. */
			tda32->tda_frags[seg].frag_ptr0 =
			    htosonic32(sc, SONIC_CDTXADDR32(sc,
			    SONIC_NEXTTX(nexttx)) & 0xffff);

			/* Sync the Tx descriptor. */
			SONIC_CDTXSYNC32(sc, nexttx,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		} else {
			tda16 = &sc->sc_tda16[nexttx];
			for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
				tda16->tda_frags[seg].frag_ptr1 =
				    htosonic16(sc,
				    (dmamap->dm_segs[seg].ds_addr >> 16) &
				    0xffff);
				tda16->tda_frags[seg].frag_ptr0 =
				    htosonic16(sc,
				    dmamap->dm_segs[seg].ds_addr & 0xffff);
				tda16->tda_frags[seg].frag_size =
				    htosonic16(sc, dmamap->dm_segs[seg].ds_len);
				totlen += dmamap->dm_segs[seg].ds_len;
			}
			if (totlen < ETHER_PAD_LEN) {
				tda16->tda_frags[seg].frag_ptr1 =
				    htosonic16(sc,
				    (sc->sc_nulldma >> 16) & 0xffff);
				tda16->tda_frags[seg].frag_ptr0 =
				    htosonic16(sc, sc->sc_nulldma & 0xffff);
				tda16->tda_frags[seg].frag_size =
				    htosonic16(sc, ETHER_PAD_LEN - totlen);
				totlen = ETHER_PAD_LEN;
				seg++;
			}

			tda16->tda_status = 0;
			tda16->tda_pktconfig = 0;
			tda16->tda_pktsize = htosonic16(sc, totlen);
			tda16->tda_fragcnt = htosonic16(sc, seg);

			/* Link it up. */
			tda16->tda_frags[seg].frag_ptr0 =
			    htosonic16(sc, SONIC_CDTXADDR16(sc,
			    SONIC_NEXTTX(nexttx)) & 0xffff);

			/* Sync the Tx descriptor. */
			SONIC_CDTXSYNC16(sc, nexttx,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		}

		/* Advance the Tx pointer. */
		sc->sc_txpending++;
		sc->sc_txlast = nexttx;

		/*
		 * Pass the packet to any BPF listeners.
		 */
		bpf_mtap(ifp, m0, BPF_D_OUT);
	}

	if (sc->sc_txpending == (SONIC_NTXDESC - 1)) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txpending != opending) {
		/*
		 * We enqueued packets.  If the transmitter was idle,
		 * reset the txdirty pointer.
		 */
		if (opending == 0)
			sc->sc_txdirty = SONIC_NEXTTX(olasttx);

		/*
		 * Stop the SONIC on the last packet we've set up,
		 * and clear end-of-list on the descriptor previous
		 * to our new chain.
		 *
		 * NOTE: our `seg' variable should still be valid!
		 */
		if (sc->sc_32bit) {
			olseg =
			    sonic32toh(sc, sc->sc_tda32[olasttx].tda_fragcnt);
			sc->sc_tda32[sc->sc_txlast].tda_frags[seg].frag_ptr0 |=
			    htosonic32(sc, TDA_LINK_EOL);
			SONIC_CDTXSYNC32(sc, sc->sc_txlast,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			sc->sc_tda32[olasttx].tda_frags[olseg].frag_ptr0 &=
			    htosonic32(sc, ~TDA_LINK_EOL);
			SONIC_CDTXSYNC32(sc, olasttx,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		} else {
			olseg =
			    sonic16toh(sc, sc->sc_tda16[olasttx].tda_fragcnt);
			sc->sc_tda16[sc->sc_txlast].tda_frags[seg].frag_ptr0 |=
			    htosonic16(sc, TDA_LINK_EOL);
			SONIC_CDTXSYNC16(sc, sc->sc_txlast,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			sc->sc_tda16[olasttx].tda_frags[olseg].frag_ptr0 &=
			    htosonic16(sc, ~TDA_LINK_EOL);
			SONIC_CDTXSYNC16(sc, olasttx,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		}

		/* Start the transmitter. */
		CSR_WRITE(sc, SONIC_CR, CR_TXP);

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * sonic_watchdog:	[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
void
sonic_watchdog(struct ifnet *ifp)
{
	struct sonic_softc *sc = ifp->if_softc;

	printf("%s: device timeout\n", device_xname(sc->sc_dev));
	ifp->if_oerrors++;

	(void)sonic_init(ifp);
}

/*
 * sonic_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
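 *
 *	Most requests are handled by ether_ioctl(); an ENETRESET return
 *	means the multicast list changed, which we apply (when running)
 *	by reinitializing the chip so that sonic_set_filter() reloads
 *	the CAM.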
 */
int
sonic_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	int s, error;

	s = splnet();

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		/*
		 * Multicast list has changed; set the hardware
		 * filter accordingly.
		 */
		if (ifp->if_flags & IFF_RUNNING)
			(void)sonic_init(ifp);
		error = 0;
	}

	splx(s);
	return error;
}

/*
 * sonic_intr:
 *
 *	Interrupt service routine.
 */
int
sonic_intr(void *arg)
{
	struct sonic_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint16_t isr;
	int handled = 0, wantinit;

	for (wantinit = 0; wantinit == 0;) {
		isr = CSR_READ(sc, SONIC_ISR) & sc->sc_imr;
		if (isr == 0)
			break;
		CSR_WRITE(sc, SONIC_ISR, isr);	/* ACK */

		handled = 1;

		if (isr & IMR_PRX)
			sonic_rxintr(sc);

		if (isr & (IMR_PTX|IMR_TXER)) {
			if (sonic_txintr(sc) & TCR_FU) {
				printf("%s: transmit FIFO underrun\n",
				    device_xname(sc->sc_dev));
				wantinit = 1;
			}
		}

		if (isr & (IMR_RFO|IMR_RBA|IMR_RBE|IMR_RDE)) {
#define	PRINTERR(bit, str)						\
			if (isr & (bit))				\
				printf("%s: %s\n",device_xname(sc->sc_dev), str)
			PRINTERR(IMR_RFO, "receive FIFO overrun");
			PRINTERR(IMR_RBA, "receive buffer exceeded");
			PRINTERR(IMR_RBE, "receive buffers exhausted");
			PRINTERR(IMR_RDE, "receive descriptors exhausted");
			wantinit = 1;
		}
	}

	if (handled) {
		if (wantinit)
			(void)sonic_init(ifp);
		if_schedule_deferred_start(ifp);
	}

	return handled;
}

/*
 * sonic_txintr:
 *
 *	Helper; handle transmit complete interrupts.
 */
uint16_t
sonic_txintr(struct sonic_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sonic_descsoft *ds;
	struct sonic_tda32 *tda32;
	struct sonic_tda16 *tda16;
	uint16_t status, totstat = 0;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	for (i = sc->sc_txdirty; sc->sc_txpending != 0;
	     i = SONIC_NEXTTX(i), sc->sc_txpending--) {
		ds = &sc->sc_txsoft[i];

		if (sc->sc_32bit) {
			SONIC_CDTXSYNC32(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			tda32 = &sc->sc_tda32[i];
			status = sonic32toh(sc, tda32->tda_status);
			SONIC_CDTXSYNC32(sc, i, BUS_DMASYNC_PREREAD);
		} else {
			SONIC_CDTXSYNC16(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			tda16 = &sc->sc_tda16[i];
			status = sonic16toh(sc, tda16->tda_status);
			SONIC_CDTXSYNC16(sc, i, BUS_DMASYNC_PREREAD);
		}

		if ((status & ~(TCR_EXDIS|TCR_CRCI|TCR_POWC|TCR_PINT)) == 0)
			break;

		totstat |= status;

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
		m_freem(ds->ds_mbuf);
		ds->ds_mbuf = NULL;

		/*
		 * Check for errors and collisions.
		 */
		if (status & TCR_PTX)
			ifp->if_opackets++;
		else
			ifp->if_oerrors++;
		ifp->if_collisions += TDA_STATUS_NCOL(status);
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txdirty = i;

	/*
	 * Cancel the watchdog timer if there are no pending
	 * transmissions.
	 */
	if (sc->sc_txpending == 0)
		ifp->if_timer = 0;

	return totstat;
}

/*
 * sonic_rxintr:
 *
 *	Helper; handle receive interrupts.
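 *
 *	The EOBC setting programmed in sonic_init() guarantees that each
 *	receive buffer holds exactly one packet, so every completed RDA
 *	is expected to have RCR_LPKT set.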
 */
void
sonic_rxintr(struct sonic_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sonic_descsoft *ds;
	struct sonic_rda32 *rda32;
	struct sonic_rda16 *rda16;
	struct mbuf *m;
	int i, len;
	uint16_t status, bytecount /*, ptr0, ptr1, seqno */;

	for (i = sc->sc_rxptr;; i = SONIC_NEXTRX(i)) {
		ds = &sc->sc_rxsoft[i];

		if (sc->sc_32bit) {
			SONIC_CDRXSYNC32(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			rda32 = &sc->sc_rda32[i];
			SONIC_CDRXSYNC32(sc, i, BUS_DMASYNC_PREREAD);
			if (rda32->rda_inuse != 0)
				break;
			status = sonic32toh(sc, rda32->rda_status);
			bytecount = sonic32toh(sc, rda32->rda_bytecount);
			/* ptr0 = sonic32toh(sc, rda32->rda_pkt_ptr0); */
			/* ptr1 = sonic32toh(sc, rda32->rda_pkt_ptr1); */
			/* seqno = sonic32toh(sc, rda32->rda_seqno); */
		} else {
			SONIC_CDRXSYNC16(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			rda16 = &sc->sc_rda16[i];
			SONIC_CDRXSYNC16(sc, i, BUS_DMASYNC_PREREAD);
			if (rda16->rda_inuse != 0)
				break;
			status = sonic16toh(sc, rda16->rda_status);
			bytecount = sonic16toh(sc, rda16->rda_bytecount);
			/* ptr0 = sonic16toh(sc, rda16->rda_pkt_ptr0); */
			/* ptr1 = sonic16toh(sc, rda16->rda_pkt_ptr1); */
			/* seqno = sonic16toh(sc, rda16->rda_seqno); */
		}

		/*
		 * Make absolutely sure this is the only packet
		 * in this receive buffer.  Our entire Rx buffer
		 * management scheme depends on this, and if the
		 * SONIC didn't follow our rule, it means we've
		 * misconfigured it.
		 */
		KASSERT(status & RCR_LPKT);

		/*
		 * Make sure the packet arrived OK.  If an error occurred,
		 * update stats and reset the descriptor.  The buffer will
		 * be reused the next time the descriptor comes up in the
		 * ring.
		 */
		if ((status & RCR_PRX) == 0) {
			if (status & RCR_FAER)
				printf("%s: Rx frame alignment error\n",
				    device_xname(sc->sc_dev));
			else if (status & RCR_CRCR)
				printf("%s: Rx CRC error\n",
				    device_xname(sc->sc_dev));
			ifp->if_ierrors++;
			SONIC_INIT_RXDESC(sc, i);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * The SONIC includes the CRC with every packet.
		 */
		len = bytecount - ETHER_CRC_LEN;

		/*
		 * Ok, if the chip is in 32-bit mode, then receive
		 * buffers must be aligned to 32-bit boundaries,
		 * which means the payload is misaligned.  In this
		 * case, we must allocate a new mbuf, and copy the
		 * packet into it, scooted forward 2 bytes to ensure
		 * proper alignment.
		 *
		 * Note, in 16-bit mode, we can configure the SONIC
		 * to do what we want, and we have.
		 */
#ifndef __NO_STRICT_ALIGNMENT
		if (sc->sc_32bit) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				goto dropit;
			if (len > (MHLEN - 2)) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					m_freem(m);
					goto dropit;
				}
			}
			m->m_data += 2;
			/*
			 * Note that we use a cluster for incoming frames,
			 * so the buffer is virtually contiguous.
			 */
			memcpy(mtod(m, void *), mtod(ds->ds_mbuf, void *),
			    len);
			SONIC_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
		} else
#endif /* ! __NO_STRICT_ALIGNMENT */
		/*
		 * If the packet is small enough to fit in a single
		 * header mbuf, allocate one and copy the data into
		 * it.  This greatly reduces memory consumption when
		 * we receive lots of small packets.
		 */
		if (sonic_copy_small != 0 && len <= (MHLEN - 2)) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				goto dropit;
			m->m_data += 2;
			/*
			 * Note that we use a cluster for incoming frames,
			 * so the buffer is virtually contiguous.
			 */
			memcpy(mtod(m, void *), mtod(ds->ds_mbuf, void *),
			    len);
			SONIC_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
		} else {
			m = ds->ds_mbuf;
			if (sonic_add_rxbuf(sc, i) != 0) {
 dropit:
				ifp->if_ierrors++;
				SONIC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}
		}

		m_set_rcvif(m, ifp);
		m->m_pkthdr.len = m->m_len = len;

		/* Pass it on. */
		if_percpuq_enqueue(ifp->if_percpuq, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;
	CSR_WRITE(sc, SONIC_RWR, SONIC_CDRRADDR(sc, SONIC_PREVRX(i)));
}

/*
 * sonic_reset:
 *
 *	Perform a soft reset on the SONIC.
 */
void
sonic_reset(struct sonic_softc *sc)
{

	/* stop TX, RX and timer, and ensure RST is clear */
	CSR_WRITE(sc, SONIC_CR, CR_STP | CR_RXDIS | CR_HTX);
	delay(1000);

	CSR_WRITE(sc, SONIC_CR, CR_RST);
	delay(1000);

	/* clear all interrupts */
	CSR_WRITE(sc, SONIC_IMR, 0);
	CSR_WRITE(sc, SONIC_ISR, IMR_ALL);

	CSR_WRITE(sc, SONIC_CR, 0);
	delay(1000);
}

/*
 * sonic_init:		[ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 */
int
sonic_init(struct ifnet *ifp)
{
	struct sonic_softc *sc = ifp->if_softc;
	struct sonic_descsoft *ds;
	int i, error = 0;
	uint16_t reg;

	/*
	 * Cancel any pending I/O.
	 */
	sonic_stop(ifp, 0);

	/*
	 * Reset the SONIC to a known state.
	 */
	sonic_reset(sc);

	/*
	 * Bring the SONIC into reset state, and program the DCR.
	 *
	 * Note: We don't bother optimizing the transmit and receive
	 * thresholds, here.  TFT/RFT values should be set in MD attachments.
	 */
	reg = sc->sc_dcr;
	if (sc->sc_32bit)
		reg |= DCR_DW;
	CSR_WRITE(sc, SONIC_CR, CR_RST);
	CSR_WRITE(sc, SONIC_DCR, reg);
	CSR_WRITE(sc, SONIC_DCR2, sc->sc_dcr2);
	CSR_WRITE(sc, SONIC_CR, 0);

	/*
	 * Initialize the transmit descriptors.
	 */
	if (sc->sc_32bit) {
		for (i = 0; i < SONIC_NTXDESC; i++) {
			memset(&sc->sc_tda32[i], 0, sizeof(struct sonic_tda32));
			SONIC_CDTXSYNC32(sc, i,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		}
	} else {
		for (i = 0; i < SONIC_NTXDESC; i++) {
			memset(&sc->sc_tda16[i], 0, sizeof(struct sonic_tda16));
			SONIC_CDTXSYNC16(sc, i,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		}
	}
	sc->sc_txpending = 0;
	sc->sc_txdirty = 0;
	sc->sc_txlast = SONIC_NTXDESC - 1;

	/*
	 * Initialize the receive descriptor ring.
	 */
	for (i = 0; i < SONIC_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf == NULL) {
			if ((error = sonic_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map Rx "
				    "buffer %d, error = %d\n",
				    device_xname(sc->sc_dev), i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				sonic_rxdrain(sc);
				goto out;
			}
		} else
			SONIC_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	/* Give the transmit ring to the SONIC. */
	CSR_WRITE(sc, SONIC_UTDAR, (SONIC_CDTXADDR(sc, 0) >> 16) & 0xffff);
	CSR_WRITE(sc, SONIC_CTDAR, SONIC_CDTXADDR(sc, 0) & 0xffff);

	/* Give the receive descriptor ring to the SONIC. */
	CSR_WRITE(sc, SONIC_URDAR, (SONIC_CDRXADDR(sc, 0) >> 16) & 0xffff);
	CSR_WRITE(sc, SONIC_CRDAR, SONIC_CDRXADDR(sc, 0) & 0xffff);

	/* Give the receive buffer ring to the SONIC. */
	CSR_WRITE(sc, SONIC_URRAR, (SONIC_CDRRADDR(sc, 0) >> 16) & 0xffff);
	CSR_WRITE(sc, SONIC_RSAR, SONIC_CDRRADDR(sc, 0) & 0xffff);
	if (sc->sc_32bit)
		CSR_WRITE(sc, SONIC_REAR,
		    (SONIC_CDRRADDR(sc, SONIC_NRXDESC - 1) +
		    sizeof(struct sonic_rra32)) & 0xffff);
	else
		CSR_WRITE(sc, SONIC_REAR,
		    (SONIC_CDRRADDR(sc, SONIC_NRXDESC - 1) +
		    sizeof(struct sonic_rra16)) & 0xffff);
	CSR_WRITE(sc, SONIC_RRR, SONIC_CDRRADDR(sc, 0) & 0xffff);
	CSR_WRITE(sc, SONIC_RWR, SONIC_CDRRADDR(sc, SONIC_NRXDESC - 1));

	/*
	 * Set the End-Of-Buffer counter such that only one packet
	 * will be placed into each buffer we provide.  Note we are
	 * following the recommendation of section 3.4.4 of the manual
	 * here, and have "lengthened" the receive buffers accordingly.
	 */
	if (sc->sc_32bit)
		CSR_WRITE(sc, SONIC_EOBC, (ETHER_MAX_LEN + 2) / 2);
	else
		CSR_WRITE(sc, SONIC_EOBC, (ETHER_MAX_LEN / 2));

	/* Reset the receive sequence counter. */
	CSR_WRITE(sc, SONIC_RSC, 0);

	/* Clear the tally registers. */
	CSR_WRITE(sc, SONIC_CRCETC, 0xffff);
	CSR_WRITE(sc, SONIC_FAET, 0xffff);
	CSR_WRITE(sc, SONIC_MPT, 0xffff);

	/* Set the receive filter. */
	sonic_set_filter(sc);

	/*
	 * Set the interrupt mask register.
	 */
	sc->sc_imr = IMR_RFO | IMR_RBA | IMR_RBE | IMR_RDE |
	    IMR_TXER | IMR_PTX | IMR_PRX;
	CSR_WRITE(sc, SONIC_IMR, sc->sc_imr);

	/*
	 * Start the receive process in motion.  Note, we don't
	 * start the transmit process until we actually try to
	 * transmit packets.
	 */
	CSR_WRITE(sc, SONIC_CR, CR_RXEN | CR_RRRA);

	/*
	 * ...all done!
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error)
		printf("%s: interface not running\n", device_xname(sc->sc_dev));
	return error;
}

/*
 * sonic_rxdrain:
 *
 *	Drain the receive queue.
 */
void
sonic_rxdrain(struct sonic_softc *sc)
{
	struct sonic_descsoft *ds;
	int i;

	for (i = 0; i < SONIC_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}
}

/*
 * sonic_stop:		[ifnet interface function]
 *
 *	Stop transmission on the interface.
 */
void
sonic_stop(struct ifnet *ifp, int disable)
{
	struct sonic_softc *sc = ifp->if_softc;
	struct sonic_descsoft *ds;
	int i;

	/*
	 * Disable interrupts.
	 */
	CSR_WRITE(sc, SONIC_IMR, 0);

	/*
	 * Stop the transmitter, receiver, and timer.
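	 * The stop is not immediate; the loop below polls CR for roughly
	 * 2 ms (1000 iterations of delay(2)) waiting for the TXP, RXEN,
	 * and ST bits to clear before giving up.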
	 */
	CSR_WRITE(sc, SONIC_CR, CR_HTX|CR_RXDIS|CR_STP);
	for (i = 0; i < 1000; i++) {
		if ((CSR_READ(sc, SONIC_CR) & (CR_TXP|CR_RXEN|CR_ST)) == 0)
			break;
		delay(2);
	}
	if ((CSR_READ(sc, SONIC_CR) & (CR_TXP|CR_RXEN|CR_ST)) != 0)
		printf("%s: SONIC failed to stop\n", device_xname(sc->sc_dev));

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < SONIC_NTXDESC; i++) {
		ds = &sc->sc_txsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	if (disable)
		sonic_rxdrain(sc);
}

/*
 * sonic_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
int
sonic_add_rxbuf(struct sonic_softc *sc, int idx)
{
	struct sonic_descsoft *ds = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	if (ds->ds_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);

	ds->ds_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    device_xname(sc->sc_dev), idx, error);
		panic("sonic_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	SONIC_INIT_RXDESC(sc, idx);

	return 0;
}

static void
sonic_set_camentry(struct sonic_softc *sc, int entry, const uint8_t *enaddr)
{

	if (sc->sc_32bit) {
		struct sonic_cda32 *cda = &sc->sc_cda32[entry];

		cda->cda_entry = htosonic32(sc, entry);
		cda->cda_addr0 = htosonic32(sc, enaddr[0] | (enaddr[1] << 8));
		cda->cda_addr1 = htosonic32(sc, enaddr[2] | (enaddr[3] << 8));
		cda->cda_addr2 = htosonic32(sc, enaddr[4] | (enaddr[5] << 8));
	} else {
		struct sonic_cda16 *cda = &sc->sc_cda16[entry];

		cda->cda_entry = htosonic16(sc, entry);
		cda->cda_addr0 = htosonic16(sc, enaddr[0] | (enaddr[1] << 8));
		cda->cda_addr1 = htosonic16(sc, enaddr[2] | (enaddr[3] << 8));
		cda->cda_addr2 = htosonic16(sc, enaddr[4] | (enaddr[5] << 8));
	}
}

/*
 * sonic_set_filter:
 *
 *	Set the SONIC receive filter.
 */
void
sonic_set_filter(struct sonic_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	int i, entry = 0;
	uint16_t camvalid = 0;
	uint16_t rcr = 0;

	if (ifp->if_flags & IFF_BROADCAST)
		rcr |= RCR_BRD;

	if (ifp->if_flags & IFF_PROMISC) {
		rcr |= RCR_PRO;
		goto allmulti;
	}

	/* Put our station address in the first CAM slot. */
	sonic_set_camentry(sc, entry, CLLADDR(ifp->if_sadl));
	camvalid |= (1U << entry);
	entry++;

	/* Add the multicast addresses to the CAM. */
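	/*
	 * Slot 0 already holds the station address, so at most
	 * SONIC_NCAMENT - 1 multicast entries fit here; an address
	 * range or running out of CAM slots falls back to ALLMULTI
	 * below.
	 */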
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * The only way to do this on the SONIC is to enable
			 * reception of all multicast packets.
			 */
			goto allmulti;
		}

		if (entry == SONIC_NCAMENT) {
			/*
			 * Out of CAM slots.  Have to enable reception
			 * of all multicast addresses.
			 */
			goto allmulti;
		}

		sonic_set_camentry(sc, entry, enm->enm_addrlo);
		camvalid |= (1U << entry);
		entry++;

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto setit;

 allmulti:
	/* Use only the first CAM slot (station address). */
	camvalid = 0x0001;
	entry = 1;
	rcr |= RCR_AMC;

 setit:
	/* set mask for the CAM Enable register */
	if (sc->sc_32bit) {
		if (entry == SONIC_NCAMENT)
			sc->sc_cdaenable32 = htosonic32(sc, camvalid);
		else
			sc->sc_cda32[entry].cda_entry =
			    htosonic32(sc, camvalid);
	} else {
		if (entry == SONIC_NCAMENT)
			sc->sc_cdaenable16 = htosonic16(sc, camvalid);
		else
			sc->sc_cda16[entry].cda_entry =
			    htosonic16(sc, camvalid);
	}

	/* Load the CAM. */
	SONIC_CDCAMSYNC(sc, BUS_DMASYNC_PREWRITE);
	CSR_WRITE(sc, SONIC_CDP, SONIC_CDCAMADDR(sc) & 0xffff);
	CSR_WRITE(sc, SONIC_CDC, entry);
	CSR_WRITE(sc, SONIC_CR, CR_LCAM);
	for (i = 0; i < 10000; i++) {
		if ((CSR_READ(sc, SONIC_CR) & CR_LCAM) == 0)
			break;
		delay(2);
	}
	if (CSR_READ(sc, SONIC_CR) & CR_LCAM)
		printf("%s: CAM load failed\n", device_xname(sc->sc_dev));
	SONIC_CDCAMSYNC(sc, BUS_DMASYNC_POSTWRITE);

	/* Set the receive control register. */
	CSR_WRITE(sc, SONIC_RCR, rcr);
}