1 /* $OpenBSD: if_pcn.c,v 1.21 2008/11/28 02:44:18 brad Exp $ */ 2 /* $NetBSD: if_pcn.c,v 1.26 2005/05/07 09:15:44 is Exp $ */ 3 4 /* 5 * Copyright (c) 2001 Wasabi Systems, Inc. 6 * All rights reserved. 7 * 8 * Written by Jason R. Thorpe for Wasabi Systems, Inc. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed for the NetBSD Project by 21 * Wasabi Systems, Inc. 22 * 4. The name of Wasabi Systems, Inc. may not be used to endorse 23 * or promote products derived from this software without specific prior 24 * written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC 30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 36 * POSSIBILITY OF SUCH DAMAGE. 
37 */ 38 39 /* 40 * Device driver for the AMD PCnet-PCI series of Ethernet 41 * chips: 42 * 43 * * Am79c970 PCnet-PCI Single-Chip Ethernet Controller for PCI 44 * Local Bus 45 * 46 * * Am79c970A PCnet-PCI II Single-Chip Full-Duplex Ethernet Controller 47 * for PCI Local Bus 48 * 49 * * Am79c971 PCnet-FAST Single-Chip Full-Duplex 10/100Mbps 50 * Ethernet Controller for PCI Local Bus 51 * 52 * * Am79c972 PCnet-FAST+ Enhanced 10/100Mbps PCI Ethernet Controller 53 * with OnNow Support 54 * 55 * * Am79c973/Am79c975 PCnet-FAST III Single-Chip 10/100Mbps PCI 56 * Ethernet Controller with Integrated PHY 57 * 58 * This also supports the virtual PCnet-PCI Ethernet interface found 59 * in VMware. 60 * 61 * TODO: 62 * 63 * * Split this into bus-specific and bus-independent portions. 64 * The core could also be used for the ILACC (Am79900) 32-bit 65 * Ethernet chip (XXX only if we use an ILACC-compatible SWSTYLE). 66 */ 67 68 #include "bpfilter.h" 69 70 #include <sys/param.h> 71 #include <sys/systm.h> 72 #include <sys/timeout.h> 73 #include <sys/mbuf.h> 74 #include <sys/malloc.h> 75 #include <sys/kernel.h> 76 #include <sys/socket.h> 77 #include <sys/ioctl.h> 78 #include <sys/errno.h> 79 #include <sys/device.h> 80 #include <sys/queue.h> 81 82 #include <net/if.h> 83 #include <net/if_dl.h> 84 85 #ifdef INET 86 #include <netinet/in.h> 87 #include <netinet/in_systm.h> 88 #include <netinet/in_var.h> 89 #include <netinet/ip.h> 90 #include <netinet/if_ether.h> 91 #endif 92 93 #include <net/if_media.h> 94 95 #if NBPFILTER > 0 96 #include <net/bpf.h> 97 #endif 98 99 #include <machine/bus.h> 100 #include <machine/intr.h> 101 #include <machine/endian.h> 102 103 #include <dev/mii/mii.h> 104 #include <dev/mii/miivar.h> 105 106 #include <dev/ic/am79900reg.h> 107 #include <dev/ic/lancereg.h> 108 109 #include <dev/pci/pcireg.h> 110 #include <dev/pci/pcivar.h> 111 #include <dev/pci/pcidevs.h> 112 113 /* 114 * Register definitions for the AMD PCnet-PCI series of Ethernet 115 * chips. 
116 * 117 * These are only the registers that we access directly from PCI 118 * space. Everything else (accessed via the RAP + RDP/BDP) is 119 * defined in <dev/ic/lancereg.h>. 120 */ 121 122 /* 123 * PCI configuration space. 124 */ 125 126 #define PCN_PCI_CBIO (PCI_MAPREG_START + 0x00) 127 #define PCN_PCI_CBMEM (PCI_MAPREG_START + 0x04) 128 129 /* 130 * I/O map in Word I/O mode. 131 */ 132 133 #define PCN16_APROM 0x00 134 #define PCN16_RDP 0x10 135 #define PCN16_RAP 0x12 136 #define PCN16_RESET 0x14 137 #define PCN16_BDP 0x16 138 139 /* 140 * I/O map in DWord I/O mode. 141 */ 142 143 #define PCN32_APROM 0x00 144 #define PCN32_RDP 0x10 145 #define PCN32_RAP 0x14 146 #define PCN32_RESET 0x18 147 #define PCN32_BDP 0x1c 148 149 /* 150 * Transmit descriptor list size. This is arbitrary, but allocate 151 * enough descriptors for 128 pending transmissions, and 4 segments 152 * per packet. This MUST work out to a power of 2. 153 * 154 * NOTE: We can't have any more than 512 Tx descriptors, SO BE CAREFUL! 155 * 156 * So we play a little trick here. We give each packet up to 16 157 * DMA segments, but only allocate the max of 512 descriptors. The 158 * transmit logic can deal with this, we just are hoping to sneak by. 159 */ 160 #define PCN_NTXSEGS 16 161 162 #define PCN_TXQUEUELEN 128 163 #define PCN_TXQUEUELEN_MASK (PCN_TXQUEUELEN - 1) 164 #define PCN_NTXDESC 512 165 #define PCN_NTXDESC_MASK (PCN_NTXDESC - 1) 166 #define PCN_NEXTTX(x) (((x) + 1) & PCN_NTXDESC_MASK) 167 #define PCN_NEXTTXS(x) (((x) + 1) & PCN_TXQUEUELEN_MASK) 168 169 /* Tx interrupt every N + 1 packets. */ 170 #define PCN_TXINTR_MASK 7 171 172 /* 173 * Receive descriptor list size. We have one Rx buffer per incoming 174 * packet, so this logic is a little simpler. 175 */ 176 #define PCN_NRXDESC 128 177 #define PCN_NRXDESC_MASK (PCN_NRXDESC - 1) 178 #define PCN_NEXTRX(x) (((x) + 1) & PCN_NRXDESC_MASK) 179 180 /* 181 * Control structures are DMA'd to the PCnet chip. 
We allocate them in 182 * a single clump that maps to a single DMA segment to make several things 183 * easier. 184 */ 185 struct pcn_control_data { 186 /* The transmit descriptors. */ 187 struct letmd pcd_txdescs[PCN_NTXDESC]; 188 189 /* The receive descriptors. */ 190 struct lermd pcd_rxdescs[PCN_NRXDESC]; 191 192 /* The init block. */ 193 struct leinit pcd_initblock; 194 }; 195 196 #define PCN_CDOFF(x) offsetof(struct pcn_control_data, x) 197 #define PCN_CDTXOFF(x) PCN_CDOFF(pcd_txdescs[(x)]) 198 #define PCN_CDRXOFF(x) PCN_CDOFF(pcd_rxdescs[(x)]) 199 #define PCN_CDINITOFF PCN_CDOFF(pcd_initblock) 200 201 /* 202 * Software state for transmit jobs. 203 */ 204 struct pcn_txsoft { 205 struct mbuf *txs_mbuf; /* head of our mbuf chain */ 206 bus_dmamap_t txs_dmamap; /* our DMA map */ 207 int txs_firstdesc; /* first descriptor in packet */ 208 int txs_lastdesc; /* last descriptor in packet */ 209 }; 210 211 /* 212 * Software state for receive jobs. 213 */ 214 struct pcn_rxsoft { 215 struct mbuf *rxs_mbuf; /* head of our mbuf chain */ 216 bus_dmamap_t rxs_dmamap; /* our DMA map */ 217 }; 218 219 /* 220 * Description of Rx FIFO watermarks for various revisions. 221 */ 222 static const char * const pcn_79c970_rcvfw[] = { 223 "16 bytes", 224 "64 bytes", 225 "128 bytes", 226 NULL, 227 }; 228 229 static const char * const pcn_79c971_rcvfw[] = { 230 "16 bytes", 231 "64 bytes", 232 "112 bytes", 233 NULL, 234 }; 235 236 /* 237 * Description of Tx start points for various revisions. 238 */ 239 static const char * const pcn_79c970_xmtsp[] = { 240 "8 bytes", 241 "64 bytes", 242 "128 bytes", 243 "248 bytes", 244 }; 245 246 static const char * const pcn_79c971_xmtsp[] = { 247 "20 bytes", 248 "64 bytes", 249 "128 bytes", 250 "248 bytes", 251 }; 252 253 static const char * const pcn_79c971_xmtsp_sram[] = { 254 "44 bytes", 255 "64 bytes", 256 "128 bytes", 257 "store-and-forward", 258 }; 259 260 /* 261 * Description of Tx FIFO watermarks for various revisions. 
262 */ 263 static const char * const pcn_79c970_xmtfw[] = { 264 "16 bytes", 265 "64 bytes", 266 "128 bytes", 267 NULL, 268 }; 269 270 static const char * const pcn_79c971_xmtfw[] = { 271 "16 bytes", 272 "64 bytes", 273 "108 bytes", 274 NULL, 275 }; 276 277 /* 278 * Software state per device. 279 */ 280 struct pcn_softc { 281 struct device sc_dev; /* generic device information */ 282 bus_space_tag_t sc_st; /* bus space tag */ 283 bus_space_handle_t sc_sh; /* bus space handle */ 284 bus_dma_tag_t sc_dmat; /* bus DMA tag */ 285 struct arpcom sc_arpcom; /* Ethernet common data */ 286 void *sc_sdhook; /* shutdown hook */ 287 288 /* Points to our media routines, etc. */ 289 const struct pcn_variant *sc_variant; 290 291 void *sc_ih; /* interrupt cookie */ 292 293 struct mii_data sc_mii; /* MII/media information */ 294 295 struct timeout sc_tick_timeout; /* tick timeout */ 296 297 bus_dmamap_t sc_cddmamap; /* control data DMA map */ 298 #define sc_cddma sc_cddmamap->dm_segs[0].ds_addr 299 300 /* Software state for transmit and receive descriptors. */ 301 struct pcn_txsoft sc_txsoft[PCN_TXQUEUELEN]; 302 struct pcn_rxsoft sc_rxsoft[PCN_NRXDESC]; 303 304 /* Control data structures */ 305 struct pcn_control_data *sc_control_data; 306 #define sc_txdescs sc_control_data->pcd_txdescs 307 #define sc_rxdescs sc_control_data->pcd_rxdescs 308 #define sc_initblock sc_control_data->pcd_initblock 309 310 const char * const *sc_rcvfw_desc; /* Rx FIFO watermark info */ 311 int sc_rcvfw; 312 313 const char * const *sc_xmtsp_desc; /* Tx start point info */ 314 int sc_xmtsp; 315 316 const char * const *sc_xmtfw_desc; /* Tx FIFO watermark info */ 317 int sc_xmtfw; 318 319 int sc_flags; /* misc. 
flags; see below */ 320 int sc_swstyle; /* the software style in use */ 321 322 int sc_txfree; /* number of free Tx descriptors */ 323 int sc_txnext; /* next ready Tx descriptor */ 324 325 int sc_txsfree; /* number of free Tx jobs */ 326 int sc_txsnext; /* next free Tx job */ 327 int sc_txsdirty; /* dirty Tx jobs */ 328 329 int sc_rxptr; /* next ready Rx descriptor/job */ 330 331 uint32_t sc_csr5; /* prototype CSR5 register */ 332 uint32_t sc_mode; /* prototype MODE register */ 333 }; 334 335 /* sc_flags */ 336 #define PCN_F_HAS_MII 0x0001 /* has MII */ 337 338 #define PCN_CDTXADDR(sc, x) ((sc)->sc_cddma + PCN_CDTXOFF((x))) 339 #define PCN_CDRXADDR(sc, x) ((sc)->sc_cddma + PCN_CDRXOFF((x))) 340 #define PCN_CDINITADDR(sc) ((sc)->sc_cddma + PCN_CDINITOFF) 341 342 #define PCN_CDTXSYNC(sc, x, n, ops) \ 343 do { \ 344 int __x, __n; \ 345 \ 346 __x = (x); \ 347 __n = (n); \ 348 \ 349 /* If it will wrap around, sync to the end of the ring. */ \ 350 if ((__x + __n) > PCN_NTXDESC) { \ 351 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ 352 PCN_CDTXOFF(__x), sizeof(struct letmd) * \ 353 (PCN_NTXDESC - __x), (ops)); \ 354 __n -= (PCN_NTXDESC - __x); \ 355 __x = 0; \ 356 } \ 357 \ 358 /* Now sync whatever is left. 
*/ \ 359 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ 360 PCN_CDTXOFF(__x), sizeof(struct letmd) * __n, (ops)); \ 361 } while (/*CONSTCOND*/0) 362 363 #define PCN_CDRXSYNC(sc, x, ops) \ 364 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ 365 PCN_CDRXOFF((x)), sizeof(struct lermd), (ops)) 366 367 #define PCN_CDINITSYNC(sc, ops) \ 368 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ 369 PCN_CDINITOFF, sizeof(struct leinit), (ops)) 370 371 #define PCN_INIT_RXDESC(sc, x) \ 372 do { \ 373 struct pcn_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \ 374 struct lermd *__rmd = &(sc)->sc_rxdescs[(x)]; \ 375 struct mbuf *__m = __rxs->rxs_mbuf; \ 376 \ 377 /* \ 378 * Note: We scoot the packet forward 2 bytes in the buffer \ 379 * so that the payload after the Ethernet header is aligned \ 380 * to a 4-byte boundary. \ 381 */ \ 382 __m->m_data = __m->m_ext.ext_buf + 2; \ 383 \ 384 if ((sc)->sc_swstyle == LE_B20_SSTYLE_PCNETPCI3) { \ 385 __rmd->rmd2 = \ 386 htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr + 2); \ 387 __rmd->rmd0 = 0; \ 388 } else { \ 389 __rmd->rmd2 = 0; \ 390 __rmd->rmd0 = \ 391 htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr + 2); \ 392 } \ 393 __rmd->rmd1 = htole32(LE_R1_OWN|LE_R1_ONES| \ 394 (LE_BCNT(MCLBYTES - 2) & LE_R1_BCNT_MASK)); \ 395 PCN_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);\ 396 } while(/*CONSTCOND*/0) 397 398 void pcn_start(struct ifnet *); 399 void pcn_watchdog(struct ifnet *); 400 int pcn_ioctl(struct ifnet *, u_long, caddr_t); 401 int pcn_init(struct ifnet *); 402 void pcn_stop(struct ifnet *, int); 403 404 void pcn_shutdown(void *); 405 406 void pcn_reset(struct pcn_softc *); 407 void pcn_rxdrain(struct pcn_softc *); 408 int pcn_add_rxbuf(struct pcn_softc *, int); 409 void pcn_tick(void *); 410 411 void pcn_spnd(struct pcn_softc *); 412 413 void pcn_set_filter(struct pcn_softc *); 414 415 int pcn_intr(void *); 416 void pcn_txintr(struct pcn_softc *); 417 int pcn_rxintr(struct pcn_softc *); 418 419 int 
pcn_mii_readreg(struct device *, int, int); 420 void pcn_mii_writereg(struct device *, int, int, int); 421 void pcn_mii_statchg(struct device *); 422 423 void pcn_79c970_mediainit(struct pcn_softc *); 424 int pcn_79c970_mediachange(struct ifnet *); 425 void pcn_79c970_mediastatus(struct ifnet *, struct ifmediareq *); 426 427 void pcn_79c971_mediainit(struct pcn_softc *); 428 int pcn_79c971_mediachange(struct ifnet *); 429 void pcn_79c971_mediastatus(struct ifnet *, struct ifmediareq *); 430 431 /* 432 * Description of a PCnet-PCI variant. Used to select media access 433 * method, mostly, and to print a nice description of the chip. 434 */ 435 static const struct pcn_variant { 436 const char *pcv_desc; 437 void (*pcv_mediainit)(struct pcn_softc *); 438 uint16_t pcv_chipid; 439 } pcn_variants[] = { 440 { "Am79c970", 441 pcn_79c970_mediainit, 442 PARTID_Am79c970 }, 443 444 { "Am79c970A", 445 pcn_79c970_mediainit, 446 PARTID_Am79c970A }, 447 448 { "Am79c971", 449 pcn_79c971_mediainit, 450 PARTID_Am79c971 }, 451 452 { "Am79c972", 453 pcn_79c971_mediainit, 454 PARTID_Am79c972 }, 455 456 { "Am79c973", 457 pcn_79c971_mediainit, 458 PARTID_Am79c973 }, 459 460 { "Am79c975", 461 pcn_79c971_mediainit, 462 PARTID_Am79c975 }, 463 464 { "Am79c976", 465 pcn_79c971_mediainit, 466 PARTID_Am79c976 }, 467 468 { "Am79c978", 469 pcn_79c971_mediainit, 470 PARTID_Am79c978 }, 471 472 { "Unknown", 473 pcn_79c971_mediainit, 474 0 }, 475 }; 476 477 int pcn_copy_small = 0; 478 479 int pcn_match(struct device *, void *, void *); 480 void pcn_attach(struct device *, struct device *, void *); 481 482 struct cfattach pcn_ca = { 483 sizeof(struct pcn_softc), pcn_match, pcn_attach, 484 }; 485 486 const struct pci_matchid pcn_devices[] = { 487 { PCI_VENDOR_AMD, PCI_PRODUCT_AMD_PCNET_PCI }, 488 { PCI_VENDOR_AMD, PCI_PRODUCT_AMD_PCHOME_PCI } 489 }; 490 491 struct cfdriver pcn_cd = { 492 0, "pcn", DV_IFNET 493 }; 494 495 /* 496 * Routines to read and write the PCnet-PCI CSR/BCR space. 
 */

/*
 * pcn_csr_read:
 *
 *	Read CSR `reg': select it via the 32-bit RAP, then read RDP.
 */
static __inline uint32_t
pcn_csr_read(struct pcn_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RAP, reg);
	return (bus_space_read_4(sc->sc_st, sc->sc_sh, PCN32_RDP));
}

/*
 * pcn_csr_write:
 *
 *	Write `val' to CSR `reg': select it via RAP, then write RDP.
 */
static __inline void
pcn_csr_write(struct pcn_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RAP, reg);
	bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RDP, val);
}

/*
 * pcn_bcr_read:
 *
 *	Read BCR `reg': select it via RAP, then read BDP.
 */
static __inline uint32_t
pcn_bcr_read(struct pcn_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RAP, reg);
	return (bus_space_read_4(sc->sc_st, sc->sc_sh, PCN32_BDP));
}

/*
 * pcn_bcr_write:
 *
 *	Write `val' to BCR `reg': select it via RAP, then write BDP.
 */
static __inline void
pcn_bcr_write(struct pcn_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RAP, reg);
	bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_BDP, val);
}

/*
 * pcn_lookup_variant:
 *
 *	Map a part ID (from CSR88) to its pcn_variants[] entry.
 */
static const struct pcn_variant *
pcn_lookup_variant(uint16_t chipid)
{
	const struct pcn_variant *pcv;

	for (pcv = pcn_variants; pcv->pcv_chipid != 0; pcv++) {
		if (chipid == pcv->pcv_chipid)
			return (pcv);
	}

	/*
	 * This covers unknown chips, which we simply treat like
	 * a generic PCnet-FAST.  (The table's terminating entry
	 * has pcv_chipid == 0, so `pcv' points at "Unknown" here.)
	 */
	return (pcv);
}

/*
 * pcn_match:
 *
 *	Check for a PCnet-PCI device.
 */
int
pcn_match(struct device *parent, void *match, void *aux)
{
	struct pci_attach_args *pa = aux;

	/*
	 * IBM makes a PCI variant of this card which shows up as a
	 * Trident Microsystems 4DWAVE DX (ethernet network, revision 0x25)
	 * this card is truly a pcn card, so we have a special case match for
	 * it.
	 */
	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_TRIDENT &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_TRIDENT_4DWAVE_DX &&
	    PCI_CLASS(pa->pa_class) == PCI_CLASS_NETWORK)
		return(1);

	return (pci_matchbyid((struct pci_attach_args *)aux, pcn_devices,
	    sizeof(pcn_devices)/sizeof(pcn_devices[0])));
}

/*
 * pcn_attach:
 *
 *	Attach a PCnet-PCI device: map registers, read the station
 *	address, identify the chip variant, hook the interrupt, and
 *	allocate/map all descriptor DMA resources.
 */
void
pcn_attach(struct device *parent, struct device *self, void *aux)
{
	struct pcn_softc *sc = (struct pcn_softc *) self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	bus_space_tag_t iot, memt;
	bus_space_handle_t ioh, memh;
	bus_dma_segment_t seg;
	int ioh_valid, memh_valid;
	int i, rseg, error;
	uint32_t chipid, reg;
	uint8_t enaddr[ETHER_ADDR_LEN];
	int state;

	timeout_set(&sc->sc_tick_timeout, pcn_tick, sc);

	/*
	 * Map the device.  Prefer memory space if both mappings work.
	 */
	ioh_valid = (pci_mapreg_map(pa, PCN_PCI_CBIO, PCI_MAPREG_TYPE_IO, 0,
	    &iot, &ioh, NULL, NULL, 0) == 0);
	memh_valid = (pci_mapreg_map(pa, PCN_PCI_CBMEM,
	    PCI_MAPREG_TYPE_MEM|PCI_MAPREG_MEM_TYPE_32BIT, 0,
	    &memt, &memh, NULL, NULL, 0) == 0);

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else if (ioh_valid) {
		sc->sc_st = iot;
		sc->sc_sh = ioh;
	} else {
		printf(": unable to map device registers\n");
		return;
	}

	sc->sc_dmat = pa->pa_dmat;

	/* Get it out of power save mode, if needed. */
	state = pci_set_powerstate(pc, pa->pa_tag, PCI_PMCSR_STATE_D0);
	if (state == PCI_PMCSR_STATE_D3) {
		/*
		 * The card has lost all configuration data in
		 * this state, so punt.
		 */
		printf(": unable to wake up from power state D3, "
		    "reboot required.\n");
		return;
	}

	/*
	 * Reset the chip to a known state.  This also puts the
	 * chip into 32-bit mode.
	 */
	pcn_reset(sc);

#if !defined(PCN_NO_PROM)

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		enaddr[i] = bus_space_read_1(sc->sc_st, sc->sc_sh,
		    PCN32_APROM + i);
#else
	/*
	 * The PROM is not used; instead we assume that the MAC address
	 * has been programmed into the device's physical address
	 * registers (CSR12-CSR14) by the boot firmware.
	 */

	for (i=0; i < 3; i++) {
		uint32_t val;
		val = pcn_csr_read(sc, LE_CSR12 + i);
		enaddr[2*i] = val & 0x0ff;
		enaddr[2*i+1] = (val >> 8) & 0x0ff;
	}
#endif

	/*
	 * Now that the device is mapped, attempt to figure out what
	 * kind of chip we have.  Note that IDL has all 32 bits of
	 * the chip ID when we're in 32-bit mode.
	 */
	chipid = pcn_csr_read(sc, LE_CSR88);
	sc->sc_variant = pcn_lookup_variant(CHIPID_PARTID(chipid));

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		printf(": unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, pcn_intr, sc,
	    self->dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": unable to establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct pcn_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		printf(": unable to allocate control data, error = %d\n",
		    error);
		return;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct pcn_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf(": unable to map control data, error = %d\n",
		    error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct pcn_control_data), 1,
	    sizeof(struct pcn_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf(": unable to create control data DMA map, "
		    "error = %d\n", error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct pcn_control_data), NULL,
	    0)) != 0) {
		printf(": unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	/* Create the transmit buffer DMA maps. */
	for (i = 0; i < PCN_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    PCN_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			printf(": unable to create tx DMA map %d, "
			    "error = %d\n", i, error);
			goto fail_4;
		}
	}

	/* Create the receive buffer DMA maps. */
	for (i = 0; i < PCN_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			printf(": unable to create rx DMA map %d, "
			    "error = %d\n", i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	printf(", %s, rev %d: %s, address %s\n", sc->sc_variant->pcv_desc,
	    CHIPID_VER(chipid), intrstr, ether_sprintf(enaddr));

	/* Initialize our media structures. */
	(*sc->sc_variant->pcv_mediainit)(sc);

	/*
	 * Initialize FIFO watermark info.
	 */
	switch (sc->sc_variant->pcv_chipid) {
	case PARTID_Am79c970:
	case PARTID_Am79c970A:
		sc->sc_rcvfw_desc = pcn_79c970_rcvfw;
		sc->sc_xmtsp_desc = pcn_79c970_xmtsp;
		sc->sc_xmtfw_desc = pcn_79c970_xmtfw;
		break;

	default:
		sc->sc_rcvfw_desc = pcn_79c971_rcvfw;
		/*
		 * Read BCR25 to determine how much SRAM is
		 * on the board.  If > 0, then the chip
		 * uses different Start Point thresholds.
		 *
		 * Note BCR25 and BCR26 are loaded from the
		 * EEPROM on RST, and unaffected by S_RESET,
		 * so we don't really have to worry about
		 * them except for this.
		 */
		reg = pcn_bcr_read(sc, LE_BCR25) & 0x00ff;
		if (reg != 0)
			sc->sc_xmtsp_desc = pcn_79c971_xmtsp_sram;
		else
			sc->sc_xmtsp_desc = pcn_79c971_xmtsp;
		sc->sc_xmtfw_desc = pcn_79c971_xmtfw;
		break;
	}

	/*
	 * Set up defaults -- see the tables above for what these
	 * values mean.
	 *
	 * XXX How should we tune RCVFW and XMTFW?
	 */
	sc->sc_rcvfw = 1;	/* minimum for full-duplex */
	sc->sc_xmtsp = 1;
	sc->sc_xmtfw = 0;

	/* Fill in the ifnet and attach the interface. */
	ifp = &sc->sc_arpcom.ac_if;
	bcopy(enaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = pcn_ioctl;
	ifp->if_start = pcn_start;
	ifp->if_watchdog = pcn_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, PCN_NTXDESC -1);
	IFQ_SET_READY(&ifp->if_snd);

	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp);

	/* Make sure the interface is shutdown during reboot. */
	sc->sc_sdhook = shutdownhook_establish(pcn_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
 */
 fail_5:
	for (i = 0; i < PCN_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < PCN_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct pcn_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
}

/*
 * pcn_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
void
pcn_shutdown(void *arg)
{
	struct pcn_softc *sc = arg;

	pcn_stop(&sc->sc_arpcom.ac_if, 1);
	pcn_reset(sc);
}

/*
 * pcn_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
void
pcn_start(struct ifnet *ifp)
{
	struct pcn_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct pcn_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx = -1, ofree, seg;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		/* Get a work queue entry. */
		if (sc->sc_txsfree == 0)
			break;

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.
		 * If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  In this case, we'll copy
		 * and try again.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				break;
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error)
				break;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring as a termination point, to
		 * prevent wrap-around.
		 */
		if (dmamap->dm_nsegs > (sc->sc_txfree - 1)) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 *
			 * XXX We could allocate an mbuf and copy, but
			 * XXX is it worth it?
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			if (m != NULL)
				m_freem(m);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			/* We made a copy above; free the original. */
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptors.  The two software
		 * styles swap the roles of tmd0/tmd2 for the buffer address.
		 */
		if (sc->sc_swstyle == LE_B20_SSTYLE_PCNETPCI3) {
			for (nexttx = sc->sc_txnext, seg = 0;
			     seg < dmamap->dm_nsegs;
			     seg++, nexttx = PCN_NEXTTX(nexttx)) {
				/*
				 * If this is the first descriptor we're
				 * enqueueing, don't set the OWN bit just
				 * yet.  That could cause a race condition.
				 * We'll do it below.
				 */
				sc->sc_txdescs[nexttx].tmd0 = 0;
				sc->sc_txdescs[nexttx].tmd2 =
				    htole32(dmamap->dm_segs[seg].ds_addr);
				sc->sc_txdescs[nexttx].tmd1 =
				    htole32(LE_T1_ONES |
				    (nexttx == sc->sc_txnext ? 0 : LE_T1_OWN) |
				    (LE_BCNT(dmamap->dm_segs[seg].ds_len) &
				     LE_T1_BCNT_MASK));
				lasttx = nexttx;
			}
		} else {
			for (nexttx = sc->sc_txnext, seg = 0;
			     seg < dmamap->dm_nsegs;
			     seg++, nexttx = PCN_NEXTTX(nexttx)) {
				/*
				 * If this is the first descriptor we're
				 * enqueueing, don't set the OWN bit just
				 * yet.  That could cause a race condition.
				 * We'll do it below.
				 */
				sc->sc_txdescs[nexttx].tmd0 =
				    htole32(dmamap->dm_segs[seg].ds_addr);
				sc->sc_txdescs[nexttx].tmd2 = 0;
				sc->sc_txdescs[nexttx].tmd1 =
				    htole32(LE_T1_ONES |
				    (nexttx == sc->sc_txnext ? 0 : LE_T1_OWN) |
				    (LE_BCNT(dmamap->dm_segs[seg].ds_len) &
				     LE_T1_BCNT_MASK));
				lasttx = nexttx;
			}
		}

		KASSERT(lasttx != -1);
		/* Interrupt on the packet, if appropriate. */
		if ((sc->sc_txsnext & PCN_TXINTR_MASK) == 0)
			sc->sc_txdescs[lasttx].tmd1 |= htole32(LE_T1_LTINT);

		/* Set `start of packet' and `end of packet' appropriately. */
		sc->sc_txdescs[lasttx].tmd1 |= htole32(LE_T1_ENP);
		sc->sc_txdescs[sc->sc_txnext].tmd1 |=
		    htole32(LE_T1_OWN|LE_T1_STP);

		/* Sync the descriptors we're using. */
		PCN_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Kick the transmitter.
 */
		pcn_csr_write(sc, LE_CSR0, LE_C0_INEA|LE_C0_TDMD);

		/*
		 * Store a pointer to the packet so we can free it later,
		 * and remember what txdirty will be once the packet is
		 * done.
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_lastdesc = lasttx;

		/* Advance the tx pointer. */
		sc->sc_txfree -= dmamap->dm_nsegs;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = PCN_NEXTTXS(sc->sc_txsnext);

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif /* NBPFILTER > 0 */
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree == 0) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * pcn_watchdog:	[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
void
pcn_watchdog(struct ifnet *ifp)
{
	struct pcn_softc *sc = ifp->if_softc;

	/*
	 * Since we're not interrupting every packet, sweep
	 * up before we report an error.
	 */
	pcn_txintr(sc);

	if (sc->sc_txfree != PCN_NTXDESC) {
		printf("%s: device timeout (txfree %d txsfree %d)\n",
		    sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree);
		ifp->if_oerrors++;

		/* Reset the interface. */
		(void) pcn_init(ifp);
	}

	/* Try to get more packets going. */
	pcn_start(ifp);
}

/*
 * pcn_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
int
pcn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct pcn_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *) data;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;

		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			pcn_init(ifp);
			arp_ifinit(&sc->sc_arpcom, ifa);
			break;
#endif
		default:
			pcn_init(ifp);
			break;
		}
		break;

	case SIOCSIFFLAGS:
		/*
		 * If interface is marked up and not running, then start it.
		 * If it is marked down and running, stop it.
		 * XXX If it's up then re-initialize it. This is so flags
		 * such as IFF_PROMISC are handled.
		 */
		if (ifp->if_flags & IFF_UP)
			pcn_init(ifp);
		else if (ifp->if_flags & IFF_RUNNING)
			pcn_stop(ifp, 1);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		/* Multicast list changed; reprogram only if running. */
		if (ifp->if_flags & IFF_RUNNING)
			error = pcn_init(ifp);
		else
			error = 0;
	}

	/* Try to get more packets going. */
	pcn_start(ifp);

	splx(s);
	return (error);
}

/*
 * pcn_intr:
 *
 *	Interrupt service routine.
 */
int
pcn_intr(void *arg)
{
	struct pcn_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t csr0;
	int wantinit, handled = 0;

	for (wantinit = 0; wantinit == 0;) {
		csr0 = pcn_csr_read(sc, LE_CSR0);
		if ((csr0 & LE_C0_INTR) == 0)
			break;

		/* ACK the bits and re-enable interrupts.
*/ 1163 pcn_csr_write(sc, LE_CSR0, csr0 & 1164 (LE_C0_INEA|LE_C0_BABL|LE_C0_MISS|LE_C0_MERR|LE_C0_RINT| 1165 LE_C0_TINT|LE_C0_IDON)); 1166 1167 handled = 1; 1168 1169 if (csr0 & LE_C0_RINT) 1170 wantinit = pcn_rxintr(sc); 1171 1172 if (csr0 & LE_C0_TINT) 1173 pcn_txintr(sc); 1174 1175 if (csr0 & LE_C0_ERR) { 1176 if (csr0 & LE_C0_BABL) 1177 ifp->if_oerrors++; 1178 if (csr0 & LE_C0_MISS) 1179 ifp->if_ierrors++; 1180 if (csr0 & LE_C0_MERR) { 1181 printf("%s: memory error\n", 1182 sc->sc_dev.dv_xname); 1183 wantinit = 1; 1184 break; 1185 } 1186 } 1187 1188 if ((csr0 & LE_C0_RXON) == 0) { 1189 printf("%s: receiver disabled\n", 1190 sc->sc_dev.dv_xname); 1191 ifp->if_ierrors++; 1192 wantinit = 1; 1193 } 1194 1195 if ((csr0 & LE_C0_TXON) == 0) { 1196 printf("%s: transmitter disabled\n", 1197 sc->sc_dev.dv_xname); 1198 ifp->if_oerrors++; 1199 wantinit = 1; 1200 } 1201 } 1202 1203 if (handled) { 1204 if (wantinit) 1205 pcn_init(ifp); 1206 1207 /* Try to get more packets going. */ 1208 pcn_start(ifp); 1209 } 1210 1211 return (handled); 1212 } 1213 1214 /* 1215 * pcn_spnd: 1216 * 1217 * Suspend the chip. 1218 */ 1219 void 1220 pcn_spnd(struct pcn_softc *sc) 1221 { 1222 int i; 1223 1224 pcn_csr_write(sc, LE_CSR5, sc->sc_csr5 | LE_C5_SPND); 1225 1226 for (i = 0; i < 10000; i++) { 1227 if (pcn_csr_read(sc, LE_CSR5) & LE_C5_SPND) 1228 return; 1229 delay(5); 1230 } 1231 1232 printf("%s: WARNING: chip failed to enter suspended state\n", 1233 sc->sc_dev.dv_xname); 1234 } 1235 1236 /* 1237 * pcn_txintr: 1238 * 1239 * Helper; handle transmit interrupts. 1240 */ 1241 void 1242 pcn_txintr(struct pcn_softc *sc) 1243 { 1244 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1245 struct pcn_txsoft *txs; 1246 uint32_t tmd1, tmd2, tmd; 1247 int i, j; 1248 1249 ifp->if_flags &= ~IFF_OACTIVE; 1250 1251 /* 1252 * Go through our Tx list and free mbufs for those 1253 * frames which have been transmitted. 
1254 */ 1255 for (i = sc->sc_txsdirty; sc->sc_txsfree != PCN_TXQUEUELEN; 1256 i = PCN_NEXTTXS(i), sc->sc_txsfree++) { 1257 txs = &sc->sc_txsoft[i]; 1258 1259 PCN_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs, 1260 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1261 1262 tmd1 = letoh32(sc->sc_txdescs[txs->txs_lastdesc].tmd1); 1263 if (tmd1 & LE_T1_OWN) 1264 break; 1265 1266 /* 1267 * Slightly annoying -- we have to loop through the 1268 * descriptors we've used looking for ERR, since it 1269 * can appear on any descriptor in the chain. 1270 */ 1271 for (j = txs->txs_firstdesc;; j = PCN_NEXTTX(j)) { 1272 tmd = letoh32(sc->sc_txdescs[j].tmd1); 1273 if (tmd & LE_T1_ERR) { 1274 ifp->if_oerrors++; 1275 if (sc->sc_swstyle == LE_B20_SSTYLE_PCNETPCI3) 1276 tmd2 = letoh32(sc->sc_txdescs[j].tmd0); 1277 else 1278 tmd2 = letoh32(sc->sc_txdescs[j].tmd2); 1279 if (tmd2 & LE_T2_UFLO) { 1280 if (sc->sc_xmtsp < LE_C80_XMTSP_MAX) { 1281 sc->sc_xmtsp++; 1282 printf("%s: transmit " 1283 "underrun; new threshold: " 1284 "%s\n", 1285 sc->sc_dev.dv_xname, 1286 sc->sc_xmtsp_desc[ 1287 sc->sc_xmtsp]); 1288 pcn_spnd(sc); 1289 pcn_csr_write(sc, LE_CSR80, 1290 LE_C80_RCVFW(sc->sc_rcvfw) | 1291 LE_C80_XMTSP(sc->sc_xmtsp) | 1292 LE_C80_XMTFW(sc->sc_xmtfw)); 1293 pcn_csr_write(sc, LE_CSR5, 1294 sc->sc_csr5); 1295 } else { 1296 printf("%s: transmit " 1297 "underrun\n", 1298 sc->sc_dev.dv_xname); 1299 } 1300 } else if (tmd2 & LE_T2_BUFF) { 1301 printf("%s: transmit buffer error\n", 1302 sc->sc_dev.dv_xname); 1303 } 1304 if (tmd2 & LE_T2_LCOL) 1305 ifp->if_collisions++; 1306 if (tmd2 & LE_T2_RTRY) 1307 ifp->if_collisions += 16; 1308 goto next_packet; 1309 } 1310 if (j == txs->txs_lastdesc) 1311 break; 1312 } 1313 if (tmd1 & LE_T1_ONE) 1314 ifp->if_collisions++; 1315 else if (tmd & LE_T1_MORE) { 1316 /* Real number is unknown. 
*/ 1317 ifp->if_collisions += 2; 1318 } 1319 ifp->if_opackets++; 1320 next_packet: 1321 sc->sc_txfree += txs->txs_dmamap->dm_nsegs; 1322 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap, 1323 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1324 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); 1325 m_freem(txs->txs_mbuf); 1326 txs->txs_mbuf = NULL; 1327 } 1328 1329 /* Update the dirty transmit buffer pointer. */ 1330 sc->sc_txsdirty = i; 1331 1332 /* 1333 * If there are no more pending transmissions, cancel the watchdog 1334 * timer. 1335 */ 1336 if (sc->sc_txsfree == PCN_TXQUEUELEN) 1337 ifp->if_timer = 0; 1338 } 1339 1340 /* 1341 * pcn_rxintr: 1342 * 1343 * Helper; handle receive interrupts. 1344 */ 1345 int 1346 pcn_rxintr(struct pcn_softc *sc) 1347 { 1348 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1349 struct pcn_rxsoft *rxs; 1350 struct mbuf *m; 1351 uint32_t rmd1; 1352 int i, len; 1353 1354 for (i = sc->sc_rxptr;; i = PCN_NEXTRX(i)) { 1355 rxs = &sc->sc_rxsoft[i]; 1356 1357 PCN_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1358 1359 rmd1 = letoh32(sc->sc_rxdescs[i].rmd1); 1360 1361 if (rmd1 & LE_R1_OWN) 1362 break; 1363 1364 /* 1365 * Check for errors and make sure the packet fit into 1366 * a single buffer. We have structured this block of 1367 * code the way it is in order to compress it into 1368 * one test in the common case (no error). 1369 */ 1370 if (__predict_false((rmd1 & (LE_R1_STP|LE_R1_ENP|LE_R1_ERR)) != 1371 (LE_R1_STP|LE_R1_ENP))) { 1372 /* Make sure the packet is in a single buffer. */ 1373 if ((rmd1 & (LE_R1_STP|LE_R1_ENP)) != 1374 (LE_R1_STP|LE_R1_ENP)) { 1375 printf("%s: packet spilled into next buffer\n", 1376 sc->sc_dev.dv_xname); 1377 return (1); /* pcn_intr() will re-init */ 1378 } 1379 1380 /* 1381 * If the packet had an error, simple recycle the 1382 * buffer. 1383 */ 1384 if (rmd1 & LE_R1_ERR) { 1385 ifp->if_ierrors++; 1386 /* 1387 * If we got an overflow error, chances 1388 * are there will be a CRC error. 
In 1389 * this case, just print the overflow 1390 * error, and skip the others. 1391 */ 1392 if (rmd1 & LE_R1_OFLO) 1393 printf("%s: overflow error\n", 1394 sc->sc_dev.dv_xname); 1395 else { 1396 #define PRINTIT(x, str) \ 1397 if (rmd1 & (x)) \ 1398 printf("%s: %s\n", \ 1399 sc->sc_dev.dv_xname, str); 1400 PRINTIT(LE_R1_FRAM, "framing error"); 1401 PRINTIT(LE_R1_CRC, "CRC error"); 1402 PRINTIT(LE_R1_BUFF, "buffer error"); 1403 } 1404 #undef PRINTIT 1405 PCN_INIT_RXDESC(sc, i); 1406 continue; 1407 } 1408 } 1409 1410 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 1411 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 1412 1413 /* 1414 * No errors; receive the packet. 1415 */ 1416 if (sc->sc_swstyle == LE_B20_SSTYLE_PCNETPCI3) 1417 len = letoh32(sc->sc_rxdescs[i].rmd0) & LE_R1_BCNT_MASK; 1418 else 1419 len = letoh32(sc->sc_rxdescs[i].rmd2) & LE_R1_BCNT_MASK; 1420 1421 /* 1422 * The LANCE family includes the CRC with every packet; 1423 * trim it off here. 1424 */ 1425 len -= ETHER_CRC_LEN; 1426 1427 /* 1428 * If the packet is small enough to fit in a 1429 * single header mbuf, allocate one and copy 1430 * the data into it. This greatly reduces 1431 * memory consumption when we receive lots 1432 * of small packets. 1433 * 1434 * Otherwise, we add a new buffer to the receive 1435 * chain. If this fails, we drop the packet and 1436 * recycle the old buffer. 
1437 */ 1438 if (pcn_copy_small != 0 && len <= (MHLEN - 2)) { 1439 MGETHDR(m, M_DONTWAIT, MT_DATA); 1440 if (m == NULL) 1441 goto dropit; 1442 m->m_data += 2; 1443 memcpy(mtod(m, caddr_t), 1444 mtod(rxs->rxs_mbuf, caddr_t), len); 1445 PCN_INIT_RXDESC(sc, i); 1446 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 1447 rxs->rxs_dmamap->dm_mapsize, 1448 BUS_DMASYNC_PREREAD); 1449 } else { 1450 m = rxs->rxs_mbuf; 1451 if (pcn_add_rxbuf(sc, i) != 0) { 1452 dropit: 1453 ifp->if_ierrors++; 1454 PCN_INIT_RXDESC(sc, i); 1455 bus_dmamap_sync(sc->sc_dmat, 1456 rxs->rxs_dmamap, 0, 1457 rxs->rxs_dmamap->dm_mapsize, 1458 BUS_DMASYNC_PREREAD); 1459 continue; 1460 } 1461 } 1462 1463 m->m_pkthdr.rcvif = ifp; 1464 m->m_pkthdr.len = m->m_len = len; 1465 1466 #if NBPFILTER > 0 1467 /* Pass this up to any BPF listeners. */ 1468 if (ifp->if_bpf) 1469 bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN); 1470 #endif /* NBPFILTER > 0 */ 1471 1472 /* Pass it on. */ 1473 ether_input_mbuf(ifp, m); 1474 ifp->if_ipackets++; 1475 } 1476 1477 /* Update the receive pointer. */ 1478 sc->sc_rxptr = i; 1479 return (0); 1480 } 1481 1482 /* 1483 * pcn_tick: 1484 * 1485 * One second timer, used to tick the MII. 1486 */ 1487 void 1488 pcn_tick(void *arg) 1489 { 1490 struct pcn_softc *sc = arg; 1491 int s; 1492 1493 s = splnet(); 1494 mii_tick(&sc->sc_mii); 1495 splx(s); 1496 1497 timeout_add_sec(&sc->sc_tick_timeout, 1); 1498 } 1499 1500 /* 1501 * pcn_reset: 1502 * 1503 * Perform a soft reset on the PCnet-PCI. 1504 */ 1505 void 1506 pcn_reset(struct pcn_softc *sc) 1507 { 1508 1509 /* 1510 * The PCnet-PCI chip is reset by reading from the 1511 * RESET register. Note that while the NE2100 LANCE 1512 * boards require a write after the read, the PCnet-PCI 1513 * chips do not require this. 1514 * 1515 * Since we don't know if we're in 16-bit or 32-bit 1516 * mode right now, issue both (it's safe) in the 1517 * hopes that one will succeed. 
1518 */ 1519 (void) bus_space_read_2(sc->sc_st, sc->sc_sh, PCN16_RESET); 1520 (void) bus_space_read_4(sc->sc_st, sc->sc_sh, PCN32_RESET); 1521 1522 /* Wait 1ms for it to finish. */ 1523 delay(1000); 1524 1525 /* 1526 * Select 32-bit I/O mode by issuing a 32-bit write to the 1527 * RDP. Since the RAP is 0 after a reset, writing a 0 1528 * to RDP is safe (since it simply clears CSR0). 1529 */ 1530 bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RDP, 0); 1531 } 1532 1533 /* 1534 * pcn_init: [ifnet interface function] 1535 * 1536 * Initialize the interface. Must be called at splnet(). 1537 */ 1538 int 1539 pcn_init(struct ifnet *ifp) 1540 { 1541 struct pcn_softc *sc = ifp->if_softc; 1542 struct pcn_rxsoft *rxs; 1543 uint8_t *enaddr = LLADDR(ifp->if_sadl); 1544 int i, error = 0; 1545 uint32_t reg; 1546 1547 /* Cancel any pending I/O. */ 1548 pcn_stop(ifp, 0); 1549 1550 /* Reset the chip to a known state. */ 1551 pcn_reset(sc); 1552 1553 /* 1554 * On the Am79c970, select SSTYLE 2, and SSTYLE 3 on everything 1555 * else. 1556 * 1557 * XXX It'd be really nice to use SSTYLE 2 on all the chips, 1558 * because the structure layout is compatible with ILACC, 1559 * but the burst mode is only available in SSTYLE 3, and 1560 * burst mode should provide some performance enhancement. 1561 */ 1562 if (sc->sc_variant->pcv_chipid == PARTID_Am79c970) 1563 sc->sc_swstyle = LE_B20_SSTYLE_PCNETPCI2; 1564 else 1565 sc->sc_swstyle = LE_B20_SSTYLE_PCNETPCI3; 1566 pcn_bcr_write(sc, LE_BCR20, sc->sc_swstyle); 1567 1568 /* Initialize the transmit descriptor ring. */ 1569 memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs)); 1570 PCN_CDTXSYNC(sc, 0, PCN_NTXDESC, 1571 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1572 sc->sc_txfree = PCN_NTXDESC; 1573 sc->sc_txnext = 0; 1574 1575 /* Initialize the transmit job descriptors. 
*/ 1576 for (i = 0; i < PCN_TXQUEUELEN; i++) 1577 sc->sc_txsoft[i].txs_mbuf = NULL; 1578 sc->sc_txsfree = PCN_TXQUEUELEN; 1579 sc->sc_txsnext = 0; 1580 sc->sc_txsdirty = 0; 1581 1582 /* 1583 * Initialize the receive descriptor and receive job 1584 * descriptor rings. 1585 */ 1586 for (i = 0; i < PCN_NRXDESC; i++) { 1587 rxs = &sc->sc_rxsoft[i]; 1588 if (rxs->rxs_mbuf == NULL) { 1589 if ((error = pcn_add_rxbuf(sc, i)) != 0) { 1590 printf("%s: unable to allocate or map rx " 1591 "buffer %d, error = %d\n", 1592 sc->sc_dev.dv_xname, i, error); 1593 /* 1594 * XXX Should attempt to run with fewer receive 1595 * XXX buffers instead of just failing. 1596 */ 1597 pcn_rxdrain(sc); 1598 goto out; 1599 } 1600 } else 1601 PCN_INIT_RXDESC(sc, i); 1602 } 1603 sc->sc_rxptr = 0; 1604 1605 /* Initialize MODE for the initialization block. */ 1606 sc->sc_mode = 0; 1607 if (ifp->if_flags & IFF_PROMISC) 1608 sc->sc_mode |= LE_C15_PROM; 1609 if ((ifp->if_flags & IFF_BROADCAST) == 0) 1610 sc->sc_mode |= LE_C15_DRCVBC; 1611 1612 /* 1613 * If we have MII, simply select MII in the MODE register, 1614 * and clear ASEL. Otherwise, let ASEL stand (for now), 1615 * and leave PORTSEL alone (it is ignored with ASEL is set). 1616 */ 1617 if (sc->sc_flags & PCN_F_HAS_MII) { 1618 pcn_bcr_write(sc, LE_BCR2, 1619 pcn_bcr_read(sc, LE_BCR2) & ~LE_B2_ASEL); 1620 sc->sc_mode |= LE_C15_PORTSEL(PORTSEL_MII); 1621 1622 /* 1623 * Disable MII auto-negotiation. We handle that in 1624 * our own MII layer. 1625 */ 1626 pcn_bcr_write(sc, LE_BCR32, 1627 pcn_bcr_read(sc, LE_BCR32) | LE_B32_DANAS); 1628 } 1629 1630 /* 1631 * Set the Tx and Rx descriptor ring addresses in the init 1632 * block, the TLEN and RLEN other fields of the init block 1633 * MODE register. 
1634 */ 1635 sc->sc_initblock.init_rdra = htole32(PCN_CDRXADDR(sc, 0)); 1636 sc->sc_initblock.init_tdra = htole32(PCN_CDTXADDR(sc, 0)); 1637 sc->sc_initblock.init_mode = htole32(sc->sc_mode | 1638 ((ffs(PCN_NTXDESC) - 1) << 28) | 1639 ((ffs(PCN_NRXDESC) - 1) << 20)); 1640 1641 /* Set the station address in the init block. */ 1642 sc->sc_initblock.init_padr[0] = htole32(enaddr[0] | 1643 (enaddr[1] << 8) | (enaddr[2] << 16) | (enaddr[3] << 24)); 1644 sc->sc_initblock.init_padr[1] = htole32(enaddr[4] | 1645 (enaddr[5] << 8)); 1646 1647 /* Set the multicast filter in the init block. */ 1648 pcn_set_filter(sc); 1649 1650 /* Initialize CSR3. */ 1651 pcn_csr_write(sc, LE_CSR3, LE_C3_MISSM|LE_C3_IDONM|LE_C3_DXSUFLO); 1652 1653 /* Initialize CSR4. */ 1654 pcn_csr_write(sc, LE_CSR4, LE_C4_DMAPLUS|LE_C4_APAD_XMT| 1655 LE_C4_MFCOM|LE_C4_RCVCCOM|LE_C4_TXSTRTM); 1656 1657 /* Initialize CSR5. */ 1658 sc->sc_csr5 = LE_C5_LTINTEN|LE_C5_SINTE; 1659 pcn_csr_write(sc, LE_CSR5, sc->sc_csr5); 1660 1661 /* 1662 * If we have an Am79c971 or greater, initialize CSR7. 1663 * 1664 * XXX Might be nice to use the MII auto-poll interrupt someday. 1665 */ 1666 switch (sc->sc_variant->pcv_chipid) { 1667 case PARTID_Am79c970: 1668 case PARTID_Am79c970A: 1669 /* Not available on these chips. */ 1670 break; 1671 1672 default: 1673 pcn_csr_write(sc, LE_CSR7, LE_C7_FASTSPNDE); 1674 break; 1675 } 1676 1677 /* 1678 * On the Am79c970A and greater, initialize BCR18 to 1679 * enable burst mode. 1680 * 1681 * Also enable the "no underflow" option on the Am79c971 and 1682 * higher, which prevents the chip from generating transmit 1683 * underflows, yet sill provides decent performance. Note if 1684 * chip is not connected to external SRAM, then we still have 1685 * to handle underflow errors (the NOUFLO bit is ignored in 1686 * that case). 
1687 */ 1688 reg = pcn_bcr_read(sc, LE_BCR18); 1689 switch (sc->sc_variant->pcv_chipid) { 1690 case PARTID_Am79c970: 1691 break; 1692 1693 case PARTID_Am79c970A: 1694 reg |= LE_B18_BREADE|LE_B18_BWRITE; 1695 break; 1696 1697 default: 1698 reg |= LE_B18_BREADE|LE_B18_BWRITE|LE_B18_NOUFLO; 1699 break; 1700 } 1701 pcn_bcr_write(sc, LE_BCR18, reg); 1702 1703 /* 1704 * Initialize CSR80 (FIFO thresholds for Tx and Rx). 1705 */ 1706 pcn_csr_write(sc, LE_CSR80, LE_C80_RCVFW(sc->sc_rcvfw) | 1707 LE_C80_XMTSP(sc->sc_xmtsp) | LE_C80_XMTFW(sc->sc_xmtfw)); 1708 1709 /* 1710 * Send the init block to the chip, and wait for it 1711 * to be processed. 1712 */ 1713 PCN_CDINITSYNC(sc, BUS_DMASYNC_PREWRITE); 1714 pcn_csr_write(sc, LE_CSR1, PCN_CDINITADDR(sc) & 0xffff); 1715 pcn_csr_write(sc, LE_CSR2, (PCN_CDINITADDR(sc) >> 16) & 0xffff); 1716 pcn_csr_write(sc, LE_CSR0, LE_C0_INIT); 1717 delay(100); 1718 for (i = 0; i < 10000; i++) { 1719 if (pcn_csr_read(sc, LE_CSR0) & LE_C0_IDON) 1720 break; 1721 delay(10); 1722 } 1723 PCN_CDINITSYNC(sc, BUS_DMASYNC_POSTWRITE); 1724 if (i == 10000) { 1725 printf("%s: timeout processing init block\n", 1726 sc->sc_dev.dv_xname); 1727 error = EIO; 1728 goto out; 1729 } 1730 1731 /* Set the media. */ 1732 (void) (*sc->sc_mii.mii_media.ifm_change)(ifp); 1733 1734 /* Enable interrupts and external activity (and ACK IDON). */ 1735 pcn_csr_write(sc, LE_CSR0, LE_C0_INEA|LE_C0_STRT|LE_C0_IDON); 1736 1737 if (sc->sc_flags & PCN_F_HAS_MII) { 1738 /* Start the one second MII clock. */ 1739 timeout_add_sec(&sc->sc_tick_timeout, 1); 1740 } 1741 1742 /* ...all done! */ 1743 ifp->if_flags |= IFF_RUNNING; 1744 ifp->if_flags &= ~IFF_OACTIVE; 1745 1746 out: 1747 if (error) 1748 printf("%s: interface not running\n", sc->sc_dev.dv_xname); 1749 return (error); 1750 } 1751 1752 /* 1753 * pcn_rxdrain: 1754 * 1755 * Drain the receive queue. 
1756 */ 1757 void 1758 pcn_rxdrain(struct pcn_softc *sc) 1759 { 1760 struct pcn_rxsoft *rxs; 1761 int i; 1762 1763 for (i = 0; i < PCN_NRXDESC; i++) { 1764 rxs = &sc->sc_rxsoft[i]; 1765 if (rxs->rxs_mbuf != NULL) { 1766 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); 1767 m_freem(rxs->rxs_mbuf); 1768 rxs->rxs_mbuf = NULL; 1769 } 1770 } 1771 } 1772 1773 /* 1774 * pcn_stop: [ifnet interface function] 1775 * 1776 * Stop transmission on the interface. 1777 */ 1778 void 1779 pcn_stop(struct ifnet *ifp, int disable) 1780 { 1781 struct pcn_softc *sc = ifp->if_softc; 1782 struct pcn_txsoft *txs; 1783 int i; 1784 1785 if (sc->sc_flags & PCN_F_HAS_MII) { 1786 /* Stop the one second clock. */ 1787 timeout_del(&sc->sc_tick_timeout); 1788 1789 /* Down the MII. */ 1790 mii_down(&sc->sc_mii); 1791 } 1792 1793 /* Mark the interface as down and cancel the watchdog timer. */ 1794 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1795 ifp->if_timer = 0; 1796 1797 /* Stop the chip. */ 1798 pcn_csr_write(sc, LE_CSR0, LE_C0_STOP); 1799 1800 /* Release any queued transmit buffers. */ 1801 for (i = 0; i < PCN_TXQUEUELEN; i++) { 1802 txs = &sc->sc_txsoft[i]; 1803 if (txs->txs_mbuf != NULL) { 1804 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); 1805 m_freem(txs->txs_mbuf); 1806 txs->txs_mbuf = NULL; 1807 } 1808 } 1809 1810 if (disable) 1811 pcn_rxdrain(sc); 1812 } 1813 1814 /* 1815 * pcn_add_rxbuf: 1816 * 1817 * Add a receive buffer to the indicated descriptor. 
 */
int
pcn_add_rxbuf(struct pcn_softc *sc, int idx)
{
	struct pcn_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	/* Allocate an mbuf header and attach a cluster to it. */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	/* Unload the map left over from any previous buffer. */
	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	/*
	 * Map the whole cluster.  A failure here would leave the ring
	 * in an inconsistent state, so it is treated as fatal.
	 */
	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, error);
		panic("pcn_add_rxbuf");
	}

	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	/* Hand the descriptor (back) to the chip. */
	PCN_INIT_RXDESC(sc, idx);

	return (0);
}

/*
 * pcn_set_filter:
 *
 *	Set up the receive filter.  Note this only updates the logical
 *	address filter in the init block in memory; the chip picks it
 *	up the next time the init block is processed (see pcn_init()).
 */
void
pcn_set_filter(struct pcn_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t crc;

	/*
	 * Set up the multicast address filter by passing all multicast
	 * addresses through a CRC generator, and then using the high
	 * order 6 bits as an index into the 64-bit logical address
	 * filter.  The high order bits select the word, while the rest
	 * of the bits select the bit within the word.
	 */

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC)
		goto allmulti;

	sc->sc_initblock.init_ladrf[0] =
	sc->sc_initblock.init_ladrf[1] =
	sc->sc_initblock.init_ladrf[2] =
	sc->sc_initblock.init_ladrf[3] = 0;

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}

		crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);

		/* Just want the 6 most significant bits. */
		crc >>= 26;

		/* Set the corresponding bit in the filter. */
		sc->sc_initblock.init_ladrf[crc >> 4] |=
		    htole16(1 << (crc & 0xf));

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	return;

 allmulti:
	/* Accept all multicast: set every bit in the logical filter. */
	ifp->if_flags |= IFF_ALLMULTI;
	sc->sc_initblock.init_ladrf[0] =
	sc->sc_initblock.init_ladrf[1] =
	sc->sc_initblock.init_ladrf[2] =
	sc->sc_initblock.init_ladrf[3] = 0xffff;
}

/*
 * pcn_79c970_mediainit:
 *
 *	Initialize media for the Am79c970.
 */
void
pcn_79c970_mediainit(struct pcn_softc *sc)
{
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, pcn_79c970_mediachange,
	    pcn_79c970_mediastatus);

	/*
	 * Full-duplex variants are only offered on the Am79c970A;
	 * the plain Am79c970 gets half-duplex entries only.
	 */
	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_10_5,
	    PORTSEL_AUI, NULL);
	if (sc->sc_variant->pcv_chipid == PARTID_Am79c970A)
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_10_5|IFM_FDX,
		    PORTSEL_AUI, NULL);

	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_10_T,
	    PORTSEL_10T, NULL);
	if (sc->sc_variant->pcv_chipid == PARTID_Am79c970A)
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_10_T|IFM_FDX,
		    PORTSEL_10T, NULL);

	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO,
	    0, NULL);
	if (sc->sc_variant->pcv_chipid == PARTID_Am79c970A)
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO|IFM_FDX,
		    0, NULL);

	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
}

/*
 * pcn_79c970_mediastatus:	[ifmedia interface function]
 *
 *	Get the current interface media status (Am79c970 version).
 */
void
pcn_79c970_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct pcn_softc *sc = ifp->if_softc;

	/*
	 * The currently selected media is always the active media.
	 * Note: We have no way to determine what media the AUTO
	 * process picked.
	 */
	ifmr->ifm_active = sc->sc_mii.mii_media.ifm_media;
}

/*
 * pcn_79c970_mediachange:	[ifmedia interface function]
 *
 *	Set hardware to newly-selected media (Am79c970 version).
 */
int
pcn_79c970_mediachange(struct ifnet *ifp)
{
	struct pcn_softc *sc = ifp->if_softc;
	uint32_t reg;

	if (IFM_SUBTYPE(sc->sc_mii.mii_media.ifm_media) == IFM_AUTO) {
		/*
		 * CSR15:PORTSEL doesn't matter.  Just set BCR2:ASEL.
		 */
		reg = pcn_bcr_read(sc, LE_BCR2);
		reg |= LE_B2_ASEL;
		pcn_bcr_write(sc, LE_BCR2, reg);
	} else {
		/*
		 * Clear BCR2:ASEL and set the new CSR15:PORTSEL value.
		 */
		reg = pcn_bcr_read(sc, LE_BCR2);
		reg &= ~LE_B2_ASEL;
		pcn_bcr_write(sc, LE_BCR2, reg);

		reg = pcn_csr_read(sc, LE_CSR15);
		reg = (reg & ~LE_C15_PORTSEL(PORTSEL_MASK)) |
		    LE_C15_PORTSEL(sc->sc_mii.mii_media.ifm_cur->ifm_data);
		pcn_csr_write(sc, LE_CSR15, reg);
	}

	/* Program full-duplex control (BCR9) to match the new media. */
	if ((sc->sc_mii.mii_media.ifm_media & IFM_FDX) != 0) {
		reg = LE_B9_FDEN;
		/* AUI full-duplex needs its own enable bit as well. */
		if (IFM_SUBTYPE(sc->sc_mii.mii_media.ifm_media) == IFM_10_5)
			reg |= LE_B9_AUIFD;
		pcn_bcr_write(sc, LE_BCR9, reg);
	} else
		pcn_bcr_write(sc, LE_BCR9, 0);

	return (0);
}

/*
 * pcn_79c971_mediainit:
 *
 *	Initialize media for the Am79c971.
 */
void
pcn_79c971_mediainit(struct pcn_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;

	/* We have MII. */
	sc->sc_flags |= PCN_F_HAS_MII;

	/*
	 * The built-in 10BASE-T interface is mapped to the MII
	 * on the PCNet-FAST.  Unfortunately, there's no EEPROM
	 * word that tells us which PHY to use.
	 * This driver used to ignore all but the first PHY to
	 * answer, but this code was removed to support multiple
	 * external PHYs.  As the default instance will be the first
	 * one to answer, no harm is done by letting the possibly
	 * non-connected internal PHY show up.
	 */

	/* Initialize our media structures and probe the MII. */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = pcn_mii_readreg;
	sc->sc_mii.mii_writereg = pcn_mii_writereg;
	sc->sc_mii.mii_statchg = pcn_mii_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, pcn_79c971_mediachange,
	    pcn_79c971_mediastatus);

	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		/* No PHY found; offer a "none" placeholder medium. */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
}

/*
 * pcn_79c971_mediastatus:	[ifmedia interface function]
 *
 *	Get the current interface media status (Am79c971 version).
 */
void
pcn_79c971_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct pcn_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
}

/*
 * pcn_79c971_mediachange:	[ifmedia interface function]
 *
 *	Set hardware to newly-selected media (Am79c971 version).
 */
int
pcn_79c971_mediachange(struct ifnet *ifp)
{
	struct pcn_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_UP)
		mii_mediachg(&sc->sc_mii);
	return (0);
}

/*
 * pcn_mii_readreg:	[mii interface function]
 *
 *	Read a PHY register on the MII.  The PHY/register address is
 *	written to BCR33 and the data word read back from BCR34.
 */
int
pcn_mii_readreg(struct device *self, int phy, int reg)
{
	struct pcn_softc *sc = (void *) self;
	uint32_t rv;

	pcn_bcr_write(sc, LE_BCR33, reg | (phy << PHYAD_SHIFT));
	rv = pcn_bcr_read(sc, LE_BCR34) & LE_B34_MIIMD;
	/* An all-ones data word is treated as "no device responded". */
	if (rv == 0xffff)
		return (0);

	return (rv);
}

/*
 * pcn_mii_writereg:	[mii interface function]
 *
 *	Write a PHY register on the MII.
2112 */ 2113 void 2114 pcn_mii_writereg(struct device *self, int phy, int reg, int val) 2115 { 2116 struct pcn_softc *sc = (void *) self; 2117 2118 pcn_bcr_write(sc, LE_BCR33, reg | (phy << PHYAD_SHIFT)); 2119 pcn_bcr_write(sc, LE_BCR34, val); 2120 } 2121 2122 /* 2123 * pcn_mii_statchg: [mii interface function] 2124 * 2125 * Callback from MII layer when media changes. 2126 */ 2127 void 2128 pcn_mii_statchg(struct device *self) 2129 { 2130 struct pcn_softc *sc = (void *) self; 2131 2132 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0) 2133 pcn_bcr_write(sc, LE_BCR9, LE_B9_FDEN); 2134 else 2135 pcn_bcr_write(sc, LE_BCR9, 0); 2136 } 2137