1 /* $NetBSD: if_pcn.c,v 1.4 2001/09/02 13:17:54 enami Exp $ */ 2 3 /* 4 * Copyright 2001 Wasabi Systems, Inc. 5 * All rights reserved. 6 * 7 * Written by Jason R. Thorpe for Wasabi Systems, Inc. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. All advertising materials mentioning features or use of this software 18 * must display the following acknowledgement: 19 * This product includes software developed for the NetBSD Project by 20 * Wasabi Systems, Inc. 21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse 22 * or promote products derived from this software without specific prior 23 * written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC 29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 35 * POSSIBILITY OF SUCH DAMAGE. 
36 */ 37 38 /* 39 * Device driver for the AMD PCnet-PCI series of Ethernet 40 * chips: 41 * 42 * * Am79c970 PCnet-PCI Single-Chip Ethernet Controller for PCI 43 * Local Bus 44 * 45 * * Am79c970A PCnet-PCI II Single-Chip Full-Duplex Ethernet Controller 46 * for PCI Local Bus 47 * 48 * * Am79c971 PCnet-FAST Single-Chip Full-Duplex 10/100Mbps 49 * Ethernet Controller for PCI Local Bus 50 * 51 * * Am79c972 PCnet-FAST+ Enhanced 10/100Mbps PCI Ethernet Controller 52 * with OnNow Support 53 * 54 * * Am79c973/Am79c975 PCnet-FAST III Single-Chip 10/100Mbps PCI 55 * Ethernet Controller with Integrated PHY 56 * 57 * This also supports the virtual PCnet-PCI Ethernet interface found 58 * in VMware. 59 * 60 * TODO: 61 * 62 * * Split this into bus-specific and bus-independent portions. 63 * The core could also be used for the ILACC (Am79900) 32-bit 64 * Ethernet chip (XXX only if we use an ILACC-compatible SWSTYLE). 65 */ 66 67 #include "bpfilter.h" 68 69 #include <sys/param.h> 70 #include <sys/systm.h> 71 #include <sys/callout.h> 72 #include <sys/mbuf.h> 73 #include <sys/malloc.h> 74 #include <sys/kernel.h> 75 #include <sys/socket.h> 76 #include <sys/ioctl.h> 77 #include <sys/errno.h> 78 #include <sys/device.h> 79 #include <sys/queue.h> 80 81 #include <uvm/uvm_extern.h> /* for PAGE_SIZE */ 82 83 #include <net/if.h> 84 #include <net/if_dl.h> 85 #include <net/if_media.h> 86 #include <net/if_ether.h> 87 88 #if NBPFILTER > 0 89 #include <net/bpf.h> 90 #endif 91 92 #include <machine/bus.h> 93 #include <machine/intr.h> 94 #include <machine/endian.h> 95 96 #include <dev/mii/mii.h> 97 #include <dev/mii/miivar.h> 98 99 #include <dev/ic/am79900reg.h> 100 #include <dev/ic/lancereg.h> 101 102 #include <dev/pci/pcireg.h> 103 #include <dev/pci/pcivar.h> 104 #include <dev/pci/pcidevs.h> 105 106 #include <dev/pci/if_pcnreg.h> 107 108 /* 109 * Transmit descriptor list size. This is arbitrary, but allocate 110 * enough descriptors for 128 pending transmissions, and 4 segments 111 * per packet. 
This MUST work out to a power of 2.
 *
 * NOTE: We can't have any more than 512 Tx descriptors, SO BE CAREFUL!
 *
 * So we play a little trick here.  We give each packet up to 8
 * DMA segments, but only allocate 4 DMA segments per packet.
 * The transmit logic can deal with this, we just are hoping to
 * sneak by.
 */
#define	PCN_NTXSEGS		8	/* max DMA segments accepted per packet */
#define	PCN_NTXSEGS_ALLOC	4	/* DMA segments actually reserved per packet */

#define	PCN_TXQUEUELEN		128
#define	PCN_TXQUEUELEN_MASK	(PCN_TXQUEUELEN - 1)
#define	PCN_NTXDESC		(PCN_TXQUEUELEN * PCN_NTXSEGS_ALLOC)
#define	PCN_NTXDESC_MASK	(PCN_NTXDESC - 1)
#define	PCN_NEXTTX(x)		(((x) + 1) & PCN_NTXDESC_MASK)
#define	PCN_NEXTTXS(x)		(((x) + 1) & PCN_TXQUEUELEN_MASK)

/* Tx interrupt every N + 1 packets. */
#define	PCN_TXINTR_MASK		7

/*
 * Receive descriptor list size.  We have one Rx buffer per incoming
 * packet, so this logic is a little simpler.
 */
#define	PCN_NRXDESC		128
#define	PCN_NRXDESC_MASK	(PCN_NRXDESC - 1)
#define	PCN_NEXTRX(x)		(((x) + 1) & PCN_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the PCnet chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct pcn_control_data {
	/* The transmit descriptors. */
	struct letmd pcd_txdescs[PCN_NTXDESC];

	/* The receive descriptors. */
	struct lermd pcd_rxdescs[PCN_NRXDESC];

	/* The init block. */
	struct leinit pcd_initblock;
};

/* Byte offsets of the above members within the control-data clump. */
#define	PCN_CDOFF(x)	offsetof(struct pcn_control_data, x)
#define	PCN_CDTXOFF(x)	PCN_CDOFF(pcd_txdescs[(x)])
#define	PCN_CDRXOFF(x)	PCN_CDOFF(pcd_rxdescs[(x)])
#define	PCN_CDINITOFF	PCN_CDOFF(pcd_initblock)

/*
 * Software state for transmit jobs.
 */
struct pcn_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
};

/*
 * Software state for receive jobs.
 */
struct pcn_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

/*
 * Description of Rx FIFO watermarks for various revisions.
 * Indexed by the RCVFW value; NULL marks a reserved encoding.
 */
const char *pcn_79c970_rcvfw[] = {
	"16 bytes",
	"64 bytes",
	"128 bytes",
	NULL,
};

const char *pcn_79c971_rcvfw[] = {
	"16 bytes",
	"64 bytes",
	"112 bytes",
	NULL,
};

/*
 * Description of Tx start points for various revisions.
 */
const char *pcn_79c970_xmtsp[] = {
	"8 bytes",
	"64 bytes",
	"128 bytes",
	"248 bytes",
};

const char *pcn_79c971_xmtsp[] = {
	"20 bytes",
	"64 bytes",
	"128 bytes",
	"248 bytes",
};

const char *pcn_79c971_xmtsp_sram[] = {
	"44 bytes",
	"64 bytes",
	"128 bytes",
	"store-and-forward",
};

/*
 * Description of Tx FIFO watermarks for various revisions.
 */
const char *pcn_79c970_xmtfw[] = {
	"16 bytes",
	"64 bytes",
	"128 bytes",
	NULL,
};

const char *pcn_79c971_xmtfw[] = {
	"16 bytes",
	"64 bytes",
	"108 bytes",
	NULL,
};

/*
 * Software state per device.
 */
struct pcn_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* Ethernet common data */
	void *sc_sdhook;		/* shutdown hook */

	/* Points to our media routines, etc.
 */
	const struct pcn_variant *sc_variant;

	void *sc_ih;			/* interrupt cookie */

	struct mii_data sc_mii;		/* MII/media information */

	struct callout sc_tick_ch;	/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	/* Software state for transmit and receive descriptors. */
	struct pcn_txsoft sc_txsoft[PCN_TXQUEUELEN];
	struct pcn_rxsoft sc_rxsoft[PCN_NRXDESC];

	/* Control data structures */
	struct pcn_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->pcd_txdescs
#define	sc_rxdescs	sc_control_data->pcd_rxdescs
#define	sc_initblock	sc_control_data->pcd_initblock

#ifdef PCN_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txintr;	/* Tx interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_babl;	/* BABL in pcn_intr() */
	struct evcnt sc_ev_miss;	/* MISS in pcn_intr() */
	struct evcnt sc_ev_merr;	/* MERR in pcn_intr() */

	struct evcnt sc_ev_txseg1;	/* Tx packets w/ 1 segment */
	struct evcnt sc_ev_txseg2;	/* Tx packets w/ 2 segments */
	struct evcnt sc_ev_txseg3;	/* Tx packets w/ 3 segments */
	struct evcnt sc_ev_txseg4;	/* Tx packets w/ 4 segments */
	struct evcnt sc_ev_txseg5;	/* Tx packets w/ 5 segments */
	struct evcnt sc_ev_txsegmore;	/* Tx packets w/ more than 5 segments */
	struct evcnt sc_ev_txcopy;	/* Tx copies required */
#endif /* PCN_EVENT_COUNTERS */

	const char **sc_rcvfw_desc;	/* Rx FIFO watermark info */
	int sc_rcvfw;

	const char **sc_xmtsp_desc;	/* Tx start point info */
	int sc_xmtsp;

	const char **sc_xmtfw_desc;	/* Tx FIFO watermark info */
	int sc_xmtfw;

	int sc_flags;			/* misc.
flags; see below */
	int sc_swstyle;			/* the software style in use */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	int sc_rxptr;			/* next ready Rx descriptor/job */

	uint32_t sc_csr5;		/* prototype CSR5 register */
	uint32_t sc_mode;		/* prototype MODE register */
	int sc_phyaddr;			/* PHY address */
};

/* sc_flags */
#define	PCN_F_HAS_MII	0x0001	/* has MII */

#ifdef PCN_EVENT_COUNTERS
#define	PCN_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define	PCN_EVCNT_INCR(ev)	/* nothing */
#endif

/* DMA bus addresses of the Tx/Rx descriptors and the init block. */
#define	PCN_CDTXADDR(sc, x)	((sc)->sc_cddma + PCN_CDTXOFF((x)))
#define	PCN_CDRXADDR(sc, x)	((sc)->sc_cddma + PCN_CDRXOFF((x)))
#define	PCN_CDINITADDR(sc)	((sc)->sc_cddma + PCN_CDINITOFF)

/*
 * Sync `n' Tx descriptors starting at descriptor `x', handling
 * wrap-around of the descriptor ring.
 */
#define	PCN_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > PCN_NTXDESC) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    PCN_CDTXOFF(__x), sizeof(struct letmd) *		\
		    (PCN_NTXDESC - __x), (ops));			\
		__n -= (PCN_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left.
 */ \
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    PCN_CDTXOFF(__x), sizeof(struct letmd) * __n, (ops));	\
} while (/*CONSTCOND*/0)

/* Sync the Rx descriptor at index `x'. */
#define	PCN_CDRXSYNC(sc, x, ops)					\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    PCN_CDRXOFF((x)), sizeof(struct lermd), (ops))

/* Sync the init block. */
#define	PCN_CDINITSYNC(sc, ops)						\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    PCN_CDINITOFF, sizeof(struct leinit), (ops))

/*
 * (Re)initialize the Rx descriptor at index `x' to point at the mbuf
 * currently loaded into the corresponding Rx DMA map, and hand the
 * descriptor back to the chip (sets OWN).
 */
#define	PCN_INIT_RXDESC(sc, x)						\
do {									\
	struct pcn_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	struct lermd *__rmd = &(sc)->sc_rxdescs[(x)];			\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + 2;				\
									\
	if ((sc)->sc_swstyle == LE_B20_SSTYLE_PCNETPCI3) {		\
		__rmd->rmd2 =						\
		    htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr + 2);	\
		__rmd->rmd0 = 0;					\
	} else {							\
		__rmd->rmd2 = 0;					\
		__rmd->rmd0 =						\
		    htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr + 2);	\
	}								\
	__rmd->rmd1 = htole32(LE_R1_OWN|LE_R1_ONES|			\
	    (LE_BCNT(MCLBYTES - 2) & LE_R1_BCNT_MASK));			\
	PCN_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);\
} while(/*CONSTCOND*/0)

/* ifnet interface functions */
void	pcn_start(struct ifnet *);
void	pcn_watchdog(struct ifnet *);
int	pcn_ioctl(struct ifnet *, u_long, caddr_t);
int	pcn_init(struct ifnet *);
void	pcn_stop(struct ifnet *, int);

void	pcn_shutdown(void *);

void	pcn_reset(struct pcn_softc *);
void	pcn_rxdrain(struct pcn_softc *);
int	pcn_add_rxbuf(struct pcn_softc *, int);
void	pcn_tick(void *);

void	pcn_spnd(struct pcn_softc *);

void	pcn_set_filter(struct pcn_softc *);

int	pcn_intr(void *);
void	pcn_txintr(struct pcn_softc *);
int	pcn_rxintr(struct pcn_softc *);

int
pcn_mii_readreg(struct device *, int, int);
void	pcn_mii_writereg(struct device *, int, int, int);
void	pcn_mii_statchg(struct device *);

void	pcn_79c970_mediainit(struct pcn_softc *);
int	pcn_79c970_mediachange(struct ifnet *);
void	pcn_79c970_mediastatus(struct ifnet *, struct ifmediareq *);

void	pcn_79c971_mediainit(struct pcn_softc *);
int	pcn_79c971_mediachange(struct ifnet *);
void	pcn_79c971_mediastatus(struct ifnet *, struct ifmediareq *);

/*
 * Description of a PCnet-PCI variant.  Used to select media access
 * method, mostly, and to print a nice description of the chip.
 */
const struct pcn_variant {
	const char *pcv_desc;
	void (*pcv_mediainit)(struct pcn_softc *);
	uint16_t pcv_chipid;
} pcn_variants[] = {
	{ "Am79c970 PCnet-PCI",
	  pcn_79c970_mediainit,
	  PARTID_Am79c970 },

	{ "Am79c970A PCnet-PCI II",
	  pcn_79c970_mediainit,
	  PARTID_Am79c970A },

	{ "Am79c971 PCnet-FAST",
	  pcn_79c971_mediainit,
	  PARTID_Am79c971 },

	{ "Am79c972 PCnet-FAST+",
	  pcn_79c971_mediainit,
	  PARTID_Am79c972 },

	{ "Am79c973 PCnet-FAST III",
	  pcn_79c971_mediainit,
	  PARTID_Am79c973 },

	{ "Am79c975 PCnet-FAST III",
	  pcn_79c971_mediainit,
	  PARTID_Am79c975 },

	/* Must be last; pcn_lookup_variant() falls back to this entry. */
	{ "Unknown PCnet-PCI variant",
	  pcn_79c971_mediainit,
	  0 },
};

/* If non-zero, small received packets are copied instead of swapped (tunable). */
int	pcn_copy_small = 0;

int	pcn_match(struct device *, struct cfdata *, void *);
void	pcn_attach(struct device *, struct device *, void *);

struct cfattach pcn_ca = {
	sizeof(struct pcn_softc), pcn_match, pcn_attach,
};

/*
 * Routines to read and write the PCnet-PCI CSR/BCR space.
 */

/* Read CSR `reg' via the RAP/RDP register pair. */
static __inline uint32_t
pcn_csr_read(struct pcn_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RAP, reg);
	return (bus_space_read_4(sc->sc_st, sc->sc_sh, PCN32_RDP));
}

/* Write `val' to CSR `reg' via the RAP/RDP register pair. */
static __inline void
pcn_csr_write(struct pcn_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RAP, reg);
	bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RDP, val);
}

/* Read BCR `reg' via the RAP/BDP register pair. */
static __inline uint32_t
pcn_bcr_read(struct pcn_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RAP, reg);
	return (bus_space_read_4(sc->sc_st, sc->sc_sh, PCN32_BDP));
}

/* Write `val' to BCR `reg' via the RAP/BDP register pair. */
static __inline void
pcn_bcr_write(struct pcn_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RAP, reg);
	bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_BDP, val);
}

/*
 * Map a part ID (from CSR88) to its pcn_variant table entry.
 */
static const struct pcn_variant *
pcn_lookup_variant(uint16_t chipid)
{
	const struct pcn_variant *pcv;

	for (pcv = pcn_variants; pcv->pcv_chipid != 0; pcv++) {
		if (chipid == pcv->pcv_chipid)
			return (pcv);
	}

	/*
	 * This covers unknown chips, which we simply treat like
	 * a generic PCnet-FAST.
 */
	return (pcv);
}

/*
 * pcn_match:
 *
 *	Autoconfiguration match routine.
 */
int
pcn_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_AMD)
		return (0);

	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_AMD_PCNET_PCI:
		/* Beat if_le_pci.c */
		return (10);
	}

	return (0);
}

/*
 * pcn_attach:
 *
 *	Autoconfiguration attach routine.
 */
void
pcn_attach(struct device *parent, struct device *self, void *aux)
{
	struct pcn_softc *sc = (struct pcn_softc *) self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	bus_space_tag_t iot;
	bus_space_handle_t ioh;
	bus_dma_segment_t seg;
	int ioh_valid;
	int i, rseg, error;
	pcireg_t pmode;
	uint32_t chipid, reg;
	uint8_t enaddr[ETHER_ADDR_LEN];
	int pmreg;

	callout_init(&sc->sc_tick_ch);

	printf(": AMD PCnet-PCI Ethernet\n");

	/*
	 * Map the device.
	 */
	ioh_valid = (pci_mapreg_map(pa, PCN_PCI_CBIO, PCI_MAPREG_TYPE_IO, 0,
	    &iot, &ioh, NULL, NULL) == 0);

	if (ioh_valid) {
		sc->sc_st = iot;
		sc->sc_sh = ioh;
	} else {
		printf("%s: unable to map device registers\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	sc->sc_dmat = pa->pa_dmat;

	/* Make sure bus mastering is enabled. */
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
	    pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG) |
	    PCI_COMMAND_MASTER_ENABLE);

	/* Get it out of power save mode, if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
		pmode = pci_conf_read(pc, pa->pa_tag, pmreg + 4) & 0x3;
		if (pmode == 3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
587 */ 588 printf("%s: unable to wake from power state D3\n", 589 sc->sc_dev.dv_xname); 590 return; 591 } 592 if (pmode != 0) { 593 printf("%s: waking up from power date D%d\n", 594 sc->sc_dev.dv_xname, pmode); 595 pci_conf_write(pc, pa->pa_tag, pmreg + 4, 0); 596 } 597 } 598 599 /* 600 * Reset the chip to a known state. This also puts the 601 * chip into 32-bit mode. 602 */ 603 pcn_reset(sc); 604 605 /* 606 * Read the Ethernet address from the EEPROM. 607 */ 608 for (i = 0; i < ETHER_ADDR_LEN; i++) 609 enaddr[i] = bus_space_read_1(sc->sc_st, sc->sc_sh, 610 PCN32_APROM + i); 611 612 /* 613 * Now that the device is mapped, attempt to figure out what 614 * kind of chip we have. Note that IDL has all 32 bits of 615 * the chip ID when we're in 32-bit mode. 616 */ 617 chipid = pcn_csr_read(sc, LE_CSR88); 618 sc->sc_variant = pcn_lookup_variant(CHIPID_PARTID(chipid)); 619 620 printf("%s: %s rev %d, Ethernet address %s\n", 621 sc->sc_dev.dv_xname, sc->sc_variant->pcv_desc, CHIPID_VER(chipid), 622 ether_sprintf(enaddr)); 623 624 /* 625 * Map and establish our interrupt. 626 */ 627 if (pci_intr_map(pa, &ih)) { 628 printf("%s: unable to map interrupt\n", sc->sc_dev.dv_xname); 629 return; 630 } 631 intrstr = pci_intr_string(pc, ih); 632 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, pcn_intr, sc); 633 if (sc->sc_ih == NULL) { 634 printf("%s: unable to establish interrupt", 635 sc->sc_dev.dv_xname); 636 if (intrstr != NULL) 637 printf(" at %s", intrstr); 638 printf("\n"); 639 return; 640 } 641 printf("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr); 642 643 /* 644 * Allocate the control data structures, and create and load the 645 * DMA map for it. 
 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct pcn_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct pcn_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct pcn_control_data), 1,
	    sizeof(struct pcn_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct pcn_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/* Create the transmit buffer DMA maps. */
	for (i = 0; i < PCN_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    PCN_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/* Create the receive buffer DMA maps. */
	for (i = 0; i < PCN_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/* Initialize our media structures. */
	(*sc->sc_variant->pcv_mediainit)(sc);

	/*
	 * Initialize FIFO watermark info.
 */
	switch (sc->sc_variant->pcv_chipid) {
	case PARTID_Am79c970:
	case PARTID_Am79c970A:
		sc->sc_rcvfw_desc = pcn_79c970_rcvfw;
		sc->sc_xmtsp_desc = pcn_79c970_xmtsp;
		sc->sc_xmtfw_desc = pcn_79c970_xmtfw;
		break;

	default:
		sc->sc_rcvfw_desc = pcn_79c971_rcvfw;
		/*
		 * Read BCR25 to determine how much SRAM is
		 * on the board.  If > 0, then the chip
		 * uses different Start Point thresholds.
		 *
		 * Note BCR25 and BCR26 are loaded from the
		 * EEPROM on RST, and unaffected by S_RESET,
		 * so we don't really have to worry about
		 * them except for this.
		 */
		reg = pcn_bcr_read(sc, LE_BCR25) & 0x00ff;
		if (reg != 0)
			sc->sc_xmtsp_desc = pcn_79c971_xmtsp_sram;
		else
			sc->sc_xmtsp_desc = pcn_79c971_xmtsp;
		sc->sc_xmtfw_desc = pcn_79c971_xmtfw;
		break;
	}

	/*
	 * Set up defaults -- see the tables above for what these
	 * values mean.
	 *
	 * XXX How should we tune RCVFW and XMTFW?
	 */
	sc->sc_rcvfw = 1;	/* minimum for full-duplex */
	sc->sc_xmtsp = 1;
	sc->sc_xmtfw = 0;

	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = pcn_ioctl;
	ifp->if_start = pcn_start;
	ifp->if_watchdog = pcn_watchdog;
	ifp->if_init = pcn_init;
	ifp->if_stop = pcn_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

#ifdef PCN_EVENT_COUNTERS
	/* Attach event counters.
 */
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txintr");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_babl, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "babl");
	evcnt_attach_dynamic(&sc->sc_ev_miss, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "miss");
	evcnt_attach_dynamic(&sc->sc_ev_merr, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "merr");

	evcnt_attach_dynamic(&sc->sc_ev_txseg1, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txseg1");
	evcnt_attach_dynamic(&sc->sc_ev_txseg2, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txseg2");
	evcnt_attach_dynamic(&sc->sc_ev_txseg3, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txseg3");
	evcnt_attach_dynamic(&sc->sc_ev_txseg4, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txseg4");
	evcnt_attach_dynamic(&sc->sc_ev_txseg5, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txseg5");
	evcnt_attach_dynamic(&sc->sc_ev_txsegmore, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txsegmore");
	evcnt_attach_dynamic(&sc->sc_ev_txcopy, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txcopy");
#endif /* PCN_EVENT_COUNTERS */

	/* Make sure the interface is shutdown during reboot. */
	sc->sc_sdhook = shutdownhook_establish(pcn_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
 */
 fail_5:
	for (i = 0; i < PCN_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < PCN_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct pcn_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * pcn_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
void
pcn_shutdown(void *arg)
{
	struct pcn_softc *sc = arg;

	pcn_stop(&sc->sc_ethercom.ec_if, 1);
}

/*
 * pcn_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
void
pcn_start(struct ifnet *ifp)
{
	struct pcn_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct pcn_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx, ofree, seg;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		/* Get a work queue entry. */
		if (sc->sc_txsfree == 0) {
			PCN_EVCNT_INCR(&sc->sc_ev_txsstall);
			break;
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.
If this fails, the packet either
		 * didn't fit in the alloted number of segments, or we
		 * were short on resources.  In this case, we'll copy
		 * and try again.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
			PCN_EVCNT_INCR(&sc->sc_ev_txcopy);
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname, error);
				break;
			}
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring as a termination point, to
		 * prevent wrap-around.
		 */
		if (dmamap->dm_nsegs > (sc->sc_txfree - 1)) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are not more slots left.
			 *
			 * XXX We could allocate an mbuf and copy, but
			 * XXX is it worth it?
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			if (m != NULL)
				m_freem(m);
			PCN_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map.
 */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

#ifdef PCN_EVENT_COUNTERS
		switch (dmamap->dm_nsegs) {
		case 1:
			PCN_EVCNT_INCR(&sc->sc_ev_txseg1);
			break;
		case 2:
			PCN_EVCNT_INCR(&sc->sc_ev_txseg2);
			break;
		case 3:
			PCN_EVCNT_INCR(&sc->sc_ev_txseg3);
			break;
		case 4:
			PCN_EVCNT_INCR(&sc->sc_ev_txseg4);
			break;
		case 5:
			PCN_EVCNT_INCR(&sc->sc_ev_txseg5);
			break;
		default:
			PCN_EVCNT_INCR(&sc->sc_ev_txsegmore);
			break;
		}
#endif /* PCN_EVENT_COUNTERS */

		/*
		 * Initialize the transmit descriptors.  The buffer
		 * address goes in tmd2 for PCnet-PCI style 3, and in
		 * tmd0 otherwise.
		 */
		if (sc->sc_swstyle == LE_B20_SSTYLE_PCNETPCI3) {
			for (nexttx = sc->sc_txnext, seg = 0;
			     seg < dmamap->dm_nsegs;
			     seg++, nexttx = PCN_NEXTTX(nexttx)) {
				/*
				 * If this is the first descriptor we're
				 * enqueueing, don't set the OWN bit just
				 * yet.  That could cause a race condition.
				 * We'll do it below.
				 */
				sc->sc_txdescs[nexttx].tmd0 = 0;
				sc->sc_txdescs[nexttx].tmd2 =
				    htole32(dmamap->dm_segs[seg].ds_addr);
				sc->sc_txdescs[nexttx].tmd1 =
				    ((nexttx == sc->sc_txnext) ? 0 :
				      htole32(LE_T1_OWN)) |
				    htole32((LE_BCNT(dmamap->dm_segs[
				      seg].ds_len) &
				      LE_T1_BCNT_MASK));
				lasttx = nexttx;
			}
		} else {
			for (nexttx = sc->sc_txnext, seg = 0;
			     seg < dmamap->dm_nsegs;
			     seg++, nexttx = PCN_NEXTTX(nexttx)) {
				/*
				 * If this is the first descriptor we're
				 * enqueueing, don't set the OWN bit just
				 * yet.  That could cause a race condition.
				 * We'll do it below.
				 */
				sc->sc_txdescs[nexttx].tmd0 =
				    htole32(dmamap->dm_segs[seg].ds_addr);
				sc->sc_txdescs[nexttx].tmd2 = 0;
				sc->sc_txdescs[nexttx].tmd1 =
				    ((nexttx == sc->sc_txnext) ?
 0 :
				      htole32(LE_T1_OWN)) |
				    htole32((LE_BCNT(dmamap->dm_segs[
				      seg].ds_len) &
				      LE_T1_BCNT_MASK));
				lasttx = nexttx;
			}
		}

		/* Interrupt on the packet, if appropriate. */
		if ((sc->sc_txsnext & PCN_TXINTR_MASK) == 0)
			sc->sc_txdescs[lasttx].tmd1 |= htole32(LE_T1_LTINT);

		/* Set `start of packet' and `end of packet' appropriately. */
		sc->sc_txdescs[lasttx].tmd1 |= htole32(LE_T1_ENP);
		sc->sc_txdescs[sc->sc_txnext].tmd1 |=
		    htole32(LE_T1_OWN|LE_T1_STP);

		/* Sync the descriptors we're using. */
		PCN_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Kick the transmitter. */
		pcn_csr_write(sc, LE_CSR0, LE_C0_INEA|LE_C0_TDMD);

		/*
		 * Store a pointer to the packet so we can free it later,
		 * and remember what txdirty will be once the packet is
		 * done.
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_lastdesc = lasttx;

		/* Advance the tx pointer. */
		sc->sc_txfree -= dmamap->dm_nsegs;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = PCN_NEXTTXS(sc->sc_txsnext);

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree == 0) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * pcn_watchdog:	[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
void
pcn_watchdog(struct ifnet *ifp)
{
	struct pcn_softc *sc = ifp->if_softc;

	/*
	 * Since we're not interrupting every packet, sweep
	 * up before we report an error.
1093 */ 1094 pcn_txintr(sc); 1095 1096 if (sc->sc_txfree != PCN_NTXDESC) { 1097 printf("%s: device timeout (txfree %d txsfree %d)\n", 1098 sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree); 1099 ifp->if_oerrors++; 1100 1101 /* Reset the interface. */ 1102 (void) pcn_init(ifp); 1103 } 1104 1105 /* Try to get more packets going. */ 1106 pcn_start(ifp); 1107 } 1108 1109 /* 1110 * pcn_ioctl: [ifnet interface function] 1111 * 1112 * Handle control requests from the operator. 1113 */ 1114 int 1115 pcn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 1116 { 1117 struct pcn_softc *sc = ifp->if_softc; 1118 struct ifreq *ifr = (struct ifreq *) data; 1119 int s, error; 1120 1121 s = splnet(); 1122 1123 switch (cmd) { 1124 case SIOCSIFMEDIA: 1125 case SIOCGIFMEDIA: 1126 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd); 1127 break; 1128 1129 default: 1130 error = ether_ioctl(ifp, cmd, data); 1131 if (error == ENETRESET) { 1132 /* 1133 * Multicast list has changed; set the hardware filter 1134 * accordingly. 1135 */ 1136 error = pcn_init(ifp); 1137 } 1138 break; 1139 } 1140 1141 /* Try to get more packets going. */ 1142 pcn_start(ifp); 1143 1144 splx(s); 1145 return (error); 1146 } 1147 1148 /* 1149 * pcn_intr: 1150 * 1151 * Interrupt service routine. 1152 */ 1153 int 1154 pcn_intr(void *arg) 1155 { 1156 struct pcn_softc *sc = arg; 1157 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1158 uint32_t csr0; 1159 int wantinit, handled = 0; 1160 1161 for (wantinit = 0; wantinit == 0;) { 1162 csr0 = pcn_csr_read(sc, LE_CSR0); 1163 if ((csr0 & LE_C0_INTR) == 0) 1164 break; 1165 1166 /* ACK the bits and re-enable interrupts. 
*/ 1167 pcn_csr_write(sc, LE_CSR0, csr0 & 1168 (LE_C0_INEA|LE_C0_BABL|LE_C0_MISS|LE_C0_MERR|LE_C0_RINT| 1169 LE_C0_TINT|LE_C0_IDON)); 1170 1171 handled = 1; 1172 1173 if (csr0 & LE_C0_RINT) { 1174 PCN_EVCNT_INCR(&sc->sc_ev_rxintr); 1175 wantinit = pcn_rxintr(sc); 1176 } 1177 1178 if (csr0 & LE_C0_TINT) { 1179 PCN_EVCNT_INCR(&sc->sc_ev_txintr); 1180 pcn_txintr(sc); 1181 } 1182 1183 if (csr0 & LE_C0_ERR) { 1184 if (csr0 & LE_C0_BABL) { 1185 PCN_EVCNT_INCR(&sc->sc_ev_babl); 1186 ifp->if_oerrors++; 1187 } 1188 if (csr0 & LE_C0_MISS) { 1189 PCN_EVCNT_INCR(&sc->sc_ev_miss); 1190 ifp->if_ierrors++; 1191 } 1192 if (csr0 & LE_C0_MERR) { 1193 PCN_EVCNT_INCR(&sc->sc_ev_merr); 1194 printf("%s: memory error\n", 1195 sc->sc_dev.dv_xname); 1196 wantinit = 1; 1197 break; 1198 } 1199 } 1200 1201 if ((csr0 & LE_C0_RXON) == 0) { 1202 printf("%s: receiver disabled\n", 1203 sc->sc_dev.dv_xname); 1204 ifp->if_ierrors++; 1205 wantinit = 1; 1206 } 1207 1208 if ((csr0 & LE_C0_TXON) == 0) { 1209 printf("%s: transmitter disabled\n", 1210 sc->sc_dev.dv_xname); 1211 ifp->if_oerrors++; 1212 wantinit = 1; 1213 } 1214 } 1215 1216 if (handled) { 1217 if (wantinit) 1218 pcn_init(ifp); 1219 1220 /* Try to get more packets going. */ 1221 pcn_start(ifp); 1222 } 1223 1224 return (handled); 1225 } 1226 1227 /* 1228 * pcn_spnd: 1229 * 1230 * Suspend the chip. 1231 */ 1232 void 1233 pcn_spnd(struct pcn_softc *sc) 1234 { 1235 int i; 1236 1237 pcn_csr_write(sc, LE_CSR5, sc->sc_csr5 | LE_C5_SPND); 1238 1239 for (i = 0; i < 10000; i++) { 1240 if (pcn_csr_read(sc, LE_CSR5) & LE_C5_SPND) 1241 return; 1242 delay(5); 1243 } 1244 1245 printf("%s: WARNING: chip failed to enter suspended state\n", 1246 sc->sc_dev.dv_xname); 1247 } 1248 1249 /* 1250 * pcn_txintr: 1251 * 1252 * Helper; handle transmit interrupts. 
 */
void
pcn_txintr(struct pcn_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct pcn_txsoft *txs;
	uint32_t tmd1, tmd2, tmd;
	int i, j;

	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (i = sc->sc_txsdirty; sc->sc_txsfree != PCN_TXQUEUELEN;
	     i = PCN_NEXTTXS(i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		PCN_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		/*
		 * The chip clears OWN in the last descriptor once the
		 * whole frame has gone out; if it is still set, this
		 * frame (and everything after it) is still pending.
		 */
		tmd1 = le32toh(sc->sc_txdescs[txs->txs_lastdesc].tmd1);
		if (tmd1 & LE_T1_OWN)
			break;

		/*
		 * Slightly annoying -- we have to loop through the
		 * descriptors we've used looking for ERR, since it
		 * can appear on any descriptor in the chain.
		 */
		for (j = txs->txs_firstdesc;; j = PCN_NEXTTX(j)) {
			tmd = le32toh(sc->sc_txdescs[j].tmd1);
			if (tmd & LE_T1_ERR) {
				ifp->if_oerrors++;
				/*
				 * Extended Tx status lives in tmd0 for
				 * SSTYLE 3 and in tmd2 otherwise (the
				 * two fields are swapped between the
				 * software styles).
				 */
				if (sc->sc_swstyle == LE_B20_SSTYLE_PCNETPCI3)
					tmd2 = le32toh(sc->sc_txdescs[j].tmd0);
				else
					tmd2 = le32toh(sc->sc_txdescs[j].tmd2);
				if (tmd2 & LE_T2_UFLO) {
					/*
					 * On underrun, suspend the chip and
					 * bump the transmit start point one
					 * notch before resuming; this raises
					 * the FIFO fill level required to
					 * start transmission.
					 */
					if (sc->sc_xmtsp < LE_C80_XMTSP_MAX) {
						sc->sc_xmtsp++;
						printf("%s: transmit "
						    "underrun; new threshold: "
						    "%s\n",
						    sc->sc_dev.dv_xname,
						    sc->sc_xmtsp_desc[
						    sc->sc_xmtsp]);
						pcn_spnd(sc);
						pcn_csr_write(sc, LE_CSR80,
						    LE_C80_RCVFW(sc->sc_rcvfw) |
						    LE_C80_XMTSP(sc->sc_xmtsp) |
						    LE_C80_XMTFW(sc->sc_xmtfw));
						pcn_csr_write(sc, LE_CSR5,
						    sc->sc_csr5);
					} else {
						printf("%s: transmit "
						    "underrun\n",
						    sc->sc_dev.dv_xname);
					}
				} else if (tmd2 & LE_T2_BUFF) {
					printf("%s: transmit buffer error\n",
					    sc->sc_dev.dv_xname);
				}
				if (tmd2 & LE_T2_LCOL)
					ifp->if_collisions++;
				if (tmd2 & LE_T2_RTRY)
					ifp->if_collisions += 16;
				goto next_packet;
			}
			if (j == txs->txs_lastdesc)
				break;
		}
		/*
		 * Note: at this point `tmd' holds tmd1 of the last
		 * descriptor in the chain (the loop above exits via
		 * the j == txs_lastdesc break).
		 */
		if (tmd1 & LE_T1_ONE)
			ifp->if_collisions++;
		else if (tmd & LE_T1_MORE) {
			/* Real number is unknown. */
			ifp->if_collisions += 2;
		}
		ifp->if_opackets++;
 next_packet:
		sc->sc_txfree += txs->txs_dmamap->dm_nsegs;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txsdirty = i;

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->sc_txsfree == PCN_TXQUEUELEN)
		ifp->if_timer = 0;
}

/*
 * pcn_rxintr:
 *
 *	Helper; handle receive interrupts.  Returns non-zero if the
 *	chip needs to be re-initialized (pcn_intr() does that).
 */
int
pcn_rxintr(struct pcn_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct pcn_rxsoft *rxs;
	struct mbuf *m;
	uint32_t rmd1;
	int i, len;

	for (i = sc->sc_rxptr;; i = PCN_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		PCN_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		rmd1 = le32toh(sc->sc_rxdescs[i].rmd1);

		/* OWN still set means the chip hasn't filled it yet. */
		if (rmd1 & LE_R1_OWN)
			break;

		/*
		 * Check for errors and make sure the packet fit into
		 * a single buffer.  We have structured this block of
		 * code the way it is in order to compress it into
		 * one test in the common case (no error).
		 */
		if (__predict_false((rmd1 & (LE_R1_STP|LE_R1_ENP|LE_R1_ERR)) !=
		    (LE_R1_STP|LE_R1_ENP))) {
			/* Make sure the packet is in a single buffer. */
			if ((rmd1 & (LE_R1_STP|LE_R1_ENP)) !=
			    (LE_R1_STP|LE_R1_ENP)) {
				printf("%s: packet spilled into next buffer\n",
				    sc->sc_dev.dv_xname);
				return (1);	/* pcn_intr() will re-init */
			}

			/*
			 * If the packet had an error, simply recycle the
			 * buffer.
			 */
			if (rmd1 & LE_R1_ERR) {
				ifp->if_ierrors++;
				/*
				 * If we got an overflow error, chances
				 * are there will be a CRC error.  In
				 * this case, just print the overflow
				 * error, and skip the others.
				 */
				if (rmd1 & LE_R1_OFLO)
					printf("%s: overflow error\n",
					    sc->sc_dev.dv_xname);
				else {
#define	PRINTIT(x, s)							\
					if (rmd1 & (x))			\
						printf("%s: %s\n",	\
						    sc->sc_dev.dv_xname, s);
					PRINTIT(LE_R1_FRAM, "framing error");
					PRINTIT(LE_R1_CRC, "CRC error");
					PRINTIT(LE_R1_BUFF, "buffer error");
				}
#undef PRINTIT
				PCN_INIT_RXDESC(sc, i);
				continue;
			}
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * No errors; receive the packet.  The byte count is
		 * in rmd0 for SSTYLE 3 and in rmd2 otherwise (the
		 * fields are swapped between software styles).
		 */
		if (sc->sc_swstyle == LE_B20_SSTYLE_PCNETPCI3)
			len = le32toh(sc->sc_rxdescs[i].rmd0) & LE_R1_BCNT_MASK;
		else
			len = le32toh(sc->sc_rxdescs[i].rmd2) & LE_R1_BCNT_MASK;

		/*
		 * The LANCE family includes the CRC with every packet;
		 * trim it off here.
		 */
		len -= ETHER_CRC_LEN;

		/*
		 * If the packet is small enough to fit in a
		 * single header mbuf, allocate one and copy
		 * the data into it.  This greatly reduces
		 * memory consumption when we receive lots
		 * of small packets.
		 *
		 * Otherwise, we add a new buffer to the receive
		 * chain.  If this fails, we drop the packet and
		 * recycle the old buffer.
		 */
		if (pcn_copy_small != 0 && len <= (MHLEN - 2)) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				goto dropit;
			/* 2-byte offset keeps the IP header aligned. */
			m->m_data += 2;
			memcpy(mtod(m, caddr_t),
			    mtod(rxs->rxs_mbuf, caddr_t), len);
			PCN_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
		} else {
			m = rxs->rxs_mbuf;
			if (pcn_add_rxbuf(sc, i) != 0) {
 dropit:
				ifp->if_ierrors++;
				PCN_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat,
				    rxs->rxs_dmamap, 0,
				    rxs->rxs_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}
		}

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

#if NBPFILTER > 0
		/* Pass this up to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
		ifp->if_ipackets++;
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;
	return (0);
}

/*
 * pcn_tick:
 *
 *	One second timer, used to tick the MII.
 */
void
pcn_tick(void *arg)
{
	struct pcn_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	/* Reschedule ourselves for one second from now. */
	callout_reset(&sc->sc_tick_ch, hz, pcn_tick, sc);
}

/*
 * pcn_reset:
 *
 *	Perform a soft reset on the PCnet-PCI.
 */
void
pcn_reset(struct pcn_softc *sc)
{

	/*
	 * The PCnet-PCI chip is reset by reading from the
	 * RESET register.  Note that while the NE2100 LANCE
	 * boards require a write after the read, the PCnet-PCI
	 * chips do not require this.
	 *
	 * Since we don't know if we're in 16-bit or 32-bit
	 * mode right now, issue both (it's safe) in the
	 * hopes that one will succeed.
1531 */ 1532 (void) bus_space_read_2(sc->sc_st, sc->sc_sh, PCN16_RESET); 1533 (void) bus_space_read_4(sc->sc_st, sc->sc_sh, PCN32_RESET); 1534 1535 /* Wait 1ms for it to finish. */ 1536 delay(1000); 1537 1538 /* 1539 * Select 32-bit I/O mode by issuing a 32-bit write to the 1540 * RDP. Since the RAP is 0 after a reset, writing a 0 1541 * to RDP is safe (since it simply clears CSR0). 1542 */ 1543 bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RDP, 0); 1544 } 1545 1546 /* 1547 * pcn_init: [ifnet interface function] 1548 * 1549 * Initialize the interface. Must be called at splnet(). 1550 */ 1551 int 1552 pcn_init(struct ifnet *ifp) 1553 { 1554 struct pcn_softc *sc = ifp->if_softc; 1555 struct pcn_rxsoft *rxs; 1556 uint8_t *enaddr = LLADDR(ifp->if_sadl); 1557 int i, error = 0; 1558 uint32_t reg; 1559 1560 /* Cancel any pending I/O. */ 1561 pcn_stop(ifp, 0); 1562 1563 /* Reset the chip to a known state. */ 1564 pcn_reset(sc); 1565 1566 /* 1567 * On the Am79c970, select SSTYLE 2, and SSTYLE 3 on everything 1568 * else. 1569 * 1570 * XXX It'd be really nice to use SSTYLE 2 on all the chips, 1571 * because the structure layout is compatible with ILACC, 1572 * but the burst mode is only available in SSTYLE 3, and 1573 * burst mode should provide some performance enhancement. 1574 */ 1575 if (sc->sc_variant->pcv_chipid == PARTID_Am79c970) 1576 sc->sc_swstyle = LE_B20_SSTYLE_PCNETPCI2; 1577 else 1578 sc->sc_swstyle = LE_B20_SSTYLE_PCNETPCI3; 1579 pcn_bcr_write(sc, LE_BCR20, sc->sc_swstyle); 1580 1581 /* Initialize the transmit descriptor ring. */ 1582 memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs)); 1583 PCN_CDTXSYNC(sc, 0, PCN_NTXDESC, 1584 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1585 sc->sc_txfree = PCN_NTXDESC; 1586 sc->sc_txnext = 0; 1587 1588 /* Initialize the transmit job descriptors. 
*/ 1589 for (i = 0; i < PCN_TXQUEUELEN; i++) 1590 sc->sc_txsoft[i].txs_mbuf = NULL; 1591 sc->sc_txsfree = PCN_TXQUEUELEN; 1592 sc->sc_txsnext = 0; 1593 sc->sc_txsdirty = 0; 1594 1595 /* 1596 * Initialize the receive descriptor and receive job 1597 * descriptor rings. 1598 */ 1599 for (i = 0; i < PCN_NRXDESC; i++) { 1600 rxs = &sc->sc_rxsoft[i]; 1601 if (rxs->rxs_mbuf == NULL) { 1602 if ((error = pcn_add_rxbuf(sc, i)) != 0) { 1603 printf("%s: unable to allocate or map rx " 1604 "buffer %d, error = %d\n", 1605 sc->sc_dev.dv_xname, i, error); 1606 /* 1607 * XXX Should attempt to run with fewer receive 1608 * XXX buffers instead of just failing. 1609 */ 1610 pcn_rxdrain(sc); 1611 goto out; 1612 } 1613 } else 1614 PCN_INIT_RXDESC(sc, i); 1615 } 1616 sc->sc_rxptr = 0; 1617 1618 /* Initialize MODE for the initialization block. */ 1619 sc->sc_mode = 0; 1620 if (ifp->if_flags & IFF_PROMISC) 1621 sc->sc_mode |= LE_C15_PROM; 1622 if ((ifp->if_flags & IFF_BROADCAST) == 0) 1623 sc->sc_mode |= LE_C15_DRCVBC; 1624 1625 /* 1626 * If we have MII, simply select MII in the MODE register, 1627 * and clear ASEL. Otherwise, let ASEL stand (for now), 1628 * and leave PORTSEL alone (it is ignored with ASEL is set). 1629 */ 1630 if (sc->sc_flags & PCN_F_HAS_MII) { 1631 pcn_bcr_write(sc, LE_BCR2, 1632 pcn_bcr_read(sc, LE_BCR2) & ~LE_B2_ASEL); 1633 sc->sc_mode |= LE_C15_PORTSEL(PORTSEL_MII); 1634 1635 /* 1636 * Disable MII auto-negotiation. We handle that in 1637 * our own MII layer. 1638 */ 1639 pcn_bcr_write(sc, LE_BCR32, 1640 pcn_csr_read(sc, LE_BCR32) & ~LE_B32_DANAS); 1641 } 1642 1643 /* 1644 * Set the Tx and Rx descriptor ring addresses in the init 1645 * block, the TLEN and RLEN other fields of the init block 1646 * MODE register. 
1647 */ 1648 sc->sc_initblock.init_rdra = htole32(PCN_CDRXADDR(sc, 0)); 1649 sc->sc_initblock.init_tdra = htole32(PCN_CDTXADDR(sc, 0)); 1650 sc->sc_initblock.init_mode = htole32(sc->sc_mode | 1651 ((ffs(PCN_NTXDESC) - 1) << 28) | 1652 ((ffs(PCN_NRXDESC) - 1) << 20)); 1653 1654 /* Set the station address in the init block. */ 1655 sc->sc_initblock.init_padr[0] = htole32(enaddr[0] | 1656 (enaddr[1] << 8) | (enaddr[2] << 16) | (enaddr[3] << 24)); 1657 sc->sc_initblock.init_padr[1] = htole32(enaddr[4] | 1658 (enaddr[5] << 8)); 1659 1660 /* Set the multicast filter in the init block. */ 1661 pcn_set_filter(sc); 1662 1663 /* Initialize CSR3. */ 1664 pcn_csr_write(sc, LE_CSR3, LE_C3_MISSM|LE_C3_IDONM|LE_C3_DXSUFLO); 1665 1666 /* Initialize CSR4. */ 1667 pcn_csr_write(sc, LE_CSR4, LE_C4_DMAPLUS|LE_C4_APAD_XMT| 1668 LE_C4_MFCOM|LE_C4_RCVCCOM|LE_C4_TXSTRTM); 1669 1670 /* Initialize CSR5. */ 1671 sc->sc_csr5 = LE_C5_LTINTEN|LE_C5_SINTE; 1672 pcn_csr_write(sc, LE_CSR5, sc->sc_csr5); 1673 1674 /* 1675 * If we have an Am79c971 or greater, initialize CSR7. 1676 * 1677 * XXX Might be nice to use the MII auto-poll interrupt someday. 1678 */ 1679 switch (sc->sc_variant->pcv_chipid) { 1680 case PARTID_Am79c970: 1681 case PARTID_Am79c970A: 1682 /* Not available on these chips. */ 1683 break; 1684 1685 default: 1686 pcn_csr_write(sc, LE_CSR7, LE_C7_FASTSPNDE); 1687 break; 1688 } 1689 1690 /* 1691 * On the Am79c970A and greater, initialize BCR18 to 1692 * enable burst mode. 1693 * 1694 * Also enable the "no underflow" option on the Am79c971 and 1695 * higher, which prevents the chip from generating transmit 1696 * underflows, yet sill provides decent performance. Note if 1697 * chip is not connected to external SRAM, then we still have 1698 * to handle underflow errors (the NOUFLO bit is ignored in 1699 * that case). 
1700 */ 1701 reg = pcn_bcr_read(sc, LE_BCR18); 1702 switch (sc->sc_variant->pcv_chipid) { 1703 case PARTID_Am79c970: 1704 break; 1705 1706 case PARTID_Am79c970A: 1707 reg |= LE_B18_BREADE|LE_B18_BWRITE; 1708 break; 1709 1710 default: 1711 reg |= LE_B18_BREADE|LE_B18_BWRITE|LE_B18_NOUFLO; 1712 break; 1713 } 1714 pcn_bcr_write(sc, LE_BCR18, reg); 1715 1716 /* 1717 * Initialize CSR80 (FIFO thresholds for Tx and Rx). 1718 */ 1719 pcn_csr_write(sc, LE_CSR80, LE_C80_RCVFW(sc->sc_rcvfw) | 1720 LE_C80_XMTSP(sc->sc_xmtsp) | LE_C80_XMTFW(sc->sc_xmtfw)); 1721 1722 /* 1723 * Send the init block to the chip, and wait for it 1724 * to be processed. 1725 */ 1726 PCN_CDINITSYNC(sc, BUS_DMASYNC_PREWRITE); 1727 pcn_csr_write(sc, LE_CSR1, PCN_CDINITADDR(sc) & 0xffff); 1728 pcn_csr_write(sc, LE_CSR2, (PCN_CDINITADDR(sc) >> 16) & 0xffff); 1729 pcn_csr_write(sc, LE_CSR0, LE_C0_INIT); 1730 delay(100); 1731 for (i = 0; i < 10000; i++) { 1732 if (pcn_csr_read(sc, LE_CSR0) & LE_C0_IDON) 1733 break; 1734 delay(10); 1735 } 1736 PCN_CDINITSYNC(sc, BUS_DMASYNC_POSTWRITE); 1737 if (i == 10000) { 1738 printf("%s: timeout processing init block\n", 1739 sc->sc_dev.dv_xname); 1740 error = EIO; 1741 goto out; 1742 } 1743 1744 /* Set the media. */ 1745 (void) (*sc->sc_mii.mii_media.ifm_change)(ifp); 1746 1747 /* Enable interrupts and external activity (and ACK IDON). */ 1748 pcn_csr_write(sc, LE_CSR0, LE_C0_INEA|LE_C0_STRT|LE_C0_IDON); 1749 1750 if (sc->sc_flags & PCN_F_HAS_MII) { 1751 /* Start the one second MII clock. */ 1752 callout_reset(&sc->sc_tick_ch, hz, pcn_tick, sc); 1753 } 1754 1755 /* ...all done! */ 1756 ifp->if_flags |= IFF_RUNNING; 1757 ifp->if_flags &= ~IFF_OACTIVE; 1758 1759 out: 1760 if (error) 1761 printf("%s: interface not running\n", sc->sc_dev.dv_xname); 1762 return (error); 1763 } 1764 1765 /* 1766 * pcn_rxdrain: 1767 * 1768 * Drain the receive queue. 
1769 */ 1770 void 1771 pcn_rxdrain(struct pcn_softc *sc) 1772 { 1773 struct pcn_rxsoft *rxs; 1774 int i; 1775 1776 for (i = 0; i < PCN_NRXDESC; i++) { 1777 rxs = &sc->sc_rxsoft[i]; 1778 if (rxs->rxs_mbuf != NULL) { 1779 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); 1780 m_freem(rxs->rxs_mbuf); 1781 rxs->rxs_mbuf = NULL; 1782 } 1783 } 1784 } 1785 1786 /* 1787 * pcn_stop: [ifnet interface function] 1788 * 1789 * Stop transmission on the interface. 1790 */ 1791 void 1792 pcn_stop(struct ifnet *ifp, int disable) 1793 { 1794 struct pcn_softc *sc = ifp->if_softc; 1795 struct pcn_txsoft *txs; 1796 int i; 1797 1798 if (sc->sc_flags & PCN_F_HAS_MII) { 1799 /* Stop the one second clock. */ 1800 callout_stop(&sc->sc_tick_ch); 1801 1802 /* Down the MII. */ 1803 mii_down(&sc->sc_mii); 1804 } 1805 1806 /* Stop the chip. */ 1807 pcn_csr_write(sc, LE_CSR0, LE_C0_STOP); 1808 1809 /* Release any queued transmit buffers. */ 1810 for (i = 0; i < PCN_TXQUEUELEN; i++) { 1811 txs = &sc->sc_txsoft[i]; 1812 if (txs->txs_mbuf != NULL) { 1813 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); 1814 m_freem(txs->txs_mbuf); 1815 txs->txs_mbuf = NULL; 1816 } 1817 } 1818 1819 if (disable) 1820 pcn_rxdrain(sc); 1821 1822 /* Mark the interface as down and cancel the watchdog timer. */ 1823 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1824 ifp->if_timer = 0; 1825 } 1826 1827 /* 1828 * pcn_add_rxbuf: 1829 * 1830 * Add a receive buffer to the indicated descriptor. 
 */
int
pcn_add_rxbuf(struct pcn_softc *sc, int idx)
{
	struct pcn_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	/* Grab an mbuf header for the new receive buffer. */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	/* Back it with a cluster; the chip DMAs directly into it. */
	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	/* Unload the old buffer's map before reusing it below. */
	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, error);
		panic("pcn_add_rxbuf");
	}

	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	PCN_INIT_RXDESC(sc, idx);

	return (0);
}

/*
 * pcn_set_filter:
 *
 *	Set up the receive filter.
 */
void
pcn_set_filter(struct pcn_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t crc;

	/*
	 * Set up the multicast address filter by passing all multicast
	 * addresses through a CRC generator, and then using the high
	 * order 6 bits as an index into the 64-bit logical address
	 * filter.  The high order bits select the word, while the rest
	 * of the bits select the bit within the word.
	 */

	if (ifp->if_flags & IFF_PROMISC)
		goto allmulti;

	sc->sc_initblock.init_ladrf[0] =
	    sc->sc_initblock.init_ladrf[1] =
	    sc->sc_initblock.init_ladrf[2] =
	    sc->sc_initblock.init_ladrf[3] = 0;

	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}

		crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);

		/* Just want the 6 most significant bits. */
		crc >>= 26;

		/*
		 * Set the corresponding bit in the filter.  The filter
		 * is stored as 16-bit words in the init block, hence
		 * the htole16 and the 4-bit split of the index.
		 */
		sc->sc_initblock.init_ladrf[crc >> 4] |=
		    htole16(1 << (crc & 0xf));

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	return;

 allmulti:
	/* Accept all multicast packets: set every bit in the filter. */
	ifp->if_flags |= IFF_ALLMULTI;
	sc->sc_initblock.init_ladrf[0] =
	    sc->sc_initblock.init_ladrf[1] =
	    sc->sc_initblock.init_ladrf[2] =
	    sc->sc_initblock.init_ladrf[3] = 0xffff;
}

/*
 * pcn_79c970_mediainit:
 *
 *	Initialize media for the Am79c970.
1942 */ 1943 void 1944 pcn_79c970_mediainit(struct pcn_softc *sc) 1945 { 1946 const char *sep = ""; 1947 1948 ifmedia_init(&sc->sc_mii.mii_media, 0, pcn_79c970_mediachange, 1949 pcn_79c970_mediastatus); 1950 1951 #define ADD(s, m, d) \ 1952 do { \ 1953 printf("%s%s", sep, s); \ 1954 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(m), (d), NULL); \ 1955 sep = ", "; \ 1956 } while (/*CONSTCOND*/0) 1957 1958 printf("%s: ", sc->sc_dev.dv_xname); 1959 ADD("10base5", IFM_10_5, PORTSEL_AUI); 1960 if (sc->sc_variant->pcv_chipid == PARTID_Am79c970A) 1961 ADD("10base5-FDX", IFM_10_5|IFM_FDX, PORTSEL_AUI); 1962 ADD("10baseT", IFM_10_T, PORTSEL_10T); 1963 if (sc->sc_variant->pcv_chipid == PARTID_Am79c970A) 1964 ADD("10baseT-FDX", IFM_10_T|IFM_FDX, PORTSEL_10T); 1965 ADD("auto", IFM_AUTO, 0); 1966 if (sc->sc_variant->pcv_chipid == PARTID_Am79c970A) 1967 ADD("auto-FDX", IFM_AUTO|IFM_FDX, 0); 1968 printf("\n"); 1969 1970 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); 1971 } 1972 1973 /* 1974 * pcn_79c970_mediastatus: [ifmedia interface function] 1975 * 1976 * Get the current interface media status (Am79c970 version). 1977 */ 1978 void 1979 pcn_79c970_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 1980 { 1981 struct pcn_softc *sc = ifp->if_softc; 1982 1983 /* 1984 * The currently selected media is always the active media. 1985 * Note: We have no way to determine what media the AUTO 1986 * process picked. 1987 */ 1988 ifmr->ifm_active = sc->sc_mii.mii_media.ifm_media; 1989 } 1990 1991 /* 1992 * pcn_79c970_mediachange: [ifmedia interface function] 1993 * 1994 * Set hardware to newly-selected media (Am79c970 version). 1995 */ 1996 int 1997 pcn_79c970_mediachange(struct ifnet *ifp) 1998 { 1999 struct pcn_softc *sc = ifp->if_softc; 2000 uint32_t reg; 2001 2002 if (IFM_SUBTYPE(sc->sc_mii.mii_media.ifm_media) == IFM_AUTO) { 2003 /* 2004 * CSR15:PORTSEL doesn't matter. Just set BCR2:ASEL. 
2005 */ 2006 reg = pcn_bcr_read(sc, LE_BCR2); 2007 reg |= LE_B2_ASEL; 2008 pcn_bcr_write(sc, LE_BCR2, reg); 2009 } else { 2010 /* 2011 * Clear BCR2:ASEL and set the new CSR15:PORTSEL value. 2012 */ 2013 reg = pcn_bcr_read(sc, LE_BCR2); 2014 reg &= ~LE_B2_ASEL; 2015 pcn_bcr_write(sc, LE_BCR2, reg); 2016 2017 reg = pcn_csr_read(sc, LE_CSR15); 2018 reg = (reg & ~LE_C15_PORTSEL(PORTSEL_MASK)) | 2019 LE_C15_PORTSEL(sc->sc_mii.mii_media.ifm_cur->ifm_data); 2020 pcn_csr_write(sc, LE_CSR15, reg); 2021 } 2022 2023 if ((sc->sc_mii.mii_media.ifm_media & IFM_FDX) != 0) { 2024 reg = LE_B9_FDEN; 2025 if (IFM_SUBTYPE(sc->sc_mii.mii_media.ifm_media) == IFM_10_5) 2026 reg |= LE_B9_AUIFD; 2027 pcn_bcr_write(sc, LE_BCR9, reg); 2028 } else 2029 pcn_bcr_write(sc, LE_BCR0, 0); 2030 2031 return (0); 2032 } 2033 2034 /* 2035 * pcn_79c971_mediainit: 2036 * 2037 * Initialize media for the Am79c971. 2038 */ 2039 void 2040 pcn_79c971_mediainit(struct pcn_softc *sc) 2041 { 2042 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 2043 2044 /* We have MII. */ 2045 sc->sc_flags |= PCN_F_HAS_MII; 2046 2047 /* 2048 * The built-in 10BASE-T interface is mapped to the MII 2049 * on the PCNet-FAST. Unfortunately, there's no EEPROM 2050 * word that tells us which PHY to use. Since the 10BASE-T 2051 * interface is always at PHY 31, we make a note of the 2052 * first PHY that responds, and disallow any PHYs after 2053 * it. This is all handled in the MII read routine. 2054 */ 2055 sc->sc_phyaddr = -1; 2056 2057 /* Initialize our media structures and probe the MII. 
*/ 2058 sc->sc_mii.mii_ifp = ifp; 2059 sc->sc_mii.mii_readreg = pcn_mii_readreg; 2060 sc->sc_mii.mii_writereg = pcn_mii_writereg; 2061 sc->sc_mii.mii_statchg = pcn_mii_statchg; 2062 ifmedia_init(&sc->sc_mii.mii_media, 0, pcn_79c971_mediachange, 2063 pcn_79c971_mediastatus); 2064 2065 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 2066 MII_OFFSET_ANY, 0); 2067 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) { 2068 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL); 2069 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE); 2070 } else 2071 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); 2072 } 2073 2074 /* 2075 * pcn_79c971_mediastatus: [ifmedia interface function] 2076 * 2077 * Get the current interface media status (Am79c971 version). 2078 */ 2079 void 2080 pcn_79c971_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 2081 { 2082 struct pcn_softc *sc = ifp->if_softc; 2083 2084 mii_pollstat(&sc->sc_mii); 2085 ifmr->ifm_status = sc->sc_mii.mii_media_status; 2086 ifmr->ifm_active = sc->sc_mii.mii_media_active; 2087 } 2088 2089 /* 2090 * pcn_79c971_mediachange: [ifmedia interface function] 2091 * 2092 * Set hardware to newly-selected media (Am79c971 version). 2093 */ 2094 int 2095 pcn_79c971_mediachange(struct ifnet *ifp) 2096 { 2097 struct pcn_softc *sc = ifp->if_softc; 2098 2099 if (ifp->if_flags & IFF_UP) 2100 mii_mediachg(&sc->sc_mii); 2101 return (0); 2102 } 2103 2104 /* 2105 * pcn_mii_readreg: [mii interface function] 2106 * 2107 * Read a PHY register on the MII. 
2108 */ 2109 int 2110 pcn_mii_readreg(struct device *self, int phy, int reg) 2111 { 2112 struct pcn_softc *sc = (void *) self; 2113 uint32_t rv; 2114 2115 if (sc->sc_phyaddr != -1 && phy != sc->sc_phyaddr) 2116 return (0); 2117 2118 pcn_bcr_write(sc, LE_BCR33, reg | (phy << PHYAD_SHIFT)); 2119 rv = pcn_bcr_read(sc, LE_BCR34) & LE_B34_MIIMD; 2120 if (rv == 0xffff) 2121 return (0); 2122 2123 if (sc->sc_phyaddr == -1) 2124 sc->sc_phyaddr = phy; 2125 2126 return (rv); 2127 } 2128 2129 /* 2130 * pcn_mii_writereg: [mii interface function] 2131 * 2132 * Write a PHY register on the MII. 2133 */ 2134 void 2135 pcn_mii_writereg(struct device *self, int phy, int reg, int val) 2136 { 2137 struct pcn_softc *sc = (void *) self; 2138 2139 pcn_bcr_write(sc, LE_BCR33, reg | (phy << PHYAD_SHIFT)); 2140 pcn_bcr_write(sc, LE_BCR34, val); 2141 } 2142 2143 /* 2144 * pcn_mii_statchg: [mii interface function] 2145 * 2146 * Callback from MII layer when media changes. 2147 */ 2148 void 2149 pcn_mii_statchg(struct device *self) 2150 { 2151 struct pcn_softc *sc = (void *) self; 2152 2153 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0) 2154 pcn_bcr_write(sc, LE_BCR9, LE_B9_FDEN); 2155 else 2156 pcn_bcr_write(sc, LE_BCR0, 0); 2157 } 2158