1 /* $OpenBSD: if_pcn.c,v 1.24 2011/04/03 15:36:02 jasper Exp $ */ 2 /* $NetBSD: if_pcn.c,v 1.26 2005/05/07 09:15:44 is Exp $ */ 3 4 /* 5 * Copyright (c) 2001 Wasabi Systems, Inc. 6 * All rights reserved. 7 * 8 * Written by Jason R. Thorpe for Wasabi Systems, Inc. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed for the NetBSD Project by 21 * Wasabi Systems, Inc. 22 * 4. The name of Wasabi Systems, Inc. may not be used to endorse 23 * or promote products derived from this software without specific prior 24 * written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC 30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 36 * POSSIBILITY OF SUCH DAMAGE. 
37 */ 38 39 /* 40 * Device driver for the AMD PCnet-PCI series of Ethernet 41 * chips: 42 * 43 * * Am79c970 PCnet-PCI Single-Chip Ethernet Controller for PCI 44 * Local Bus 45 * 46 * * Am79c970A PCnet-PCI II Single-Chip Full-Duplex Ethernet Controller 47 * for PCI Local Bus 48 * 49 * * Am79c971 PCnet-FAST Single-Chip Full-Duplex 10/100Mbps 50 * Ethernet Controller for PCI Local Bus 51 * 52 * * Am79c972 PCnet-FAST+ Enhanced 10/100Mbps PCI Ethernet Controller 53 * with OnNow Support 54 * 55 * * Am79c973/Am79c975 PCnet-FAST III Single-Chip 10/100Mbps PCI 56 * Ethernet Controller with Integrated PHY 57 * 58 * This also supports the virtual PCnet-PCI Ethernet interface found 59 * in VMware. 60 * 61 * TODO: 62 * 63 * * Split this into bus-specific and bus-independent portions. 64 * The core could also be used for the ILACC (Am79900) 32-bit 65 * Ethernet chip (XXX only if we use an ILACC-compatible SWSTYLE). 66 */ 67 68 #include "bpfilter.h" 69 70 #include <sys/param.h> 71 #include <sys/systm.h> 72 #include <sys/timeout.h> 73 #include <sys/mbuf.h> 74 #include <sys/malloc.h> 75 #include <sys/kernel.h> 76 #include <sys/socket.h> 77 #include <sys/ioctl.h> 78 #include <sys/errno.h> 79 #include <sys/device.h> 80 #include <sys/queue.h> 81 82 #include <net/if.h> 83 #include <net/if_dl.h> 84 85 #ifdef INET 86 #include <netinet/in.h> 87 #include <netinet/in_systm.h> 88 #include <netinet/in_var.h> 89 #include <netinet/ip.h> 90 #include <netinet/if_ether.h> 91 #endif 92 93 #include <net/if_media.h> 94 95 #if NBPFILTER > 0 96 #include <net/bpf.h> 97 #endif 98 99 #include <machine/bus.h> 100 #include <machine/intr.h> 101 #include <machine/endian.h> 102 103 #include <dev/mii/mii.h> 104 #include <dev/mii/miivar.h> 105 106 #include <dev/ic/am79900reg.h> 107 #include <dev/ic/lancereg.h> 108 109 #include <dev/pci/pcireg.h> 110 #include <dev/pci/pcivar.h> 111 #include <dev/pci/pcidevs.h> 112 113 /* 114 * Register definitions for the AMD PCnet-PCI series of Ethernet 115 * chips. 
 *
 * These are only the registers that we access directly from PCI
 * space.  Everything else (accessed via the RAP + RDP/BDP) is
 * defined in <dev/ic/lancereg.h>.
 */

/*
 * PCI configuration space.
 */

#define	PCN_PCI_CBIO	(PCI_MAPREG_START + 0x00)	/* I/O BAR */
#define	PCN_PCI_CBMEM	(PCI_MAPREG_START + 0x04)	/* memory BAR */

/*
 * I/O map in Word I/O mode.
 */

#define	PCN16_APROM	0x00
#define	PCN16_RDP	0x10
#define	PCN16_RAP	0x12
#define	PCN16_RESET	0x14
#define	PCN16_BDP	0x16

/*
 * I/O map in DWord I/O mode.  CSR/BCR access is indirect: write the
 * register number to RAP, then read/write the data through RDP (CSRs)
 * or BDP (BCRs).
 */

#define	PCN32_APROM	0x00
#define	PCN32_RDP	0x10
#define	PCN32_RAP	0x14
#define	PCN32_RESET	0x18
#define	PCN32_BDP	0x1c

/*
 * Transmit descriptor list size.  This is arbitrary, but allocate
 * enough descriptors for 128 pending transmissions, and 4 segments
 * per packet.  This MUST work out to a power of 2.
 *
 * NOTE: We can't have any more than 512 Tx descriptors, SO BE CAREFUL!
 *
 * So we play a little trick here.  We give each packet up to 16
 * DMA segments, but only allocate the max of 512 descriptors.  The
 * transmit logic can deal with this, we just are hoping to sneak by.
 */
#define	PCN_NTXSEGS		16

#define	PCN_TXQUEUELEN		128
#define	PCN_TXQUEUELEN_MASK	(PCN_TXQUEUELEN - 1)
#define	PCN_NTXDESC		512
#define	PCN_NTXDESC_MASK	(PCN_NTXDESC - 1)
#define	PCN_NEXTTX(x)		(((x) + 1) & PCN_NTXDESC_MASK)
#define	PCN_NEXTTXS(x)		(((x) + 1) & PCN_TXQUEUELEN_MASK)

/* Tx interrupt every N + 1 packets.  Must be a power of 2 minus 1. */
#define	PCN_TXINTR_MASK		7

/*
 * Receive descriptor list size.  We have one Rx buffer per incoming
 * packet, so this logic is a little simpler.
 */
#define	PCN_NRXDESC		128
#define	PCN_NRXDESC_MASK	(PCN_NRXDESC - 1)
#define	PCN_NEXTRX(x)		(((x) + 1) & PCN_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the PCnet chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct pcn_control_data {
	/* The transmit descriptors. */
	struct letmd pcd_txdescs[PCN_NTXDESC];

	/* The receive descriptors. */
	struct lermd pcd_rxdescs[PCN_NRXDESC];

	/* The init block. */
	struct leinit pcd_initblock;
};

/* Byte offsets of the above members within the DMA'd clump. */
#define	PCN_CDOFF(x)	offsetof(struct pcn_control_data, x)
#define	PCN_CDTXOFF(x)	PCN_CDOFF(pcd_txdescs[(x)])
#define	PCN_CDRXOFF(x)	PCN_CDOFF(pcd_rxdescs[(x)])
#define	PCN_CDINITOFF	PCN_CDOFF(pcd_initblock)

/*
 * Software state for transmit jobs.
 */
struct pcn_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
};

/*
 * Software state for receive jobs.
 */
struct pcn_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

/*
 * Description of Rx FIFO watermarks for various revisions.
 * Indexed by the sc_rcvfw value programmed into CSR80.
 */
static const char * const pcn_79c970_rcvfw[] = {
	"16 bytes",
	"64 bytes",
	"128 bytes",
	NULL,
};

static const char * const pcn_79c971_rcvfw[] = {
	"16 bytes",
	"64 bytes",
	"112 bytes",
	NULL,
};

/*
 * Description of Tx start points for various revisions.
 * Indexed by the sc_xmtsp value programmed into CSR80.
 */
static const char * const pcn_79c970_xmtsp[] = {
	"8 bytes",
	"64 bytes",
	"128 bytes",
	"248 bytes",
};

static const char * const pcn_79c971_xmtsp[] = {
	"20 bytes",
	"64 bytes",
	"128 bytes",
	"248 bytes",
};

static const char * const pcn_79c971_xmtsp_sram[] = {
	"44 bytes",
	"64 bytes",
	"128 bytes",
	"store-and-forward",
};

/*
 * Description of Tx FIFO watermarks for various revisions.
 * Indexed by the sc_xmtfw value programmed into CSR80.
 */
static const char * const pcn_79c970_xmtfw[] = {
	"16 bytes",
	"64 bytes",
	"128 bytes",
	NULL,
};

static const char * const pcn_79c971_xmtfw[] = {
	"16 bytes",
	"64 bytes",
	"108 bytes",
	NULL,
};

/*
 * Software state per device.
 */
struct pcn_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct arpcom sc_arpcom;	/* Ethernet common data */

	/* Points to our media routines, etc. */
	const struct pcn_variant *sc_variant;

	void *sc_ih;			/* interrupt cookie */

	struct mii_data sc_mii;		/* MII/media information */

	struct timeout sc_tick_timeout;	/* tick timeout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	/* Software state for transmit and receive descriptors. */
	struct pcn_txsoft sc_txsoft[PCN_TXQUEUELEN];
	struct pcn_rxsoft sc_rxsoft[PCN_NRXDESC];

	/* Control data structures */
	struct pcn_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->pcd_txdescs
#define	sc_rxdescs	sc_control_data->pcd_rxdescs
#define	sc_initblock	sc_control_data->pcd_initblock

	const char * const *sc_rcvfw_desc;	/* Rx FIFO watermark info */
	int sc_rcvfw;

	const char * const *sc_xmtsp_desc;	/* Tx start point info */
	int sc_xmtsp;

	const char * const *sc_xmtfw_desc;	/* Tx FIFO watermark info */
	int sc_xmtfw;

	int sc_flags;		/* misc. flags; see below */
	int sc_swstyle;		/* the software style in use */

	int sc_txfree;		/* number of free Tx descriptors */
	int sc_txnext;		/* next ready Tx descriptor */

	int sc_txsfree;		/* number of free Tx jobs */
	int sc_txsnext;		/* next free Tx job */
	int sc_txsdirty;	/* dirty Tx jobs */

	int sc_rxptr;		/* next ready Rx descriptor/job */

	uint32_t sc_csr5;	/* prototype CSR5 register */
	uint32_t sc_mode;	/* prototype MODE register */
};

/* sc_flags */
#define	PCN_F_HAS_MII	0x0001	/* has MII */

/* Bus (DMA) addresses of the control-data members. */
#define	PCN_CDTXADDR(sc, x)	((sc)->sc_cddma + PCN_CDTXOFF((x)))
#define	PCN_CDRXADDR(sc, x)	((sc)->sc_cddma + PCN_CDRXOFF((x)))
#define	PCN_CDINITADDR(sc)	((sc)->sc_cddma + PCN_CDINITOFF)

/*
 * Sync `n' Tx descriptors starting at index `x', handling the case
 * where the range wraps past the end of the ring (two sync calls).
 */
#define	PCN_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > PCN_NTXDESC) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    PCN_CDTXOFF(__x), sizeof(struct letmd) *		\
		    (PCN_NTXDESC - __x), (ops));			\
		__n -= (PCN_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    PCN_CDTXOFF(__x), sizeof(struct letmd) * __n, (ops));	\
} while (/*CONSTCOND*/0)

#define	PCN_CDRXSYNC(sc, x, ops)					\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    PCN_CDRXOFF((x)), sizeof(struct lermd), (ops))

#define	PCN_CDINITSYNC(sc, ops)						\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    PCN_CDINITOFF, sizeof(struct leinit), (ops))

/*
 * Initialize Rx descriptor `x' from its software state and hand it
 * to the chip (sets OWN).  The buffer-address/status field layout
 * depends on the software style (SWSTYLE) selected at init time.
 */
#define	PCN_INIT_RXDESC(sc, x)						\
do {									\
	struct pcn_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	struct lermd *__rmd = &(sc)->sc_rxdescs[(x)];			\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + 2;				\
									\
	if ((sc)->sc_swstyle == LE_B20_SSTYLE_PCNETPCI3) {		\
		__rmd->rmd2 =						\
		    htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr + 2);	\
		__rmd->rmd0 = 0;					\
	} else {							\
		__rmd->rmd2 = 0;					\
		__rmd->rmd0 =						\
		    htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr + 2);	\
	}								\
	__rmd->rmd1 = htole32(LE_R1_OWN|LE_R1_ONES|			\
	    (LE_BCNT(MCLBYTES - 2) & LE_R1_BCNT_MASK));			\
	PCN_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);\
} while(/*CONSTCOND*/0)

void	pcn_start(struct ifnet *);
void	pcn_watchdog(struct ifnet *);
int	pcn_ioctl(struct ifnet *, u_long, caddr_t);
int	pcn_init(struct ifnet *);
void	pcn_stop(struct ifnet *, int);

void	pcn_reset(struct pcn_softc *);
void	pcn_rxdrain(struct pcn_softc *);
int	pcn_add_rxbuf(struct pcn_softc *, int);
void	pcn_tick(void *);

void	pcn_spnd(struct pcn_softc *);

void	pcn_set_filter(struct pcn_softc *);

int	pcn_intr(void *);
void	pcn_txintr(struct pcn_softc *);
int	pcn_rxintr(struct pcn_softc *);

int	pcn_mii_readreg(struct device *, int, int);
void	pcn_mii_writereg(struct device *, int, int, int);
void	pcn_mii_statchg(struct device *);

void	pcn_79c970_mediainit(struct pcn_softc *);
int	pcn_79c970_mediachange(struct ifnet *);
void	pcn_79c970_mediastatus(struct ifnet *, struct ifmediareq *);

void	pcn_79c971_mediainit(struct pcn_softc *);
int	pcn_79c971_mediachange(struct ifnet *);
void	pcn_79c971_mediastatus(struct ifnet *, struct ifmediareq *);

/*
 * Description of a PCnet-PCI variant.  Used to select media access
 * method, mostly, and to print a nice description of the chip.
 * The table is matched on the PARTID field of the chip ID (CSR88).
 */
static const struct pcn_variant {
	const char *pcv_desc;
	void (*pcv_mediainit)(struct pcn_softc *);
	uint16_t pcv_chipid;
} pcn_variants[] = {
	{ "Am79c970",
	  pcn_79c970_mediainit,
	  PARTID_Am79c970 },

	{ "Am79c970A",
	  pcn_79c970_mediainit,
	  PARTID_Am79c970A },

	{ "Am79c971",
	  pcn_79c971_mediainit,
	  PARTID_Am79c971 },

	{ "Am79c972",
	  pcn_79c971_mediainit,
	  PARTID_Am79c972 },

	{ "Am79c973",
	  pcn_79c971_mediainit,
	  PARTID_Am79c973 },

	{ "Am79c975",
	  pcn_79c971_mediainit,
	  PARTID_Am79c975 },

	{ "Am79c976",
	  pcn_79c971_mediainit,
	  PARTID_Am79c976 },

	{ "Am79c978",
	  pcn_79c971_mediainit,
	  PARTID_Am79c978 },

	/* Sentinel: pcv_chipid == 0 terminates pcn_lookup_variant(). */
	{ "Unknown",
	  pcn_79c971_mediainit,
	  0 },
};

/* If non-zero, copy small packets instead of remapping (unused here). */
int	pcn_copy_small = 0;

int	pcn_match(struct device *, void *, void *);
void	pcn_attach(struct device *, struct device *, void *);

struct cfattach pcn_ca = {
	sizeof(struct pcn_softc), pcn_match, pcn_attach,
};

const struct pci_matchid pcn_devices[] = {
	{ PCI_VENDOR_AMD, PCI_PRODUCT_AMD_PCNET_PCI },
	{ PCI_VENDOR_AMD, PCI_PRODUCT_AMD_PCHOME_PCI }
};

struct cfdriver pcn_cd = {
	NULL, "pcn", DV_IFNET
};

/*
 * Routines to read and write the PCnet-PCI CSR/BCR space.
 */

/*
 * pcn_csr_read:
 *
 *	Read CSR `reg': select it via RAP, then read RDP.  The two
 *	accesses must stay in this order (indirect register access).
 */
static __inline uint32_t
pcn_csr_read(struct pcn_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RAP, reg);
	return (bus_space_read_4(sc->sc_st, sc->sc_sh, PCN32_RDP));
}

/*
 * pcn_csr_write:
 *
 *	Write `val' to CSR `reg' (RAP select, then RDP write).
 */
static __inline void
pcn_csr_write(struct pcn_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RAP, reg);
	bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RDP, val);
}

/*
 * pcn_bcr_read:
 *
 *	Read BCR `reg': select it via RAP, then read BDP.
 */
static __inline uint32_t
pcn_bcr_read(struct pcn_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RAP, reg);
	return (bus_space_read_4(sc->sc_st, sc->sc_sh, PCN32_BDP));
}

/*
 * pcn_bcr_write:
 *
 *	Write `val' to BCR `reg' (RAP select, then BDP write).
 */
static __inline void
pcn_bcr_write(struct pcn_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RAP, reg);
	bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_BDP, val);
}

/*
 * pcn_lookup_variant:
 *
 *	Map a PARTID (from CSR88) to its pcn_variants[] entry.  Falls
 *	through to the "Unknown" sentinel entry when no PARTID matches.
 */
static const struct pcn_variant *
pcn_lookup_variant(uint16_t chipid)
{
	const struct pcn_variant *pcv;

	for (pcv = pcn_variants; pcv->pcv_chipid != 0; pcv++) {
		if (chipid == pcv->pcv_chipid)
			return (pcv);
	}

	/*
	 * This covers unknown chips, which we simply treat like
	 * a generic PCnet-FAST.
	 */
	return (pcv);
}

/*
 * pcn_match:	[autoconf match function]
 *
 *	Match on AMD PCnet-PCI/PChome PCI IDs, plus one special case.
 */
int
pcn_match(struct device *parent, void *match, void *aux)
{
	struct pci_attach_args *pa = aux;

	/*
	 * IBM makes a PCI variant of this card which shows up as a
	 * Trident Microsystems 4DWAVE DX (ethernet network, revision 0x25)
	 * this card is truly a pcn card, so we have a special case match for
	 * it.
	 */
	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_TRIDENT &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_TRIDENT_4DWAVE_DX &&
	    PCI_CLASS(pa->pa_class) == PCI_CLASS_NETWORK)
		return(1);

	return (pci_matchbyid((struct pci_attach_args *)aux, pcn_devices,
	    nitems(pcn_devices)));
}

/*
 * pcn_attach:	[autoconf attach function]
 *
 *	Map the device registers (preferring memory space), wake the
 *	chip from D3, identify the chip variant, hook the interrupt,
 *	allocate and map the DMA'd control data and per-packet DMA
 *	maps, and attach the network interface.
 */
void
pcn_attach(struct device *parent, struct device *self, void *aux)
{
	struct pcn_softc *sc = (struct pcn_softc *) self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	bus_space_tag_t iot, memt;
	bus_space_handle_t ioh, memh;
	bus_dma_segment_t seg;
	int ioh_valid, memh_valid;
	int i, rseg, error;
	uint32_t chipid, reg;
	uint8_t enaddr[ETHER_ADDR_LEN];
	int state;

	timeout_set(&sc->sc_tick_timeout, pcn_tick, sc);

	/*
	 * Map the device.  Try both BARs; memory space is preferred.
	 */
	ioh_valid = (pci_mapreg_map(pa, PCN_PCI_CBIO, PCI_MAPREG_TYPE_IO, 0,
	    &iot, &ioh, NULL, NULL, 0) == 0);
	memh_valid = (pci_mapreg_map(pa, PCN_PCI_CBMEM,
	    PCI_MAPREG_TYPE_MEM|PCI_MAPREG_MEM_TYPE_32BIT, 0,
	    &memt, &memh, NULL, NULL, 0) == 0);

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else if (ioh_valid) {
		sc->sc_st = iot;
		sc->sc_sh = ioh;
	} else {
		printf(": unable to map device registers\n");
		return;
	}

	sc->sc_dmat = pa->pa_dmat;

	/* Get it out of power save mode, if needed. */
	state = pci_set_powerstate(pc, pa->pa_tag, PCI_PMCSR_STATE_D0);
	if (state == PCI_PMCSR_STATE_D3) {
		/*
		 * The card has lost all configuration data in
		 * this state, so punt.
		 */
		printf(": unable to wake up from power state D3, "
		    "reboot required.\n");
		return;
	}

	/*
	 * Reset the chip to a known state.  This also puts the
	 * chip into 32-bit mode.
	 */
	pcn_reset(sc);

#if !defined(PCN_NO_PROM)

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		enaddr[i] = bus_space_read_1(sc->sc_st, sc->sc_sh,
		    PCN32_APROM + i);
#else
	/*
	 * The PROM is not used; instead we assume that the MAC address
	 * has been programmed into the device's physical address
	 * registers (CSR12-CSR14) by the boot firmware.  Each CSR
	 * holds two bytes of the address, low byte first.
	 */

	for (i=0; i < 3; i++) {
		uint32_t val;
		val = pcn_csr_read(sc, LE_CSR12 + i);
		enaddr[2*i] = val & 0x0ff;
		enaddr[2*i+1] = (val >> 8) & 0x0ff;
	}
#endif

	/*
	 * Now that the device is mapped, attempt to figure out what
	 * kind of chip we have.  Note that IDL has all 32 bits of
	 * the chip ID when we're in 32-bit mode.
	 */
	chipid = pcn_csr_read(sc, LE_CSR88);
	sc->sc_variant = pcn_lookup_variant(CHIPID_PARTID(chipid));

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		printf(": unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, pcn_intr, sc,
	    self->dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": unable to establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.  A single segment keeps the descriptor/init
	 * block addressing simple (see sc_cddma).
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct pcn_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		printf(": unable to allocate control data, error = %d\n",
		    error);
		return;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct pcn_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf(": unable to map control data, error = %d\n",
		    error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct pcn_control_data), 1,
	    sizeof(struct pcn_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf(": unable to create control data DMA map, "
		    "error = %d\n", error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct pcn_control_data), NULL,
	    0)) != 0) {
		printf(": unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	/* Create the transmit buffer DMA maps. */
	for (i = 0; i < PCN_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    PCN_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			printf(": unable to create tx DMA map %d, "
			    "error = %d\n", i, error);
			goto fail_4;
		}
	}

	/* Create the receive buffer DMA maps. */
	for (i = 0; i < PCN_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			printf(": unable to create rx DMA map %d, "
			    "error = %d\n", i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	printf(", %s, rev %d: %s, address %s\n", sc->sc_variant->pcv_desc,
	    CHIPID_VER(chipid), intrstr, ether_sprintf(enaddr));

	/* Initialize our media structures. */
	(*sc->sc_variant->pcv_mediainit)(sc);

	/*
	 * Initialize FIFO watermark info.
	 */
	switch (sc->sc_variant->pcv_chipid) {
	case PARTID_Am79c970:
	case PARTID_Am79c970A:
		sc->sc_rcvfw_desc = pcn_79c970_rcvfw;
		sc->sc_xmtsp_desc = pcn_79c970_xmtsp;
		sc->sc_xmtfw_desc = pcn_79c970_xmtfw;
		break;

	default:
		sc->sc_rcvfw_desc = pcn_79c971_rcvfw;
		/*
		 * Read BCR25 to determine how much SRAM is
		 * on the board.  If > 0, then the chip
		 * uses different Start Point thresholds.
		 *
		 * Note BCR25 and BCR26 are loaded from the
		 * EEPROM on RST, and unaffected by S_RESET,
		 * so we don't really have to worry about
		 * them except for this.
		 */
		reg = pcn_bcr_read(sc, LE_BCR25) & 0x00ff;
		if (reg != 0)
			sc->sc_xmtsp_desc = pcn_79c971_xmtsp_sram;
		else
			sc->sc_xmtsp_desc = pcn_79c971_xmtsp;
		sc->sc_xmtfw_desc = pcn_79c971_xmtfw;
		break;
	}

	/*
	 * Set up defaults -- see the tables above for what these
	 * values mean.
	 *
	 * XXX How should we tune RCVFW and XMTFW?
	 */
	sc->sc_rcvfw = 1;	/* minimum for full-duplex */
	sc->sc_xmtsp = 1;
	sc->sc_xmtfw = 0;

	ifp = &sc->sc_arpcom.ac_if;
	bcopy(enaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = pcn_ioctl;
	ifp->if_start = pcn_start;
	ifp->if_watchdog = pcn_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, PCN_NTXDESC -1);
	IFQ_SET_READY(&ifp->if_snd);

	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < PCN_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < PCN_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct pcn_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
}

/*
 * pcn_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.  Dequeues packets,
 *	DMA-loads them (copying into a fresh mbuf if the chain is too
 *	fragmented), fills in Tx descriptors, and kicks the chip.
 */
void
pcn_start(struct ifnet *ifp)
{
	struct pcn_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct pcn_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx = -1, ofree, seg;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		/* Get a work queue entry. */
		if (sc->sc_txsfree == 0)
			break;

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  In this case, we'll copy
		 * and try again.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				break;
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					m_freem(m);
					break;
				}
			}
			/* Flatten the chain into the single new mbuf. */
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error)
				break;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring as a termination point, to
		 * prevent wrap-around.
		 */
		if (dmamap->dm_nsegs > (sc->sc_txfree - 1)) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 *
			 * XXX We could allocate an mbuf and copy, but
			 * XXX is it worth it?
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			if (m != NULL)
				m_freem(m);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			/* The copy is what we hand to the chip. */
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptors.  The buffer
		 * address lives in tmd2 for SWSTYLE 3, in tmd0 otherwise.
		 */
		if (sc->sc_swstyle == LE_B20_SSTYLE_PCNETPCI3) {
			for (nexttx = sc->sc_txnext, seg = 0;
			     seg < dmamap->dm_nsegs;
			     seg++, nexttx = PCN_NEXTTX(nexttx)) {
				/*
				 * If this is the first descriptor we're
				 * enqueueing, don't set the OWN bit just
				 * yet.  That could cause a race condition.
				 * We'll do it below.
				 */
				sc->sc_txdescs[nexttx].tmd0 = 0;
				sc->sc_txdescs[nexttx].tmd2 =
				    htole32(dmamap->dm_segs[seg].ds_addr);
				sc->sc_txdescs[nexttx].tmd1 =
				    htole32(LE_T1_ONES |
				    (nexttx == sc->sc_txnext ? 0 : LE_T1_OWN) |
				    (LE_BCNT(dmamap->dm_segs[seg].ds_len) &
				     LE_T1_BCNT_MASK));
				lasttx = nexttx;
			}
		} else {
			for (nexttx = sc->sc_txnext, seg = 0;
			     seg < dmamap->dm_nsegs;
			     seg++, nexttx = PCN_NEXTTX(nexttx)) {
				/*
				 * If this is the first descriptor we're
				 * enqueueing, don't set the OWN bit just
				 * yet.  That could cause a race condition.
				 * We'll do it below.
				 */
				sc->sc_txdescs[nexttx].tmd0 =
				    htole32(dmamap->dm_segs[seg].ds_addr);
				sc->sc_txdescs[nexttx].tmd2 = 0;
				sc->sc_txdescs[nexttx].tmd1 =
				    htole32(LE_T1_ONES |
				    (nexttx == sc->sc_txnext ? 0 : LE_T1_OWN) |
				    (LE_BCNT(dmamap->dm_segs[seg].ds_len) &
				     LE_T1_BCNT_MASK));
				lasttx = nexttx;
			}
		}

		KASSERT(lasttx != -1);
		/* Interrupt on the packet, if appropriate. */
		if ((sc->sc_txsnext & PCN_TXINTR_MASK) == 0)
			sc->sc_txdescs[lasttx].tmd1 |= htole32(LE_T1_LTINT);

		/* Set `start of packet' and `end of packet' appropriately. */
		sc->sc_txdescs[lasttx].tmd1 |= htole32(LE_T1_ENP);
		sc->sc_txdescs[sc->sc_txnext].tmd1 |=
		    htole32(LE_T1_OWN|LE_T1_STP);

		/* Sync the descriptors we're using. */
		PCN_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Kick the transmitter. */
		pcn_csr_write(sc, LE_CSR0, LE_C0_INEA|LE_C0_TDMD);

		/*
		 * Store a pointer to the packet so we can free it later,
		 * and remember what txdirty will be once the packet is
		 * done.
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_lastdesc = lasttx;

		/* Advance the tx pointer.
		 */
		sc->sc_txfree -= dmamap->dm_nsegs;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = PCN_NEXTTXS(sc->sc_txsnext);

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif /* NBPFILTER > 0 */
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree == 0) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * pcn_watchdog:	[ifnet interface function]
 *
 *	Watchdog timer handler.  Reaps completed transmissions first;
 *	only if descriptors are still outstanding is it a real timeout.
 */
void
pcn_watchdog(struct ifnet *ifp)
{
	struct pcn_softc *sc = ifp->if_softc;

	/*
	 * Since we're not interrupting every packet, sweep
	 * up before we report an error.
	 */
	pcn_txintr(sc);

	if (sc->sc_txfree != PCN_NTXDESC) {
		printf("%s: device timeout (txfree %d txsfree %d)\n",
		    sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree);
		ifp->if_oerrors++;

		/* Reset the interface. */
		(void) pcn_init(ifp);
	}

	/* Try to get more packets going. */
	pcn_start(ifp);
}

/*
 * pcn_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
int
pcn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct pcn_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *) data;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error = 0;

	/* Serialize against the interrupt handler. */
	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;

		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			pcn_init(ifp);
			arp_ifinit(&sc->sc_arpcom, ifa);
			break;
#endif
		default:
			pcn_init(ifp);
			break;
		}
		break;

	case SIOCSIFFLAGS:
		/*
		 * If interface is marked up and not running, then start it.
		 * If it is marked down and running, stop it.
		 * XXX If it's up then re-initialize it. This is so flags
		 * such as IFF_PROMISC are handled.
		 */
		if (ifp->if_flags & IFF_UP)
			pcn_init(ifp);
		else if (ifp->if_flags & IFF_RUNNING)
			pcn_stop(ifp, 1);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	/* ENETRESET means the multicast filter changed; reprogram if up. */
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			error = pcn_init(ifp);
		else
			error = 0;
	}

	/* Try to get more packets going. */
	pcn_start(ifp);

	splx(s);
	return (error);
}

/*
 * pcn_intr:
 *
 *	Interrupt service routine.  Loops while CSR0 reports a pending
 *	interrupt, dispatching Rx/Tx work and accounting for errors;
 *	fatal conditions force a full re-init of the interface.
 */
int
pcn_intr(void *arg)
{
	struct pcn_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint32_t csr0;
	int wantinit, handled = 0;

	for (wantinit = 0; wantinit == 0;) {
		csr0 = pcn_csr_read(sc, LE_CSR0);
		if ((csr0 & LE_C0_INTR) == 0)
			break;

		/* ACK the bits and re-enable interrupts.
*/ 1140 pcn_csr_write(sc, LE_CSR0, csr0 & 1141 (LE_C0_INEA|LE_C0_BABL|LE_C0_MISS|LE_C0_MERR|LE_C0_RINT| 1142 LE_C0_TINT|LE_C0_IDON)); 1143 1144 handled = 1; 1145 1146 if (csr0 & LE_C0_RINT) 1147 wantinit = pcn_rxintr(sc); 1148 1149 if (csr0 & LE_C0_TINT) 1150 pcn_txintr(sc); 1151 1152 if (csr0 & LE_C0_ERR) { 1153 if (csr0 & LE_C0_BABL) 1154 ifp->if_oerrors++; 1155 if (csr0 & LE_C0_MISS) 1156 ifp->if_ierrors++; 1157 if (csr0 & LE_C0_MERR) { 1158 printf("%s: memory error\n", 1159 sc->sc_dev.dv_xname); 1160 wantinit = 1; 1161 break; 1162 } 1163 } 1164 1165 if ((csr0 & LE_C0_RXON) == 0) { 1166 printf("%s: receiver disabled\n", 1167 sc->sc_dev.dv_xname); 1168 ifp->if_ierrors++; 1169 wantinit = 1; 1170 } 1171 1172 if ((csr0 & LE_C0_TXON) == 0) { 1173 printf("%s: transmitter disabled\n", 1174 sc->sc_dev.dv_xname); 1175 ifp->if_oerrors++; 1176 wantinit = 1; 1177 } 1178 } 1179 1180 if (handled) { 1181 if (wantinit) 1182 pcn_init(ifp); 1183 1184 /* Try to get more packets going. */ 1185 pcn_start(ifp); 1186 } 1187 1188 return (handled); 1189 } 1190 1191 /* 1192 * pcn_spnd: 1193 * 1194 * Suspend the chip. 1195 */ 1196 void 1197 pcn_spnd(struct pcn_softc *sc) 1198 { 1199 int i; 1200 1201 pcn_csr_write(sc, LE_CSR5, sc->sc_csr5 | LE_C5_SPND); 1202 1203 for (i = 0; i < 10000; i++) { 1204 if (pcn_csr_read(sc, LE_CSR5) & LE_C5_SPND) 1205 return; 1206 delay(5); 1207 } 1208 1209 printf("%s: WARNING: chip failed to enter suspended state\n", 1210 sc->sc_dev.dv_xname); 1211 } 1212 1213 /* 1214 * pcn_txintr: 1215 * 1216 * Helper; handle transmit interrupts. 1217 */ 1218 void 1219 pcn_txintr(struct pcn_softc *sc) 1220 { 1221 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1222 struct pcn_txsoft *txs; 1223 uint32_t tmd1, tmd2, tmd; 1224 int i, j; 1225 1226 ifp->if_flags &= ~IFF_OACTIVE; 1227 1228 /* 1229 * Go through our Tx list and free mbufs for those 1230 * frames which have been transmitted. 
 */
	for (i = sc->sc_txsdirty; sc->sc_txsfree != PCN_TXQUEUELEN;
	     i = PCN_NEXTTXS(i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		PCN_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		/* Chip still owns the last descriptor: job not done yet. */
		tmd1 = letoh32(sc->sc_txdescs[txs->txs_lastdesc].tmd1);
		if (tmd1 & LE_T1_OWN)
			break;

		/*
		 * Slightly annoying -- we have to loop through the
		 * descriptors we've used looking for ERR, since it
		 * can appear on any descriptor in the chain.
		 */
		for (j = txs->txs_firstdesc;; j = PCN_NEXTTX(j)) {
			tmd = letoh32(sc->sc_txdescs[j].tmd1);
			if (tmd & LE_T1_ERR) {
				ifp->if_oerrors++;
				/*
				 * SSTYLE 3 keeps the error status in
				 * tmd0 instead of tmd2.
				 */
				if (sc->sc_swstyle == LE_B20_SSTYLE_PCNETPCI3)
					tmd2 = letoh32(sc->sc_txdescs[j].tmd0);
				else
					tmd2 = letoh32(sc->sc_txdescs[j].tmd2);
				if (tmd2 & LE_T2_UFLO) {
					/*
					 * On underrun, bump the transmit
					 * start point (XMTSP) if we can,
					 * suspending the chip around the
					 * CSR80 update.
					 */
					if (sc->sc_xmtsp < LE_C80_XMTSP_MAX) {
						sc->sc_xmtsp++;
						printf("%s: transmit "
						    "underrun; new threshold: "
						    "%s\n",
						    sc->sc_dev.dv_xname,
						    sc->sc_xmtsp_desc[
						    sc->sc_xmtsp]);
						pcn_spnd(sc);
						pcn_csr_write(sc, LE_CSR80,
						    LE_C80_RCVFW(sc->sc_rcvfw) |
						    LE_C80_XMTSP(sc->sc_xmtsp) |
						    LE_C80_XMTFW(sc->sc_xmtfw));
						pcn_csr_write(sc, LE_CSR5,
						    sc->sc_csr5);
					} else {
						printf("%s: transmit "
						    "underrun\n",
						    sc->sc_dev.dv_xname);
					}
				} else if (tmd2 & LE_T2_BUFF) {
					printf("%s: transmit buffer error\n",
					    sc->sc_dev.dv_xname);
				}
				if (tmd2 & LE_T2_LCOL)
					ifp->if_collisions++;
				if (tmd2 & LE_T2_RTRY)
					ifp->if_collisions += 16;
				goto next_packet;
			}
			if (j == txs->txs_lastdesc)
				break;
		}
		if (tmd1 & LE_T1_ONE)
			ifp->if_collisions++;
		else if (tmd & LE_T1_MORE) {
			/* Real number is unknown. */
			ifp->if_collisions += 2;
		}
		ifp->if_opackets++;
 next_packet:
		/* Return the descriptors and the mbuf for this job. */
		sc->sc_txfree += txs->txs_dmamap->dm_nsegs;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txsdirty = i;

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->sc_txsfree == PCN_TXQUEUELEN)
		ifp->if_timer = 0;
}

/*
 * pcn_rxintr:
 *
 *	Helper; handle receive interrupts.  Returns non-zero if the
 *	caller (pcn_intr()) must re-initialize the interface.
 */
int
pcn_rxintr(struct pcn_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct pcn_rxsoft *rxs;
	struct mbuf *m;
	uint32_t rmd1;
	int i, len;

	for (i = sc->sc_rxptr;; i = PCN_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		PCN_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		rmd1 = letoh32(sc->sc_rxdescs[i].rmd1);

		/* Chip still owns this descriptor; nothing more to do. */
		if (rmd1 & LE_R1_OWN)
			break;

		/*
		 * Check for errors and make sure the packet fit into
		 * a single buffer.  We have structured this block of
		 * code the way it is in order to compress it into
		 * one test in the common case (no error).
		 */
		if (__predict_false((rmd1 & (LE_R1_STP|LE_R1_ENP|LE_R1_ERR)) !=
		    (LE_R1_STP|LE_R1_ENP))) {
			/* Make sure the packet is in a single buffer. */
			if ((rmd1 & (LE_R1_STP|LE_R1_ENP)) !=
			    (LE_R1_STP|LE_R1_ENP)) {
				printf("%s: packet spilled into next buffer\n",
				    sc->sc_dev.dv_xname);
				return (1);	/* pcn_intr() will re-init */
			}

			/*
			 * If the packet had an error, simply recycle the
			 * buffer.
			 */
			if (rmd1 & LE_R1_ERR) {
				ifp->if_ierrors++;
				/*
				 * If we got an overflow error, chances
				 * are there will be a CRC error.
				 * In this case, just print the overflow
				 * error, and skip the others.
				 */
				if (rmd1 & LE_R1_OFLO)
					printf("%s: overflow error\n",
					    sc->sc_dev.dv_xname);
				else {
#define	PRINTIT(x, str) \
	if (rmd1 & (x)) \
		printf("%s: %s\n", \
		    sc->sc_dev.dv_xname, str);
					PRINTIT(LE_R1_FRAM, "framing error");
					PRINTIT(LE_R1_CRC, "CRC error");
					PRINTIT(LE_R1_BUFF, "buffer error");
				}
#undef PRINTIT
				/* Hand the buffer back to the chip. */
				PCN_INIT_RXDESC(sc, i);
				continue;
			}
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * No errors; receive the packet.  SSTYLE 3 keeps the
		 * byte count in rmd0 instead of rmd2.
		 */
		if (sc->sc_swstyle == LE_B20_SSTYLE_PCNETPCI3)
			len = letoh32(sc->sc_rxdescs[i].rmd0) & LE_R1_BCNT_MASK;
		else
			len = letoh32(sc->sc_rxdescs[i].rmd2) & LE_R1_BCNT_MASK;

		/*
		 * The LANCE family includes the CRC with every packet;
		 * trim it off here.
		 */
		len -= ETHER_CRC_LEN;

		/*
		 * If the packet is small enough to fit in a
		 * single header mbuf, allocate one and copy
		 * the data into it.  This greatly reduces
		 * memory consumption when we receive lots
		 * of small packets.
		 *
		 * Otherwise, we add a new buffer to the receive
		 * chain.  If this fails, we drop the packet and
		 * recycle the old buffer.
		 */
		if (pcn_copy_small != 0 && len <= (MHLEN - 2)) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				goto dropit;
			/* +2 keeps the IP header 4-byte aligned. */
			m->m_data += 2;
			memcpy(mtod(m, caddr_t),
			    mtod(rxs->rxs_mbuf, caddr_t), len);
			PCN_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
		} else {
			m = rxs->rxs_mbuf;
			if (pcn_add_rxbuf(sc, i) != 0) {
 dropit:
				ifp->if_ierrors++;
				PCN_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat,
				    rxs->rxs_dmamap, 0,
				    rxs->rxs_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}
		}

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

#if NBPFILTER > 0
		/* Pass this up to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		ether_input_mbuf(ifp, m);
		ifp->if_ipackets++;
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;
	return (0);
}

/*
 * pcn_tick:
 *
 *	One second timer, used to tick the MII.  Re-arms itself.
 */
void
pcn_tick(void *arg)
{
	struct pcn_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add_sec(&sc->sc_tick_timeout, 1);
}

/*
 * pcn_reset:
 *
 *	Perform a soft reset on the PCnet-PCI, leaving the chip in
 *	32-bit I/O mode.
 */
void
pcn_reset(struct pcn_softc *sc)
{

	/*
	 * The PCnet-PCI chip is reset by reading from the
	 * RESET register.  Note that while the NE2100 LANCE
	 * boards require a write after the read, the PCnet-PCI
	 * chips do not require this.
	 *
	 * Since we don't know if we're in 16-bit or 32-bit
	 * mode right now, issue both (it's safe) in the
	 * hopes that one will succeed.
 */
	(void) bus_space_read_2(sc->sc_st, sc->sc_sh, PCN16_RESET);
	(void) bus_space_read_4(sc->sc_st, sc->sc_sh, PCN32_RESET);

	/* Wait 1ms for it to finish. */
	delay(1000);

	/*
	 * Select 32-bit I/O mode by issuing a 32-bit write to the
	 * RDP.  Since the RAP is 0 after a reset, writing a 0
	 * to RDP is safe (since it simply clears CSR0).
	 */
	bus_space_write_4(sc->sc_st, sc->sc_sh, PCN32_RDP, 0);
}

/*
 * pcn_init:		[ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 *	Returns 0 on success or an errno (EIO on init-block timeout,
 *	or the error from pcn_add_rxbuf()).
 */
int
pcn_init(struct ifnet *ifp)
{
	struct pcn_softc *sc = ifp->if_softc;
	struct pcn_rxsoft *rxs;
	uint8_t *enaddr = LLADDR(ifp->if_sadl);
	int i, error = 0;
	uint32_t reg;

	/* Cancel any pending I/O. */
	pcn_stop(ifp, 0);

	/* Reset the chip to a known state. */
	pcn_reset(sc);

	/*
	 * On the Am79c970, select SSTYLE 2, and SSTYLE 3 on everything
	 * else.
	 *
	 * XXX It'd be really nice to use SSTYLE 2 on all the chips,
	 * because the structure layout is compatible with ILACC,
	 * but the burst mode is only available in SSTYLE 3, and
	 * burst mode should provide some performance enhancement.
	 */
	if (sc->sc_variant->pcv_chipid == PARTID_Am79c970)
		sc->sc_swstyle = LE_B20_SSTYLE_PCNETPCI2;
	else
		sc->sc_swstyle = LE_B20_SSTYLE_PCNETPCI3;
	pcn_bcr_write(sc, LE_BCR20, sc->sc_swstyle);

	/* Initialize the transmit descriptor ring. */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	PCN_CDTXSYNC(sc, 0, PCN_NTXDESC,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = PCN_NTXDESC;
	sc->sc_txnext = 0;

	/* Initialize the transmit job descriptors. */
	for (i = 0; i < PCN_TXQUEUELEN; i++)
		sc->sc_txsoft[i].txs_mbuf = NULL;
	sc->sc_txsfree = PCN_TXQUEUELEN;
	sc->sc_txsnext = 0;
	sc->sc_txsdirty = 0;

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	for (i = 0; i < PCN_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = pcn_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				pcn_rxdrain(sc);
				goto out;
			}
		} else
			PCN_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	/* Initialize MODE for the initialization block. */
	sc->sc_mode = 0;
	if (ifp->if_flags & IFF_PROMISC)
		sc->sc_mode |= LE_C15_PROM;
	if ((ifp->if_flags & IFF_BROADCAST) == 0)
		sc->sc_mode |= LE_C15_DRCVBC;

	/*
	 * If we have MII, simply select MII in the MODE register,
	 * and clear ASEL.  Otherwise, let ASEL stand (for now),
	 * and leave PORTSEL alone (it is ignored when ASEL is set).
	 */
	if (sc->sc_flags & PCN_F_HAS_MII) {
		pcn_bcr_write(sc, LE_BCR2,
		    pcn_bcr_read(sc, LE_BCR2) & ~LE_B2_ASEL);
		sc->sc_mode |= LE_C15_PORTSEL(PORTSEL_MII);

		/*
		 * Disable MII auto-negotiation.  We handle that in
		 * our own MII layer.
		 */
		pcn_bcr_write(sc, LE_BCR32,
		    pcn_bcr_read(sc, LE_BCR32) | LE_B32_DANAS);
	}

	/*
	 * Set the Tx and Rx descriptor ring addresses in the init
	 * block, along with the TLEN and RLEN (log2 ring size)
	 * fields of the init block MODE register.
	 */
	sc->sc_initblock.init_rdra = htole32(PCN_CDRXADDR(sc, 0));
	sc->sc_initblock.init_tdra = htole32(PCN_CDTXADDR(sc, 0));
	sc->sc_initblock.init_mode = htole32(sc->sc_mode |
	    ((ffs(PCN_NTXDESC) - 1) << 28) |
	    ((ffs(PCN_NRXDESC) - 1) << 20));

	/* Set the station address in the init block (little-endian). */
	sc->sc_initblock.init_padr[0] = htole32(enaddr[0] |
	    (enaddr[1] << 8) | (enaddr[2] << 16) | (enaddr[3] << 24));
	sc->sc_initblock.init_padr[1] = htole32(enaddr[4] |
	    (enaddr[5] << 8));

	/* Set the multicast filter in the init block. */
	pcn_set_filter(sc);

	/* Initialize CSR3. */
	pcn_csr_write(sc, LE_CSR3, LE_C3_MISSM|LE_C3_IDONM|LE_C3_DXSUFLO);

	/* Initialize CSR4. */
	pcn_csr_write(sc, LE_CSR4, LE_C4_DMAPLUS|LE_C4_APAD_XMT|
	    LE_C4_MFCOM|LE_C4_RCVCCOM|LE_C4_TXSTRTM);

	/* Initialize CSR5; keep a shadow copy for pcn_spnd(). */
	sc->sc_csr5 = LE_C5_LTINTEN|LE_C5_SINTE;
	pcn_csr_write(sc, LE_CSR5, sc->sc_csr5);

	/*
	 * If we have an Am79c971 or greater, initialize CSR7.
	 *
	 * XXX Might be nice to use the MII auto-poll interrupt someday.
	 */
	switch (sc->sc_variant->pcv_chipid) {
	case PARTID_Am79c970:
	case PARTID_Am79c970A:
		/* Not available on these chips. */
		break;

	default:
		pcn_csr_write(sc, LE_CSR7, LE_C7_FASTSPNDE);
		break;
	}

	/*
	 * On the Am79c970A and greater, initialize BCR18 to
	 * enable burst mode.
	 *
	 * Also enable the "no underflow" option on the Am79c971 and
	 * higher, which prevents the chip from generating transmit
	 * underflows, yet still provides decent performance.  Note if
	 * chip is not connected to external SRAM, then we still have
	 * to handle underflow errors (the NOUFLO bit is ignored in
	 * that case).
	 */
	reg = pcn_bcr_read(sc, LE_BCR18);
	switch (sc->sc_variant->pcv_chipid) {
	case PARTID_Am79c970:
		break;

	case PARTID_Am79c970A:
		reg |= LE_B18_BREADE|LE_B18_BWRITE;
		break;

	default:
		reg |= LE_B18_BREADE|LE_B18_BWRITE|LE_B18_NOUFLO;
		break;
	}
	pcn_bcr_write(sc, LE_BCR18, reg);

	/*
	 * Initialize CSR80 (FIFO thresholds for Tx and Rx).
	 */
	pcn_csr_write(sc, LE_CSR80, LE_C80_RCVFW(sc->sc_rcvfw) |
	    LE_C80_XMTSP(sc->sc_xmtsp) | LE_C80_XMTFW(sc->sc_xmtfw));

	/*
	 * Send the init block to the chip, and wait for it
	 * to be processed.  CSR1/CSR2 take the low/high halves
	 * of the init block's DMA address.
	 */
	PCN_CDINITSYNC(sc, BUS_DMASYNC_PREWRITE);
	pcn_csr_write(sc, LE_CSR1, PCN_CDINITADDR(sc) & 0xffff);
	pcn_csr_write(sc, LE_CSR2, (PCN_CDINITADDR(sc) >> 16) & 0xffff);
	pcn_csr_write(sc, LE_CSR0, LE_C0_INIT);
	delay(100);
	for (i = 0; i < 10000; i++) {
		if (pcn_csr_read(sc, LE_CSR0) & LE_C0_IDON)
			break;
		delay(10);
	}
	PCN_CDINITSYNC(sc, BUS_DMASYNC_POSTWRITE);
	if (i == 10000) {
		printf("%s: timeout processing init block\n",
		    sc->sc_dev.dv_xname);
		error = EIO;
		goto out;
	}

	/* Set the media. */
	(void) (*sc->sc_mii.mii_media.ifm_change)(ifp);

	/* Enable interrupts and external activity (and ACK IDON). */
	pcn_csr_write(sc, LE_CSR0, LE_C0_INEA|LE_C0_STRT|LE_C0_IDON);

	if (sc->sc_flags & PCN_F_HAS_MII) {
		/* Start the one second MII clock. */
		timeout_add_sec(&sc->sc_tick_timeout, 1);
	}

	/* ...all done! */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error)
		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
	return (error);
}

/*
 * pcn_rxdrain:
 *
 *	Drain the receive queue.
 */
void
pcn_rxdrain(struct pcn_softc *sc)
{
	struct pcn_rxsoft *rxs;
	int i;

	/* Unload and free every mbuf still held in the Rx ring. */
	for (i = 0; i < PCN_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}
}

/*
 * pcn_stop:		[ifnet interface function]
 *
 *	Stop transmission on the interface.  If "disable" is non-zero,
 *	the receive buffers are drained as well.
 */
void
pcn_stop(struct ifnet *ifp, int disable)
{
	struct pcn_softc *sc = ifp->if_softc;
	struct pcn_txsoft *txs;
	int i;

	if (sc->sc_flags & PCN_F_HAS_MII) {
		/* Stop the one second clock. */
		timeout_del(&sc->sc_tick_timeout);

		/* Down the MII. */
		mii_down(&sc->sc_mii);
	}

	/* Mark the interface as down and cancel the watchdog timer. */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	/* Stop the chip. */
	pcn_csr_write(sc, LE_CSR0, LE_C0_STOP);

	/* Release any queued transmit buffers. */
	for (i = 0; i < PCN_TXQUEUELEN; i++) {
		txs = &sc->sc_txsoft[i];
		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}
	}

	if (disable)
		pcn_rxdrain(sc);
}

/*
 * pcn_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.  Returns 0
 *	on success or ENOBUFS if no mbuf/cluster is available; panics
 *	if the DMA map cannot be loaded.
 */
int
pcn_add_rxbuf(struct pcn_softc *sc, int idx)
{
	struct pcn_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	/* Replace any buffer previously mapped at this slot. */
	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, error);
		panic("pcn_add_rxbuf");
	}

	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	/* Hand the descriptor to the chip. */
	PCN_INIT_RXDESC(sc, idx);

	return (0);
}

/*
 * pcn_set_filter:
 *
 *	Set up the receive filter (in the init block; the caller is
 *	responsible for pushing the init block to the chip).
 */
void
pcn_set_filter(struct pcn_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t crc;

	/*
	 * Set up the multicast address filter by passing all multicast
	 * addresses through a CRC generator, and then using the high
	 * order 6 bits as an index into the 64-bit logical address
	 * filter.  The high order bits select the word, while the rest
	 * of the bits select the bit within the word.
	 */

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC)
		goto allmulti;

	sc->sc_initblock.init_ladrf[0] =
	    sc->sc_initblock.init_ladrf[1] =
	    sc->sc_initblock.init_ladrf[2] =
	    sc->sc_initblock.init_ladrf[3] = 0;

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}

		crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);

		/* Just want the 6 most significant bits. */
		crc >>= 26;

		/* Set the corresponding bit in the filter. */
		sc->sc_initblock.init_ladrf[crc >> 4] |=
		    htole16(1 << (crc & 0xf));

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	return;

 allmulti:
	/* Accept all multicast: open the logical address filter fully. */
	ifp->if_flags |= IFF_ALLMULTI;
	sc->sc_initblock.init_ladrf[0] =
	    sc->sc_initblock.init_ladrf[1] =
	    sc->sc_initblock.init_ladrf[2] =
	    sc->sc_initblock.init_ladrf[3] = 0xffff;
}

/*
 * pcn_79c970_mediainit:
 *
 *	Initialize media for the Am79c970.
 */
void
pcn_79c970_mediainit(struct pcn_softc *sc)
{
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, pcn_79c970_mediachange,
	    pcn_79c970_mediastatus);

	/*
	 * Full-duplex variants are only registered on the Am79c970A;
	 * the plain Am79c970 gets half-duplex entries only.
	 */
	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_10_5,
	    PORTSEL_AUI, NULL);
	if (sc->sc_variant->pcv_chipid == PARTID_Am79c970A)
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_10_5|IFM_FDX,
		    PORTSEL_AUI, NULL);

	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_10_T,
	    PORTSEL_10T, NULL);
	if (sc->sc_variant->pcv_chipid == PARTID_Am79c970A)
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_10_T|IFM_FDX,
		    PORTSEL_10T, NULL);

	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO,
	    0, NULL);
	if (sc->sc_variant->pcv_chipid == PARTID_Am79c970A)
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO|IFM_FDX,
		    0, NULL);

	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
}

/*
 * pcn_79c970_mediastatus:	[ifmedia interface function]
 *
 *	Get the current interface media status (Am79c970 version).
 */
void
pcn_79c970_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct pcn_softc *sc = ifp->if_softc;

	/*
	 * The currently selected media is always the active media.
	 * Note: We have no way to determine what media the AUTO
	 * process picked.
	 */
	ifmr->ifm_active = sc->sc_mii.mii_media.ifm_media;
}

/*
 * pcn_79c970_mediachange:	[ifmedia interface function]
 *
 *	Set hardware to newly-selected media (Am79c970 version).
 *	Always returns 0.
 */
int
pcn_79c970_mediachange(struct ifnet *ifp)
{
	struct pcn_softc *sc = ifp->if_softc;
	uint32_t reg;

	if (IFM_SUBTYPE(sc->sc_mii.mii_media.ifm_media) == IFM_AUTO) {
		/*
		 * CSR15:PORTSEL doesn't matter.  Just set BCR2:ASEL.
		 */
		reg = pcn_bcr_read(sc, LE_BCR2);
		reg |= LE_B2_ASEL;
		pcn_bcr_write(sc, LE_BCR2, reg);
	} else {
		/*
		 * Clear BCR2:ASEL and set the new CSR15:PORTSEL value.
		 */
		reg = pcn_bcr_read(sc, LE_BCR2);
		reg &= ~LE_B2_ASEL;
		pcn_bcr_write(sc, LE_BCR2, reg);

		reg = pcn_csr_read(sc, LE_CSR15);
		reg = (reg & ~LE_C15_PORTSEL(PORTSEL_MASK)) |
		    LE_C15_PORTSEL(sc->sc_mii.mii_media.ifm_cur->ifm_data);
		pcn_csr_write(sc, LE_CSR15, reg);
	}

	/* Program full-duplex control (BCR9) to match the selection. */
	if ((sc->sc_mii.mii_media.ifm_media & IFM_FDX) != 0) {
		reg = LE_B9_FDEN;
		/* AUI full-duplex needs the extra AUIFD bit as well. */
		if (IFM_SUBTYPE(sc->sc_mii.mii_media.ifm_media) == IFM_10_5)
			reg |= LE_B9_AUIFD;
		pcn_bcr_write(sc, LE_BCR9, reg);
	} else
		pcn_bcr_write(sc, LE_BCR9, 0);

	return (0);
}

/*
 * pcn_79c971_mediainit:
 *
 *	Initialize media for the Am79c971.
 */
void
pcn_79c971_mediainit(struct pcn_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;

	/* We have MII. */
	sc->sc_flags |= PCN_F_HAS_MII;

	/*
	 * The built-in 10BASE-T interface is mapped to the MII
	 * on the PCNet-FAST.  Unfortunately, there's no EEPROM
	 * word that tells us which PHY to use.
	 * This driver used to ignore all but the first PHY to
	 * answer, but this code was removed to support multiple
	 * external PHYs.  As the default instance will be the first
	 * one to answer, no harm is done by letting the possibly
	 * non-connected internal PHY show up.
	 */

	/* Initialize our media structures and probe the MII. */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = pcn_mii_readreg;
	sc->sc_mii.mii_writereg = pcn_mii_writereg;
	sc->sc_mii.mii_statchg = pcn_mii_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, pcn_79c971_mediachange,
	    pcn_79c971_mediastatus);

	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		/* No PHY answered; expose a "none" media entry. */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
}

/*
 * pcn_79c971_mediastatus:	[ifmedia interface function]
 *
 *	Get the current interface media status (Am79c971 version).
 */
void
pcn_79c971_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct pcn_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
}

/*
 * pcn_79c971_mediachange:	[ifmedia interface function]
 *
 *	Set hardware to newly-selected media (Am79c971 version).
 *	Always returns 0.
 */
int
pcn_79c971_mediachange(struct ifnet *ifp)
{
	struct pcn_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_UP)
		mii_mediachg(&sc->sc_mii);
	return (0);
}

/*
 * pcn_mii_readreg:	[mii interface function]
 *
 *	Read a PHY register on the MII.  An all-ones result (no PHY
 *	responding) is mapped to 0.
 */
int
pcn_mii_readreg(struct device *self, int phy, int reg)
{
	struct pcn_softc *sc = (void *) self;
	uint32_t rv;

	/* BCR33 selects the PHY/register; BCR34 holds the data. */
	pcn_bcr_write(sc, LE_BCR33, reg | (phy << PHYAD_SHIFT));
	rv = pcn_bcr_read(sc, LE_BCR34) & LE_B34_MIIMD;
	if (rv == 0xffff)
		return (0);

	return (rv);
}

/*
 * pcn_mii_writereg:	[mii interface function]
 *
 *	Write a PHY register on the MII.
 */
void
pcn_mii_writereg(struct device *self, int phy, int reg, int val)
{
	struct pcn_softc *sc = (void *) self;

	/* BCR33 selects the PHY/register; BCR34 holds the data. */
	pcn_bcr_write(sc, LE_BCR33, reg | (phy << PHYAD_SHIFT));
	pcn_bcr_write(sc, LE_BCR34, val);
}

/*
 * pcn_mii_statchg:	[mii interface function]
 *
 *	Callback from MII layer when media changes; mirrors the
 *	negotiated duplex into BCR9.
 */
void
pcn_mii_statchg(struct device *self)
{
	struct pcn_softc *sc = (void *) self;

	if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0)
		pcn_bcr_write(sc, LE_BCR9, LE_B9_FDEN);
	else
		pcn_bcr_write(sc, LE_BCR9, 0);
}