/*
 * Copyright (c) 2000 Berkeley Software Design, Inc.
 * Copyright (c) 1997, 1998, 1999, 2000
 *	Bill Paul <wpaul@osd.bsdi.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/pci/if_pcn.c,v 1.5.2.10 2003/03/05 18:42:33 njl Exp $
 * $DragonFly: src/sys/dev/netif/pcn/if_pcn.c,v 1.24 2005/10/12 17:35:52 dillon Exp $
 */

/*
 * AMD Am79c972 fast ethernet PCI NIC driver.  Datasheets are available
 * from http://www.amd.com.
 *
 * Written by Bill Paul <wpaul@osd.bsdi.com>
 */

/*
 * The AMD PCnet/PCI controllers are more advanced and functional
 * versions of the venerable 7990 LANCE.  The PCnet/PCI chips retain
 * backwards compatibility with the LANCE and thus can be made
 * to work with older LANCE drivers.  This is in fact how the
 * PCnet/PCI chips were supported in FreeBSD originally.  The trouble
 * is that the PCnet/PCI devices offer several performance enhancements
 * which can't be exploited in LANCE compatibility mode.  Chief among
 * these enhancements is the ability to perform PCI DMA operations
 * using 32-bit addressing (which eliminates the need for ISA
 * bounce-buffering), and special receive buffer alignment (which
 * allows the receive handler to pass packets to the upper protocol
 * layers without copying on both the x86 and alpha platforms).
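 *
 * The receive alignment trick shows up in pcn_newbuf() below, where
 * m_adj(m_new, ETHER_ALIGN) offsets each cluster by two bytes so that
 * the IP header following the 14-byte Ethernet header ends up 32-bit
 * aligned.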
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/thread2.h>

#include <net/if.h>
#include <net/ifq_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/bpf.h>

#include <vm/vm.h>		/* for vtophys */
#include <vm/pmap.h>		/* for vtophys */
#include <machine/clock.h>	/* for DELAY */
#include <machine/bus_pio.h>
#include <machine/bus_memio.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include "../mii_layer/mii.h"
#include "../mii_layer/miivar.h"

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#define PCN_USEIOSPACE

#include "if_pcnreg.h"

/* "controller miibus0" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

/*
 * Various supported device vendors/types and their names.
 */
static struct pcn_type pcn_devs[] = {
	{ PCN_VENDORID, PCN_DEVICEID_PCNET, "AMD PCnet/PCI 10/100BaseTX" },
	{ PCN_VENDORID, PCN_DEVICEID_HOME, "AMD PCnet/Home HomePNA" },
	{ 0, 0, NULL }
};

static u_int32_t	pcn_csr_read(struct pcn_softc *, int);
static u_int16_t	pcn_csr_read16(struct pcn_softc *, int);
static u_int16_t	pcn_bcr_read16(struct pcn_softc *, int);
static void		pcn_csr_write(struct pcn_softc *, int, int);
static u_int32_t	pcn_bcr_read(struct pcn_softc *, int);
static void		pcn_bcr_write(struct pcn_softc *, int, int);

static int	pcn_probe(device_t);
static int	pcn_attach(device_t);
static int	pcn_detach(device_t);

static int	pcn_newbuf(struct pcn_softc *, int, struct mbuf *);
static int	pcn_encap(struct pcn_softc *, struct mbuf *, u_int32_t *);
static void	pcn_rxeof(struct pcn_softc *);
static void	pcn_txeof(struct pcn_softc *);
static void	pcn_intr(void *);
static void	pcn_tick(void *);
static void	pcn_start(struct ifnet *);
static int	pcn_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);
static void	pcn_init(void *);
static void	pcn_stop(struct pcn_softc *);
static void	pcn_watchdog(struct ifnet *);
static void	pcn_shutdown(device_t);
static int	pcn_ifmedia_upd(struct ifnet *);
static void	pcn_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static int	pcn_miibus_readreg(device_t, int, int);
static int	pcn_miibus_writereg(device_t, int, int, int);
static void	pcn_miibus_statchg(device_t);

static void	pcn_setfilt(struct ifnet *);
static void	pcn_setmulti(struct pcn_softc *);
static u_int32_t pcn_crc(caddr_t);
static void	pcn_reset(struct pcn_softc *);
static int	pcn_list_rx_init(struct pcn_softc *);
static int	pcn_list_tx_init(struct pcn_softc *);

#ifdef PCN_USEIOSPACE
#define PCN_RES		SYS_RES_IOPORT
#define PCN_RID		PCN_PCI_LOIO
#else
#define PCN_RES		SYS_RES_MEMORY
#define PCN_RID		PCN_PCI_LOMEM
#endif

static device_method_t pcn_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		pcn_probe),
	DEVMETHOD(device_attach,	pcn_attach),
	DEVMETHOD(device_detach,	pcn_detach),
	DEVMETHOD(device_shutdown,	pcn_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,
				bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	pcn_miibus_readreg),
	DEVMETHOD(miibus_writereg,	pcn_miibus_writereg),
	DEVMETHOD(miibus_statchg,	pcn_miibus_statchg),

	{ 0, 0 }
};

static driver_t pcn_driver = {
	"pcn",
	pcn_methods,
	sizeof(struct pcn_softc)
};

static devclass_t pcn_devclass;

DECLARE_DUMMY_MODULE(if_pcn);
DRIVER_MODULE(if_pcn, pci, pcn_driver, pcn_devclass, 0, 0);
DRIVER_MODULE(miibus, pcn, miibus_driver, miibus_devclass, 0, 0);

#define PCN_CSR_SETBIT(sc, reg, x)			\
	pcn_csr_write(sc, reg, pcn_csr_read(sc, reg) | (x))

#define PCN_CSR_CLRBIT(sc, reg, x)			\
	pcn_csr_write(sc, reg, pcn_csr_read(sc, reg) & ~(x))

#define PCN_BCR_SETBIT(sc, reg, x)			\
	pcn_bcr_write(sc, reg, pcn_bcr_read(sc, reg) | (x))

#define PCN_BCR_CLRBIT(sc, reg, x)			\
	pcn_bcr_write(sc, reg, pcn_bcr_read(sc, reg) & ~(x))

static u_int32_t
pcn_csr_read(sc, reg)
	struct pcn_softc	*sc;
	int			reg;
{
	CSR_WRITE_4(sc, PCN_IO32_RAP, reg);
	return(CSR_READ_4(sc, PCN_IO32_RDP));
}

static u_int16_t
pcn_csr_read16(sc, reg)
	struct pcn_softc	*sc;
	int			reg;
{
	CSR_WRITE_2(sc, PCN_IO16_RAP, reg);
	return(CSR_READ_2(sc, PCN_IO16_RDP));
}

static void
pcn_csr_write(sc, reg, val)
	struct pcn_softc	*sc;
	int			reg, val;
{
	CSR_WRITE_4(sc, PCN_IO32_RAP, reg);
	CSR_WRITE_4(sc, PCN_IO32_RDP, val);
	return;
}

static u_int32_t
pcn_bcr_read(sc, reg)
	struct pcn_softc	*sc;
	int			reg;
{
	CSR_WRITE_4(sc, PCN_IO32_RAP, reg);
	return(CSR_READ_4(sc, PCN_IO32_BDP));
}

static u_int16_t
pcn_bcr_read16(sc, reg)
	struct pcn_softc	*sc;
	int			reg;
{
	CSR_WRITE_2(sc, PCN_IO16_RAP, reg);
	return(CSR_READ_2(sc, PCN_IO16_BDP));
}

static void
pcn_bcr_write(sc, reg, val)
	struct pcn_softc	*sc;
	int			reg, val;
{
	CSR_WRITE_4(sc, PCN_IO32_RAP, reg);
	CSR_WRITE_4(sc, PCN_IO32_BDP, val);
	return;
}

static int
pcn_miibus_readreg(dev, phy, reg)
	device_t		dev;
	int			phy, reg;
{
	struct pcn_softc	*sc;
	int			val;

	sc = device_get_softc(dev);

	if (sc->pcn_phyaddr && phy > sc->pcn_phyaddr)
		return(0);

	pcn_bcr_write(sc, PCN_BCR_MIIADDR, reg | (phy << 5));
	val = pcn_bcr_read(sc, PCN_BCR_MIIDATA) & 0xFFFF;
	if (val == 0xFFFF)
		return(0);

	sc->pcn_phyaddr = phy;

	return(val);
}

static int
pcn_miibus_writereg(dev, phy, reg, data)
	device_t		dev;
	int			phy, reg, data;
{
	struct pcn_softc	*sc;

	sc = device_get_softc(dev);

	pcn_bcr_write(sc, PCN_BCR_MIIADDR, reg | (phy << 5));
	pcn_bcr_write(sc, PCN_BCR_MIIDATA, data);

	return(0);
}

static void
pcn_miibus_statchg(dev)
	device_t		dev;
{
	struct pcn_softc	*sc;
	struct mii_data		*mii;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->pcn_miibus);

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		PCN_BCR_SETBIT(sc, PCN_BCR_DUPLEX, PCN_DUPLEX_FDEN);
	} else {
		PCN_BCR_CLRBIT(sc, PCN_BCR_DUPLEX, PCN_DUPLEX_FDEN);
	}

	return;
}

#define DC_POLY		0xEDB88320

static u_int32_t
pcn_crc(addr)
	caddr_t			addr;
{
	u_int32_t		idx, bit, data, crc;

	/* Compute CRC for the address value.
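	 *
	 * This is the standard reflected CRC-32 (DC_POLY is the same
	 * polynomial the dc(4)/tulip drivers use, hence the name).  Only
	 * the top six bits of the result are returned; pcn_setmulti()
	 * uses them to pick one of the 64 bits in the chip's logical
	 * address filter: bits 5:4 select one of the four 16-bit MAR
	 * registers and bits 3:0 select the bit within it.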
	 */
	crc = 0xFFFFFFFF;	/* initial value */

	for (idx = 0; idx < 6; idx++) {
		for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1)
			crc = (crc >> 1) ^ (((crc ^ data) & 1) ? DC_POLY : 0);
	}

	return ((crc >> 26) & 0x3F);
}

static void
pcn_setmulti(sc)
	struct pcn_softc	*sc;
{
	struct ifnet		*ifp;
	struct ifmultiaddr	*ifma;
	u_int32_t		h, i;
	u_int16_t		hashes[4] = { 0, 0, 0, 0 };

	ifp = &sc->arpcom.ac_if;

	PCN_CSR_SETBIT(sc, PCN_CSR_EXTCTL1, PCN_EXTCTL1_SPND);

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		for (i = 0; i < 4; i++)
			pcn_csr_write(sc, PCN_CSR_MAR0 + i, 0xFFFF);
		PCN_CSR_CLRBIT(sc, PCN_CSR_EXTCTL1, PCN_EXTCTL1_SPND);
		return;
	}

	/* first, zot all the existing hash bits */
	for (i = 0; i < 4; i++)
		pcn_csr_write(sc, PCN_CSR_MAR0 + i, 0);

	/* now program new ones */
	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = pcn_crc(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		hashes[h >> 4] |= 1 << (h & 0xF);
	}

	for (i = 0; i < 4; i++)
		pcn_csr_write(sc, PCN_CSR_MAR0 + i, hashes[i]);

	PCN_CSR_CLRBIT(sc, PCN_CSR_EXTCTL1, PCN_EXTCTL1_SPND);

	return;
}

static void
pcn_reset(sc)
	struct pcn_softc	*sc;
{
	/*
	 * Issue a reset by reading from the RESET register.
	 * Note that we don't know if the chip is operating in
	 * 16-bit or 32-bit mode at this point, so we attempt
	 * to reset the chip both ways.  If one fails, the other
	 * will succeed.
	 */
	CSR_READ_2(sc, PCN_IO16_RESET);
	CSR_READ_4(sc, PCN_IO32_RESET);

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	/* Select 32-bit (DWIO) mode */
	CSR_WRITE_4(sc, PCN_IO32_RDP, 0);

	/* Select software style 3. */
	pcn_bcr_write(sc, PCN_BCR_SSTYLE, PCN_SWSTYLE_PCNETPCI_BURST);

	return;
}

/*
 * Probe for an AMD chip.  Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
pcn_probe(dev)
	device_t		dev;
{
	struct pcn_type		*t;
	struct pcn_softc	*sc;
	int			rid;
	u_int32_t		chip_id;

	t = pcn_devs;
	sc = device_get_softc(dev);

	while(t->pcn_name != NULL) {
		if ((pci_get_vendor(dev) == t->pcn_vid) &&
		    (pci_get_device(dev) == t->pcn_did)) {
			/*
			 * Temporarily map the I/O space
			 * so we can read the chip ID register.
			 */
			rid = PCN_RID;
			sc->pcn_res = bus_alloc_resource_any(dev, PCN_RES,
			    &rid, RF_ACTIVE);
			if (sc->pcn_res == NULL) {
				device_printf(dev,
				    "couldn't map ports/memory\n");
				return(ENXIO);
			}
			sc->pcn_btag = rman_get_bustag(sc->pcn_res);
			sc->pcn_bhandle = rman_get_bushandle(sc->pcn_res);
			/*
			 * Note: we can *NOT* put the chip into
			 * 32-bit mode yet.  The lnc driver will only
			 * work in 16-bit mode, and once the chip
			 * goes into 32-bit mode, the only way to
			 * get it out again is with a hardware reset.
			 * So if pcn_probe() is called before the
			 * lnc driver's probe routine, the chip will
			 * be locked into 32-bit operation and the lnc
			 * driver will be unable to attach to it.
			 * Note II: if the chip happens to already
			 * be in 32-bit mode, we still need to check
			 * the chip ID, but first we have to detect
			 * 32-bit mode using only 16-bit operations.
			 * The safest way to do this is to read the
			 * PCI subsystem ID from BCR23/24 and compare
			 * that with the value read from PCI config
			 * space.
			 */
			chip_id = pcn_bcr_read16(sc, PCN_BCR_PCISUBSYSID);
			chip_id <<= 16;
			chip_id |= pcn_bcr_read16(sc, PCN_BCR_PCISUBVENID);
			/*
			 * Note III: the test for 0x10001000 is a hack to
			 * pacify VMware, whose pseudo-PCnet interface is
			 * broken.  Reading the subsystem register from PCI
			 * config space yields 0x00000000 while reading the
			 * same value from I/O space yields 0x10001000.  It's
			 * not supposed to be that way.
			 */
			if (chip_id == pci_read_config(dev,
			    PCIR_SUBVEND_0, 4) || chip_id == 0x10001000) {
				/* We're in 16-bit mode. */
				chip_id = pcn_csr_read16(sc, PCN_CSR_CHIPID1);
				chip_id <<= 16;
				chip_id |= pcn_csr_read16(sc, PCN_CSR_CHIPID0);
			} else {
				/* We're in 32-bit mode. */
				chip_id = pcn_csr_read(sc, PCN_CSR_CHIPID1);
				chip_id <<= 16;
				chip_id |= pcn_csr_read(sc, PCN_CSR_CHIPID0);
			}
			bus_release_resource(dev, PCN_RES,
			    PCN_RID, sc->pcn_res);
			chip_id >>= 12;
			sc->pcn_type = chip_id & PART_MASK;
			switch(sc->pcn_type) {
			case Am79C971:
			case Am79C972:
			case Am79C973:
			case Am79C975:
			case Am79C976:
			case Am79C978:
				break;
			default:
				return(ENXIO);
				break;
			}
			device_set_desc(dev, t->pcn_name);
			return(0);
		}
		t++;
	}

	return(ENXIO);
}

/*
 * Attach the interface.  Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
pcn_attach(dev)
	device_t		dev;
{
	uint8_t			eaddr[ETHER_ADDR_LEN];
	u_int32_t		command;
	struct pcn_softc	*sc;
	struct ifnet		*ifp;
	int			unit, error = 0, rid;

	sc = device_get_softc(dev);
	unit = device_get_unit(dev);

	/*
	 * Handle power management nonsense.
	 */

	command = pci_read_config(dev, PCN_PCI_CAPID, 4) & 0x000000FF;
	if (command == 0x01) {

		command = pci_read_config(dev, PCN_PCI_PWRMGMTCTRL, 4);
		if (command & PCN_PSTATE_MASK) {
			u_int32_t	iobase, membase, irq;

			/* Save important PCI config data. */
			iobase = pci_read_config(dev, PCN_PCI_LOIO, 4);
			membase = pci_read_config(dev, PCN_PCI_LOMEM, 4);
			irq = pci_read_config(dev, PCN_PCI_INTLINE, 4);

			/* Reset the power state. */
			printf("pcn%d: chip is in D%d power mode "
			    "-- setting to D0\n", unit,
			    command & PCN_PSTATE_MASK);
			command &= 0xFFFFFFFC;
			pci_write_config(dev, PCN_PCI_PWRMGMTCTRL, command, 4);

			/* Restore PCI config data. */
			pci_write_config(dev, PCN_PCI_LOIO, iobase, 4);
			pci_write_config(dev, PCN_PCI_LOMEM, membase, 4);
			pci_write_config(dev, PCN_PCI_INTLINE, irq, 4);
		}
	}

	/*
	 * Map control/status registers.
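	 * (The PCIM_CMD_* bits are forced on below because the chip moves
	 * all descriptors and packet buffers by bus-master DMA, so
	 * PCIM_CMD_BUSMASTEREN is required in addition to the I/O or
	 * memory decode enable.)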
531 */ 532 command = pci_read_config(dev, PCIR_COMMAND, 4); 533 command |= (PCIM_CMD_PORTEN|PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN); 534 pci_write_config(dev, PCIR_COMMAND, command, 4); 535 command = pci_read_config(dev, PCIR_COMMAND, 4); 536 537 #ifdef PCN_USEIOSPACE 538 if (!(command & PCIM_CMD_PORTEN)) { 539 printf("pcn%d: failed to enable I/O ports!\n", unit); 540 error = ENXIO; 541 return(error); 542 } 543 #else 544 if (!(command & PCIM_CMD_MEMEN)) { 545 printf("pcn%d: failed to enable memory mapping!\n", unit); 546 error = ENXIO; 547 return(error); 548 } 549 #endif 550 551 rid = PCN_RID; 552 sc->pcn_res = bus_alloc_resource_any(dev, PCN_RES, &rid, RF_ACTIVE); 553 554 if (sc->pcn_res == NULL) { 555 printf("pcn%d: couldn't map ports/memory\n", unit); 556 error = ENXIO; 557 return(error); 558 } 559 560 sc->pcn_btag = rman_get_bustag(sc->pcn_res); 561 sc->pcn_bhandle = rman_get_bushandle(sc->pcn_res); 562 563 /* Allocate interrupt */ 564 rid = 0; 565 sc->pcn_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 566 RF_SHAREABLE | RF_ACTIVE); 567 568 if (sc->pcn_irq == NULL) { 569 printf("pcn%d: couldn't map interrupt\n", unit); 570 error = ENXIO; 571 goto fail; 572 } 573 574 /* Reset the adapter. */ 575 pcn_reset(sc); 576 577 /* 578 * Get station address from the EEPROM. 579 */ 580 *(uint32_t *)eaddr = CSR_READ_4(sc, PCN_IO32_APROM00); 581 *(uint16_t *)(eaddr + 4) = CSR_READ_2(sc, PCN_IO32_APROM01); 582 583 sc->pcn_unit = unit; 584 callout_init(&sc->pcn_stat_timer); 585 586 sc->pcn_ldata = contigmalloc(sizeof(struct pcn_list_data), M_DEVBUF, 587 M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); 588 589 if (sc->pcn_ldata == NULL) { 590 printf("pcn%d: no memory for list buffers!\n", unit); 591 error = ENXIO; 592 goto fail; 593 } 594 bzero(sc->pcn_ldata, sizeof(struct pcn_list_data)); 595 596 ifp = &sc->arpcom.ac_if; 597 ifp->if_softc = sc; 598 if_initname(ifp, "pcn", unit); 599 ifp->if_mtu = ETHERMTU; 600 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 601 ifp->if_ioctl = pcn_ioctl; 602 ifp->if_start = pcn_start; 603 ifp->if_watchdog = pcn_watchdog; 604 ifp->if_init = pcn_init; 605 ifp->if_baudrate = 10000000; 606 ifq_set_maxlen(&ifp->if_snd, PCN_TX_LIST_CNT - 1); 607 ifq_set_ready(&ifp->if_snd); 608 609 /* 610 * Do MII setup. 611 */ 612 if (mii_phy_probe(dev, &sc->pcn_miibus, 613 pcn_ifmedia_upd, pcn_ifmedia_sts)) { 614 printf("pcn%d: MII without any PHY!\n", sc->pcn_unit); 615 error = ENXIO; 616 goto fail; 617 } 618 619 /* 620 * Call MI attach routine. 
621 */ 622 ether_ifattach(ifp, eaddr); 623 624 error = bus_setup_intr(dev, sc->pcn_irq, 0, 625 pcn_intr, sc, &sc->pcn_intrhand, NULL); 626 if (error) { 627 ether_ifdetach(ifp); 628 device_printf(dev, "couldn't set up irq\n"); 629 goto fail; 630 } 631 632 fail: 633 pcn_detach(dev); 634 return(error); 635 } 636 637 static int pcn_detach(dev) 638 device_t dev; 639 { 640 struct pcn_softc *sc = device_get_softc(dev); 641 struct ifnet *ifp = &sc->arpcom.ac_if; 642 643 crit_enter(); 644 645 if (device_is_attached(dev)) { 646 pcn_reset(sc); 647 pcn_stop(sc); 648 ether_ifdetach(ifp); 649 } 650 651 if (sc->pcn_miibus != NULL) 652 device_delete_child(dev, sc->pcn_miibus); 653 bus_generic_detach(dev); 654 655 if (sc->pcn_intrhand) 656 bus_teardown_intr(dev, sc->pcn_irq, sc->pcn_intrhand); 657 658 crit_enter(); 659 660 if (sc->pcn_irq) 661 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->pcn_irq); 662 if (sc->pcn_res) 663 bus_release_resource(dev, PCN_RES, PCN_RID, sc->pcn_res); 664 665 if (sc->pcn_ldata) { 666 contigfree(sc->pcn_ldata, sizeof(struct pcn_list_data), 667 M_DEVBUF); 668 } 669 670 return(0); 671 } 672 673 /* 674 * Initialize the transmit descriptors. 675 */ 676 static int pcn_list_tx_init(sc) 677 struct pcn_softc *sc; 678 { 679 struct pcn_list_data *ld; 680 struct pcn_ring_data *cd; 681 int i; 682 683 cd = &sc->pcn_cdata; 684 ld = sc->pcn_ldata; 685 686 for (i = 0; i < PCN_TX_LIST_CNT; i++) { 687 cd->pcn_tx_chain[i] = NULL; 688 ld->pcn_tx_list[i].pcn_tbaddr = 0; 689 ld->pcn_tx_list[i].pcn_txctl = 0; 690 ld->pcn_tx_list[i].pcn_txstat = 0; 691 } 692 693 cd->pcn_tx_prod = cd->pcn_tx_cons = cd->pcn_tx_cnt = 0; 694 695 return(0); 696 } 697 698 699 /* 700 * Initialize the RX descriptors and allocate mbufs for them. 701 */ 702 static int pcn_list_rx_init(sc) 703 struct pcn_softc *sc; 704 { 705 struct pcn_list_data *ld; 706 struct pcn_ring_data *cd; 707 int i; 708 709 ld = sc->pcn_ldata; 710 cd = &sc->pcn_cdata; 711 712 for (i = 0; i < PCN_RX_LIST_CNT; i++) { 713 if (pcn_newbuf(sc, i, NULL) == ENOBUFS) 714 return(ENOBUFS); 715 } 716 717 cd->pcn_rx_prod = 0; 718 719 return(0); 720 } 721 722 /* 723 * Initialize an RX descriptor and attach an MBUF cluster. 724 */ 725 static int pcn_newbuf(sc, idx, m) 726 struct pcn_softc *sc; 727 int idx; 728 struct mbuf *m; 729 { 730 struct mbuf *m_new = NULL; 731 struct pcn_rx_desc *c; 732 733 c = &sc->pcn_ldata->pcn_rx_list[idx]; 734 735 if (m == NULL) { 736 MGETHDR(m_new, MB_DONTWAIT, MT_DATA); 737 if (m_new == NULL) 738 return(ENOBUFS); 739 740 MCLGET(m_new, MB_DONTWAIT); 741 if (!(m_new->m_flags & M_EXT)) { 742 m_freem(m_new); 743 return(ENOBUFS); 744 } 745 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 746 } else { 747 m_new = m; 748 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 749 m_new->m_data = m_new->m_ext.ext_buf; 750 } 751 752 m_adj(m_new, ETHER_ALIGN); 753 754 sc->pcn_cdata.pcn_rx_chain[idx] = m_new; 755 c->pcn_rbaddr = vtophys(mtod(m_new, caddr_t)); 756 c->pcn_bufsz = (~(PCN_RXLEN) + 1) & PCN_RXLEN_BUFSZ; 757 c->pcn_bufsz |= PCN_RXLEN_MBO; 758 c->pcn_rxstat = PCN_RXSTAT_STP|PCN_RXSTAT_ENP|PCN_RXSTAT_OWN; 759 760 return(0); 761 } 762 763 /* 764 * A frame has been uploaded: pass the resulting mbuf chain up to 765 * the higher level protocols. 
766 */ 767 static void pcn_rxeof(sc) 768 struct pcn_softc *sc; 769 { 770 struct mbuf *m; 771 struct ifnet *ifp; 772 struct pcn_rx_desc *cur_rx; 773 int i; 774 775 ifp = &sc->arpcom.ac_if; 776 i = sc->pcn_cdata.pcn_rx_prod; 777 778 while(PCN_OWN_RXDESC(&sc->pcn_ldata->pcn_rx_list[i])) { 779 cur_rx = &sc->pcn_ldata->pcn_rx_list[i]; 780 m = sc->pcn_cdata.pcn_rx_chain[i]; 781 sc->pcn_cdata.pcn_rx_chain[i] = NULL; 782 783 /* 784 * If an error occurs, update stats, clear the 785 * status word and leave the mbuf cluster in place: 786 * it should simply get re-used next time this descriptor 787 * comes up in the ring. 788 */ 789 if (cur_rx->pcn_rxstat & PCN_RXSTAT_ERR) { 790 ifp->if_ierrors++; 791 pcn_newbuf(sc, i, m); 792 PCN_INC(i, PCN_RX_LIST_CNT); 793 continue; 794 } 795 796 if (pcn_newbuf(sc, i, NULL)) { 797 /* Ran out of mbufs; recycle this one. */ 798 pcn_newbuf(sc, i, m); 799 ifp->if_ierrors++; 800 PCN_INC(i, PCN_RX_LIST_CNT); 801 continue; 802 } 803 804 PCN_INC(i, PCN_RX_LIST_CNT); 805 806 /* No errors; receive the packet. */ 807 ifp->if_ipackets++; 808 m->m_len = m->m_pkthdr.len = 809 cur_rx->pcn_rxlen - ETHER_CRC_LEN; 810 m->m_pkthdr.rcvif = ifp; 811 812 (*ifp->if_input)(ifp, m); 813 } 814 815 sc->pcn_cdata.pcn_rx_prod = i; 816 817 return; 818 } 819 820 /* 821 * A frame was downloaded to the chip. It's safe for us to clean up 822 * the list buffers. 823 */ 824 825 static void pcn_txeof(sc) 826 struct pcn_softc *sc; 827 { 828 struct pcn_tx_desc *cur_tx = NULL; 829 struct ifnet *ifp; 830 u_int32_t idx; 831 832 ifp = &sc->arpcom.ac_if; 833 834 /* 835 * Go through our tx list and free mbufs for those 836 * frames that have been transmitted. 837 */ 838 idx = sc->pcn_cdata.pcn_tx_cons; 839 while (idx != sc->pcn_cdata.pcn_tx_prod) { 840 cur_tx = &sc->pcn_ldata->pcn_tx_list[idx]; 841 842 if (!PCN_OWN_TXDESC(cur_tx)) 843 break; 844 845 if (!(cur_tx->pcn_txctl & PCN_TXCTL_ENP)) { 846 sc->pcn_cdata.pcn_tx_cnt--; 847 PCN_INC(idx, PCN_TX_LIST_CNT); 848 continue; 849 } 850 851 if (cur_tx->pcn_txctl & PCN_TXCTL_ERR) { 852 ifp->if_oerrors++; 853 if (cur_tx->pcn_txstat & PCN_TXSTAT_EXDEF) 854 ifp->if_collisions++; 855 if (cur_tx->pcn_txstat & PCN_TXSTAT_RTRY) 856 ifp->if_collisions++; 857 } 858 859 ifp->if_collisions += 860 cur_tx->pcn_txstat & PCN_TXSTAT_TRC; 861 862 ifp->if_opackets++; 863 if (sc->pcn_cdata.pcn_tx_chain[idx] != NULL) { 864 m_freem(sc->pcn_cdata.pcn_tx_chain[idx]); 865 sc->pcn_cdata.pcn_tx_chain[idx] = NULL; 866 } 867 868 sc->pcn_cdata.pcn_tx_cnt--; 869 PCN_INC(idx, PCN_TX_LIST_CNT); 870 } 871 872 if (idx != sc->pcn_cdata.pcn_tx_cons) { 873 /* Some buffers have been freed. */ 874 sc->pcn_cdata.pcn_tx_cons = idx; 875 ifp->if_flags &= ~IFF_OACTIVE; 876 } 877 ifp->if_timer = (sc->pcn_cdata.pcn_tx_cnt == 0) ? 
	    0 : 5;

	return;
}

static void
pcn_tick(xsc)
	void			*xsc;
{
	struct pcn_softc	*sc = xsc;
	struct mii_data		*mii;
	struct ifnet		*ifp = &sc->arpcom.ac_if;

	crit_enter();

	mii = device_get_softc(sc->pcn_miibus);
	mii_tick(mii);

	if (sc->pcn_link && !(mii->mii_media_status & IFM_ACTIVE))
		sc->pcn_link = 0;

	if (!sc->pcn_link) {
		mii_pollstat(mii);
		if (mii->mii_media_status & IFM_ACTIVE &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
			sc->pcn_link++;
		if (!ifq_is_empty(&ifp->if_snd))
			pcn_start(ifp);
	}

	callout_reset(&sc->pcn_stat_timer, hz, pcn_tick, sc);

	crit_exit();
}

static void
pcn_intr(arg)
	void			*arg;
{
	struct pcn_softc	*sc;
	struct ifnet		*ifp;
	u_int32_t		status;

	sc = arg;
	ifp = &sc->arpcom.ac_if;

	/* Suppress unwanted interrupts */
	if (!(ifp->if_flags & IFF_UP)) {
		pcn_stop(sc);
		return;
	}

	CSR_WRITE_4(sc, PCN_IO32_RAP, PCN_CSR_CSR);

	while ((status = CSR_READ_4(sc, PCN_IO32_RDP)) & PCN_CSR_INTR) {
		CSR_WRITE_4(sc, PCN_IO32_RDP, status);

		if (status & PCN_CSR_RINT)
			pcn_rxeof(sc);

		if (status & PCN_CSR_TINT)
			pcn_txeof(sc);

		if (status & PCN_CSR_ERR) {
			pcn_init(sc);
			break;
		}
	}

	if (!ifq_is_empty(&ifp->if_snd))
		pcn_start(ifp);

	return;
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
static int
pcn_encap(sc, m_head, txidx)
	struct pcn_softc	*sc;
	struct mbuf		*m_head;
	u_int32_t		*txidx;
{
	struct pcn_tx_desc	*f = NULL;
	struct mbuf		*m;
	int			frag, cur, cnt = 0;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers.  Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	m = m_head;
	cur = frag = *txidx;

	for (m = m_head; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			if ((PCN_TX_LIST_CNT -
			    (sc->pcn_cdata.pcn_tx_cnt + cnt)) < 2)
				return(ENOBUFS);
			f = &sc->pcn_ldata->pcn_tx_list[frag];
			f->pcn_txctl = (~(m->m_len) + 1) & PCN_TXCTL_BUFSZ;
			f->pcn_txctl |= PCN_TXCTL_MBO;
			f->pcn_tbaddr = vtophys(mtod(m, vm_offset_t));
			if (cnt == 0)
				f->pcn_txctl |= PCN_TXCTL_STP;
			else
				f->pcn_txctl |= PCN_TXCTL_OWN;
			cur = frag;
			PCN_INC(frag, PCN_TX_LIST_CNT);
			cnt++;
		}
	}

	if (m != NULL)
		return(ENOBUFS);

	sc->pcn_cdata.pcn_tx_chain[cur] = m_head;
	sc->pcn_ldata->pcn_tx_list[cur].pcn_txctl |=
	    PCN_TXCTL_ENP|PCN_TXCTL_ADD_FCS|PCN_TXCTL_MORE_LTINT;
	sc->pcn_ldata->pcn_tx_list[*txidx].pcn_txctl |= PCN_TXCTL_OWN;
	sc->pcn_cdata.pcn_tx_cnt += cnt;
	*txidx = frag;

	return(0);
}

/*
 * Main transmit routine.  To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists.  We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
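 *
 * Note the OWN-bit handling in pcn_encap() above: every descriptor in a
 * multi-fragment chain is marked PCN_TXCTL_OWN as it is filled in except
 * the first, which gets PCN_TXCTL_STP instead; OWN is set on that first
 * descriptor only after the whole chain has been built, so the chip never
 * sees a partially constructed frame.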
 */
static void
pcn_start(ifp)
	struct ifnet		*ifp;
{
	struct pcn_softc	*sc;
	struct mbuf		*m_head = NULL;
	u_int32_t		idx;
	int			need_trans;

	sc = ifp->if_softc;

	if (!sc->pcn_link)
		return;

	idx = sc->pcn_cdata.pcn_tx_prod;

	if (ifp->if_flags & IFF_OACTIVE)
		return;

	need_trans = 0;
	while(sc->pcn_cdata.pcn_tx_chain[idx] == NULL) {
		m_head = ifq_poll(&ifp->if_snd);
		if (m_head == NULL)
			break;

		if (pcn_encap(sc, m_head, &idx)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		m_head = ifq_dequeue(&ifp->if_snd);
		need_trans = 1;

		BPF_MTAP(ifp, m_head);
	}

	if (!need_trans)
		return;

	/* Transmit */
	sc->pcn_cdata.pcn_tx_prod = idx;
	pcn_csr_write(sc, PCN_CSR_CSR, PCN_CSR_TX|PCN_CSR_INTEN);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

static void
pcn_setfilt(ifp)
	struct ifnet		*ifp;
{
	struct pcn_softc	*sc;

	sc = ifp->if_softc;

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		PCN_CSR_SETBIT(sc, PCN_CSR_MODE, PCN_MODE_PROMISC);
	} else {
		PCN_CSR_CLRBIT(sc, PCN_CSR_MODE, PCN_MODE_PROMISC);
	}

	/* Set the capture broadcast bit to capture broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST) {
		PCN_CSR_CLRBIT(sc, PCN_CSR_MODE, PCN_MODE_RXNOBROAD);
	} else {
		PCN_CSR_SETBIT(sc, PCN_CSR_MODE, PCN_MODE_RXNOBROAD);
	}

	return;
}

static void
pcn_init(xsc)
	void			*xsc;
{
	struct pcn_softc	*sc = xsc;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	struct mii_data		*mii = NULL;

	crit_enter();

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	pcn_stop(sc);
	pcn_reset(sc);

	mii = device_get_softc(sc->pcn_miibus);

	/* Set MAC address */
	pcn_csr_write(sc, PCN_CSR_PAR0,
	    ((u_int16_t *)sc->arpcom.ac_enaddr)[0]);
	pcn_csr_write(sc, PCN_CSR_PAR1,
	    ((u_int16_t *)sc->arpcom.ac_enaddr)[1]);
	pcn_csr_write(sc, PCN_CSR_PAR2,
	    ((u_int16_t *)sc->arpcom.ac_enaddr)[2]);

	/* Init circular RX list. */
	if (pcn_list_rx_init(sc) == ENOBUFS) {
		printf("pcn%d: initialization failed: no "
		    "memory for rx buffers\n", sc->pcn_unit);
		pcn_stop(sc);

		crit_exit();
		return;
	}

	/* Set up RX filter. */
	pcn_setfilt(ifp);

	/*
	 * Init tx descriptors.
	 */
	pcn_list_tx_init(sc);

	/* Set up the mode register. */
	pcn_csr_write(sc, PCN_CSR_MODE, PCN_PORT_MII);

	/*
	 * Load the multicast filter.
	 */
	pcn_setmulti(sc);

	/*
	 * Load the addresses of the RX and TX lists.
	 */
	pcn_csr_write(sc, PCN_CSR_RXADDR0,
	    vtophys(&sc->pcn_ldata->pcn_rx_list[0]) & 0xFFFF);
	pcn_csr_write(sc, PCN_CSR_RXADDR1,
	    (vtophys(&sc->pcn_ldata->pcn_rx_list[0]) >> 16) & 0xFFFF);
	pcn_csr_write(sc, PCN_CSR_TXADDR0,
	    vtophys(&sc->pcn_ldata->pcn_tx_list[0]) & 0xFFFF);
	pcn_csr_write(sc, PCN_CSR_TXADDR1,
	    (vtophys(&sc->pcn_ldata->pcn_tx_list[0]) >> 16) & 0xFFFF);

	/* Set the RX and TX ring sizes. */
	pcn_csr_write(sc, PCN_CSR_RXRINGLEN, (~PCN_RX_LIST_CNT) + 1);
	pcn_csr_write(sc, PCN_CSR_TXRINGLEN, (~PCN_TX_LIST_CNT) + 1);

	/* We're not using the initialization block.
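	 * (The LANCE traditionally loads its operating parameters from an
	 * initialization block in host memory; everything here has been
	 * programmed directly through the CSRs above instead, so just
	 * clear the init block address register.)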
	 */
	pcn_csr_write(sc, PCN_CSR_IAB1, 0);

	/* Enable fast suspend mode. */
	PCN_CSR_SETBIT(sc, PCN_CSR_EXTCTL2, PCN_EXTCTL2_FASTSPNDE);

	/*
	 * Enable burst read and write.  Also set the no underflow
	 * bit.  This will avoid transmit underruns in certain
	 * conditions while still providing decent performance.
	 */
	PCN_BCR_SETBIT(sc, PCN_BCR_BUSCTL, PCN_BUSCTL_NOUFLOW|
	    PCN_BUSCTL_BREAD|PCN_BUSCTL_BWRITE);

	/* Enable graceful recovery from underflow. */
	PCN_CSR_SETBIT(sc, PCN_CSR_IMR, PCN_IMR_DXSUFLO);

	/* Enable auto-padding of short TX frames. */
	PCN_CSR_SETBIT(sc, PCN_CSR_TFEAT, PCN_TFEAT_PAD_TX);

	/* Disable MII autoneg (we handle this ourselves). */
	PCN_BCR_SETBIT(sc, PCN_BCR_MIICTL, PCN_MIICTL_DANAS);

	if (sc->pcn_type == Am79C978)
		pcn_bcr_write(sc, PCN_BCR_PHYSEL,
		    PCN_PHYSEL_PCNET|PCN_PHY_HOMEPNA);

	/* Enable interrupts and start the controller running. */
	pcn_csr_write(sc, PCN_CSR_CSR, PCN_CSR_INTEN|PCN_CSR_START);

	mii_mediachg(mii);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	callout_reset(&sc->pcn_stat_timer, hz, pcn_tick, sc);

	crit_exit();
}

/*
 * Set media options.
 */
static int
pcn_ifmedia_upd(ifp)
	struct ifnet		*ifp;
{
	struct pcn_softc	*sc;
	struct mii_data		*mii;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->pcn_miibus);

	sc->pcn_link = 0;
	if (mii->mii_instance) {
		struct mii_softc	*miisc;
		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
		    miisc = LIST_NEXT(miisc, mii_list))
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return(0);
}

/*
 * Report current media status.
 */
static void
pcn_ifmedia_sts(ifp, ifmr)
	struct ifnet		*ifp;
	struct ifmediareq	*ifmr;
{
	struct pcn_softc	*sc;
	struct mii_data		*mii;

	sc = ifp->if_softc;

	mii = device_get_softc(sc->pcn_miibus);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	return;
}

static int
pcn_ioctl(ifp, command, data, cr)
	struct ifnet		*ifp;
	u_long			command;
	caddr_t			data;
	struct ucred		*cr;
{
	struct pcn_softc	*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	struct mii_data		*mii = NULL;
	int			error = 0;

	crit_enter();

	switch(command) {
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->pcn_if_flags & IFF_PROMISC)) {
				PCN_CSR_SETBIT(sc, PCN_CSR_EXTCTL1,
				    PCN_EXTCTL1_SPND);
				pcn_setfilt(ifp);
				PCN_CSR_CLRBIT(sc, PCN_CSR_EXTCTL1,
				    PCN_EXTCTL1_SPND);
				pcn_csr_write(sc, PCN_CSR_CSR,
				    PCN_CSR_INTEN|PCN_CSR_START);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->pcn_if_flags & IFF_PROMISC) {
				PCN_CSR_SETBIT(sc, PCN_CSR_EXTCTL1,
				    PCN_EXTCTL1_SPND);
				pcn_setfilt(ifp);
				PCN_CSR_CLRBIT(sc, PCN_CSR_EXTCTL1,
				    PCN_EXTCTL1_SPND);
				pcn_csr_write(sc, PCN_CSR_CSR,
				    PCN_CSR_INTEN|PCN_CSR_START);
			} else if (!(ifp->if_flags & IFF_RUNNING))
				pcn_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				pcn_stop(sc);
		}
		sc->pcn_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		pcn_setmulti(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->pcn_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	crit_exit();

	return(error);
}

static void
pcn_watchdog(ifp)
	struct ifnet		*ifp;
{
	struct pcn_softc	*sc;

	sc = ifp->if_softc;

	ifp->if_oerrors++;
	printf("pcn%d: watchdog timeout\n", sc->pcn_unit);

	pcn_stop(sc);
	pcn_reset(sc);
	pcn_init(sc);

	if (!ifq_is_empty(&ifp->if_snd))
		pcn_start(ifp);

	return;
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
pcn_stop(sc)
	struct pcn_softc	*sc;
{
	int			i;
	struct ifnet		*ifp;

	ifp = &sc->arpcom.ac_if;
	ifp->if_timer = 0;

	callout_stop(&sc->pcn_stat_timer);
	PCN_CSR_SETBIT(sc, PCN_CSR_CSR, PCN_CSR_STOP);
	sc->pcn_link = 0;

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < PCN_RX_LIST_CNT; i++) {
		if (sc->pcn_cdata.pcn_rx_chain[i] != NULL) {
			m_freem(sc->pcn_cdata.pcn_rx_chain[i]);
			sc->pcn_cdata.pcn_rx_chain[i] = NULL;
		}
	}
	bzero((char *)&sc->pcn_ldata->pcn_rx_list,
	    sizeof(sc->pcn_ldata->pcn_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < PCN_TX_LIST_CNT; i++) {
		if (sc->pcn_cdata.pcn_tx_chain[i] != NULL) {
			m_freem(sc->pcn_cdata.pcn_tx_chain[i]);
			sc->pcn_cdata.pcn_tx_chain[i] = NULL;
		}
	}

	bzero((char *)&sc->pcn_ldata->pcn_tx_list,
	    sizeof(sc->pcn_ldata->pcn_tx_list));

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	return;
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
pcn_shutdown(dev)
	device_t		dev;
{
	struct pcn_softc	*sc;

	sc = device_get_softc(dev);

	pcn_reset(sc);
	pcn_stop(sc);

	return;
}