/*
 * Copyright (c) 2000 Berkeley Software Design, Inc.
 * Copyright (c) 1997, 1998, 1999, 2000
 *	Bill Paul <wpaul@osd.bsdi.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/pci/if_pcn.c,v 1.5.2.10 2003/03/05 18:42:33 njl Exp $
 * $DragonFly: src/sys/dev/netif/pcn/if_pcn.c,v 1.26 2005/11/28 17:13:43 dillon Exp $
 */

/*
 * AMD Am79c972 fast ethernet PCI NIC driver.  Datasheets are available
 * from http://www.amd.com.
 *
 * Written by Bill Paul <wpaul@osd.bsdi.com>
 */

/*
 * The AMD PCnet/PCI controllers are more advanced and functional
 * versions of the venerable 7990 LANCE.  The PCnet/PCI chips retain
 * backwards compatibility with the LANCE and thus can be made
 * to work with older LANCE drivers.  This is in fact how the
 * PCnet/PCI chips were supported in FreeBSD originally.  The trouble
 * is that the PCnet/PCI devices offer several performance enhancements
 * which can't be exploited in LANCE compatibility mode.  Chief among
 * these enhancements is the ability to perform PCI DMA operations
 * using 32-bit addressing (which eliminates the need for ISA
 * bounce-buffering), and special receive buffer alignment (which
 * allows the receive handler to pass packets to the upper protocol
 * layers without copying on both the x86 and alpha platforms).
 */
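
/*
 * Implementation note: this driver hands the chip physical addresses
 * obtained with vtophys() and keeps both descriptor rings in a single
 * contigmalloc() block (see pcn_attach() and pcn_newbuf() below); it
 * does not use the bus_dma machinery.
 */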

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/serialize.h>
#include <sys/thread2.h>

#include <net/if.h>
#include <net/ifq_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/bpf.h>

#include <vm/vm.h>		/* for vtophys */
#include <vm/pmap.h>		/* for vtophys */
#include <machine/clock.h>	/* for DELAY */
#include <machine/bus_pio.h>
#include <machine/bus_memio.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include "../mii_layer/mii.h"
#include "../mii_layer/miivar.h"

#include <bus/pci/pcireg.h>
#include <bus/pci/pcivar.h>

#define PCN_USEIOSPACE

#include "if_pcnreg.h"

/* "controller miibus0" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

/*
 * Various supported device vendors/types and their names.
 */
static struct pcn_type pcn_devs[] = {
	{ PCN_VENDORID, PCN_DEVICEID_PCNET, "AMD PCnet/PCI 10/100BaseTX" },
	{ PCN_VENDORID, PCN_DEVICEID_HOME, "AMD PCnet/Home HomePNA" },
	{ 0, 0, NULL }
};

static u_int32_t pcn_csr_read	(struct pcn_softc *, int);
static u_int16_t pcn_csr_read16	(struct pcn_softc *, int);
static u_int16_t pcn_bcr_read16	(struct pcn_softc *, int);
static void pcn_csr_write	(struct pcn_softc *, int, int);
static u_int32_t pcn_bcr_read	(struct pcn_softc *, int);
static void pcn_bcr_write	(struct pcn_softc *, int, int);

static int pcn_probe		(device_t);
static int pcn_attach		(device_t);
static int pcn_detach		(device_t);

static int pcn_newbuf		(struct pcn_softc *, int, struct mbuf *);
static int pcn_encap		(struct pcn_softc *,
				 struct mbuf *, u_int32_t *);
static void pcn_rxeof		(struct pcn_softc *);
static void pcn_txeof		(struct pcn_softc *);
static void pcn_intr		(void *);
static void pcn_tick		(void *);
static void pcn_start		(struct ifnet *);
static int pcn_ioctl		(struct ifnet *, u_long, caddr_t,
				 struct ucred *);
static void pcn_init		(void *);
static void pcn_stop		(struct pcn_softc *);
static void pcn_watchdog	(struct ifnet *);
static void pcn_shutdown	(device_t);
static int pcn_ifmedia_upd	(struct ifnet *);
static void pcn_ifmedia_sts	(struct ifnet *, struct ifmediareq *);

static int pcn_miibus_readreg	(device_t, int, int);
static int pcn_miibus_writereg	(device_t, int, int, int);
static void pcn_miibus_statchg	(device_t);

static void pcn_setfilt	(struct ifnet *);
static void pcn_setmulti	(struct pcn_softc *);
static u_int32_t pcn_crc	(caddr_t);
static void pcn_reset		(struct pcn_softc *);
static int pcn_list_rx_init	(struct pcn_softc *);
static int pcn_list_tx_init	(struct pcn_softc *);

#ifdef PCN_USEIOSPACE
#define PCN_RES			SYS_RES_IOPORT
#define PCN_RID			PCN_PCI_LOIO
#else
#define PCN_RES			SYS_RES_MEMORY
#define PCN_RID			PCN_PCI_LOMEM
#endif
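
/*
 * newbus glue: standard probe/attach/detach/shutdown entry points for
 * the PCI bus, plus the MII read/write/statchg methods used by the
 * shared miibus layer.
 */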

static device_method_t pcn_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		pcn_probe),
	DEVMETHOD(device_attach,	pcn_attach),
	DEVMETHOD(device_detach,	pcn_detach),
	DEVMETHOD(device_shutdown,	pcn_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	pcn_miibus_readreg),
	DEVMETHOD(miibus_writereg,	pcn_miibus_writereg),
	DEVMETHOD(miibus_statchg,	pcn_miibus_statchg),

	{ 0, 0 }
};

static driver_t pcn_driver = {
	"pcn",
	pcn_methods,
	sizeof(struct pcn_softc)
};

static devclass_t pcn_devclass;

DECLARE_DUMMY_MODULE(if_pcn);
DRIVER_MODULE(if_pcn, pci, pcn_driver, pcn_devclass, 0, 0);
DRIVER_MODULE(miibus, pcn, miibus_driver, miibus_devclass, 0, 0);

#define PCN_CSR_SETBIT(sc, reg, x)			\
	pcn_csr_write(sc, reg, pcn_csr_read(sc, reg) | (x))

#define PCN_CSR_CLRBIT(sc, reg, x)			\
	pcn_csr_write(sc, reg, pcn_csr_read(sc, reg) & ~(x))

#define PCN_BCR_SETBIT(sc, reg, x)			\
	pcn_bcr_write(sc, reg, pcn_bcr_read(sc, reg) | (x))

#define PCN_BCR_CLRBIT(sc, reg, x)			\
	pcn_bcr_write(sc, reg, pcn_bcr_read(sc, reg) & ~(x))

static u_int32_t
pcn_csr_read(sc, reg)
	struct pcn_softc	*sc;
	int			reg;
{
	CSR_WRITE_4(sc, PCN_IO32_RAP, reg);
	return(CSR_READ_4(sc, PCN_IO32_RDP));
}

static u_int16_t
pcn_csr_read16(sc, reg)
	struct pcn_softc	*sc;
	int			reg;
{
	CSR_WRITE_2(sc, PCN_IO16_RAP, reg);
	return(CSR_READ_2(sc, PCN_IO16_RDP));
}

static void
pcn_csr_write(sc, reg, val)
	struct pcn_softc	*sc;
	int			reg, val;
{
	CSR_WRITE_4(sc, PCN_IO32_RAP, reg);
	CSR_WRITE_4(sc, PCN_IO32_RDP, val);
	return;
}

static u_int32_t
pcn_bcr_read(sc, reg)
	struct pcn_softc	*sc;
	int			reg;
{
	CSR_WRITE_4(sc, PCN_IO32_RAP, reg);
	return(CSR_READ_4(sc, PCN_IO32_BDP));
}

static u_int16_t
pcn_bcr_read16(sc, reg)
	struct pcn_softc	*sc;
	int			reg;
{
	CSR_WRITE_2(sc, PCN_IO16_RAP, reg);
	return(CSR_READ_2(sc, PCN_IO16_BDP));
}

static void
pcn_bcr_write(sc, reg, val)
	struct pcn_softc	*sc;
	int			reg, val;
{
	CSR_WRITE_4(sc, PCN_IO32_RAP, reg);
	CSR_WRITE_4(sc, PCN_IO32_BDP, val);
	return;
}

static int
pcn_miibus_readreg(dev, phy, reg)
	device_t		dev;
	int			phy, reg;
{
	struct pcn_softc	*sc;
	int			val;

	sc = device_get_softc(dev);

	if (sc->pcn_phyaddr && phy > sc->pcn_phyaddr)
		return(0);

	pcn_bcr_write(sc, PCN_BCR_MIIADDR, reg | (phy << 5));
	val = pcn_bcr_read(sc, PCN_BCR_MIIDATA) & 0xFFFF;
	if (val == 0xFFFF)
		return(0);

	sc->pcn_phyaddr = phy;

	return(val);
}

static int
pcn_miibus_writereg(dev, phy, reg, data)
	device_t		dev;
	int			phy, reg, data;
{
	struct pcn_softc	*sc;

	sc = device_get_softc(dev);

	pcn_bcr_write(sc, PCN_BCR_MIIADDR, reg | (phy << 5));
	pcn_bcr_write(sc, PCN_BCR_MIIDATA, data);

	return(0);
}

static void
pcn_miibus_statchg(dev)
	device_t		dev;
{
	struct pcn_softc	*sc;
	struct mii_data		*mii;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->pcn_miibus);

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		PCN_BCR_SETBIT(sc, PCN_BCR_DUPLEX, PCN_DUPLEX_FDEN);
	} else {
		PCN_BCR_CLRBIT(sc, PCN_BCR_DUPLEX, PCN_DUPLEX_FDEN);
	}

	return;
}

#define DC_POLY		0xEDB88320
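
/*
 * Calculate the little-endian CRC-32 of a station address.  Only the
 * upper six bits of the result are returned; they select one of the
 * 64 bits in the logical address filter programmed into CSR_MAR0-3
 * by pcn_setmulti() below.
 */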
static u_int32_t
pcn_crc(addr)
	caddr_t			addr;
{
	u_int32_t		idx, bit, data, crc;

	/* Compute CRC for the address value. */
	crc = 0xFFFFFFFF; /* initial value */

	for (idx = 0; idx < 6; idx++) {
		for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1)
			crc = (crc >> 1) ^ (((crc ^ data) & 1) ? DC_POLY : 0);
	}

	return ((crc >> 26) & 0x3F);
}

static void
pcn_setmulti(sc)
	struct pcn_softc	*sc;
{
	struct ifnet		*ifp;
	struct ifmultiaddr	*ifma;
	u_int32_t		h, i;
	u_int16_t		hashes[4] = { 0, 0, 0, 0 };

	ifp = &sc->arpcom.ac_if;

	PCN_CSR_SETBIT(sc, PCN_CSR_EXTCTL1, PCN_EXTCTL1_SPND);

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		for (i = 0; i < 4; i++)
			pcn_csr_write(sc, PCN_CSR_MAR0 + i, 0xFFFF);
		PCN_CSR_CLRBIT(sc, PCN_CSR_EXTCTL1, PCN_EXTCTL1_SPND);
		return;
	}

	/* first, zot all the existing hash bits */
	for (i = 0; i < 4; i++)
		pcn_csr_write(sc, PCN_CSR_MAR0 + i, 0);

	/* now program new ones */
	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = pcn_crc(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		hashes[h >> 4] |= 1 << (h & 0xF);
	}

	for (i = 0; i < 4; i++)
		pcn_csr_write(sc, PCN_CSR_MAR0 + i, hashes[i]);

	PCN_CSR_CLRBIT(sc, PCN_CSR_EXTCTL1, PCN_EXTCTL1_SPND);

	return;
}

static void
pcn_reset(sc)
	struct pcn_softc	*sc;
{
	/*
	 * Issue a reset by reading from the RESET register.
	 * Note that we don't know if the chip is operating in
	 * 16-bit or 32-bit mode at this point, so we attempt
	 * to reset the chip both ways.  If one fails, the other
	 * will succeed.
	 */
	CSR_READ_2(sc, PCN_IO16_RESET);
	CSR_READ_4(sc, PCN_IO32_RESET);

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	/* Select 32-bit (DWIO) mode */
	CSR_WRITE_4(sc, PCN_IO32_RDP, 0);

	/* Select software style 3. */
	pcn_bcr_write(sc, PCN_BCR_SSTYLE, PCN_SWSTYLE_PCNETPCI_BURST);

	return;
}

/*
 * Probe for an AMD chip.  Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
pcn_probe(dev)
	device_t		dev;
{
	struct pcn_type		*t;
	struct pcn_softc	*sc;
	int			rid;
	u_int32_t		chip_id;

	t = pcn_devs;
	sc = device_get_softc(dev);

	while(t->pcn_name != NULL) {
		if ((pci_get_vendor(dev) == t->pcn_vid) &&
		    (pci_get_device(dev) == t->pcn_did)) {
			/*
			 * Temporarily map the I/O space
			 * so we can read the chip ID register.
			 */
			rid = PCN_RID;
			sc->pcn_res = bus_alloc_resource_any(dev, PCN_RES,
			    &rid, RF_ACTIVE);
			if (sc->pcn_res == NULL) {
				device_printf(dev,
				    "couldn't map ports/memory\n");
				return(ENXIO);
			}
			sc->pcn_btag = rman_get_bustag(sc->pcn_res);
			sc->pcn_bhandle = rman_get_bushandle(sc->pcn_res);
			/*
			 * Note: we can *NOT* put the chip into
			 * 32-bit mode yet.  The lnc driver will only
			 * work in 16-bit mode, and once the chip
			 * goes into 32-bit mode, the only way to
			 * get it out again is with a hardware reset.
			 * So if pcn_probe() is called before the
			 * lnc driver's probe routine, the chip will
			 * be locked into 32-bit operation and the lnc
			 * driver will be unable to attach to it.
			 * Note II: if the chip happens to already
			 * be in 32-bit mode, we still need to check
			 * the chip ID, but first we have to detect
			 * 32-bit mode using only 16-bit operations.
			 * The safest way to do this is to read the
			 * PCI subsystem ID from BCR23/24 and compare
			 * that with the value read from PCI config
			 * space.
			 */
			chip_id = pcn_bcr_read16(sc, PCN_BCR_PCISUBSYSID);
			chip_id <<= 16;
			chip_id |= pcn_bcr_read16(sc, PCN_BCR_PCISUBVENID);
			/*
			 * Note III: the test for 0x10001000 is a hack to
			 * pacify VMware, whose pseudo-PCnet interface is
			 * broken.  Reading the subsystem register from PCI
			 * config space yields 0x00000000 while reading the
			 * same value from I/O space yields 0x10001000.  It's
			 * not supposed to be that way.
			 */
			if (chip_id == pci_read_config(dev,
			    PCIR_SUBVEND_0, 4) || chip_id == 0x10001000) {
				/* We're in 16-bit mode. */
				chip_id = pcn_csr_read16(sc, PCN_CSR_CHIPID1);
				chip_id <<= 16;
				chip_id |= pcn_csr_read16(sc, PCN_CSR_CHIPID0);
			} else {
				/* We're in 32-bit mode. */
				chip_id = pcn_csr_read(sc, PCN_CSR_CHIPID1);
				chip_id <<= 16;
				chip_id |= pcn_csr_read(sc, PCN_CSR_CHIPID0);
			}
			bus_release_resource(dev, PCN_RES,
			    PCN_RID, sc->pcn_res);
			chip_id >>= 12;
			sc->pcn_type = chip_id & PART_MASK;
			switch(sc->pcn_type) {
			case Am79C971:
			case Am79C972:
			case Am79C973:
			case Am79C975:
			case Am79C976:
			case Am79C978:
				break;
			default:
				return(ENXIO);
				break;
			}
			device_set_desc(dev, t->pcn_name);
			return(0);
		}
		t++;
	}

	return(ENXIO);
}

/*
 * Attach the interface.  Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
pcn_attach(dev)
	device_t		dev;
{
	uint8_t			eaddr[ETHER_ADDR_LEN];
	u_int32_t		command;
	struct pcn_softc	*sc;
	struct ifnet		*ifp;
	int			unit, error = 0, rid;

	sc = device_get_softc(dev);
	unit = device_get_unit(dev);

	/*
	 * Handle power management nonsense.
	 */

	command = pci_read_config(dev, PCN_PCI_CAPID, 4) & 0x000000FF;
	if (command == 0x01) {

		command = pci_read_config(dev, PCN_PCI_PWRMGMTCTRL, 4);
		if (command & PCN_PSTATE_MASK) {
			u_int32_t		iobase, membase, irq;

			/* Save important PCI config data. */
			iobase = pci_read_config(dev, PCN_PCI_LOIO, 4);
			membase = pci_read_config(dev, PCN_PCI_LOMEM, 4);
			irq = pci_read_config(dev, PCN_PCI_INTLINE, 4);

			/* Reset the power state. */
			printf("pcn%d: chip is in D%d power mode "
			    "-- setting to D0\n", unit,
			    command & PCN_PSTATE_MASK);
			command &= 0xFFFFFFFC;
			pci_write_config(dev, PCN_PCI_PWRMGMTCTRL, command, 4);

			/* Restore PCI config data. */
			pci_write_config(dev, PCN_PCI_LOIO, iobase, 4);
			pci_write_config(dev, PCN_PCI_LOMEM, membase, 4);
			pci_write_config(dev, PCN_PCI_INTLINE, irq, 4);
		}
	}
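
	/*
	 * Make sure I/O space, memory space and bus mastering are
	 * enabled in the PCI command register before trying to map
	 * the registers below.
	 */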
	/*
	 * Map control/status registers.
	 */
	command = pci_read_config(dev, PCIR_COMMAND, 4);
	command |= (PCIM_CMD_PORTEN|PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN);
	pci_write_config(dev, PCIR_COMMAND, command, 4);
	command = pci_read_config(dev, PCIR_COMMAND, 4);

#ifdef PCN_USEIOSPACE
	if (!(command & PCIM_CMD_PORTEN)) {
		printf("pcn%d: failed to enable I/O ports!\n", unit);
		error = ENXIO;
		return(error);
	}
#else
	if (!(command & PCIM_CMD_MEMEN)) {
		printf("pcn%d: failed to enable memory mapping!\n", unit);
		error = ENXIO;
		return(error);
	}
#endif

	rid = PCN_RID;
	sc->pcn_res = bus_alloc_resource_any(dev, PCN_RES, &rid, RF_ACTIVE);

	if (sc->pcn_res == NULL) {
		printf("pcn%d: couldn't map ports/memory\n", unit);
		error = ENXIO;
		return(error);
	}

	sc->pcn_btag = rman_get_bustag(sc->pcn_res);
	sc->pcn_bhandle = rman_get_bushandle(sc->pcn_res);

	/* Allocate interrupt */
	rid = 0;
	sc->pcn_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->pcn_irq == NULL) {
		printf("pcn%d: couldn't map interrupt\n", unit);
		error = ENXIO;
		goto fail;
	}

	/* Reset the adapter. */
	pcn_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 */
	*(uint32_t *)eaddr = CSR_READ_4(sc, PCN_IO32_APROM00);
	*(uint16_t *)(eaddr + 4) = CSR_READ_2(sc, PCN_IO32_APROM01);

	sc->pcn_unit = unit;
	callout_init(&sc->pcn_stat_timer);

	sc->pcn_ldata = contigmalloc(sizeof(struct pcn_list_data), M_DEVBUF,
	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);

	if (sc->pcn_ldata == NULL) {
		printf("pcn%d: no memory for list buffers!\n", unit);
		error = ENXIO;
		goto fail;
	}
	bzero(sc->pcn_ldata, sizeof(struct pcn_list_data));

	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	if_initname(ifp, "pcn", unit);
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = pcn_ioctl;
	ifp->if_start = pcn_start;
	ifp->if_watchdog = pcn_watchdog;
	ifp->if_init = pcn_init;
	ifp->if_baudrate = 10000000;
	ifq_set_maxlen(&ifp->if_snd, PCN_TX_LIST_CNT - 1);
	ifq_set_ready(&ifp->if_snd);

	/*
	 * Do MII setup.
	 */
	if (mii_phy_probe(dev, &sc->pcn_miibus,
	    pcn_ifmedia_upd, pcn_ifmedia_sts)) {
		printf("pcn%d: MII without any PHY!\n", sc->pcn_unit);
		error = ENXIO;
		goto fail;
	}
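
	/*
	 * ether_ifattach() registers the interface with the network
	 * stack; the interrupt handler is hooked afterwards and runs
	 * under the interface serializer (ifp->if_serializer).
	 */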
	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr, NULL);

	error = bus_setup_intr(dev, sc->pcn_irq, INTR_NETSAFE,
	    pcn_intr, sc, &sc->pcn_intrhand,
	    ifp->if_serializer);
	if (error) {
		ether_ifdetach(ifp);
		device_printf(dev, "couldn't set up irq\n");
		goto fail;
	}

	return(0);

fail:
	pcn_detach(dev);
	return(error);
}

static int
pcn_detach(dev)
	device_t		dev;
{
	struct pcn_softc	*sc = device_get_softc(dev);
	struct ifnet		*ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);

	if (device_is_attached(dev)) {
		pcn_reset(sc);
		pcn_stop(sc);
		ether_ifdetach(ifp);
	}

	if (sc->pcn_miibus != NULL)
		device_delete_child(dev, sc->pcn_miibus);
	bus_generic_detach(dev);

	if (sc->pcn_intrhand)
		bus_teardown_intr(dev, sc->pcn_irq, sc->pcn_intrhand);

	if (sc->pcn_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->pcn_irq);
	if (sc->pcn_res)
		bus_release_resource(dev, PCN_RES, PCN_RID, sc->pcn_res);

	if (sc->pcn_ldata) {
		contigfree(sc->pcn_ldata, sizeof(struct pcn_list_data),
		    M_DEVBUF);
	}
	lwkt_serialize_exit(ifp->if_serializer);

	return(0);
}

/*
 * Initialize the transmit descriptors.
 */
static int
pcn_list_tx_init(sc)
	struct pcn_softc	*sc;
{
	struct pcn_list_data	*ld;
	struct pcn_ring_data	*cd;
	int			i;

	cd = &sc->pcn_cdata;
	ld = sc->pcn_ldata;

	for (i = 0; i < PCN_TX_LIST_CNT; i++) {
		cd->pcn_tx_chain[i] = NULL;
		ld->pcn_tx_list[i].pcn_tbaddr = 0;
		ld->pcn_tx_list[i].pcn_txctl = 0;
		ld->pcn_tx_list[i].pcn_txstat = 0;
	}

	cd->pcn_tx_prod = cd->pcn_tx_cons = cd->pcn_tx_cnt = 0;

	return(0);
}


/*
 * Initialize the RX descriptors and allocate mbufs for them.
 */
static int
pcn_list_rx_init(sc)
	struct pcn_softc	*sc;
{
	struct pcn_list_data	*ld;
	struct pcn_ring_data	*cd;
	int			i;

	ld = sc->pcn_ldata;
	cd = &sc->pcn_cdata;

	for (i = 0; i < PCN_RX_LIST_CNT; i++) {
		if (pcn_newbuf(sc, i, NULL) == ENOBUFS)
			return(ENOBUFS);
	}

	cd->pcn_rx_prod = 0;

	return(0);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 */
static int
pcn_newbuf(sc, idx, m)
	struct pcn_softc	*sc;
	int			idx;
	struct mbuf		*m;
{
	struct mbuf		*m_new = NULL;
	struct pcn_rx_desc	*c;

	c = &sc->pcn_ldata->pcn_rx_list[idx];

	if (m == NULL) {
		MGETHDR(m_new, MB_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return(ENOBUFS);

		MCLGET(m_new, MB_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return(ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	m_adj(m_new, ETHER_ALIGN);

	sc->pcn_cdata.pcn_rx_chain[idx] = m_new;
	c->pcn_rbaddr = vtophys(mtod(m_new, caddr_t));
	c->pcn_bufsz = (~(PCN_RXLEN) + 1) & PCN_RXLEN_BUFSZ;
	c->pcn_bufsz |= PCN_RXLEN_MBO;
	c->pcn_rxstat = PCN_RXSTAT_STP|PCN_RXSTAT_ENP|PCN_RXSTAT_OWN;

	return(0);
}
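
/*
 * The receive and transmit completion handlers below walk their rings
 * using the descriptor OWN bit: pcn_newbuf() and pcn_encap() hand
 * descriptors to the chip, and the PCN_OWN_RXDESC()/PCN_OWN_TXDESC()
 * macros (see if_pcnreg.h) test that bit to tell when the chip has
 * handed a descriptor back to the host.
 */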

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
static void
pcn_rxeof(sc)
	struct pcn_softc	*sc;
{
	struct mbuf		*m;
	struct ifnet		*ifp;
	struct pcn_rx_desc	*cur_rx;
	int			i;

	ifp = &sc->arpcom.ac_if;
	i = sc->pcn_cdata.pcn_rx_prod;

	while(PCN_OWN_RXDESC(&sc->pcn_ldata->pcn_rx_list[i])) {
		cur_rx = &sc->pcn_ldata->pcn_rx_list[i];
		m = sc->pcn_cdata.pcn_rx_chain[i];
		sc->pcn_cdata.pcn_rx_chain[i] = NULL;

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (cur_rx->pcn_rxstat & PCN_RXSTAT_ERR) {
			ifp->if_ierrors++;
			pcn_newbuf(sc, i, m);
			PCN_INC(i, PCN_RX_LIST_CNT);
			continue;
		}

		if (pcn_newbuf(sc, i, NULL)) {
			/* Ran out of mbufs; recycle this one. */
			pcn_newbuf(sc, i, m);
			ifp->if_ierrors++;
			PCN_INC(i, PCN_RX_LIST_CNT);
			continue;
		}

		PCN_INC(i, PCN_RX_LIST_CNT);

		/* No errors; receive the packet. */
		ifp->if_ipackets++;
		m->m_len = m->m_pkthdr.len =
		    cur_rx->pcn_rxlen - ETHER_CRC_LEN;
		m->m_pkthdr.rcvif = ifp;

		ifp->if_input(ifp, m);
	}

	sc->pcn_cdata.pcn_rx_prod = i;

	return;
}

/*
 * A frame was downloaded to the chip.  It's safe for us to clean up
 * the list buffers.
 */

static void
pcn_txeof(sc)
	struct pcn_softc	*sc;
{
	struct pcn_tx_desc	*cur_tx = NULL;
	struct ifnet		*ifp;
	u_int32_t		idx;

	ifp = &sc->arpcom.ac_if;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	idx = sc->pcn_cdata.pcn_tx_cons;
	while (idx != sc->pcn_cdata.pcn_tx_prod) {
		cur_tx = &sc->pcn_ldata->pcn_tx_list[idx];

		if (!PCN_OWN_TXDESC(cur_tx))
			break;

		if (!(cur_tx->pcn_txctl & PCN_TXCTL_ENP)) {
			sc->pcn_cdata.pcn_tx_cnt--;
			PCN_INC(idx, PCN_TX_LIST_CNT);
			continue;
		}

		if (cur_tx->pcn_txctl & PCN_TXCTL_ERR) {
			ifp->if_oerrors++;
			if (cur_tx->pcn_txstat & PCN_TXSTAT_EXDEF)
				ifp->if_collisions++;
			if (cur_tx->pcn_txstat & PCN_TXSTAT_RTRY)
				ifp->if_collisions++;
		}

		ifp->if_collisions +=
		    cur_tx->pcn_txstat & PCN_TXSTAT_TRC;

		ifp->if_opackets++;
		if (sc->pcn_cdata.pcn_tx_chain[idx] != NULL) {
			m_freem(sc->pcn_cdata.pcn_tx_chain[idx]);
			sc->pcn_cdata.pcn_tx_chain[idx] = NULL;
		}

		sc->pcn_cdata.pcn_tx_cnt--;
		PCN_INC(idx, PCN_TX_LIST_CNT);
	}

	if (idx != sc->pcn_cdata.pcn_tx_cons) {
		/* Some buffers have been freed. */
		sc->pcn_cdata.pcn_tx_cons = idx;
		ifp->if_flags &= ~IFF_OACTIVE;
	}
	ifp->if_timer = (sc->pcn_cdata.pcn_tx_cnt == 0) ? 0 : 5;

	return;
}
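
/*
 * Once-a-second timer: poll the PHY for link changes and restart the
 * transmit queue if a link has just come back up.
 */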
static void
pcn_tick(xsc)
	void			*xsc;
{
	struct pcn_softc	*sc = xsc;
	struct mii_data		*mii;
	struct ifnet		*ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);

	mii = device_get_softc(sc->pcn_miibus);
	mii_tick(mii);

	if (sc->pcn_link && !(mii->mii_media_status & IFM_ACTIVE))
		sc->pcn_link = 0;

	if (!sc->pcn_link) {
		mii_pollstat(mii);
		if (mii->mii_media_status & IFM_ACTIVE &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
			sc->pcn_link++;
		if (!ifq_is_empty(&ifp->if_snd))
			pcn_start(ifp);
	}
	callout_reset(&sc->pcn_stat_timer, hz, pcn_tick, sc);

	lwkt_serialize_exit(ifp->if_serializer);
}

static void
pcn_intr(arg)
	void			*arg;
{
	struct pcn_softc	*sc;
	struct ifnet		*ifp;
	u_int32_t		status;

	sc = arg;
	ifp = &sc->arpcom.ac_if;

	/* Suppress unwanted interrupts */
	if (!(ifp->if_flags & IFF_UP)) {
		pcn_stop(sc);
		return;
	}

	CSR_WRITE_4(sc, PCN_IO32_RAP, PCN_CSR_CSR);

	while ((status = CSR_READ_4(sc, PCN_IO32_RDP)) & PCN_CSR_INTR) {
		CSR_WRITE_4(sc, PCN_IO32_RDP, status);

		if (status & PCN_CSR_RINT)
			pcn_rxeof(sc);

		if (status & PCN_CSR_TINT)
			pcn_txeof(sc);

		if (status & PCN_CSR_ERR) {
			pcn_init(sc);
			break;
		}
	}

	if (!ifq_is_empty(&ifp->if_snd))
		pcn_start(ifp);

	return;
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
static int
pcn_encap(sc, m_head, txidx)
	struct pcn_softc	*sc;
	struct mbuf		*m_head;
	u_int32_t		*txidx;
{
	struct pcn_tx_desc	*f = NULL;
	struct mbuf		*m;
	int			frag, cur, cnt = 0;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers.  Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	m = m_head;
	cur = frag = *txidx;

	for (m = m_head; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			if ((PCN_TX_LIST_CNT -
			    (sc->pcn_cdata.pcn_tx_cnt + cnt)) < 2)
				return(ENOBUFS);
			f = &sc->pcn_ldata->pcn_tx_list[frag];
			f->pcn_txctl = (~(m->m_len) + 1) & PCN_TXCTL_BUFSZ;
			f->pcn_txctl |= PCN_TXCTL_MBO;
			f->pcn_tbaddr = vtophys(mtod(m, vm_offset_t));
			if (cnt == 0)
				f->pcn_txctl |= PCN_TXCTL_STP;
			else
				f->pcn_txctl |= PCN_TXCTL_OWN;
			cur = frag;
			PCN_INC(frag, PCN_TX_LIST_CNT);
			cnt++;
		}
	}

	if (m != NULL)
		return(ENOBUFS);

	sc->pcn_cdata.pcn_tx_chain[cur] = m_head;
	sc->pcn_ldata->pcn_tx_list[cur].pcn_txctl |=
	    PCN_TXCTL_ENP|PCN_TXCTL_ADD_FCS|PCN_TXCTL_MORE_LTINT;
	sc->pcn_ldata->pcn_tx_list[*txidx].pcn_txctl |= PCN_TXCTL_OWN;
	sc->pcn_cdata.pcn_tx_cnt += cnt;
	*txidx = frag;

	return(0);
}

/*
 * Main transmit routine.  To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists.  We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */
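/*
 * Note the ifq_poll()/ifq_dequeue() pattern below: a packet is only
 * dequeued after pcn_encap() has accepted it, so nothing is lost if
 * the transmit ring fills up mid-queue.
 */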
static void
pcn_start(ifp)
	struct ifnet		*ifp;
{
	struct pcn_softc	*sc;
	struct mbuf		*m_head = NULL;
	u_int32_t		idx;
	int			need_trans;

	sc = ifp->if_softc;

	if (!sc->pcn_link)
		return;

	idx = sc->pcn_cdata.pcn_tx_prod;

	if (ifp->if_flags & IFF_OACTIVE)
		return;

	need_trans = 0;
	while(sc->pcn_cdata.pcn_tx_chain[idx] == NULL) {
		m_head = ifq_poll(&ifp->if_snd);
		if (m_head == NULL)
			break;

		if (pcn_encap(sc, m_head, &idx)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		ifq_dequeue(&ifp->if_snd, m_head);
		need_trans = 1;

		BPF_MTAP(ifp, m_head);
	}

	if (!need_trans)
		return;

	/* Transmit */
	sc->pcn_cdata.pcn_tx_prod = idx;
	pcn_csr_write(sc, PCN_CSR_CSR, PCN_CSR_TX|PCN_CSR_INTEN);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

static void
pcn_setfilt(ifp)
	struct ifnet		*ifp;
{
	struct pcn_softc	*sc;

	sc = ifp->if_softc;

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		PCN_CSR_SETBIT(sc, PCN_CSR_MODE, PCN_MODE_PROMISC);
	} else {
		PCN_CSR_CLRBIT(sc, PCN_CSR_MODE, PCN_MODE_PROMISC);
	}

	/* Set the capture broadcast bit to capture broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST) {
		PCN_CSR_CLRBIT(sc, PCN_CSR_MODE, PCN_MODE_RXNOBROAD);
	} else {
		PCN_CSR_SETBIT(sc, PCN_CSR_MODE, PCN_MODE_RXNOBROAD);
	}

	return;
}
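
/*
 * (Re)initialize the hardware.  This runs when the interface is
 * brought up (via if_init) and again from the error, ioctl and
 * watchdog paths, so it always stops and resets the chip before
 * reprogramming it.
 */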
static void
pcn_init(xsc)
	void			*xsc;
{
	struct pcn_softc	*sc = xsc;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	struct mii_data		*mii = NULL;

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	pcn_stop(sc);
	pcn_reset(sc);

	mii = device_get_softc(sc->pcn_miibus);

	/* Set MAC address */
	pcn_csr_write(sc, PCN_CSR_PAR0,
	    ((u_int16_t *)sc->arpcom.ac_enaddr)[0]);
	pcn_csr_write(sc, PCN_CSR_PAR1,
	    ((u_int16_t *)sc->arpcom.ac_enaddr)[1]);
	pcn_csr_write(sc, PCN_CSR_PAR2,
	    ((u_int16_t *)sc->arpcom.ac_enaddr)[2]);

	/* Init circular RX list. */
	if (pcn_list_rx_init(sc) == ENOBUFS) {
		printf("pcn%d: initialization failed: no "
		    "memory for rx buffers\n", sc->pcn_unit);
		pcn_stop(sc);

		return;
	}

	/* Set up RX filter. */
	pcn_setfilt(ifp);

	/*
	 * Init tx descriptors.
	 */
	pcn_list_tx_init(sc);

	/* Set up the mode register. */
	pcn_csr_write(sc, PCN_CSR_MODE, PCN_PORT_MII);

	/*
	 * Load the multicast filter.
	 */
	pcn_setmulti(sc);

	/*
	 * Load the addresses of the RX and TX lists.
	 */
	pcn_csr_write(sc, PCN_CSR_RXADDR0,
	    vtophys(&sc->pcn_ldata->pcn_rx_list[0]) & 0xFFFF);
	pcn_csr_write(sc, PCN_CSR_RXADDR1,
	    (vtophys(&sc->pcn_ldata->pcn_rx_list[0]) >> 16) & 0xFFFF);
	pcn_csr_write(sc, PCN_CSR_TXADDR0,
	    vtophys(&sc->pcn_ldata->pcn_tx_list[0]) & 0xFFFF);
	pcn_csr_write(sc, PCN_CSR_TXADDR1,
	    (vtophys(&sc->pcn_ldata->pcn_tx_list[0]) >> 16) & 0xFFFF);

	/* Set the RX and TX ring sizes. */
	pcn_csr_write(sc, PCN_CSR_RXRINGLEN, (~PCN_RX_LIST_CNT) + 1);
	pcn_csr_write(sc, PCN_CSR_TXRINGLEN, (~PCN_TX_LIST_CNT) + 1);

	/* We're not using the initialization block. */
	pcn_csr_write(sc, PCN_CSR_IAB1, 0);

	/* Enable fast suspend mode. */
	PCN_CSR_SETBIT(sc, PCN_CSR_EXTCTL2, PCN_EXTCTL2_FASTSPNDE);

	/*
	 * Enable burst read and write.  Also set the no underflow
	 * bit.  This will avoid transmit underruns in certain
	 * conditions while still providing decent performance.
	 */
	PCN_BCR_SETBIT(sc, PCN_BCR_BUSCTL, PCN_BUSCTL_NOUFLOW|
	    PCN_BUSCTL_BREAD|PCN_BUSCTL_BWRITE);

	/* Enable graceful recovery from underflow. */
	PCN_CSR_SETBIT(sc, PCN_CSR_IMR, PCN_IMR_DXSUFLO);

	/* Enable auto-padding of short TX frames. */
	PCN_CSR_SETBIT(sc, PCN_CSR_TFEAT, PCN_TFEAT_PAD_TX);

	/* Disable MII autoneg (we handle this ourselves). */
	PCN_BCR_SETBIT(sc, PCN_BCR_MIICTL, PCN_MIICTL_DANAS);

	if (sc->pcn_type == Am79C978)
		pcn_bcr_write(sc, PCN_BCR_PHYSEL,
		    PCN_PHYSEL_PCNET|PCN_PHY_HOMEPNA);

	/* Enable interrupts and start the controller running. */
	pcn_csr_write(sc, PCN_CSR_CSR, PCN_CSR_INTEN|PCN_CSR_START);

	mii_mediachg(mii);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	callout_reset(&sc->pcn_stat_timer, hz, pcn_tick, sc);
}

/*
 * Set media options.
 */
static int
pcn_ifmedia_upd(ifp)
	struct ifnet		*ifp;
{
	struct pcn_softc	*sc;
	struct mii_data		*mii;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->pcn_miibus);

	sc->pcn_link = 0;
	if (mii->mii_instance) {
		struct mii_softc	*miisc;
		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
		    miisc = LIST_NEXT(miisc, mii_list))
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return(0);
}

/*
 * Report current media status.
 */
static void
pcn_ifmedia_sts(ifp, ifmr)
	struct ifnet		*ifp;
	struct ifmediareq	*ifmr;
{
	struct pcn_softc	*sc;
	struct mii_data		*mii;

	sc = ifp->if_softc;

	mii = device_get_softc(sc->pcn_miibus);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	return;
}
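
/*
 * Handle socket ioctls.  SIOCSIFFLAGS reprograms the RX filter on
 * promiscuous-mode changes by briefly suspending the controller;
 * multicast and media requests are handled here too, and everything
 * else is passed on to ether_ioctl().
 */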
static int
pcn_ioctl(ifp, command, data, cr)
	struct ifnet		*ifp;
	u_long			command;
	caddr_t			data;
	struct ucred		*cr;
{
	struct pcn_softc	*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	struct mii_data		*mii = NULL;
	int			error = 0;

	switch(command) {
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->pcn_if_flags & IFF_PROMISC)) {
				PCN_CSR_SETBIT(sc, PCN_CSR_EXTCTL1,
				    PCN_EXTCTL1_SPND);
				pcn_setfilt(ifp);
				PCN_CSR_CLRBIT(sc, PCN_CSR_EXTCTL1,
				    PCN_EXTCTL1_SPND);
				pcn_csr_write(sc, PCN_CSR_CSR,
				    PCN_CSR_INTEN|PCN_CSR_START);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->pcn_if_flags & IFF_PROMISC) {
				PCN_CSR_SETBIT(sc, PCN_CSR_EXTCTL1,
				    PCN_EXTCTL1_SPND);
				pcn_setfilt(ifp);
				PCN_CSR_CLRBIT(sc, PCN_CSR_EXTCTL1,
				    PCN_EXTCTL1_SPND);
				pcn_csr_write(sc, PCN_CSR_CSR,
				    PCN_CSR_INTEN|PCN_CSR_START);
			} else if (!(ifp->if_flags & IFF_RUNNING))
				pcn_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				pcn_stop(sc);
		}
		sc->pcn_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		pcn_setmulti(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->pcn_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
	return(error);
}

static void
pcn_watchdog(ifp)
	struct ifnet		*ifp;
{
	struct pcn_softc	*sc;

	sc = ifp->if_softc;

	ifp->if_oerrors++;
	printf("pcn%d: watchdog timeout\n", sc->pcn_unit);

	pcn_stop(sc);
	pcn_reset(sc);
	pcn_init(sc);

	if (!ifq_is_empty(&ifp->if_snd))
		pcn_start(ifp);

	return;
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
pcn_stop(sc)
	struct pcn_softc	*sc;
{
	int			i;
	struct ifnet		*ifp;

	ifp = &sc->arpcom.ac_if;
	ifp->if_timer = 0;

	callout_stop(&sc->pcn_stat_timer);
	PCN_CSR_SETBIT(sc, PCN_CSR_CSR, PCN_CSR_STOP);
	sc->pcn_link = 0;

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < PCN_RX_LIST_CNT; i++) {
		if (sc->pcn_cdata.pcn_rx_chain[i] != NULL) {
			m_freem(sc->pcn_cdata.pcn_rx_chain[i]);
			sc->pcn_cdata.pcn_rx_chain[i] = NULL;
		}
	}
	bzero((char *)&sc->pcn_ldata->pcn_rx_list,
	    sizeof(sc->pcn_ldata->pcn_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < PCN_TX_LIST_CNT; i++) {
		if (sc->pcn_cdata.pcn_tx_chain[i] != NULL) {
			m_freem(sc->pcn_cdata.pcn_tx_chain[i]);
			sc->pcn_cdata.pcn_tx_chain[i] = NULL;
		}
	}

	bzero((char *)&sc->pcn_ldata->pcn_tx_list,
	    sizeof(sc->pcn_ldata->pcn_tx_list));

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	return;
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
pcn_shutdown(dev)
	device_t		dev;
{
	struct pcn_softc	*sc = device_get_softc(dev);
	struct ifnet		*ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	pcn_reset(sc);
	pcn_stop(sc);
	lwkt_serialize_exit(ifp->if_serializer);
}