/*	$OpenBSD: if_nge.c,v 1.67 2008/11/28 02:44:18 brad Exp $	*/
/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2000, 2001
 *	Bill Paul <wpaul@bsdi.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: if_nge.c,v 1.35 2002/08/08 18:33:28 ambrisko Exp $
 */

/*
 * National Semiconductor DP83820/DP83821 gigabit ethernet driver
 * for FreeBSD.  Datasheets are available from:
 *
 * http://www.national.com/ds/DP/DP83820.pdf
 * http://www.national.com/ds/DP/DP83821.pdf
 *
 * These chips are used on several low cost gigabit ethernet NICs
 * sold by D-Link, Addtron, SMC and Asante.  Both parts are
 * virtually the same, except the 83820 is a 64-bit/32-bit part,
 * while the 83821 is 32-bit only.
 *
 * Many cards also use National gigE transceivers, such as the
 * DP83891, DP83861 and DP83862 gigPHYTER parts.  The DP83861 datasheet
 * contains a full register description that applies to all of these
 * components:
 *
 * http://www.national.com/ds/DP/DP83861.pdf
 *
 * Written by Bill Paul <wpaul@bsdi.com>
 * BSDi Open Source Solutions
 */

/*
 * The NatSemi DP83820 and 83821 controllers are enhanced versions
 * of the NatSemi MacPHYTER 10/100 devices.  They support 10, 100
 * and 1000Mbps speeds with 1000baseX (ten bit interface), MII and GMII
 * ports.  Other features include 8K TX FIFO and 32K RX FIFO, TCP/IP
 * hardware checksum offload (IPv4 only), VLAN tagging and filtering,
 * priority TX and RX queues, a 2048 bit multicast hash filter, 4 RX pattern
 * matching buffers, one perfect address filter buffer and interrupt
 * moderation.  The 83820 supports both 64-bit and 32-bit addressing
 * and data transfers: the 64-bit support can be toggled on or off
 * via software.  This affects the size of certain fields in the DMA
 * descriptors.
 *
 * There are two bugs/misfeatures in the 83820/83821 that I have
 * discovered so far:
 *
 * - Receive buffers must be aligned on 64-bit boundaries, which means
 *   you must resort to copying data in order to fix up the payload
 *   alignment.
 *
 * - In order to transmit jumbo frames larger than 8170 bytes, you have
 *   to turn off transmit checksum offloading, because the chip can't
 *   compute the checksum on an outgoing frame unless it fits entirely
 *   within the TX FIFO, which is only 8192 bytes in size.  If you have
 *   TX checksum offload enabled and you attempt to transmit a
 *   frame larger than 8170 bytes, the transmitter will wedge.
 *
 * To work around the latter problem, TX checksum offload is disabled
 * if the user selects an MTU larger than 8152 (8170 - 18).
 */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <uvm/uvm_extern.h>	/* for vtophys */
#define	VTOPHYS(v)	vtophys((vaddr_t)(v))

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#define NGE_USEIOSPACE

#include <dev/pci/if_ngereg.h>

int	nge_probe(struct device *, void *, void *);
void	nge_attach(struct device *, struct device *, void *);

int	nge_alloc_jumbo_mem(struct nge_softc *);
void	*nge_jalloc(struct nge_softc *);
void	nge_jfree(caddr_t, u_int, void *);

int	nge_newbuf(struct nge_softc *, struct nge_desc *,
	    struct mbuf *);
int	nge_encap(struct nge_softc *, struct mbuf *, u_int32_t *);
void	nge_rxeof(struct nge_softc *);
void	nge_txeof(struct nge_softc *);
int	nge_intr(void *);
void	nge_tick(void *);
void	nge_start(struct ifnet *);
int	nge_ioctl(struct ifnet *, u_long, caddr_t);
void	nge_init(void *);
void	nge_stop(struct nge_softc *);
void	nge_watchdog(struct ifnet *);
void	nge_shutdown(void *);
int	nge_ifmedia_mii_upd(struct ifnet *);
void	nge_ifmedia_mii_sts(struct ifnet *, struct ifmediareq *);
int	nge_ifmedia_tbi_upd(struct ifnet *);
void	nge_ifmedia_tbi_sts(struct ifnet *, struct ifmediareq *);

void	nge_delay(struct nge_softc *);
void	nge_eeprom_idle(struct nge_softc *);
void	nge_eeprom_putbyte(struct nge_softc *, int);
void	nge_eeprom_getword(struct nge_softc *, int, u_int16_t *);
void	nge_read_eeprom(struct nge_softc *, caddr_t, int, int, int);

void	nge_mii_sync(struct nge_softc *);
void	nge_mii_send(struct nge_softc *, u_int32_t, int);
int	nge_mii_readreg(struct nge_softc *, struct nge_mii_frame *);
int	nge_mii_writereg(struct nge_softc *, struct nge_mii_frame *);

int	nge_miibus_readreg(struct device *, int, int);
void	nge_miibus_writereg(struct device *, int, int, int);
void	nge_miibus_statchg(struct device *);

void	nge_setmulti(struct nge_softc *);
void	nge_reset(struct nge_softc *);
int	nge_list_rx_init(struct nge_softc *);
int	nge_list_tx_init(struct nge_softc *);

#ifdef NGE_USEIOSPACE
#define NGE_RES			SYS_RES_IOPORT
#define NGE_RID			NGE_PCI_LOIO
#else
#define NGE_RES			SYS_RES_MEMORY
#define NGE_RID			NGE_PCI_LOMEM
#endif

#ifdef NGE_DEBUG
#define DPRINTF(x)	if (ngedebug) printf x
#define DPRINTFN(n,x)	if (ngedebug >= (n)) printf x
int	ngedebug = 0;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

#define NGE_SETBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) | (x))

#define NGE_CLRBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) & ~(x))

#define SIO_SET(x)					\
	CSR_WRITE_4(sc, NGE_MEAR, CSR_READ_4(sc, NGE_MEAR) | (x))

#define SIO_CLR(x)					\
	CSR_WRITE_4(sc, NGE_MEAR, CSR_READ_4(sc, NGE_MEAR) & ~(x))

void
nge_delay(sc)
	struct nge_softc	*sc;
{
	int			idx;

	for (idx = (300 / 33) + 1; idx > 0; idx--)
		CSR_READ_4(sc, NGE_CSR);
}

void
nge_eeprom_idle(sc)
	struct nge_softc	*sc;
{
	int			i;

	SIO_SET(NGE_MEAR_EE_CSEL);
	nge_delay(sc);
	SIO_SET(NGE_MEAR_EE_CLK);
	nge_delay(sc);

	for (i = 0; i < 25; i++) {
		SIO_CLR(NGE_MEAR_EE_CLK);
		nge_delay(sc);
		SIO_SET(NGE_MEAR_EE_CLK);
		nge_delay(sc);
	}

	SIO_CLR(NGE_MEAR_EE_CLK);
	nge_delay(sc);
	SIO_CLR(NGE_MEAR_EE_CSEL);
	nge_delay(sc);
	CSR_WRITE_4(sc, NGE_MEAR, 0x00000000);
}

/*
 * Send a read command and address to the EEPROM, check for ACK.
 */
void
nge_eeprom_putbyte(sc, addr)
	struct nge_softc	*sc;
	int			addr;
{
	int			d, i;

	d = addr | NGE_EECMD_READ;

	/*
	 * Feed in each bit and strobe the clock.
	 */
	for (i = 0x400; i; i >>= 1) {
		if (d & i) {
			SIO_SET(NGE_MEAR_EE_DIN);
		} else {
			SIO_CLR(NGE_MEAR_EE_DIN);
		}
		nge_delay(sc);
		SIO_SET(NGE_MEAR_EE_CLK);
		nge_delay(sc);
		SIO_CLR(NGE_MEAR_EE_CLK);
		nge_delay(sc);
	}
}

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
void
nge_eeprom_getword(sc, addr, dest)
	struct nge_softc	*sc;
	int			addr;
	u_int16_t		*dest;
{
	int			i;
	u_int16_t		word = 0;

	/* Force EEPROM to idle state. */
	nge_eeprom_idle(sc);

	/* Enter EEPROM access mode. */
	nge_delay(sc);
	SIO_CLR(NGE_MEAR_EE_CLK);
	nge_delay(sc);
	SIO_SET(NGE_MEAR_EE_CSEL);
	nge_delay(sc);

	/*
	 * Send address of word we want to read.
	 */
	nge_eeprom_putbyte(sc, addr);

	/*
	 * Start reading bits from EEPROM.
	 */
	for (i = 0x8000; i; i >>= 1) {
		SIO_SET(NGE_MEAR_EE_CLK);
		nge_delay(sc);
		if (CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_EE_DOUT)
			word |= i;
		nge_delay(sc);
		SIO_CLR(NGE_MEAR_EE_CLK);
		nge_delay(sc);
	}

	/* Turn off EEPROM access mode. */
	nge_eeprom_idle(sc);

	*dest = word;
}
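
/*
 * The EEPROM is reached by bit-banging the EE_CSEL/EE_CLK/EE_DIN/EE_DOUT
 * lines in the MEAR register: nge_eeprom_putbyte() shifts out an 11-bit
 * read command (opcode plus word address) MSB first, one bit per clock
 * strobe, and nge_eeprom_getword() then clocks the 16 data bits back in
 * the same way.
 */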

/*
 * Read a sequence of words from the EEPROM.
 */
void
nge_read_eeprom(sc, dest, off, cnt, swap)
	struct nge_softc	*sc;
	caddr_t			dest;
	int			off;
	int			cnt;
	int			swap;
{
	int			i;
	u_int16_t		word = 0, *ptr;

	for (i = 0; i < cnt; i++) {
		nge_eeprom_getword(sc, off + i, &word);
		ptr = (u_int16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}
}

/*
 * Sync the PHYs by setting data bit and strobing the clock 32 times.
 */
void
nge_mii_sync(sc)
	struct nge_softc	*sc;
{
	int			i;

	SIO_SET(NGE_MEAR_MII_DIR|NGE_MEAR_MII_DATA);

	for (i = 0; i < 32; i++) {
		SIO_SET(NGE_MEAR_MII_CLK);
		DELAY(1);
		SIO_CLR(NGE_MEAR_MII_CLK);
		DELAY(1);
	}
}

/*
 * Clock a series of bits through the MII.
 */
void
nge_mii_send(sc, bits, cnt)
	struct nge_softc	*sc;
	u_int32_t		bits;
	int			cnt;
{
	int			i;

	SIO_CLR(NGE_MEAR_MII_CLK);

	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i) {
			SIO_SET(NGE_MEAR_MII_DATA);
		} else {
			SIO_CLR(NGE_MEAR_MII_DATA);
		}
		DELAY(1);
		SIO_CLR(NGE_MEAR_MII_CLK);
		DELAY(1);
		SIO_SET(NGE_MEAR_MII_CLK);
	}
}

/*
 * Read a PHY register through the MII.
 */
int
nge_mii_readreg(sc, frame)
	struct nge_softc	*sc;
	struct nge_mii_frame	*frame;
{
	int			i, ack, s;

	s = splnet();

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = NGE_MII_STARTDELIM;
	frame->mii_opcode = NGE_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	CSR_WRITE_4(sc, NGE_MEAR, 0);

	/*
	 * Turn on data xmit.
	 */
	SIO_SET(NGE_MEAR_MII_DIR);

	nge_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	nge_mii_send(sc, frame->mii_stdelim, 2);
	nge_mii_send(sc, frame->mii_opcode, 2);
	nge_mii_send(sc, frame->mii_phyaddr, 5);
	nge_mii_send(sc, frame->mii_regaddr, 5);

	/* Idle bit */
	SIO_CLR((NGE_MEAR_MII_CLK|NGE_MEAR_MII_DATA));
	DELAY(1);
	SIO_SET(NGE_MEAR_MII_CLK);
	DELAY(1);

	/* Turn off xmit. */
	SIO_CLR(NGE_MEAR_MII_DIR);
	/* Check for ack */
	SIO_CLR(NGE_MEAR_MII_CLK);
	DELAY(1);
	ack = CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_MII_DATA;
	SIO_SET(NGE_MEAR_MII_CLK);
	DELAY(1);

	/*
	 * Now try reading data bits.  If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			SIO_CLR(NGE_MEAR_MII_CLK);
			DELAY(1);
			SIO_SET(NGE_MEAR_MII_CLK);
			DELAY(1);
		}
		goto fail;
	}

	for (i = 0x8000; i; i >>= 1) {
		SIO_CLR(NGE_MEAR_MII_CLK);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_MII_DATA)
				frame->mii_data |= i;
			DELAY(1);
		}
		SIO_SET(NGE_MEAR_MII_CLK);
		DELAY(1);
	}

fail:

	SIO_CLR(NGE_MEAR_MII_CLK);
	DELAY(1);
	SIO_SET(NGE_MEAR_MII_CLK);
	DELAY(1);

	splx(s);

	if (ack)
		return(1);
	return(0);
}
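
/*
 * An MII management frame as clocked out above is a 2-bit start
 * delimiter, a 2-bit opcode, a 5-bit PHY address, a 5-bit register
 * address, a 2-bit turnaround and 16 data bits.  On a read the driver
 * releases the data line during the turnaround so the PHY can drive
 * an ACK (zero) bit; the line staying high means no PHY answered.
 */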

/*
 * Write to a PHY register through the MII.
 */
int
nge_mii_writereg(sc, frame)
	struct nge_softc	*sc;
	struct nge_mii_frame	*frame;
{
	int			s;

	s = splnet();
	/*
	 * Set up frame for TX.
	 */

	frame->mii_stdelim = NGE_MII_STARTDELIM;
	frame->mii_opcode = NGE_MII_WRITEOP;
	frame->mii_turnaround = NGE_MII_TURNAROUND;

	/*
	 * Turn on data output.
	 */
	SIO_SET(NGE_MEAR_MII_DIR);

	nge_mii_sync(sc);

	nge_mii_send(sc, frame->mii_stdelim, 2);
	nge_mii_send(sc, frame->mii_opcode, 2);
	nge_mii_send(sc, frame->mii_phyaddr, 5);
	nge_mii_send(sc, frame->mii_regaddr, 5);
	nge_mii_send(sc, frame->mii_turnaround, 2);
	nge_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	SIO_SET(NGE_MEAR_MII_CLK);
	DELAY(1);
	SIO_CLR(NGE_MEAR_MII_CLK);
	DELAY(1);

	/*
	 * Turn off xmit.
	 */
	SIO_CLR(NGE_MEAR_MII_DIR);

	splx(s);

	return(0);
}

int
nge_miibus_readreg(dev, phy, reg)
	struct device		*dev;
	int			phy, reg;
{
	struct nge_softc	*sc = (struct nge_softc *)dev;
	struct nge_mii_frame	frame;

	DPRINTFN(9, ("%s: nge_miibus_readreg\n", sc->sc_dv.dv_xname));

	bzero((char *)&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	nge_mii_readreg(sc, &frame);

	return(frame.mii_data);
}

void
nge_miibus_writereg(dev, phy, reg, data)
	struct device		*dev;
	int			phy, reg, data;
{
	struct nge_softc	*sc = (struct nge_softc *)dev;
	struct nge_mii_frame	frame;


	DPRINTFN(9, ("%s: nge_miibus_writereg\n", sc->sc_dv.dv_xname));

	bzero((char *)&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	frame.mii_data = data;
	nge_mii_writereg(sc, &frame);
}

void
nge_miibus_statchg(dev)
	struct device		*dev;
{
	struct nge_softc	*sc = (struct nge_softc *)dev;
	struct mii_data		*mii = &sc->nge_mii;
	u_int32_t		txcfg, rxcfg;

	txcfg = CSR_READ_4(sc, NGE_TX_CFG);
	rxcfg = CSR_READ_4(sc, NGE_RX_CFG);

	DPRINTFN(4, ("%s: nge_miibus_statchg txcfg=%#x, rxcfg=%#x\n",
	    sc->sc_dv.dv_xname, txcfg, rxcfg));

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		txcfg |= (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR);
		rxcfg |= (NGE_RXCFG_RX_FDX);
	} else {
		txcfg &= ~(NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR);
		rxcfg &= ~(NGE_RXCFG_RX_FDX);
	}

	txcfg |= NGE_TXCFG_AUTOPAD;

	CSR_WRITE_4(sc, NGE_TX_CFG, txcfg);
	CSR_WRITE_4(sc, NGE_RX_CFG, rxcfg);

	/* If we have a 1000Mbps link, set the mode_1000 bit. */
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
		NGE_SETBIT(sc, NGE_CFG, NGE_CFG_MODE_1000);
	else
		NGE_CLRBIT(sc, NGE_CFG, NGE_CFG_MODE_1000);
}
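
/*
 * Program the 2048-bit multicast hash filter below.  The upper 7 bits
 * of the 11-bit CRC-derived value select one of 128 16-bit filter
 * words and the low 4 bits select a bit within that word; for example,
 * h = 0x5a3 sets bit 3 of the word at NGE_FILTADDR_MCAST_LO + 0xb4.
 */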
void
nge_setmulti(sc)
	struct nge_softc	*sc;
{
	struct arpcom		*ac = &sc->arpcom;
	struct ifnet		*ifp = &ac->ac_if;
	struct ether_multi	*enm;
	struct ether_multistep	step;
	u_int32_t		h = 0, i, filtsave;
	int			bit, index;

allmulti:
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		NGE_CLRBIT(sc, NGE_RXFILT_CTL,
		    NGE_RXFILTCTL_MCHASH|NGE_RXFILTCTL_UCHASH);
		NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLMULTI);
		return;
	}

	/*
	 * We have to explicitly enable the multicast hash table
	 * on the NatSemi chip if we want to use it, which we do.
	 * We also have to tell it that we don't want to use the
	 * hash table for matching unicast addresses.
	 */
	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_MCHASH);
	NGE_CLRBIT(sc, NGE_RXFILT_CTL,
	    NGE_RXFILTCTL_ALLMULTI|NGE_RXFILTCTL_UCHASH);

	filtsave = CSR_READ_4(sc, NGE_RXFILT_CTL);

	/* first, zot all the existing hash bits */
	for (i = 0; i < NGE_MCAST_FILTER_LEN; i += 2) {
		CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_MCAST_LO + i);
		CSR_WRITE_4(sc, NGE_RXFILT_DATA, 0);
	}

	/*
	 * From the 11 bits returned by the crc routine, the top 7
	 * bits represent the 16-bit word in the mcast hash table
	 * that needs to be updated, and the lower 4 bits represent
	 * which bit within that word needs to be set.
	 */
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			ifp->if_flags |= IFF_ALLMULTI;
			goto allmulti;
		}
		h = (ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN) >> 21) &
		    0x00000FFF;
		index = (h >> 4) & 0x7F;
		bit = h & 0xF;
		CSR_WRITE_4(sc, NGE_RXFILT_CTL,
		    NGE_FILTADDR_MCAST_LO + (index * 2));
		NGE_SETBIT(sc, NGE_RXFILT_DATA, (1 << bit));
		ETHER_NEXT_MULTI(step, enm);
	}

	CSR_WRITE_4(sc, NGE_RXFILT_CTL, filtsave);
}

void
nge_reset(sc)
	struct nge_softc	*sc;
{
	int			i;

	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RESET);

	for (i = 0; i < NGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, NGE_CSR) & NGE_CSR_RESET))
			break;
	}

	if (i == NGE_TIMEOUT)
		printf("%s: reset never completed\n", sc->sc_dv.dv_xname);

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	/*
	 * If this is a NatSemi chip, make sure to clear
	 * PME mode.
	 */
	CSR_WRITE_4(sc, NGE_CLKRUN, NGE_CLKRUN_PMESTS);
	CSR_WRITE_4(sc, NGE_CLKRUN, 0);
}

/*
 * Probe for a NatSemi chip.  Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
int
nge_probe(parent, match, aux)
	struct device		*parent;
	void			*match;
	void			*aux;
{
	struct pci_attach_args	*pa = (struct pci_attach_args *)aux;

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_NS &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_NS_DP83820)
		return (1);

	return (0);
}

/*
 * Attach the interface.  Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
void
nge_attach(parent, self, aux)
	struct device		*parent, *self;
	void			*aux;
{
	struct nge_softc	*sc = (struct nge_softc *)self;
	struct pci_attach_args	*pa = aux;
	pci_chipset_tag_t	pc = pa->pa_pc;
	pci_intr_handle_t	ih;
	const char		*intrstr = NULL;
	bus_size_t		size;
	bus_dma_segment_t	seg;
	bus_dmamap_t		dmamap;
	int			rseg;
	u_char			eaddr[ETHER_ADDR_LEN];
	pcireg_t		command;
#ifndef NGE_USEIOSPACE
	pcireg_t		memtype;
#endif
	struct ifnet		*ifp;
	caddr_t			kva;

	/*
	 * Handle power management nonsense.
	 */
	DPRINTFN(5, ("%s: preparing for conf read\n", sc->sc_dv.dv_xname));
	command = pci_conf_read(pc, pa->pa_tag, NGE_PCI_CAPID) & 0x000000FF;
	if (command == 0x01) {
		command = pci_conf_read(pc, pa->pa_tag, NGE_PCI_PWRMGMTCTRL);
		if (command & NGE_PSTATE_MASK) {
			pcireg_t	iobase, membase, irq;
			/* Save important PCI config data. */
			iobase = pci_conf_read(pc, pa->pa_tag, NGE_PCI_LOIO);
			membase = pci_conf_read(pc, pa->pa_tag, NGE_PCI_LOMEM);
			irq = pci_conf_read(pc, pa->pa_tag, NGE_PCI_INTLINE);

			/* Reset the power state. */
			printf("%s: chip is in D%d power mode "
			    "-- setting to D0\n", sc->sc_dv.dv_xname,
			    command & NGE_PSTATE_MASK);
			command &= 0xFFFFFFFC;
			pci_conf_write(pc, pa->pa_tag,
			    NGE_PCI_PWRMGMTCTRL, command);

			/* Restore PCI config data. */
			pci_conf_write(pc, pa->pa_tag, NGE_PCI_LOIO, iobase);
			pci_conf_write(pc, pa->pa_tag, NGE_PCI_LOMEM, membase);
			pci_conf_write(pc, pa->pa_tag, NGE_PCI_INTLINE, irq);
		}
	}

	/*
	 * Map control/status registers.
	 */
	DPRINTFN(5, ("%s: map control/status regs\n", sc->sc_dv.dv_xname));

#ifdef NGE_USEIOSPACE
	DPRINTFN(5, ("%s: pci_mapreg_map\n", sc->sc_dv.dv_xname));
	if (pci_mapreg_map(pa, NGE_PCI_LOIO, PCI_MAPREG_TYPE_IO, 0,
	    &sc->nge_btag, &sc->nge_bhandle, NULL, &size, 0)) {
		printf(": can't map i/o space\n");
		return;
	}
#else
	DPRINTFN(5, ("%s: pci_mapreg_map\n", sc->sc_dv.dv_xname));
	memtype = pci_mapreg_type(pc, pa->pa_tag, NGE_PCI_LOMEM);
	if (pci_mapreg_map(pa, NGE_PCI_LOMEM, memtype, 0, &sc->nge_btag,
	    &sc->nge_bhandle, NULL, &size, 0)) {
		printf(": can't map mem space\n");
		return;
	}
#endif

	/* Disable all interrupts */
	CSR_WRITE_4(sc, NGE_IER, 0);

	DPRINTFN(5, ("%s: pci_intr_map\n", sc->sc_dv.dv_xname));
	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		goto fail_1;
	}

	DPRINTFN(5, ("%s: pci_intr_string\n", sc->sc_dv.dv_xname));
	intrstr = pci_intr_string(pc, ih);
	DPRINTFN(5, ("%s: pci_intr_establish\n", sc->sc_dv.dv_xname));
	sc->nge_intrhand = pci_intr_establish(pc, ih, IPL_NET, nge_intr, sc,
	    sc->sc_dv.dv_xname);
	if (sc->nge_intrhand == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail_1;
	}
	printf(": %s", intrstr);

	/* Reset the adapter. */
	DPRINTFN(5, ("%s: nge_reset\n", sc->sc_dv.dv_xname));
	nge_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 */
	DPRINTFN(5, ("%s: nge_read_eeprom\n", sc->sc_dv.dv_xname));
	nge_read_eeprom(sc, (caddr_t)&eaddr[4], NGE_EE_NODEADDR, 1, 0);
	nge_read_eeprom(sc, (caddr_t)&eaddr[2], NGE_EE_NODEADDR + 1, 1, 0);
	nge_read_eeprom(sc, (caddr_t)&eaddr[0], NGE_EE_NODEADDR + 2, 1, 0);
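
	/*
	 * Note that the station address is stored in the EEPROM in
	 * reverse word order: the word at NGE_EE_NODEADDR holds the
	 * last two bytes of the address, which is why the reads above
	 * fill in eaddr[] back to front.
	 */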

	/*
	 * A NatSemi chip was detected.  Inform the world.
	 */
	printf(", address %s\n", ether_sprintf(eaddr));

	bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);

	sc->sc_dmatag = pa->pa_dmat;
	DPRINTFN(5, ("%s: bus_dmamem_alloc\n", sc->sc_dv.dv_xname));
	if (bus_dmamem_alloc(sc->sc_dmatag, sizeof(struct nge_list_data),
	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		printf("%s: can't alloc rx buffers\n", sc->sc_dv.dv_xname);
		goto fail_2;
	}
	DPRINTFN(5, ("%s: bus_dmamem_map\n", sc->sc_dv.dv_xname));
	if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg,
	    sizeof(struct nge_list_data), &kva,
	    BUS_DMA_NOWAIT)) {
		printf("%s: can't map dma buffers (%d bytes)\n",
		    sc->sc_dv.dv_xname, sizeof(struct nge_list_data));
		goto fail_3;
	}
	DPRINTFN(5, ("%s: bus_dmamap_create\n", sc->sc_dv.dv_xname));
	if (bus_dmamap_create(sc->sc_dmatag, sizeof(struct nge_list_data), 1,
	    sizeof(struct nge_list_data), 0,
	    BUS_DMA_NOWAIT, &dmamap)) {
		printf("%s: can't create dma map\n", sc->sc_dv.dv_xname);
		goto fail_4;
	}
	DPRINTFN(5, ("%s: bus_dmamap_load\n", sc->sc_dv.dv_xname));
	if (bus_dmamap_load(sc->sc_dmatag, dmamap, kva,
	    sizeof(struct nge_list_data), NULL,
	    BUS_DMA_NOWAIT)) {
		goto fail_5;
	}

	DPRINTFN(5, ("%s: bzero\n", sc->sc_dv.dv_xname));
	sc->nge_ldata = (struct nge_list_data *)kva;
	bzero(sc->nge_ldata, sizeof(struct nge_list_data));

	/* Try to allocate memory for jumbo buffers. */
	DPRINTFN(5, ("%s: nge_alloc_jumbo_mem\n", sc->sc_dv.dv_xname));
	if (nge_alloc_jumbo_mem(sc)) {
		printf("%s: jumbo buffer allocation failed\n",
		    sc->sc_dv.dv_xname);
		goto fail_5;
	}

	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nge_ioctl;
	ifp->if_start = nge_start;
	ifp->if_watchdog = nge_watchdog;
	ifp->if_baudrate = 1000000000;
	ifp->if_hardmtu = NGE_JUMBO_MTU;
	IFQ_SET_MAXLEN(&ifp->if_snd, NGE_TX_LIST_CNT - 1);
	IFQ_SET_READY(&ifp->if_snd);
	DPRINTFN(5, ("%s: bcopy\n", sc->sc_dv.dv_xname));
	bcopy(sc->sc_dv.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	/*
	 * Do MII setup.
	 */
	DPRINTFN(5, ("%s: mii setup\n", sc->sc_dv.dv_xname));
	if (CSR_READ_4(sc, NGE_CFG) & NGE_CFG_TBI_EN) {
		DPRINTFN(5, ("%s: TBI mode\n", sc->sc_dv.dv_xname));
		sc->nge_tbi = 1;

		ifmedia_init(&sc->nge_ifmedia, 0, nge_ifmedia_tbi_upd,
		    nge_ifmedia_tbi_sts);

		ifmedia_add(&sc->nge_ifmedia, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_add(&sc->nge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
		ifmedia_add(&sc->nge_ifmedia, IFM_ETHER|IFM_1000_SX|IFM_FDX,
		    0, NULL);
		ifmedia_add(&sc->nge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);

		ifmedia_set(&sc->nge_ifmedia, IFM_ETHER|IFM_AUTO);

		CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO)
		    | NGE_GPIO_GP4_OUT
		    | NGE_GPIO_GP1_OUTENB | NGE_GPIO_GP2_OUTENB
		    | NGE_GPIO_GP3_OUTENB | NGE_GPIO_GP4_OUTENB
		    | NGE_GPIO_GP5_OUTENB);

		NGE_SETBIT(sc, NGE_CFG, NGE_CFG_MODE_1000);
	} else {
		sc->nge_mii.mii_ifp = ifp;
		sc->nge_mii.mii_readreg = nge_miibus_readreg;
		sc->nge_mii.mii_writereg = nge_miibus_writereg;
		sc->nge_mii.mii_statchg = nge_miibus_statchg;

		ifmedia_init(&sc->nge_mii.mii_media, 0, nge_ifmedia_mii_upd,
		    nge_ifmedia_mii_sts);
		mii_attach(&sc->sc_dv, &sc->nge_mii, 0xffffffff, MII_PHY_ANY,
		    MII_OFFSET_ANY, 0);

		if (LIST_FIRST(&sc->nge_mii.mii_phys) == NULL) {
			printf("%s: no PHY found!\n", sc->sc_dv.dv_xname);
			ifmedia_add(&sc->nge_mii.mii_media,
			    IFM_ETHER|IFM_MANUAL, 0, NULL);
			ifmedia_set(&sc->nge_mii.mii_media,
			    IFM_ETHER|IFM_MANUAL);
		} else
			ifmedia_set(&sc->nge_mii.mii_media,
			    IFM_ETHER|IFM_AUTO);
	}

	/*
	 * Call MI attach routine.
	 */
	DPRINTFN(5, ("%s: if_attach\n", sc->sc_dv.dv_xname));
	if_attach(ifp);
	DPRINTFN(5, ("%s: ether_ifattach\n", sc->sc_dv.dv_xname));
	ether_ifattach(ifp);
	DPRINTFN(5, ("%s: timeout_set\n", sc->sc_dv.dv_xname));
	timeout_set(&sc->nge_timeout, nge_tick, sc);
	timeout_add_sec(&sc->nge_timeout, 1);
	return;

fail_5:
	bus_dmamap_destroy(sc->sc_dmatag, dmamap);

fail_4:
	bus_dmamem_unmap(sc->sc_dmatag, kva,
	    sizeof(struct nge_list_data));

fail_3:
	bus_dmamem_free(sc->sc_dmatag, &seg, rseg);

fail_2:
	pci_intr_disestablish(pc, sc->nge_intrhand);

fail_1:
	bus_space_unmap(sc->nge_btag, sc->nge_bhandle, size);
}

/*
 * Initialize the transmit descriptors.
 */
int
nge_list_tx_init(sc)
	struct nge_softc	*sc;
{
	struct nge_list_data	*ld;
	struct nge_ring_data	*cd;
	int			i;

	cd = &sc->nge_cdata;
	ld = sc->nge_ldata;

	for (i = 0; i < NGE_TX_LIST_CNT; i++) {
		if (i == (NGE_TX_LIST_CNT - 1)) {
			ld->nge_tx_list[i].nge_nextdesc =
			    &ld->nge_tx_list[0];
			ld->nge_tx_list[i].nge_next =
			    VTOPHYS(&ld->nge_tx_list[0]);
		} else {
			ld->nge_tx_list[i].nge_nextdesc =
			    &ld->nge_tx_list[i + 1];
			ld->nge_tx_list[i].nge_next =
			    VTOPHYS(&ld->nge_tx_list[i + 1]);
		}
		ld->nge_tx_list[i].nge_mbuf = NULL;
		ld->nge_tx_list[i].nge_ptr = 0;
		ld->nge_tx_list[i].nge_ctl = 0;
	}

	cd->nge_tx_prod = cd->nge_tx_cons = cd->nge_tx_cnt = 0;

	return(0);
}


/*
 * Initialize the RX descriptors and allocate mbufs for them.  Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
int
nge_list_rx_init(sc)
	struct nge_softc	*sc;
{
	struct nge_list_data	*ld;
	struct nge_ring_data	*cd;
	int			i;

	ld = sc->nge_ldata;
	cd = &sc->nge_cdata;

	for (i = 0; i < NGE_RX_LIST_CNT; i++) {
		if (nge_newbuf(sc, &ld->nge_rx_list[i], NULL) == ENOBUFS)
			return(ENOBUFS);
		if (i == (NGE_RX_LIST_CNT - 1)) {
			ld->nge_rx_list[i].nge_nextdesc =
			    &ld->nge_rx_list[0];
			ld->nge_rx_list[i].nge_next =
			    VTOPHYS(&ld->nge_rx_list[0]);
		} else {
			ld->nge_rx_list[i].nge_nextdesc =
			    &ld->nge_rx_list[i + 1];
			ld->nge_rx_list[i].nge_next =
			    VTOPHYS(&ld->nge_rx_list[i + 1]);
		}
	}

	cd->nge_rx_prod = 0;

	return(0);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 */
int
nge_newbuf(sc, c, m)
	struct nge_softc	*sc;
	struct nge_desc		*c;
	struct mbuf		*m;
{
	struct mbuf		*m_new = NULL;

	if (m == NULL) {
		caddr_t buf = NULL;

		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return (ENOBUFS);

		/* Allocate the jumbo buffer */
		buf = nge_jalloc(sc);
		if (buf == NULL) {
			m_freem(m_new);
			return (ENOBUFS);
		}

		/* Attach the buffer to the mbuf */
		m_new->m_len = m_new->m_pkthdr.len = NGE_MCLBYTES;
		MEXTADD(m_new, buf, NGE_MCLBYTES, 0, nge_jfree, sc);
	} else {
		/*
		 * We're re-using a previously allocated mbuf;
		 * be sure to re-init pointers and lengths to
		 * default values.
		 */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = NGE_MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	m_adj(m_new, sizeof(u_int64_t));

	c->nge_mbuf = m_new;
	c->nge_ptr = VTOPHYS(mtod(m_new, caddr_t));
	DPRINTFN(7,("%s: c->nge_ptr=%#x\n", sc->sc_dv.dv_xname,
	    c->nge_ptr));
	c->nge_ctl = m_new->m_len;
	c->nge_extsts = 0;

	return(0);
}
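
/*
 * Allocate one contiguous DMA region for the jumbo buffer pool and
 * carve it into NGE_JSLOTS buffers of NGE_MCLBYTES each, tracked by
 * the free/in-use lists initialized below.
 */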
int
nge_alloc_jumbo_mem(sc)
	struct nge_softc	*sc;
{
	caddr_t			ptr, kva;
	bus_dma_segment_t	seg;
	bus_dmamap_t		dmamap;
	int			i, rseg, state, error;
	struct nge_jpool_entry	*entry;

	state = error = 0;

	if (bus_dmamem_alloc(sc->sc_dmatag, NGE_JMEM, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		printf("%s: can't alloc rx buffers\n", sc->sc_dv.dv_xname);
		return (ENOBUFS);
	}

	state = 1;
	if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg, NGE_JMEM, &kva,
	    BUS_DMA_NOWAIT)) {
		printf("%s: can't map dma buffers (%d bytes)\n",
		    sc->sc_dv.dv_xname, NGE_JMEM);
		error = ENOBUFS;
		goto out;
	}

	state = 2;
	if (bus_dmamap_create(sc->sc_dmatag, NGE_JMEM, 1,
	    NGE_JMEM, 0, BUS_DMA_NOWAIT, &dmamap)) {
		printf("%s: can't create dma map\n", sc->sc_dv.dv_xname);
		error = ENOBUFS;
		goto out;
	}

	state = 3;
	if (bus_dmamap_load(sc->sc_dmatag, dmamap, kva, NGE_JMEM,
	    NULL, BUS_DMA_NOWAIT)) {
		printf("%s: can't load dma map\n", sc->sc_dv.dv_xname);
		error = ENOBUFS;
		goto out;
	}

	state = 4;
	sc->nge_cdata.nge_jumbo_buf = (caddr_t)kva;
	DPRINTFN(1,("%s: nge_jumbo_buf=%#x, NGE_MCLBYTES=%#x\n",
	    sc->sc_dv.dv_xname , sc->nge_cdata.nge_jumbo_buf,
	    NGE_MCLBYTES));

	LIST_INIT(&sc->nge_jfree_listhead);
	LIST_INIT(&sc->nge_jinuse_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array.  Note that we play an evil trick here by using
	 * the first few bytes in the buffer to hold the address
	 * of the softc structure for this interface.  This is because
	 * nge_jfree() needs it, but it is called by the mbuf management
	 * code which will not pass it to us explicitly.
	 */
	ptr = sc->nge_cdata.nge_jumbo_buf;
	for (i = 0; i < NGE_JSLOTS; i++) {
		sc->nge_cdata.nge_jslots[i].nge_buf = ptr;
		sc->nge_cdata.nge_jslots[i].nge_inuse = 0;
		ptr += NGE_MCLBYTES;
		entry = malloc(sizeof(struct nge_jpool_entry),
		    M_DEVBUF, M_NOWAIT);
		if (entry == NULL) {
			sc->nge_cdata.nge_jumbo_buf = NULL;
			printf("%s: no memory for jumbo buffer queue!\n",
			    sc->sc_dv.dv_xname);
			error = ENOBUFS;
			goto out;
		}
		entry->slot = i;
		LIST_INSERT_HEAD(&sc->nge_jfree_listhead, entry,
		    jpool_entries);
	}
out:
	if (error != 0) {
		switch (state) {
		case 4:
			bus_dmamap_unload(sc->sc_dmatag, dmamap);
		case 3:
			bus_dmamap_destroy(sc->sc_dmatag, dmamap);
		case 2:
			bus_dmamem_unmap(sc->sc_dmatag, kva, NGE_JMEM);
		case 1:
			bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
			break;
		default:
			break;
		}
	}

	return (error);
}

/*
 * Allocate a jumbo buffer.
 */
void *
nge_jalloc(sc)
	struct nge_softc	*sc;
{
	struct nge_jpool_entry	*entry;

	entry = LIST_FIRST(&sc->nge_jfree_listhead);

	if (entry == NULL)
		return (NULL);

	LIST_REMOVE(entry, jpool_entries);
	LIST_INSERT_HEAD(&sc->nge_jinuse_listhead, entry, jpool_entries);
	sc->nge_cdata.nge_jslots[entry->slot].nge_inuse = 1;
	return(sc->nge_cdata.nge_jslots[entry->slot].nge_buf);
}

/*
 * Release a jumbo buffer.
 */
void
nge_jfree(buf, size, arg)
	caddr_t		buf;
	u_int		size;
	void		*arg;
{
	struct nge_softc	*sc;
	int			i;
	struct nge_jpool_entry	*entry;

	/* Extract the softc struct pointer. */
	sc = (struct nge_softc *)arg;

	if (sc == NULL)
		panic("nge_jfree: can't find softc pointer!");

	/* calculate the slot this buffer belongs to */
	i = ((vaddr_t)buf - (vaddr_t)sc->nge_cdata.nge_jumbo_buf)
	    / NGE_MCLBYTES;

	if ((i < 0) || (i >= NGE_JSLOTS))
		panic("nge_jfree: asked to free buffer that we don't manage!");
	else if (sc->nge_cdata.nge_jslots[i].nge_inuse == 0)
		panic("nge_jfree: buffer already free!");
	else {
		sc->nge_cdata.nge_jslots[i].nge_inuse--;
		if(sc->nge_cdata.nge_jslots[i].nge_inuse == 0) {
			entry = LIST_FIRST(&sc->nge_jinuse_listhead);
			if (entry == NULL)
				panic("nge_jfree: buffer not in use!");
			entry->slot = i;
			LIST_REMOVE(entry, jpool_entries);
			LIST_INSERT_HEAD(&sc->nge_jfree_listhead,
			    entry, jpool_entries);
		}
	}
}
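
/*
 * nge_jfree() above is registered via MEXTADD() in nge_newbuf() as the
 * external storage free routine: the mbuf layer invokes it with the
 * buffer address, its size and the opaque softc argument once a jumbo
 * buffer is no longer referenced, and the slot, recomputed from the
 * buffer address, goes back on the free list.
 */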

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
void
nge_rxeof(sc)
	struct nge_softc	*sc;
{
	struct mbuf		*m;
	struct ifnet		*ifp;
	struct nge_desc		*cur_rx;
	int			i, total_len = 0;
	u_int32_t		rxstat;

	ifp = &sc->arpcom.ac_if;
	i = sc->nge_cdata.nge_rx_prod;

	while (NGE_OWNDESC(&sc->nge_ldata->nge_rx_list[i])) {
		struct mbuf		*m0 = NULL;
		u_int32_t		extsts;

		cur_rx = &sc->nge_ldata->nge_rx_list[i];
		rxstat = cur_rx->nge_rxstat;
		extsts = cur_rx->nge_extsts;
		m = cur_rx->nge_mbuf;
		cur_rx->nge_mbuf = NULL;
		total_len = NGE_RXBYTES(cur_rx);
		NGE_INC(i, NGE_RX_LIST_CNT);

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (!(rxstat & NGE_CMDSTS_PKT_OK)) {
#if NVLAN > 0
			if ((rxstat & NGE_RXSTAT_RUNT) &&
			    total_len >= (ETHER_MIN_LEN - ETHER_CRC_LEN -
			    ETHER_VLAN_ENCAP_LEN)) {
				/*
				 * Workaround a hardware bug.  Accept runt
				 * frames if their length is larger than or
				 * equal to 56.
				 */
			} else {
#endif
				ifp->if_ierrors++;
				nge_newbuf(sc, cur_rx, m);
				continue;
#if NVLAN > 0
			}
#endif
		}

		/*
		 * Ok.  NatSemi really screwed up here.  This is the
		 * only gigE chip I know of with alignment constraints
		 * on receive buffers.  RX buffers must be 64-bit aligned.
		 */
#ifndef __STRICT_ALIGNMENT
		/*
		 * By popular demand, ignore the alignment problems
		 * on the Intel x86 platform.  The performance hit
		 * incurred due to unaligned accesses is much smaller
		 * than the hit produced by forcing buffer copies all
		 * the time, especially with jumbo frames.  We still
		 * need to fix up the alignment everywhere else though.
		 */
		if (nge_newbuf(sc, cur_rx, NULL) == ENOBUFS) {
#endif
			m0 = m_devget(mtod(m, char *), total_len,
			    ETHER_ALIGN, ifp, NULL);
			nge_newbuf(sc, cur_rx, m);
			if (m0 == NULL) {
				ifp->if_ierrors++;
				continue;
			}
			m_adj(m0, ETHER_ALIGN);
			m = m0;
#ifndef __STRICT_ALIGNMENT
		} else {
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = total_len;
		}
#endif

		ifp->if_ipackets++;

#if NVLAN > 0
		if (extsts & NGE_RXEXTSTS_VLANPKT) {
			m->m_pkthdr.ether_vtag =
			    ntohs(extsts & NGE_RXEXTSTS_VTCI);
			m->m_flags |= M_VLANTAG;
		}
#endif

#if NBPFILTER > 0
		/*
		 * Handle BPF listeners.  Let the BPF user see the packet.
		 */
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif

		/* Do IP checksum checking. */
		if (extsts & NGE_RXEXTSTS_IPPKT) {
			if (!(extsts & NGE_RXEXTSTS_IPCSUMERR))
				m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
			if ((extsts & NGE_RXEXTSTS_TCPPKT) &&
			    (!(extsts & NGE_RXEXTSTS_TCPCSUMERR)))
				m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
			else if ((extsts & NGE_RXEXTSTS_UDPPKT) &&
			    (!(extsts & NGE_RXEXTSTS_UDPCSUMERR)))
				m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;
		}

		ether_input_mbuf(ifp, m);
	}

	sc->nge_cdata.nge_rx_prod = i;
}
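
/*
 * NGE_OWNDESC() in the loop above tests the OWN bit in a descriptor's
 * command/status word; the chip sets it on a receive descriptor once
 * the descriptor has been filled, so the ring is walked until we reach
 * a descriptor the hardware is still working on.
 */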

/*
 * A frame was downloaded to the chip.  It's safe for us to clean up
 * the list buffers.
 */
void
nge_txeof(sc)
	struct nge_softc	*sc;
{
	struct nge_desc		*cur_tx;
	struct ifnet		*ifp;
	u_int32_t		idx;

	ifp = &sc->arpcom.ac_if;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	idx = sc->nge_cdata.nge_tx_cons;
	while (idx != sc->nge_cdata.nge_tx_prod) {
		cur_tx = &sc->nge_ldata->nge_tx_list[idx];

		if (NGE_OWNDESC(cur_tx))
			break;

		if (cur_tx->nge_ctl & NGE_CMDSTS_MORE) {
			sc->nge_cdata.nge_tx_cnt--;
			NGE_INC(idx, NGE_TX_LIST_CNT);
			continue;
		}

		if (!(cur_tx->nge_ctl & NGE_CMDSTS_PKT_OK)) {
			ifp->if_oerrors++;
			if (cur_tx->nge_txstat & NGE_TXSTAT_EXCESSCOLLS)
				ifp->if_collisions++;
			if (cur_tx->nge_txstat & NGE_TXSTAT_OUTOFWINCOLL)
				ifp->if_collisions++;
		}

		ifp->if_collisions +=
		    (cur_tx->nge_txstat & NGE_TXSTAT_COLLCNT) >> 16;

		ifp->if_opackets++;
		if (cur_tx->nge_mbuf != NULL) {
			m_freem(cur_tx->nge_mbuf);
			cur_tx->nge_mbuf = NULL;
			ifp->if_flags &= ~IFF_OACTIVE;
		}

		sc->nge_cdata.nge_tx_cnt--;
		NGE_INC(idx, NGE_TX_LIST_CNT);
	}

	sc->nge_cdata.nge_tx_cons = idx;

	if (idx == sc->nge_cdata.nge_tx_prod)
		ifp->if_timer = 0;
}

void
nge_tick(xsc)
	void			*xsc;
{
	struct nge_softc	*sc = xsc;
	struct mii_data		*mii = &sc->nge_mii;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	int			s;

	s = splnet();

	DPRINTFN(10, ("%s: nge_tick: link=%d\n", sc->sc_dv.dv_xname,
	    sc->nge_link));

	timeout_add_sec(&sc->nge_timeout, 1);
	if (sc->nge_link) {
		splx(s);
		return;
	}

	if (sc->nge_tbi) {
		if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media)
		    == IFM_AUTO) {
			u_int32_t bmsr, anlpar, txcfg, rxcfg;

			bmsr = CSR_READ_4(sc, NGE_TBI_BMSR);
			DPRINTFN(2, ("%s: nge_tick: bmsr=%#x\n",
			    sc->sc_dv.dv_xname, bmsr));

			if (!(bmsr & NGE_TBIBMSR_ANEG_DONE)) {
				CSR_WRITE_4(sc, NGE_TBI_BMCR, 0);

				splx(s);
				return;
			}

			anlpar = CSR_READ_4(sc, NGE_TBI_ANLPAR);
			txcfg = CSR_READ_4(sc, NGE_TX_CFG);
			rxcfg = CSR_READ_4(sc, NGE_RX_CFG);

			DPRINTFN(2, ("%s: nge_tick: anlpar=%#x, txcfg=%#x, "
			    "rxcfg=%#x\n", sc->sc_dv.dv_xname, anlpar,
			    txcfg, rxcfg));

			if (anlpar == 0 || anlpar & NGE_TBIANAR_FDX) {
				txcfg |= (NGE_TXCFG_IGN_HBEAT|
				    NGE_TXCFG_IGN_CARR);
				rxcfg |= NGE_RXCFG_RX_FDX;
			} else {
				txcfg &= ~(NGE_TXCFG_IGN_HBEAT|
				    NGE_TXCFG_IGN_CARR);
				rxcfg &= ~(NGE_RXCFG_RX_FDX);
			}
			txcfg |= NGE_TXCFG_AUTOPAD;
			CSR_WRITE_4(sc, NGE_TX_CFG, txcfg);
			CSR_WRITE_4(sc, NGE_RX_CFG, rxcfg);
		}

		DPRINTF(("%s: gigabit link up\n", sc->sc_dv.dv_xname));
		sc->nge_link++;
		if (!IFQ_IS_EMPTY(&ifp->if_snd))
			nge_start(ifp);
	} else {
		mii_tick(mii);
		if (mii->mii_media_status & IFM_ACTIVE &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
			sc->nge_link++;
			if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
				DPRINTF(("%s: gigabit link up\n",
				    sc->sc_dv.dv_xname));
			if (!IFQ_IS_EMPTY(&ifp->if_snd))
				nge_start(ifp);
		}
	}

	splx(s);
}
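
/*
 * Interrupt handler.  Reading NGE_ISR clears all pending interrupt
 * sources, so the register is polled in a loop until no interesting
 * bits remain before interrupts are re-enabled.
 */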
int
nge_intr(arg)
	void			*arg;
{
	struct nge_softc	*sc;
	struct ifnet		*ifp;
	u_int32_t		status;
	int			claimed = 0;

	sc = arg;
	ifp = &sc->arpcom.ac_if;

	/* Suppress unwanted interrupts */
	if (!(ifp->if_flags & IFF_UP)) {
		nge_stop(sc);
		return (0);
	}

	/* Disable interrupts. */
	CSR_WRITE_4(sc, NGE_IER, 0);

	/* Data LED on for TBI mode */
	if(sc->nge_tbi)
		CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO)
		    | NGE_GPIO_GP3_OUT);

	for (;;) {
		/* Reading the ISR register clears all interrupts. */
		status = CSR_READ_4(sc, NGE_ISR);

		if ((status & NGE_INTRS) == 0)
			break;

		claimed = 1;

		if ((status & NGE_ISR_TX_DESC_OK) ||
		    (status & NGE_ISR_TX_ERR) ||
		    (status & NGE_ISR_TX_OK) ||
		    (status & NGE_ISR_TX_IDLE))
			nge_txeof(sc);

		if ((status & NGE_ISR_RX_DESC_OK) ||
		    (status & NGE_ISR_RX_ERR) ||
		    (status & NGE_ISR_RX_OFLOW) ||
		    (status & NGE_ISR_RX_FIFO_OFLOW) ||
		    (status & NGE_ISR_RX_IDLE) ||
		    (status & NGE_ISR_RX_OK))
			nge_rxeof(sc);

		if ((status & NGE_ISR_RX_IDLE))
			NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE);

		if (status & NGE_ISR_SYSERR) {
			nge_reset(sc);
			ifp->if_flags &= ~IFF_RUNNING;
			nge_init(sc);
		}

#if 0
		/*
		 * XXX: nge_tick() is not ready to be called this way
		 * it screws up the aneg timeout because mii_tick() is
		 * only to be called once per second.
		 */
		if (status & NGE_IMR_PHY_INTR) {
			sc->nge_link = 0;
			nge_tick(sc);
		}
#endif
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, NGE_IER, 1);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		nge_start(ifp);

	/* Data LED off for TBI mode */
	if(sc->nge_tbi)
		CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO)
		    & ~NGE_GPIO_GP3_OUT);

	return claimed;
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
int
nge_encap(sc, m_head, txidx)
	struct nge_softc	*sc;
	struct mbuf		*m_head;
	u_int32_t		*txidx;
{
	struct nge_desc		*f = NULL;
	struct mbuf		*m;
	int			frag, cur, cnt = 0;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers.  Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	m = m_head;
	cur = frag = *txidx;

	for (m = m_head; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			if ((NGE_TX_LIST_CNT -
			    (sc->nge_cdata.nge_tx_cnt + cnt)) < 2)
				return(ENOBUFS);
			f = &sc->nge_ldata->nge_tx_list[frag];
			f->nge_ctl = NGE_CMDSTS_MORE | m->m_len;
			f->nge_ptr = VTOPHYS(mtod(m, vaddr_t));
			DPRINTFN(7,("%s: f->nge_ptr=%#x\n",
			    sc->sc_dv.dv_xname, f->nge_ptr));
			if (cnt != 0)
				f->nge_ctl |= NGE_CMDSTS_OWN;
			cur = frag;
			NGE_INC(frag, NGE_TX_LIST_CNT);
			cnt++;
		}
	}

	if (m != NULL)
		return(ENOBUFS);

	sc->nge_ldata->nge_tx_list[*txidx].nge_extsts = 0;

#if NVLAN > 0
	if (m_head->m_flags & M_VLANTAG) {
		sc->nge_ldata->nge_tx_list[cur].nge_extsts |=
		    (NGE_TXEXTSTS_VLANPKT|htons(m_head->m_pkthdr.ether_vtag));
	}
#endif

	sc->nge_ldata->nge_tx_list[cur].nge_mbuf = m_head;
	sc->nge_ldata->nge_tx_list[cur].nge_ctl &= ~NGE_CMDSTS_MORE;
	sc->nge_ldata->nge_tx_list[*txidx].nge_ctl |= NGE_CMDSTS_OWN;
	sc->nge_cdata.nge_tx_cnt += cnt;
	*txidx = frag;

	return(0);
}
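
/*
 * Note the ownership handoff in nge_encap() above: every descriptor in
 * the chain gets NGE_CMDSTS_MORE, the last one has MORE cleared again,
 * and OWN is set on all fragments except the first.  Only once the
 * whole chain is built is OWN set on the first descriptor, so the chip
 * never starts on a half-built frame.
 */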

/*
 * Main transmit routine.  To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists.  We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */

void
nge_start(ifp)
	struct ifnet		*ifp;
{
	struct nge_softc	*sc;
	struct mbuf		*m_head = NULL;
	u_int32_t		idx;
	int			pkts = 0;

	sc = ifp->if_softc;

	if (!sc->nge_link)
		return;

	idx = sc->nge_cdata.nge_tx_prod;

	if (ifp->if_flags & IFF_OACTIVE)
		return;

	while(sc->nge_ldata->nge_tx_list[idx].nge_mbuf == NULL) {
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (nge_encap(sc, m_head, &idx)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* now we are committed to transmit the packet */
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		pkts++;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
	}
	if (pkts == 0)
		return;

	/* Transmit */
	sc->nge_cdata.nge_tx_prod = idx;
	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_TX_ENABLE);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

void
nge_init(xsc)
	void			*xsc;
{
	struct nge_softc	*sc = xsc;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	struct mii_data		*mii;
	u_int32_t		txcfg, rxcfg;
	int			s, media;

	if (ifp->if_flags & IFF_RUNNING)
		return;

	s = splnet();

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	nge_stop(sc);

	mii = sc->nge_tbi ? NULL: &sc->nge_mii;

	/* Set MAC address */
	CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR0);
	CSR_WRITE_4(sc, NGE_RXFILT_DATA,
	    ((u_int16_t *)sc->arpcom.ac_enaddr)[0]);
	CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR1);
	CSR_WRITE_4(sc, NGE_RXFILT_DATA,
	    ((u_int16_t *)sc->arpcom.ac_enaddr)[1]);
	CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR2);
	CSR_WRITE_4(sc, NGE_RXFILT_DATA,
	    ((u_int16_t *)sc->arpcom.ac_enaddr)[2]);

	/* Init circular RX list. */
	if (nge_list_rx_init(sc) == ENOBUFS) {
		printf("%s: initialization failed: no "
		    "memory for rx buffers\n", sc->sc_dv.dv_xname);
		nge_stop(sc);
		splx(s);
		return;
	}

	/*
	 * Init tx descriptors.
	 */
	nge_list_tx_init(sc);

	/*
	 * For the NatSemi chip, we have to explicitly enable the
	 * reception of ARP frames, as well as turn on the 'perfect
	 * match' filter where we store the station address, otherwise
	 * we won't receive unicasts meant for this host.
	 */
	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ARP);
	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_PERFECT);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLPHYS);
	else
		NGE_CLRBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLPHYS);

	/*
	 * Set the capture broadcast bit to capture broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST)
		NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_BROAD);
	else
		NGE_CLRBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_BROAD);

	/*
	 * Load the multicast filter.
	 */
	nge_setmulti(sc);

	/* Turn the receive filter on */
	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ENABLE);

	/*
	 * Load the address of the RX and TX lists.
	 */
	CSR_WRITE_4(sc, NGE_RX_LISTPTR,
	    VTOPHYS(&sc->nge_ldata->nge_rx_list[0]));
	CSR_WRITE_4(sc, NGE_TX_LISTPTR,
	    VTOPHYS(&sc->nge_ldata->nge_tx_list[0]));

	/* Set RX configuration */
	CSR_WRITE_4(sc, NGE_RX_CFG, NGE_RXCFG);

	/*
	 * Enable hardware checksum validation for all IPv4
	 * packets, do not reject packets with bad checksums.
	 */
	CSR_WRITE_4(sc, NGE_VLAN_IP_RXCTL, NGE_VIPRXCTL_IPCSUM_ENB);

	/*
	 * If VLAN support is enabled, tell the chip to detect
	 * and strip VLAN tag info from received frames.  The tag
	 * will be provided in the extsts field in the RX descriptors.
	 */
	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		NGE_SETBIT(sc, NGE_VLAN_IP_RXCTL,
		    NGE_VIPRXCTL_TAG_DETECT_ENB | NGE_VIPRXCTL_TAG_STRIP_ENB);

	/* Set TX configuration */
	CSR_WRITE_4(sc, NGE_TX_CFG, NGE_TXCFG);

	/*
	 * If VLAN support is enabled, tell the chip to insert
	 * VLAN tags on a per-packet basis as dictated by the
	 * code in the frame encapsulation routine.
	 */
	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		NGE_SETBIT(sc, NGE_VLAN_IP_TXCTL, NGE_VIPTXCTL_TAG_PER_PKT);

	/* Set full/half duplex mode. */
	if (sc->nge_tbi)
		media = sc->nge_ifmedia.ifm_cur->ifm_media;
	else
		media = mii->mii_media_active;

	txcfg = CSR_READ_4(sc, NGE_TX_CFG);
	rxcfg = CSR_READ_4(sc, NGE_RX_CFG);

	DPRINTFN(4, ("%s: nge_init txcfg=%#x, rxcfg=%#x\n",
	    sc->sc_dv.dv_xname, txcfg, rxcfg));

	if ((media & IFM_GMASK) == IFM_FDX) {
		txcfg |= (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR);
		rxcfg |= (NGE_RXCFG_RX_FDX);
	} else {
		txcfg &= ~(NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR);
		rxcfg &= ~(NGE_RXCFG_RX_FDX);
	}

	txcfg |= NGE_TXCFG_AUTOPAD;

	CSR_WRITE_4(sc, NGE_TX_CFG, txcfg);
	CSR_WRITE_4(sc, NGE_RX_CFG, rxcfg);

	nge_tick(sc);

	/*
	 * Enable the delivery of PHY interrupts based on
	 * link/speed/duplex status changes and enable return
	 * of extended status information in the DMA descriptors,
	 * required for checksum offloading.
	 */
	NGE_SETBIT(sc, NGE_CFG, NGE_CFG_PHYINTR_SPD|NGE_CFG_PHYINTR_LNK|
	    NGE_CFG_PHYINTR_DUP|NGE_CFG_EXTSTS_ENB);

	DPRINTFN(1, ("%s: nge_init: config=%#x\n", sc->sc_dv.dv_xname,
	    CSR_READ_4(sc, NGE_CFG)));

	/*
	 * Configure interrupt holdoff (moderation).  We can
	 * have the chip delay interrupt delivery for a certain
	 * period.  Units are in 100us, and the max setting
	 * is 25500us (0xFF x 100us).  Default is a 100us holdoff.
	 */
	CSR_WRITE_4(sc, NGE_IHR, 0x01);

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_4(sc, NGE_IMR, NGE_INTRS);
	CSR_WRITE_4(sc, NGE_IER, 1);

	/* Enable receiver and transmitter. */
	NGE_CLRBIT(sc, NGE_CSR, NGE_CSR_TX_DISABLE|NGE_CSR_RX_DISABLE);
	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE);

	if (sc->nge_tbi)
		nge_ifmedia_tbi_upd(ifp);
	else
		nge_ifmedia_mii_upd(ifp);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	splx(s);
}
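
/*
 * As an example of the interrupt holdoff setting programmed in
 * nge_init() above: writing 0x0A to NGE_IHR would delay interrupt
 * delivery by up to 10 x 100us = 1ms, trading interrupt rate against
 * latency; 0x01 keeps the default 100us holdoff.
 */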

/*
 * Set mii media options.
 */
int
nge_ifmedia_mii_upd(ifp)
	struct ifnet		*ifp;
{
	struct nge_softc	*sc = ifp->if_softc;
	struct mii_data		*mii = &sc->nge_mii;

	DPRINTFN(2, ("%s: nge_ifmedia_mii_upd\n", sc->sc_dv.dv_xname));

	sc->nge_link = 0;

	if (mii->mii_instance) {
		struct mii_softc	*miisc;
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return(0);
}

/*
 * Report current mii media status.
 */
void
nge_ifmedia_mii_sts(ifp, ifmr)
	struct ifnet		*ifp;
	struct ifmediareq	*ifmr;
{
	struct nge_softc	*sc = ifp->if_softc;
	struct mii_data		*mii = &sc->nge_mii;

	DPRINTFN(2, ("%s: nge_ifmedia_mii_sts\n", sc->sc_dv.dv_xname));

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

/*
 * Set tbi media options.
 */
int
nge_ifmedia_tbi_upd(ifp)
	struct ifnet		*ifp;
{
	struct nge_softc	*sc = ifp->if_softc;

	DPRINTFN(2, ("%s: nge_ifmedia_tbi_upd\n", sc->sc_dv.dv_xname));

	sc->nge_link = 0;

	if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media)
	    == IFM_AUTO) {
		u_int32_t anar, bmcr;
		anar = CSR_READ_4(sc, NGE_TBI_ANAR);
		anar |= (NGE_TBIANAR_HDX | NGE_TBIANAR_FDX);
		CSR_WRITE_4(sc, NGE_TBI_ANAR, anar);

		bmcr = CSR_READ_4(sc, NGE_TBI_BMCR);
		bmcr |= (NGE_TBIBMCR_ENABLE_ANEG|NGE_TBIBMCR_RESTART_ANEG);
		CSR_WRITE_4(sc, NGE_TBI_BMCR, bmcr);

		bmcr &= ~(NGE_TBIBMCR_RESTART_ANEG);
		CSR_WRITE_4(sc, NGE_TBI_BMCR, bmcr);
	} else {
		u_int32_t txcfg, rxcfg;
		txcfg = CSR_READ_4(sc, NGE_TX_CFG);
		rxcfg = CSR_READ_4(sc, NGE_RX_CFG);

		if ((sc->nge_ifmedia.ifm_cur->ifm_media & IFM_GMASK)
		    == IFM_FDX) {
			txcfg |= NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR;
			rxcfg |= NGE_RXCFG_RX_FDX;
		} else {
			txcfg &= ~(NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR);
			rxcfg &= ~(NGE_RXCFG_RX_FDX);
		}

		txcfg |= NGE_TXCFG_AUTOPAD;
		CSR_WRITE_4(sc, NGE_TX_CFG, txcfg);
		CSR_WRITE_4(sc, NGE_RX_CFG, rxcfg);
	}

	NGE_CLRBIT(sc, NGE_GPIO, NGE_GPIO_GP3_OUT);

	return(0);
}

/*
 * Report current tbi media status.
 */
void
nge_ifmedia_tbi_sts(ifp, ifmr)
	struct ifnet		*ifp;
	struct ifmediareq	*ifmr;
{
	struct nge_softc	*sc = ifp->if_softc;
	u_int32_t		bmcr;

	bmcr = CSR_READ_4(sc, NGE_TBI_BMCR);

	if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media) == IFM_AUTO) {
		u_int32_t bmsr = CSR_READ_4(sc, NGE_TBI_BMSR);
		DPRINTFN(2, ("%s: nge_ifmedia_tbi_sts bmsr=%#x, bmcr=%#x\n",
		    sc->sc_dv.dv_xname, bmsr, bmcr));

		if (!(bmsr & NGE_TBIBMSR_ANEG_DONE)) {
			ifmr->ifm_active = IFM_ETHER|IFM_NONE;
			ifmr->ifm_status = IFM_AVALID;
			return;
		}
	} else {
		DPRINTFN(2, ("%s: nge_ifmedia_tbi_sts bmcr=%#x\n",
		    sc->sc_dv.dv_xname, bmcr));
	}

	ifmr->ifm_status = IFM_AVALID|IFM_ACTIVE;
	ifmr->ifm_active = IFM_ETHER|IFM_1000_SX;

	if (bmcr & NGE_TBIBMCR_LOOPBACK)
		ifmr->ifm_active |= IFM_LOOP;

	if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media) == IFM_AUTO) {
		u_int32_t anlpar = CSR_READ_4(sc, NGE_TBI_ANLPAR);
		DPRINTFN(2, ("%s: nge_ifmedia_tbi_sts anlpar=%#x\n",
		    sc->sc_dv.dv_xname, anlpar));

		ifmr->ifm_active |= IFM_AUTO;
		if (anlpar & NGE_TBIANLPAR_FDX) {
			ifmr->ifm_active |= IFM_FDX;
		} else if (anlpar & NGE_TBIANLPAR_HDX) {
			ifmr->ifm_active |= IFM_HDX;
		} else
			ifmr->ifm_active |= IFM_FDX;
	} else if ((sc->nge_ifmedia.ifm_cur->ifm_media & IFM_GMASK) == IFM_FDX)
		ifmr->ifm_active |= IFM_FDX;
	else
		ifmr->ifm_active |= IFM_HDX;
}

int
nge_ioctl(ifp, command, data)
	struct ifnet		*ifp;
	u_long			command;
	caddr_t			data;
{
	struct nge_softc	*sc = ifp->if_softc;
	struct ifaddr		*ifa = (struct ifaddr *) data;
	struct ifreq		*ifr = (struct ifreq *) data;
	struct mii_data		*mii;
	int			s, error = 0;

	s = splnet();

	switch(command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			nge_init(sc);
			arp_ifinit(&sc->arpcom, ifa);
			break;
#endif /* INET */
		default:
			nge_init(sc);
			break;
		}
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->nge_if_flags & IFF_PROMISC)) {
				NGE_SETBIT(sc, NGE_RXFILT_CTL,
				    NGE_RXFILTCTL_ALLPHYS|
				    NGE_RXFILTCTL_ALLMULTI);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->nge_if_flags & IFF_PROMISC) {
				NGE_CLRBIT(sc, NGE_RXFILT_CTL,
				    NGE_RXFILTCTL_ALLPHYS);
				if (!(ifp->if_flags & IFF_ALLMULTI))
					NGE_CLRBIT(sc, NGE_RXFILT_CTL,
					    NGE_RXFILTCTL_ALLMULTI);
			} else {
				ifp->if_flags &= ~IFF_RUNNING;
				nge_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				nge_stop(sc);
		}
		sc->nge_if_flags = ifp->if_flags;
		error = 0;
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		if (sc->nge_tbi) {
			error = ifmedia_ioctl(ifp, ifr, &sc->nge_ifmedia,
			    command);
		} else {
			mii = &sc->nge_mii;
			error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
			    command);
		}
		break;

	default:
		error = ether_ioctl(ifp, &sc->arpcom, command, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			nge_setmulti(sc);
		error = 0;
	}

	splx(s);
	return(error);
}

void
nge_watchdog(ifp)
	struct ifnet		*ifp;
{
	struct nge_softc	*sc;

	sc = ifp->if_softc;

	ifp->if_oerrors++;
	printf("%s: watchdog timeout\n", sc->sc_dv.dv_xname);

	nge_stop(sc);
	nge_reset(sc);
	ifp->if_flags &= ~IFF_RUNNING;
	nge_init(sc);

	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		nge_start(ifp);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
void
nge_stop(sc)
	struct nge_softc	*sc;
{
	int			i;
	struct ifnet		*ifp;
	struct mii_data		*mii;

	ifp = &sc->arpcom.ac_if;
	ifp->if_timer = 0;
	if (sc->nge_tbi) {
		mii = NULL;
	} else {
		mii = &sc->nge_mii;
	}

	timeout_del(&sc->nge_timeout);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	CSR_WRITE_4(sc, NGE_IER, 0);
	CSR_WRITE_4(sc, NGE_IMR, 0);
	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_TX_DISABLE|NGE_CSR_RX_DISABLE);
	DELAY(1000);
	CSR_WRITE_4(sc, NGE_TX_LISTPTR, 0);
	CSR_WRITE_4(sc, NGE_RX_LISTPTR, 0);

	if (!sc->nge_tbi)
		mii_down(mii);

	sc->nge_link = 0;

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < NGE_RX_LIST_CNT; i++) {
		if (sc->nge_ldata->nge_rx_list[i].nge_mbuf != NULL) {
			m_freem(sc->nge_ldata->nge_rx_list[i].nge_mbuf);
			sc->nge_ldata->nge_rx_list[i].nge_mbuf = NULL;
		}
	}
	bzero((char *)&sc->nge_ldata->nge_rx_list,
	    sizeof(sc->nge_ldata->nge_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < NGE_TX_LIST_CNT; i++) {
		if (sc->nge_ldata->nge_tx_list[i].nge_mbuf != NULL) {
			m_freem(sc->nge_ldata->nge_tx_list[i].nge_mbuf);
			sc->nge_ldata->nge_tx_list[i].nge_mbuf = NULL;
		}
	}

	bzero((char *)&sc->nge_ldata->nge_tx_list,
	    sizeof(sc->nge_ldata->nge_tx_list));
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
void
nge_shutdown(xsc)
	void *xsc;
{
	struct nge_softc	*sc = (struct nge_softc *)xsc;

	nge_reset(sc);
	nge_stop(sc);
}

struct cfattach nge_ca = {
	sizeof(struct nge_softc), nge_probe, nge_attach
};

struct cfdriver nge_cd = {
	0, "nge", DV_IFNET
};