/*	$OpenBSD: fxp.c,v 1.94 2008/11/28 02:44:17 brad Exp $	*/
/*	$NetBSD: if_fxp.c,v 1.2 1997/06/05 02:01:55 thorpej Exp $	*/

/*
 * Copyright (c) 1995, David Greenman
 * All rights reserved.
 *
 * Modifications to support NetBSD:
 * Copyright (c) 1997 Jason R. Thorpe.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	Id: if_fxp.c,v 1.55 1998/08/04 08:53:12 dg Exp
 */

/*
 * Intel EtherExpress Pro/100B PCI Fast Ethernet driver
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/syslog.h>
#include <sys/timeout.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>

#include <netinet/if_ether.h>

#include <machine/cpu.h>
#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/mii/miivar.h>

#include <dev/ic/fxpreg.h>
#include <dev/ic/fxpvar.h>

/*
 * NOTE!  On the Alpha, we have an alignment constraint.  The
 * card DMAs the packet immediately following the RFA.  However,
 * the first thing in the packet is a 14-byte Ethernet header.
 * This means that the packet is misaligned.  To compensate,
 * we actually offset the RFA 2 bytes into the cluster.  This
 * aligns the packet after the Ethernet header at a 32-bit
 * boundary.  HOWEVER!  This means that the RFA is misaligned!
 */
#define	RFA_ALIGNMENT_FUDGE	(2 + sizeof(bus_dmamap_t *))
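
/*
 * Rough cluster layout implied by fxp_add_rfabuf() (a sketch, not a
 * chip-defined structure):
 *
 *	ext_buf + 0:				stashed bus_dmamap_t pointer
 *	ext_buf + RFA_ALIGNMENT_FUDGE:		struct fxp_rfa (2-byte misaligned)
 *	...     + sizeof(struct fxp_rfa):	packet data; the payload after
 *						the 14-byte Ethernet header is
 *						32-bit aligned
 */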

/*
 * Inline function to copy a 16-bit aligned 32-bit quantity.
 */
static __inline void fxp_lwcopy(volatile u_int32_t *,
	volatile u_int32_t *);

static __inline void
fxp_lwcopy(volatile u_int32_t *src, volatile u_int32_t *dst)
{
	volatile u_int16_t *a = (u_int16_t *)src;
	volatile u_int16_t *b = (u_int16_t *)dst;

	b[0] = a[0];
	b[1] = a[1];
}

/*
 * Template for default configuration parameters.
 * See struct fxp_cb_config for the bit definitions.
 * Note, cb_command is filled in later.
 */
static u_char fxp_cb_config_template[] = {
	0x0, 0x0,		/* cb_status */
	0x0, 0x0,		/* cb_command */
	0xff, 0xff, 0xff, 0xff,	/* link_addr */
	0x16,	/*  0 Byte count */
	0x08,	/*  1 Fifo limit */
	0x00,	/*  2 Adaptive ifs */
	0x00,	/*  3 ctrl0 */
	0x00,	/*  4 rx_dma_bytecount */
	0x80,	/*  5 tx_dma_bytecount */
	0xb2,	/*  6 ctrl1 */
	0x03,	/*  7 ctrl2 */
	0x01,	/*  8 mediatype */
	0x00,	/*  9 void2 */
	0x26,	/* 10 ctrl3 */
	0x00,	/* 11 linear priority */
	0x60,	/* 12 interfrm_spacing */
	0x00,	/* 13 void31 */
	0xf2,	/* 14 void32 */
	0x48,	/* 15 promiscuous */
	0x00,	/* 16 void41 */
	0x40,	/* 17 void42 */
	0xf3,	/* 18 stripping */
	0x00,	/* 19 fdx_pin */
	0x3f,	/* 20 multi_ia */
	0x05	/* 21 mc_all */
};

void fxp_eeprom_shiftin(struct fxp_softc *, int, int);
void fxp_eeprom_putword(struct fxp_softc *, int, u_int16_t);
void fxp_write_eeprom(struct fxp_softc *, u_short *, int, int);
int fxp_mediachange(struct ifnet *);
void fxp_mediastatus(struct ifnet *, struct ifmediareq *);
void fxp_scb_wait(struct fxp_softc *);
void fxp_start(struct ifnet *);
int fxp_ioctl(struct ifnet *, u_long, caddr_t);
void fxp_init(void *);
void fxp_load_ucode(struct fxp_softc *);
void fxp_stop(struct fxp_softc *, int);
void fxp_watchdog(struct ifnet *);
int fxp_add_rfabuf(struct fxp_softc *, struct mbuf *);
int fxp_mdi_read(struct device *, int, int);
void fxp_mdi_write(struct device *, int, int, int);
void fxp_autosize_eeprom(struct fxp_softc *);
void fxp_statchg(struct device *);
void fxp_read_eeprom(struct fxp_softc *, u_int16_t *, int, int);
void fxp_stats_update(void *);
void fxp_mc_setup(struct fxp_softc *, int);
void fxp_scb_cmd(struct fxp_softc *, u_int16_t);

/*
 * Set initial transmit threshold at 64 (512 bytes). This is
 * increased by 64 (512 bytes) at a time, to maximum of 192
 * (1536 bytes), if an underrun occurs.
 */
static int tx_threshold = 64;

/*
 * Interrupt coalescing code params
 */
int fxp_int_delay = FXP_INT_DELAY;
int fxp_bundle_max = FXP_BUNDLE_MAX;
int fxp_min_size_mask = FXP_MIN_SIZE_MASK;

/*
 * TxCB list index mask. This is used to do list wrap-around.
 */
#define FXP_TXCB_MASK	(FXP_NTXCB - 1)
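
/*
 * The wrap-around trick assumes FXP_NTXCB is a power of two; the next
 * ring index is then simply (i + 1) & FXP_TXCB_MASK.
 */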

/*
 * Maximum number of seconds that the receiver can be idle before we
 * assume it's dead and attempt to reset it by reprogramming the
 * multicast filter. This is part of a work-around for a bug in the
 * NIC. See fxp_stats_update().
 */
#define FXP_MAX_RX_IDLE	15

/*
 * Wait for the previous command to be accepted (but not necessarily
 * completed).
 */
void
fxp_scb_wait(struct fxp_softc *sc)
{
	int i = FXP_CMD_TMO;

	while ((CSR_READ_2(sc, FXP_CSR_SCB_COMMAND) & 0xff) && --i)
		DELAY(2);
	if (i == 0)
		printf("%s: warning: SCB timed out\n", sc->sc_dev.dv_xname);
}

void
fxp_eeprom_shiftin(struct fxp_softc *sc, int data, int length)
{
	u_int16_t reg;
	int x;

	/*
	 * Shift in data.
	 */
	for (x = 1 << (length - 1); x; x >>= 1) {
		if (data & x)
			reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
		else
			reg = FXP_EEPROM_EECS;
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		DELAY(1);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK);
		DELAY(1);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		DELAY(1);
	}
}

void
fxp_eeprom_putword(struct fxp_softc *sc, int offset, u_int16_t data)
{
	int i;

	/*
	 * Erase/write enable.
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	fxp_eeprom_shiftin(sc, 0x4, 3);
	fxp_eeprom_shiftin(sc, 0x03 << (sc->eeprom_size - 2), sc->eeprom_size);
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
	/*
	 * Shift in write opcode, address, data.
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_WRITE, 3);
	fxp_eeprom_shiftin(sc, offset, sc->eeprom_size);
	fxp_eeprom_shiftin(sc, data, 16);
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
	/*
	 * Wait for EEPROM to finish up.
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	DELAY(1);
	for (i = 0; i < 1000; i++) {
		if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO)
			break;
		DELAY(50);
	}
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
	/*
	 * Erase/write disable.
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	fxp_eeprom_shiftin(sc, 0x4, 3);
	fxp_eeprom_shiftin(sc, 0, sc->eeprom_size);
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
}

void
fxp_write_eeprom(struct fxp_softc *sc, u_short *data, int offset, int words)
{
	int i;

	for (i = 0; i < words; i++)
		fxp_eeprom_putword(sc, offset + i, data[i]);
}

/*************************************************************
 * Operating system-specific autoconfiguration glue
 *************************************************************/

void	fxp_shutdown(void *);
void	fxp_power(int, void *);

struct cfdriver fxp_cd = {
	NULL, "fxp", DV_IFNET
};

/*
 * Device shutdown routine. Called at system shutdown after sync. The
 * main purpose of this routine is to shut off receiver DMA so that
 * kernel memory doesn't get clobbered during warmboot.
 */
void
fxp_shutdown(void *sc)
{
	fxp_stop((struct fxp_softc *) sc, 0);
}

/*
 * Power handler routine. Called when the system is transitioning
 * into/out of power save modes. As with fxp_shutdown, the main
 * purpose of this routine is to shut off receiver DMA so it doesn't
 * clobber kernel memory at the wrong time.
 */
void
fxp_power(int why, void *arg)
{
	struct fxp_softc *sc = arg;
	struct ifnet *ifp;
	int s;

	s = splnet();
	if (why != PWR_RESUME)
		fxp_stop(sc, 0);
	else {
		ifp = &sc->sc_arpcom.ac_if;
		if (ifp->if_flags & IFF_UP)
			fxp_init(sc);
	}
	splx(s);
}

/*************************************************************
 * End of operating system-specific autoconfiguration glue
 *************************************************************/

/*
 * Do generic parts of attach.
 */
int
fxp_attach(struct fxp_softc *sc, const char *intrstr)
{
	struct ifnet *ifp;
	struct mbuf *m;
	bus_dmamap_t rxmap;
	u_int16_t data;
	u_int8_t enaddr[6];
	int i, err;

	/*
	 * Reset to a stable state.
	 */
	CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SOFTWARE_RESET);
	DELAY(10);

	if (bus_dmamem_alloc(sc->sc_dmat, sizeof(struct fxp_ctrl),
	    PAGE_SIZE, 0, &sc->sc_cb_seg, 1, &sc->sc_cb_nseg, BUS_DMA_NOWAIT))
		goto fail;
	if (bus_dmamem_map(sc->sc_dmat, &sc->sc_cb_seg, sc->sc_cb_nseg,
	    sizeof(struct fxp_ctrl), (caddr_t *)&sc->sc_ctrl,
	    BUS_DMA_NOWAIT)) {
		bus_dmamem_free(sc->sc_dmat, &sc->sc_cb_seg, sc->sc_cb_nseg);
		goto fail;
	}
	if (bus_dmamap_create(sc->sc_dmat, sizeof(struct fxp_ctrl),
	    1, sizeof(struct fxp_ctrl), 0, BUS_DMA_NOWAIT,
	    &sc->tx_cb_map)) {
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_ctrl,
		    sizeof(struct fxp_ctrl));
		bus_dmamem_free(sc->sc_dmat, &sc->sc_cb_seg, sc->sc_cb_nseg);
		goto fail;
	}
	if (bus_dmamap_load(sc->sc_dmat, sc->tx_cb_map, (caddr_t)sc->sc_ctrl,
	    sizeof(struct fxp_ctrl), NULL, BUS_DMA_NOWAIT)) {
		bus_dmamap_destroy(sc->sc_dmat, sc->tx_cb_map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_ctrl,
		    sizeof(struct fxp_ctrl));
		bus_dmamem_free(sc->sc_dmat, &sc->sc_cb_seg, sc->sc_cb_nseg);
		goto fail;
	}

	for (i = 0; i < FXP_NTXCB; i++) {
		if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    FXP_NTXSEG, MCLBYTES, 0, 0, &sc->txs[i].tx_map)) != 0) {
			printf("%s: unable to create tx dma map %d, error %d\n",
			    sc->sc_dev.dv_xname, i, err);
			goto fail;
		}
		sc->txs[i].tx_mbuf = NULL;
		sc->txs[i].tx_cb = sc->sc_ctrl->tx_cb + i;
		sc->txs[i].tx_off = offsetof(struct fxp_ctrl, tx_cb[i]);
		sc->txs[i].tx_next = &sc->txs[(i + 1) & FXP_TXCB_MASK];
	}
	bzero(sc->sc_ctrl, sizeof(struct fxp_ctrl));

	/*
	 * Pre-allocate some receive buffers.
	 */
	sc->sc_rxfree = 0;
	for (i = 0; i < FXP_NRFABUFS_MIN; i++) {
		if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxmaps[i])) != 0) {
			printf("%s: unable to create rx dma map %d, error %d\n",
			    sc->sc_dev.dv_xname, i, err);
			goto fail;
		}
		sc->rx_bufs++;
	}
	for (i = 0; i < FXP_NRFABUFS_MIN; i++)
		if (fxp_add_rfabuf(sc, NULL) != 0)
			goto fail;

	/*
	 * Find out how large of an SEEPROM we have.
	 */
	fxp_autosize_eeprom(sc);

	/*
	 * Get info about the primary PHY
	 */
	fxp_read_eeprom(sc, (u_int16_t *)&data, 6, 1);
	sc->phy_primary_addr = data & 0xff;
	sc->phy_primary_device = (data >> 8) & 0x3f;
	sc->phy_10Mbps_only = data >> 15;

	/*
	 * Only 82558 and newer cards can do this.
	 */
	if (sc->sc_revision >= FXP_REV_82558_A4) {
		sc->sc_int_delay = fxp_int_delay;
		sc->sc_bundle_max = fxp_bundle_max;
		sc->sc_min_size_mask = fxp_min_size_mask;
	}
	/*
	 * Read MAC address.
	 */
	fxp_read_eeprom(sc, (u_int16_t *)enaddr, 0, 3);

	ifp = &sc->sc_arpcom.ac_if;
	bcopy(enaddr, sc->sc_arpcom.ac_enaddr, ETHER_ADDR_LEN);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = fxp_ioctl;
	ifp->if_start = fxp_start;
	ifp->if_watchdog = fxp_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, FXP_NTXCB - 1);
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	printf(": %s, address %s\n", intrstr,
	    ether_sprintf(sc->sc_arpcom.ac_enaddr));

	if (sc->sc_flags & FXPF_DISABLE_STANDBY) {
		fxp_read_eeprom(sc, &data, 10, 1);
		if (data & 0x02) {			/* STB enable */
			u_int16_t cksum;

			printf("%s: Disabling dynamic standby mode in EEPROM",
			    sc->sc_dev.dv_xname);
			data &= ~0x02;
			fxp_write_eeprom(sc, &data, 10, 1);
			printf(", New ID 0x%x", data);
			cksum = 0;
			for (i = 0; i < (1 << sc->eeprom_size) - 1; i++) {
				fxp_read_eeprom(sc, &data, i, 1);
				cksum += data;
			}
			i = (1 << sc->eeprom_size) - 1;
			cksum = 0xBABA - cksum;
			fxp_read_eeprom(sc, &data, i, 1);
			fxp_write_eeprom(sc, &cksum, i, 1);
			printf(", cksum @ 0x%x: 0x%x -> 0x%x\n",
			    i, data, cksum);
		}
	}

	/* Receiver lock-up workaround detection. */
	fxp_read_eeprom(sc, &data, 3, 1);
	if ((data & 0x03) != 0x03)
		sc->sc_flags |= FXPF_RECV_WORKAROUND;

	/*
	 * Initialize our media structures and probe the MII.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = fxp_mdi_read;
	sc->sc_mii.mii_writereg = fxp_mdi_write;
	sc->sc_mii.mii_statchg = fxp_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, fxp_mediachange,
	    fxp_mediastatus);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_NOISOLATE);
	/* If no phy found, just use auto mode */
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL,
		    0, NULL);
		printf("%s: no phy found, using manual mode\n",
		    sc->sc_dev.dv_xname);
	}

	if (ifmedia_match(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL, 0))
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
	else if (ifmedia_match(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO, 0))
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
	else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_10_T);

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);

	/*
	 * Add shutdown hook so that DMA is disabled prior to reboot. Not
	 * doing so could allow DMA to corrupt kernel memory during the
	 * reboot before the driver initializes.
	 */
	sc->sc_sdhook = shutdownhook_establish(fxp_shutdown, sc);

	/*
	 * Add suspend hook, for similar reasons.
	 */
	sc->sc_powerhook = powerhook_establish(fxp_power, sc);

	/*
	 * Initialize timeout for statistics update.
	 */
	timeout_set(&sc->stats_update_to, fxp_stats_update, sc);

	return (0);

fail:
	printf("%s: Failed to malloc memory\n", sc->sc_dev.dv_xname);
	if (sc->tx_cb_map != NULL) {
		bus_dmamap_unload(sc->sc_dmat, sc->tx_cb_map);
		bus_dmamap_destroy(sc->sc_dmat, sc->tx_cb_map);
		bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_ctrl,
		    sizeof(struct fxp_cb_tx) * FXP_NTXCB);
		bus_dmamem_free(sc->sc_dmat, &sc->sc_cb_seg, sc->sc_cb_nseg);
	}
	m = sc->rfa_headm;
	while (m != NULL) {
		rxmap = *((bus_dmamap_t *)m->m_ext.ext_buf);
		bus_dmamap_unload(sc->sc_dmat, rxmap);
		FXP_RXMAP_PUT(sc, rxmap);
		m = m_free(m);
	}
	return (ENOMEM);
}

/*
 * From NetBSD:
 *
 * Figure out EEPROM size.
 *
 * 559's can have either 64-word or 256-word EEPROMs, the 558
 * datasheet only talks about 64-word EEPROMs, and the 557 datasheet
 * talks about the existence of 16 to 256 word EEPROMs.
 *
 * The only known sizes are 64 and 256, where the 256 version is used
 * by CardBus cards to store CIS information.
 *
 * The address is shifted in msb-to-lsb, and after the last
 * address-bit the EEPROM is supposed to output a `dummy zero' bit,
 * after which follows the actual data. We try to detect this zero, by
 * probing the data-out bit in the EEPROM control register just after
 * having shifted in a bit. If the bit is zero, we assume we've
 * shifted enough address bits. The data-out should be tri-state,
 * before this, which should translate to a logical one.
 *
 * Other ways to do this would be to try to read a register with known
 * contents with a varying number of address bits, but no such
 * register seems to be available. The high bits of register 10 are 01
 * on the 558 and 559, but apparently not on the 557.
 *
 * The Linux driver computes a checksum on the EEPROM data, but the
 * value of this checksum is not very well documented.
 */
void
fxp_autosize_eeprom(struct fxp_softc *sc)
{
	u_int16_t reg;
	int x;

	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	/*
	 * Shift in read opcode.
	 */
	for (x = 3; x > 0; x--) {
		if (FXP_EEPROM_OPC_READ & (1 << (x - 1))) {
			reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
		} else {
			reg = FXP_EEPROM_EECS;
		}
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
		    reg | FXP_EEPROM_EESK);
		DELAY(4);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		DELAY(4);
	}
	/*
	 * Shift in address.
	 * Wait for the dummy zero following a correct address shift.
	 */
	for (x = 1; x <= 8; x++) {
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
		    FXP_EEPROM_EECS | FXP_EEPROM_EESK);
		DELAY(4);
		if ((CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO) == 0)
			break;
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
		DELAY(4);
	}
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(4);
	sc->eeprom_size = x;
}
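
/*
 * Example (for the two known part sizes): a 64-word EEPROM takes 6
 * address bits, so the dummy zero shows up after the 6th bit and
 * eeprom_size ends up 6 (1 << 6 == 64 words); a 256-word part gives 8.
 */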

/*
 * Read from the serial EEPROM. Basically, you manually shift in
 * the read opcode (one bit at a time) and then shift in the address,
 * and then you shift out the data (all of this one bit at a time).
 * The word size is 16 bits, so you have to provide the address for
 * every 16 bits of data.
 */
void
fxp_read_eeprom(struct fxp_softc *sc, u_short *data, int offset,
    int words)
{
	u_int16_t reg;
	int i, x;

	for (i = 0; i < words; i++) {
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
		/*
		 * Shift in read opcode.
		 */
		for (x = 3; x > 0; x--) {
			if (FXP_EEPROM_OPC_READ & (1 << (x - 1))) {
				reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
			} else {
				reg = FXP_EEPROM_EECS;
			}
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
			    reg | FXP_EEPROM_EESK);
			DELAY(4);
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
			DELAY(4);
		}
		/*
		 * Shift in address.
		 */
		for (x = sc->eeprom_size; x > 0; x--) {
			if ((i + offset) & (1 << (x - 1))) {
				reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
			} else {
				reg = FXP_EEPROM_EECS;
			}
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
			    reg | FXP_EEPROM_EESK);
			DELAY(4);
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
			DELAY(4);
		}
		reg = FXP_EEPROM_EECS;
		data[i] = 0;
		/*
		 * Shift out data.
		 */
		for (x = 16; x > 0; x--) {
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
			    reg | FXP_EEPROM_EESK);
			DELAY(4);
			if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) &
			    FXP_EEPROM_EEDO)
				data[i] |= (1 << (x - 1));
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
			DELAY(4);
		}
		data[i] = letoh16(data[i]);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
		DELAY(4);
	}
}

/*
 * Start packet transmission on the interface.
 */
void
fxp_start(struct ifnet *ifp)
{
	struct fxp_softc *sc = ifp->if_softc;
	struct fxp_txsw *txs = sc->sc_cbt_prod;
	struct fxp_cb_tx *txc;
	struct mbuf *m0, *m = NULL;
	int cnt = sc->sc_cbt_cnt, seg;

	if ((ifp->if_flags & (IFF_OACTIVE | IFF_RUNNING)) != IFF_RUNNING)
		return;

	while (1) {
		if (cnt >= (FXP_NTXCB - 2)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		txs = txs->tx_next;

		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		if (bus_dmamap_load_mbuf(sc->sc_dmat, txs->tx_map,
		    m0, BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				break;
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if (!(m->m_flags & M_EXT)) {
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			if (bus_dmamap_load_mbuf(sc->sc_dmat, txs->tx_map,
			    m, BUS_DMA_NOWAIT) != 0) {
				m_freem(m);
				break;
			}
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
			m = NULL;
		}

		txs->tx_mbuf = m0;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif

		FXP_MBUF_SYNC(sc, txs->tx_map, BUS_DMASYNC_PREWRITE);

		txc = txs->tx_cb;
		txc->tbd_number = txs->tx_map->dm_nsegs;
		txc->cb_status = 0;
		txc->cb_command = htole16(FXP_CB_COMMAND_XMIT | FXP_CB_COMMAND_SF);
		txc->tx_threshold = tx_threshold;
		for (seg = 0; seg < txs->tx_map->dm_nsegs; seg++) {
			txc->tbd[seg].tb_addr =
			    htole32(txs->tx_map->dm_segs[seg].ds_addr);
			txc->tbd[seg].tb_size =
			    htole32(txs->tx_map->dm_segs[seg].ds_len);
		}
		FXP_TXCB_SYNC(sc, txs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		++cnt;
		sc->sc_cbt_prod = txs;
	}

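	/*
	 * Hand-off to the chip: append a fresh suspending NOP CB after the
	 * frames queued above, clear the suspend/interrupt bits on the
	 * previous NOP, and let CU_RESUME walk the command unit through
	 * the new CBs.
	 */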
	if (cnt != sc->sc_cbt_cnt) {
		/* We enqueued at least one. */
		ifp->if_timer = 5;

		txs = sc->sc_cbt_prod;
		txs = txs->tx_next;
		sc->sc_cbt_prod = txs;
		txs->tx_cb->cb_command =
		    htole16(FXP_CB_COMMAND_I | FXP_CB_COMMAND_NOP | FXP_CB_COMMAND_S);
		FXP_TXCB_SYNC(sc, txs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		FXP_TXCB_SYNC(sc, sc->sc_cbt_prev,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		sc->sc_cbt_prev->tx_cb->cb_command &=
		    htole16(~(FXP_CB_COMMAND_S | FXP_CB_COMMAND_I));
		FXP_TXCB_SYNC(sc, sc->sc_cbt_prev,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		sc->sc_cbt_prev = txs;

		fxp_scb_wait(sc);
		fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_RESUME);

		sc->sc_cbt_cnt = cnt + 1;
	}
}

/*
 * Process interface interrupts.
 */
int
fxp_intr(void *arg)
{
	struct fxp_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	u_int16_t statack;
	bus_dmamap_t rxmap;
	int claimed = 0;
	int rnr = 0;

	/*
	 * If the interface isn't running, don't try to
	 * service the interrupt.. just ack it and bail.
	 */
	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		statack = CSR_READ_2(sc, FXP_CSR_SCB_STATUS);
		if (statack) {
			claimed = 1;
			CSR_WRITE_2(sc, FXP_CSR_SCB_STATUS,
			    statack & FXP_SCB_STATACK_MASK);
		}
		return claimed;
	}

	while ((statack = CSR_READ_2(sc, FXP_CSR_SCB_STATUS)) &
	    FXP_SCB_STATACK_MASK) {
		claimed = 1;
		rnr = (statack & (FXP_SCB_STATACK_RNR |
		    FXP_SCB_STATACK_SWI)) ? 1 : 0;
		/*
		 * First ACK all the interrupts in this pass.
		 */
		CSR_WRITE_2(sc, FXP_CSR_SCB_STATUS,
		    statack & FXP_SCB_STATACK_MASK);

		/*
		 * Free any finished transmit mbuf chains.
		 */
		if (statack & (FXP_SCB_STATACK_CXTNO|FXP_SCB_STATACK_CNA)) {
			int txcnt = sc->sc_cbt_cnt;
			struct fxp_txsw *txs = sc->sc_cbt_cons;

			FXP_TXCB_SYNC(sc, txs,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

			while ((txcnt > 0) &&
			    ((txs->tx_cb->cb_status & htole16(FXP_CB_STATUS_C)) ||
			    (txs->tx_cb->cb_command & htole16(FXP_CB_COMMAND_NOP)))) {
				if (txs->tx_mbuf != NULL) {
					FXP_MBUF_SYNC(sc, txs->tx_map,
					    BUS_DMASYNC_POSTWRITE);
					bus_dmamap_unload(sc->sc_dmat,
					    txs->tx_map);
					m_freem(txs->tx_mbuf);
					txs->tx_mbuf = NULL;
				}
				--txcnt;
				txs = txs->tx_next;
				FXP_TXCB_SYNC(sc, txs,
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			}
			sc->sc_cbt_cnt = txcnt;
			/* Did we transmit any packets? */
			if (sc->sc_cbt_cons != txs)
				ifp->if_flags &= ~IFF_OACTIVE;
			ifp->if_timer = sc->sc_cbt_cnt ? 5 : 0;
			sc->sc_cbt_cons = txs;

			if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
				/*
				 * Try to start more packets transmitting.
				 */
				fxp_start(ifp);
			}
		}
		/*
		 * Process receiver interrupts. If a Receive Unit
		 * not ready (RNR) condition exists, get whatever
		 * packets we can and re-start the receiver.
		 */
		if (statack & (FXP_SCB_STATACK_FR | FXP_SCB_STATACK_RNR |
		    FXP_SCB_STATACK_SWI)) {
			struct mbuf *m;
			u_int8_t *rfap;
rcvloop:
			m = sc->rfa_headm;
			rfap = m->m_ext.ext_buf + RFA_ALIGNMENT_FUDGE;
			rxmap = *((bus_dmamap_t *)m->m_ext.ext_buf);
			bus_dmamap_sync(sc->sc_dmat, rxmap,
			    0, MCLBYTES, BUS_DMASYNC_POSTREAD |
			    BUS_DMASYNC_POSTWRITE);

			if (*(u_int16_t *)(rfap +
			    offsetof(struct fxp_rfa, rfa_status)) &
			    htole16(FXP_RFA_STATUS_C)) {
				if (*(u_int16_t *)(rfap +
				    offsetof(struct fxp_rfa, rfa_status)) &
				    htole16(FXP_RFA_STATUS_RNR))
					rnr = 1;

				/*
				 * Remove first packet from the chain.
				 */
				sc->rfa_headm = m->m_next;
				m->m_next = NULL;

				/*
				 * Add a new buffer to the receive chain.
				 * If this fails, the old buffer is recycled
				 * instead.
				 */
				if (fxp_add_rfabuf(sc, m) == 0) {
					u_int16_t total_len;

					total_len = htole16(*(u_int16_t *)(rfap +
					    offsetof(struct fxp_rfa,
					    actual_size))) &
					    (MCLBYTES - 1);
					if (total_len <
					    sizeof(struct ether_header)) {
						m_freem(m);
						goto rcvloop;
					}
					if (*(u_int16_t *)(rfap +
					    offsetof(struct fxp_rfa,
					    rfa_status)) &
					    htole16(FXP_RFA_STATUS_CRC)) {
						m_freem(m);
						goto rcvloop;
					}

					m->m_pkthdr.rcvif = ifp;
					m->m_pkthdr.len = m->m_len =
					    total_len;
#if NBPFILTER > 0
					if (ifp->if_bpf)
						bpf_mtap(ifp->if_bpf, m,
						    BPF_DIRECTION_IN);
#endif /* NBPFILTER > 0 */
					ether_input_mbuf(ifp, m);
				}
				goto rcvloop;
			}
		}
		if (rnr) {
			rxmap = *((bus_dmamap_t *)
			    sc->rfa_headm->m_ext.ext_buf);
			fxp_scb_wait(sc);
			CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
			    rxmap->dm_segs[0].ds_addr +
			    RFA_ALIGNMENT_FUDGE);
			fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_START);

		}
	}
	return (claimed);
}

/*
 * Update packet in/out/collision statistics. The i82557 doesn't
 * allow you to access these counters without doing a fairly
 * expensive DMA to get _all_ of the statistics it maintains, so
 * we do this operation here only once per second. The statistics
 * counters in the kernel are updated from the previous dump-stats
 * DMA and then a new dump-stats DMA is started. The on-chip
 * counters are zeroed when the DMA completes. If we can't start
 * the DMA immediately, we don't wait - we just prepare to read
 * them again next time.
 */
void
fxp_stats_update(void *arg)
{
	struct fxp_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct fxp_stats *sp = &sc->sc_ctrl->stats;
	int s;

	FXP_STATS_SYNC(sc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	ifp->if_opackets += letoh32(sp->tx_good);
	ifp->if_collisions += letoh32(sp->tx_total_collisions);
	if (sp->rx_good) {
		ifp->if_ipackets += letoh32(sp->rx_good);
		sc->rx_idle_secs = 0;
	} else if (sc->sc_flags & FXPF_RECV_WORKAROUND)
		sc->rx_idle_secs++;
	ifp->if_ierrors +=
	    letoh32(sp->rx_crc_errors) +
	    letoh32(sp->rx_alignment_errors) +
	    letoh32(sp->rx_rnr_errors) +
	    letoh32(sp->rx_overrun_errors);
	/*
	 * If any transmit underruns occurred, bump up the transmit
	 * threshold by another 512 bytes (64 * 8).
	 */
	if (sp->tx_underruns) {
		ifp->if_oerrors += letoh32(sp->tx_underruns);
		if (tx_threshold < 192)
			tx_threshold += 64;
	}
	s = splnet();
	/*
	 * If we haven't received any packets in FXP_MAX_RX_IDLE seconds,
	 * then assume the receiver has locked up and attempt to clear
	 * the condition by reprogramming the multicast filter. This is
	 * a work-around for a bug in the 82557 where the receiver locks
	 * up if it gets certain types of garbage in the synchronization
	 * bits prior to the packet header. This bug is supposed to only
	 * occur in 10Mbps mode, but has been seen to occur in 100Mbps
	 * mode as well (perhaps due to a 10/100 speed transition).
	 */
	if (sc->rx_idle_secs > FXP_MAX_RX_IDLE) {
		sc->rx_idle_secs = 0;
		fxp_init(sc);
		splx(s);
		return;
	}
	/*
	 * If there is no pending command, start another stats
	 * dump. Otherwise punt for now.
	 */
	FXP_STATS_SYNC(sc, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	if (!(CSR_READ_2(sc, FXP_CSR_SCB_COMMAND) & 0xff)) {
		/*
		 * Start another stats dump.
		 */
		fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMPRESET);
	} else {
		/*
		 * A previous command is still waiting to be accepted.
		 * Just zero our copy of the stats and wait for the
		 * next timer event to update them.
		 */
		sp->tx_good = 0;
		sp->tx_underruns = 0;
		sp->tx_total_collisions = 0;

		sp->rx_good = 0;
		sp->rx_crc_errors = 0;
		sp->rx_alignment_errors = 0;
		sp->rx_rnr_errors = 0;
		sp->rx_overrun_errors = 0;
	}

	/* Tick the MII clock. */
	mii_tick(&sc->sc_mii);

	splx(s);
	/*
	 * Schedule another timeout one second from now.
	 */
	timeout_add_sec(&sc->stats_update_to, 1);
}

/*
 * Stop the interface. Cancels the statistics updater and resets
 * the interface.
 */
void
fxp_stop(struct fxp_softc *sc, int drain)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int i;

	/*
	 * Turn down interface (done early to avoid bad interactions
	 * between panics, shutdown hooks, and the watchdog timer)
	 */
	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	/*
	 * Cancel stats updater.
	 */
	timeout_del(&sc->stats_update_to);
	mii_down(&sc->sc_mii);

	/*
	 * Issue software reset.
	 */
	CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
	DELAY(10);

	/*
	 * Release any xmit buffers.
	 */
	for (i = 0; i < FXP_NTXCB; i++) {
		if (sc->txs[i].tx_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->txs[i].tx_map);
			m_freem(sc->txs[i].tx_mbuf);
			sc->txs[i].tx_mbuf = NULL;
		}
	}
	sc->sc_cbt_cnt = 0;

	if (drain) {
		bus_dmamap_t rxmap;
		struct mbuf *m;

		/*
		 * Free all the receive buffers then reallocate/reinitialize
		 */
		m = sc->rfa_headm;
		while (m != NULL) {
			rxmap = *((bus_dmamap_t *)m->m_ext.ext_buf);
			bus_dmamap_unload(sc->sc_dmat, rxmap);
			FXP_RXMAP_PUT(sc, rxmap);
			m = m_free(m);
			sc->rx_bufs--;
		}
		sc->rfa_headm = NULL;
		sc->rfa_tailm = NULL;
		for (i = 0; i < FXP_NRFABUFS_MIN; i++) {
			if (fxp_add_rfabuf(sc, NULL) != 0) {
				/*
				 * This "can't happen" - we're at splnet()
				 * and we just freed all the buffers we need
				 * above.
				 */
				panic("fxp_stop: no buffers!");
			}
			sc->rx_bufs++;
		}
	}
}

/*
 * Watchdog/transmission transmit timeout handler. Called when a
 * transmission is started on the interface, but no interrupt is
 * received before the timeout. This usually indicates that the
 * card has wedged for some reason.
 */
void
fxp_watchdog(struct ifnet *ifp)
{
	struct fxp_softc *sc = ifp->if_softc;

	log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;

	fxp_init(sc);
}

/*
 * Submit a command to the i82557.
 */
void
fxp_scb_cmd(struct fxp_softc *sc, u_int16_t cmd)
{
	CSR_WRITE_2(sc, FXP_CSR_SCB_COMMAND, cmd);
}

void
fxp_init(void *xsc)
{
	struct fxp_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct fxp_cb_config *cbp;
	struct fxp_cb_ias *cb_ias;
	struct fxp_cb_tx *txp;
	bus_dmamap_t rxmap;
	int i, prm, save_bf, lrxen, allm, s, bufs;

	s = splnet();

	/*
	 * Cancel any pending I/O
	 */
	fxp_stop(sc, 0);

	/*
	 * Initialize base of CBL and RFA memory. Loading with zero
	 * sets it up for regular linear addressing.
	 */
	fxp_scb_wait(sc);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 0);
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_BASE);

	fxp_scb_wait(sc);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 0);
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_BASE);

#ifndef SMALL_KERNEL
	fxp_load_ucode(sc);
#endif
	/* Once through to set flags */
	fxp_mc_setup(sc, 0);

	/*
	 * In order to support receiving 802.1Q VLAN frames, we have to
	 * enable "save bad frames", since they are 4 bytes larger than
	 * the normal Ethernet maximum frame length. On i82558 and later,
	 * we have a better mechanism for this.
	 */
	save_bf = 0;
	lrxen = 0;

	if (sc->sc_revision >= FXP_REV_82558_A4)
		lrxen = 1;
	else
		save_bf = 1;

	/*
	 * Initialize base of dump-stats buffer.
	 */
	fxp_scb_wait(sc);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
	    sc->tx_cb_map->dm_segs->ds_addr +
	    offsetof(struct fxp_ctrl, stats));
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMP_ADR);

	cbp = &sc->sc_ctrl->u.cfg;
	/*
	 * This bcopy is kind of disgusting, but there are a bunch of
	 * must-be-zero and must-be-one bits in this structure and this
	 * is the easiest way to initialize them all to proper values.
	 */
	bcopy(fxp_cb_config_template, (void *)&cbp->cb_status,
	    sizeof(fxp_cb_config_template));

	prm = (ifp->if_flags & IFF_PROMISC) ? 1 : 0;
	allm = (ifp->if_flags & IFF_ALLMULTI) ? 1 : 0;

#if 0
	cbp->cb_status = 0;
	cbp->cb_command = FXP_CB_COMMAND_CONFIG | FXP_CB_COMMAND_EL;
	cbp->link_addr = 0xffffffff;	/* (no) next command */
	cbp->byte_count = 22;		/* (22) bytes to config */
	cbp->rx_fifo_limit = 8;		/* rx fifo threshold (32 bytes) */
	cbp->tx_fifo_limit = 0;		/* tx fifo threshold (0 bytes) */
	cbp->adaptive_ifs = 0;		/* (no) adaptive interframe spacing */
	cbp->rx_dma_bytecount = 0;	/* (no) rx DMA max */
	cbp->tx_dma_bytecount = 0;	/* (no) tx DMA max */
	cbp->dma_bce = 0;		/* (disable) dma max counters */
	cbp->late_scb = 0;		/* (don't) defer SCB update */
	cbp->tno_int = 0;		/* (disable) tx not okay interrupt */
	cbp->ci_int = 1;		/* interrupt on CU idle */
	cbp->save_bf = save_bf ? 1 : prm; /* save bad frames */
	cbp->disc_short_rx = !prm;	/* discard short packets */
	cbp->underrun_retry = 1;	/* retry mode (1) on DMA underrun */
	cbp->mediatype = !sc->phy_10Mbps_only; /* interface mode */
	cbp->nsai = 1;			/* (don't) disable source addr insert */
	cbp->preamble_length = 2;	/* (7 byte) preamble */
	cbp->loopback = 0;		/* (don't) loopback */
	cbp->linear_priority = 0;	/* (normal CSMA/CD operation) */
	cbp->linear_pri_mode = 0;	/* (wait after xmit only) */
	cbp->interfrm_spacing = 6;	/* (96 bits of) interframe spacing */
	cbp->promiscuous = prm;		/* promiscuous mode */
	cbp->bcast_disable = 0;		/* (don't) disable broadcasts */
	cbp->crscdt = 0;		/* (CRS only) */
	cbp->stripping = !prm;		/* truncate rx packet to byte count */
	cbp->padding = 1;		/* (do) pad short tx packets */
	cbp->rcv_crc_xfer = 0;		/* (don't) xfer CRC to host */
	cbp->long_rx = lrxen;		/* (enable) long packets */
	cbp->force_fdx = 0;		/* (don't) force full duplex */
	cbp->fdx_pin_en = 1;		/* (enable) FDX# pin */
	cbp->multi_ia = 0;		/* (don't) accept multiple IAs */
	cbp->mc_all = allm;
#else
	cbp->cb_command = htole16(FXP_CB_COMMAND_CONFIG | FXP_CB_COMMAND_EL);
	if (allm)
		cbp->mc_all |= 0x08;		/* accept all multicasts */
	else
		cbp->mc_all &= ~0x08;		/* reject all multicasts */

	if (prm) {
		cbp->promiscuous |= 1;		/* promiscuous mode */
		cbp->ctrl2 &= ~0x01;		/* save short packets */
		cbp->stripping &= ~0x01;	/* don't truncate rx packets */
	} else {
		cbp->promiscuous &= ~1;		/* no promiscuous mode */
		cbp->ctrl2 |= 0x01;		/* discard short packets */
		cbp->stripping |= 0x01;		/* truncate rx packets */
	}

	if (prm || save_bf)
		cbp->ctrl1 |= 0x80;		/* save bad frames */
	else
		cbp->ctrl1 &= ~0x80;		/* discard bad frames */

	if (sc->sc_flags & FXPF_MWI_ENABLE)
		cbp->ctrl0 |= 0x01;		/* enable PCI MWI command */

	if (!sc->phy_10Mbps_only)		/* interface mode */
		cbp->mediatype |= 0x01;
	else
		cbp->mediatype &= ~0x01;

	if (lrxen)				/* long packets */
		cbp->stripping |= 0x08;
	else
		cbp->stripping &= ~0x08;

	cbp->tx_dma_bytecount = 0;	/* (no) tx DMA max, dma_bce = 0 ??? */
	cbp->ctrl1 |= 0x08;		/* ci_int = 1 */
	cbp->ctrl3 |= 0x08;		/* nsai */
	cbp->fifo_limit = 0x08;		/* tx and rx fifo limit */
	cbp->fdx_pin |= 0x80;		/* Enable full duplex setting by pin */
#endif

	/*
	 * Start the config command/DMA.
	 */
	fxp_scb_wait(sc);
	FXP_CFG_SYNC(sc, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->tx_cb_map->dm_segs->ds_addr +
	    offsetof(struct fxp_ctrl, u.cfg));
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
	/* ...and wait for it to complete. */
	i = FXP_CMD_TMO;
	do {
		DELAY(1);
		FXP_CFG_SYNC(sc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	} while ((cbp->cb_status & htole16(FXP_CB_STATUS_C)) == 0 && i--);

	FXP_CFG_SYNC(sc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	if (!(cbp->cb_status & htole16(FXP_CB_STATUS_C))) {
		printf("%s: config command timeout\n", sc->sc_dev.dv_xname);
		return;
	}

	/*
	 * Now initialize the station address.
	 */
	cb_ias = &sc->sc_ctrl->u.ias;
	cb_ias->cb_status = htole16(0);
	cb_ias->cb_command = htole16(FXP_CB_COMMAND_IAS | FXP_CB_COMMAND_EL);
	cb_ias->link_addr = htole32(0xffffffff);
	bcopy(sc->sc_arpcom.ac_enaddr, (void *)cb_ias->macaddr,
	    sizeof(sc->sc_arpcom.ac_enaddr));

	/*
	 * Start the IAS (Individual Address Setup) command/DMA.
	 */
	fxp_scb_wait(sc);
	FXP_IAS_SYNC(sc, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->tx_cb_map->dm_segs->ds_addr +
	    offsetof(struct fxp_ctrl, u.ias));
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
	/* ...and wait for it to complete. */
	i = FXP_CMD_TMO;
	do {
		DELAY(1);
		FXP_IAS_SYNC(sc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	} while (!(cb_ias->cb_status & htole16(FXP_CB_STATUS_C)) && i--);

	FXP_IAS_SYNC(sc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	if (!(cb_ias->cb_status & htole16(FXP_CB_STATUS_C))) {
		printf("%s: IAS command timeout\n", sc->sc_dev.dv_xname);
		return;
	}

	/* Again, this time really upload the multicast addresses */
	fxp_mc_setup(sc, 1);

	/*
	 * Initialize transmit control block (TxCB) list.
	 */
	bzero(sc->sc_ctrl->tx_cb, sizeof(struct fxp_cb_tx) * FXP_NTXCB);
	txp = sc->sc_ctrl->tx_cb;
	for (i = 0; i < FXP_NTXCB; i++) {
		txp[i].cb_command = htole16(FXP_CB_COMMAND_NOP);
		txp[i].link_addr = htole32(sc->tx_cb_map->dm_segs->ds_addr +
		    offsetof(struct fxp_ctrl, tx_cb[(i + 1) & FXP_TXCB_MASK]));
		txp[i].tbd_array_addr = htole32(sc->tx_cb_map->dm_segs->ds_addr +
		    offsetof(struct fxp_ctrl, tx_cb[i].tbd[0]));
	}
	/*
	 * Set the suspend flag on the first TxCB and start the control
	 * unit. It will execute the NOP and then suspend.
	 */
	sc->sc_cbt_prev = sc->sc_cbt_prod = sc->sc_cbt_cons = sc->txs;
	sc->sc_cbt_cnt = 1;
	sc->sc_ctrl->tx_cb[0].cb_command = htole16(FXP_CB_COMMAND_NOP |
	    FXP_CB_COMMAND_S | FXP_CB_COMMAND_I);
	bus_dmamap_sync(sc->sc_dmat, sc->tx_cb_map, 0,
	    sc->tx_cb_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	fxp_scb_wait(sc);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->tx_cb_map->dm_segs->ds_addr +
	    offsetof(struct fxp_ctrl, tx_cb[0]));
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);

	/*
	 * Initialize receiver buffer area - RFA.
	 */
	if (ifp->if_flags & IFF_UP)
		bufs = FXP_NRFABUFS_MAX;
	else
		bufs = FXP_NRFABUFS_MIN;
	if (sc->rx_bufs > bufs) {
		while (sc->rfa_headm != NULL && sc->rx_bufs-- > bufs) {
			rxmap = *((bus_dmamap_t *)sc->rfa_headm->m_ext.ext_buf);
			bus_dmamap_unload(sc->sc_dmat, rxmap);
			FXP_RXMAP_PUT(sc, rxmap);
			sc->rfa_headm = m_free(sc->rfa_headm);
		}
	} else if (sc->rx_bufs < bufs) {
		int err, tmp_rx_bufs = sc->rx_bufs;
		for (i = sc->rx_bufs; i < bufs; i++) {
			if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, 0, &sc->sc_rxmaps[i])) != 0) {
				printf("%s: unable to create rx dma map %d, "
				    "error %d\n", sc->sc_dev.dv_xname, i, err);
				break;
			}
			sc->rx_bufs++;
		}
		for (i = tmp_rx_bufs; i < sc->rx_bufs; i++)
			if (fxp_add_rfabuf(sc, NULL) != 0)
				break;
	}
	fxp_scb_wait(sc);

	/*
	 * Set current media.
	 */
	mii_mediachg(&sc->sc_mii);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Request a software generated interrupt that will be used to
	 * (re)start the RU processing. If we direct the chip to start
	 * receiving from the start of queue now, instead of letting the
	 * interrupt handler first process all received packets, we run
	 * the risk of having it overwrite mbuf clusters while they are
	 * being processed or after they have been returned to the pool.
	 */
	CSR_WRITE_2(sc, FXP_CSR_SCB_COMMAND,
	    CSR_READ_2(sc, FXP_CSR_SCB_COMMAND) |
	    FXP_SCB_INTRCNTL_REQUEST_SWI);
	splx(s);

	/*
	 * Start stats updater.
	 */
	timeout_add_sec(&sc->stats_update_to, 1);
}

/*
 * Change media according to request.
 */
int
fxp_mediachange(struct ifnet *ifp)
{
	struct fxp_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	if (mii->mii_instance) {
		struct mii_softc *miisc;
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(&sc->sc_mii);
	return (0);
}

/*
 * Notify the world which media we're using.
 */
void
fxp_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct fxp_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
}

/*
 * Add a buffer to the end of the RFA buffer list.
 * Return 0 if successful, 1 for failure. A failure results in
 * adding the 'oldm' (if non-NULL) on to the end of the list -
 * tossing out its old contents and recycling it.
 * The RFA struct is stuck at the beginning of mbuf cluster and the
 * data pointer is fixed up to point just past it.
 */
int
fxp_add_rfabuf(struct fxp_softc *sc, struct mbuf *oldm)
{
	u_int32_t v;
	struct mbuf *m;
	u_int8_t *rfap;
	bus_dmamap_t rxmap = NULL;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m != NULL) {
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			if (oldm == NULL)
				return 1;
			m = oldm;
			m->m_data = m->m_ext.ext_buf;
		}
		if (oldm == NULL) {
			rxmap = FXP_RXMAP_GET(sc);
			*((bus_dmamap_t *)m->m_ext.ext_buf) = rxmap;
			bus_dmamap_load(sc->sc_dmat, rxmap,
			    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
			    BUS_DMA_NOWAIT);
		} else if (oldm == m)
			rxmap = *((bus_dmamap_t *)oldm->m_ext.ext_buf);
		else {
			rxmap = *((bus_dmamap_t *)oldm->m_ext.ext_buf);
			bus_dmamap_unload(sc->sc_dmat, rxmap);
			bus_dmamap_load(sc->sc_dmat, rxmap,
			    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
			    BUS_DMA_NOWAIT);
			*mtod(m, bus_dmamap_t *) = rxmap;
		}
	} else {
		if (oldm == NULL)
			return 1;
		m = oldm;
		m->m_data = m->m_ext.ext_buf;
		rxmap = *mtod(m, bus_dmamap_t *);
	}

	/*
	 * Move the data pointer up so that the incoming data packet
	 * will be 32-bit aligned.
	 */
	m->m_data += RFA_ALIGNMENT_FUDGE;

	/*
	 * Get a pointer to the base of the mbuf cluster and move
	 * data start past it.
	 */
	rfap = m->m_data;
	m->m_data += sizeof(struct fxp_rfa);
	*(u_int16_t *)(rfap + offsetof(struct fxp_rfa, size)) =
	    htole16(MCLBYTES - sizeof(struct fxp_rfa) - RFA_ALIGNMENT_FUDGE);

	/*
	 * Initialize the rest of the RFA. Note that since the RFA
	 * is misaligned, we cannot store values directly. Instead,
	 * we use an optimized, inline copy.
	 */
	*(u_int16_t *)(rfap + offsetof(struct fxp_rfa, rfa_status)) = 0;
	*(u_int16_t *)(rfap + offsetof(struct fxp_rfa, rfa_control)) =
	    htole16(FXP_RFA_CONTROL_EL);
	*(u_int16_t *)(rfap + offsetof(struct fxp_rfa, actual_size)) = 0;

	v = -1;
	fxp_lwcopy(&v,
	    (u_int32_t *)(rfap + offsetof(struct fxp_rfa, link_addr)));
	fxp_lwcopy(&v,
	    (u_int32_t *)(rfap + offsetof(struct fxp_rfa, rbd_addr)));

	bus_dmamap_sync(sc->sc_dmat, rxmap, 0, MCLBYTES,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/*
	 * If there are other buffers already on the list, attach this
	 * one to the end by fixing up the tail to point to this one.
	 */
	if (sc->rfa_headm != NULL) {
		sc->rfa_tailm->m_next = m;
		v = htole32(rxmap->dm_segs[0].ds_addr + RFA_ALIGNMENT_FUDGE);
		rfap = sc->rfa_tailm->m_ext.ext_buf + RFA_ALIGNMENT_FUDGE;
		fxp_lwcopy(&v,
		    (u_int32_t *)(rfap + offsetof(struct fxp_rfa, link_addr)));
		*(u_int16_t *)(rfap + offsetof(struct fxp_rfa, rfa_control)) &=
		    htole16((u_int16_t)~FXP_RFA_CONTROL_EL);
		/* XXX we only need to sync the control struct */
		bus_dmamap_sync(sc->sc_dmat,
		    *((bus_dmamap_t *)sc->rfa_tailm->m_ext.ext_buf), 0,
		    MCLBYTES, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	} else
		sc->rfa_headm = m;

	sc->rfa_tailm = m;

	return (m == oldm);
}
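
/*
 * FXP_CSR_MDICONTROL layout as used by fxp_mdi_read() and fxp_mdi_write()
 * below (a summary inferred from the shifts and the poll mask, not from
 * the datasheet): bits 27-26 opcode, 25-21 PHY address, 20-16 register,
 * 15-0 data; bit 28 (0x10000000) is polled as the completion flag.
 */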

int
fxp_mdi_read(struct device *self, int phy, int reg)
{
	struct fxp_softc *sc = (struct fxp_softc *)self;
	int count = FXP_CMD_TMO;
	int value;

	CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
	    (FXP_MDI_READ << 26) | (reg << 16) | (phy << 21));

	while (((value = CSR_READ_4(sc, FXP_CSR_MDICONTROL)) & 0x10000000) == 0
	    && count--)
		DELAY(10);

	if (count <= 0)
		printf("%s: fxp_mdi_read: timed out\n", sc->sc_dev.dv_xname);

	return (value & 0xffff);
}

void
fxp_statchg(struct device *self)
{
	/* Nothing to do. */
}

void
fxp_mdi_write(struct device *self, int phy, int reg, int value)
{
	struct fxp_softc *sc = (struct fxp_softc *)self;
	int count = FXP_CMD_TMO;

	CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
	    (FXP_MDI_WRITE << 26) | (reg << 16) | (phy << 21) |
	    (value & 0xffff));

	while ((CSR_READ_4(sc, FXP_CSR_MDICONTROL) & 0x10000000) == 0 &&
	    count--)
		DELAY(10);

	if (count <= 0)
		printf("%s: fxp_mdi_write: timed out\n", sc->sc_dev.dv_xname);
}

int
fxp_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct fxp_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	switch (command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			fxp_init(sc);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif
		break;

	case SIOCSIFFLAGS:
		/*
		 * If interface is marked up and not running, then start it.
		 * If it is marked down and running, stop it.
		 * XXX If it's up then re-initialize it. This is so flags
		 * such as IFF_PROMISC are handled.
		 */
		if (ifp->if_flags & IFF_UP)
			fxp_init(sc);
		else if (ifp->if_flags & IFF_RUNNING)
			fxp_stop(sc, 1);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, command, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			fxp_init(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

/*
 * Program the multicast filter.
 *
 * We have an artificial restriction that the multicast setup command
 * must be the first command in the chain, so we take steps to ensure
 * this. This requirement lets us keep up the performance of the
 * pre-initialized command ring (esp. link pointers) by not actually
 * inserting the mcsetup command in the ring - i.e. its link pointer
 * points to the TxCB ring, but the mcsetup descriptor itself is not part
 * of it. We then can do 'CU_START' on the mcsetup descriptor and have it
 * lead into the regular TxCB ring when it completes.
 *
 * This function must be called at splnet.
 */
void
fxp_mc_setup(struct fxp_softc *sc, int doit)
{
	struct fxp_cb_mcs *mcsp = &sc->sc_ctrl->u.mcs;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct ether_multistep step;
	struct ether_multi *enm;
	int i, nmcasts;

	/*
	 * Initialize multicast setup descriptor.
	 */
	mcsp->cb_status = htole16(0);
	mcsp->cb_command = htole16(FXP_CB_COMMAND_MCAS | FXP_CB_COMMAND_EL);
	mcsp->link_addr = htole32(-1);

	nmcasts = 0;
	if (!(ifp->if_flags & IFF_ALLMULTI)) {
		ETHER_FIRST_MULTI(step, &sc->sc_arpcom, enm);
		while (enm != NULL) {
			if (nmcasts >= MAXMCADDR) {
				ifp->if_flags |= IFF_ALLMULTI;
				nmcasts = 0;
				break;
			}

			/* Punt on ranges. */
			if (bcmp(enm->enm_addrlo, enm->enm_addrhi,
			    sizeof(enm->enm_addrlo)) != 0) {
				ifp->if_flags |= IFF_ALLMULTI;
				nmcasts = 0;
				break;
			}
			bcopy(enm->enm_addrlo,
			    (void *)&mcsp->mc_addr[nmcasts][0], ETHER_ADDR_LEN);
			nmcasts++;
			ETHER_NEXT_MULTI(step, enm);
		}
	}
	if (doit == 0)
		return;
	mcsp->mc_cnt = htole16(nmcasts * ETHER_ADDR_LEN);

	/*
	 * Wait until command unit is not active. This should never
	 * be the case when nothing is queued, but make sure anyway.
	 */
	for (i = FXP_CMD_TMO; (CSR_READ_2(sc, FXP_CSR_SCB_STATUS) &
	    FXP_SCB_CUS_MASK) != FXP_SCB_CUS_IDLE && i--; DELAY(1));

	if ((CSR_READ_2(sc, FXP_CSR_SCB_STATUS) &
	    FXP_SCB_CUS_MASK) != FXP_SCB_CUS_IDLE) {
		printf("%s: timeout waiting for CU ready\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/*
	 * Start the multicast setup command.
	 */
	fxp_scb_wait(sc);
	FXP_MCS_SYNC(sc, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->tx_cb_map->dm_segs->ds_addr +
	    offsetof(struct fxp_ctrl, u.mcs));
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);

	i = FXP_CMD_TMO;
	do {
		DELAY(1);
		FXP_MCS_SYNC(sc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	} while (!(mcsp->cb_status & htole16(FXP_CB_STATUS_C)) && i--);

	FXP_MCS_SYNC(sc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	if (!(mcsp->cb_status & htole16(FXP_CB_STATUS_C))) {
		printf("%s: multicast command timeout\n", sc->sc_dev.dv_xname);
		return;
	}

}

#ifndef SMALL_KERNEL
#include <dev/microcode/fxp/rcvbundl.h>
struct ucode {
	u_int16_t	revision;
	u_int16_t	int_delay_offset;
	u_int16_t	bundle_max_offset;
	u_int16_t	min_size_mask_offset;
	const char	*uname;
} const ucode_table[] = {
	{ FXP_REV_82558_A4, D101_CPUSAVER_DWORD,
	  0, 0,
	  "fxp-d101a" },

	{ FXP_REV_82558_B0, D101_CPUSAVER_DWORD,
	  0, 0,
	  "fxp-d101b0" },

	{ FXP_REV_82559_A0, D101M_CPUSAVER_DWORD,
	  D101M_CPUSAVER_BUNDLE_MAX_DWORD, D101M_CPUSAVER_MIN_SIZE_DWORD,
	  "fxp-d101ma" },

	{ FXP_REV_82559S_A, D101S_CPUSAVER_DWORD,
	  D101S_CPUSAVER_BUNDLE_MAX_DWORD, D101S_CPUSAVER_MIN_SIZE_DWORD,
	  "fxp-d101s" },

	{ FXP_REV_82550, D102_B_CPUSAVER_DWORD,
	  D102_B_CPUSAVER_BUNDLE_MAX_DWORD, D102_B_CPUSAVER_MIN_SIZE_DWORD,
	  "fxp-d102" },

	{ FXP_REV_82550_C, D102_C_CPUSAVER_DWORD,
	  D102_C_CPUSAVER_BUNDLE_MAX_DWORD, D102_C_CPUSAVER_MIN_SIZE_DWORD,
	  "fxp-d102c" },

	{ FXP_REV_82551_F, D102_E_CPUSAVER_DWORD,
	  D102_E_CPUSAVER_BUNDLE_MAX_DWORD, D102_E_CPUSAVER_MIN_SIZE_DWORD,
	  "fxp-d102e" },

	{ FXP_REV_82551_10, D102_E_CPUSAVER_DWORD,
	  D102_E_CPUSAVER_BUNDLE_MAX_DWORD, D102_E_CPUSAVER_MIN_SIZE_DWORD,
	  "fxp-d102e" },

	{ 0, 0,
	  0, 0,
	  NULL }
};

void
fxp_load_ucode(struct fxp_softc *sc)
{
	const struct ucode *uc;
	struct fxp_cb_ucode *cbp = &sc->sc_ctrl->u.code;
	int i, error;
	u_int32_t *ucode_buf;
	size_t ucode_len;

	if (sc->sc_flags & FXPF_UCODE)
		return;

	for (uc = ucode_table; uc->revision != 0; uc++)
		if (sc->sc_revision == uc->revision)
			break;
	if (uc->revision == 0)
		return;	/* no ucode found for this chip */

	error = loadfirmware(uc->uname, (u_char **)&ucode_buf, &ucode_len);
	if (error) {
		printf("%s: error %d, could not read firmware %s\n",
firmware %s\n", 1840 sc->sc_dev.dv_xname, error, uc->uname); 1841 sc->sc_flags |= FXPF_UCODE; 1842 return; 1843 } 1844 1845 cbp->cb_status = 0; 1846 cbp->cb_command = htole16(FXP_CB_COMMAND_UCODE|FXP_CB_COMMAND_EL); 1847 cbp->link_addr = 0xffffffff; /* (no) next command */ 1848 for (i = 0; i < (ucode_len / sizeof(u_int32_t)); i++) 1849 cbp->ucode[i] = ucode_buf[i]; 1850 1851 if (uc->int_delay_offset) 1852 *((u_int16_t *)&cbp->ucode[uc->int_delay_offset]) = 1853 htole16(sc->sc_int_delay + sc->sc_int_delay / 2); 1854 1855 if (uc->bundle_max_offset) 1856 *((u_int16_t *)&cbp->ucode[uc->bundle_max_offset]) = 1857 htole16(sc->sc_bundle_max); 1858 1859 if (uc->min_size_mask_offset) 1860 *((u_int16_t *)&cbp->ucode[uc->min_size_mask_offset]) = 1861 htole16(sc->sc_min_size_mask); 1862 1863 FXP_UCODE_SYNC(sc, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1864 1865 /* 1866 * Download the ucode to the chip. 1867 */ 1868 fxp_scb_wait(sc); 1869 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->tx_cb_map->dm_segs->ds_addr 1870 + offsetof(struct fxp_ctrl, u.code)); 1871 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START); 1872 1873 /* ...and wait for it to complete. */ 1874 i = FXP_CMD_TMO; 1875 do { 1876 DELAY(2); 1877 FXP_UCODE_SYNC(sc, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1878 } while (((cbp->cb_status & htole16(FXP_CB_STATUS_C)) == 0) && --i); 1879 if (i == 0) { 1880 printf("%s: timeout loading microcode\n", sc->sc_dev.dv_xname); 1881 free(ucode_buf, M_DEVBUF); 1882 return; 1883 } 1884 1885 #ifdef DEBUG 1886 printf("%s: microcode loaded, int_delay: %d usec", 1887 sc->sc_dev.dv_xname, sc->sc_int_delay); 1888 1889 if (uc->bundle_max_offset) 1890 printf(", bundle_max %d\n", sc->sc_bundle_max); 1891 else 1892 printf("\n"); 1893 #endif 1894 1895 free(ucode_buf, M_DEVBUF); 1896 sc->sc_flags |= FXPF_UCODE; 1897 } 1898 #endif /* SMALL_KERNEL */ 1899