1 /* $NetBSD: elinkxl.c,v 1.115 2012/07/22 14:32:57 matt Exp $ */ 2 3 /*- 4 * Copyright (c) 1998 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Frank van der Linden. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 
30 */ 31 32 #include <sys/cdefs.h> 33 __KERNEL_RCSID(0, "$NetBSD: elinkxl.c,v 1.115 2012/07/22 14:32:57 matt Exp $"); 34 35 #include <sys/param.h> 36 #include <sys/systm.h> 37 #include <sys/callout.h> 38 #include <sys/kernel.h> 39 #include <sys/mbuf.h> 40 #include <sys/socket.h> 41 #include <sys/ioctl.h> 42 #include <sys/errno.h> 43 #include <sys/syslog.h> 44 #include <sys/select.h> 45 #include <sys/device.h> 46 #include <sys/rnd.h> 47 48 #include <net/if.h> 49 #include <net/if_dl.h> 50 #include <net/if_ether.h> 51 #include <net/if_media.h> 52 53 #include <net/bpf.h> 54 #include <net/bpfdesc.h> 55 56 #include <sys/cpu.h> 57 #include <sys/bus.h> 58 #include <sys/intr.h> 59 #include <machine/endian.h> 60 61 #include <dev/mii/miivar.h> 62 #include <dev/mii/mii.h> 63 #include <dev/mii/mii_bitbang.h> 64 65 #include <dev/ic/elink3reg.h> 66 /* #include <dev/ic/elink3var.h> */ 67 #include <dev/ic/elinkxlreg.h> 68 #include <dev/ic/elinkxlvar.h> 69 70 #ifdef DEBUG 71 int exdebug = 0; 72 #endif 73 74 /* ifmedia callbacks */ 75 int ex_media_chg(struct ifnet *ifp); 76 void ex_media_stat(struct ifnet *ifp, struct ifmediareq *req); 77 78 static int ex_ifflags_cb(struct ethercom *); 79 80 void ex_probe_media(struct ex_softc *); 81 void ex_set_filter(struct ex_softc *); 82 void ex_set_media(struct ex_softc *); 83 void ex_set_xcvr(struct ex_softc *, uint16_t); 84 struct mbuf *ex_get(struct ex_softc *, int); 85 uint16_t ex_read_eeprom(struct ex_softc *, int); 86 int ex_init(struct ifnet *); 87 void ex_read(struct ex_softc *); 88 void ex_reset(struct ex_softc *); 89 void ex_set_mc(struct ex_softc *); 90 void ex_getstats(struct ex_softc *); 91 void ex_printstats(struct ex_softc *); 92 void ex_tick(void *); 93 94 static int ex_eeprom_busy(struct ex_softc *); 95 static int ex_add_rxbuf(struct ex_softc *, struct ex_rxdesc *); 96 static void ex_init_txdescs(struct ex_softc *); 97 98 static void ex_setup_tx(struct ex_softc *); 99 static bool ex_shutdown(device_t, int); 100 static void 
ex_start(struct ifnet *); 101 static void ex_txstat(struct ex_softc *); 102 103 int ex_mii_readreg(device_t, int, int); 104 void ex_mii_writereg(device_t, int, int, int); 105 void ex_mii_statchg(struct ifnet *); 106 107 void ex_probemedia(struct ex_softc *); 108 109 /* 110 * Structure to map media-present bits in boards to ifmedia codes and 111 * printable media names. Used for table-driven ifmedia initialization. 112 */ 113 struct ex_media { 114 int exm_mpbit; /* media present bit */ 115 const char *exm_name; /* name of medium */ 116 int exm_ifmedia; /* ifmedia word for medium */ 117 int exm_epmedia; /* ELINKMEDIA_* constant */ 118 }; 119 120 /* 121 * Media table for 3c90x chips. Note that chips with MII have no 122 * `native' media. 123 */ 124 struct ex_media ex_native_media[] = { 125 { ELINK_PCI_10BASE_T, "10baseT", IFM_ETHER|IFM_10_T, 126 ELINKMEDIA_10BASE_T }, 127 { ELINK_PCI_10BASE_T, "10baseT-FDX", IFM_ETHER|IFM_10_T|IFM_FDX, 128 ELINKMEDIA_10BASE_T }, 129 { ELINK_PCI_AUI, "10base5", IFM_ETHER|IFM_10_5, 130 ELINKMEDIA_AUI }, 131 { ELINK_PCI_BNC, "10base2", IFM_ETHER|IFM_10_2, 132 ELINKMEDIA_10BASE_2 }, 133 { ELINK_PCI_100BASE_TX, "100baseTX", IFM_ETHER|IFM_100_TX, 134 ELINKMEDIA_100BASE_TX }, 135 { ELINK_PCI_100BASE_TX, "100baseTX-FDX",IFM_ETHER|IFM_100_TX|IFM_FDX, 136 ELINKMEDIA_100BASE_TX }, 137 { ELINK_PCI_100BASE_FX, "100baseFX", IFM_ETHER|IFM_100_FX, 138 ELINKMEDIA_100BASE_FX }, 139 { ELINK_PCI_100BASE_MII,"manual", IFM_ETHER|IFM_MANUAL, 140 ELINKMEDIA_MII }, 141 { ELINK_PCI_100BASE_T4, "100baseT4", IFM_ETHER|IFM_100_T4, 142 ELINKMEDIA_100BASE_T4 }, 143 { 0, NULL, 0, 144 0 }, 145 }; 146 147 /* 148 * MII bit-bang glue. 
149 */ 150 uint32_t ex_mii_bitbang_read(device_t); 151 void ex_mii_bitbang_write(device_t, uint32_t); 152 153 const struct mii_bitbang_ops ex_mii_bitbang_ops = { 154 ex_mii_bitbang_read, 155 ex_mii_bitbang_write, 156 { 157 ELINK_PHY_DATA, /* MII_BIT_MDO */ 158 ELINK_PHY_DATA, /* MII_BIT_MDI */ 159 ELINK_PHY_CLK, /* MII_BIT_MDC */ 160 ELINK_PHY_DIR, /* MII_BIT_DIR_HOST_PHY */ 161 0, /* MII_BIT_DIR_PHY_HOST */ 162 } 163 }; 164 165 /* 166 * Back-end attach and configure. 167 */ 168 void 169 ex_config(struct ex_softc *sc) 170 { 171 struct ifnet *ifp; 172 uint16_t val; 173 uint8_t macaddr[ETHER_ADDR_LEN] = {0}; 174 bus_space_tag_t iot = sc->sc_iot; 175 bus_space_handle_t ioh = sc->sc_ioh; 176 int i, error, attach_stage; 177 178 pmf_self_suspensor_init(sc->sc_dev, &sc->sc_suspensor, &sc->sc_qual); 179 180 callout_init(&sc->ex_mii_callout, 0); 181 182 ex_reset(sc); 183 184 val = ex_read_eeprom(sc, EEPROM_OEM_ADDR0); 185 macaddr[0] = val >> 8; 186 macaddr[1] = val & 0xff; 187 val = ex_read_eeprom(sc, EEPROM_OEM_ADDR1); 188 macaddr[2] = val >> 8; 189 macaddr[3] = val & 0xff; 190 val = ex_read_eeprom(sc, EEPROM_OEM_ADDR2); 191 macaddr[4] = val >> 8; 192 macaddr[5] = val & 0xff; 193 194 aprint_normal_dev(sc->sc_dev, "MAC address %s\n", ether_sprintf(macaddr)); 195 196 if (sc->ex_conf & (EX_CONF_INV_LED_POLARITY|EX_CONF_PHY_POWER)) { 197 GO_WINDOW(2); 198 val = bus_space_read_2(iot, ioh, ELINK_W2_RESET_OPTIONS); 199 if (sc->ex_conf & EX_CONF_INV_LED_POLARITY) 200 val |= ELINK_RESET_OPT_LEDPOLAR; 201 if (sc->ex_conf & EX_CONF_PHY_POWER) 202 val |= ELINK_RESET_OPT_PHYPOWER; 203 bus_space_write_2(iot, ioh, ELINK_W2_RESET_OPTIONS, val); 204 } 205 if (sc->ex_conf & EX_CONF_NO_XCVR_PWR) { 206 GO_WINDOW(0); 207 bus_space_write_2(iot, ioh, ELINK_W0_MFG_ID, 208 EX_XCVR_PWR_MAGICBITS); 209 } 210 211 attach_stage = 0; 212 213 /* 214 * Allocate the upload descriptors, and create and load the DMA 215 * map for them. 
216 */ 217 if ((error = bus_dmamem_alloc(sc->sc_dmat, 218 EX_NUPD * sizeof (struct ex_upd), PAGE_SIZE, 0, &sc->sc_useg, 1, 219 &sc->sc_urseg, BUS_DMA_NOWAIT)) != 0) { 220 aprint_error_dev(sc->sc_dev, 221 "can't allocate upload descriptors, error = %d\n", error); 222 goto fail; 223 } 224 225 attach_stage = 1; 226 227 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_useg, sc->sc_urseg, 228 EX_NUPD * sizeof (struct ex_upd), (void **)&sc->sc_upd, 229 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) { 230 aprint_error_dev(sc->sc_dev, 231 "can't map upload descriptors, error = %d\n", error); 232 goto fail; 233 } 234 235 attach_stage = 2; 236 237 if ((error = bus_dmamap_create(sc->sc_dmat, 238 EX_NUPD * sizeof (struct ex_upd), 1, 239 EX_NUPD * sizeof (struct ex_upd), 0, BUS_DMA_NOWAIT, 240 &sc->sc_upd_dmamap)) != 0) { 241 aprint_error_dev(sc->sc_dev, 242 "can't create upload desc. DMA map, error = %d\n", error); 243 goto fail; 244 } 245 246 attach_stage = 3; 247 248 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_upd_dmamap, 249 sc->sc_upd, EX_NUPD * sizeof (struct ex_upd), NULL, 250 BUS_DMA_NOWAIT)) != 0) { 251 aprint_error_dev(sc->sc_dev, 252 "can't load upload desc. DMA map, error = %d\n", error); 253 goto fail; 254 } 255 256 attach_stage = 4; 257 258 /* 259 * Allocate the download descriptors, and create and load the DMA 260 * map for them. 
261 */ 262 if ((error = bus_dmamem_alloc(sc->sc_dmat, 263 DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, PAGE_SIZE, 0, &sc->sc_dseg, 1, 264 &sc->sc_drseg, BUS_DMA_NOWAIT)) != 0) { 265 aprint_error_dev(sc->sc_dev, 266 "can't allocate download descriptors, error = %d\n", error); 267 goto fail; 268 } 269 270 attach_stage = 5; 271 272 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dseg, sc->sc_drseg, 273 DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, (void **)&sc->sc_dpd, 274 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) { 275 aprint_error_dev(sc->sc_dev, 276 "can't map download descriptors, error = %d\n", error); 277 goto fail; 278 } 279 memset(sc->sc_dpd, 0, DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN); 280 281 attach_stage = 6; 282 283 if ((error = bus_dmamap_create(sc->sc_dmat, 284 DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, 1, 285 DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, 0, BUS_DMA_NOWAIT, 286 &sc->sc_dpd_dmamap)) != 0) { 287 aprint_error_dev(sc->sc_dev, 288 "can't create download desc. DMA map, error = %d\n", error); 289 goto fail; 290 } 291 292 attach_stage = 7; 293 294 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dpd_dmamap, 295 sc->sc_dpd, DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, NULL, 296 BUS_DMA_NOWAIT)) != 0) { 297 aprint_error_dev(sc->sc_dev, 298 "can't load download desc. DMA map, error = %d\n", error); 299 goto fail; 300 } 301 bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap, 302 DPDMEMPAD_OFF, EX_IP4CSUMTX_PADLEN, BUS_DMASYNC_PREWRITE); 303 304 attach_stage = 8; 305 306 307 /* 308 * Create the transmit buffer DMA maps. 309 */ 310 for (i = 0; i < EX_NDPD; i++) { 311 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 312 EX_NTFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT, 313 &sc->sc_tx_dmamaps[i])) != 0) { 314 aprint_error_dev(sc->sc_dev, 315 "can't create tx DMA map %d, error = %d\n", 316 i, error); 317 goto fail; 318 } 319 } 320 321 attach_stage = 9; 322 323 /* 324 * Create the receive buffer DMA maps. 
325 */ 326 for (i = 0; i < EX_NUPD; i++) { 327 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 328 EX_NRFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT, 329 &sc->sc_rx_dmamaps[i])) != 0) { 330 aprint_error_dev(sc->sc_dev, 331 "can't create rx DMA map %d, error = %d\n", 332 i, error); 333 goto fail; 334 } 335 } 336 337 attach_stage = 10; 338 339 /* 340 * Create ring of upload descriptors, only once. The DMA engine 341 * will loop over this when receiving packets, stalling if it 342 * hits an UPD with a finished receive. 343 */ 344 for (i = 0; i < EX_NUPD; i++) { 345 sc->sc_rxdescs[i].rx_dmamap = sc->sc_rx_dmamaps[i]; 346 sc->sc_rxdescs[i].rx_upd = &sc->sc_upd[i]; 347 sc->sc_upd[i].upd_frags[0].fr_len = 348 htole32((MCLBYTES - 2) | EX_FR_LAST); 349 if (ex_add_rxbuf(sc, &sc->sc_rxdescs[i]) != 0) { 350 aprint_error_dev(sc->sc_dev, 351 "can't allocate or map rx buffers\n"); 352 goto fail; 353 } 354 } 355 356 bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap, 0, 357 EX_NUPD * sizeof (struct ex_upd), 358 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 359 360 ex_init_txdescs(sc); 361 362 attach_stage = 11; 363 364 365 GO_WINDOW(3); 366 val = bus_space_read_2(iot, ioh, ELINK_W3_RESET_OPTIONS); 367 if (val & ELINK_MEDIACAP_MII) 368 sc->ex_conf |= EX_CONF_MII; 369 370 ifp = &sc->sc_ethercom.ec_if; 371 372 /* 373 * Initialize our media structures and MII info. We'll 374 * probe the MII if we discover that we have one. 375 */ 376 sc->ex_mii.mii_ifp = ifp; 377 sc->ex_mii.mii_readreg = ex_mii_readreg; 378 sc->ex_mii.mii_writereg = ex_mii_writereg; 379 sc->ex_mii.mii_statchg = ex_mii_statchg; 380 ifmedia_init(&sc->ex_mii.mii_media, IFM_IMASK, ex_media_chg, 381 ex_media_stat); 382 383 if (sc->ex_conf & EX_CONF_MII) { 384 /* 385 * Find PHY, extract media information from it. 386 * First, select the right transceiver. 
387 */ 388 ex_set_xcvr(sc, val); 389 390 mii_attach(sc->sc_dev, &sc->ex_mii, 0xffffffff, 391 MII_PHY_ANY, MII_OFFSET_ANY, 0); 392 if (LIST_FIRST(&sc->ex_mii.mii_phys) == NULL) { 393 ifmedia_add(&sc->ex_mii.mii_media, IFM_ETHER|IFM_NONE, 394 0, NULL); 395 ifmedia_set(&sc->ex_mii.mii_media, IFM_ETHER|IFM_NONE); 396 } else { 397 ifmedia_set(&sc->ex_mii.mii_media, IFM_ETHER|IFM_AUTO); 398 } 399 } else 400 ex_probemedia(sc); 401 402 strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ); 403 ifp->if_softc = sc; 404 ifp->if_start = ex_start; 405 ifp->if_ioctl = ex_ioctl; 406 ifp->if_watchdog = ex_watchdog; 407 ifp->if_init = ex_init; 408 ifp->if_stop = ex_stop; 409 ifp->if_flags = 410 IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST; 411 sc->sc_if_flags = ifp->if_flags; 412 IFQ_SET_READY(&ifp->if_snd); 413 414 /* 415 * We can support 802.1Q VLAN-sized frames. 416 */ 417 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU; 418 419 /* 420 * The 3c90xB has hardware IPv4/TCPv4/UDPv4 checksum support. 421 */ 422 if (sc->ex_conf & EX_CONF_90XB) 423 sc->sc_ethercom.ec_if.if_capabilities |= 424 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | 425 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | 426 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx; 427 428 if_attach(ifp); 429 ether_ifattach(ifp, macaddr); 430 ether_set_ifflags_cb(&sc->sc_ethercom, ex_ifflags_cb); 431 432 GO_WINDOW(1); 433 434 sc->tx_start_thresh = 20; 435 sc->tx_succ_ok = 0; 436 437 /* TODO: set queues to 0 */ 438 439 rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev), 440 RND_TYPE_NET, 0); 441 442 if (pmf_device_register1(sc->sc_dev, NULL, NULL, ex_shutdown)) 443 pmf_class_network_register(sc->sc_dev, &sc->sc_ethercom.ec_if); 444 else 445 aprint_error_dev(sc->sc_dev, 446 "couldn't establish power handler\n"); 447 448 /* The attach is successful. */ 449 sc->ex_flags |= EX_FLAGS_ATTACHED; 450 return; 451 452 fail: 453 /* 454 * Free any resources we've allocated during the failed attach 455 * attempt. 
Do this in reverse order and fall though. 456 */ 457 switch (attach_stage) { 458 case 11: 459 { 460 struct ex_rxdesc *rxd; 461 462 for (i = 0; i < EX_NUPD; i++) { 463 rxd = &sc->sc_rxdescs[i]; 464 if (rxd->rx_mbhead != NULL) { 465 bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap); 466 m_freem(rxd->rx_mbhead); 467 } 468 } 469 } 470 /* FALLTHROUGH */ 471 472 case 10: 473 for (i = 0; i < EX_NUPD; i++) 474 bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_dmamaps[i]); 475 /* FALLTHROUGH */ 476 477 case 9: 478 for (i = 0; i < EX_NDPD; i++) 479 bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_dmamaps[i]); 480 /* FALLTHROUGH */ 481 case 8: 482 bus_dmamap_unload(sc->sc_dmat, sc->sc_dpd_dmamap); 483 /* FALLTHROUGH */ 484 485 case 7: 486 bus_dmamap_destroy(sc->sc_dmat, sc->sc_dpd_dmamap); 487 /* FALLTHROUGH */ 488 489 case 6: 490 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_dpd, 491 EX_NDPD * sizeof (struct ex_dpd)); 492 /* FALLTHROUGH */ 493 494 case 5: 495 bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_drseg); 496 break; 497 498 case 4: 499 bus_dmamap_unload(sc->sc_dmat, sc->sc_upd_dmamap); 500 /* FALLTHROUGH */ 501 502 case 3: 503 bus_dmamap_destroy(sc->sc_dmat, sc->sc_upd_dmamap); 504 /* FALLTHROUGH */ 505 506 case 2: 507 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_upd, 508 EX_NUPD * sizeof (struct ex_upd)); 509 /* FALLTHROUGH */ 510 511 case 1: 512 bus_dmamem_free(sc->sc_dmat, &sc->sc_useg, sc->sc_urseg); 513 break; 514 } 515 516 } 517 518 /* 519 * Find the media present on non-MII chips. 
 */
void
ex_probemedia(struct ex_softc *sc)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct ifmedia *ifm = &sc->ex_mii.mii_media;
	struct ex_media *exm;
	uint16_t config1, reset_options, default_media;
	int defmedia = 0;
	const char *sep = "", *defmedianame = NULL;

	/* Media-present bits and the configured default both live in W3. */
	GO_WINDOW(3);
	config1 = bus_space_read_2(iot, ioh, ELINK_W3_INTERNAL_CONFIG + 2);
	reset_options = bus_space_read_1(iot, ioh, ELINK_W3_RESET_OPTIONS);
	GO_WINDOW(0);

	default_media = (config1 & CONFIG_MEDIAMASK) >> CONFIG_MEDIAMASK_SHIFT;

	/* Sanity check that there are any media! */
	if ((reset_options & ELINK_PCI_MEDIAMASK) == 0) {
		aprint_error_dev(sc->sc_dev, "no media present!\n");
		ifmedia_add(ifm, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(ifm, IFM_ETHER|IFM_NONE);
		return;
	}

	aprint_normal_dev(sc->sc_dev, "");

#define PRINT(str) aprint_normal("%s%s", sep, str); sep = ", "

	/* Walk the static media table, registering each present medium. */
	for (exm = ex_native_media; exm->exm_name != NULL; exm++) {
		if (reset_options & exm->exm_mpbit) {
			/*
			 * Default media is a little complicated. We
			 * support full-duplex which uses the same
			 * reset options bit.
			 *
			 * XXX Check EEPROM for default to FDX?
			 */
			if (exm->exm_epmedia == default_media) {
				if ((exm->exm_ifmedia & IFM_FDX) == 0) {
					defmedia = exm->exm_ifmedia;
					defmedianame = exm->exm_name;
				}
			} else if (defmedia == 0) {
				defmedia = exm->exm_ifmedia;
				defmedianame = exm->exm_name;
			}
			ifmedia_add(ifm, exm->exm_ifmedia, exm->exm_epmedia,
			    NULL);
			PRINT(exm->exm_name);
		}
	}

#undef PRINT

#ifdef DIAGNOSTIC
	/* The mediamask check above guarantees at least one medium. */
	if (defmedia == 0)
		panic("ex_probemedia: impossible");
#endif

	aprint_normal(", default %s\n", defmedianame);
	ifmedia_set(ifm, defmedia);
}

/*
 * Setup transmitter parameters.
 */
static void
ex_setup_tx(struct ex_softc *sc)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;

	/*
	 * Disable reclaim threshold for 90xB, set free threshold to
	 * 6 * 256 = 1536 for 90x.
	 */
	if (sc->ex_conf & EX_CONF_90XB)
		bus_space_write_2(iot, ioh, ELINK_COMMAND,
		    ELINK_TXRECLTHRESH | 255);
	else
		bus_space_write_1(iot, ioh, ELINK_TXFREETHRESH, 6);

	/* Setup early transmission start threshold. */
	bus_space_write_2(iot, ioh, ELINK_COMMAND,
	    ELINK_TXSTARTTHRESH | sc->tx_start_thresh);
}

/*
 * Bring device up.  (if_init callback.)
 *
 * Resets the rx/tx engines, programs the station address, interrupt
 * masks, media and multicast filter, then enables the receiver and
 * transmitter and starts the MII tick callout.
 */
int
ex_init(struct ifnet *ifp)
{
	struct ex_softc *sc = ifp->if_softc;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int i;
	uint16_t val;
	int error = 0;

	if ((error = ex_enable(sc)) != 0)
		goto out;

	ex_waitcmd(sc);
	ex_stop(ifp, 0);

	GO_WINDOW(2);

	/* Turn on PHY power. */
	if (sc->ex_conf & (EX_CONF_PHY_POWER | EX_CONF_INV_LED_POLARITY)) {
		val = bus_space_read_2(iot, ioh, ELINK_W2_RESET_OPTIONS);
		if (sc->ex_conf & EX_CONF_PHY_POWER)
			val |= ELINK_RESET_OPT_PHYPOWER; /* turn on PHY power */
		if (sc->ex_conf & EX_CONF_INV_LED_POLARITY)
			val |= ELINK_RESET_OPT_LEDPOLAR; /* invert LED polarity */
		bus_space_write_2(iot, ioh, ELINK_W2_RESET_OPTIONS, val);
	}

	/*
	 * Set the station address and clear the station mask. The latter
	 * is needed for 90x cards, 0 is the default for 90xB cards.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		bus_space_write_1(iot, ioh, ELINK_W2_ADDR_0 + i,
		    CLLADDR(ifp->if_sadl)[i]);
		bus_space_write_1(iot, ioh, ELINK_W2_RECVMASK_0 + i, 0);
	}

	GO_WINDOW(3);

	/* Reset both engines; each reset requires a command-done wait. */
	bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_RESET);
	ex_waitcmd(sc);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_RESET);
	ex_waitcmd(sc);

	/* Load Tx parameters. */
	ex_setup_tx(sc);

	bus_space_write_2(iot, ioh, ELINK_COMMAND,
	    SET_RX_EARLY_THRESH | ELINK_THRESH_DISABLE);

	bus_space_write_4(iot, ioh, ELINK_DMACTRL,
	    bus_space_read_4(iot, ioh, ELINK_DMACTRL) | ELINK_DMAC_UPRXEAREN);

	/* Unmask the interrupts we service, then ack anything pending. */
	bus_space_write_2(iot, ioh, ELINK_COMMAND,
	    SET_RD_0_MASK | XL_WATCHED_INTERRUPTS);
	bus_space_write_2(iot, ioh, ELINK_COMMAND,
	    SET_INTR_MASK | XL_WATCHED_INTERRUPTS);

	bus_space_write_2(iot, ioh, ELINK_COMMAND, ACK_INTR | 0xff);
	/* Optional bus-front-end hook for boards needing extra intr ack. */
	if (sc->intr_ack)
		(* sc->intr_ack)(sc);
	ex_set_media(sc);
	ex_set_mc(sc);


	bus_space_write_2(iot, ioh, ELINK_COMMAND, STATS_ENABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_ENABLE);
	/* Hand the upload (rx) descriptor ring to the DMA engine. */
	bus_space_write_4(iot, ioh, ELINK_UPLISTPTR, sc->sc_upddma);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_ENABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, ELINK_UPUNSTALL);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ex_start(ifp);
	sc->sc_if_flags = ifp->if_flags;

	GO_WINDOW(1);

	callout_reset(&sc->ex_mii_callout, hz, ex_tick, sc);

 out:
	if (error) {
		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
		ifp->if_timer = 0;
		aprint_error_dev(sc->sc_dev, "interface not running\n");
	}
	return (error);
}

/* Multicast hash: top bits of big-endian CRC32 of the address. */
#define MCHASHSIZE 256
#define ex_mchash(addr) (ether_crc32_be((addr), ETHER_ADDR_LEN) & \
	    (MCHASHSIZE - 1))

/*
 * Set multicast receive filter. Also take care of promiscuous mode
 * here (XXX).
710 */ 711 void 712 ex_set_mc(struct ex_softc *sc) 713 { 714 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 715 struct ethercom *ec = &sc->sc_ethercom; 716 struct ether_multi *enm; 717 struct ether_multistep estep; 718 int i; 719 uint16_t mask = FIL_INDIVIDUAL | FIL_BRDCST; 720 721 if (ifp->if_flags & IFF_PROMISC) { 722 mask |= FIL_PROMISC; 723 goto allmulti; 724 } 725 726 ETHER_FIRST_MULTI(estep, ec, enm); 727 if (enm == NULL) 728 goto nomulti; 729 730 if ((sc->ex_conf & EX_CONF_90XB) == 0) 731 /* No multicast hash filtering. */ 732 goto allmulti; 733 734 for (i = 0; i < MCHASHSIZE; i++) 735 bus_space_write_2(sc->sc_iot, sc->sc_ioh, 736 ELINK_COMMAND, ELINK_CLEARHASHFILBIT | i); 737 738 do { 739 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 740 ETHER_ADDR_LEN) != 0) 741 goto allmulti; 742 743 i = ex_mchash(enm->enm_addrlo); 744 bus_space_write_2(sc->sc_iot, sc->sc_ioh, 745 ELINK_COMMAND, ELINK_SETHASHFILBIT | i); 746 ETHER_NEXT_MULTI(estep, enm); 747 } while (enm != NULL); 748 mask |= FIL_MULTIHASH; 749 750 nomulti: 751 ifp->if_flags &= ~IFF_ALLMULTI; 752 bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_COMMAND, 753 SET_RX_FILTER | mask); 754 return; 755 756 allmulti: 757 ifp->if_flags |= IFF_ALLMULTI; 758 mask |= FIL_MULTICAST; 759 bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_COMMAND, 760 SET_RX_FILTER | mask); 761 } 762 763 764 /* 765 * The Tx Complete interrupts occur only on errors, 766 * and this is the error handler. 767 */ 768 static void 769 ex_txstat(struct ex_softc *sc) 770 { 771 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 772 bus_space_tag_t iot = sc->sc_iot; 773 bus_space_handle_t ioh = sc->sc_ioh; 774 int i, err = 0; 775 776 /* 777 * We need to read+write TX_STATUS until we get a 0 status 778 * in order to turn off the interrupt flag. 779 * ELINK_TXSTATUS is in the upper byte of 2 with ELINK_TIMER. 
780 */ 781 for (;;) { 782 i = bus_space_read_2(iot, ioh, ELINK_TIMER); 783 if ((i & TXS_COMPLETE) == 0) 784 break; 785 bus_space_write_2(iot, ioh, ELINK_TIMER, 0x0); 786 err |= i; 787 } 788 err &= ~TXS_TIMER; 789 790 if ((err & (TXS_UNDERRUN | TXS_JABBER | TXS_RECLAIM)) 791 || err == 0 /* should not happen, just in case */) { 792 /* 793 * Make sure the transmission is stopped. 794 */ 795 bus_space_write_2(iot, ioh, ELINK_COMMAND, ELINK_DNSTALL); 796 for (i = 1000; i > 0; i--) 797 if ((bus_space_read_4(iot, ioh, ELINK_DMACTRL) & 798 ELINK_DMAC_DNINPROG) == 0) 799 break; 800 801 /* 802 * Reset the transmitter. 803 */ 804 bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_RESET); 805 806 /* Resetting takes a while and we will do more than wait. */ 807 808 ifp->if_flags &= ~IFF_OACTIVE; 809 ++sc->sc_ethercom.ec_if.if_oerrors; 810 aprint_error_dev(sc->sc_dev, "%s%s%s", 811 (err & TXS_UNDERRUN) ? " transmit underrun" : "", 812 (err & TXS_JABBER) ? " jabber" : "", 813 (err & TXS_RECLAIM) ? " reclaim" : ""); 814 if (err == 0) 815 aprint_error(" unknown Tx error"); 816 printf(" (%x)", err); 817 if (err & TXS_UNDERRUN) { 818 aprint_error(" @%d", sc->tx_start_thresh); 819 if (sc->tx_succ_ok < 256 && 820 (i = min(ETHER_MAX_LEN, sc->tx_start_thresh + 20)) 821 > sc->tx_start_thresh) { 822 aprint_error(", new threshold is %d", i); 823 sc->tx_start_thresh = i; 824 } 825 sc->tx_succ_ok = 0; 826 } 827 aprint_error("\n"); 828 if (err & TXS_MAX_COLLISION) 829 ++sc->sc_ethercom.ec_if.if_collisions; 830 831 /* Wait for TX_RESET to finish. */ 832 ex_waitcmd(sc); 833 834 /* Reload Tx parameters. */ 835 ex_setup_tx(sc); 836 } else { 837 if (err & TXS_MAX_COLLISION) 838 ++sc->sc_ethercom.ec_if.if_collisions; 839 sc->sc_ethercom.ec_if.if_flags &= ~IFF_OACTIVE; 840 } 841 842 bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_ENABLE); 843 844 /* Retransmit current packet if any. 
*/ 845 if (sc->tx_head) { 846 ifp->if_flags |= IFF_OACTIVE; 847 bus_space_write_2(iot, ioh, ELINK_COMMAND, 848 ELINK_DNUNSTALL); 849 bus_space_write_4(iot, ioh, ELINK_DNLISTPTR, 850 DPD_DMADDR(sc, sc->tx_head)); 851 852 /* Retrigger watchdog if stopped. */ 853 if (ifp->if_timer == 0) 854 ifp->if_timer = 1; 855 } 856 } 857 858 int 859 ex_media_chg(struct ifnet *ifp) 860 { 861 862 if (ifp->if_flags & IFF_UP) 863 ex_init(ifp); 864 return 0; 865 } 866 867 void 868 ex_set_xcvr(struct ex_softc *sc, const uint16_t media) 869 { 870 bus_space_tag_t iot = sc->sc_iot; 871 bus_space_handle_t ioh = sc->sc_ioh; 872 uint32_t icfg; 873 874 /* 875 * We're already in Window 3 876 */ 877 icfg = bus_space_read_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG); 878 icfg &= ~(CONFIG_XCVR_SEL << 16); 879 if (media & (ELINK_MEDIACAP_MII | ELINK_MEDIACAP_100BASET4)) 880 icfg |= ELINKMEDIA_MII << (CONFIG_XCVR_SEL_SHIFT + 16); 881 if (media & ELINK_MEDIACAP_100BASETX) 882 icfg |= ELINKMEDIA_AUTO << (CONFIG_XCVR_SEL_SHIFT + 16); 883 if (media & ELINK_MEDIACAP_100BASEFX) 884 icfg |= ELINKMEDIA_100BASE_FX 885 << (CONFIG_XCVR_SEL_SHIFT + 16); 886 bus_space_write_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG, icfg); 887 } 888 889 void 890 ex_set_media(struct ex_softc *sc) 891 { 892 bus_space_tag_t iot = sc->sc_iot; 893 bus_space_handle_t ioh = sc->sc_ioh; 894 uint32_t configreg; 895 896 if (((sc->ex_conf & EX_CONF_MII) && 897 (sc->ex_mii.mii_media_active & IFM_FDX)) 898 || (!(sc->ex_conf & EX_CONF_MII) && 899 (sc->ex_mii.mii_media.ifm_media & IFM_FDX))) { 900 bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL, 901 MAC_CONTROL_FDX); 902 } else { 903 bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL, 0); 904 } 905 906 /* 907 * If the device has MII, select it, and then tell the 908 * PHY which media to use. 
909 */ 910 if (sc->ex_conf & EX_CONF_MII) { 911 uint16_t val; 912 913 GO_WINDOW(3); 914 val = bus_space_read_2(iot, ioh, ELINK_W3_RESET_OPTIONS); 915 ex_set_xcvr(sc, val); 916 mii_mediachg(&sc->ex_mii); 917 return; 918 } 919 920 GO_WINDOW(4); 921 bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE, 0); 922 bus_space_write_2(iot, ioh, ELINK_COMMAND, STOP_TRANSCEIVER); 923 delay(800); 924 925 /* 926 * Now turn on the selected media/transceiver. 927 */ 928 switch (IFM_SUBTYPE(sc->ex_mii.mii_media.ifm_cur->ifm_media)) { 929 case IFM_10_T: 930 bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE, 931 JABBER_GUARD_ENABLE|LINKBEAT_ENABLE); 932 break; 933 934 case IFM_10_2: 935 bus_space_write_2(iot, ioh, ELINK_COMMAND, START_TRANSCEIVER); 936 DELAY(800); 937 break; 938 939 case IFM_100_TX: 940 case IFM_100_FX: 941 bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE, 942 LINKBEAT_ENABLE); 943 DELAY(800); 944 break; 945 946 case IFM_10_5: 947 bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE, 948 SQE_ENABLE); 949 DELAY(800); 950 break; 951 952 case IFM_MANUAL: 953 break; 954 955 case IFM_NONE: 956 return; 957 958 default: 959 panic("ex_set_media: impossible"); 960 } 961 962 GO_WINDOW(3); 963 configreg = bus_space_read_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG); 964 965 configreg &= ~(CONFIG_MEDIAMASK << 16); 966 configreg |= (sc->ex_mii.mii_media.ifm_cur->ifm_data << 967 (CONFIG_MEDIAMASK_SHIFT + 16)); 968 969 bus_space_write_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG, configreg); 970 } 971 972 /* 973 * Get currently-selected media from card. 974 * (if_media callback, may be called before interface is brought up). 
975 */ 976 void 977 ex_media_stat(struct ifnet *ifp, struct ifmediareq *req) 978 { 979 struct ex_softc *sc = ifp->if_softc; 980 uint16_t help; 981 982 if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) == (IFF_UP|IFF_RUNNING)) { 983 if (sc->ex_conf & EX_CONF_MII) { 984 mii_pollstat(&sc->ex_mii); 985 req->ifm_status = sc->ex_mii.mii_media_status; 986 req->ifm_active = sc->ex_mii.mii_media_active; 987 } else { 988 GO_WINDOW(4); 989 req->ifm_status = IFM_AVALID; 990 req->ifm_active = 991 sc->ex_mii.mii_media.ifm_cur->ifm_media; 992 help = bus_space_read_2(sc->sc_iot, sc->sc_ioh, 993 ELINK_W4_MEDIA_TYPE); 994 if (help & LINKBEAT_DETECT) 995 req->ifm_status |= IFM_ACTIVE; 996 GO_WINDOW(1); 997 } 998 } 999 } 1000 1001 1002 1003 /* 1004 * Start outputting on the interface. 1005 */ 1006 static void 1007 ex_start(struct ifnet *ifp) 1008 { 1009 struct ex_softc *sc = ifp->if_softc; 1010 bus_space_tag_t iot = sc->sc_iot; 1011 bus_space_handle_t ioh = sc->sc_ioh; 1012 volatile struct ex_fraghdr *fr = NULL; 1013 volatile struct ex_dpd *dpd = NULL, *prevdpd = NULL; 1014 struct ex_txdesc *txp; 1015 struct mbuf *mb_head; 1016 bus_dmamap_t dmamap; 1017 int m_csumflags, offset, seglen, totlen, segment, error; 1018 uint32_t csum_flags; 1019 1020 if (sc->tx_head || sc->tx_free == NULL) 1021 return; 1022 1023 txp = NULL; 1024 1025 /* 1026 * We're finished if there is nothing more to add to the list or if 1027 * we're all filled up with buffers to transmit. 1028 */ 1029 while (sc->tx_free != NULL) { 1030 /* 1031 * Grab a packet to transmit. 1032 */ 1033 IFQ_DEQUEUE(&ifp->if_snd, mb_head); 1034 if (mb_head == NULL) 1035 break; 1036 1037 /* 1038 * mb_head might be updated later, 1039 * so preserve csum_flags here. 1040 */ 1041 m_csumflags = mb_head->m_pkthdr.csum_flags; 1042 1043 /* 1044 * Get pointer to next available tx desc. 
1045 */ 1046 txp = sc->tx_free; 1047 dmamap = txp->tx_dmamap; 1048 1049 /* 1050 * Go through each of the mbufs in the chain and initialize 1051 * the transmit buffer descriptors with the physical address 1052 * and size of the mbuf. 1053 */ 1054 reload: 1055 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, 1056 mb_head, BUS_DMA_WRITE|BUS_DMA_NOWAIT); 1057 switch (error) { 1058 case 0: 1059 /* Success. */ 1060 break; 1061 1062 case EFBIG: 1063 { 1064 struct mbuf *mn; 1065 1066 /* 1067 * We ran out of segments. We have to recopy this 1068 * mbuf chain first. Bail out if we can't get the 1069 * new buffers. 1070 */ 1071 aprint_error_dev(sc->sc_dev, "too many segments, "); 1072 1073 MGETHDR(mn, M_DONTWAIT, MT_DATA); 1074 if (mn == NULL) { 1075 m_freem(mb_head); 1076 aprint_error("aborting\n"); 1077 goto out; 1078 } 1079 if (mb_head->m_pkthdr.len > MHLEN) { 1080 MCLGET(mn, M_DONTWAIT); 1081 if ((mn->m_flags & M_EXT) == 0) { 1082 m_freem(mn); 1083 m_freem(mb_head); 1084 aprint_error("aborting\n"); 1085 goto out; 1086 } 1087 } 1088 m_copydata(mb_head, 0, mb_head->m_pkthdr.len, 1089 mtod(mn, void *)); 1090 mn->m_pkthdr.len = mn->m_len = mb_head->m_pkthdr.len; 1091 m_freem(mb_head); 1092 mb_head = mn; 1093 aprint_error("retrying\n"); 1094 goto reload; 1095 } 1096 1097 default: 1098 /* 1099 * Some other problem; report it. 1100 */ 1101 aprint_error_dev(sc->sc_dev, 1102 "can't load mbuf chain, error = %d\n", error); 1103 m_freem(mb_head); 1104 goto out; 1105 } 1106 1107 /* 1108 * remove our tx desc from freelist. 
1109 */ 1110 sc->tx_free = txp->tx_next; 1111 txp->tx_next = NULL; 1112 1113 fr = &txp->tx_dpd->dpd_frags[0]; 1114 totlen = 0; 1115 for (segment = 0; segment < dmamap->dm_nsegs; segment++, fr++) { 1116 fr->fr_addr = htole32(dmamap->dm_segs[segment].ds_addr); 1117 seglen = dmamap->dm_segs[segment].ds_len; 1118 fr->fr_len = htole32(seglen); 1119 totlen += seglen; 1120 } 1121 if (__predict_false(totlen <= EX_IP4CSUMTX_PADLEN && 1122 (m_csumflags & M_CSUM_IPv4) != 0)) { 1123 /* 1124 * Pad short packets to avoid ip4csum-tx bug. 1125 * 1126 * XXX Should we still consider if such short 1127 * (36 bytes or less) packets might already 1128 * occupy EX_NTFRAG (== 32) fragments here? 1129 */ 1130 KASSERT(segment < EX_NTFRAGS); 1131 fr->fr_addr = htole32(DPDMEMPAD_DMADDR(sc)); 1132 seglen = EX_IP4CSUMTX_PADLEN + 1 - totlen; 1133 fr->fr_len = htole32(EX_FR_LAST | seglen); 1134 totlen += seglen; 1135 } else { 1136 fr--; 1137 fr->fr_len |= htole32(EX_FR_LAST); 1138 } 1139 txp->tx_mbhead = mb_head; 1140 1141 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize, 1142 BUS_DMASYNC_PREWRITE); 1143 1144 dpd = txp->tx_dpd; 1145 dpd->dpd_nextptr = 0; 1146 dpd->dpd_fsh = htole32(totlen); 1147 1148 /* Byte-swap constants so compiler can optimize. 
*/ 1149 1150 if (sc->ex_conf & EX_CONF_90XB) { 1151 csum_flags = 0; 1152 1153 if (m_csumflags & M_CSUM_IPv4) 1154 csum_flags |= htole32(EX_DPD_IPCKSUM); 1155 1156 if (m_csumflags & M_CSUM_TCPv4) 1157 csum_flags |= htole32(EX_DPD_TCPCKSUM); 1158 else if (m_csumflags & M_CSUM_UDPv4) 1159 csum_flags |= htole32(EX_DPD_UDPCKSUM); 1160 1161 dpd->dpd_fsh |= csum_flags; 1162 } else { 1163 KDASSERT((mb_head->m_pkthdr.csum_flags & 1164 (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) == 0); 1165 } 1166 1167 bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap, 1168 ((const char *)(intptr_t)dpd - (const char *)sc->sc_dpd), 1169 sizeof (struct ex_dpd), 1170 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1171 1172 /* 1173 * No need to stall the download engine, we know it's 1174 * not busy right now. 1175 * 1176 * Fix up pointers in both the "soft" tx and the physical 1177 * tx list. 1178 */ 1179 if (sc->tx_head != NULL) { 1180 prevdpd = sc->tx_tail->tx_dpd; 1181 offset = ((const char *)(intptr_t)prevdpd - (const char *)sc->sc_dpd); 1182 bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap, 1183 offset, sizeof (struct ex_dpd), 1184 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1185 prevdpd->dpd_nextptr = htole32(DPD_DMADDR(sc, txp)); 1186 bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap, 1187 offset, sizeof (struct ex_dpd), 1188 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1189 sc->tx_tail->tx_next = txp; 1190 sc->tx_tail = txp; 1191 } else { 1192 sc->tx_tail = sc->tx_head = txp; 1193 } 1194 1195 /* 1196 * Pass packet to bpf if there is a listener. 
	 */
		bpf_mtap(ifp, mb_head);
	}
 out:
	if (sc->tx_head) {
		/*
		 * Request a download-complete interrupt on the last DPD
		 * queued, flush the descriptor to memory, then unstall the
		 * download engine and point it at the head of the chain.
		 */
		sc->tx_tail->tx_dpd->dpd_fsh |= htole32(EX_DPD_DNIND);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
		    ((char *)sc->tx_tail->tx_dpd - (char *)sc->sc_dpd),
		    sizeof (struct ex_dpd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		ifp->if_flags |= IFF_OACTIVE;
		bus_space_write_2(iot, ioh, ELINK_COMMAND, ELINK_DNUNSTALL);
		bus_space_write_4(iot, ioh, ELINK_DNLISTPTR,
		    DPD_DMADDR(sc, sc->tx_head));

		/* trigger watchdog */
		ifp->if_timer = 5;
	}
}


/*
 * Interrupt handler.  Reads ELINK_STATUS in a loop, acknowledging and
 * dispatching each watched condition (host error, statistics overflow,
 * transmit complete, download complete, upload complete), until no
 * watched or latched interrupt remains.  Returns nonzero if any
 * interrupt was serviced, and restarts transmission if the send queue
 * is non-empty.
 */
int
ex_intr(void *arg)
{
	struct ex_softc *sc = arg;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	uint16_t stat;
	int ret = 0;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	/* Ignore interrupts when not running or mid-detach. */
	if ((ifp->if_flags & IFF_RUNNING) == 0 ||
	    !device_is_active(sc->sc_dev))
		return (0);

	for (;;) {
		stat = bus_space_read_2(iot, ioh, ELINK_STATUS);

		if ((stat & XL_WATCHED_INTERRUPTS) == 0) {
			if ((stat & INTR_LATCH) == 0) {
#if 0
				aprint_error_dev(sc->sc_dev,
				    "intr latch cleared\n");
#endif
				break;
			}
		}

		ret = 1;

		/*
		 * Acknowledge interrupts.
		 */
		bus_space_write_2(iot, ioh, ELINK_COMMAND, ACK_INTR |
		    (stat & (XL_WATCHED_INTERRUPTS | INTR_LATCH)));
		if (sc->intr_ack)
			(*sc->intr_ack)(sc);

		if (stat & HOST_ERROR) {
			/* Adapter wedged: full reset and reinitialize. */
			aprint_error_dev(sc->sc_dev,
			    "adapter failure (%x)\n", stat);
			ex_reset(sc);
			ex_init(ifp);
			return 1;
		}
		if (stat & UPD_STATS) {
			/* Statistics counter overflow: harvest counters. */
			ex_getstats(sc);
		}
		if (stat & TX_COMPLETE) {
			ex_txstat(sc);
#if 0
			if (stat & DN_COMPLETE)
				aprint_error_dev(sc->sc_dev,
				    "Ignoring Dn interrupt (%x)\n", stat);
#endif
			/*
			 * In some rare cases, both Tx Complete and
			 * Dn Complete bits are set.  However, the packet
			 * has been reloaded in ex_txstat() and should not
			 * handle the Dn Complete event here.
			 * Hence the "else" below.
			 */
		} else if (stat & DN_COMPLETE) {
			struct ex_txdesc *txp, *ptxp = NULL;
			bus_dmamap_t txmap;

			/* reset watchdog timer, was set in ex_start() */
			ifp->if_timer = 0;

			/*
			 * All queued DPDs are done: sync each descriptor,
			 * unload its payload map and free the mbuf chain.
			 */
			for (txp = sc->tx_head; txp != NULL;
			    txp = txp->tx_next) {
				bus_dmamap_sync(sc->sc_dmat,
				    sc->sc_dpd_dmamap,
				    (char *)txp->tx_dpd - (char *)sc->sc_dpd,
				    sizeof (struct ex_dpd),
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
				if (txp->tx_mbhead != NULL) {
					txmap = txp->tx_dmamap;
					bus_dmamap_sync(sc->sc_dmat, txmap,
					    0, txmap->dm_mapsize,
					    BUS_DMASYNC_POSTWRITE);
					bus_dmamap_unload(sc->sc_dmat, txmap);
					m_freem(txp->tx_mbhead);
					txp->tx_mbhead = NULL;
				}
				ptxp = txp;
			}

			/*
			 * Move finished tx buffers back to the tx free list.
			 */
			if (sc->tx_free) {
				sc->tx_ftail->tx_next = sc->tx_head;
				sc->tx_ftail = ptxp;
			} else
				sc->tx_ftail = sc->tx_free = sc->tx_head;

			sc->tx_head = sc->tx_tail = NULL;
			ifp->if_flags &= ~IFF_OACTIVE;

			/* Saturating success counter (capped at 256). */
			if (sc->tx_succ_ok < 256)
				sc->tx_succ_ok++;
		}

		if (stat & UP_COMPLETE) {
			struct ex_rxdesc *rxd;
			struct mbuf *m;
			struct ex_upd *upd;
			bus_dmamap_t rxmap;
			uint32_t pktstat;

 rcvloop:
			/* Drain completed UPDs from the head of the chain. */
			rxd = sc->rx_head;
			rxmap = rxd->rx_dmamap;
			m = rxd->rx_mbhead;
			upd = rxd->rx_upd;

			bus_dmamap_sync(sc->sc_dmat, rxmap, 0,
			    rxmap->dm_mapsize,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap,
			    ((char *)upd - (char *)sc->sc_upd),
			    sizeof (struct ex_upd),
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			pktstat = le32toh(upd->upd_pktstatus);

			if (pktstat & EX_UPD_COMPLETE) {
				/*
				 * Remove first packet from the chain.
				 */
				sc->rx_head = rxd->rx_next;
				rxd->rx_next = NULL;

				/*
				 * Add a new buffer to the receive chain.
				 * If this fails, the old buffer is recycled
				 * instead.
				 */
				if (ex_add_rxbuf(sc, rxd) == 0) {
					uint16_t total_len;

					/*
					 * Error mask depends on whether
					 * oversized (VLAN) frames are
					 * acceptable.
					 */
					if (pktstat &
					    ((sc->sc_ethercom.ec_capenable &
					    ETHERCAP_VLAN_MTU) ?
					    EX_UPD_ERR_VLAN : EX_UPD_ERR)) {
						ifp->if_ierrors++;
						m_freem(m);
						goto rcvloop;
					}

					/* Runt frames are dropped. */
					total_len = pktstat & EX_UPD_PKTLENMASK;
					if (total_len <
					    sizeof(struct ether_header)) {
						m_freem(m);
						goto rcvloop;
					}
					m->m_pkthdr.rcvif = ifp;
					m->m_pkthdr.len = m->m_len = total_len;
					bpf_mtap(ifp, m);
		/*
		 * Set the incoming checksum information for the packet.
		 */
					if ((sc->ex_conf & EX_CONF_90XB) != 0 &&
					    (pktstat & EX_UPD_IPCHECKED) != 0) {
						m->m_pkthdr.csum_flags |=
						    M_CSUM_IPv4;
						if (pktstat & EX_UPD_IPCKSUMERR)
							m->m_pkthdr.csum_flags |=
							    M_CSUM_IPv4_BAD;
						if (pktstat & EX_UPD_TCPCHECKED) {
							m->m_pkthdr.csum_flags |=
							    M_CSUM_TCPv4;
							if (pktstat & EX_UPD_TCPCKSUMERR)
								m->m_pkthdr.csum_flags |=
								    M_CSUM_TCP_UDP_BAD;
						} else if (pktstat & EX_UPD_UDPCHECKED) {
							m->m_pkthdr.csum_flags |=
							    M_CSUM_UDPv4;
							if (pktstat & EX_UPD_UDPCKSUMERR)
								m->m_pkthdr.csum_flags |=
								    M_CSUM_TCP_UDP_BAD;
						}
					}
					(*ifp->if_input)(ifp, m);
				}
				goto rcvloop;
			}
			/*
			 * Just in case we filled up all UPDs and the DMA engine
			 * stalled.  We could be more subtle about this.
			 */
			if (bus_space_read_4(iot, ioh, ELINK_UPLISTPTR) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "uplistptr was 0\n");
				ex_init(ifp);
			} else if (bus_space_read_4(iot, ioh, ELINK_UPPKTSTATUS)
			    & 0x2000) {
				/*
				 * 0x2000 presumably is the "upload stalled"
				 * status bit -- confirm against the 3c90x
				 * documentation.
				 */
				aprint_error_dev(sc->sc_dev,
				    "receive stalled\n");
				bus_space_write_2(iot, ioh, ELINK_COMMAND,
				    ELINK_UPUNSTALL);
			}
		}

		/* Feed the interrupt status into the entropy pool. */
		if (stat)
			rnd_add_uint32(&sc->rnd_source, stat);
	}

	/* no more interrupts */
	if (ret && IFQ_IS_EMPTY(&ifp->if_snd) == 0)
		ex_start(ifp);
	return ret;
}

/*
 * Ethernet interface-flags callback.  Returns ENETRESET (forcing a full
 * reinit by the caller) if anything other than IFF_CANTCHANGE/IFF_DEBUG
 * changed; a bare promiscuous-mode toggle only reprograms the rx filter.
 */
static int
ex_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct ex_softc *sc = ifp->if_softc;
	int change = ifp->if_flags ^ sc->sc_if_flags;

	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
		return ENETRESET;
	else if ((change & IFF_PROMISC) != 0)
		ex_set_mc(sc);
	return 0;
}

/*
 * ioctl entry point.  Media ioctls are handled directly; everything else
 * is passed to ether_ioctl(), with multicast-list changes reprogramming
 * the hardware filter when the interface is running.  Runs at splnet.
 */
int
ex_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct ex_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->ex_mii.mii_media, cmd);
		break;
	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;

		error = 0;

		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
1467 */ 1468 ex_set_mc(sc); 1469 } 1470 break; 1471 } 1472 1473 sc->sc_if_flags = ifp->if_flags; 1474 splx(s); 1475 return (error); 1476 } 1477 1478 void 1479 ex_getstats(struct ex_softc *sc) 1480 { 1481 bus_space_handle_t ioh = sc->sc_ioh; 1482 bus_space_tag_t iot = sc->sc_iot; 1483 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1484 uint8_t upperok; 1485 1486 GO_WINDOW(6); 1487 upperok = bus_space_read_1(iot, ioh, UPPER_FRAMES_OK); 1488 ifp->if_ipackets += bus_space_read_1(iot, ioh, RX_FRAMES_OK); 1489 ifp->if_ipackets += (upperok & 0x03) << 8; 1490 ifp->if_opackets += bus_space_read_1(iot, ioh, TX_FRAMES_OK); 1491 ifp->if_opackets += (upperok & 0x30) << 4; 1492 ifp->if_ierrors += bus_space_read_1(iot, ioh, RX_OVERRUNS); 1493 ifp->if_collisions += bus_space_read_1(iot, ioh, TX_COLLISIONS); 1494 /* 1495 * There seems to be no way to get the exact number of collisions, 1496 * this is the number that occurred at the very least. 1497 */ 1498 ifp->if_collisions += 2 * bus_space_read_1(iot, ioh, 1499 TX_AFTER_X_COLLISIONS); 1500 /* 1501 * Interface byte counts are counted by ether_input() and 1502 * ether_output(), so don't accumulate them here. Just 1503 * read the NIC counters so they don't generate overflow interrupts. 1504 * Upper byte counters are latched from reading the totals, so 1505 * they don't need to be read if we don't need their values. 
1506 */ 1507 (void)bus_space_read_2(iot, ioh, RX_TOTAL_OK); 1508 (void)bus_space_read_2(iot, ioh, TX_TOTAL_OK); 1509 1510 /* 1511 * Clear the following to avoid stats overflow interrupts 1512 */ 1513 (void)bus_space_read_1(iot, ioh, TX_DEFERRALS); 1514 (void)bus_space_read_1(iot, ioh, TX_AFTER_1_COLLISION); 1515 (void)bus_space_read_1(iot, ioh, TX_NO_SQE); 1516 (void)bus_space_read_1(iot, ioh, TX_CD_LOST); 1517 GO_WINDOW(4); 1518 (void)bus_space_read_1(iot, ioh, ELINK_W4_BADSSD); 1519 GO_WINDOW(1); 1520 } 1521 1522 void 1523 ex_printstats(struct ex_softc *sc) 1524 { 1525 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1526 1527 ex_getstats(sc); 1528 printf("in %llu out %llu ierror %llu oerror %llu ibytes %llu obytes " 1529 "%llu\n", (unsigned long long)ifp->if_ipackets, 1530 (unsigned long long)ifp->if_opackets, 1531 (unsigned long long)ifp->if_ierrors, 1532 (unsigned long long)ifp->if_oerrors, 1533 (unsigned long long)ifp->if_ibytes, 1534 (unsigned long long)ifp->if_obytes); 1535 } 1536 1537 void 1538 ex_tick(void *arg) 1539 { 1540 struct ex_softc *sc = arg; 1541 int s; 1542 1543 if (!device_is_active(sc->sc_dev)) 1544 return; 1545 1546 s = splnet(); 1547 1548 if (sc->ex_conf & EX_CONF_MII) 1549 mii_tick(&sc->ex_mii); 1550 1551 if (!(bus_space_read_2((sc)->sc_iot, (sc)->sc_ioh, ELINK_STATUS) 1552 & COMMAND_IN_PROGRESS)) 1553 ex_getstats(sc); 1554 1555 splx(s); 1556 1557 callout_reset(&sc->ex_mii_callout, hz, ex_tick, sc); 1558 } 1559 1560 void 1561 ex_reset(struct ex_softc *sc) 1562 { 1563 uint16_t val = GLOBAL_RESET; 1564 1565 if (sc->ex_conf & EX_CONF_RESETHACK) 1566 val |= 0x10; 1567 bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_COMMAND, val); 1568 /* 1569 * XXX apparently the command in progress bit can't be trusted 1570 * during a reset, so we just always wait this long. Fortunately 1571 * we normally only reset the chip during autoconfig. 
1572 */ 1573 delay(100000); 1574 ex_waitcmd(sc); 1575 } 1576 1577 void 1578 ex_watchdog(struct ifnet *ifp) 1579 { 1580 struct ex_softc *sc = ifp->if_softc; 1581 1582 log(LOG_ERR, "%s: device timeout\n", device_xname(sc->sc_dev)); 1583 ++sc->sc_ethercom.ec_if.if_oerrors; 1584 1585 ex_reset(sc); 1586 ex_init(ifp); 1587 } 1588 1589 void 1590 ex_stop(struct ifnet *ifp, int disable) 1591 { 1592 struct ex_softc *sc = ifp->if_softc; 1593 bus_space_tag_t iot = sc->sc_iot; 1594 bus_space_handle_t ioh = sc->sc_ioh; 1595 struct ex_txdesc *tx; 1596 struct ex_rxdesc *rx; 1597 int i; 1598 1599 bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_DISABLE); 1600 bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_DISABLE); 1601 bus_space_write_2(iot, ioh, ELINK_COMMAND, STOP_TRANSCEIVER); 1602 1603 for (tx = sc->tx_head ; tx != NULL; tx = tx->tx_next) { 1604 if (tx->tx_mbhead == NULL) 1605 continue; 1606 m_freem(tx->tx_mbhead); 1607 tx->tx_mbhead = NULL; 1608 bus_dmamap_unload(sc->sc_dmat, tx->tx_dmamap); 1609 tx->tx_dpd->dpd_fsh = tx->tx_dpd->dpd_nextptr = 0; 1610 bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap, 1611 ((char *)tx->tx_dpd - (char *)sc->sc_dpd), 1612 sizeof (struct ex_dpd), 1613 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1614 } 1615 sc->tx_tail = sc->tx_head = NULL; 1616 ex_init_txdescs(sc); 1617 1618 sc->rx_tail = sc->rx_head = 0; 1619 for (i = 0; i < EX_NUPD; i++) { 1620 rx = &sc->sc_rxdescs[i]; 1621 if (rx->rx_mbhead != NULL) { 1622 bus_dmamap_unload(sc->sc_dmat, rx->rx_dmamap); 1623 m_freem(rx->rx_mbhead); 1624 rx->rx_mbhead = NULL; 1625 } 1626 ex_add_rxbuf(sc, rx); 1627 } 1628 1629 bus_space_write_2(iot, ioh, ELINK_COMMAND, ACK_INTR | INTR_LATCH); 1630 1631 callout_stop(&sc->ex_mii_callout); 1632 if (sc->ex_conf & EX_CONF_MII) 1633 mii_down(&sc->ex_mii); 1634 1635 if (disable) 1636 ex_disable(sc); 1637 1638 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1639 sc->sc_if_flags = ifp->if_flags; 1640 ifp->if_timer = 0; 1641 } 1642 1643 static void 1644 ex_init_txdescs(struct 
ex_softc *sc) 1645 { 1646 int i; 1647 1648 for (i = 0; i < EX_NDPD; i++) { 1649 sc->sc_txdescs[i].tx_dmamap = sc->sc_tx_dmamaps[i]; 1650 sc->sc_txdescs[i].tx_dpd = &sc->sc_dpd[i]; 1651 if (i < EX_NDPD - 1) 1652 sc->sc_txdescs[i].tx_next = &sc->sc_txdescs[i + 1]; 1653 else 1654 sc->sc_txdescs[i].tx_next = NULL; 1655 } 1656 sc->tx_free = &sc->sc_txdescs[0]; 1657 sc->tx_ftail = &sc->sc_txdescs[EX_NDPD-1]; 1658 } 1659 1660 1661 int 1662 ex_activate(device_t self, enum devact act) 1663 { 1664 struct ex_softc *sc = device_private(self); 1665 1666 switch (act) { 1667 case DVACT_DEACTIVATE: 1668 if_deactivate(&sc->sc_ethercom.ec_if); 1669 return 0; 1670 default: 1671 return EOPNOTSUPP; 1672 } 1673 } 1674 1675 int 1676 ex_detach(struct ex_softc *sc) 1677 { 1678 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1679 struct ex_rxdesc *rxd; 1680 int i, s; 1681 1682 /* Succeed now if there's no work to do. */ 1683 if ((sc->ex_flags & EX_FLAGS_ATTACHED) == 0) 1684 return (0); 1685 1686 s = splnet(); 1687 /* Stop the interface. Callouts are stopped in it. */ 1688 ex_stop(ifp, 1); 1689 splx(s); 1690 1691 /* Destroy our callout. */ 1692 callout_destroy(&sc->ex_mii_callout); 1693 1694 if (sc->ex_conf & EX_CONF_MII) { 1695 /* Detach all PHYs */ 1696 mii_detach(&sc->ex_mii, MII_PHY_ANY, MII_OFFSET_ANY); 1697 } 1698 1699 /* Delete all remaining media. 
 */
	ifmedia_delete_instance(&sc->ex_mii.mii_media, IFM_INST_ANY);

	rnd_detach_source(&sc->rnd_source);
	ether_ifdetach(ifp);
	if_detach(ifp);

	/* Release any mbufs still held by the receive descriptors. */
	for (i = 0; i < EX_NUPD; i++) {
		rxd = &sc->sc_rxdescs[i];
		if (rxd->rx_mbhead != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap);
			m_freem(rxd->rx_mbhead);
			rxd->rx_mbhead = NULL;
		}
	}
	for (i = 0; i < EX_NUPD; i++)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_dmamaps[i]);
	for (i = 0; i < EX_NDPD; i++)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_dmamaps[i]);
	/* Unload/destroy/unmap/free: reverse of the attach-time sequence. */
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dpd_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dpd_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_dpd,
	    EX_NDPD * sizeof (struct ex_dpd));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_drseg);
	bus_dmamap_unload(sc->sc_dmat, sc->sc_upd_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_upd_dmamap);
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_upd,
	    EX_NUPD * sizeof (struct ex_upd));
	bus_dmamem_free(sc->sc_dmat, &sc->sc_useg, sc->sc_urseg);

	pmf_device_deregister(sc->sc_dev);

	return (0);
}

/*
 * Before reboots, reset card completely.
 */
static bool
ex_shutdown(device_t self, int flags)
{
	struct ex_softc *sc = device_private(self);

	ex_stop(&sc->sc_ethercom.ec_if, 1);
	/*
	 * Make sure the interface is powered up when we reboot,
	 * otherwise firmware on some systems gets really confused.
	 */
	(void) ex_enable(sc);
	return true;
}

/*
 * Read EEPROM data.
 * XXX what to do if EEPROM doesn't unbusy?
 */
uint16_t
ex_read_eeprom(struct ex_softc *sc, int offset)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	uint16_t data = 0, cmd = READ_EEPROM;
	int off;

	/* Some cards keep their data at a 0x30-word offset (config flag). */
	off = sc->ex_conf & EX_CONF_EEPROM_OFF ? 0x30 : 0;
	cmd = sc->ex_conf & EX_CONF_EEPROM_8BIT ? READ_EEPROM8 : READ_EEPROM;

	GO_WINDOW(0);
	if (ex_eeprom_busy(sc))
		goto out;
	/* Word addresses are 6 bits wide. */
	bus_space_write_2(iot, ioh, ELINK_W0_EEPROM_COMMAND,
	    cmd | (off + (offset & 0x3f)));
	if (ex_eeprom_busy(sc))
		goto out;
	data = bus_space_read_2(iot, ioh, ELINK_W0_EEPROM_DATA);
 out:
	/* Returns 0 if the EEPROM never came out of busy. */
	return data;
}

/*
 * Poll the EEPROM busy bit, 100 tries 100us apart (~10ms total).
 * Returns 0 when idle, 1 (with a console complaint) on timeout.
 */
static int
ex_eeprom_busy(struct ex_softc *sc)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int i = 100;

	while (i--) {
		if (!(bus_space_read_2(iot, ioh, ELINK_W0_EEPROM_COMMAND) &
		    EEPROM_BUSY))
			return 0;
		delay(100);
	}
	aprint_error_dev(sc->sc_dev, "eeprom stays busy.\n");
	return (1);
}

/*
 * Create a new rx buffer and add it to the 'soft' rx list.
 */
static int
ex_add_rxbuf(struct ex_softc *sc, struct ex_rxdesc *rxd)
{
	struct mbuf *m, *oldm;
	bus_dmamap_t rxmap;
	int error, rval = 0;

	oldm = rxd->rx_mbhead;
	rxmap = rxd->rx_dmamap;

	/*
	 * Try to allocate a fresh cluster mbuf; on failure fall back to
	 * recycling the old one (rval = 1 tells the caller the packet in
	 * the old buffer was lost).
	 */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m != NULL) {
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			if (oldm == NULL)
				return 1;
			m = oldm;
			MRESETDATA(m);
			rval = 1;
		}
	} else {
		if (oldm == NULL)
			return 1;
		m = oldm;
		MRESETDATA(m);
		rval = 1;
	}

	/*
	 * Setup the DMA map for this receive buffer.
	 */
	if (m != oldm) {
		if (oldm != NULL)
			bus_dmamap_unload(sc->sc_dmat, rxmap);
		error = bus_dmamap_load(sc->sc_dmat, rxmap,
		    m->m_ext.ext_buf, MCLBYTES, NULL,
		    BUS_DMA_READ|BUS_DMA_NOWAIT);
		if (error) {
			aprint_error_dev(sc->sc_dev, "can't load rx buffer, error = %d\n",
			    error);
			panic("ex_add_rxbuf");	/* XXX */
		}
	}

	/*
	 * Align for data after 14 byte header.
1845 */ 1846 m->m_data += 2; 1847 1848 rxd->rx_mbhead = m; 1849 rxd->rx_upd->upd_pktstatus = htole32(MCLBYTES - 2); 1850 rxd->rx_upd->upd_frags[0].fr_addr = 1851 htole32(rxmap->dm_segs[0].ds_addr + 2); 1852 rxd->rx_upd->upd_nextptr = 0; 1853 1854 /* 1855 * Attach it to the end of the list. 1856 */ 1857 if (sc->rx_head != NULL) { 1858 sc->rx_tail->rx_next = rxd; 1859 sc->rx_tail->rx_upd->upd_nextptr = htole32(sc->sc_upddma + 1860 ((char *)rxd->rx_upd - (char *)sc->sc_upd)); 1861 bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap, 1862 (char *)sc->rx_tail->rx_upd - (char *)sc->sc_upd, 1863 sizeof (struct ex_upd), 1864 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1865 } else { 1866 sc->rx_head = rxd; 1867 } 1868 sc->rx_tail = rxd; 1869 1870 bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize, 1871 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1872 bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap, 1873 ((char *)rxd->rx_upd - (char *)sc->sc_upd), 1874 sizeof (struct ex_upd), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1875 return (rval); 1876 } 1877 1878 uint32_t 1879 ex_mii_bitbang_read(device_t self) 1880 { 1881 struct ex_softc *sc = device_private(self); 1882 1883 /* We're already in Window 4. */ 1884 return (bus_space_read_2(sc->sc_iot, sc->sc_ioh, ELINK_W4_PHYSMGMT)); 1885 } 1886 1887 void 1888 ex_mii_bitbang_write(device_t self, uint32_t val) 1889 { 1890 struct ex_softc *sc = device_private(self); 1891 1892 /* We're already in Window 4. 
*/ 1893 bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_W4_PHYSMGMT, val); 1894 } 1895 1896 int 1897 ex_mii_readreg(device_t v, int phy, int reg) 1898 { 1899 struct ex_softc *sc = device_private(v); 1900 int val; 1901 1902 if ((sc->ex_conf & EX_CONF_INTPHY) && phy != ELINK_INTPHY_ID) 1903 return 0; 1904 1905 GO_WINDOW(4); 1906 1907 val = mii_bitbang_readreg(v, &ex_mii_bitbang_ops, phy, reg); 1908 1909 GO_WINDOW(1); 1910 1911 return (val); 1912 } 1913 1914 void 1915 ex_mii_writereg(device_t v, int phy, int reg, int data) 1916 { 1917 struct ex_softc *sc = device_private(v); 1918 1919 GO_WINDOW(4); 1920 1921 mii_bitbang_writereg(v, &ex_mii_bitbang_ops, phy, reg, data); 1922 1923 GO_WINDOW(1); 1924 } 1925 1926 void 1927 ex_mii_statchg(struct ifnet *ifp) 1928 { 1929 struct ex_softc *sc = ifp->if_softc; 1930 bus_space_tag_t iot = sc->sc_iot; 1931 bus_space_handle_t ioh = sc->sc_ioh; 1932 int mctl; 1933 1934 GO_WINDOW(3); 1935 mctl = bus_space_read_2(iot, ioh, ELINK_W3_MAC_CONTROL); 1936 if (sc->ex_mii.mii_media_active & IFM_FDX) 1937 mctl |= MAC_CONTROL_FDX; 1938 else 1939 mctl &= ~MAC_CONTROL_FDX; 1940 bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL, mctl); 1941 GO_WINDOW(1); /* back to operating window */ 1942 } 1943 1944 int 1945 ex_enable(struct ex_softc *sc) 1946 { 1947 if (sc->enabled == 0 && sc->enable != NULL) { 1948 if ((*sc->enable)(sc) != 0) { 1949 aprint_error_dev(sc->sc_dev, "device enable failed\n"); 1950 return (EIO); 1951 } 1952 sc->enabled = 1; 1953 } 1954 return (0); 1955 } 1956 1957 void 1958 ex_disable(struct ex_softc *sc) 1959 { 1960 if (sc->enabled == 1 && sc->disable != NULL) { 1961 (*sc->disable)(sc); 1962 sc->enabled = 0; 1963 } 1964 } 1965 1966