1 /* $NetBSD: elinkxl.c,v 1.111 2010/03/22 17:11:19 dyoung Exp $ */ 2 3 /*- 4 * Copyright (c) 1998 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Frank van der Linden. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 
30 */ 31 32 #include <sys/cdefs.h> 33 __KERNEL_RCSID(0, "$NetBSD: elinkxl.c,v 1.111 2010/03/22 17:11:19 dyoung Exp $"); 34 35 #include "rnd.h" 36 37 #include <sys/param.h> 38 #include <sys/systm.h> 39 #include <sys/callout.h> 40 #include <sys/kernel.h> 41 #include <sys/mbuf.h> 42 #include <sys/socket.h> 43 #include <sys/ioctl.h> 44 #include <sys/errno.h> 45 #include <sys/syslog.h> 46 #include <sys/select.h> 47 #include <sys/device.h> 48 #if NRND > 0 49 #include <sys/rnd.h> 50 #endif 51 52 #include <uvm/uvm_extern.h> 53 54 #include <net/if.h> 55 #include <net/if_dl.h> 56 #include <net/if_ether.h> 57 #include <net/if_media.h> 58 59 #include <net/bpf.h> 60 #include <net/bpfdesc.h> 61 62 #include <sys/cpu.h> 63 #include <sys/bus.h> 64 #include <sys/intr.h> 65 #include <machine/endian.h> 66 67 #include <dev/mii/miivar.h> 68 #include <dev/mii/mii.h> 69 #include <dev/mii/mii_bitbang.h> 70 71 #include <dev/ic/elink3reg.h> 72 /* #include <dev/ic/elink3var.h> */ 73 #include <dev/ic/elinkxlreg.h> 74 #include <dev/ic/elinkxlvar.h> 75 76 #ifdef DEBUG 77 int exdebug = 0; 78 #endif 79 80 /* ifmedia callbacks */ 81 int ex_media_chg(struct ifnet *ifp); 82 void ex_media_stat(struct ifnet *ifp, struct ifmediareq *req); 83 84 static int ex_ifflags_cb(struct ethercom *); 85 86 void ex_probe_media(struct ex_softc *); 87 void ex_set_filter(struct ex_softc *); 88 void ex_set_media(struct ex_softc *); 89 void ex_set_xcvr(struct ex_softc *, uint16_t); 90 struct mbuf *ex_get(struct ex_softc *, int); 91 uint16_t ex_read_eeprom(struct ex_softc *, int); 92 int ex_init(struct ifnet *); 93 void ex_read(struct ex_softc *); 94 void ex_reset(struct ex_softc *); 95 void ex_set_mc(struct ex_softc *); 96 void ex_getstats(struct ex_softc *); 97 void ex_printstats(struct ex_softc *); 98 void ex_tick(void *); 99 100 static int ex_eeprom_busy(struct ex_softc *); 101 static int ex_add_rxbuf(struct ex_softc *, struct ex_rxdesc *); 102 static void ex_init_txdescs(struct ex_softc *); 103 104 static void 
ex_setup_tx(struct ex_softc *);
static bool ex_shutdown(device_t, int);
static void ex_start(struct ifnet *);
static void ex_txstat(struct ex_softc *);

int ex_mii_readreg(device_t, int, int);
void ex_mii_writereg(device_t, int, int, int);
void ex_mii_statchg(device_t);

void ex_probemedia(struct ex_softc *);

/*
 * Structure to map media-present bits in boards to ifmedia codes and
 * printable media names. Used for table-driven ifmedia initialization.
 */
struct ex_media {
	int	exm_mpbit;		/* media present bit */
	const char *exm_name;		/* name of medium */
	int	exm_ifmedia;		/* ifmedia word for medium */
	int	exm_epmedia;		/* ELINKMEDIA_* constant */
};

/*
 * Media table for 3c90x chips.  Note that chips with MII have no
 * `native' media.
 *
 * Scanned in order by ex_probemedia(); terminated by the all-NULL
 * sentinel entry.
 */
struct ex_media ex_native_media[] = {
	{ ELINK_PCI_10BASE_T,	"10baseT",	IFM_ETHER|IFM_10_T,
	  ELINKMEDIA_10BASE_T },
	{ ELINK_PCI_10BASE_T,	"10baseT-FDX",	IFM_ETHER|IFM_10_T|IFM_FDX,
	  ELINKMEDIA_10BASE_T },
	{ ELINK_PCI_AUI,	"10base5",	IFM_ETHER|IFM_10_5,
	  ELINKMEDIA_AUI },
	{ ELINK_PCI_BNC,	"10base2",	IFM_ETHER|IFM_10_2,
	  ELINKMEDIA_10BASE_2 },
	{ ELINK_PCI_100BASE_TX,	"100baseTX",	IFM_ETHER|IFM_100_TX,
	  ELINKMEDIA_100BASE_TX },
	{ ELINK_PCI_100BASE_TX,	"100baseTX-FDX",IFM_ETHER|IFM_100_TX|IFM_FDX,
	  ELINKMEDIA_100BASE_TX },
	{ ELINK_PCI_100BASE_FX,	"100baseFX",	IFM_ETHER|IFM_100_FX,
	  ELINKMEDIA_100BASE_FX },
	{ ELINK_PCI_100BASE_MII,"manual",	IFM_ETHER|IFM_MANUAL,
	  ELINKMEDIA_MII },
	{ ELINK_PCI_100BASE_T4,	"100baseT4",	IFM_ETHER|IFM_100_T4,
	  ELINKMEDIA_100BASE_T4 },
	{ 0,			NULL,		0,
	  0 },
};

/*
 * MII bit-bang glue.
155 */ 156 uint32_t ex_mii_bitbang_read(device_t); 157 void ex_mii_bitbang_write(device_t, uint32_t); 158 159 const struct mii_bitbang_ops ex_mii_bitbang_ops = { 160 ex_mii_bitbang_read, 161 ex_mii_bitbang_write, 162 { 163 ELINK_PHY_DATA, /* MII_BIT_MDO */ 164 ELINK_PHY_DATA, /* MII_BIT_MDI */ 165 ELINK_PHY_CLK, /* MII_BIT_MDC */ 166 ELINK_PHY_DIR, /* MII_BIT_DIR_HOST_PHY */ 167 0, /* MII_BIT_DIR_PHY_HOST */ 168 } 169 }; 170 171 /* 172 * Back-end attach and configure. 173 */ 174 void 175 ex_config(struct ex_softc *sc) 176 { 177 struct ifnet *ifp; 178 uint16_t val; 179 uint8_t macaddr[ETHER_ADDR_LEN] = {0}; 180 bus_space_tag_t iot = sc->sc_iot; 181 bus_space_handle_t ioh = sc->sc_ioh; 182 int i, error, attach_stage; 183 184 pmf_self_suspensor_init(sc->sc_dev, &sc->sc_suspensor, &sc->sc_qual); 185 186 callout_init(&sc->ex_mii_callout, 0); 187 188 ex_reset(sc); 189 190 val = ex_read_eeprom(sc, EEPROM_OEM_ADDR0); 191 macaddr[0] = val >> 8; 192 macaddr[1] = val & 0xff; 193 val = ex_read_eeprom(sc, EEPROM_OEM_ADDR1); 194 macaddr[2] = val >> 8; 195 macaddr[3] = val & 0xff; 196 val = ex_read_eeprom(sc, EEPROM_OEM_ADDR2); 197 macaddr[4] = val >> 8; 198 macaddr[5] = val & 0xff; 199 200 aprint_normal_dev(sc->sc_dev, "MAC address %s\n", ether_sprintf(macaddr)); 201 202 if (sc->ex_conf & (EX_CONF_INV_LED_POLARITY|EX_CONF_PHY_POWER)) { 203 GO_WINDOW(2); 204 val = bus_space_read_2(iot, ioh, ELINK_W2_RESET_OPTIONS); 205 if (sc->ex_conf & EX_CONF_INV_LED_POLARITY) 206 val |= ELINK_RESET_OPT_LEDPOLAR; 207 if (sc->ex_conf & EX_CONF_PHY_POWER) 208 val |= ELINK_RESET_OPT_PHYPOWER; 209 bus_space_write_2(iot, ioh, ELINK_W2_RESET_OPTIONS, val); 210 } 211 if (sc->ex_conf & EX_CONF_NO_XCVR_PWR) { 212 GO_WINDOW(0); 213 bus_space_write_2(iot, ioh, ELINK_W0_MFG_ID, 214 EX_XCVR_PWR_MAGICBITS); 215 } 216 217 attach_stage = 0; 218 219 /* 220 * Allocate the upload descriptors, and create and load the DMA 221 * map for them. 
222 */ 223 if ((error = bus_dmamem_alloc(sc->sc_dmat, 224 EX_NUPD * sizeof (struct ex_upd), PAGE_SIZE, 0, &sc->sc_useg, 1, 225 &sc->sc_urseg, BUS_DMA_NOWAIT)) != 0) { 226 aprint_error_dev(sc->sc_dev, 227 "can't allocate upload descriptors, error = %d\n", error); 228 goto fail; 229 } 230 231 attach_stage = 1; 232 233 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_useg, sc->sc_urseg, 234 EX_NUPD * sizeof (struct ex_upd), (void **)&sc->sc_upd, 235 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) { 236 aprint_error_dev(sc->sc_dev, 237 "can't map upload descriptors, error = %d\n", error); 238 goto fail; 239 } 240 241 attach_stage = 2; 242 243 if ((error = bus_dmamap_create(sc->sc_dmat, 244 EX_NUPD * sizeof (struct ex_upd), 1, 245 EX_NUPD * sizeof (struct ex_upd), 0, BUS_DMA_NOWAIT, 246 &sc->sc_upd_dmamap)) != 0) { 247 aprint_error_dev(sc->sc_dev, 248 "can't create upload desc. DMA map, error = %d\n", error); 249 goto fail; 250 } 251 252 attach_stage = 3; 253 254 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_upd_dmamap, 255 sc->sc_upd, EX_NUPD * sizeof (struct ex_upd), NULL, 256 BUS_DMA_NOWAIT)) != 0) { 257 aprint_error_dev(sc->sc_dev, 258 "can't load upload desc. DMA map, error = %d\n", error); 259 goto fail; 260 } 261 262 attach_stage = 4; 263 264 /* 265 * Allocate the download descriptors, and create and load the DMA 266 * map for them. 
267 */ 268 if ((error = bus_dmamem_alloc(sc->sc_dmat, 269 DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, PAGE_SIZE, 0, &sc->sc_dseg, 1, 270 &sc->sc_drseg, BUS_DMA_NOWAIT)) != 0) { 271 aprint_error_dev(sc->sc_dev, 272 "can't allocate download descriptors, error = %d\n", error); 273 goto fail; 274 } 275 276 attach_stage = 5; 277 278 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dseg, sc->sc_drseg, 279 DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, (void **)&sc->sc_dpd, 280 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) { 281 aprint_error_dev(sc->sc_dev, 282 "can't map download descriptors, error = %d\n", error); 283 goto fail; 284 } 285 memset(sc->sc_dpd, 0, DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN); 286 287 attach_stage = 6; 288 289 if ((error = bus_dmamap_create(sc->sc_dmat, 290 DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, 1, 291 DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, 0, BUS_DMA_NOWAIT, 292 &sc->sc_dpd_dmamap)) != 0) { 293 aprint_error_dev(sc->sc_dev, 294 "can't create download desc. DMA map, error = %d\n", error); 295 goto fail; 296 } 297 298 attach_stage = 7; 299 300 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dpd_dmamap, 301 sc->sc_dpd, DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, NULL, 302 BUS_DMA_NOWAIT)) != 0) { 303 aprint_error_dev(sc->sc_dev, 304 "can't load download desc. DMA map, error = %d\n", error); 305 goto fail; 306 } 307 bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap, 308 DPDMEMPAD_OFF, EX_IP4CSUMTX_PADLEN, BUS_DMASYNC_PREWRITE); 309 310 attach_stage = 8; 311 312 313 /* 314 * Create the transmit buffer DMA maps. 315 */ 316 for (i = 0; i < EX_NDPD; i++) { 317 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 318 EX_NTFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT, 319 &sc->sc_tx_dmamaps[i])) != 0) { 320 aprint_error_dev(sc->sc_dev, 321 "can't create tx DMA map %d, error = %d\n", 322 i, error); 323 goto fail; 324 } 325 } 326 327 attach_stage = 9; 328 329 /* 330 * Create the receive buffer DMA maps. 
331 */ 332 for (i = 0; i < EX_NUPD; i++) { 333 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 334 EX_NRFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT, 335 &sc->sc_rx_dmamaps[i])) != 0) { 336 aprint_error_dev(sc->sc_dev, 337 "can't create rx DMA map %d, error = %d\n", 338 i, error); 339 goto fail; 340 } 341 } 342 343 attach_stage = 10; 344 345 /* 346 * Create ring of upload descriptors, only once. The DMA engine 347 * will loop over this when receiving packets, stalling if it 348 * hits an UPD with a finished receive. 349 */ 350 for (i = 0; i < EX_NUPD; i++) { 351 sc->sc_rxdescs[i].rx_dmamap = sc->sc_rx_dmamaps[i]; 352 sc->sc_rxdescs[i].rx_upd = &sc->sc_upd[i]; 353 sc->sc_upd[i].upd_frags[0].fr_len = 354 htole32((MCLBYTES - 2) | EX_FR_LAST); 355 if (ex_add_rxbuf(sc, &sc->sc_rxdescs[i]) != 0) { 356 aprint_error_dev(sc->sc_dev, 357 "can't allocate or map rx buffers\n"); 358 goto fail; 359 } 360 } 361 362 bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap, 0, 363 EX_NUPD * sizeof (struct ex_upd), 364 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 365 366 ex_init_txdescs(sc); 367 368 attach_stage = 11; 369 370 371 GO_WINDOW(3); 372 val = bus_space_read_2(iot, ioh, ELINK_W3_RESET_OPTIONS); 373 if (val & ELINK_MEDIACAP_MII) 374 sc->ex_conf |= EX_CONF_MII; 375 376 ifp = &sc->sc_ethercom.ec_if; 377 378 /* 379 * Initialize our media structures and MII info. We'll 380 * probe the MII if we discover that we have one. 381 */ 382 sc->ex_mii.mii_ifp = ifp; 383 sc->ex_mii.mii_readreg = ex_mii_readreg; 384 sc->ex_mii.mii_writereg = ex_mii_writereg; 385 sc->ex_mii.mii_statchg = ex_mii_statchg; 386 ifmedia_init(&sc->ex_mii.mii_media, IFM_IMASK, ex_media_chg, 387 ex_media_stat); 388 389 if (sc->ex_conf & EX_CONF_MII) { 390 /* 391 * Find PHY, extract media information from it. 392 * First, select the right transceiver. 
393 */ 394 ex_set_xcvr(sc, val); 395 396 mii_attach(sc->sc_dev, &sc->ex_mii, 0xffffffff, 397 MII_PHY_ANY, MII_OFFSET_ANY, 0); 398 if (LIST_FIRST(&sc->ex_mii.mii_phys) == NULL) { 399 ifmedia_add(&sc->ex_mii.mii_media, IFM_ETHER|IFM_NONE, 400 0, NULL); 401 ifmedia_set(&sc->ex_mii.mii_media, IFM_ETHER|IFM_NONE); 402 } else { 403 ifmedia_set(&sc->ex_mii.mii_media, IFM_ETHER|IFM_AUTO); 404 } 405 } else 406 ex_probemedia(sc); 407 408 strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ); 409 ifp->if_softc = sc; 410 ifp->if_start = ex_start; 411 ifp->if_ioctl = ex_ioctl; 412 ifp->if_watchdog = ex_watchdog; 413 ifp->if_init = ex_init; 414 ifp->if_stop = ex_stop; 415 ifp->if_flags = 416 IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST; 417 sc->sc_if_flags = ifp->if_flags; 418 IFQ_SET_READY(&ifp->if_snd); 419 420 /* 421 * We can support 802.1Q VLAN-sized frames. 422 */ 423 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU; 424 425 /* 426 * The 3c90xB has hardware IPv4/TCPv4/UDPv4 checksum support. 427 */ 428 if (sc->ex_conf & EX_CONF_90XB) 429 sc->sc_ethercom.ec_if.if_capabilities |= 430 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | 431 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | 432 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx; 433 434 if_attach(ifp); 435 ether_ifattach(ifp, macaddr); 436 ether_set_ifflags_cb(&sc->sc_ethercom, ex_ifflags_cb); 437 438 GO_WINDOW(1); 439 440 sc->tx_start_thresh = 20; 441 sc->tx_succ_ok = 0; 442 443 /* TODO: set queues to 0 */ 444 445 #if NRND > 0 446 rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev), 447 RND_TYPE_NET, 0); 448 #endif 449 450 if (pmf_device_register1(sc->sc_dev, NULL, NULL, ex_shutdown)) 451 pmf_class_network_register(sc->sc_dev, &sc->sc_ethercom.ec_if); 452 else 453 aprint_error_dev(sc->sc_dev, 454 "couldn't establish power handler\n"); 455 456 /* The attach is successful. 
*/ 457 sc->ex_flags |= EX_FLAGS_ATTACHED; 458 return; 459 460 fail: 461 /* 462 * Free any resources we've allocated during the failed attach 463 * attempt. Do this in reverse order and fall though. 464 */ 465 switch (attach_stage) { 466 case 11: 467 { 468 struct ex_rxdesc *rxd; 469 470 for (i = 0; i < EX_NUPD; i++) { 471 rxd = &sc->sc_rxdescs[i]; 472 if (rxd->rx_mbhead != NULL) { 473 bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap); 474 m_freem(rxd->rx_mbhead); 475 } 476 } 477 } 478 /* FALLTHROUGH */ 479 480 case 10: 481 for (i = 0; i < EX_NUPD; i++) 482 bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_dmamaps[i]); 483 /* FALLTHROUGH */ 484 485 case 9: 486 for (i = 0; i < EX_NDPD; i++) 487 bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_dmamaps[i]); 488 /* FALLTHROUGH */ 489 case 8: 490 bus_dmamap_unload(sc->sc_dmat, sc->sc_dpd_dmamap); 491 /* FALLTHROUGH */ 492 493 case 7: 494 bus_dmamap_destroy(sc->sc_dmat, sc->sc_dpd_dmamap); 495 /* FALLTHROUGH */ 496 497 case 6: 498 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_dpd, 499 EX_NDPD * sizeof (struct ex_dpd)); 500 /* FALLTHROUGH */ 501 502 case 5: 503 bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_drseg); 504 break; 505 506 case 4: 507 bus_dmamap_unload(sc->sc_dmat, sc->sc_upd_dmamap); 508 /* FALLTHROUGH */ 509 510 case 3: 511 bus_dmamap_destroy(sc->sc_dmat, sc->sc_upd_dmamap); 512 /* FALLTHROUGH */ 513 514 case 2: 515 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_upd, 516 EX_NUPD * sizeof (struct ex_upd)); 517 /* FALLTHROUGH */ 518 519 case 1: 520 bus_dmamem_free(sc->sc_dmat, &sc->sc_useg, sc->sc_urseg); 521 break; 522 } 523 524 } 525 526 /* 527 * Find the media present on non-MII chips. 
 */
void
ex_probemedia(struct ex_softc *sc)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct ifmedia *ifm = &sc->ex_mii.mii_media;
	struct ex_media *exm;
	uint16_t config1, reset_options, default_media;
	int defmedia = 0;
	const char *sep = "", *defmedianame = NULL;

	/*
	 * Window 3: upper half of INTERNAL_CONFIG holds the default
	 * media selection; RESET_OPTIONS holds the media-present bits.
	 */
	GO_WINDOW(3);
	config1 = bus_space_read_2(iot, ioh, ELINK_W3_INTERNAL_CONFIG + 2);
	reset_options = bus_space_read_1(iot, ioh, ELINK_W3_RESET_OPTIONS);
	GO_WINDOW(0);

	default_media = (config1 & CONFIG_MEDIAMASK) >> CONFIG_MEDIAMASK_SHIFT;

	/* Sanity check that there are any media! */
	if ((reset_options & ELINK_PCI_MEDIAMASK) == 0) {
		aprint_error_dev(sc->sc_dev, "no media present!\n");
		ifmedia_add(ifm, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(ifm, IFM_ETHER|IFM_NONE);
		return;
	}

	aprint_normal_dev(sc->sc_dev, "");

	/* Print a comma-separated list of the media we register. */
#define PRINT(str) aprint_normal("%s%s", sep, str); sep = ", "

	for (exm = ex_native_media; exm->exm_name != NULL; exm++) {
		if (reset_options & exm->exm_mpbit) {
			/*
			 * Default media is a little complicated. We
			 * support full-duplex which uses the same
			 * reset options bit.
			 *
			 * XXX Check EEPROM for default to FDX?
			 */
			if (exm->exm_epmedia == default_media) {
				/* Prefer the half-duplex variant. */
				if ((exm->exm_ifmedia & IFM_FDX) == 0) {
					defmedia = exm->exm_ifmedia;
					defmedianame = exm->exm_name;
				}
			} else if (defmedia == 0) {
				/* First present medium is the fallback. */
				defmedia = exm->exm_ifmedia;
				defmedianame = exm->exm_name;
			}
			ifmedia_add(ifm, exm->exm_ifmedia, exm->exm_epmedia,
			    NULL);
			PRINT(exm->exm_name);
		}
	}

#undef PRINT

#ifdef DIAGNOSTIC
	if (defmedia == 0)
		panic("ex_probemedia: impossible");
#endif

	aprint_normal(", default %s\n", defmedianame);
	ifmedia_set(ifm, defmedia);
}

/*
 * Setup transmitter parameters.
 */
static void
ex_setup_tx(struct ex_softc *sc)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;

	/*
	 * Disable reclaim threshold for 90xB, set free threshold to
	 * 6 * 256 = 1536 for 90x.
	 */
	if (sc->ex_conf & EX_CONF_90XB)
		bus_space_write_2(iot, ioh, ELINK_COMMAND,
		    ELINK_TXRECLTHRESH | 255);
	else
		bus_space_write_1(iot, ioh, ELINK_TXFREETHRESH, 6);

	/* Setup early transmission start threshold. */
	bus_space_write_2(iot, ioh, ELINK_COMMAND,
	    ELINK_TXSTARTTHRESH | sc->tx_start_thresh);
}

/*
 * Bring device up.
 *
 * if_init callback: enables the device, resets rx/tx, programs the
 * station address, interrupt masks, media and multicast filter, then
 * enables the receiver/transmitter and starts output.  Returns 0 on
 * success or the error from ex_enable().
 */
int
ex_init(struct ifnet *ifp)
{
	struct ex_softc *sc = ifp->if_softc;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int i;
	uint16_t val;
	int error = 0;

	if ((error = ex_enable(sc)) != 0)
		goto out;

	ex_waitcmd(sc);
	ex_stop(ifp, 0);

	GO_WINDOW(2);

	/* Turn on PHY power. */
	if (sc->ex_conf & (EX_CONF_PHY_POWER | EX_CONF_INV_LED_POLARITY)) {
		val = bus_space_read_2(iot, ioh, ELINK_W2_RESET_OPTIONS);
		if (sc->ex_conf & EX_CONF_PHY_POWER)
			val |= ELINK_RESET_OPT_PHYPOWER; /* turn on PHY power */
		if (sc->ex_conf & EX_CONF_INV_LED_POLARITY)
			val |= ELINK_RESET_OPT_LEDPOLAR; /* invert LED polarity */
		bus_space_write_2(iot, ioh, ELINK_W2_RESET_OPTIONS, val);
	}

	/*
	 * Set the station address and clear the station mask. The latter
	 * is needed for 90x cards, 0 is the default for 90xB cards.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		bus_space_write_1(iot, ioh, ELINK_W2_ADDR_0 + i,
		    CLLADDR(ifp->if_sadl)[i]);
		bus_space_write_1(iot, ioh, ELINK_W2_RECVMASK_0 + i, 0);
	}

	GO_WINDOW(3);

	bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_RESET);
	ex_waitcmd(sc);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_RESET);
	ex_waitcmd(sc);

	/* Load Tx parameters. */
	ex_setup_tx(sc);

	/* Disable early-receive threshold, enable early-rx DMA. */
	bus_space_write_2(iot, ioh, ELINK_COMMAND,
	    SET_RX_EARLY_THRESH | ELINK_THRESH_DISABLE);

	bus_space_write_4(iot, ioh, ELINK_DMACTRL,
	    bus_space_read_4(iot, ioh, ELINK_DMACTRL) | ELINK_DMAC_UPRXEAREN);

	/* Unmask and enable the interrupts we service. */
	bus_space_write_2(iot, ioh, ELINK_COMMAND,
	    SET_RD_0_MASK | XL_WATCHED_INTERRUPTS);
	bus_space_write_2(iot, ioh, ELINK_COMMAND,
	    SET_INTR_MASK | XL_WATCHED_INTERRUPTS);

	/* Ack anything pending, including bus-specific state if any. */
	bus_space_write_2(iot, ioh, ELINK_COMMAND, ACK_INTR | 0xff);
	if (sc->intr_ack)
		(* sc->intr_ack)(sc);
	ex_set_media(sc);
	ex_set_mc(sc);

	bus_space_write_2(iot, ioh, ELINK_COMMAND, STATS_ENABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_ENABLE);
	/* Point the upload engine at the rx descriptor ring. */
	bus_space_write_4(iot, ioh, ELINK_UPLISTPTR, sc->sc_upddma);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_ENABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, ELINK_UPUNSTALL);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ex_start(ifp);
	sc->sc_if_flags = ifp->if_flags;

	GO_WINDOW(1);

	/* Periodic MII tick / statistics callout. */
	callout_reset(&sc->ex_mii_callout, hz, ex_tick, sc);

 out:
	if (error) {
		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
		ifp->if_timer = 0;
		aprint_error_dev(sc->sc_dev, "interface not running\n");
	}
	return (error);
}

/* Multicast hash: top 8 bits of the big-endian CRC32 of the address. */
#define MCHASHSIZE 256
#define ex_mchash(addr) (ether_crc32_be((addr), ETHER_ADDR_LEN) & \
	    (MCHASHSIZE - 1))

/*
 * Set multicast receive filter. Also take care of promiscuous mode
 * here (XXX).
 */
void
ex_set_mc(struct ex_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ethercom *ec = &sc->sc_ethercom;
	struct ether_multi *enm;
	struct ether_multistep estep;
	int i;
	uint16_t mask = FIL_INDIVIDUAL | FIL_BRDCST;

	/* Promiscuous implies all-multicast as well. */
	if (ifp->if_flags & IFF_PROMISC) {
		mask |= FIL_PROMISC;
		goto allmulti;
	}

	ETHER_FIRST_MULTI(estep, ec, enm);
	if (enm == NULL)
		goto nomulti;

	if ((sc->ex_conf & EX_CONF_90XB) == 0)
		/* No multicast hash filtering. */
		goto allmulti;

	/* 90xB: clear the whole 256-bit hash filter, then set bits. */
	for (i = 0; i < MCHASHSIZE; i++)
		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
		    ELINK_COMMAND, ELINK_CLEARHASHFILBIT | i);

	do {
		/*
		 * An address range (lo != hi) cannot be expressed in
		 * the hash filter; fall back to all-multicast.
		 */
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0)
			goto allmulti;

		i = ex_mchash(enm->enm_addrlo);
		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
		    ELINK_COMMAND, ELINK_SETHASHFILBIT | i);
		ETHER_NEXT_MULTI(estep, enm);
	} while (enm != NULL);
	mask |= FIL_MULTIHASH;

 nomulti:
	ifp->if_flags &= ~IFF_ALLMULTI;
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_COMMAND,
	    SET_RX_FILTER | mask);
	return;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	mask |= FIL_MULTICAST;
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_COMMAND,
	    SET_RX_FILTER | mask);
}


/*
 * The Tx Complete interrupts occur only on errors,
 * and this is the error handler.
 */
static void
ex_txstat(struct ex_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int i, err = 0;

	/*
	 * We need to read+write TX_STATUS until we get a 0 status
	 * in order to turn off the interrupt flag.
	 * ELINK_TXSTATUS is in the upper byte of 2 with ELINK_TIMER.
	 */
	for (;;) {
		i = bus_space_read_2(iot, ioh, ELINK_TIMER);
		if ((i & TXS_COMPLETE) == 0)
			break;
		/* Writing pops the Tx status stack. */
		bus_space_write_2(iot, ioh, ELINK_TIMER, 0x0);
		err |= i;
	}
	/* Mask off the timer byte that shares this register. */
	err &= ~TXS_TIMER;

	if ((err & (TXS_UNDERRUN | TXS_JABBER | TXS_RECLAIM))
	    || err == 0 /* should not happen, just in case */) {
		/*
		 * Make sure the transmission is stopped.
		 */
		bus_space_write_2(iot, ioh, ELINK_COMMAND, ELINK_DNSTALL);
		/* Bounded spin for the download engine to go idle. */
		for (i = 1000; i > 0; i--)
			if ((bus_space_read_4(iot, ioh, ELINK_DMACTRL) &
			    ELINK_DMAC_DNINPROG) == 0)
				break;

		/*
		 * Reset the transmitter.
		 */
		bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_RESET);

		/* Resetting takes a while and we will do more than wait. */

		ifp->if_flags &= ~IFF_OACTIVE;
		++sc->sc_ethercom.ec_if.if_oerrors;
		aprint_error_dev(sc->sc_dev, "%s%s%s",
		    (err & TXS_UNDERRUN) ? " transmit underrun" : "",
		    (err & TXS_JABBER) ? " jabber" : "",
		    (err & TXS_RECLAIM) ? " reclaim" : "");
		if (err == 0)
			aprint_error(" unknown Tx error");
		printf(" (%x)", err);
		if (err & TXS_UNDERRUN) {
			aprint_error(" @%d", sc->tx_start_thresh);
			/*
			 * Bump the start threshold (capped at frame
			 * size) after repeated underruns; reset the
			 * success counter.
			 */
			if (sc->tx_succ_ok < 256 &&
			    (i = min(ETHER_MAX_LEN, sc->tx_start_thresh + 20))
			    > sc->tx_start_thresh) {
				aprint_error(", new threshold is %d", i);
				sc->tx_start_thresh = i;
			}
			sc->tx_succ_ok = 0;
		}
		aprint_error("\n");
		if (err & TXS_MAX_COLLISION)
			++sc->sc_ethercom.ec_if.if_collisions;

		/* Wait for TX_RESET to finish. */
		ex_waitcmd(sc);

		/* Reload Tx parameters. */
		ex_setup_tx(sc);
	} else {
		if (err & TXS_MAX_COLLISION)
			++sc->sc_ethercom.ec_if.if_collisions;
		sc->sc_ethercom.ec_if.if_flags &= ~IFF_OACTIVE;
	}

	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_ENABLE);

	/* Retransmit current packet if any. */
	if (sc->tx_head) {
		ifp->if_flags |= IFF_OACTIVE;
		bus_space_write_2(iot, ioh, ELINK_COMMAND,
		    ELINK_DNUNSTALL);
		bus_space_write_4(iot, ioh, ELINK_DNLISTPTR,
		    DPD_DMADDR(sc, sc->tx_head));

		/* Retrigger watchdog if stopped. */
		if (ifp->if_timer == 0)
			ifp->if_timer = 1;
	}
}

/*
 * ifmedia change callback: reprogram the hardware by re-initializing
 * the interface if it is up.
 */
int
ex_media_chg(struct ifnet *ifp)
{

	if (ifp->if_flags & IFF_UP)
		ex_init(ifp);
	return 0;
}

/*
 * Select the transceiver in INTERNAL_CONFIG based on the media
 * capability bits read from window 3 RESET_OPTIONS.
 */
void
ex_set_xcvr(struct ex_softc *sc, const uint16_t media)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	uint32_t icfg;

	/*
	 * We're already in Window 3
	 */
	icfg = bus_space_read_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG);
	icfg &=  ~(CONFIG_XCVR_SEL << 16);
	if (media & (ELINK_MEDIACAP_MII | ELINK_MEDIACAP_100BASET4))
		icfg |= ELINKMEDIA_MII << (CONFIG_XCVR_SEL_SHIFT + 16);
	if (media & ELINK_MEDIACAP_100BASETX)
		icfg |= ELINKMEDIA_AUTO << (CONFIG_XCVR_SEL_SHIFT + 16);
	if (media & ELINK_MEDIACAP_100BASEFX)
		icfg |= ELINKMEDIA_100BASE_FX
		    << (CONFIG_XCVR_SEL_SHIFT + 16);
	bus_space_write_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG, icfg);
}

/*
 * Program duplex and the selected medium/transceiver into the chip.
 */
void
ex_set_media(struct ex_softc *sc)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	uint32_t configreg;

	/* Full duplex from MII state (MII) or ifmedia word (native). */
	if (((sc->ex_conf & EX_CONF_MII) &&
	    (sc->ex_mii.mii_media_active & IFM_FDX))
	    || (!(sc->ex_conf & EX_CONF_MII) &&
	    (sc->ex_mii.mii_media.ifm_media & IFM_FDX))) {
		bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL,
		    MAC_CONTROL_FDX);
	} else {
		bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL, 0);
	}

	/*
	 * If the device has MII, select it, and then tell the
	 * PHY which media to use.
	 */
	if (sc->ex_conf & EX_CONF_MII) {
		uint16_t val;

		GO_WINDOW(3);
		val = bus_space_read_2(iot, ioh, ELINK_W3_RESET_OPTIONS);
		ex_set_xcvr(sc, val);
		mii_mediachg(&sc->ex_mii);
		return;
	}

	GO_WINDOW(4);
	bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE, 0);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, STOP_TRANSCEIVER);
	delay(800);

	/*
	 * Now turn on the selected media/transceiver.
	 */
	switch (IFM_SUBTYPE(sc->ex_mii.mii_media.ifm_cur->ifm_media)) {
	case IFM_10_T:
		bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
		    JABBER_GUARD_ENABLE|LINKBEAT_ENABLE);
		break;

	case IFM_10_2:
		bus_space_write_2(iot, ioh, ELINK_COMMAND, START_TRANSCEIVER);
		DELAY(800);
		break;

	case IFM_100_TX:
	case IFM_100_FX:
		bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
		    LINKBEAT_ENABLE);
		DELAY(800);
		break;

	case IFM_10_5:
		bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
		    SQE_ENABLE);
		DELAY(800);
		break;

	case IFM_MANUAL:
		break;

	case IFM_NONE:
		return;

	default:
		panic("ex_set_media: impossible");
	}

	GO_WINDOW(3);
	configreg = bus_space_read_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG);

	configreg &= ~(CONFIG_MEDIAMASK << 16);
	/* ifm_data holds the ELINKMEDIA_* code added in ex_probemedia(). */
	configreg |= (sc->ex_mii.mii_media.ifm_cur->ifm_data <<
	    (CONFIG_MEDIAMASK_SHIFT + 16));

	bus_space_write_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG, configreg);
}

/*
 * Get currently-selected media from card.
 * (if_media callback, may be called before interface is brought up).
983 */ 984 void 985 ex_media_stat(struct ifnet *ifp, struct ifmediareq *req) 986 { 987 struct ex_softc *sc = ifp->if_softc; 988 uint16_t help; 989 990 if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) == (IFF_UP|IFF_RUNNING)) { 991 if (sc->ex_conf & EX_CONF_MII) { 992 mii_pollstat(&sc->ex_mii); 993 req->ifm_status = sc->ex_mii.mii_media_status; 994 req->ifm_active = sc->ex_mii.mii_media_active; 995 } else { 996 GO_WINDOW(4); 997 req->ifm_status = IFM_AVALID; 998 req->ifm_active = 999 sc->ex_mii.mii_media.ifm_cur->ifm_media; 1000 help = bus_space_read_2(sc->sc_iot, sc->sc_ioh, 1001 ELINK_W4_MEDIA_TYPE); 1002 if (help & LINKBEAT_DETECT) 1003 req->ifm_status |= IFM_ACTIVE; 1004 GO_WINDOW(1); 1005 } 1006 } 1007 } 1008 1009 1010 1011 /* 1012 * Start outputting on the interface. 1013 */ 1014 static void 1015 ex_start(struct ifnet *ifp) 1016 { 1017 struct ex_softc *sc = ifp->if_softc; 1018 bus_space_tag_t iot = sc->sc_iot; 1019 bus_space_handle_t ioh = sc->sc_ioh; 1020 volatile struct ex_fraghdr *fr = NULL; 1021 volatile struct ex_dpd *dpd = NULL, *prevdpd = NULL; 1022 struct ex_txdesc *txp; 1023 struct mbuf *mb_head; 1024 bus_dmamap_t dmamap; 1025 int m_csumflags, offset, seglen, totlen, segment, error; 1026 uint32_t csum_flags; 1027 1028 if (sc->tx_head || sc->tx_free == NULL) 1029 return; 1030 1031 txp = NULL; 1032 1033 /* 1034 * We're finished if there is nothing more to add to the list or if 1035 * we're all filled up with buffers to transmit. 1036 */ 1037 while (sc->tx_free != NULL) { 1038 /* 1039 * Grab a packet to transmit. 1040 */ 1041 IFQ_DEQUEUE(&ifp->if_snd, mb_head); 1042 if (mb_head == NULL) 1043 break; 1044 1045 /* 1046 * mb_head might be updated later, 1047 * so preserve csum_flags here. 1048 */ 1049 m_csumflags = mb_head->m_pkthdr.csum_flags; 1050 1051 /* 1052 * Get pointer to next available tx desc. 
 */
		/*
		 * NOTE(review): tx_free is used unchecked here; presumably
		 * an earlier test in ex_start() (outside this hunk)
		 * guarantees a free descriptor — confirm against full file.
		 */
		txp = sc->tx_free;
		dmamap = txp->tx_dmamap;

		/*
		 * Go through each of the mbufs in the chain and initialize
		 * the transmit buffer descriptors with the physical address
		 * and size of the mbuf.
		 */
 reload:
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
		    mb_head, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		switch (error) {
		case 0:
			/* Success. */
			break;

		case EFBIG:
		    {
			struct mbuf *mn;

			/*
			 * We ran out of segments. We have to recopy this
			 * mbuf chain first. Bail out if we can't get the
			 * new buffers.
			 */
			aprint_error_dev(sc->sc_dev, "too many segments, ");

			/* Coalesce the chain into one fresh mbuf. */
			MGETHDR(mn, M_DONTWAIT, MT_DATA);
			if (mn == NULL) {
				m_freem(mb_head);
				aprint_error("aborting\n");
				goto out;
			}
			if (mb_head->m_pkthdr.len > MHLEN) {
				/* Payload won't fit in the header mbuf;
				 * attach a cluster. */
				MCLGET(mn, M_DONTWAIT);
				if ((mn->m_flags & M_EXT) == 0) {
					m_freem(mn);
					m_freem(mb_head);
					aprint_error("aborting\n");
					goto out;
				}
			}
			m_copydata(mb_head, 0, mb_head->m_pkthdr.len,
			    mtod(mn, void *));
			mn->m_pkthdr.len = mn->m_len = mb_head->m_pkthdr.len;
			m_freem(mb_head);
			mb_head = mn;
			aprint_error("retrying\n");
			/* Retry the DMA load with the single-mbuf chain. */
			goto reload;
		    }

		default:
			/*
			 * Some other problem; report it.
			 */
			aprint_error_dev(sc->sc_dev,
			    "can't load mbuf chain, error = %d\n", error);
			m_freem(mb_head);
			goto out;
		}

		/*
		 * remove our tx desc from freelist.
		 */
		sc->tx_free = txp->tx_next;
		txp->tx_next = NULL;

		/*
		 * Fill one download fragment descriptor per DMA segment;
		 * totlen accumulates the full frame length for the DPD
		 * frame-start header.
		 */
		fr = &txp->tx_dpd->dpd_frags[0];
		totlen = 0;
		for (segment = 0; segment < dmamap->dm_nsegs; segment++, fr++) {
			fr->fr_addr = htole32(dmamap->dm_segs[segment].ds_addr);
			seglen = dmamap->dm_segs[segment].ds_len;
			fr->fr_len = htole32(seglen);
			totlen += seglen;
		}
		if (__predict_false(totlen <= EX_IP4CSUMTX_PADLEN &&
		    (m_csumflags & M_CSUM_IPv4) != 0)) {
			/*
			 * Pad short packets to avoid ip4csum-tx bug.
			 *
			 * XXX Should we still consider if such short
			 * (36 bytes or less) packets might already
			 * occupy EX_NTFRAG (== 32) fragments here?
			 */
			KASSERT(segment < EX_NTFRAGS);
			/* Append an extra fragment pointing at the
			 * driver's pre-allocated pad buffer. */
			fr->fr_addr = htole32(DPDMEMPAD_DMADDR(sc));
			seglen = EX_IP4CSUMTX_PADLEN + 1 - totlen;
			fr->fr_len = htole32(EX_FR_LAST | seglen);
			totlen += seglen;
		} else {
			/* Mark the final real fragment as the last one. */
			fr--;
			fr->fr_len |= htole32(EX_FR_LAST);
		}
		txp->tx_mbhead = mb_head;

		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		dpd = txp->tx_dpd;
		dpd->dpd_nextptr = 0;
		dpd->dpd_fsh = htole32(totlen);

		/* Byte-swap constants so compiler can optimize. */
		if (sc->ex_conf & EX_CONF_90XB) {
			/* 90xB parts can do IP/TCP/UDP transmit
			 * checksum offload; request it per-packet. */
			csum_flags = 0;

			if (m_csumflags & M_CSUM_IPv4)
				csum_flags |= htole32(EX_DPD_IPCKSUM);

			if (m_csumflags & M_CSUM_TCPv4)
				csum_flags |= htole32(EX_DPD_TCPCKSUM);
			else if (m_csumflags & M_CSUM_UDPv4)
				csum_flags |= htole32(EX_DPD_UDPCKSUM);

			dpd->dpd_fsh |= csum_flags;
		} else {
			/* Non-90xB: the stack must not have asked us
			 * to checksum anything. */
			KDASSERT((mb_head->m_pkthdr.csum_flags &
			    (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) == 0);
		}

		/* Flush the completed DPD before the chip can see it. */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
		    ((const char *)(intptr_t)dpd - (const char *)sc->sc_dpd),
		    sizeof (struct ex_dpd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * No need to stall the download engine, we know it's
		 * not busy right now.
		 *
		 * Fix up pointers in both the "soft" tx and the physical
		 * tx list.
		 */
		if (sc->tx_head != NULL) {
			prevdpd = sc->tx_tail->tx_dpd;
			offset = ((const char *)(intptr_t)prevdpd - (const char *)sc->sc_dpd);
			/* Sync the previous DPD around the nextptr update
			 * so the chip observes a consistent link. */
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
			    offset, sizeof (struct ex_dpd),
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			prevdpd->dpd_nextptr = htole32(DPD_DMADDR(sc, txp));
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
			    offset, sizeof (struct ex_dpd),
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			sc->tx_tail->tx_next = txp;
			sc->tx_tail = txp;
		} else {
			sc->tx_tail = sc->tx_head = txp;
		}

		/*
		 * Pass packet to bpf if there is a listener.
 */
		if (ifp->if_bpf)
			bpf_ops->bpf_mtap(ifp->if_bpf, mb_head);
	}
 out:
	if (sc->tx_head) {
		/* Ask for a download-complete interrupt on the last DPD
		 * queued, then hand the list to the download engine. */
		sc->tx_tail->tx_dpd->dpd_fsh |= htole32(EX_DPD_DNIND);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
		    ((char *)sc->tx_tail->tx_dpd - (char *)sc->sc_dpd),
		    sizeof (struct ex_dpd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		ifp->if_flags |= IFF_OACTIVE;
		bus_space_write_2(iot, ioh, ELINK_COMMAND, ELINK_DNUNSTALL);
		bus_space_write_4(iot, ioh, ELINK_DNLISTPTR,
		    DPD_DMADDR(sc, sc->tx_head));

		/* trigger watchdog */
		ifp->if_timer = 5;
	}
}


/*
 * Interrupt service routine.  Loops acknowledging and dispatching
 * status events (host error, statistics, tx/download complete,
 * upload complete) until no watched interrupt remains latched.
 * Returns nonzero iff the interrupt was ours.
 */
int
ex_intr(void *arg)
{
	struct ex_softc *sc = arg;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	uint16_t stat;
	int ret = 0;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	/* Ignore stray interrupts while down or detached/suspended. */
	if ((ifp->if_flags & IFF_RUNNING) == 0 ||
	    !device_is_active(sc->sc_dev))
		return (0);

	for (;;) {
		stat = bus_space_read_2(iot, ioh, ELINK_STATUS);

		if ((stat & XL_WATCHED_INTERRUPTS) == 0) {
			if ((stat & INTR_LATCH) == 0) {
#if 0
				aprint_error_dev(sc->sc_dev,
				    "intr latch cleared\n");
#endif
				break;
			}
		}

		ret = 1;

		/*
		 * Acknowledge interrupts.
		 */
		bus_space_write_2(iot, ioh, ELINK_COMMAND, ACK_INTR |
		    (stat & (XL_WATCHED_INTERRUPTS | INTR_LATCH)));
		if (sc->intr_ack)
			(*sc->intr_ack)(sc);

		if (stat & HOST_ERROR) {
			/* Fatal adapter error: full reset + reinit. */
			aprint_error_dev(sc->sc_dev,
			    "adapter failure (%x)\n", stat);
			ex_reset(sc);
			ex_init(ifp);
			return 1;
		}
		if (stat & UPD_STATS) {
			/* Statistics counter overflow warning. */
			ex_getstats(sc);
		}
		if (stat & TX_COMPLETE) {
			ex_txstat(sc);
#if 0
			if (stat & DN_COMPLETE)
				aprint_error_dev(sc->sc_dev,
				    "Ignoring Dn interrupt (%x)\n", stat);
#endif
			/*
			 * In some rare cases, both Tx Complete and
			 * Dn Complete bits are set.  However, the packet
			 * has been reloaded in ex_txstat() and should not
			 * handle the Dn Complete event here.
			 * Hence the "else" below.
			 */
		} else if (stat & DN_COMPLETE) {
			struct ex_txdesc *txp, *ptxp = NULL;
			bus_dmamap_t txmap;

			/* reset watchdog timer, was set in ex_start() */
			ifp->if_timer = 0;

			/* Reclaim every queued descriptor: unload its
			 * DMA map and free the transmitted mbuf chain. */
			for (txp = sc->tx_head; txp != NULL;
			    txp = txp->tx_next) {
				bus_dmamap_sync(sc->sc_dmat,
				    sc->sc_dpd_dmamap,
				    (char *)txp->tx_dpd - (char *)sc->sc_dpd,
				    sizeof (struct ex_dpd),
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
				if (txp->tx_mbhead != NULL) {
					txmap = txp->tx_dmamap;
					bus_dmamap_sync(sc->sc_dmat, txmap,
					    0, txmap->dm_mapsize,
					    BUS_DMASYNC_POSTWRITE);
					bus_dmamap_unload(sc->sc_dmat, txmap);
					m_freem(txp->tx_mbhead);
					txp->tx_mbhead = NULL;
				}
				ptxp = txp;
			}

			/*
			 * Move finished tx buffers back to the tx free list.
			 */
			if (sc->tx_free) {
				sc->tx_ftail->tx_next = sc->tx_head;
				sc->tx_ftail = ptxp;
			} else
				sc->tx_ftail = sc->tx_free = sc->tx_head;

			sc->tx_head = sc->tx_tail = NULL;
			ifp->if_flags &= ~IFF_OACTIVE;

			/* Saturating success counter (capped at 256). */
			if (sc->tx_succ_ok < 256)
				sc->tx_succ_ok++;
		}

		if (stat & UP_COMPLETE) {
			struct ex_rxdesc *rxd;
			struct mbuf *m;
			struct ex_upd *upd;
			bus_dmamap_t rxmap;
			uint32_t pktstat;

			/* Drain all completed upload (rx) descriptors. */
 rcvloop:
			rxd = sc->rx_head;
			rxmap = rxd->rx_dmamap;
			m = rxd->rx_mbhead;
			upd = rxd->rx_upd;

			bus_dmamap_sync(sc->sc_dmat, rxmap, 0,
			    rxmap->dm_mapsize,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap,
			    ((char *)upd - (char *)sc->sc_upd),
			    sizeof (struct ex_upd),
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			pktstat = le32toh(upd->upd_pktstatus);

			if (pktstat & EX_UPD_COMPLETE) {
				/*
				 * Remove first packet from the chain.
				 */
				sc->rx_head = rxd->rx_next;
				rxd->rx_next = NULL;

				/*
				 * Add a new buffer to the receive chain.
				 * If this fails, the old buffer is recycled
				 * instead.
				 */
				if (ex_add_rxbuf(sc, rxd) == 0) {
					uint16_t total_len;

					/* With VLAN MTU enabled, oversize
					 * errors are tolerated (longer
					 * frames are legitimate). */
					if (pktstat &
					    ((sc->sc_ethercom.ec_capenable &
					    ETHERCAP_VLAN_MTU) ?
					    EX_UPD_ERR_VLAN : EX_UPD_ERR)) {
						ifp->if_ierrors++;
						m_freem(m);
						goto rcvloop;
					}

					total_len = pktstat & EX_UPD_PKTLENMASK;
					if (total_len <
					    sizeof(struct ether_header)) {
						/* Runt frame; discard. */
						m_freem(m);
						goto rcvloop;
					}
					m->m_pkthdr.rcvif = ifp;
					m->m_pkthdr.len = m->m_len = total_len;
					if (ifp->if_bpf)
						bpf_ops->bpf_mtap(
						    ifp->if_bpf, m);
		/*
		 * Set the incoming checksum information for the packet.
		 */
					if ((sc->ex_conf & EX_CONF_90XB) != 0 &&
					    (pktstat & EX_UPD_IPCHECKED) != 0) {
						m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
						if (pktstat & EX_UPD_IPCKSUMERR)
							m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
						if (pktstat & EX_UPD_TCPCHECKED) {
							m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
							if (pktstat & EX_UPD_TCPCKSUMERR)
								m->m_pkthdr.csum_flags |=
								    M_CSUM_TCP_UDP_BAD;
						} else if (pktstat & EX_UPD_UDPCHECKED) {
							m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
							if (pktstat & EX_UPD_UDPCKSUMERR)
								m->m_pkthdr.csum_flags |=
								    M_CSUM_TCP_UDP_BAD;
						}
					}
					(*ifp->if_input)(ifp, m);
				}
				goto rcvloop;
			}
			/*
			 * Just in case we filled up all UPDs and the DMA engine
			 * stalled.  We could be more subtle about this.
			 */
			if (bus_space_read_4(iot, ioh, ELINK_UPLISTPTR) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "uplistptr was 0\n");
				ex_init(ifp);
			} else if (bus_space_read_4(iot, ioh, ELINK_UPPKTSTATUS)
			    & 0x2000) {
				aprint_error_dev(sc->sc_dev,
				    "receive stalled\n");
				bus_space_write_2(iot, ioh, ELINK_COMMAND,
				    ELINK_UPUNSTALL);
			}
		}

#if NRND > 0
		/* Feed the status word to the entropy pool. */
		if (stat)
			rnd_add_uint32(&sc->rnd_source, stat);
#endif
	}

	/* no more interrupts */
	if (ret && IFQ_IS_EMPTY(&ifp->if_snd) == 0)
		ex_start(ifp);
	return ret;
}

/*
 * ether_ioctl() callback for interface-flag changes: anything beyond
 * a PROMISC/DEBUG toggle forces a full reinit via ENETRESET.
 */
static int
ex_ifflags_cb(struct ethercom *ec)
{
	struct ifnet *ifp = &ec->ec_if;
	struct ex_softc *sc = ifp->if_softc;
	int change = ifp->if_flags ^ sc->sc_if_flags;

	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
		return ENETRESET;
	else if ((change & IFF_PROMISC) != 0)
		ex_set_mc(sc);
	return 0;
}

/*
 * Handle interface ioctls: media get/set directly, everything else
 * through ether_ioctl(), reprogramming the multicast filter when the
 * multicast list changes while the interface is running.
 */
int
ex_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct ex_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->ex_mii.mii_media, cmd);
		break;
	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;

		error = 0;

		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
1480 */ 1481 ex_set_mc(sc); 1482 } 1483 break; 1484 } 1485 1486 sc->sc_if_flags = ifp->if_flags; 1487 splx(s); 1488 return (error); 1489 } 1490 1491 void 1492 ex_getstats(struct ex_softc *sc) 1493 { 1494 bus_space_handle_t ioh = sc->sc_ioh; 1495 bus_space_tag_t iot = sc->sc_iot; 1496 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1497 uint8_t upperok; 1498 1499 GO_WINDOW(6); 1500 upperok = bus_space_read_1(iot, ioh, UPPER_FRAMES_OK); 1501 ifp->if_ipackets += bus_space_read_1(iot, ioh, RX_FRAMES_OK); 1502 ifp->if_ipackets += (upperok & 0x03) << 8; 1503 ifp->if_opackets += bus_space_read_1(iot, ioh, TX_FRAMES_OK); 1504 ifp->if_opackets += (upperok & 0x30) << 4; 1505 ifp->if_ierrors += bus_space_read_1(iot, ioh, RX_OVERRUNS); 1506 ifp->if_collisions += bus_space_read_1(iot, ioh, TX_COLLISIONS); 1507 /* 1508 * There seems to be no way to get the exact number of collisions, 1509 * this is the number that occurred at the very least. 1510 */ 1511 ifp->if_collisions += 2 * bus_space_read_1(iot, ioh, 1512 TX_AFTER_X_COLLISIONS); 1513 /* 1514 * Interface byte counts are counted by ether_input() and 1515 * ether_output(), so don't accumulate them here. Just 1516 * read the NIC counters so they don't generate overflow interrupts. 1517 * Upper byte counters are latched from reading the totals, so 1518 * they don't need to be read if we don't need their values. 
1519 */ 1520 (void)bus_space_read_2(iot, ioh, RX_TOTAL_OK); 1521 (void)bus_space_read_2(iot, ioh, TX_TOTAL_OK); 1522 1523 /* 1524 * Clear the following to avoid stats overflow interrupts 1525 */ 1526 (void)bus_space_read_1(iot, ioh, TX_DEFERRALS); 1527 (void)bus_space_read_1(iot, ioh, TX_AFTER_1_COLLISION); 1528 (void)bus_space_read_1(iot, ioh, TX_NO_SQE); 1529 (void)bus_space_read_1(iot, ioh, TX_CD_LOST); 1530 GO_WINDOW(4); 1531 (void)bus_space_read_1(iot, ioh, ELINK_W4_BADSSD); 1532 GO_WINDOW(1); 1533 } 1534 1535 void 1536 ex_printstats(struct ex_softc *sc) 1537 { 1538 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1539 1540 ex_getstats(sc); 1541 printf("in %llu out %llu ierror %llu oerror %llu ibytes %llu obytes " 1542 "%llu\n", (unsigned long long)ifp->if_ipackets, 1543 (unsigned long long)ifp->if_opackets, 1544 (unsigned long long)ifp->if_ierrors, 1545 (unsigned long long)ifp->if_oerrors, 1546 (unsigned long long)ifp->if_ibytes, 1547 (unsigned long long)ifp->if_obytes); 1548 } 1549 1550 void 1551 ex_tick(void *arg) 1552 { 1553 struct ex_softc *sc = arg; 1554 int s; 1555 1556 if (!device_is_active(sc->sc_dev)) 1557 return; 1558 1559 s = splnet(); 1560 1561 if (sc->ex_conf & EX_CONF_MII) 1562 mii_tick(&sc->ex_mii); 1563 1564 if (!(bus_space_read_2((sc)->sc_iot, (sc)->sc_ioh, ELINK_STATUS) 1565 & COMMAND_IN_PROGRESS)) 1566 ex_getstats(sc); 1567 1568 splx(s); 1569 1570 callout_reset(&sc->ex_mii_callout, hz, ex_tick, sc); 1571 } 1572 1573 void 1574 ex_reset(struct ex_softc *sc) 1575 { 1576 uint16_t val = GLOBAL_RESET; 1577 1578 if (sc->ex_conf & EX_CONF_RESETHACK) 1579 val |= 0x10; 1580 bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_COMMAND, val); 1581 /* 1582 * XXX apparently the command in progress bit can't be trusted 1583 * during a reset, so we just always wait this long. Fortunately 1584 * we normally only reset the chip during autoconfig. 
1585 */ 1586 delay(100000); 1587 ex_waitcmd(sc); 1588 } 1589 1590 void 1591 ex_watchdog(struct ifnet *ifp) 1592 { 1593 struct ex_softc *sc = ifp->if_softc; 1594 1595 log(LOG_ERR, "%s: device timeout\n", device_xname(sc->sc_dev)); 1596 ++sc->sc_ethercom.ec_if.if_oerrors; 1597 1598 ex_reset(sc); 1599 ex_init(ifp); 1600 } 1601 1602 void 1603 ex_stop(struct ifnet *ifp, int disable) 1604 { 1605 struct ex_softc *sc = ifp->if_softc; 1606 bus_space_tag_t iot = sc->sc_iot; 1607 bus_space_handle_t ioh = sc->sc_ioh; 1608 struct ex_txdesc *tx; 1609 struct ex_rxdesc *rx; 1610 int i; 1611 1612 bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_DISABLE); 1613 bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_DISABLE); 1614 bus_space_write_2(iot, ioh, ELINK_COMMAND, STOP_TRANSCEIVER); 1615 1616 for (tx = sc->tx_head ; tx != NULL; tx = tx->tx_next) { 1617 if (tx->tx_mbhead == NULL) 1618 continue; 1619 m_freem(tx->tx_mbhead); 1620 tx->tx_mbhead = NULL; 1621 bus_dmamap_unload(sc->sc_dmat, tx->tx_dmamap); 1622 tx->tx_dpd->dpd_fsh = tx->tx_dpd->dpd_nextptr = 0; 1623 bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap, 1624 ((char *)tx->tx_dpd - (char *)sc->sc_dpd), 1625 sizeof (struct ex_dpd), 1626 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1627 } 1628 sc->tx_tail = sc->tx_head = NULL; 1629 ex_init_txdescs(sc); 1630 1631 sc->rx_tail = sc->rx_head = 0; 1632 for (i = 0; i < EX_NUPD; i++) { 1633 rx = &sc->sc_rxdescs[i]; 1634 if (rx->rx_mbhead != NULL) { 1635 bus_dmamap_unload(sc->sc_dmat, rx->rx_dmamap); 1636 m_freem(rx->rx_mbhead); 1637 rx->rx_mbhead = NULL; 1638 } 1639 ex_add_rxbuf(sc, rx); 1640 } 1641 1642 bus_space_write_2(iot, ioh, ELINK_COMMAND, ACK_INTR | INTR_LATCH); 1643 1644 callout_stop(&sc->ex_mii_callout); 1645 if (sc->ex_conf & EX_CONF_MII) 1646 mii_down(&sc->ex_mii); 1647 1648 if (disable) 1649 ex_disable(sc); 1650 1651 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1652 sc->sc_if_flags = ifp->if_flags; 1653 ifp->if_timer = 0; 1654 } 1655 1656 static void 1657 ex_init_txdescs(struct 
ex_softc *sc) 1658 { 1659 int i; 1660 1661 for (i = 0; i < EX_NDPD; i++) { 1662 sc->sc_txdescs[i].tx_dmamap = sc->sc_tx_dmamaps[i]; 1663 sc->sc_txdescs[i].tx_dpd = &sc->sc_dpd[i]; 1664 if (i < EX_NDPD - 1) 1665 sc->sc_txdescs[i].tx_next = &sc->sc_txdescs[i + 1]; 1666 else 1667 sc->sc_txdescs[i].tx_next = NULL; 1668 } 1669 sc->tx_free = &sc->sc_txdescs[0]; 1670 sc->tx_ftail = &sc->sc_txdescs[EX_NDPD-1]; 1671 } 1672 1673 1674 int 1675 ex_activate(device_t self, enum devact act) 1676 { 1677 struct ex_softc *sc = device_private(self); 1678 1679 switch (act) { 1680 case DVACT_DEACTIVATE: 1681 if_deactivate(&sc->sc_ethercom.ec_if); 1682 return 0; 1683 default: 1684 return EOPNOTSUPP; 1685 } 1686 } 1687 1688 int 1689 ex_detach(struct ex_softc *sc) 1690 { 1691 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1692 struct ex_rxdesc *rxd; 1693 int i, s; 1694 1695 /* Succeed now if there's no work to do. */ 1696 if ((sc->ex_flags & EX_FLAGS_ATTACHED) == 0) 1697 return (0); 1698 1699 s = splnet(); 1700 /* Stop the interface. Callouts are stopped in it. */ 1701 ex_stop(ifp, 1); 1702 splx(s); 1703 1704 /* Destroy our callout. */ 1705 callout_destroy(&sc->ex_mii_callout); 1706 1707 if (sc->ex_conf & EX_CONF_MII) { 1708 /* Detach all PHYs */ 1709 mii_detach(&sc->ex_mii, MII_PHY_ANY, MII_OFFSET_ANY); 1710 } 1711 1712 /* Delete all remaining media. 
*/ 1713 ifmedia_delete_instance(&sc->ex_mii.mii_media, IFM_INST_ANY); 1714 1715 #if NRND > 0 1716 rnd_detach_source(&sc->rnd_source); 1717 #endif 1718 ether_ifdetach(ifp); 1719 if_detach(ifp); 1720 1721 for (i = 0; i < EX_NUPD; i++) { 1722 rxd = &sc->sc_rxdescs[i]; 1723 if (rxd->rx_mbhead != NULL) { 1724 bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap); 1725 m_freem(rxd->rx_mbhead); 1726 rxd->rx_mbhead = NULL; 1727 } 1728 } 1729 for (i = 0; i < EX_NUPD; i++) 1730 bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_dmamaps[i]); 1731 for (i = 0; i < EX_NDPD; i++) 1732 bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_dmamaps[i]); 1733 bus_dmamap_unload(sc->sc_dmat, sc->sc_dpd_dmamap); 1734 bus_dmamap_destroy(sc->sc_dmat, sc->sc_dpd_dmamap); 1735 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_dpd, 1736 EX_NDPD * sizeof (struct ex_dpd)); 1737 bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_drseg); 1738 bus_dmamap_unload(sc->sc_dmat, sc->sc_upd_dmamap); 1739 bus_dmamap_destroy(sc->sc_dmat, sc->sc_upd_dmamap); 1740 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_upd, 1741 EX_NUPD * sizeof (struct ex_upd)); 1742 bus_dmamem_free(sc->sc_dmat, &sc->sc_useg, sc->sc_urseg); 1743 1744 pmf_device_deregister(sc->sc_dev); 1745 1746 return (0); 1747 } 1748 1749 /* 1750 * Before reboots, reset card completely. 1751 */ 1752 static bool 1753 ex_shutdown(device_t self, int flags) 1754 { 1755 struct ex_softc *sc = device_private(self); 1756 1757 ex_stop(&sc->sc_ethercom.ec_if, 1); 1758 /* 1759 * Make sure the interface is powered up when we reboot, 1760 * otherwise firmware on some systems gets really confused. 1761 */ 1762 (void) ex_enable(sc); 1763 return true; 1764 } 1765 1766 /* 1767 * Read EEPROM data. 1768 * XXX what to do if EEPROM doesn't unbusy? 
1769 */ 1770 uint16_t 1771 ex_read_eeprom(struct ex_softc *sc, int offset) 1772 { 1773 bus_space_tag_t iot = sc->sc_iot; 1774 bus_space_handle_t ioh = sc->sc_ioh; 1775 uint16_t data = 0, cmd = READ_EEPROM; 1776 int off; 1777 1778 off = sc->ex_conf & EX_CONF_EEPROM_OFF ? 0x30 : 0; 1779 cmd = sc->ex_conf & EX_CONF_EEPROM_8BIT ? READ_EEPROM8 : READ_EEPROM; 1780 1781 GO_WINDOW(0); 1782 if (ex_eeprom_busy(sc)) 1783 goto out; 1784 bus_space_write_2(iot, ioh, ELINK_W0_EEPROM_COMMAND, 1785 cmd | (off + (offset & 0x3f))); 1786 if (ex_eeprom_busy(sc)) 1787 goto out; 1788 data = bus_space_read_2(iot, ioh, ELINK_W0_EEPROM_DATA); 1789 out: 1790 return data; 1791 } 1792 1793 static int 1794 ex_eeprom_busy(struct ex_softc *sc) 1795 { 1796 bus_space_tag_t iot = sc->sc_iot; 1797 bus_space_handle_t ioh = sc->sc_ioh; 1798 int i = 100; 1799 1800 while (i--) { 1801 if (!(bus_space_read_2(iot, ioh, ELINK_W0_EEPROM_COMMAND) & 1802 EEPROM_BUSY)) 1803 return 0; 1804 delay(100); 1805 } 1806 aprint_error_dev(sc->sc_dev, "eeprom stays busy.\n"); 1807 return (1); 1808 } 1809 1810 /* 1811 * Create a new rx buffer and add it to the 'soft' rx list. 1812 */ 1813 static int 1814 ex_add_rxbuf(struct ex_softc *sc, struct ex_rxdesc *rxd) 1815 { 1816 struct mbuf *m, *oldm; 1817 bus_dmamap_t rxmap; 1818 int error, rval = 0; 1819 1820 oldm = rxd->rx_mbhead; 1821 rxmap = rxd->rx_dmamap; 1822 1823 MGETHDR(m, M_DONTWAIT, MT_DATA); 1824 if (m != NULL) { 1825 MCLGET(m, M_DONTWAIT); 1826 if ((m->m_flags & M_EXT) == 0) { 1827 m_freem(m); 1828 if (oldm == NULL) 1829 return 1; 1830 m = oldm; 1831 MRESETDATA(m); 1832 rval = 1; 1833 } 1834 } else { 1835 if (oldm == NULL) 1836 return 1; 1837 m = oldm; 1838 MRESETDATA(m); 1839 rval = 1; 1840 } 1841 1842 /* 1843 * Setup the DMA map for this receive buffer. 
1844 */ 1845 if (m != oldm) { 1846 if (oldm != NULL) 1847 bus_dmamap_unload(sc->sc_dmat, rxmap); 1848 error = bus_dmamap_load(sc->sc_dmat, rxmap, 1849 m->m_ext.ext_buf, MCLBYTES, NULL, 1850 BUS_DMA_READ|BUS_DMA_NOWAIT); 1851 if (error) { 1852 aprint_error_dev(sc->sc_dev, "can't load rx buffer, error = %d\n", 1853 error); 1854 panic("ex_add_rxbuf"); /* XXX */ 1855 } 1856 } 1857 1858 /* 1859 * Align for data after 14 byte header. 1860 */ 1861 m->m_data += 2; 1862 1863 rxd->rx_mbhead = m; 1864 rxd->rx_upd->upd_pktstatus = htole32(MCLBYTES - 2); 1865 rxd->rx_upd->upd_frags[0].fr_addr = 1866 htole32(rxmap->dm_segs[0].ds_addr + 2); 1867 rxd->rx_upd->upd_nextptr = 0; 1868 1869 /* 1870 * Attach it to the end of the list. 1871 */ 1872 if (sc->rx_head != NULL) { 1873 sc->rx_tail->rx_next = rxd; 1874 sc->rx_tail->rx_upd->upd_nextptr = htole32(sc->sc_upddma + 1875 ((char *)rxd->rx_upd - (char *)sc->sc_upd)); 1876 bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap, 1877 (char *)sc->rx_tail->rx_upd - (char *)sc->sc_upd, 1878 sizeof (struct ex_upd), 1879 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1880 } else { 1881 sc->rx_head = rxd; 1882 } 1883 sc->rx_tail = rxd; 1884 1885 bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize, 1886 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1887 bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap, 1888 ((char *)rxd->rx_upd - (char *)sc->sc_upd), 1889 sizeof (struct ex_upd), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1890 return (rval); 1891 } 1892 1893 uint32_t 1894 ex_mii_bitbang_read(device_t self) 1895 { 1896 struct ex_softc *sc = device_private(self); 1897 1898 /* We're already in Window 4. */ 1899 return (bus_space_read_2(sc->sc_iot, sc->sc_ioh, ELINK_W4_PHYSMGMT)); 1900 } 1901 1902 void 1903 ex_mii_bitbang_write(device_t self, uint32_t val) 1904 { 1905 struct ex_softc *sc = device_private(self); 1906 1907 /* We're already in Window 4. 
*/ 1908 bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_W4_PHYSMGMT, val); 1909 } 1910 1911 int 1912 ex_mii_readreg(device_t v, int phy, int reg) 1913 { 1914 struct ex_softc *sc = device_private(v); 1915 int val; 1916 1917 if ((sc->ex_conf & EX_CONF_INTPHY) && phy != ELINK_INTPHY_ID) 1918 return 0; 1919 1920 GO_WINDOW(4); 1921 1922 val = mii_bitbang_readreg(v, &ex_mii_bitbang_ops, phy, reg); 1923 1924 GO_WINDOW(1); 1925 1926 return (val); 1927 } 1928 1929 void 1930 ex_mii_writereg(device_t v, int phy, int reg, int data) 1931 { 1932 struct ex_softc *sc = device_private(v); 1933 1934 GO_WINDOW(4); 1935 1936 mii_bitbang_writereg(v, &ex_mii_bitbang_ops, phy, reg, data); 1937 1938 GO_WINDOW(1); 1939 } 1940 1941 void 1942 ex_mii_statchg(device_t v) 1943 { 1944 struct ex_softc *sc = device_private(v); 1945 bus_space_tag_t iot = sc->sc_iot; 1946 bus_space_handle_t ioh = sc->sc_ioh; 1947 int mctl; 1948 1949 GO_WINDOW(3); 1950 mctl = bus_space_read_2(iot, ioh, ELINK_W3_MAC_CONTROL); 1951 if (sc->ex_mii.mii_media_active & IFM_FDX) 1952 mctl |= MAC_CONTROL_FDX; 1953 else 1954 mctl &= ~MAC_CONTROL_FDX; 1955 bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL, mctl); 1956 GO_WINDOW(1); /* back to operating window */ 1957 } 1958 1959 int 1960 ex_enable(struct ex_softc *sc) 1961 { 1962 if (sc->enabled == 0 && sc->enable != NULL) { 1963 if ((*sc->enable)(sc) != 0) { 1964 aprint_error_dev(sc->sc_dev, "device enable failed\n"); 1965 return (EIO); 1966 } 1967 sc->enabled = 1; 1968 } 1969 return (0); 1970 } 1971 1972 void 1973 ex_disable(struct ex_softc *sc) 1974 { 1975 if (sc->enabled == 1 && sc->disable != NULL) { 1976 (*sc->disable)(sc); 1977 sc->enabled = 0; 1978 } 1979 } 1980 1981