1 /* $NetBSD: elinkxl.c,v 1.121 2017/02/20 07:43:29 ozaki-r Exp $ */ 2 3 /*- 4 * Copyright (c) 1998 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Frank van der Linden. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 
30 */ 31 32 #include <sys/cdefs.h> 33 __KERNEL_RCSID(0, "$NetBSD: elinkxl.c,v 1.121 2017/02/20 07:43:29 ozaki-r Exp $"); 34 35 #include <sys/param.h> 36 #include <sys/systm.h> 37 #include <sys/callout.h> 38 #include <sys/kernel.h> 39 #include <sys/mbuf.h> 40 #include <sys/socket.h> 41 #include <sys/ioctl.h> 42 #include <sys/errno.h> 43 #include <sys/syslog.h> 44 #include <sys/select.h> 45 #include <sys/device.h> 46 #include <sys/rndsource.h> 47 48 #include <net/if.h> 49 #include <net/if_dl.h> 50 #include <net/if_ether.h> 51 #include <net/if_media.h> 52 53 #include <net/bpf.h> 54 #include <net/bpfdesc.h> 55 56 #include <sys/cpu.h> 57 #include <sys/bus.h> 58 #include <sys/intr.h> 59 #include <machine/endian.h> 60 61 #include <dev/mii/miivar.h> 62 #include <dev/mii/mii.h> 63 #include <dev/mii/mii_bitbang.h> 64 65 #include <dev/ic/elink3reg.h> 66 /* #include <dev/ic/elink3var.h> */ 67 #include <dev/ic/elinkxlreg.h> 68 #include <dev/ic/elinkxlvar.h> 69 70 #ifdef DEBUG 71 int exdebug = 0; 72 #endif 73 74 /* ifmedia callbacks */ 75 int ex_media_chg(struct ifnet *ifp); 76 void ex_media_stat(struct ifnet *ifp, struct ifmediareq *req); 77 78 static int ex_ifflags_cb(struct ethercom *); 79 80 void ex_probe_media(struct ex_softc *); 81 void ex_set_filter(struct ex_softc *); 82 void ex_set_media(struct ex_softc *); 83 void ex_set_xcvr(struct ex_softc *, uint16_t); 84 struct mbuf *ex_get(struct ex_softc *, int); 85 uint16_t ex_read_eeprom(struct ex_softc *, int); 86 int ex_init(struct ifnet *); 87 void ex_read(struct ex_softc *); 88 void ex_reset(struct ex_softc *); 89 void ex_set_mc(struct ex_softc *); 90 void ex_getstats(struct ex_softc *); 91 void ex_printstats(struct ex_softc *); 92 void ex_tick(void *); 93 94 static int ex_eeprom_busy(struct ex_softc *); 95 static int ex_add_rxbuf(struct ex_softc *, struct ex_rxdesc *); 96 static void ex_init_txdescs(struct ex_softc *); 97 98 static void ex_setup_tx(struct ex_softc *); 99 static bool ex_shutdown(device_t, int); 100 static 
void ex_start(struct ifnet *);
static void ex_txstat(struct ex_softc *);

/* MII register access and link-status callbacks (bit-bang, see below). */
int ex_mii_readreg(device_t, int, int);
void ex_mii_writereg(device_t, int, int, int);
void ex_mii_statchg(struct ifnet *);

void ex_probemedia(struct ex_softc *);

/*
 * Structure to map media-present bits in boards to ifmedia codes and
 * printable media names. Used for table-driven ifmedia initialization.
 */
struct ex_media {
	int	exm_mpbit;		/* media present bit */
	const char *exm_name;		/* name of medium */
	int	exm_ifmedia;		/* ifmedia word for medium */
	int	exm_epmedia;		/* ELINKMEDIA_* constant */
};

/*
 * Media table for 3c90x chips.  Note that chips with MII have no
 * `native' media.
 *
 * The table is terminated by an entry with a NULL exm_name; iterators
 * (e.g. ex_probemedia()) rely on that sentinel.
 */
struct ex_media ex_native_media[] = {
	{ ELINK_PCI_10BASE_T,	"10baseT",	IFM_ETHER|IFM_10_T,
	  ELINKMEDIA_10BASE_T },
	{ ELINK_PCI_10BASE_T,	"10baseT-FDX",	IFM_ETHER|IFM_10_T|IFM_FDX,
	  ELINKMEDIA_10BASE_T },
	{ ELINK_PCI_AUI,	"10base5",	IFM_ETHER|IFM_10_5,
	  ELINKMEDIA_AUI },
	{ ELINK_PCI_BNC,	"10base2",	IFM_ETHER|IFM_10_2,
	  ELINKMEDIA_10BASE_2 },
	{ ELINK_PCI_100BASE_TX,	"100baseTX",	IFM_ETHER|IFM_100_TX,
	  ELINKMEDIA_100BASE_TX },
	{ ELINK_PCI_100BASE_TX,	"100baseTX-FDX",IFM_ETHER|IFM_100_TX|IFM_FDX,
	  ELINKMEDIA_100BASE_TX },
	{ ELINK_PCI_100BASE_FX,	"100baseFX",	IFM_ETHER|IFM_100_FX,
	  ELINKMEDIA_100BASE_FX },
	{ ELINK_PCI_100BASE_MII,"manual",	IFM_ETHER|IFM_MANUAL,
	  ELINKMEDIA_MII },
	{ ELINK_PCI_100BASE_T4,	"100baseT4",	IFM_ETHER|IFM_100_T4,
	  ELINKMEDIA_100BASE_T4 },
	{ 0,			NULL,		0,
	  0 },
};

/*
 * MII bit-bang glue.
149 */ 150 uint32_t ex_mii_bitbang_read(device_t); 151 void ex_mii_bitbang_write(device_t, uint32_t); 152 153 const struct mii_bitbang_ops ex_mii_bitbang_ops = { 154 ex_mii_bitbang_read, 155 ex_mii_bitbang_write, 156 { 157 ELINK_PHY_DATA, /* MII_BIT_MDO */ 158 ELINK_PHY_DATA, /* MII_BIT_MDI */ 159 ELINK_PHY_CLK, /* MII_BIT_MDC */ 160 ELINK_PHY_DIR, /* MII_BIT_DIR_HOST_PHY */ 161 0, /* MII_BIT_DIR_PHY_HOST */ 162 } 163 }; 164 165 /* 166 * Back-end attach and configure. 167 */ 168 void 169 ex_config(struct ex_softc *sc) 170 { 171 struct ifnet *ifp; 172 uint16_t val; 173 uint8_t macaddr[ETHER_ADDR_LEN] = {0}; 174 bus_space_tag_t iot = sc->sc_iot; 175 bus_space_handle_t ioh = sc->sc_ioh; 176 int i, error, attach_stage; 177 178 pmf_self_suspensor_init(sc->sc_dev, &sc->sc_suspensor, &sc->sc_qual); 179 180 callout_init(&sc->ex_mii_callout, 0); 181 182 ex_reset(sc); 183 184 val = ex_read_eeprom(sc, EEPROM_OEM_ADDR0); 185 macaddr[0] = val >> 8; 186 macaddr[1] = val & 0xff; 187 val = ex_read_eeprom(sc, EEPROM_OEM_ADDR1); 188 macaddr[2] = val >> 8; 189 macaddr[3] = val & 0xff; 190 val = ex_read_eeprom(sc, EEPROM_OEM_ADDR2); 191 macaddr[4] = val >> 8; 192 macaddr[5] = val & 0xff; 193 194 aprint_normal_dev(sc->sc_dev, "MAC address %s\n", ether_sprintf(macaddr)); 195 196 if (sc->ex_conf & (EX_CONF_INV_LED_POLARITY|EX_CONF_PHY_POWER)) { 197 GO_WINDOW(2); 198 val = bus_space_read_2(iot, ioh, ELINK_W2_RESET_OPTIONS); 199 if (sc->ex_conf & EX_CONF_INV_LED_POLARITY) 200 val |= ELINK_RESET_OPT_LEDPOLAR; 201 if (sc->ex_conf & EX_CONF_PHY_POWER) 202 val |= ELINK_RESET_OPT_PHYPOWER; 203 bus_space_write_2(iot, ioh, ELINK_W2_RESET_OPTIONS, val); 204 } 205 if (sc->ex_conf & EX_CONF_NO_XCVR_PWR) { 206 GO_WINDOW(0); 207 bus_space_write_2(iot, ioh, ELINK_W0_MFG_ID, 208 EX_XCVR_PWR_MAGICBITS); 209 } 210 211 attach_stage = 0; 212 213 /* 214 * Allocate the upload descriptors, and create and load the DMA 215 * map for them. 
216 */ 217 if ((error = bus_dmamem_alloc(sc->sc_dmat, 218 EX_NUPD * sizeof (struct ex_upd), PAGE_SIZE, 0, &sc->sc_useg, 1, 219 &sc->sc_urseg, BUS_DMA_NOWAIT)) != 0) { 220 aprint_error_dev(sc->sc_dev, 221 "can't allocate upload descriptors, error = %d\n", error); 222 goto fail; 223 } 224 225 attach_stage = 1; 226 227 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_useg, sc->sc_urseg, 228 EX_NUPD * sizeof (struct ex_upd), (void **)&sc->sc_upd, 229 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) { 230 aprint_error_dev(sc->sc_dev, 231 "can't map upload descriptors, error = %d\n", error); 232 goto fail; 233 } 234 235 attach_stage = 2; 236 237 if ((error = bus_dmamap_create(sc->sc_dmat, 238 EX_NUPD * sizeof (struct ex_upd), 1, 239 EX_NUPD * sizeof (struct ex_upd), 0, BUS_DMA_NOWAIT, 240 &sc->sc_upd_dmamap)) != 0) { 241 aprint_error_dev(sc->sc_dev, 242 "can't create upload desc. DMA map, error = %d\n", error); 243 goto fail; 244 } 245 246 attach_stage = 3; 247 248 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_upd_dmamap, 249 sc->sc_upd, EX_NUPD * sizeof (struct ex_upd), NULL, 250 BUS_DMA_NOWAIT)) != 0) { 251 aprint_error_dev(sc->sc_dev, 252 "can't load upload desc. DMA map, error = %d\n", error); 253 goto fail; 254 } 255 256 attach_stage = 4; 257 258 /* 259 * Allocate the download descriptors, and create and load the DMA 260 * map for them. 
261 */ 262 if ((error = bus_dmamem_alloc(sc->sc_dmat, 263 DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, PAGE_SIZE, 0, &sc->sc_dseg, 1, 264 &sc->sc_drseg, BUS_DMA_NOWAIT)) != 0) { 265 aprint_error_dev(sc->sc_dev, 266 "can't allocate download descriptors, error = %d\n", error); 267 goto fail; 268 } 269 270 attach_stage = 5; 271 272 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dseg, sc->sc_drseg, 273 DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, (void **)&sc->sc_dpd, 274 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) { 275 aprint_error_dev(sc->sc_dev, 276 "can't map download descriptors, error = %d\n", error); 277 goto fail; 278 } 279 memset(sc->sc_dpd, 0, DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN); 280 281 attach_stage = 6; 282 283 if ((error = bus_dmamap_create(sc->sc_dmat, 284 DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, 1, 285 DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, 0, BUS_DMA_NOWAIT, 286 &sc->sc_dpd_dmamap)) != 0) { 287 aprint_error_dev(sc->sc_dev, 288 "can't create download desc. DMA map, error = %d\n", error); 289 goto fail; 290 } 291 292 attach_stage = 7; 293 294 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dpd_dmamap, 295 sc->sc_dpd, DPDMEM_SIZE + EX_IP4CSUMTX_PADLEN, NULL, 296 BUS_DMA_NOWAIT)) != 0) { 297 aprint_error_dev(sc->sc_dev, 298 "can't load download desc. DMA map, error = %d\n", error); 299 goto fail; 300 } 301 bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap, 302 DPDMEMPAD_OFF, EX_IP4CSUMTX_PADLEN, BUS_DMASYNC_PREWRITE); 303 304 attach_stage = 8; 305 306 307 /* 308 * Create the transmit buffer DMA maps. 309 */ 310 for (i = 0; i < EX_NDPD; i++) { 311 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 312 EX_NTFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT, 313 &sc->sc_tx_dmamaps[i])) != 0) { 314 aprint_error_dev(sc->sc_dev, 315 "can't create tx DMA map %d, error = %d\n", 316 i, error); 317 goto fail; 318 } 319 } 320 321 attach_stage = 9; 322 323 /* 324 * Create the receive buffer DMA maps. 
325 */ 326 for (i = 0; i < EX_NUPD; i++) { 327 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 328 EX_NRFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT, 329 &sc->sc_rx_dmamaps[i])) != 0) { 330 aprint_error_dev(sc->sc_dev, 331 "can't create rx DMA map %d, error = %d\n", 332 i, error); 333 goto fail; 334 } 335 } 336 337 attach_stage = 10; 338 339 /* 340 * Create ring of upload descriptors, only once. The DMA engine 341 * will loop over this when receiving packets, stalling if it 342 * hits an UPD with a finished receive. 343 */ 344 for (i = 0; i < EX_NUPD; i++) { 345 sc->sc_rxdescs[i].rx_dmamap = sc->sc_rx_dmamaps[i]; 346 sc->sc_rxdescs[i].rx_upd = &sc->sc_upd[i]; 347 sc->sc_upd[i].upd_frags[0].fr_len = 348 htole32((MCLBYTES - 2) | EX_FR_LAST); 349 if (ex_add_rxbuf(sc, &sc->sc_rxdescs[i]) != 0) { 350 aprint_error_dev(sc->sc_dev, 351 "can't allocate or map rx buffers\n"); 352 goto fail; 353 } 354 } 355 356 bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap, 0, 357 EX_NUPD * sizeof (struct ex_upd), 358 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 359 360 ex_init_txdescs(sc); 361 362 attach_stage = 11; 363 364 365 GO_WINDOW(3); 366 val = bus_space_read_2(iot, ioh, ELINK_W3_RESET_OPTIONS); 367 if (val & ELINK_MEDIACAP_MII) 368 sc->ex_conf |= EX_CONF_MII; 369 370 ifp = &sc->sc_ethercom.ec_if; 371 372 /* 373 * Initialize our media structures and MII info. We'll 374 * probe the MII if we discover that we have one. 375 */ 376 sc->ex_mii.mii_ifp = ifp; 377 sc->ex_mii.mii_readreg = ex_mii_readreg; 378 sc->ex_mii.mii_writereg = ex_mii_writereg; 379 sc->ex_mii.mii_statchg = ex_mii_statchg; 380 ifmedia_init(&sc->ex_mii.mii_media, IFM_IMASK, ex_media_chg, 381 ex_media_stat); 382 383 if (sc->ex_conf & EX_CONF_MII) { 384 /* 385 * Find PHY, extract media information from it. 386 * First, select the right transceiver. 
387 */ 388 ex_set_xcvr(sc, val); 389 390 mii_attach(sc->sc_dev, &sc->ex_mii, 0xffffffff, 391 MII_PHY_ANY, MII_OFFSET_ANY, 0); 392 if (LIST_FIRST(&sc->ex_mii.mii_phys) == NULL) { 393 ifmedia_add(&sc->ex_mii.mii_media, IFM_ETHER|IFM_NONE, 394 0, NULL); 395 ifmedia_set(&sc->ex_mii.mii_media, IFM_ETHER|IFM_NONE); 396 } else { 397 ifmedia_set(&sc->ex_mii.mii_media, IFM_ETHER|IFM_AUTO); 398 } 399 } else 400 ex_probemedia(sc); 401 402 strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ); 403 ifp->if_softc = sc; 404 ifp->if_start = ex_start; 405 ifp->if_ioctl = ex_ioctl; 406 ifp->if_watchdog = ex_watchdog; 407 ifp->if_init = ex_init; 408 ifp->if_stop = ex_stop; 409 ifp->if_flags = 410 IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST; 411 sc->sc_if_flags = ifp->if_flags; 412 IFQ_SET_READY(&ifp->if_snd); 413 414 /* 415 * We can support 802.1Q VLAN-sized frames. 416 */ 417 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU; 418 419 /* 420 * The 3c90xB has hardware IPv4/TCPv4/UDPv4 checksum support. 421 */ 422 if (sc->ex_conf & EX_CONF_90XB) 423 sc->sc_ethercom.ec_if.if_capabilities |= 424 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | 425 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | 426 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx; 427 428 if_attach(ifp); 429 if_deferred_start_init(ifp, NULL); 430 ether_ifattach(ifp, macaddr); 431 ether_set_ifflags_cb(&sc->sc_ethercom, ex_ifflags_cb); 432 433 GO_WINDOW(1); 434 435 sc->tx_start_thresh = 20; 436 sc->tx_succ_ok = 0; 437 438 /* TODO: set queues to 0 */ 439 440 rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev), 441 RND_TYPE_NET, RND_FLAG_DEFAULT); 442 443 if (pmf_device_register1(sc->sc_dev, NULL, NULL, ex_shutdown)) 444 pmf_class_network_register(sc->sc_dev, &sc->sc_ethercom.ec_if); 445 else 446 aprint_error_dev(sc->sc_dev, 447 "couldn't establish power handler\n"); 448 449 /* The attach is successful. 
*/ 450 sc->ex_flags |= EX_FLAGS_ATTACHED; 451 return; 452 453 fail: 454 /* 455 * Free any resources we've allocated during the failed attach 456 * attempt. Do this in reverse order and fall though. 457 */ 458 switch (attach_stage) { 459 case 11: 460 { 461 struct ex_rxdesc *rxd; 462 463 for (i = 0; i < EX_NUPD; i++) { 464 rxd = &sc->sc_rxdescs[i]; 465 if (rxd->rx_mbhead != NULL) { 466 bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap); 467 m_freem(rxd->rx_mbhead); 468 } 469 } 470 } 471 /* FALLTHROUGH */ 472 473 case 10: 474 for (i = 0; i < EX_NUPD; i++) 475 bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_dmamaps[i]); 476 /* FALLTHROUGH */ 477 478 case 9: 479 for (i = 0; i < EX_NDPD; i++) 480 bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_dmamaps[i]); 481 /* FALLTHROUGH */ 482 case 8: 483 bus_dmamap_unload(sc->sc_dmat, sc->sc_dpd_dmamap); 484 /* FALLTHROUGH */ 485 486 case 7: 487 bus_dmamap_destroy(sc->sc_dmat, sc->sc_dpd_dmamap); 488 /* FALLTHROUGH */ 489 490 case 6: 491 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_dpd, 492 EX_NDPD * sizeof (struct ex_dpd)); 493 /* FALLTHROUGH */ 494 495 case 5: 496 bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_drseg); 497 break; 498 499 case 4: 500 bus_dmamap_unload(sc->sc_dmat, sc->sc_upd_dmamap); 501 /* FALLTHROUGH */ 502 503 case 3: 504 bus_dmamap_destroy(sc->sc_dmat, sc->sc_upd_dmamap); 505 /* FALLTHROUGH */ 506 507 case 2: 508 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_upd, 509 EX_NUPD * sizeof (struct ex_upd)); 510 /* FALLTHROUGH */ 511 512 case 1: 513 bus_dmamem_free(sc->sc_dmat, &sc->sc_useg, sc->sc_urseg); 514 break; 515 } 516 517 } 518 519 /* 520 * Find the media present on non-MII chips. 
 */
void
ex_probemedia(struct ex_softc *sc)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct ifmedia *ifm = &sc->ex_mii.mii_media;
	struct ex_media *exm;
	uint16_t config1, reset_options, default_media;
	int defmedia = 0;
	const char *sep = "", *defmedianame = NULL;

	/*
	 * Read the media-present bits and the EEPROM-selected default
	 * media from register window 3, then return to window 0.
	 */
	GO_WINDOW(3);
	config1 = bus_space_read_2(iot, ioh, ELINK_W3_INTERNAL_CONFIG + 2);
	reset_options = bus_space_read_1(iot, ioh, ELINK_W3_RESET_OPTIONS);
	GO_WINDOW(0);

	default_media = (config1 & CONFIG_MEDIAMASK) >> CONFIG_MEDIAMASK_SHIFT;

	/* Sanity check that there are any media! */
	if ((reset_options & ELINK_PCI_MEDIAMASK) == 0) {
		aprint_error_dev(sc->sc_dev, "no media present!\n");
		ifmedia_add(ifm, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(ifm, IFM_ETHER|IFM_NONE);
		return;
	}

	aprint_normal_dev(sc->sc_dev, "");

/* Print a comma-separated list of the supported media names. */
#define PRINT(str) aprint_normal("%s%s", sep, str); sep = ", "

	/* Walk the NULL-terminated native media table. */
	for (exm = ex_native_media; exm->exm_name != NULL; exm++) {
		if (reset_options & exm->exm_mpbit) {
			/*
			 * Default media is a little complicated. We
			 * support full-duplex which uses the same
			 * reset options bit.
			 *
			 * XXX Check EEPROM for default to FDX?
			 */
			if (exm->exm_epmedia == default_media) {
				if ((exm->exm_ifmedia & IFM_FDX) == 0) {
					defmedia = exm->exm_ifmedia;
					defmedianame = exm->exm_name;
				}
			} else if (defmedia == 0) {
				/* Fall back to the first medium found. */
				defmedia = exm->exm_ifmedia;
				defmedianame = exm->exm_name;
			}
			ifmedia_add(ifm, exm->exm_ifmedia, exm->exm_epmedia,
			    NULL);
			PRINT(exm->exm_name);
		}
	}

#undef PRINT

#ifdef DIAGNOSTIC
	/* The media-mask check above guarantees at least one match. */
	if (defmedia == 0)
		panic("ex_probemedia: impossible");
#endif

	aprint_normal(", default %s\n", defmedianame);
	ifmedia_set(ifm, defmedia);
}

/*
 * Setup transmitter parameters.
 */
static void
ex_setup_tx(struct ex_softc *sc)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;

	/*
	 * Disable reclaim threshold for 90xB, set free threshold to
	 * 6 * 256 = 1536 for 90x.
	 */
	if (sc->ex_conf & EX_CONF_90XB)
		bus_space_write_2(iot, ioh, ELINK_COMMAND,
		    ELINK_TXRECLTHRESH | 255);
	else
		bus_space_write_1(iot, ioh, ELINK_TXFREETHRESH, 6);

	/* Setup early transmission start threshold. */
	bus_space_write_2(iot, ioh, ELINK_COMMAND,
	    ELINK_TXSTARTTHRESH | sc->tx_start_thresh);
}

/*
 * Bring device up.
 *
 * if_init handler: programs the station address, resets and enables
 * the receiver and transmitter, sets media and multicast filters, and
 * kicks off transmission.  Returns 0 on success or an errno from
 * ex_enable().
 */
int
ex_init(struct ifnet *ifp)
{
	struct ex_softc *sc = ifp->if_softc;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int i;
	uint16_t val;
	int error = 0;

	if ((error = ex_enable(sc)) != 0)
		goto out;

	ex_waitcmd(sc);
	ex_stop(ifp, 0);

	GO_WINDOW(2);

	/* Turn on PHY power. */
	if (sc->ex_conf & (EX_CONF_PHY_POWER | EX_CONF_INV_LED_POLARITY)) {
		val = bus_space_read_2(iot, ioh, ELINK_W2_RESET_OPTIONS);
		if (sc->ex_conf & EX_CONF_PHY_POWER)
			val |= ELINK_RESET_OPT_PHYPOWER; /* turn on PHY power */
		if (sc->ex_conf & EX_CONF_INV_LED_POLARITY)
			val |= ELINK_RESET_OPT_LEDPOLAR; /* invert LED polarity */
		bus_space_write_2(iot, ioh, ELINK_W2_RESET_OPTIONS, val);
	}

	/*
	 * Set the station address and clear the station mask. The latter
	 * is needed for 90x cards, 0 is the default for 90xB cards.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		bus_space_write_1(iot, ioh, ELINK_W2_ADDR_0 + i,
		    CLLADDR(ifp->if_sadl)[i]);
		bus_space_write_1(iot, ioh, ELINK_W2_RECVMASK_0 + i, 0);
	}

	GO_WINDOW(3);

	/* Reset both engines; each reset must complete before continuing. */
	bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_RESET);
	ex_waitcmd(sc);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_RESET);
	ex_waitcmd(sc);

	/* Load Tx parameters. */
	ex_setup_tx(sc);

	bus_space_write_2(iot, ioh, ELINK_COMMAND,
	    SET_RX_EARLY_THRESH | ELINK_THRESH_DISABLE);

	bus_space_write_4(iot, ioh, ELINK_DMACTRL,
	    bus_space_read_4(iot, ioh, ELINK_DMACTRL) | ELINK_DMAC_UPRXEAREN);

	/* Unmask the interrupts we care about. */
	bus_space_write_2(iot, ioh, ELINK_COMMAND,
	    SET_RD_0_MASK | XL_WATCHED_INTERRUPTS);
	bus_space_write_2(iot, ioh, ELINK_COMMAND,
	    SET_INTR_MASK | XL_WATCHED_INTERRUPTS);

	/* Ack any stale interrupts, including via the bus front-end hook. */
	bus_space_write_2(iot, ioh, ELINK_COMMAND, ACK_INTR | 0xff);
	if (sc->intr_ack)
		(* sc->intr_ack)(sc);
	ex_set_media(sc);
	ex_set_mc(sc);


	bus_space_write_2(iot, ioh, ELINK_COMMAND, STATS_ENABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_ENABLE);
	/* Hand the upload (Rx) descriptor ring to the DMA engine. */
	bus_space_write_4(iot, ioh, ELINK_UPLISTPTR, sc->sc_upddma);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_ENABLE);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, ELINK_UPUNSTALL);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ex_start(ifp);
	sc->sc_if_flags = ifp->if_flags;

	GO_WINDOW(1);

	/* Start the one-second MII tick. */
	callout_reset(&sc->ex_mii_callout, hz, ex_tick, sc);

 out:
	if (error) {
		ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
		ifp->if_timer = 0;
		aprint_error_dev(sc->sc_dev, "interface not running\n");
	}
	return (error);
}

/* Multicast hash: top bits of the big-endian CRC32 of the address. */
#define MCHASHSIZE 256
#define ex_mchash(addr) (ether_crc32_be((addr), ETHER_ADDR_LEN) & \
    (MCHASHSIZE - 1))

/*
 * Set multicast receive filter. Also take care of promiscuous mode
 * here (XXX).
 */
void
ex_set_mc(struct ex_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ethercom *ec = &sc->sc_ethercom;
	struct ether_multi *enm;
	struct ether_multistep estep;
	int i;
	uint16_t mask = FIL_INDIVIDUAL | FIL_BRDCST;

	if (ifp->if_flags & IFF_PROMISC) {
		mask |= FIL_PROMISC;
		goto allmulti;
	}

	ETHER_FIRST_MULTI(estep, ec, enm);
	if (enm == NULL)
		goto nomulti;

	if ((sc->ex_conf & EX_CONF_90XB) == 0)
		/* No multicast hash filtering. */
		goto allmulti;

	/* Clear the whole 256-bit hash filter, one bit per command. */
	for (i = 0; i < MCHASHSIZE; i++)
		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
		    ELINK_COMMAND, ELINK_CLEARHASHFILBIT | i);

	do {
		/*
		 * An address range (lo != hi) can't be represented in
		 * the hash; fall back to receiving all multicast.
		 */
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0)
			goto allmulti;

		i = ex_mchash(enm->enm_addrlo);
		bus_space_write_2(sc->sc_iot, sc->sc_ioh,
		    ELINK_COMMAND, ELINK_SETHASHFILBIT | i);
		ETHER_NEXT_MULTI(estep, enm);
	} while (enm != NULL);
	mask |= FIL_MULTIHASH;

 nomulti:
	ifp->if_flags &= ~IFF_ALLMULTI;
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_COMMAND,
	    SET_RX_FILTER | mask);
	return;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	mask |= FIL_MULTICAST;
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_COMMAND,
	    SET_RX_FILTER | mask);
}


/*
 * The Tx Complete interrupts occur only on errors,
 * and this is the error handler.
 */
static void
ex_txstat(struct ex_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int i, err = 0;

	/*
	 * We need to read+write TX_STATUS until we get a 0 status
	 * in order to turn off the interrupt flag.
	 * ELINK_TXSTATUS is in the upper byte of 2 with ELINK_TIMER.
	 */
	for (;;) {
		i = bus_space_read_2(iot, ioh, ELINK_TIMER);
		if ((i & TXS_COMPLETE) == 0)
			break;
		/* Writing the status register pops the Tx status FIFO. */
		bus_space_write_2(iot, ioh, ELINK_TIMER, 0x0);
		err |= i;
	}
	/* Mask off the timer byte accumulated along with the status. */
	err &= ~TXS_TIMER;

	if ((err & (TXS_UNDERRUN | TXS_JABBER | TXS_RECLAIM))
	    || err == 0 /* should not happen, just in case */) {
		/*
		 * Make sure the transmission is stopped.
		 */
		bus_space_write_2(iot, ioh, ELINK_COMMAND, ELINK_DNSTALL);
		/* Bounded poll for the download engine to go idle. */
		for (i = 1000; i > 0; i--)
			if ((bus_space_read_4(iot, ioh, ELINK_DMACTRL) &
			    ELINK_DMAC_DNINPROG) == 0)
				break;

		/*
		 * Reset the transmitter.
		 */
		bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_RESET);

		/* Resetting takes a while and we will do more than wait. */

		ifp->if_flags &= ~IFF_OACTIVE;
		++sc->sc_ethercom.ec_if.if_oerrors;
		aprint_error_dev(sc->sc_dev, "%s%s%s",
		    (err & TXS_UNDERRUN) ? " transmit underrun" : "",
		    (err & TXS_JABBER) ? " jabber" : "",
		    (err & TXS_RECLAIM) ? " reclaim" : "");
		if (err == 0)
			aprint_error(" unknown Tx error");
		printf(" (%x)", err);
		if (err & TXS_UNDERRUN) {
			/*
			 * On underrun, raise the early-Tx threshold
			 * (up to a full frame) until the FIFO stops
			 * draining faster than we can fill it.
			 */
			aprint_error(" @%d", sc->tx_start_thresh);
			if (sc->tx_succ_ok < 256 &&
			    (i = min(ETHER_MAX_LEN, sc->tx_start_thresh + 20))
			    > sc->tx_start_thresh) {
				aprint_error(", new threshold is %d", i);
				sc->tx_start_thresh = i;
			}
			sc->tx_succ_ok = 0;
		}
		aprint_error("\n");
		if (err & TXS_MAX_COLLISION)
			++sc->sc_ethercom.ec_if.if_collisions;

		/* Wait for TX_RESET to finish. */
		ex_waitcmd(sc);

		/* Reload Tx parameters. */
		ex_setup_tx(sc);
	} else {
		if (err & TXS_MAX_COLLISION)
			++sc->sc_ethercom.ec_if.if_collisions;
		sc->sc_ethercom.ec_if.if_flags &= ~IFF_OACTIVE;
	}

	bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_ENABLE);

	/* Retransmit current packet if any. */
	if (sc->tx_head) {
		ifp->if_flags |= IFF_OACTIVE;
		bus_space_write_2(iot, ioh, ELINK_COMMAND,
		    ELINK_DNUNSTALL);
		bus_space_write_4(iot, ioh, ELINK_DNLISTPTR,
		    DPD_DMADDR(sc, sc->tx_head));

		/* Retrigger watchdog if stopped. */
		if (ifp->if_timer == 0)
			ifp->if_timer = 1;
	}
}

/*
 * ifmedia change callback: re-initialize the interface if it is up so
 * the new media selection takes effect.
 */
int
ex_media_chg(struct ifnet *ifp)
{

	if (ifp->if_flags & IFF_UP)
		ex_init(ifp);
	return 0;
}

/*
 * Select the transceiver in the internal-config register according to
 * the media-capability bits read from window 3.
 */
void
ex_set_xcvr(struct ex_softc *sc, const uint16_t media)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	uint32_t icfg;

	/*
	 * We're already in Window 3
	 */
	icfg = bus_space_read_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG);
	icfg &= ~(CONFIG_XCVR_SEL << 16);
	if (media & (ELINK_MEDIACAP_MII | ELINK_MEDIACAP_100BASET4))
		icfg |= ELINKMEDIA_MII << (CONFIG_XCVR_SEL_SHIFT + 16);
	if (media & ELINK_MEDIACAP_100BASETX)
		icfg |= ELINKMEDIA_AUTO << (CONFIG_XCVR_SEL_SHIFT + 16);
	if (media & ELINK_MEDIACAP_100BASEFX)
		icfg |= ELINKMEDIA_100BASE_FX
			<< (CONFIG_XCVR_SEL_SHIFT + 16);
	bus_space_write_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG, icfg);
}

/*
 * Program the MAC duplex setting and the selected media/transceiver
 * into the chip.  For MII chips the PHY is told via mii_mediachg();
 * for native-media chips the transceiver is switched directly.
 */
void
ex_set_media(struct ex_softc *sc)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	uint32_t configreg;

	if (((sc->ex_conf & EX_CONF_MII) &&
	    (sc->ex_mii.mii_media_active & IFM_FDX))
	    || (!(sc->ex_conf & EX_CONF_MII) &&
	    (sc->ex_mii.mii_media.ifm_media & IFM_FDX))) {
		bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL,
		    MAC_CONTROL_FDX);
	} else {
		bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL, 0);
	}

	/*
	 * If the device has MII, select it, and then tell the
	 * PHY which media to use.
	 */
	if (sc->ex_conf & EX_CONF_MII) {
		uint16_t val;

		GO_WINDOW(3);
		val = bus_space_read_2(iot, ioh, ELINK_W3_RESET_OPTIONS);
		ex_set_xcvr(sc, val);
		mii_mediachg(&sc->ex_mii);
		return;
	}

	GO_WINDOW(4);
	bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE, 0);
	bus_space_write_2(iot, ioh, ELINK_COMMAND, STOP_TRANSCEIVER);
	delay(800);

	/*
	 * Now turn on the selected media/transceiver.
	 */
	switch (IFM_SUBTYPE(sc->ex_mii.mii_media.ifm_cur->ifm_media)) {
	case IFM_10_T:
		bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
		    JABBER_GUARD_ENABLE|LINKBEAT_ENABLE);
		break;

	case IFM_10_2:
		bus_space_write_2(iot, ioh, ELINK_COMMAND, START_TRANSCEIVER);
		DELAY(800);
		break;

	case IFM_100_TX:
	case IFM_100_FX:
		bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
		    LINKBEAT_ENABLE);
		DELAY(800);
		break;

	case IFM_10_5:
		bus_space_write_2(iot, ioh, ELINK_W4_MEDIA_TYPE,
		    SQE_ENABLE);
		DELAY(800);
		break;

	case IFM_MANUAL:
		break;

	case IFM_NONE:
		return;

	default:
		panic("ex_set_media: impossible");
	}

	/* Record the media selection in the internal-config register. */
	GO_WINDOW(3);
	configreg = bus_space_read_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG);

	configreg &= ~(CONFIG_MEDIAMASK << 16);
	configreg |= (sc->ex_mii.mii_media.ifm_cur->ifm_data <<
	    (CONFIG_MEDIAMASK_SHIFT + 16));

	bus_space_write_4(iot, ioh, ELINK_W3_INTERNAL_CONFIG, configreg);
}

/*
 * Get currently-selected media from card.
 * (if_media callback, may be called before interface is brought up).
 */
void
ex_media_stat(struct ifnet *ifp, struct ifmediareq *req)
{
	struct ex_softc *sc = ifp->if_softc;
	uint16_t help;

	/* Only poll the hardware when the interface is up and running. */
	if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) == (IFF_UP|IFF_RUNNING)) {
		if (sc->ex_conf & EX_CONF_MII) {
			mii_pollstat(&sc->ex_mii);
			req->ifm_status = sc->ex_mii.mii_media_status;
			req->ifm_active = sc->ex_mii.mii_media_active;
		} else {
			/* Non-MII: derive link state from the linkbeat bit. */
			GO_WINDOW(4);
			req->ifm_status = IFM_AVALID;
			req->ifm_active =
			    sc->ex_mii.mii_media.ifm_cur->ifm_media;
			help = bus_space_read_2(sc->sc_iot, sc->sc_ioh,
			    ELINK_W4_MEDIA_TYPE);
			if (help & LINKBEAT_DETECT)
				req->ifm_status |= IFM_ACTIVE;
			GO_WINDOW(1);
		}
	}
}



/*
 * Start outputting on the interface.
 *
 * if_start handler: dequeues packets, loads them into download (Tx)
 * descriptors and chains them onto the hardware download list.
 */
static void
ex_start(struct ifnet *ifp)
{
	struct ex_softc *sc = ifp->if_softc;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	volatile struct ex_fraghdr *fr = NULL;
	volatile struct ex_dpd *dpd = NULL, *prevdpd = NULL;
	struct ex_txdesc *txp;
	struct mbuf *mb_head;
	bus_dmamap_t dmamap;
	int m_csumflags, offset, seglen, totlen, segment, error;
	uint32_t csum_flags;

	/* Nothing to do while a chain is in flight or no free descs. */
	if (sc->tx_head || sc->tx_free == NULL)
		return;

	txp = NULL;

	/*
	 * We're finished if there is nothing more to add to the list or if
	 * we're all filled up with buffers to transmit.
	 */
	while (sc->tx_free != NULL) {
		/*
		 * Grab a packet to transmit.
		 */
		IFQ_DEQUEUE(&ifp->if_snd, mb_head);
		if (mb_head == NULL)
			break;

		/*
		 * mb_head might be updated later,
		 * so preserve csum_flags here.
		 */
		m_csumflags = mb_head->m_pkthdr.csum_flags;

		/*
		 * Get pointer to next available tx desc.
		 */
		txp = sc->tx_free;
		dmamap = txp->tx_dmamap;

		/*
		 * Go through each of the mbufs in the chain and initialize
		 * the transmit buffer descriptors with the physical address
		 * and size of the mbuf.
		 */
 reload:
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
		    mb_head, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		switch (error) {
		case 0:
			/* Success. */
			break;

		case EFBIG:
		    {
			struct mbuf *mn;

			/*
			 * We ran out of segments. We have to recopy this
			 * mbuf chain first. Bail out if we can't get the
			 * new buffers.
			 */
			aprint_error_dev(sc->sc_dev, "too many segments, ");

			MGETHDR(mn, M_DONTWAIT, MT_DATA);
			if (mn == NULL) {
				m_freem(mb_head);
				aprint_error("aborting\n");
				goto out;
			}
			if (mb_head->m_pkthdr.len > MHLEN) {
				MCLGET(mn, M_DONTWAIT);
				if ((mn->m_flags & M_EXT) == 0) {
					m_freem(mn);
					m_freem(mb_head);
					aprint_error("aborting\n");
					goto out;
				}
			}
			/* Compact the whole chain into the single new mbuf. */
			m_copydata(mb_head, 0, mb_head->m_pkthdr.len,
			    mtod(mn, void *));
			mn->m_pkthdr.len = mn->m_len = mb_head->m_pkthdr.len;
			m_freem(mb_head);
			mb_head = mn;
			aprint_error("retrying\n");
			goto reload;
		    }

		default:
			/*
			 * Some other problem; report it.
			 */
			aprint_error_dev(sc->sc_dev,
			    "can't load mbuf chain, error = %d\n", error);
			m_freem(mb_head);
			goto out;
		}

		/*
		 * remove our tx desc from freelist.
		 */
		sc->tx_free = txp->tx_next;
		txp->tx_next = NULL;

		/* Fill one fragment header per DMA segment. */
		fr = &txp->tx_dpd->dpd_frags[0];
		totlen = 0;
		for (segment = 0; segment < dmamap->dm_nsegs; segment++, fr++) {
			fr->fr_addr = htole32(dmamap->dm_segs[segment].ds_addr);
			seglen = dmamap->dm_segs[segment].ds_len;
			fr->fr_len = htole32(seglen);
			totlen += seglen;
		}
		if (__predict_false(totlen <= EX_IP4CSUMTX_PADLEN &&
		    (m_csumflags & M_CSUM_IPv4) != 0)) {
			/*
			 * Pad short packets to avoid ip4csum-tx bug.
			 *
			 * XXX Should we still consider if such short
			 * (36 bytes or less) packets might already
			 * occupy EX_NTFRAG (== 32) fragments here?
			 */
			KASSERT(segment < EX_NTFRAGS);
			/* Extra fragment pointing at the pre-zeroed pad. */
			fr->fr_addr = htole32(DPDMEMPAD_DMADDR(sc));
			seglen = EX_IP4CSUMTX_PADLEN + 1 - totlen;
			fr->fr_len = htole32(EX_FR_LAST | seglen);
			totlen += seglen;
		} else {
			/* Mark the final fragment written above. */
			fr--;
			fr->fr_len |= htole32(EX_FR_LAST);
		}
		txp->tx_mbhead = mb_head;

		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		dpd = txp->tx_dpd;
		dpd->dpd_nextptr = 0;
		dpd->dpd_fsh = htole32(totlen);

		/* Byte-swap constants so compiler can optimize. */

		if (sc->ex_conf & EX_CONF_90XB) {
			csum_flags = 0;

			if (m_csumflags & M_CSUM_IPv4)
				csum_flags |= htole32(EX_DPD_IPCKSUM);

			if (m_csumflags & M_CSUM_TCPv4)
				csum_flags |= htole32(EX_DPD_TCPCKSUM);
			else if (m_csumflags & M_CSUM_UDPv4)
				csum_flags |= htole32(EX_DPD_UDPCKSUM);

			dpd->dpd_fsh |= csum_flags;
		} else {
			KDASSERT((mb_head->m_pkthdr.csum_flags &
			    (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) == 0);
		}

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
		    ((const char *)(intptr_t)dpd - (const char *)sc->sc_dpd),
		    sizeof (struct ex_dpd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * No need to stall the download engine, we know it's
		 * not busy right now.
		 *
		 * Fix up pointers in both the "soft" tx and the physical
		 * tx list.
		 */
		if (sc->tx_head != NULL) {
			/* Link the previous tail DPD to this one. */
			prevdpd = sc->tx_tail->tx_dpd;
			offset = ((const char *)(intptr_t)prevdpd - (const char *)sc->sc_dpd);
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
			    offset, sizeof (struct ex_dpd),
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			prevdpd->dpd_nextptr = htole32(DPD_DMADDR(sc, txp));
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
			    offset, sizeof (struct ex_dpd),
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			sc->tx_tail->tx_next = txp;
			sc->tx_tail = txp;
		} else {
			sc->tx_tail = sc->tx_head = txp;
		}

		/*
		 * Pass packet to bpf if there is a listener.
		 */
		bpf_mtap(ifp, mb_head);
	}
 out:
	if (sc->tx_head) {
		/* Request a Dn interrupt on the last DPD of the chain. */
		sc->tx_tail->tx_dpd->dpd_fsh |= htole32(EX_DPD_DNIND);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap,
		    ((char *)sc->tx_tail->tx_dpd - (char *)sc->sc_dpd),
		    sizeof (struct ex_dpd),
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		ifp->if_flags |= IFF_OACTIVE;
		bus_space_write_2(iot, ioh, ELINK_COMMAND, ELINK_DNUNSTALL);
		bus_space_write_4(iot, ioh, ELINK_DNLISTPTR,
		    DPD_DMADDR(sc, sc->tx_head));

		/* trigger watchdog */
		ifp->if_timer = 5;
	}
}


int
ex_intr(void *arg)
{
	struct ex_softc *sc = arg;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	uint16_t stat;
	int ret = 0;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if ((ifp->if_flags & IFF_RUNNING) == 0 ||
	    !device_is_active(sc->sc_dev))
		return (0);

	for (;;) {
		stat = bus_space_read_2(iot, ioh, ELINK_STATUS);

		if ((stat & XL_WATCHED_INTERRUPTS) == 0) {
			if ((stat & INTR_LATCH) == 0) {
#if 0
				aprint_error_dev(sc->sc_dev,
				    "intr latch cleared\n");
#endif
				break;
			}
		}

		ret = 1;

		/*
		 * Acknowledge interrupts.
		 */
		bus_space_write_2(iot, ioh, ELINK_COMMAND, ACK_INTR |
		    (stat & (XL_WATCHED_INTERRUPTS | INTR_LATCH)));
		if (sc->intr_ack)
			(*sc->intr_ack)(sc);

		if (stat & HOST_ERROR) {
			aprint_error_dev(sc->sc_dev,
			    "adapter failure (%x)\n", stat);
			ex_reset(sc);
			ex_init(ifp);
			return 1;
		}
		if (stat & UPD_STATS) {
			ex_getstats(sc);
		}
		if (stat & TX_COMPLETE) {
			ex_txstat(sc);
#if 0
			if (stat & DN_COMPLETE)
				aprint_error_dev(sc->sc_dev,
				    "Ignoring Dn interrupt (%x)\n", stat);
#endif
			/*
			 * In some rare cases, both Tx Complete and
			 * Dn Complete bits are set.
However, the packet 1276 * has been reloaded in ex_txstat() and should not 1277 * handle the Dn Complete event here. 1278 * Hence the "else" below. 1279 */ 1280 } else if (stat & DN_COMPLETE) { 1281 struct ex_txdesc *txp, *ptxp = NULL; 1282 bus_dmamap_t txmap; 1283 1284 /* reset watchdog timer, was set in ex_start() */ 1285 ifp->if_timer = 0; 1286 1287 for (txp = sc->tx_head; txp != NULL; 1288 txp = txp->tx_next) { 1289 bus_dmamap_sync(sc->sc_dmat, 1290 sc->sc_dpd_dmamap, 1291 (char *)txp->tx_dpd - (char *)sc->sc_dpd, 1292 sizeof (struct ex_dpd), 1293 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1294 if (txp->tx_mbhead != NULL) { 1295 txmap = txp->tx_dmamap; 1296 bus_dmamap_sync(sc->sc_dmat, txmap, 1297 0, txmap->dm_mapsize, 1298 BUS_DMASYNC_POSTWRITE); 1299 bus_dmamap_unload(sc->sc_dmat, txmap); 1300 m_freem(txp->tx_mbhead); 1301 txp->tx_mbhead = NULL; 1302 } 1303 ptxp = txp; 1304 } 1305 1306 /* 1307 * Move finished tx buffers back to the tx free list. 1308 */ 1309 if (sc->tx_free) { 1310 sc->tx_ftail->tx_next = sc->tx_head; 1311 sc->tx_ftail = ptxp; 1312 } else 1313 sc->tx_ftail = sc->tx_free = sc->tx_head; 1314 1315 sc->tx_head = sc->tx_tail = NULL; 1316 ifp->if_flags &= ~IFF_OACTIVE; 1317 1318 if (sc->tx_succ_ok < 256) 1319 sc->tx_succ_ok++; 1320 } 1321 1322 if (stat & UP_COMPLETE) { 1323 struct ex_rxdesc *rxd; 1324 struct mbuf *m; 1325 struct ex_upd *upd; 1326 bus_dmamap_t rxmap; 1327 uint32_t pktstat; 1328 1329 rcvloop: 1330 rxd = sc->rx_head; 1331 rxmap = rxd->rx_dmamap; 1332 m = rxd->rx_mbhead; 1333 upd = rxd->rx_upd; 1334 1335 bus_dmamap_sync(sc->sc_dmat, rxmap, 0, 1336 rxmap->dm_mapsize, 1337 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1338 bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap, 1339 ((char *)upd - (char *)sc->sc_upd), 1340 sizeof (struct ex_upd), 1341 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1342 pktstat = le32toh(upd->upd_pktstatus); 1343 1344 if (pktstat & EX_UPD_COMPLETE) { 1345 /* 1346 * Remove first packet from the chain. 
1347 */ 1348 sc->rx_head = rxd->rx_next; 1349 rxd->rx_next = NULL; 1350 1351 /* 1352 * Add a new buffer to the receive chain. 1353 * If this fails, the old buffer is recycled 1354 * instead. 1355 */ 1356 if (ex_add_rxbuf(sc, rxd) == 0) { 1357 uint16_t total_len; 1358 1359 if (pktstat & 1360 ((sc->sc_ethercom.ec_capenable & 1361 ETHERCAP_VLAN_MTU) ? 1362 EX_UPD_ERR_VLAN : EX_UPD_ERR)) { 1363 ifp->if_ierrors++; 1364 m_freem(m); 1365 goto rcvloop; 1366 } 1367 1368 total_len = pktstat & EX_UPD_PKTLENMASK; 1369 if (total_len < 1370 sizeof(struct ether_header)) { 1371 m_freem(m); 1372 goto rcvloop; 1373 } 1374 m_set_rcvif(m, ifp); 1375 m->m_pkthdr.len = m->m_len = total_len; 1376 /* 1377 * Set the incoming checksum information for the packet. 1378 */ 1379 if ((sc->ex_conf & EX_CONF_90XB) != 0 && 1380 (pktstat & EX_UPD_IPCHECKED) != 0) { 1381 m->m_pkthdr.csum_flags |= M_CSUM_IPv4; 1382 if (pktstat & EX_UPD_IPCKSUMERR) 1383 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD; 1384 if (pktstat & EX_UPD_TCPCHECKED) { 1385 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4; 1386 if (pktstat & EX_UPD_TCPCKSUMERR) 1387 m->m_pkthdr.csum_flags |= 1388 M_CSUM_TCP_UDP_BAD; 1389 } else if (pktstat & EX_UPD_UDPCHECKED) { 1390 m->m_pkthdr.csum_flags |= M_CSUM_UDPv4; 1391 if (pktstat & EX_UPD_UDPCKSUMERR) 1392 m->m_pkthdr.csum_flags |= 1393 M_CSUM_TCP_UDP_BAD; 1394 } 1395 } 1396 if_percpuq_enqueue(ifp->if_percpuq, m); 1397 } 1398 goto rcvloop; 1399 } 1400 /* 1401 * Just in case we filled up all UPDs and the DMA engine 1402 * stalled. We could be more subtle about this. 
1403 */ 1404 if (bus_space_read_4(iot, ioh, ELINK_UPLISTPTR) == 0) { 1405 aprint_error_dev(sc->sc_dev, 1406 "uplistptr was 0\n"); 1407 ex_init(ifp); 1408 } else if (bus_space_read_4(iot, ioh, ELINK_UPPKTSTATUS) 1409 & 0x2000) { 1410 aprint_error_dev(sc->sc_dev, 1411 "receive stalled\n"); 1412 bus_space_write_2(iot, ioh, ELINK_COMMAND, 1413 ELINK_UPUNSTALL); 1414 } 1415 } 1416 1417 if (stat) 1418 rnd_add_uint32(&sc->rnd_source, stat); 1419 } 1420 1421 /* no more interrupts */ 1422 if (ret) 1423 if_schedule_deferred_start(ifp); 1424 return ret; 1425 } 1426 1427 static int 1428 ex_ifflags_cb(struct ethercom *ec) 1429 { 1430 struct ifnet *ifp = &ec->ec_if; 1431 struct ex_softc *sc = ifp->if_softc; 1432 int change = ifp->if_flags ^ sc->sc_if_flags; 1433 1434 if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0) 1435 return ENETRESET; 1436 else if ((change & IFF_PROMISC) != 0) 1437 ex_set_mc(sc); 1438 return 0; 1439 } 1440 1441 int 1442 ex_ioctl(struct ifnet *ifp, u_long cmd, void *data) 1443 { 1444 struct ex_softc *sc = ifp->if_softc; 1445 struct ifreq *ifr = (struct ifreq *)data; 1446 int s, error; 1447 1448 s = splnet(); 1449 1450 switch (cmd) { 1451 case SIOCSIFMEDIA: 1452 case SIOCGIFMEDIA: 1453 error = ifmedia_ioctl(ifp, ifr, &sc->ex_mii.mii_media, cmd); 1454 break; 1455 default: 1456 if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET) 1457 break; 1458 1459 error = 0; 1460 1461 if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI) 1462 ; 1463 else if (ifp->if_flags & IFF_RUNNING) { 1464 /* 1465 * Multicast list has changed; set the hardware filter 1466 * accordingly. 
1467 */ 1468 ex_set_mc(sc); 1469 } 1470 break; 1471 } 1472 1473 sc->sc_if_flags = ifp->if_flags; 1474 splx(s); 1475 return (error); 1476 } 1477 1478 void 1479 ex_getstats(struct ex_softc *sc) 1480 { 1481 bus_space_handle_t ioh = sc->sc_ioh; 1482 bus_space_tag_t iot = sc->sc_iot; 1483 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1484 uint8_t upperok; 1485 1486 GO_WINDOW(6); 1487 upperok = bus_space_read_1(iot, ioh, UPPER_FRAMES_OK); 1488 ifp->if_ipackets += bus_space_read_1(iot, ioh, RX_FRAMES_OK); 1489 ifp->if_ipackets += (upperok & 0x03) << 8; 1490 ifp->if_opackets += bus_space_read_1(iot, ioh, TX_FRAMES_OK); 1491 ifp->if_opackets += (upperok & 0x30) << 4; 1492 ifp->if_ierrors += bus_space_read_1(iot, ioh, RX_OVERRUNS); 1493 ifp->if_collisions += bus_space_read_1(iot, ioh, TX_COLLISIONS); 1494 /* 1495 * There seems to be no way to get the exact number of collisions, 1496 * this is the number that occurred at the very least. 1497 */ 1498 ifp->if_collisions += 2 * bus_space_read_1(iot, ioh, 1499 TX_AFTER_X_COLLISIONS); 1500 /* 1501 * Interface byte counts are counted by ether_input() and 1502 * ether_output(), so don't accumulate them here. Just 1503 * read the NIC counters so they don't generate overflow interrupts. 1504 * Upper byte counters are latched from reading the totals, so 1505 * they don't need to be read if we don't need their values. 
1506 */ 1507 (void)bus_space_read_2(iot, ioh, RX_TOTAL_OK); 1508 (void)bus_space_read_2(iot, ioh, TX_TOTAL_OK); 1509 1510 /* 1511 * Clear the following to avoid stats overflow interrupts 1512 */ 1513 (void)bus_space_read_1(iot, ioh, TX_DEFERRALS); 1514 (void)bus_space_read_1(iot, ioh, TX_AFTER_1_COLLISION); 1515 (void)bus_space_read_1(iot, ioh, TX_NO_SQE); 1516 (void)bus_space_read_1(iot, ioh, TX_CD_LOST); 1517 GO_WINDOW(4); 1518 (void)bus_space_read_1(iot, ioh, ELINK_W4_BADSSD); 1519 GO_WINDOW(1); 1520 } 1521 1522 void 1523 ex_printstats(struct ex_softc *sc) 1524 { 1525 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1526 1527 ex_getstats(sc); 1528 printf("in %llu out %llu ierror %llu oerror %llu ibytes %llu obytes " 1529 "%llu\n", (unsigned long long)ifp->if_ipackets, 1530 (unsigned long long)ifp->if_opackets, 1531 (unsigned long long)ifp->if_ierrors, 1532 (unsigned long long)ifp->if_oerrors, 1533 (unsigned long long)ifp->if_ibytes, 1534 (unsigned long long)ifp->if_obytes); 1535 } 1536 1537 void 1538 ex_tick(void *arg) 1539 { 1540 struct ex_softc *sc = arg; 1541 int s; 1542 1543 if (!device_is_active(sc->sc_dev)) 1544 return; 1545 1546 s = splnet(); 1547 1548 if (sc->ex_conf & EX_CONF_MII) 1549 mii_tick(&sc->ex_mii); 1550 1551 if (!(bus_space_read_2((sc)->sc_iot, (sc)->sc_ioh, ELINK_STATUS) 1552 & COMMAND_IN_PROGRESS)) 1553 ex_getstats(sc); 1554 1555 splx(s); 1556 1557 callout_reset(&sc->ex_mii_callout, hz, ex_tick, sc); 1558 } 1559 1560 void 1561 ex_reset(struct ex_softc *sc) 1562 { 1563 uint16_t val = GLOBAL_RESET; 1564 1565 if (sc->ex_conf & EX_CONF_RESETHACK) 1566 val |= 0x10; 1567 bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_COMMAND, val); 1568 /* 1569 * XXX apparently the command in progress bit can't be trusted 1570 * during a reset, so we just always wait this long. Fortunately 1571 * we normally only reset the chip during autoconfig. 
1572 */ 1573 delay(100000); 1574 ex_waitcmd(sc); 1575 } 1576 1577 void 1578 ex_watchdog(struct ifnet *ifp) 1579 { 1580 struct ex_softc *sc = ifp->if_softc; 1581 1582 log(LOG_ERR, "%s: device timeout\n", device_xname(sc->sc_dev)); 1583 ++sc->sc_ethercom.ec_if.if_oerrors; 1584 1585 ex_reset(sc); 1586 ex_init(ifp); 1587 } 1588 1589 void 1590 ex_stop(struct ifnet *ifp, int disable) 1591 { 1592 struct ex_softc *sc = ifp->if_softc; 1593 bus_space_tag_t iot = sc->sc_iot; 1594 bus_space_handle_t ioh = sc->sc_ioh; 1595 struct ex_txdesc *tx; 1596 struct ex_rxdesc *rx; 1597 int i; 1598 1599 bus_space_write_2(iot, ioh, ELINK_COMMAND, RX_DISABLE); 1600 bus_space_write_2(iot, ioh, ELINK_COMMAND, TX_DISABLE); 1601 bus_space_write_2(iot, ioh, ELINK_COMMAND, STOP_TRANSCEIVER); 1602 1603 for (tx = sc->tx_head ; tx != NULL; tx = tx->tx_next) { 1604 if (tx->tx_mbhead == NULL) 1605 continue; 1606 m_freem(tx->tx_mbhead); 1607 tx->tx_mbhead = NULL; 1608 bus_dmamap_unload(sc->sc_dmat, tx->tx_dmamap); 1609 tx->tx_dpd->dpd_fsh = tx->tx_dpd->dpd_nextptr = 0; 1610 bus_dmamap_sync(sc->sc_dmat, sc->sc_dpd_dmamap, 1611 ((char *)tx->tx_dpd - (char *)sc->sc_dpd), 1612 sizeof (struct ex_dpd), 1613 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1614 } 1615 sc->tx_tail = sc->tx_head = NULL; 1616 ex_init_txdescs(sc); 1617 1618 sc->rx_tail = sc->rx_head = 0; 1619 for (i = 0; i < EX_NUPD; i++) { 1620 rx = &sc->sc_rxdescs[i]; 1621 if (rx->rx_mbhead != NULL) { 1622 bus_dmamap_unload(sc->sc_dmat, rx->rx_dmamap); 1623 m_freem(rx->rx_mbhead); 1624 rx->rx_mbhead = NULL; 1625 } 1626 ex_add_rxbuf(sc, rx); 1627 } 1628 1629 bus_space_write_2(iot, ioh, ELINK_COMMAND, ACK_INTR | INTR_LATCH); 1630 1631 callout_stop(&sc->ex_mii_callout); 1632 if (sc->ex_conf & EX_CONF_MII) 1633 mii_down(&sc->ex_mii); 1634 1635 if (disable) 1636 ex_disable(sc); 1637 1638 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1639 sc->sc_if_flags = ifp->if_flags; 1640 ifp->if_timer = 0; 1641 } 1642 1643 static void 1644 ex_init_txdescs(struct 
ex_softc *sc) 1645 { 1646 int i; 1647 1648 for (i = 0; i < EX_NDPD; i++) { 1649 sc->sc_txdescs[i].tx_dmamap = sc->sc_tx_dmamaps[i]; 1650 sc->sc_txdescs[i].tx_dpd = &sc->sc_dpd[i]; 1651 if (i < EX_NDPD - 1) 1652 sc->sc_txdescs[i].tx_next = &sc->sc_txdescs[i + 1]; 1653 else 1654 sc->sc_txdescs[i].tx_next = NULL; 1655 } 1656 sc->tx_free = &sc->sc_txdescs[0]; 1657 sc->tx_ftail = &sc->sc_txdescs[EX_NDPD-1]; 1658 } 1659 1660 1661 int 1662 ex_activate(device_t self, enum devact act) 1663 { 1664 struct ex_softc *sc = device_private(self); 1665 1666 switch (act) { 1667 case DVACT_DEACTIVATE: 1668 if_deactivate(&sc->sc_ethercom.ec_if); 1669 return 0; 1670 default: 1671 return EOPNOTSUPP; 1672 } 1673 } 1674 1675 int 1676 ex_detach(struct ex_softc *sc) 1677 { 1678 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1679 struct ex_rxdesc *rxd; 1680 int i, s; 1681 1682 /* Succeed now if there's no work to do. */ 1683 if ((sc->ex_flags & EX_FLAGS_ATTACHED) == 0) 1684 return (0); 1685 1686 s = splnet(); 1687 /* Stop the interface. Callouts are stopped in it. */ 1688 ex_stop(ifp, 1); 1689 splx(s); 1690 1691 /* Destroy our callout. */ 1692 callout_destroy(&sc->ex_mii_callout); 1693 1694 if (sc->ex_conf & EX_CONF_MII) { 1695 /* Detach all PHYs */ 1696 mii_detach(&sc->ex_mii, MII_PHY_ANY, MII_OFFSET_ANY); 1697 } 1698 1699 /* Delete all remaining media. 
*/ 1700 ifmedia_delete_instance(&sc->ex_mii.mii_media, IFM_INST_ANY); 1701 1702 rnd_detach_source(&sc->rnd_source); 1703 ether_ifdetach(ifp); 1704 if_detach(ifp); 1705 1706 for (i = 0; i < EX_NUPD; i++) { 1707 rxd = &sc->sc_rxdescs[i]; 1708 if (rxd->rx_mbhead != NULL) { 1709 bus_dmamap_unload(sc->sc_dmat, rxd->rx_dmamap); 1710 m_freem(rxd->rx_mbhead); 1711 rxd->rx_mbhead = NULL; 1712 } 1713 } 1714 for (i = 0; i < EX_NUPD; i++) 1715 bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_dmamaps[i]); 1716 for (i = 0; i < EX_NDPD; i++) 1717 bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_dmamaps[i]); 1718 bus_dmamap_unload(sc->sc_dmat, sc->sc_dpd_dmamap); 1719 bus_dmamap_destroy(sc->sc_dmat, sc->sc_dpd_dmamap); 1720 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_dpd, 1721 EX_NDPD * sizeof (struct ex_dpd)); 1722 bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_drseg); 1723 bus_dmamap_unload(sc->sc_dmat, sc->sc_upd_dmamap); 1724 bus_dmamap_destroy(sc->sc_dmat, sc->sc_upd_dmamap); 1725 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_upd, 1726 EX_NUPD * sizeof (struct ex_upd)); 1727 bus_dmamem_free(sc->sc_dmat, &sc->sc_useg, sc->sc_urseg); 1728 1729 pmf_device_deregister(sc->sc_dev); 1730 1731 return (0); 1732 } 1733 1734 /* 1735 * Before reboots, reset card completely. 1736 */ 1737 static bool 1738 ex_shutdown(device_t self, int flags) 1739 { 1740 struct ex_softc *sc = device_private(self); 1741 1742 ex_stop(&sc->sc_ethercom.ec_if, 1); 1743 /* 1744 * Make sure the interface is powered up when we reboot, 1745 * otherwise firmware on some systems gets really confused. 1746 */ 1747 (void) ex_enable(sc); 1748 return true; 1749 } 1750 1751 /* 1752 * Read EEPROM data. 1753 * XXX what to do if EEPROM doesn't unbusy? 1754 */ 1755 uint16_t 1756 ex_read_eeprom(struct ex_softc *sc, int offset) 1757 { 1758 bus_space_tag_t iot = sc->sc_iot; 1759 bus_space_handle_t ioh = sc->sc_ioh; 1760 uint16_t data = 0, cmd = READ_EEPROM; 1761 int off; 1762 1763 off = sc->ex_conf & EX_CONF_EEPROM_OFF ? 
0x30 : 0; 1764 cmd = sc->ex_conf & EX_CONF_EEPROM_8BIT ? READ_EEPROM8 : READ_EEPROM; 1765 1766 GO_WINDOW(0); 1767 if (ex_eeprom_busy(sc)) 1768 goto out; 1769 bus_space_write_2(iot, ioh, ELINK_W0_EEPROM_COMMAND, 1770 cmd | (off + (offset & 0x3f))); 1771 if (ex_eeprom_busy(sc)) 1772 goto out; 1773 data = bus_space_read_2(iot, ioh, ELINK_W0_EEPROM_DATA); 1774 out: 1775 return data; 1776 } 1777 1778 static int 1779 ex_eeprom_busy(struct ex_softc *sc) 1780 { 1781 bus_space_tag_t iot = sc->sc_iot; 1782 bus_space_handle_t ioh = sc->sc_ioh; 1783 int i = 100; 1784 1785 while (i--) { 1786 if (!(bus_space_read_2(iot, ioh, ELINK_W0_EEPROM_COMMAND) & 1787 EEPROM_BUSY)) 1788 return 0; 1789 delay(100); 1790 } 1791 aprint_error_dev(sc->sc_dev, "eeprom stays busy.\n"); 1792 return (1); 1793 } 1794 1795 /* 1796 * Create a new rx buffer and add it to the 'soft' rx list. 1797 */ 1798 static int 1799 ex_add_rxbuf(struct ex_softc *sc, struct ex_rxdesc *rxd) 1800 { 1801 struct mbuf *m, *oldm; 1802 bus_dmamap_t rxmap; 1803 int error, rval = 0; 1804 1805 oldm = rxd->rx_mbhead; 1806 rxmap = rxd->rx_dmamap; 1807 1808 MGETHDR(m, M_DONTWAIT, MT_DATA); 1809 if (m != NULL) { 1810 MCLGET(m, M_DONTWAIT); 1811 if ((m->m_flags & M_EXT) == 0) { 1812 m_freem(m); 1813 if (oldm == NULL) 1814 return 1; 1815 m = oldm; 1816 MRESETDATA(m); 1817 rval = 1; 1818 } 1819 } else { 1820 if (oldm == NULL) 1821 return 1; 1822 m = oldm; 1823 MRESETDATA(m); 1824 rval = 1; 1825 } 1826 1827 /* 1828 * Setup the DMA map for this receive buffer. 1829 */ 1830 if (m != oldm) { 1831 if (oldm != NULL) 1832 bus_dmamap_unload(sc->sc_dmat, rxmap); 1833 error = bus_dmamap_load(sc->sc_dmat, rxmap, 1834 m->m_ext.ext_buf, MCLBYTES, NULL, 1835 BUS_DMA_READ|BUS_DMA_NOWAIT); 1836 if (error) { 1837 aprint_error_dev(sc->sc_dev, "can't load rx buffer, error = %d\n", 1838 error); 1839 panic("ex_add_rxbuf"); /* XXX */ 1840 } 1841 } 1842 1843 /* 1844 * Align for data after 14 byte header. 
1845 */ 1846 m->m_data += 2; 1847 1848 rxd->rx_mbhead = m; 1849 rxd->rx_upd->upd_pktstatus = htole32(MCLBYTES - 2); 1850 rxd->rx_upd->upd_frags[0].fr_addr = 1851 htole32(rxmap->dm_segs[0].ds_addr + 2); 1852 rxd->rx_upd->upd_nextptr = 0; 1853 1854 /* 1855 * Attach it to the end of the list. 1856 */ 1857 if (sc->rx_head != NULL) { 1858 sc->rx_tail->rx_next = rxd; 1859 sc->rx_tail->rx_upd->upd_nextptr = htole32(sc->sc_upddma + 1860 ((char *)rxd->rx_upd - (char *)sc->sc_upd)); 1861 bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap, 1862 (char *)sc->rx_tail->rx_upd - (char *)sc->sc_upd, 1863 sizeof (struct ex_upd), 1864 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1865 } else { 1866 sc->rx_head = rxd; 1867 } 1868 sc->rx_tail = rxd; 1869 1870 bus_dmamap_sync(sc->sc_dmat, rxmap, 0, rxmap->dm_mapsize, 1871 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1872 bus_dmamap_sync(sc->sc_dmat, sc->sc_upd_dmamap, 1873 ((char *)rxd->rx_upd - (char *)sc->sc_upd), 1874 sizeof (struct ex_upd), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1875 return (rval); 1876 } 1877 1878 uint32_t 1879 ex_mii_bitbang_read(device_t self) 1880 { 1881 struct ex_softc *sc = device_private(self); 1882 1883 /* We're already in Window 4. */ 1884 return (bus_space_read_2(sc->sc_iot, sc->sc_ioh, ELINK_W4_PHYSMGMT)); 1885 } 1886 1887 void 1888 ex_mii_bitbang_write(device_t self, uint32_t val) 1889 { 1890 struct ex_softc *sc = device_private(self); 1891 1892 /* We're already in Window 4. 
*/ 1893 bus_space_write_2(sc->sc_iot, sc->sc_ioh, ELINK_W4_PHYSMGMT, val); 1894 } 1895 1896 int 1897 ex_mii_readreg(device_t v, int phy, int reg) 1898 { 1899 struct ex_softc *sc = device_private(v); 1900 int val; 1901 1902 if ((sc->ex_conf & EX_CONF_INTPHY) && phy != ELINK_INTPHY_ID) 1903 return 0; 1904 1905 GO_WINDOW(4); 1906 1907 val = mii_bitbang_readreg(v, &ex_mii_bitbang_ops, phy, reg); 1908 1909 GO_WINDOW(1); 1910 1911 return (val); 1912 } 1913 1914 void 1915 ex_mii_writereg(device_t v, int phy, int reg, int data) 1916 { 1917 struct ex_softc *sc = device_private(v); 1918 1919 GO_WINDOW(4); 1920 1921 mii_bitbang_writereg(v, &ex_mii_bitbang_ops, phy, reg, data); 1922 1923 GO_WINDOW(1); 1924 } 1925 1926 void 1927 ex_mii_statchg(struct ifnet *ifp) 1928 { 1929 struct ex_softc *sc = ifp->if_softc; 1930 bus_space_tag_t iot = sc->sc_iot; 1931 bus_space_handle_t ioh = sc->sc_ioh; 1932 int mctl; 1933 1934 GO_WINDOW(3); 1935 mctl = bus_space_read_2(iot, ioh, ELINK_W3_MAC_CONTROL); 1936 if (sc->ex_mii.mii_media_active & IFM_FDX) 1937 mctl |= MAC_CONTROL_FDX; 1938 else 1939 mctl &= ~MAC_CONTROL_FDX; 1940 bus_space_write_2(iot, ioh, ELINK_W3_MAC_CONTROL, mctl); 1941 GO_WINDOW(1); /* back to operating window */ 1942 } 1943 1944 int 1945 ex_enable(struct ex_softc *sc) 1946 { 1947 if (sc->enabled == 0 && sc->enable != NULL) { 1948 if ((*sc->enable)(sc) != 0) { 1949 aprint_error_dev(sc->sc_dev, "device enable failed\n"); 1950 return (EIO); 1951 } 1952 sc->enabled = 1; 1953 } 1954 return (0); 1955 } 1956 1957 void 1958 ex_disable(struct ex_softc *sc) 1959 { 1960 if (sc->enabled == 1 && sc->disable != NULL) { 1961 (*sc->disable)(sc); 1962 sc->enabled = 0; 1963 } 1964 } 1965 1966