/*	$NetBSD: hme.c,v 1.76 2009/04/16 14:39:11 tsutsui Exp $	*/

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * HME Ethernet module driver.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: hme.c,v 1.76 2009/04/16 14:39:11 tsutsui Exp $");

/* #define HMEDEBUG */

#include "opt_inet.h"
#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#ifdef INET
#include <net/if_vlanvar.h>
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#include <net/bpfdesc.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <sys/bus.h>

#include <dev/ic/hmereg.h>
#include <dev/ic/hmevar.h>

void		hme_start(struct ifnet *);
void		hme_stop(struct hme_softc *, bool);
int		hme_ioctl(struct ifnet *, u_long, void *);
void		hme_tick(void *);
void		hme_watchdog(struct ifnet *);
void		hme_shutdown(void *);
int		hme_init(struct hme_softc *);
void		hme_meminit(struct hme_softc *);
void		hme_mifinit(struct hme_softc *);
void		hme_reset(struct hme_softc *);
void		hme_setladrf(struct hme_softc *);

/* MII methods & callbacks */
static int	hme_mii_readreg(struct device *, int, int);
static void	hme_mii_writereg(struct device *, int, int, int);
static void	hme_mii_statchg(struct device *);

int		hme_mediachange(struct ifnet *);

struct mbuf	*hme_get(struct hme_softc *, int, uint32_t);
int		hme_put(struct hme_softc *, int, struct mbuf *);
void		hme_read(struct hme_softc *, int, uint32_t);
int		hme_eint(struct hme_softc *, u_int);
int		hme_rint(struct hme_softc *);
int		hme_tint(struct hme_softc *);

/* Default buffer copy routines */
void		hme_copytobuf_contig(struct hme_softc *, void *, int, int);
void		hme_copyfrombuf_contig(struct hme_softc *, void *, int, int);
void		hme_zerobuf_contig(struct hme_softc *, int, int);

void
hme_config(struct hme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *child;
	bus_dma_tag_t dmatag = sc->sc_dmatag;
	bus_dma_segment_t seg;
	bus_size_t size;
	int rseg, error;

	/*
	 * HME common initialization.
	 *
	 * hme_softc fields that must be initialized by the front-end:
	 *
	 * the bus tag:
	 *	sc_bustag
	 *
	 * the DMA bus tag:
	 *	sc_dmatag
	 *
	 * the bus handles:
	 *	sc_seb		(Shared Ethernet Block registers)
	 *	sc_erx		(Receiver Unit registers)
	 *	sc_etx		(Transmitter Unit registers)
	 *	sc_mac		(MAC registers)
	 *	sc_mif		(Management Interface registers)
	 *
	 * the maximum bus burst size:
	 *	sc_burst
	 *
	 * (notyet: DMA capable memory for the ring descriptors & packet buffers:
	 *	rb_membase, rb_dmabase)
	 *
	 * the local Ethernet address:
	 *	sc_enaddr
	 */

	/* Make sure the chip is stopped. */
	hme_stop(sc, true);

	/*
	 * Allocate descriptors and buffers
	 * XXX - do all this differently.. and more configurably,
	 * eg. use things as `dma_load_mbuf()' on transmit,
	 * and a pool of `EXTMEM' mbufs (with buffers DMA-mapped
	 * all the time) on the receiver side.
	 *
	 * Note: receive buffers must be 64-byte aligned.
	 * Also, apparently, the buffers must extend to a DMA burst
	 * boundary beyond the maximum packet size.
	 */
#define _HME_NDESC	128
#define _HME_BUFSZ	1600

	/* Note: the # of descriptors must be a multiple of 16 */
	sc->sc_rb.rb_ntbuf = _HME_NDESC;
	sc->sc_rb.rb_nrbuf = _HME_NDESC;

	/*
	 * Allocate DMA capable memory
	 * Buffer descriptors must be aligned on a 2048 byte boundary;
	 * take this into account when calculating the size. Note that
	 * the maximum number of descriptors (256) occupies 2048 bytes,
	 * so we allocate that much regardless of _HME_NDESC.
	 */
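	/*
	 * Resulting layout of the DMA area (a sketch, assuming the
	 * 2048-byte roundups done in hme_meminit() below):
	 *
	 *	offset 0	TX descriptors (padded to 2048 bytes)
	 *	offset 2048	RX descriptors (padded to 2048 bytes)
	 *	offset 4096	TX buffers (rb_ntbuf * _HME_BUFSZ)
	 *	then		RX buffers (rb_nrbuf * _HME_BUFSZ)
	 */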
	size =	2048 +					/* TX descriptors */
		2048 +					/* RX descriptors */
		sc->sc_rb.rb_ntbuf * _HME_BUFSZ +	/* TX buffers */
		sc->sc_rb.rb_nrbuf * _HME_BUFSZ;	/* RX buffers */

	/* Allocate DMA buffer */
	if ((error = bus_dmamem_alloc(dmatag, size,
	    2048, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(&sc->sc_dev, "DMA buffer alloc error %d\n",
		    error);
		return;
	}

	/* Map DMA memory in CPU addressable space */
	if ((error = bus_dmamem_map(dmatag, &seg, rseg, size,
	    &sc->sc_rb.rb_membase,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(&sc->sc_dev, "DMA buffer map error %d\n",
		    error);
		/* The map is not created yet, so there is nothing to unload;
		   just release the raw memory. */
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}

	if ((error = bus_dmamap_create(dmatag, size, 1, size, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
		aprint_error_dev(&sc->sc_dev, "DMA map create error %d\n",
		    error);
		return;
	}

	/* Load the buffer */
	if ((error = bus_dmamap_load(dmatag, sc->sc_dmamap,
	    sc->sc_rb.rb_membase, size, NULL,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(&sc->sc_dev, "DMA buffer map load error %d\n",
		    error);
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}
	sc->sc_rb.rb_dmabase = sc->sc_dmamap->dm_segs[0].ds_addr;

	printf("%s: Ethernet address %s\n", device_xname(&sc->sc_dev),
	    ether_sprintf(sc->sc_enaddr));

	/* Initialize ifnet structure. */
	strlcpy(ifp->if_xname, device_xname(&sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_start = hme_start;
	ifp->if_ioctl = hme_ioctl;
	ifp->if_watchdog = hme_watchdog;
	ifp->if_flags =
	    IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
	sc->sc_if_flags = ifp->if_flags;
	ifp->if_capabilities |=
	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
	IFQ_SET_READY(&ifp->if_snd);

	/* Initialize ifmedia structures and MII info */
	mii->mii_ifp = ifp;
	mii->mii_readreg = hme_mii_readreg;
	mii->mii_writereg = hme_mii_writereg;
	mii->mii_statchg = hme_mii_statchg;

	sc->sc_ethercom.ec_mii = mii;
	ifmedia_init(&mii->mii_media, 0, hme_mediachange, ether_mediastatus);

	hme_mifinit(sc);

	mii_attach(&sc->sc_dev, mii, 0xffffffff,
	    MII_PHY_ANY, MII_OFFSET_ANY, MIIF_FORCEANEG);

	child = LIST_FIRST(&mii->mii_phys);
	if (child == NULL) {
		/* No PHY attached */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
	} else {
		/*
		 * Walk along the list of attached MII devices and
		 * establish an `MII instance' to `phy number'
		 * mapping. We'll use this mapping in media change
		 * requests to determine which phy to use to program
		 * the MIF configuration register.
		 */
		for (; child != NULL; child = LIST_NEXT(child, mii_list)) {
			/*
			 * Note: we support just two PHYs: the built-in
			 * internal device and an external on the MII
			 * connector.
			 */
			if (child->mii_phy > 1 || child->mii_inst > 1) {
				aprint_error_dev(&sc->sc_dev,
				    "cannot accommodate MII device %s"
				    " at phy %d, instance %d\n",
				    device_xname(child->mii_dev),
				    child->mii_phy, child->mii_inst);
				continue;
			}

			sc->sc_phys[child->mii_inst] = child->mii_phy;
		}
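		/*
		 * For illustration (the actual numbering depends on probe
		 * order): instance 0 could map to the internal PHY
		 * (HME_PHYAD_INTERNAL) and instance 1 to the external one;
		 * hme_mifinit() and hme_mediachange() then consult sc_phys[]
		 * to set the MIF PHY-select bit for the chosen instance.
		 */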
		/*
		 * XXX - we can really do the following ONLY if the
		 * phy indeed has the auto negotiation capability!!
		 */
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
	}

	/* claim 802.1q capability */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

	sc->sc_sh = shutdownhook_establish(hme_shutdown, sc);
	if (sc->sc_sh == NULL)
		panic("hme_config: can't establish shutdownhook");

#if NRND > 0
	rnd_attach_source(&sc->rnd_source, device_xname(&sc->sc_dev),
	    RND_TYPE_NET, 0);
#endif

	callout_init(&sc->sc_tick_ch, 0);
}

void
hme_tick(void *arg)
{
	struct hme_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
}

void
hme_reset(struct hme_softc *sc)
{
	int s;

	s = splnet();
	(void)hme_init(sc);
	splx(s);
}

void
hme_stop(struct hme_softc *sc, bool chip_only)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_seb;
	int n;

	if (!chip_only) {
		callout_stop(&sc->sc_tick_ch);
		mii_down(&sc->sc_mii);
	}

	/* Mask all interrupts */
	bus_space_write_4(t, seb, HME_SEBI_IMASK, 0xffffffff);

	/* Reset transmitter and receiver */
	bus_space_write_4(t, seb, HME_SEBI_RESET,
	    (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX));

	for (n = 0; n < 20; n++) {
		uint32_t v = bus_space_read_4(t, seb, HME_SEBI_RESET);
		if ((v & (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)) == 0)
			return;
		DELAY(20);
	}

	printf("%s: hme_stop: reset failed\n", device_xname(&sc->sc_dev));
}

void
hme_meminit(struct hme_softc *sc)
{
	bus_addr_t txbufdma, rxbufdma;
	bus_addr_t dma;
	char *p;
	unsigned int ntbuf, nrbuf, i;
	struct hme_ring *hr = &sc->sc_rb;

	p = hr->rb_membase;
	dma = hr->rb_dmabase;

	ntbuf = hr->rb_ntbuf;
	nrbuf = hr->rb_nrbuf;

	/*
	 * Allocate transmit descriptors
	 */
	hr->rb_txd = p;
	hr->rb_txddma = dma;
	p += ntbuf * HME_XD_SIZE;
	dma += ntbuf * HME_XD_SIZE;
	/* We have reserved descriptor space until the next 2048 byte boundary.*/
	dma = (bus_addr_t)roundup((u_long)dma, 2048);
	p = (void *)roundup((u_long)p, 2048);

	/*
	 * Allocate receive descriptors
	 */
	hr->rb_rxd = p;
	hr->rb_rxddma = dma;
	p += nrbuf * HME_XD_SIZE;
	dma += nrbuf * HME_XD_SIZE;
	/* Again move forward to the next 2048 byte boundary.*/
	dma = (bus_addr_t)roundup((u_long)dma, 2048);
	p = (void *)roundup((u_long)p, 2048);

	/*
	 * Allocate transmit buffers
	 */
	hr->rb_txbuf = p;
	txbufdma = dma;
	p += ntbuf * _HME_BUFSZ;
	dma += ntbuf * _HME_BUFSZ;

	/*
	 * Allocate receive buffers
	 */
	hr->rb_rxbuf = p;
	rxbufdma = dma;
	p += nrbuf * _HME_BUFSZ;
	dma += nrbuf * _HME_BUFSZ;

	/*
	 * Initialize transmit buffer descriptors
	 */
	for (i = 0; i < ntbuf; i++) {
		HME_XD_SETADDR(sc->sc_pci, hr->rb_txd, i, txbufdma + i * _HME_BUFSZ);
		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, i, 0);
	}

	/*
	 * Initialize receive buffer descriptors
	 */
	for (i = 0; i < nrbuf; i++) {
		HME_XD_SETADDR(sc->sc_pci, hr->rb_rxd, i, rxbufdma + i * _HME_BUFSZ);
		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_rxd, i,
		    HME_XD_OWN | HME_XD_ENCODE_RSIZE(_HME_BUFSZ));
	}

	hr->rb_tdhead = hr->rb_tdtail = 0;
	hr->rb_td_nbusy = 0;
	hr->rb_rdtail = 0;
}
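/*
 * A brief sketch of the descriptor ownership protocol used throughout:
 * setting HME_XD_OWN in a descriptor's flag word hands that slot to the
 * chip, which clears the bit when it is done with it. hme_start() and
 * hme_tint() apply this to the TX ring; hme_meminit() above and
 * hme_rint() apply it to the RX ring.
 */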
/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 */
int
hme_init(struct hme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_seb;
	bus_space_handle_t etx = sc->sc_etx;
	bus_space_handle_t erx = sc->sc_erx;
	bus_space_handle_t mac = sc->sc_mac;
	uint8_t *ea;
	uint32_t v;
	int rc;

	/*
	 * Initialization sequence. The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.
	 */

	/* step 1 & 2. Reset the Ethernet Channel */
	hme_stop(sc, false);

	/* Re-initialize the MIF */
	hme_mifinit(sc);

	/* Call MI reset function if any */
	if (sc->sc_hwreset)
		(*sc->sc_hwreset)(sc);

#if 0
	/* Mask all MIF interrupts, just in case */
	bus_space_write_4(t, mif, HME_MIFI_IMASK, 0xffff);
#endif

	/* step 3. Setup data structures in host memory */
	hme_meminit(sc);

	/* step 4. TX MAC registers & counters */
	bus_space_write_4(t, mac, HME_MACI_NCCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_FCCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_EXCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_LTCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_TXSIZE,
	    (sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) ?
	    ETHER_VLAN_ENCAP_LEN + ETHER_MAX_LEN : ETHER_MAX_LEN);
	sc->sc_ec_capenable = sc->sc_ethercom.ec_capenable;

	/* Load station MAC address */
	ea = sc->sc_enaddr;
	bus_space_write_4(t, mac, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1]);
	bus_space_write_4(t, mac, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3]);
	bus_space_write_4(t, mac, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5]);

	/*
	 * Init seed for backoff
	 * (source suggested by manual: low 10 bits of MAC address)
	 */
	v = ((ea[4] << 8) | ea[5]) & 0x3fff;
	bus_space_write_4(t, mac, HME_MACI_RANDSEED, v);

	/* Note: Accepting power-on default for other MAC registers here.. */

	/* step 5. RX MAC registers & counters */
	hme_setladrf(sc);

	/* step 6 & 7. Program Descriptor Ring Base Addresses */
	bus_space_write_4(t, etx, HME_ETXI_RING, sc->sc_rb.rb_txddma);
	bus_space_write_4(t, etx, HME_ETXI_RSIZE, sc->sc_rb.rb_ntbuf);

	bus_space_write_4(t, erx, HME_ERXI_RING, sc->sc_rb.rb_rxddma);
	bus_space_write_4(t, mac, HME_MACI_RXSIZE,
	    (sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) ?
	    ETHER_VLAN_ENCAP_LEN + ETHER_MAX_LEN : ETHER_MAX_LEN);

	/* step 8. Global Configuration & Interrupt Mask */
	bus_space_write_4(t, seb, HME_SEBI_IMASK,
	    ~(
	      /*HME_SEB_STAT_GOTFRAME | HME_SEB_STAT_SENTFRAME |*/
	      HME_SEB_STAT_HOSTTOTX |
	      HME_SEB_STAT_RXTOHOST |
	      HME_SEB_STAT_TXALL |
	      HME_SEB_STAT_TXPERR |
	      HME_SEB_STAT_RCNTEXP |
	      /*HME_SEB_STAT_MIFIRQ |*/
	      HME_SEB_STAT_ALL_ERRORS ));

	switch (sc->sc_burst) {
	default:
		v = 0;
		break;
	case 16:
		v = HME_SEB_CFG_BURST16;
		break;
	case 32:
		v = HME_SEB_CFG_BURST32;
		break;
	case 64:
		v = HME_SEB_CFG_BURST64;
		break;
	}
	bus_space_write_4(t, seb, HME_SEBI_CFG, v);
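	/*
	 * Note on HME_SEBI_IMASK, as also used in hme_stop(): a set bit
	 * masks (disables) the corresponding interrupt, so the complemented
	 * value written above enables exactly the conditions listed.
	 */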
	/* step 9. ETX Configuration: use mostly default values */

	/* Enable DMA */
	v = bus_space_read_4(t, etx, HME_ETXI_CFG);
	v |= HME_ETX_CFG_DMAENABLE;
	bus_space_write_4(t, etx, HME_ETXI_CFG, v);

	/* Transmit Descriptor ring size: in increments of 16 */
	bus_space_write_4(t, etx, HME_ETXI_RSIZE, _HME_NDESC / 16 - 1);

	/* step 10. ERX Configuration */
	v = bus_space_read_4(t, erx, HME_ERXI_CFG);

	/* Encode Receive Descriptor ring size: four possible values */
	switch (_HME_NDESC /*XXX*/) {
	case 32:
		v |= HME_ERX_CFG_RINGSIZE32;
		break;
	case 64:
		v |= HME_ERX_CFG_RINGSIZE64;
		break;
	case 128:
		v |= HME_ERX_CFG_RINGSIZE128;
		break;
	case 256:
		v |= HME_ERX_CFG_RINGSIZE256;
		break;
	default:
		printf("hme: invalid Receive Descriptor ring size\n");
		break;
	}

	/* Enable DMA */
	v |= HME_ERX_CFG_DMAENABLE;

	/* set h/w rx checksum start offset (# of half-words) */
#ifdef INET
	v |= (((ETHER_HDR_LEN + sizeof(struct ip)) / sizeof(uint16_t))
	      << HME_ERX_CFG_CSUMSHIFT) &
	     HME_ERX_CFG_CSUMSTART;
#endif
	bus_space_write_4(t, erx, HME_ERXI_CFG, v);

	/* step 11. XIF Configuration */
	v = bus_space_read_4(t, mac, HME_MACI_XIF);
	v |= HME_MAC_XIF_OE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);

	/* step 12. RX_MAC Configuration Register */
	v = bus_space_read_4(t, mac, HME_MACI_RXCFG);
	v |= HME_MAC_RXCFG_ENABLE | HME_MAC_RXCFG_PSTRIP;
	bus_space_write_4(t, mac, HME_MACI_RXCFG, v);

	/* step 13. TX_MAC Configuration Register */
	v = bus_space_read_4(t, mac, HME_MACI_TXCFG);
	v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP);
	bus_space_write_4(t, mac, HME_MACI_TXCFG, v);

	/* step 14. Issue Transmit Pending command */

	/* Call MI initialization function if any */
	if (sc->sc_hwinit)
		(*sc->sc_hwinit)(sc);

	/* Set the current media. */
	if ((rc = hme_mediachange(ifp)) != 0)
		return rc;

	/* Start the one second timer. */
	callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	sc->sc_if_flags = ifp->if_flags;
	ifp->if_timer = 0;
	hme_start(ifp);
	return 0;
}

/*
 * Routine to copy from mbuf chain to transmit buffer in
 * network buffer memory.
 * Returns the amount of data copied.
 */
int
hme_put(struct hme_softc *sc, int ri, struct mbuf *m)
	/* ri: Ring index */
{
	struct mbuf *n;
	int len, tlen = 0;
	char *bp;

	bp = (char *)sc->sc_rb.rb_txbuf + (ri % sc->sc_rb.rb_ntbuf) * _HME_BUFSZ;
	for (; m; m = n) {
		len = m->m_len;
		if (len == 0) {
			MFREE(m, n);
			continue;
		}
		memcpy(bp, mtod(m, void *), len);
		bp += len;
		tlen += len;
		MFREE(m, n);
	}
	return (tlen);
}
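/*
 * Note on the copy strategy (see also the XXX comment in hme_config()):
 * both hme_put() above and hme_get() below bounce packet data through
 * the fixed _HME_BUFSZ slots carved out in hme_meminit(); mbufs are
 * never DMA-mapped directly.
 */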
/*
 * Pull data off an interface.
 * Len is length of data, with local net header stripped.
 * We copy the data into mbufs. When full cluster sized units are present
 * we copy into clusters.
 */
struct mbuf *
hme_get(struct hme_softc *sc, int ri, uint32_t flags)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m, *m0, *newm;
	char *bp;
	int len, totlen;
#ifdef INET
	int csum_flags;
#endif

	totlen = HME_XD_DECODE_RSIZE(flags);
	MGETHDR(m0, M_DONTWAIT, MT_DATA);
	if (m0 == 0)
		return (0);
	m0->m_pkthdr.rcvif = ifp;
	m0->m_pkthdr.len = totlen;
	len = MHLEN;
	m = m0;

	bp = (char *)sc->sc_rb.rb_rxbuf + (ri % sc->sc_rb.rb_nrbuf) * _HME_BUFSZ;

	while (totlen > 0) {
		if (totlen >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0)
				goto bad;
			len = MCLBYTES;
		}

		if (m == m0) {
			char *newdata = (char *)
			    ALIGN(m->m_data + sizeof(struct ether_header)) -
			    sizeof(struct ether_header);
			len -= newdata - m->m_data;
			m->m_data = newdata;
		}

		m->m_len = len = min(totlen, len);
		memcpy(mtod(m, void *), bp, len);
		bp += len;

		totlen -= len;
		if (totlen > 0) {
			MGET(newm, M_DONTWAIT, MT_DATA);
			if (newm == 0)
				goto bad;
			len = MLEN;
			m = m->m_next = newm;
		}
	}

#ifdef INET
	/* hardware checksum */
	csum_flags = 0;
	if (ifp->if_csum_flags_rx & (M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
		struct ether_header *eh;
		struct ether_vlan_header *evh;
		struct ip *ip;
		struct udphdr *uh;
		uint16_t *opts;
		int32_t hlen, pktlen;
		uint32_t csum_data;

		eh = mtod(m0, struct ether_header *);
		if (ntohs(eh->ether_type) == ETHERTYPE_IP) {
			ip = (struct ip *)((char *)eh + ETHER_HDR_LEN);
			pktlen = m0->m_pkthdr.len - ETHER_HDR_LEN;
		} else if (ntohs(eh->ether_type) == ETHERTYPE_VLAN) {
			evh = (struct ether_vlan_header *)eh;
			if (ntohs(evh->evl_proto) != ETHERTYPE_IP)
				goto swcsum;
			ip = (struct ip *)((char *)eh + ETHER_HDR_LEN +
			    ETHER_VLAN_ENCAP_LEN);
			pktlen = m0->m_pkthdr.len -
			    ETHER_HDR_LEN - ETHER_VLAN_ENCAP_LEN;
		} else
			goto swcsum;

		/* IPv4 only */
		if (ip->ip_v != IPVERSION)
			goto swcsum;

		hlen = ip->ip_hl << 2;
		if (hlen < sizeof(struct ip))
			goto swcsum;

		/*
		 * bail if too short, has random trailing garbage, truncated,
		 * fragment, or has ethernet pad.
		 */
		if (ntohs(ip->ip_len) < hlen ||
		    ntohs(ip->ip_len) != pktlen ||
		    (ntohs(ip->ip_off) & (IP_MF | IP_OFFMASK)) != 0)
			goto swcsum;

		switch (ip->ip_p) {
		case IPPROTO_TCP:
			if ((ifp->if_csum_flags_rx & M_CSUM_TCPv4) == 0)
				goto swcsum;
			if (pktlen < (hlen + sizeof(struct tcphdr)))
				goto swcsum;
			csum_flags =
			    M_CSUM_TCPv4 | M_CSUM_DATA | M_CSUM_NO_PSEUDOHDR;
			break;
		case IPPROTO_UDP:
			if ((ifp->if_csum_flags_rx & M_CSUM_UDPv4) == 0)
				goto swcsum;
			if (pktlen < (hlen + sizeof(struct udphdr)))
				goto swcsum;
			uh = (struct udphdr *)((char *)ip + hlen);
			/* no checksum */
			if (uh->uh_sum == 0)
				goto swcsum;
			csum_flags =
			    M_CSUM_UDPv4 | M_CSUM_DATA | M_CSUM_NO_PSEUDOHDR;
			break;
		default:
			goto swcsum;
		}

		/* w/ M_CSUM_NO_PSEUDOHDR, the uncomplemented sum is expected */
		csum_data = ~flags & HME_XD_RXCKSUM;

		/*
		 * If data offset is different from RX cksum start offset,
		 * we have to deduct them.
		 */
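		/*
		 * Illustrative example: with 8 bytes of IP options the chip
		 * started summing at the fixed offset programmed in
		 * hme_init() (ETHER_HDR_LEN + sizeof(struct ip)), so the
		 * option words were included in the hardware sum; adding
		 * the one's complement of their sum below cancels them.
		 */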
		hlen = ((char *)ip + hlen) -
		    ((char *)eh + ETHER_HDR_LEN + sizeof(struct ip));
		if (hlen > 1) {
			uint32_t optsum;

			optsum = 0;
			opts = (uint16_t *)((char *)eh +
			    ETHER_HDR_LEN + sizeof(struct ip));

			while (hlen > 1) {
				optsum += ntohs(*opts++);
				hlen -= 2;
			}
			while (optsum >> 16)
				optsum = (optsum >> 16) + (optsum & 0xffff);

			/* Deduct the ip opts sum from the hwsum. */
			csum_data += (uint16_t)~optsum;

			while (csum_data >> 16)
				csum_data =
				    (csum_data >> 16) + (csum_data & 0xffff);
		}
		m0->m_pkthdr.csum_data = csum_data;
	}
swcsum:
	m0->m_pkthdr.csum_flags = csum_flags;
#endif

	return (m0);

bad:
	m_freem(m0);
	return (0);
}

/*
 * Pass a packet to the higher levels.
 */
void
hme_read(struct hme_softc *sc, int ix, uint32_t flags)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m;
	int len;

	len = HME_XD_DECODE_RSIZE(flags);
	if (len <= sizeof(struct ether_header) ||
	    len > ((sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) ?
	    ETHER_VLAN_ENCAP_LEN + ETHERMTU + sizeof(struct ether_header) :
	    ETHERMTU + sizeof(struct ether_header))) {
#ifdef HMEDEBUG
		printf("%s: invalid packet size %d; dropping\n",
		    device_xname(&sc->sc_dev), len);
#endif
		ifp->if_ierrors++;
		return;
	}

	/* Pull packet off interface. */
	m = hme_get(sc, ix, flags);
	if (m == 0) {
		ifp->if_ierrors++;
		return;
	}

	ifp->if_ipackets++;

#if NBPFILTER > 0
	/*
	 * Check if there's a BPF listener on this interface.
	 * If so, hand off the raw packet to BPF.
	 */
	if (ifp->if_bpf)
		bpf_mtap(ifp->if_bpf, m);
#endif

	/* Pass the packet up. */
	(*ifp->if_input)(ifp, m);
}

void
hme_start(struct ifnet *ifp)
{
	struct hme_softc *sc = (struct hme_softc *)ifp->if_softc;
	void *txd = sc->sc_rb.rb_txd;
	struct mbuf *m;
	unsigned int txflags;
	unsigned int ri, len;
	unsigned int ntbuf = sc->sc_rb.rb_ntbuf;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	ri = sc->sc_rb.rb_tdhead;

	for (;;) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == 0)
			break;

#if NBPFILTER > 0
		/*
		 * If BPF is listening on this interface, let it see the
		 * packet before we commit it to the wire.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif

#ifdef INET
		/* collect bits for h/w csum, before hme_put frees the mbuf */
		if (ifp->if_csum_flags_tx & (M_CSUM_TCPv4 | M_CSUM_UDPv4) &&
		    m->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
			struct ether_header *eh;
			uint16_t offset, start;

			eh = mtod(m, struct ether_header *);
			switch (ntohs(eh->ether_type)) {
			case ETHERTYPE_IP:
				start = ETHER_HDR_LEN;
				break;
			case ETHERTYPE_VLAN:
				start = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
				break;
			default:
				/* unsupported, drop it */
				m_free(m);
				continue;
			}
			start += M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data);
			offset = M_CSUM_DATA_IPv4_OFFSET(m->m_pkthdr.csum_data)
			    + start;
			txflags = HME_XD_TXCKSUM |
			    (offset << HME_XD_TXCSSTUFFSHIFT) |
			    (start << HME_XD_TXCSSTARTSHIFT);
		} else
#endif
			txflags = 0;
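		/*
		 * Example values (assuming a plain TCP/IPv4 frame with no
		 * IP options): start = 14 + 20 = 34 and, with the TCP
		 * checksum field 16 bytes into its header, offset = 50;
		 * the chip sums from `start' and stuffs the result at
		 * `offset'.
		 */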
		/*
		 * Copy the mbuf chain into the transmit buffer.
		 */
		len = hme_put(sc, ri, m);

		/*
		 * Initialize transmit registers and start transmission
		 */
		HME_XD_SETFLAGS(sc->sc_pci, txd, ri,
		    HME_XD_OWN | HME_XD_SOP | HME_XD_EOP |
		    HME_XD_ENCODE_TSIZE(len) | txflags);

		/*if (sc->sc_rb.rb_td_nbusy <= 0)*/
		bus_space_write_4(sc->sc_bustag, sc->sc_etx, HME_ETXI_PENDING,
		    HME_ETX_TP_DMAWAKEUP);

		if (++ri == ntbuf)
			ri = 0;

		if (++sc->sc_rb.rb_td_nbusy == ntbuf) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
	}

	sc->sc_rb.rb_tdhead = ri;
}

/*
 * Transmit interrupt.
 */
int
hme_tint(struct hme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_mac;
	unsigned int ri, txflags;

	/*
	 * Unload collision counters
	 */
	ifp->if_collisions +=
	    bus_space_read_4(t, mac, HME_MACI_NCCNT) +
	    bus_space_read_4(t, mac, HME_MACI_FCCNT) +
	    bus_space_read_4(t, mac, HME_MACI_EXCNT) +
	    bus_space_read_4(t, mac, HME_MACI_LTCNT);

	/*
	 * then clear the hardware counters.
	 */
	bus_space_write_4(t, mac, HME_MACI_NCCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_FCCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_EXCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_LTCNT, 0);

	/* Fetch current position in the transmit ring */
	ri = sc->sc_rb.rb_tdtail;

	for (;;) {
		if (sc->sc_rb.rb_td_nbusy <= 0)
			break;

		txflags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri);

		if (txflags & HME_XD_OWN)
			break;

		ifp->if_flags &= ~IFF_OACTIVE;
		ifp->if_opackets++;

		if (++ri == sc->sc_rb.rb_ntbuf)
			ri = 0;

		--sc->sc_rb.rb_td_nbusy;
	}

	/* Update ring */
	sc->sc_rb.rb_tdtail = ri;

	hme_start(ifp);

	if (sc->sc_rb.rb_td_nbusy == 0)
		ifp->if_timer = 0;

	return (1);
}
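/*
 * TX flow control in brief: hme_start() sets IFF_OACTIVE once all ntbuf
 * descriptors are busy, and hme_tint() above clears it again as soon as
 * the chip hands a descriptor back, then calls hme_start() to continue
 * draining the send queue.
 */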
/*
 * Receive interrupt.
 */
int
hme_rint(struct hme_softc *sc)
{
	void *xdr = sc->sc_rb.rb_rxd;
	unsigned int nrbuf = sc->sc_rb.rb_nrbuf;
	unsigned int ri;
	uint32_t flags;

	ri = sc->sc_rb.rb_rdtail;

	/*
	 * Process all buffers with valid data.
	 */
	for (;;) {
		flags = HME_XD_GETFLAGS(sc->sc_pci, xdr, ri);
		if (flags & HME_XD_OWN)
			break;

		if (flags & HME_XD_OFL) {
			printf("%s: buffer overflow, ri=%d; flags=0x%x\n",
			    device_xname(&sc->sc_dev), ri, flags);
		} else
			hme_read(sc, ri, flags);

		/* This buffer can be used by the hardware again */
		HME_XD_SETFLAGS(sc->sc_pci, xdr, ri,
		    HME_XD_OWN | HME_XD_ENCODE_RSIZE(_HME_BUFSZ));

		if (++ri == nrbuf)
			ri = 0;
	}

	sc->sc_rb.rb_rdtail = ri;

	return (1);
}

int
hme_eint(struct hme_softc *sc, u_int status)
{
	char bits[128];

	if ((status & HME_SEB_STAT_MIFIRQ) != 0) {
		bus_space_tag_t t = sc->sc_bustag;
		bus_space_handle_t mif = sc->sc_mif;
		uint32_t cf, st, sm;
		cf = bus_space_read_4(t, mif, HME_MIFI_CFG);
		st = bus_space_read_4(t, mif, HME_MIFI_STAT);
		sm = bus_space_read_4(t, mif, HME_MIFI_SM);
		printf("%s: XXXlink status changed: cfg=%x, stat %x, sm %x\n",
		    device_xname(&sc->sc_dev), cf, st, sm);
		return (1);
	}
	snprintb(bits, sizeof(bits), HME_SEB_STAT_BITS, status);
	printf("%s: status=%s\n", device_xname(&sc->sc_dev), bits);

	return (1);
}

int
hme_intr(void *v)
{
	struct hme_softc *sc = (struct hme_softc *)v;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_seb;
	uint32_t status;
	int r = 0;

	status = bus_space_read_4(t, seb, HME_SEBI_STAT);

	if ((status & HME_SEB_STAT_ALL_ERRORS) != 0)
		r |= hme_eint(sc, status);

	if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0)
		r |= hme_tint(sc);

	if ((status & HME_SEB_STAT_RXTOHOST) != 0)
		r |= hme_rint(sc);

#if NRND > 0
	rnd_add_uint32(&sc->rnd_source, status);
#endif

	return (r);
}
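/*
 * hme_intr() above follows the usual shared-interrupt convention: it
 * returns nonzero when the interrupt looked like ours (any handled
 * status bit was set), zero otherwise.
 */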
void
hme_watchdog(struct ifnet *ifp)
{
	struct hme_softc *sc = ifp->if_softc;

	log(LOG_ERR, "%s: device timeout\n", device_xname(&sc->sc_dev));
	++ifp->if_oerrors;

	hme_reset(sc);
}

/*
 * Initialize the MII Management Interface
 */
void
hme_mifinit(struct hme_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_mif;
	bus_space_handle_t mac = sc->sc_mac;
	int instance, phy;
	uint32_t v;

	if (sc->sc_mii.mii_media.ifm_cur != NULL) {
		instance = IFM_INST(sc->sc_mii.mii_media.ifm_cur->ifm_media);
		phy = sc->sc_phys[instance];
	} else
		/* No media set yet, pick phy arbitrarily.. */
		phy = HME_PHYAD_EXTERNAL;

	/* Configure the MIF in frame mode, no poll, current phy select */
	v = 0;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MIF_CFG_PHY;
	bus_space_write_4(t, mif, HME_MIFI_CFG, v);

	/* If an external transceiver is selected, enable its MII drivers */
	v = bus_space_read_4(t, mac, HME_MACI_XIF);
	v &= ~HME_MAC_XIF_MIIENABLE;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MAC_XIF_MIIENABLE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);
}

/*
 * MII interface
 */
static int
hme_mii_readreg(struct device *self, int phy, int reg)
{
	struct hme_softc *sc = (void *)self;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_mif;
	bus_space_handle_t mac = sc->sc_mac;
	uint32_t v, xif_cfg, mifi_cfg;
	int n;

	/* We can at most have two PHYs */
	if (phy != HME_PHYAD_EXTERNAL && phy != HME_PHYAD_INTERNAL)
		return (0);

	/* Select the desired PHY in the MIF configuration register */
	v = mifi_cfg = bus_space_read_4(t, mif, HME_MIFI_CFG);
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MIF_CFG_PHY;
	bus_space_write_4(t, mif, HME_MIFI_CFG, v);

	/* Enable MII drivers on external transceiver */
	v = xif_cfg = bus_space_read_4(t, mac, HME_MACI_XIF);
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MAC_XIF_MIIENABLE;
	else
		v &= ~HME_MAC_XIF_MIIENABLE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);

#if 0
	/* This doesn't work reliably; the MDIO_1 bit is off most of the time */
	/*
	 * Check whether a transceiver is connected by testing
	 * the MIF configuration register's MDI_X bits. Note that
	 * MDI_0 (int) == 0x100 and MDI_1 (ext) == 0x200; see hmereg.h
	 */
	mif_mdi_bit = 1 << (8 + (1 - phy));
	delay(100);
	v = bus_space_read_4(t, mif, HME_MIFI_CFG);
	if ((v & mif_mdi_bit) == 0)
		return (0);
#endif

	/* Construct the frame command */
	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
	    HME_MIF_FO_TAMSB |
	    (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT) |
	    (phy << HME_MIF_FO_PHYAD_SHIFT) |
	    (reg << HME_MIF_FO_REGAD_SHIFT);

	bus_space_write_4(t, mif, HME_MIFI_FO, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, HME_MIFI_FO);
		if (v & HME_MIF_FO_TALSB) {
			v &= HME_MIF_FO_DATA;
			goto out;
		}
	}

	v = 0;
	printf("%s: mii_read timeout\n", device_xname(&sc->sc_dev));

out:
	/* Restore MIFI_CFG register */
	bus_space_write_4(t, mif, HME_MIFI_CFG, mifi_cfg);
	/* Restore XIF register */
	bus_space_write_4(t, mac, HME_MACI_XIF, xif_cfg);
	return (v);
}
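/*
 * The MIF frame word composed above and in hme_mii_writereg() packs,
 * from high bits down: start-of-frame, opcode (read/write), PHY
 * address, register address, the turnaround bits (TAMSB/TALSB) and 16
 * data bits; completion is detected by polling for TALSB.
 */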
static void
hme_mii_writereg(struct device *self, int phy, int reg, int val)
{
	struct hme_softc *sc = (void *)self;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_mif;
	bus_space_handle_t mac = sc->sc_mac;
	uint32_t v, xif_cfg, mifi_cfg;
	int n;

	/* We can at most have two PHYs */
	if (phy != HME_PHYAD_EXTERNAL && phy != HME_PHYAD_INTERNAL)
		return;

	/* Select the desired PHY in the MIF configuration register */
	v = mifi_cfg = bus_space_read_4(t, mif, HME_MIFI_CFG);
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MIF_CFG_PHY;
	bus_space_write_4(t, mif, HME_MIFI_CFG, v);

	/* Enable MII drivers on external transceiver */
	v = xif_cfg = bus_space_read_4(t, mac, HME_MACI_XIF);
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MAC_XIF_MIIENABLE;
	else
		v &= ~HME_MAC_XIF_MIIENABLE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);

#if 0
	/* This doesn't work reliably; the MDIO_1 bit is off most of the time */
	/*
	 * Check whether a transceiver is connected by testing
	 * the MIF configuration register's MDI_X bits. Note that
	 * MDI_0 (int) == 0x100 and MDI_1 (ext) == 0x200; see hmereg.h
	 */
	mif_mdi_bit = 1 << (8 + (1 - phy));
	delay(100);
	v = bus_space_read_4(t, mif, HME_MIFI_CFG);
	if ((v & mif_mdi_bit) == 0)
		return;
#endif

	/* Construct the frame command */
	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
	    HME_MIF_FO_TAMSB |
	    (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT) |
	    (phy << HME_MIF_FO_PHYAD_SHIFT) |
	    (reg << HME_MIF_FO_REGAD_SHIFT) |
	    (val & HME_MIF_FO_DATA);

	bus_space_write_4(t, mif, HME_MIFI_FO, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, HME_MIFI_FO);
		if (v & HME_MIF_FO_TALSB)
			goto out;
	}

	printf("%s: mii_write timeout\n", device_xname(&sc->sc_dev));
out:
	/* Restore MIFI_CFG register */
	bus_space_write_4(t, mif, HME_MIFI_CFG, mifi_cfg);
	/* Restore XIF register */
	bus_space_write_4(t, mac, HME_MACI_XIF, xif_cfg);
}

static void
hme_mii_statchg(struct device *dev)
{
	struct hme_softc *sc = (void *)dev;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_mac;
	uint32_t v;

#ifdef HMEDEBUG
	if (sc->sc_debug)
		printf("hme_mii_statchg: status change\n");
#endif

	/* Set the MAC Full Duplex bit appropriately */
	/* Apparently the hme chip is SIMPLEX if working in full duplex mode,
	   but not otherwise. */
	v = bus_space_read_4(t, mac, HME_MACI_TXCFG);
	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0) {
		v |= HME_MAC_TXCFG_FULLDPLX;
		sc->sc_ethercom.ec_if.if_flags |= IFF_SIMPLEX;
	} else {
		v &= ~HME_MAC_TXCFG_FULLDPLX;
		sc->sc_ethercom.ec_if.if_flags &= ~IFF_SIMPLEX;
	}
	sc->sc_if_flags = sc->sc_ethercom.ec_if.if_flags;
	bus_space_write_4(t, mac, HME_MACI_TXCFG, v);
}

int
hme_mediachange(struct ifnet *ifp)
{
	struct hme_softc *sc = ifp->if_softc;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_mif;
	bus_space_handle_t mac = sc->sc_mac;
	int instance = IFM_INST(sc->sc_mii.mii_media.ifm_cur->ifm_media);
	int phy = sc->sc_phys[instance];
	int rc;
	uint32_t v;

#ifdef HMEDEBUG
	if (sc->sc_debug)
		printf("hme_mediachange: phy = %d\n", phy);
#endif

	/* Select the current PHY in the MIF configuration register */
	v = bus_space_read_4(t, mif, HME_MIFI_CFG);
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MIF_CFG_PHY;
	bus_space_write_4(t, mif, HME_MIFI_CFG, v);

	/* If an external transceiver is selected, enable its MII drivers */
	v = bus_space_read_4(t, mac, HME_MACI_XIF);
	v &= ~HME_MAC_XIF_MIIENABLE;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MAC_XIF_MIIENABLE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);

	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
		return 0;
	return rc;
}
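/*
 * sc_if_flags caches the if_flags value the chip was last programmed
 * with; hme_ioctl() below compares against it so that toggling only
 * bits in RESETIGN (e.g. debug or promiscuous mode) reloads the
 * address filter instead of forcing a full hme_init() reset.
 */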
/*
 * Process an ioctl request.
 */
int
hme_ioctl(struct ifnet *ifp, unsigned long cmd, void *data)
{
	struct hme_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {

	case SIOCINITIFADDR:
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			if (ifp->if_flags & IFF_UP)
				hme_setladrf(sc);
			else {
				ifp->if_flags |= IFF_UP;
				error = hme_init(sc);
			}
			arp_ifinit(ifp, ifa);
			break;
#endif
		default:
			ifp->if_flags |= IFF_UP;
			error = hme_init(sc);
			break;
		}
		break;

	case SIOCSIFFLAGS:
#ifdef HMEDEBUG
		{
			struct ifreq *ifr = data;
			sc->sc_debug =
			    (ifr->ifr_flags & IFF_DEBUG) != 0 ? 1 : 0;
		}
#endif
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;

		switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
		case IFF_RUNNING:
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			hme_stop(sc, false);
			ifp->if_flags &= ~IFF_RUNNING;
			break;
		case IFF_UP:
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			error = hme_init(sc);
			break;
		case IFF_UP|IFF_RUNNING:
			/*
			 * If setting debug or promiscuous mode, do not reset
			 * the chip; for everything else, call hme_init()
			 * which will trigger a reset.
			 */
#define RESETIGN (IFF_CANTCHANGE | IFF_DEBUG)
			if (ifp->if_flags != sc->sc_if_flags) {
				if ((ifp->if_flags & (~RESETIGN))
				    == (sc->sc_if_flags & (~RESETIGN)))
					hme_setladrf(sc);
				else
					error = hme_init(sc);
			}
#undef RESETIGN
			break;
		case 0:
			break;
		}

		if (sc->sc_ec_capenable != sc->sc_ethercom.ec_capenable)
			error = hme_init(sc);

		break;

	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;

		error = 0;

		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			hme_setladrf(sc);
		}
		break;
	}

	sc->sc_if_flags = ifp->if_flags;
	splx(s);
	return (error);
}

void
hme_shutdown(void *arg)
{

	hme_stop((struct hme_softc *)arg, false);
}
/*
 * Set up the logical address filter.
 */
void
hme_setladrf(struct hme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ethercom *ec = &sc->sc_ethercom;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_mac;
	u_char *cp;
	uint32_t crc;
	uint32_t hash[4];
	uint32_t v;
	int len;

	/* Clear hash table */
	hash[3] = hash[2] = hash[1] = hash[0] = 0;

	/* Get current RX configuration */
	v = bus_space_read_4(t, mac, HME_MACI_RXCFG);

	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		/* Turn on promiscuous mode; turn off the hash filter */
		v |= HME_MAC_RXCFG_PMISC;
		v &= ~HME_MAC_RXCFG_HENABLE;
		ifp->if_flags |= IFF_ALLMULTI;
		goto chipit;
	}

	/* Turn off promiscuous mode; turn on the hash filter */
	v &= ~HME_MAC_RXCFG_PMISC;
	v |= HME_MAC_RXCFG_HENABLE;

	/*
	 * Set up multicast address filter by passing all multicast addresses
	 * through a crc generator, and then using the high order 6 bits as an
	 * index into the 64 bit logical address filter. The two high order
	 * bits select the word, while the rest of the bits select the bit
	 * within the word.
	 */

	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range. (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			hash[3] = hash[2] = hash[1] = hash[0] = 0xffff;
			ifp->if_flags |= IFF_ALLMULTI;
			goto chipit;
		}

		cp = enm->enm_addrlo;
		crc = 0xffffffff;
		for (len = sizeof(enm->enm_addrlo); --len >= 0;) {
			int octet = *cp++;
			int i;

#define MC_POLY_LE	0xedb88320UL	/* mcast crc, little endian */
			for (i = 0; i < 8; i++) {
				if ((crc & 1) ^ (octet & 1)) {
					crc >>= 1;
					crc ^= MC_POLY_LE;
				} else {
					crc >>= 1;
				}
				octet >>= 1;
			}
		}
		/* Just want the 6 most significant bits. */
		crc >>= 26;

		/* Set the corresponding bit in the filter. */
		hash[crc >> 4] |= 1 << (crc & 0xf);

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;

chipit:
	/* Now load the hash table into the chip */
	bus_space_write_4(t, mac, HME_MACI_HASHTAB0, hash[0]);
	bus_space_write_4(t, mac, HME_MACI_HASHTAB1, hash[1]);
	bus_space_write_4(t, mac, HME_MACI_HASHTAB2, hash[2]);
	bus_space_write_4(t, mac, HME_MACI_HASHTAB3, hash[3]);
	bus_space_write_4(t, mac, HME_MACI_RXCFG, v);
}
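/*
 * Example of the filter mapping implemented above (illustrative only):
 * the multicast address is run through the little-endian Ethernet
 * CRC-32 (MC_POLY_LE); of the resulting top 6 bits, the upper two pick
 * one of the four 16-bit HASHTAB registers and the lower four pick the
 * bit within that register.
 */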
/*
 * Routines for accessing the transmit and receive buffers.
 * The various CPU and adapter configurations supported by this
 * driver require three different access methods for buffers
 * and descriptors:
 *	(1) contig (contiguous data; no padding),
 *	(2) gap2 (two bytes of data followed by two bytes of padding),
 *	(3) gap16 (16 bytes of data followed by 16 bytes of padding).
 */

#if 0
/*
 * contig: contiguous data with no padding.
 *
 * Buffers may have any alignment.
 */

void
hme_copytobuf_contig(struct hme_softc *sc, void *from, int ri, int len)
{
	volatile void *buf = sc->sc_rb.rb_txbuf + (ri * _HME_BUFSZ);

	/*
	 * Just call memcpy() to do the work.
	 */
	memcpy(buf, from, len);
}

void
hme_copyfrombuf_contig(struct hme_softc *sc, void *to, int ri, int len)
{
	volatile void *buf = sc->sc_rb.rb_rxbuf + (ri * _HME_BUFSZ);

	/*
	 * Just call memcpy() to do the work.
	 */
	memcpy(to, buf, len);
}
#endif