/*	$NetBSD: hme.c,v 1.95 2016/12/15 09:28:05 ozaki-r Exp $	*/

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * HME Ethernet module driver.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: hme.c,v 1.95 2016/12/15 09:28:05 ozaki-r Exp $");

/* #define HMEDEBUG */

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#ifdef INET
#include <net/if_vlanvar.h>
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#endif

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <sys/bus.h>

#include <dev/ic/hmereg.h>
#include <dev/ic/hmevar.h>

static void	hme_start(struct ifnet *);
static void	hme_stop(struct ifnet *, int);
static int	hme_ioctl(struct ifnet *, u_long, void *);
static void	hme_tick(void *);
static void	hme_watchdog(struct ifnet *);
static bool	hme_shutdown(device_t, int);
static int	hme_init(struct ifnet *);
static void	hme_meminit(struct hme_softc *);
static void	hme_mifinit(struct hme_softc *);
static void	hme_reset(struct hme_softc *);
static void	hme_chipreset(struct hme_softc *);
static void	hme_setladrf(struct hme_softc *);

/* MII methods & callbacks */
static int	hme_mii_readreg(device_t, int, int);
static void	hme_mii_writereg(device_t, int, int, int);
static void	hme_mii_statchg(struct ifnet *);

static int	hme_mediachange(struct ifnet *);

static struct mbuf *hme_get(struct hme_softc *, int, uint32_t);
static int	hme_put(struct hme_softc *, int, struct mbuf *);
static void	hme_read(struct hme_softc *, int, uint32_t);
static int	hme_eint(struct hme_softc *, u_int);
static int	hme_rint(struct hme_softc *);
static int	hme_tint(struct hme_softc *);

#if 0
/* Default buffer copy routines */
static void hme_copytobuf_contig(struct hme_softc *, void *, int, int);
static void hme_copyfrombuf_contig(struct hme_softc *, void *, int, int);
#endif
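/*
 * Usage sketch (illustrative only, not a real front-end): a bus
 * attachment is expected to fill in the softc fields listed in
 * hme_config() below and then call hme_config() exactly once,
 * along the lines of:
 *
 *	sc->sc_bustag = bustag;
 *	sc->sc_dmatag = dmatag;
 *	(map sc_seb, sc_erx, sc_etx, sc_mac and sc_mif here)
 *	sc->sc_burst = 64;
 *	memcpy(sc->sc_enaddr, enaddr, ETHER_ADDR_LEN);
 *	hme_config(sc);
 *
 * The local variable names above are assumptions made for this sketch.
 */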
void
hme_config(struct hme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *child;
	bus_dma_tag_t dmatag = sc->sc_dmatag;
	bus_dma_segment_t seg;
	bus_size_t size;
	int rseg, error;

	/*
	 * HME common initialization.
	 *
	 * hme_softc fields that must be initialized by the front-end:
	 *
	 * the bus tag:
	 *	sc_bustag
	 *
	 * the DMA bus tag:
	 *	sc_dmatag
	 *
	 * the bus handles:
	 *	sc_seb		(Shared Ethernet Block registers)
	 *	sc_erx		(Receiver Unit registers)
	 *	sc_etx		(Transmitter Unit registers)
	 *	sc_mac		(MAC registers)
	 *	sc_mif		(Management Interface registers)
	 *
	 * the maximum bus burst size:
	 *	sc_burst
	 *
	 * (notyet: DMA capable memory for the ring descriptors & packet buffers:
	 *	rb_membase, rb_dmabase)
	 *
	 * the local Ethernet address:
	 *	sc_enaddr
	 *
	 */

	/* Make sure the chip is stopped. */
	hme_chipreset(sc);

	/*
	 * Allocate descriptors and buffers
	 * XXX - do all this differently.. and more configurably,
	 * eg. use things as `dma_load_mbuf()' on transmit,
	 *     and a pool of `EXTMEM' mbufs (with buffers DMA-mapped
	 *     all the time) on the receiver side.
	 *
	 * Note: receive buffers must be 64-byte aligned.
	 * Also, apparently, the buffers must extend to a DMA burst
	 * boundary beyond the maximum packet size.
	 */
#define _HME_NDESC	128
#define _HME_BUFSZ	1600

	/* Note: the # of descriptors must be a multiple of 16 */
	sc->sc_rb.rb_ntbuf = _HME_NDESC;
	sc->sc_rb.rb_nrbuf = _HME_NDESC;

	/*
	 * Allocate DMA capable memory
	 * Buffer descriptors must be aligned on a 2048 byte boundary;
	 * take this into account when calculating the size.  Note that
	 * the maximum number of descriptors (256) occupies 2048 bytes,
	 * so we allocate that much regardless of _HME_NDESC.
	 */
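	/*
	 * For the defaults above this works out to (illustrative
	 * arithmetic): 2048 + 2048 + 128 * 1600 + 128 * 1600
	 * = 4096 + 204800 + 204800 = 413696 bytes in one segment.
	 */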
	size =	2048 +					/* TX descriptors */
		2048 +					/* RX descriptors */
		sc->sc_rb.rb_ntbuf * _HME_BUFSZ +	/* TX buffers */
		sc->sc_rb.rb_nrbuf * _HME_BUFSZ;	/* RX buffers */

	/* Allocate DMA buffer */
	if ((error = bus_dmamem_alloc(dmatag, size,
				      2048, 0,
				      &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev, "DMA buffer alloc error %d\n",
			error);
		return;
	}

	/* Map DMA memory in CPU addressable space */
	if ((error = bus_dmamem_map(dmatag, &seg, rseg, size,
				    &sc->sc_rb.rb_membase,
				    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev, "DMA buffer map error %d\n",
			error);
		/* The DMA map has not been created yet; just free the memory */
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}

	if ((error = bus_dmamap_create(dmatag, size, 1, size, 0,
				       BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
		aprint_error_dev(sc->sc_dev, "DMA map create error %d\n",
			error);
		bus_dmamem_unmap(dmatag, sc->sc_rb.rb_membase, size);
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}

	/* Load the buffer */
	if ((error = bus_dmamap_load(dmatag, sc->sc_dmamap,
	    sc->sc_rb.rb_membase, size, NULL,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev, "DMA buffer map load error %d\n",
			error);
		bus_dmamap_destroy(dmatag, sc->sc_dmamap);
		bus_dmamem_unmap(dmatag, sc->sc_rb.rb_membase, size);
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}
	sc->sc_rb.rb_dmabase = sc->sc_dmamap->dm_segs[0].ds_addr;

	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(sc->sc_enaddr));

	/* Initialize ifnet structure. */
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_start = hme_start;
	ifp->if_stop = hme_stop;
	ifp->if_ioctl = hme_ioctl;
	ifp->if_init = hme_init;
	ifp->if_watchdog = hme_watchdog;
	ifp->if_flags =
	    IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
	sc->sc_if_flags = ifp->if_flags;
	ifp->if_capabilities |=
	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;
	IFQ_SET_READY(&ifp->if_snd);
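	/*
	 * The checksum capabilities above are off by default; as a
	 * usage example, they can be switched on from userland with
	 * something like "ifconfig hme0 tcp4csum udp4csum".
	 */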
	/* Initialize ifmedia structures and MII info */
	mii->mii_ifp = ifp;
	mii->mii_readreg = hme_mii_readreg;
	mii->mii_writereg = hme_mii_writereg;
	mii->mii_statchg = hme_mii_statchg;

	sc->sc_ethercom.ec_mii = mii;
	ifmedia_init(&mii->mii_media, 0, hme_mediachange, ether_mediastatus);

	hme_mifinit(sc);

	mii_attach(sc->sc_dev, mii, 0xffffffff,
	    MII_PHY_ANY, MII_OFFSET_ANY, MIIF_FORCEANEG);

	child = LIST_FIRST(&mii->mii_phys);
	if (child == NULL) {
		/* No PHY attached */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
	} else {
		/*
		 * Walk along the list of attached MII devices and
		 * establish an `MII instance' to `phy number'
		 * mapping.  We'll use this mapping in media change
		 * requests to determine which phy to use to program
		 * the MIF configuration register.
		 */
		for (; child != NULL; child = LIST_NEXT(child, mii_list)) {
			/*
			 * Note: we support just two PHYs: the built-in
			 * internal device and an external on the MII
			 * connector.
			 */
			if (child->mii_phy > 1 || child->mii_inst > 1) {
				aprint_error_dev(sc->sc_dev,
				    "cannot accommodate MII device %s"
				    " at phy %d, instance %d\n",
				    device_xname(child->mii_dev),
				    child->mii_phy, child->mii_inst);
				continue;
			}

			sc->sc_phys[child->mii_inst] = child->mii_phy;
		}

		/*
		 * Set the default media to auto negotiation if the phy has
		 * the auto negotiation capability.
		 * XXX; What to do otherwise?
		 */
		if (ifmedia_match(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO, 0))
			ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
		/*
		else
			ifmedia_set(&sc->sc_mii.mii_media, sc->sc_defaultmedia);
		*/
	}

	/* claim 802.1q capability */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

	if (pmf_device_register1(sc->sc_dev, NULL, NULL, hme_shutdown))
		pmf_class_network_register(sc->sc_dev, ifp);
	else
		aprint_error_dev(sc->sc_dev,
		    "couldn't establish power handler\n");

	rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev),
	    RND_TYPE_NET, RND_FLAG_DEFAULT);

	callout_init(&sc->sc_tick_ch, 0);
}

void
hme_tick(void *arg)
{
	struct hme_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);
}

void
hme_reset(struct hme_softc *sc)
{
	int s;

	s = splnet();
	(void)hme_init(&sc->sc_ethercom.ec_if);
	splx(s);
}

void
hme_chipreset(struct hme_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_seb;
	int n;

	/* Mask all interrupts */
	bus_space_write_4(t, seb, HME_SEBI_IMASK, 0xffffffff);

	/* Reset transmitter and receiver */
	bus_space_write_4(t, seb, HME_SEBI_RESET,
	    (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX));

	for (n = 0; n < 20; n++) {
		uint32_t v = bus_space_read_4(t, seb, HME_SEBI_RESET);
		if ((v & (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)) == 0)
			return;
		DELAY(20);
	}

	printf("%s: %s: reset failed\n", device_xname(sc->sc_dev), __func__);
}

void
hme_stop(struct ifnet *ifp, int disable)
{
	struct hme_softc *sc;

	sc = ifp->if_softc;

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	callout_stop(&sc->sc_tick_ch);
	mii_down(&sc->sc_mii);

	hme_chipreset(sc);
}
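/*
 * Layout of the DMA area carved up by hme_meminit() below, for the
 * default _HME_NDESC (128) and _HME_BUFSZ (1600) and assuming
 * HME_XD_SIZE is 8 bytes per descriptor (illustrative only):
 *
 *	offset      0	TX descriptors (128 * 8, padded to 2048)
 *	offset   2048	RX descriptors (128 * 8, padded to 2048)
 *	offset   4096	TX buffers     (128 * 1600)
 *	offset 208896	RX buffers     (128 * 1600)
 */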
void
hme_meminit(struct hme_softc *sc)
{
	bus_addr_t txbufdma, rxbufdma;
	bus_addr_t dma;
	char *p;
	unsigned int ntbuf, nrbuf, i;
	struct hme_ring *hr = &sc->sc_rb;

	p = hr->rb_membase;
	dma = hr->rb_dmabase;

	ntbuf = hr->rb_ntbuf;
	nrbuf = hr->rb_nrbuf;

	/*
	 * Allocate transmit descriptors
	 */
	hr->rb_txd = p;
	hr->rb_txddma = dma;
	p += ntbuf * HME_XD_SIZE;
	dma += ntbuf * HME_XD_SIZE;
	/* We have reserved descriptor space until the next 2048 byte boundary. */
	dma = (bus_addr_t)roundup((u_long)dma, 2048);
	p = (void *)roundup((u_long)p, 2048);

	/*
	 * Allocate receive descriptors
	 */
	hr->rb_rxd = p;
	hr->rb_rxddma = dma;
	p += nrbuf * HME_XD_SIZE;
	dma += nrbuf * HME_XD_SIZE;
	/* Again move forward to the next 2048 byte boundary. */
	dma = (bus_addr_t)roundup((u_long)dma, 2048);
	p = (void *)roundup((u_long)p, 2048);


	/*
	 * Allocate transmit buffers
	 */
	hr->rb_txbuf = p;
	txbufdma = dma;
	p += ntbuf * _HME_BUFSZ;
	dma += ntbuf * _HME_BUFSZ;

	/*
	 * Allocate receive buffers
	 */
	hr->rb_rxbuf = p;
	rxbufdma = dma;
	p += nrbuf * _HME_BUFSZ;
	dma += nrbuf * _HME_BUFSZ;

	/*
	 * Initialize transmit buffer descriptors
	 */
	for (i = 0; i < ntbuf; i++) {
		HME_XD_SETADDR(sc->sc_pci, hr->rb_txd, i, txbufdma + i * _HME_BUFSZ);
		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, i, 0);
	}

	/*
	 * Initialize receive buffer descriptors
	 */
	for (i = 0; i < nrbuf; i++) {
		HME_XD_SETADDR(sc->sc_pci, hr->rb_rxd, i, rxbufdma + i * _HME_BUFSZ);
		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_rxd, i,
		    HME_XD_OWN | HME_XD_ENCODE_RSIZE(_HME_BUFSZ));
	}

	hr->rb_tdhead = hr->rb_tdtail = 0;
	hr->rb_td_nbusy = 0;
	hr->rb_rdtail = 0;
}

/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 */
int
hme_init(struct ifnet *ifp)
{
	struct hme_softc *sc = ifp->if_softc;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_seb;
	bus_space_handle_t etx = sc->sc_etx;
	bus_space_handle_t erx = sc->sc_erx;
	bus_space_handle_t mac = sc->sc_mac;
	uint8_t *ea;
	uint32_t v;
	int rc;

	/*
	 * Initialization sequence.  The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.
	 */

	/* step 1 & 2. Reset the Ethernet Channel */
	hme_stop(ifp, 0);

	/* Re-initialize the MIF */
	hme_mifinit(sc);

	/* Call MI reset function if any */
	if (sc->sc_hwreset)
		(*sc->sc_hwreset)(sc);

#if 0
	/* Mask all MIF interrupts, just in case */
	bus_space_write_4(t, mif, HME_MIFI_IMASK, 0xffff);
#endif

	/* step 3. Setup data structures in host memory */
	hme_meminit(sc);

	/* step 4. TX MAC registers & counters */
	bus_space_write_4(t, mac, HME_MACI_NCCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_FCCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_EXCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_LTCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_TXSIZE,
	    (sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) ?
	    ETHER_VLAN_ENCAP_LEN + ETHER_MAX_LEN : ETHER_MAX_LEN);
	sc->sc_ec_capenable = sc->sc_ethercom.ec_capenable;

	/* Load station MAC address */
	ea = sc->sc_enaddr;
	bus_space_write_4(t, mac, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1]);
	bus_space_write_4(t, mac, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3]);
	bus_space_write_4(t, mac, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5]);

	/*
	 * Init seed for backoff
	 * (source suggested by manual: low 10 bits of MAC address)
	 */
	v = ((ea[4] << 8) | ea[5]) & 0x3fff;
	bus_space_write_4(t, mac, HME_MACI_RANDSEED, v);
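	/*
	 * Worked example (illustrative): for the MAC address
	 * 08:00:20:ab:cd:ef the three registers above receive
	 * 0x0800, 0x20ab and 0xcdef respectively, and the backoff
	 * seed becomes 0xcdef & 0x3fff = 0x0def.
	 */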

	/* Note: Accepting power-on default for other MAC registers here.. */


	/* step 5. RX MAC registers & counters */
	hme_setladrf(sc);

	/* step 6 & 7. Program Descriptor Ring Base Addresses */
	bus_space_write_4(t, etx, HME_ETXI_RING, sc->sc_rb.rb_txddma);
	bus_space_write_4(t, etx, HME_ETXI_RSIZE, sc->sc_rb.rb_ntbuf);

	bus_space_write_4(t, erx, HME_ERXI_RING, sc->sc_rb.rb_rxddma);
	bus_space_write_4(t, mac, HME_MACI_RXSIZE,
	    (sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) ?
	    ETHER_VLAN_ENCAP_LEN + ETHER_MAX_LEN : ETHER_MAX_LEN);

	/* step 8. Global Configuration & Interrupt Mask */
	bus_space_write_4(t, seb, HME_SEBI_IMASK,
	    ~(
	      /*HME_SEB_STAT_GOTFRAME | HME_SEB_STAT_SENTFRAME |*/
	      HME_SEB_STAT_HOSTTOTX |
	      HME_SEB_STAT_RXTOHOST |
	      HME_SEB_STAT_TXALL |
	      HME_SEB_STAT_TXPERR |
	      HME_SEB_STAT_RCNTEXP |
	      HME_SEB_STAT_MIFIRQ |
	      HME_SEB_STAT_ALL_ERRORS ));

	switch (sc->sc_burst) {
	default:
		v = 0;
		break;
	case 16:
		v = HME_SEB_CFG_BURST16;
		break;
	case 32:
		v = HME_SEB_CFG_BURST32;
		break;
	case 64:
		v = HME_SEB_CFG_BURST64;
		break;
	}
	bus_space_write_4(t, seb, HME_SEBI_CFG, v);

	/* step 9. ETX Configuration: use mostly default values */

	/* Enable DMA */
	v = bus_space_read_4(t, etx, HME_ETXI_CFG);
	v |= HME_ETX_CFG_DMAENABLE;
	bus_space_write_4(t, etx, HME_ETXI_CFG, v);

	/* Transmit Descriptor ring size: in increments of 16 */
	bus_space_write_4(t, etx, HME_ETXI_RSIZE, _HME_NDESC / 16 - 1);


	/* step 10. ERX Configuration */
	v = bus_space_read_4(t, erx, HME_ERXI_CFG);

	/* Encode Receive Descriptor ring size: four possible values */
	switch (_HME_NDESC /*XXX*/) {
	case 32:
		v |= HME_ERX_CFG_RINGSIZE32;
		break;
	case 64:
		v |= HME_ERX_CFG_RINGSIZE64;
		break;
	case 128:
		v |= HME_ERX_CFG_RINGSIZE128;
		break;
	case 256:
		v |= HME_ERX_CFG_RINGSIZE256;
		break;
	default:
		printf("hme: invalid Receive Descriptor ring size\n");
		break;
	}

	/* Enable DMA */
	v |= HME_ERX_CFG_DMAENABLE;
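	/*
	 * Worked example (illustrative): for a plain IPv4 frame the
	 * hardware checksum below starts at the beginning of the IP
	 * payload, i.e. (ETHER_HDR_LEN + sizeof(struct ip)) / 2 =
	 * (14 + 20) / 2 = 17 half-words into the frame; frames with
	 * VLAN tags or IP options are compensated for in software in
	 * hme_get().
	 */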
	/* set h/w rx checksum start offset (# of half-words) */
#ifdef INET
	v |= (((ETHER_HDR_LEN + sizeof(struct ip)) / sizeof(uint16_t))
	    << HME_ERX_CFG_CSUMSHIFT) &
	    HME_ERX_CFG_CSUMSTART;
#endif
	bus_space_write_4(t, erx, HME_ERXI_CFG, v);

	/* step 11. XIF Configuration */
	v = bus_space_read_4(t, mac, HME_MACI_XIF);
	v |= HME_MAC_XIF_OE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);

	/* step 12. RX_MAC Configuration Register */
	v = bus_space_read_4(t, mac, HME_MACI_RXCFG);
	v |= HME_MAC_RXCFG_ENABLE | HME_MAC_RXCFG_PSTRIP;
	bus_space_write_4(t, mac, HME_MACI_RXCFG, v);

	/* step 13. TX_MAC Configuration Register */
	v = bus_space_read_4(t, mac, HME_MACI_TXCFG);
	v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP);
	bus_space_write_4(t, mac, HME_MACI_TXCFG, v);

	/* step 14. Issue Transmit Pending command */

	/* Call MI initialization function if any */
	if (sc->sc_hwinit)
		(*sc->sc_hwinit)(sc);

	/* Set the current media. */
	if ((rc = hme_mediachange(ifp)) != 0)
		return rc;

	/* Start the one second timer. */
	callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	sc->sc_if_flags = ifp->if_flags;
	ifp->if_timer = 0;
	hme_start(ifp);
	return 0;
}

/*
 * Routine to copy from mbuf chain to transmit buffer in
 * network buffer memory.
 * Returns the amount of data copied.
 */
int
hme_put(struct hme_softc *sc, int ri, struct mbuf *m)
	/* ri: Ring index */
{
	struct mbuf *n;
	int len, tlen = 0;
	char *bp;

	bp = (char *)sc->sc_rb.rb_txbuf + (ri % sc->sc_rb.rb_ntbuf) * _HME_BUFSZ;
	for (; m; m = n) {
		len = m->m_len;
		if (len == 0) {
			n = m_free(m);
			continue;
		}
		memcpy(bp, mtod(m, void *), len);
		bp += len;
		tlen += len;
		n = m_free(m);
	}
	return (tlen);
}

/*
 * Pull data off an interface.
 * Len is length of data, with local net header stripped.
 * We copy the data into mbufs.  When full cluster sized units are present
 * we copy into clusters.
 */
struct mbuf *
hme_get(struct hme_softc *sc, int ri, uint32_t flags)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m, *m0, *newm;
	char *bp;
	int len, totlen;
#ifdef INET
	int csum_flags;
#endif

	totlen = HME_XD_DECODE_RSIZE(flags);
	MGETHDR(m0, M_DONTWAIT, MT_DATA);
	if (m0 == 0)
		return (0);
	m_set_rcvif(m0, ifp);
	m0->m_pkthdr.len = totlen;
	len = MHLEN;
	m = m0;

	bp = (char *)sc->sc_rb.rb_rxbuf + (ri % sc->sc_rb.rb_nrbuf) * _HME_BUFSZ;

	while (totlen > 0) {
		if (totlen >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0)
				goto bad;
			len = MCLBYTES;
		}
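		/*
		 * On the first mbuf, shift the data pointer so that
		 * the IP header following the 14-byte Ethernet header
		 * ends up aligned: ALIGN() rounds up to the machine's
		 * alignment boundary, so e.g. with 4-byte alignment
		 * the frame is placed 2 bytes into the mbuf and the
		 * IP header lands on a 4-byte boundary.
		 */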
		if (m == m0) {
			char *newdata = (char *)
			    ALIGN(m->m_data + sizeof(struct ether_header)) -
			    sizeof(struct ether_header);
			len -= newdata - m->m_data;
			m->m_data = newdata;
		}

		m->m_len = len = min(totlen, len);
		memcpy(mtod(m, void *), bp, len);
		bp += len;

		totlen -= len;
		if (totlen > 0) {
			MGET(newm, M_DONTWAIT, MT_DATA);
			if (newm == 0)
				goto bad;
			len = MLEN;
			m = m->m_next = newm;
		}
	}

#ifdef INET
	/* hardware checksum */
	csum_flags = 0;
	if (ifp->if_csum_flags_rx & (M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
		struct ether_header *eh;
		struct ether_vlan_header *evh;
		struct ip *ip;
		struct udphdr *uh;
		uint16_t *opts;
		int32_t hlen, pktlen;
		uint32_t csum_data;

		eh = mtod(m0, struct ether_header *);
		if (ntohs(eh->ether_type) == ETHERTYPE_IP) {
			ip = (struct ip *)((char *)eh + ETHER_HDR_LEN);
			pktlen = m0->m_pkthdr.len - ETHER_HDR_LEN;
		} else if (ntohs(eh->ether_type) == ETHERTYPE_VLAN) {
			evh = (struct ether_vlan_header *)eh;
			if (ntohs(evh->evl_proto) != ETHERTYPE_IP)
				goto swcsum;
			ip = (struct ip *)((char *)eh + ETHER_HDR_LEN +
			    ETHER_VLAN_ENCAP_LEN);
			pktlen = m0->m_pkthdr.len -
			    ETHER_HDR_LEN - ETHER_VLAN_ENCAP_LEN;
		} else
			goto swcsum;

		/* IPv4 only */
		if (ip->ip_v != IPVERSION)
			goto swcsum;

		hlen = ip->ip_hl << 2;
		if (hlen < sizeof(struct ip))
			goto swcsum;

		/*
		 * bail if too short, has random trailing garbage, truncated,
		 * fragment, or has ethernet pad.
		 */
		if (ntohs(ip->ip_len) < hlen ||
		    ntohs(ip->ip_len) != pktlen ||
		    (ntohs(ip->ip_off) & (IP_MF | IP_OFFMASK)) != 0)
			goto swcsum;

		switch (ip->ip_p) {
		case IPPROTO_TCP:
			if ((ifp->if_csum_flags_rx & M_CSUM_TCPv4) == 0)
				goto swcsum;
			if (pktlen < (hlen + sizeof(struct tcphdr)))
				goto swcsum;
			csum_flags =
			    M_CSUM_TCPv4 | M_CSUM_DATA | M_CSUM_NO_PSEUDOHDR;
			break;
		case IPPROTO_UDP:
			if ((ifp->if_csum_flags_rx & M_CSUM_UDPv4) == 0)
				goto swcsum;
			if (pktlen < (hlen + sizeof(struct udphdr)))
				goto swcsum;
			uh = (struct udphdr *)((char *)ip + hlen);
			/* no checksum */
			if (uh->uh_sum == 0)
				goto swcsum;
			csum_flags =
			    M_CSUM_UDPv4 | M_CSUM_DATA | M_CSUM_NO_PSEUDOHDR;
			break;
		default:
			goto swcsum;
		}

		/* w/ M_CSUM_NO_PSEUDOHDR, the uncomplemented sum is expected */
		csum_data = ~flags & HME_XD_RXCKSUM;

		/*
		 * If data offset is different from RX cksum start offset,
		 * we have to deduct them.
		 */
		hlen = ((char *)ip + hlen) -
		    ((char *)eh + ETHER_HDR_LEN + sizeof(struct ip));
		if (hlen > 1) {
			uint32_t optsum;

			optsum = 0;
			opts = (uint16_t *)((char *)eh +
			    ETHER_HDR_LEN + sizeof(struct ip));

			while (hlen > 1) {
				optsum += ntohs(*opts++);
				hlen -= 2;
			}
			while (optsum >> 16)
				optsum = (optsum >> 16) + (optsum & 0xffff);

			/* Deduct the ip opts sum from the hwsum. */
			csum_data += (uint16_t)~optsum;

			while (csum_data >> 16)
				csum_data =
				    (csum_data >> 16) + (csum_data & 0xffff);
		}
		m0->m_pkthdr.csum_data = csum_data;
	}
swcsum:
	m0->m_pkthdr.csum_flags = csum_flags;
#endif

	return (m0);

bad:
	m_freem(m0);
	return (0);
}

/*
 * Pass a packet to the higher levels.
 */
void
hme_read(struct hme_softc *sc, int ix, uint32_t flags)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m;
	int len;

	len = HME_XD_DECODE_RSIZE(flags);
	if (len <= sizeof(struct ether_header) ||
	    len > ((sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU) ?
	    ETHER_VLAN_ENCAP_LEN + ETHERMTU + sizeof(struct ether_header) :
	    ETHERMTU + sizeof(struct ether_header))) {
#ifdef HMEDEBUG
		printf("%s: invalid packet size %d; dropping\n",
		    device_xname(sc->sc_dev), len);
#endif
		ifp->if_ierrors++;
		return;
	}

	/* Pull packet off interface. */
	m = hme_get(sc, ix, flags);
	if (m == 0) {
		ifp->if_ierrors++;
		return;
	}

	/* Pass the packet up. */
	if_percpuq_enqueue(ifp->if_percpuq, m);
}
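/*
 * Worked example for the transmit checksum encoding in hme_start()
 * below (illustrative): for a TCP/IPv4 packet without IP options,
 * start = ETHER_HDR_LEN + the 20-byte IP header = 34, and offset =
 * start + 16 = 50, since the TCP checksum field sits 16 bytes into
 * the TCP header (6 bytes for UDP).
 */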
void
hme_start(struct ifnet *ifp)
{
	struct hme_softc *sc = ifp->if_softc;
	void *txd = sc->sc_rb.rb_txd;
	struct mbuf *m;
	unsigned int txflags;
	unsigned int ri, len, obusy;
	unsigned int ntbuf = sc->sc_rb.rb_ntbuf;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	ri = sc->sc_rb.rb_tdhead;
	obusy = sc->sc_rb.rb_td_nbusy;

	for (;;) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == 0)
			break;

		/*
		 * If BPF is listening on this interface, let it see the
		 * packet before we commit it to the wire.
		 */
		bpf_mtap(ifp, m);

#ifdef INET
		/* collect bits for h/w csum, before hme_put frees the mbuf */
		if (ifp->if_csum_flags_tx & (M_CSUM_TCPv4 | M_CSUM_UDPv4) &&
		    m->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
			struct ether_header *eh;
			uint16_t offset, start;

			eh = mtod(m, struct ether_header *);
			switch (ntohs(eh->ether_type)) {
			case ETHERTYPE_IP:
				start = ETHER_HDR_LEN;
				break;
			case ETHERTYPE_VLAN:
				start = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
				break;
			default:
				/* unsupported, drop it */
				m_free(m);
				continue;
			}
			start += M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data);
			offset = M_CSUM_DATA_IPv4_OFFSET(m->m_pkthdr.csum_data)
			    + start;
			txflags = HME_XD_TXCKSUM |
			    (offset << HME_XD_TXCSSTUFFSHIFT) |
			    (start << HME_XD_TXCSSTARTSHIFT);
		} else
#endif
			txflags = 0;

		/*
		 * Copy the mbuf chain into the transmit buffer.
		 */
		len = hme_put(sc, ri, m);

		/*
		 * Initialize transmit registers and start transmission
		 */
		HME_XD_SETFLAGS(sc->sc_pci, txd, ri,
		    HME_XD_OWN | HME_XD_SOP | HME_XD_EOP |
		    HME_XD_ENCODE_TSIZE(len) | txflags);

		/*if (sc->sc_rb.rb_td_nbusy <= 0)*/
		bus_space_write_4(sc->sc_bustag, sc->sc_etx, HME_ETXI_PENDING,
		    HME_ETX_TP_DMAWAKEUP);

		if (++ri == ntbuf)
			ri = 0;

		if (++sc->sc_rb.rb_td_nbusy == ntbuf) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
	}

	if (obusy != sc->sc_rb.rb_td_nbusy) {
		sc->sc_rb.rb_tdhead = ri;
		ifp->if_timer = 5;
	}
}
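/*
 * Note (illustrative): hme_put() copies each outgoing chain into one
 * contiguous ring buffer, so a packet always occupies exactly one
 * descriptor; that is why HME_XD_SOP and HME_XD_EOP are always set
 * together in hme_start() above.
 */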
/*
 * Transmit interrupt.
 */
int
hme_tint(struct hme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_mac;
	unsigned int ri, txflags;

	/*
	 * Unload collision counters
	 */
	ifp->if_collisions +=
	    bus_space_read_4(t, mac, HME_MACI_NCCNT) +
	    bus_space_read_4(t, mac, HME_MACI_FCCNT);
	ifp->if_oerrors +=
	    bus_space_read_4(t, mac, HME_MACI_EXCNT) +
	    bus_space_read_4(t, mac, HME_MACI_LTCNT);

	/*
	 * then clear the hardware counters.
	 */
	bus_space_write_4(t, mac, HME_MACI_NCCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_FCCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_EXCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_LTCNT, 0);

	/* Fetch current position in the transmit ring */
	ri = sc->sc_rb.rb_tdtail;

	for (;;) {
		if (sc->sc_rb.rb_td_nbusy <= 0)
			break;

		txflags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri);

		if (txflags & HME_XD_OWN)
			break;

		ifp->if_flags &= ~IFF_OACTIVE;
		ifp->if_opackets++;

		if (++ri == sc->sc_rb.rb_ntbuf)
			ri = 0;

		--sc->sc_rb.rb_td_nbusy;
	}

	/* Update ring */
	sc->sc_rb.rb_tdtail = ri;

	hme_start(ifp);

	if (sc->sc_rb.rb_td_nbusy == 0)
		ifp->if_timer = 0;

	return (1);
}

/*
 * Receive interrupt.
 */
int
hme_rint(struct hme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_mac;
	void *xdr = sc->sc_rb.rb_rxd;
	unsigned int nrbuf = sc->sc_rb.rb_nrbuf;
	unsigned int ri;
	uint32_t flags;

	ri = sc->sc_rb.rb_rdtail;

	/*
	 * Process all buffers with valid data.
	 */
	for (;;) {
		flags = HME_XD_GETFLAGS(sc->sc_pci, xdr, ri);
		if (flags & HME_XD_OWN)
			break;

		if (flags & HME_XD_OFL) {
			printf("%s: buffer overflow, ri=%d; flags=0x%x\n",
			    device_xname(sc->sc_dev), ri, flags);
		} else
			hme_read(sc, ri, flags);

		/* This buffer can be used by the hardware again */
		HME_XD_SETFLAGS(sc->sc_pci, xdr, ri,
		    HME_XD_OWN | HME_XD_ENCODE_RSIZE(_HME_BUFSZ));

		if (++ri == nrbuf)
			ri = 0;
	}

	sc->sc_rb.rb_rdtail = ri;

	/* Read error counters ... */
	ifp->if_ierrors +=
	    bus_space_read_4(t, mac, HME_MACI_STAT_LCNT) +
	    bus_space_read_4(t, mac, HME_MACI_STAT_ACNT) +
	    bus_space_read_4(t, mac, HME_MACI_STAT_CCNT) +
	    bus_space_read_4(t, mac, HME_MACI_STAT_CVCNT);

	/* ... then clear the hardware counters. */
	bus_space_write_4(t, mac, HME_MACI_STAT_LCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_STAT_ACNT, 0);
	bus_space_write_4(t, mac, HME_MACI_STAT_CCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_STAT_CVCNT, 0);
	return (1);
}

int
hme_eint(struct hme_softc *sc, u_int status)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	char bits[128];

	if ((status & HME_SEB_STAT_MIFIRQ) != 0) {
		bus_space_tag_t t = sc->sc_bustag;
		bus_space_handle_t mif = sc->sc_mif;
		uint32_t cf, st, sm;
		cf = bus_space_read_4(t, mif, HME_MIFI_CFG);
		st = bus_space_read_4(t, mif, HME_MIFI_STAT);
		sm = bus_space_read_4(t, mif, HME_MIFI_SM);
		printf("%s: XXXlink status changed: cfg=%x, stat %x, sm %x\n",
		    device_xname(sc->sc_dev), cf, st, sm);
		return (1);
	}

	/* Receive error counters rolled over */
	if (status & HME_SEB_STAT_ACNTEXP)
		ifp->if_ierrors += 0xff;
	if (status & HME_SEB_STAT_CCNTEXP)
		ifp->if_ierrors += 0xff;
	if (status & HME_SEB_STAT_LCNTEXP)
		ifp->if_ierrors += 0xff;
	if (status & HME_SEB_STAT_CVCNTEXP)
		ifp->if_ierrors += 0xff;

	/* RXTERR locks up the interface, so do a reset */
	if (status & HME_SEB_STAT_RXTERR)
		hme_reset(sc);

	snprintb(bits, sizeof(bits), HME_SEB_STAT_BITS, status);
	printf("%s: status=%s\n", device_xname(sc->sc_dev), bits);

	return (1);
}

int
hme_intr(void *v)
{
	struct hme_softc *sc = v;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_seb;
	uint32_t status;
	int r = 0;

	status = bus_space_read_4(t, seb, HME_SEBI_STAT);

	if ((status & HME_SEB_STAT_ALL_ERRORS) != 0)
		r |= hme_eint(sc, status);

	if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0)
		r |= hme_tint(sc);

	if ((status & HME_SEB_STAT_RXTOHOST) != 0)
		r |= hme_rint(sc);

	rnd_add_uint32(&sc->rnd_source, status);

	return (r);
}


void
hme_watchdog(struct ifnet *ifp)
{
	struct hme_softc *sc = ifp->if_softc;

	log(LOG_ERR, "%s: device timeout\n", device_xname(sc->sc_dev));
	++ifp->if_oerrors;

	hme_reset(sc);
}
/*
 * Initialize the MII Management Interface
 */
void
hme_mifinit(struct hme_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_mif;
	bus_space_handle_t mac = sc->sc_mac;
	int instance, phy;
	uint32_t v;

	if (sc->sc_mii.mii_media.ifm_cur != NULL) {
		instance = IFM_INST(sc->sc_mii.mii_media.ifm_cur->ifm_media);
		phy = sc->sc_phys[instance];
	} else
		/* No media set yet, pick phy arbitrarily.. */
		phy = HME_PHYAD_EXTERNAL;

	/* Configure the MIF in frame mode, no poll, current phy select */
	v = 0;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MIF_CFG_PHY;
	bus_space_write_4(t, mif, HME_MIFI_CFG, v);

	/* If an external transceiver is selected, enable its MII drivers */
	v = bus_space_read_4(t, mac, HME_MACI_XIF);
	v &= ~HME_MAC_XIF_MIIENABLE;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MAC_XIF_MIIENABLE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);
}
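/*
 * Note on the two register accessors below: each builds an IEEE 802.3
 * clause 22 management frame in the MIF Frame/Output register and then
 * polls the frame's turnaround LSB (HME_MIF_FO_TALSB), which the MIF
 * sets when the cycle completes, for up to 100 microseconds before
 * giving up.
 */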
/*
 * MII interface
 */
static int
hme_mii_readreg(device_t self, int phy, int reg)
{
	struct hme_softc *sc = device_private(self);
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_mif;
	bus_space_handle_t mac = sc->sc_mac;
	uint32_t v, xif_cfg, mifi_cfg;
	int n;

	/* We can at most have two PHYs */
	if (phy != HME_PHYAD_EXTERNAL && phy != HME_PHYAD_INTERNAL)
		return (0);

	/* Select the desired PHY in the MIF configuration register */
	v = mifi_cfg = bus_space_read_4(t, mif, HME_MIFI_CFG);
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MIF_CFG_PHY;
	bus_space_write_4(t, mif, HME_MIFI_CFG, v);

	/* Enable MII drivers on external transceiver */
	v = xif_cfg = bus_space_read_4(t, mac, HME_MACI_XIF);
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MAC_XIF_MIIENABLE;
	else
		v &= ~HME_MAC_XIF_MIIENABLE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);

#if 0
	/* This doesn't work reliably; the MDIO_1 bit is off most of the time */
	/*
	 * Check whether a transceiver is connected by testing
	 * the MIF configuration register's MDI_X bits.  Note that
	 * MDI_0 (int) == 0x100 and MDI_1 (ext) == 0x200; see hmereg.h
	 */
	mif_mdi_bit = 1 << (8 + (1 - phy));
	delay(100);
	v = bus_space_read_4(t, mif, HME_MIFI_CFG);
	if ((v & mif_mdi_bit) == 0)
		return (0);
#endif

	/* Construct the frame command */
	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
	    HME_MIF_FO_TAMSB |
	    (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT) |
	    (phy << HME_MIF_FO_PHYAD_SHIFT) |
	    (reg << HME_MIF_FO_REGAD_SHIFT);

	bus_space_write_4(t, mif, HME_MIFI_FO, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, HME_MIFI_FO);
		if (v & HME_MIF_FO_TALSB) {
			v &= HME_MIF_FO_DATA;
			goto out;
		}
	}

	v = 0;
	printf("%s: mii_read timeout\n", device_xname(sc->sc_dev));

out:
	/* Restore MIFI_CFG register */
	bus_space_write_4(t, mif, HME_MIFI_CFG, mifi_cfg);
	/* Restore XIF register */
	bus_space_write_4(t, mac, HME_MACI_XIF, xif_cfg);
	return (v);
}
static void
hme_mii_writereg(device_t self, int phy, int reg, int val)
{
	struct hme_softc *sc = device_private(self);
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_mif;
	bus_space_handle_t mac = sc->sc_mac;
	uint32_t v, xif_cfg, mifi_cfg;
	int n;

	/* We can at most have two PHYs */
	if (phy != HME_PHYAD_EXTERNAL && phy != HME_PHYAD_INTERNAL)
		return;

	/* Select the desired PHY in the MIF configuration register */
	v = mifi_cfg = bus_space_read_4(t, mif, HME_MIFI_CFG);
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MIF_CFG_PHY;
	bus_space_write_4(t, mif, HME_MIFI_CFG, v);

	/* Enable MII drivers on external transceiver */
	v = xif_cfg = bus_space_read_4(t, mac, HME_MACI_XIF);
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MAC_XIF_MIIENABLE;
	else
		v &= ~HME_MAC_XIF_MIIENABLE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);

#if 0
	/* This doesn't work reliably; the MDIO_1 bit is off most of the time */
	/*
	 * Check whether a transceiver is connected by testing
	 * the MIF configuration register's MDI_X bits.  Note that
	 * MDI_0 (int) == 0x100 and MDI_1 (ext) == 0x200; see hmereg.h
	 */
	mif_mdi_bit = 1 << (8 + (1 - phy));
	delay(100);
	v = bus_space_read_4(t, mif, HME_MIFI_CFG);
	if ((v & mif_mdi_bit) == 0)
		return;
#endif

	/* Construct the frame command */
	v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) |
	    HME_MIF_FO_TAMSB |
	    (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT) |
	    (phy << HME_MIF_FO_PHYAD_SHIFT) |
	    (reg << HME_MIF_FO_REGAD_SHIFT) |
	    (val & HME_MIF_FO_DATA);

	bus_space_write_4(t, mif, HME_MIFI_FO, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, HME_MIFI_FO);
		if (v & HME_MIF_FO_TALSB)
			goto out;
	}

	printf("%s: mii_write timeout\n", device_xname(sc->sc_dev));
out:
	/* Restore MIFI_CFG register */
	bus_space_write_4(t, mif, HME_MIFI_CFG, mifi_cfg);
	/* Restore XIF register */
	bus_space_write_4(t, mac, HME_MACI_XIF, xif_cfg);
}

static void
hme_mii_statchg(struct ifnet *ifp)
{
	struct hme_softc *sc = ifp->if_softc;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_mac;
	uint32_t v;

#ifdef HMEDEBUG
	if (sc->sc_debug)
		printf("hme_mii_statchg: status change\n");
#endif

	/* Set the MAC Full Duplex bit appropriately */
	/* Apparently the hme chip is SIMPLEX if working in full duplex mode,
	   but not otherwise. */
	v = bus_space_read_4(t, mac, HME_MACI_TXCFG);
	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0) {
		v |= HME_MAC_TXCFG_FULLDPLX;
		sc->sc_ethercom.ec_if.if_flags |= IFF_SIMPLEX;
	} else {
		v &= ~HME_MAC_TXCFG_FULLDPLX;
		sc->sc_ethercom.ec_if.if_flags &= ~IFF_SIMPLEX;
	}
	sc->sc_if_flags = sc->sc_ethercom.ec_if.if_flags;
	bus_space_write_4(t, mac, HME_MACI_TXCFG, v);
}

int
hme_mediachange(struct ifnet *ifp)
{
	struct hme_softc *sc = ifp->if_softc;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_mif;
	bus_space_handle_t mac = sc->sc_mac;
	int instance = IFM_INST(sc->sc_mii.mii_media.ifm_cur->ifm_media);
	int phy = sc->sc_phys[instance];
	int rc;
	uint32_t v;

#ifdef HMEDEBUG
	if (sc->sc_debug)
		printf("hme_mediachange: phy = %d\n", phy);
#endif

	/* Select the current PHY in the MIF configuration register */
	v = bus_space_read_4(t, mif, HME_MIFI_CFG);
	v &= ~HME_MIF_CFG_PHY;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MIF_CFG_PHY;
	bus_space_write_4(t, mif, HME_MIFI_CFG, v);

	/* If an external transceiver is selected, enable its MII drivers */
	v = bus_space_read_4(t, mac, HME_MACI_XIF);
	v &= ~HME_MAC_XIF_MIIENABLE;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MAC_XIF_MIIENABLE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);

	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
		return 0;
	return rc;
}
/*
 * Process an ioctl request.
 */
int
hme_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct hme_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {

	case SIOCINITIFADDR:
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			if (ifp->if_flags & IFF_UP)
				hme_setladrf(sc);
			else {
				ifp->if_flags |= IFF_UP;
				error = hme_init(ifp);
			}
			arp_ifinit(ifp, ifa);
			break;
#endif
		default:
			ifp->if_flags |= IFF_UP;
			error = hme_init(ifp);
			break;
		}
		break;

	case SIOCSIFFLAGS:
#ifdef HMEDEBUG
		{
			struct ifreq *ifr = data;
			sc->sc_debug =
			    (ifr->ifr_flags & IFF_DEBUG) != 0 ? 1 : 0;
		}
#endif
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;

		switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
		case IFF_RUNNING:
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			hme_stop(ifp, 0);
			ifp->if_flags &= ~IFF_RUNNING;
			break;
		case IFF_UP:
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			error = hme_init(ifp);
			break;
		case IFF_UP|IFF_RUNNING:
			/*
			 * If setting debug or promiscuous mode, do not reset
			 * the chip; for everything else, call hme_init()
			 * which will trigger a reset.
			 */
#define RESETIGN (IFF_CANTCHANGE | IFF_DEBUG)
			if (ifp->if_flags != sc->sc_if_flags) {
				if ((ifp->if_flags & (~RESETIGN))
				    == (sc->sc_if_flags & (~RESETIGN)))
					hme_setladrf(sc);
				else
					error = hme_init(ifp);
			}
#undef RESETIGN
			break;
		case 0:
			break;
		}

		if (sc->sc_ec_capenable != sc->sc_ethercom.ec_capenable)
			error = hme_init(ifp);

		break;

	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;

		error = 0;

		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			hme_setladrf(sc);
		}
		break;
	}

	sc->sc_if_flags = ifp->if_flags;
	splx(s);
	return (error);
}

bool
hme_shutdown(device_t self, int howto)
{
	struct hme_softc *sc;
	struct ifnet *ifp;

	sc = device_private(self);
	ifp = &sc->sc_ethercom.ec_if;
	hme_stop(ifp, 1);

	return true;
}
/*
 * Set up the logical address filter.
 */
void
hme_setladrf(struct hme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ethercom *ec = &sc->sc_ethercom;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_mac;
	uint32_t v;
	uint32_t crc;
	uint32_t hash[4];

	/* Clear hash table */
	hash[3] = hash[2] = hash[1] = hash[0] = 0;

	/* Get current RX configuration */
	v = bus_space_read_4(t, mac, HME_MACI_RXCFG);

	if ((ifp->if_flags & IFF_PROMISC) != 0) {
		/* Turn on promiscuous mode; turn off the hash filter */
		v |= HME_MAC_RXCFG_PMISC;
		v &= ~HME_MAC_RXCFG_HENABLE;
		ifp->if_flags |= IFF_ALLMULTI;
		goto chipit;
	}

	/* Turn off promiscuous mode; turn on the hash filter */
	v &= ~HME_MAC_RXCFG_PMISC;
	v |= HME_MAC_RXCFG_HENABLE;

	/*
	 * Set up multicast address filter by passing all multicast addresses
	 * through a crc generator, and then using the high order 6 bits as an
	 * index into the 64 bit logical address filter.  The two high order
	 * bits select the 16-bit word, while the low four bits select the bit
	 * within the word.
	 */

	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			hash[3] = hash[2] = hash[1] = hash[0] = 0xffff;
			ifp->if_flags |= IFF_ALLMULTI;
			goto chipit;
		}

		crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN);

		/* Just want the 6 most significant bits. */
		crc >>= 26;

		/* Set the corresponding bit in the filter. */
		hash[crc >> 4] |= 1 << (crc & 0xf);

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;

chipit:
	/* Now load the hash table into the chip */
	bus_space_write_4(t, mac, HME_MACI_HASHTAB0, hash[0]);
	bus_space_write_4(t, mac, HME_MACI_HASHTAB1, hash[1]);
	bus_space_write_4(t, mac, HME_MACI_HASHTAB2, hash[2]);
	bus_space_write_4(t, mac, HME_MACI_HASHTAB3, hash[3]);
	bus_space_write_4(t, mac, HME_MACI_RXCFG, v);
}
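/*
 * Worked example (illustrative): if ether_crc32_le() of a multicast
 * address yields a value whose top six bits are 0b100110 (0x26), then
 * in hme_setladrf() above the two high bits (0b10 = 2) select hash[2]
 * and the low four bits (0b0110 = 6) select bit 6, i.e.
 * hash[2] |= 1 << 6, which ends up in the HASHTAB2 register.
 */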
/*
 * Routines for accessing the transmit and receive buffers.
 * The various CPU and adapter configurations supported by this
 * driver require three different access methods for buffers
 * and descriptors:
 *	(1) contig (contiguous data; no padding),
 *	(2) gap2 (two bytes of data followed by two bytes of padding),
 *	(3) gap16 (16 bytes of data followed by 16 bytes of padding).
 */

#if 0
/*
 * contig: contiguous data with no padding.
 *
 * Buffers may have any alignment.
 */

void
hme_copytobuf_contig(struct hme_softc *sc, void *from, int ri, int len)
{
	volatile void *buf = sc->sc_rb.rb_txbuf + (ri * _HME_BUFSZ);

	/*
	 * Just call memcpy() to do the work.
	 */
	memcpy(buf, from, len);
}

void
hme_copyfrombuf_contig(struct hme_softc *sc, void *to, int ri, int len)
{
	volatile void *buf = sc->sc_rb.rb_rxbuf + (ri * _HME_BUFSZ);

	/*
	 * Just call memcpy() to do the work.
	 */
	memcpy(to, buf, len);
}
#endif