1 /* $OpenBSD: hme.c,v 1.81 2017/01/22 10:17:38 dlg Exp $ */ 2 /* $NetBSD: hme.c,v 1.21 2001/07/07 15:59:37 thorpej Exp $ */ 3 4 /*- 5 * Copyright (c) 1999 The NetBSD Foundation, Inc. 6 * All rights reserved. 7 * 8 * This code is derived from software contributed to The NetBSD Foundation 9 * by Paul Kranenburg. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 * POSSIBILITY OF SUCH DAMAGE. 31 */ 32 33 /* 34 * HME Ethernet module driver. 
 */

#include "bpfilter.h"

#undef HMEDEBUG

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/errno.h>

#include <net/if.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <machine/bus.h>

#include <dev/ic/hmereg.h>
#include <dev/ic/hmevar.h>

struct cfdriver hme_cd = {
	NULL, "hme", DV_IFNET
};

/*
 * Number of pad bytes the chip prepends to each received frame
 * (programmed into ERX_CFG in hme_init()).
 */
#define HME_RX_OFFSET 2

void hme_start(struct ifnet *);
void hme_stop(struct hme_softc *, int);
int hme_ioctl(struct ifnet *, u_long, caddr_t);
void hme_tick(void *);
void hme_watchdog(struct ifnet *);
void hme_init(struct hme_softc *);
void hme_meminit(struct hme_softc *);
void hme_mifinit(struct hme_softc *);
void hme_reset(struct hme_softc *);
void hme_iff(struct hme_softc *);
void hme_fill_rx_ring(struct hme_softc *);
int hme_newbuf(struct hme_softc *, struct hme_sxd *);

/* MII methods & callbacks */
static int hme_mii_readreg(struct device *, int, int);
static void hme_mii_writereg(struct device *, int, int, int);
static void hme_mii_statchg(struct device *);

int hme_mediachange(struct ifnet *);
void hme_mediastatus(struct ifnet *, struct ifmediareq *);

int hme_eint(struct hme_softc *, u_int);
int hme_rint(struct hme_softc *);
int hme_tint(struct hme_softc *);

/*
 * Bus-independent attach: create DMA maps for both rings, allocate and
 * load the descriptor memory, probe the PHYs via MII and attach the
 * network interface.
 */
void
hme_config(struct hme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *child;
	bus_dma_tag_t dmatag = sc->sc_dmatag;
	bus_dma_segment_t seg;
	bus_size_t size;
	int rseg, error, i;

	/*
	 * HME common initialization.
	 *
	 * hme_softc fields that must be initialized by the front-end:
	 *
	 *	the bus tag:
	 *		sc_bustag
	 *
	 *	the dma bus tag:
	 *		sc_dmatag
	 *
	 *	the bus handles:
	 *		sc_seb	(Shared Ethernet Block registers)
	 *		sc_erx	(Receiver Unit registers)
	 *		sc_etx	(Transmitter Unit registers)
	 *		sc_mac	(MAC registers)
	 *		sc_mif	(Management Interface registers)
	 *
	 *	the maximum bus burst size:
	 *		sc_burst
	 *
	 *	the local Ethernet address:
	 *		sc_arpcom.ac_enaddr
	 *
	 */

	/* Make sure the chip is stopped. */
	hme_stop(sc, 0);

	for (i = 0; i < HME_TX_RING_SIZE; i++) {
		if (bus_dmamap_create(sc->sc_dmatag, MCLBYTES, HME_TX_NSEGS,
		    MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &sc->sc_txd[i].sd_map) != 0) {
			sc->sc_txd[i].sd_map = NULL;
			goto fail;
		}
	}
	for (i = 0; i < HME_RX_RING_SIZE; i++) {
		if (bus_dmamap_create(sc->sc_dmatag, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &sc->sc_rxd[i].sd_map) != 0) {
			sc->sc_rxd[i].sd_map = NULL;
			goto fail;
		}
	}
	/* Spare map used by hme_newbuf() so RX never loses its old map. */
	if (bus_dmamap_create(sc->sc_dmatag, MCLBYTES, 1, MCLBYTES, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_rxmap_spare) != 0) {
		sc->sc_rxmap_spare = NULL;
		goto fail;
	}

	/*
	 * Allocate DMA capable memory
	 * Buffer descriptors must be aligned on a 2048 byte boundary;
	 * take this into account when calculating the size. Note that
	 * the maximum number of descriptors (256) occupies 2048 bytes,
	 * so we allocate that much regardless of the number of descriptors.
	 */
	size = (HME_XD_SIZE * HME_RX_RING_MAX) +	/* RX descriptors */
	    (HME_XD_SIZE * HME_TX_RING_MAX);		/* TX descriptors */

	/* Allocate DMA buffer */
	if ((error = bus_dmamem_alloc(dmatag, size, 2048, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("\n%s: DMA buffer alloc error %d\n",
		    sc->sc_dev.dv_xname, error);
		return;
	}

	/* Map DMA memory in CPU addressable space */
	if ((error = bus_dmamem_map(dmatag, &seg, rseg, size,
	    &sc->sc_rb.rb_membase, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("\n%s: DMA buffer map error %d\n",
		    sc->sc_dev.dv_xname, error);
		bus_dmamap_unload(dmatag, sc->sc_dmamap);
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}

	if ((error = bus_dmamap_create(dmatag, size, 1, size, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
		printf("\n%s: DMA map create error %d\n",
		    sc->sc_dev.dv_xname, error);
		return;
	}

	/* Load the buffer */
	if ((error = bus_dmamap_load(dmatag, sc->sc_dmamap,
	    sc->sc_rb.rb_membase, size, NULL,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("\n%s: DMA buffer map load error %d\n",
		    sc->sc_dev.dv_xname, error);
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}
	sc->sc_rb.rb_dmabase = sc->sc_dmamap->dm_segs[0].ds_addr;

	printf(", address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));

	/* Initialize ifnet structure. */
	strlcpy(ifp->if_xname, sc->sc_dev.dv_xname, sizeof ifp->if_xname);
	ifp->if_softc = sc;
	ifp->if_start = hme_start;
	ifp->if_ioctl = hme_ioctl;
	ifp->if_watchdog = hme_watchdog;
	ifp->if_flags =
	    IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_capabilities = IFCAP_VLAN_MTU;

	/* Initialize ifmedia structures and MII info */
	mii->mii_ifp = ifp;
	mii->mii_readreg = hme_mii_readreg;
	mii->mii_writereg = hme_mii_writereg;
	mii->mii_statchg = hme_mii_statchg;

	ifmedia_init(&mii->mii_media, IFM_IMASK,
	    hme_mediachange, hme_mediastatus);

	hme_mifinit(sc);

	/* sc_tcvr == -1 means hme_mifinit() found no attached MDI port. */
	if (sc->sc_tcvr == -1)
		mii_attach(&sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
		    MII_OFFSET_ANY, 0);
	else
		mii_attach(&sc->sc_dev, mii, 0xffffffff, sc->sc_tcvr,
		    MII_OFFSET_ANY, 0);

	child = LIST_FIRST(&mii->mii_phys);
	if (child == NULL) {
		/* No PHY attached */
		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
	} else {
		/*
		 * Walk along the list of attached MII devices and
		 * establish an `MII instance' to `phy number'
		 * mapping. We'll use this mapping in media change
		 * requests to determine which phy to use to program
		 * the MIF configuration register.
		 */
		for (; child != NULL; child = LIST_NEXT(child, mii_list)) {
			/*
			 * Note: we support just two PHYs: the built-in
			 * internal device and an external on the MII
			 * connector.
			 */
			if (child->mii_phy > 1 || child->mii_inst > 1) {
				printf("%s: cannot accommodate MII device %s"
				    " at phy %d, instance %lld\n",
				    sc->sc_dev.dv_xname,
				    child->mii_dev.dv_xname,
				    child->mii_phy, child->mii_inst);
				continue;
			}

			sc->sc_phys[child->mii_inst] = child->mii_phy;
		}

		/*
		 * XXX - we can really do the following ONLY if the
		 * phy indeed has the auto negotiation capability!!
		 */
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);
	}

	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp);

	timeout_set(&sc->sc_tick_ch, hme_tick, sc);
	return;

fail:
	/* Unwind every DMA map created before the failure. */
	if (sc->sc_rxmap_spare != NULL)
		bus_dmamap_destroy(sc->sc_dmatag, sc->sc_rxmap_spare);
	for (i = 0; i < HME_TX_RING_SIZE; i++)
		if (sc->sc_txd[i].sd_map != NULL)
			bus_dmamap_destroy(sc->sc_dmatag, sc->sc_txd[i].sd_map);
	for (i = 0; i < HME_RX_RING_SIZE; i++)
		if (sc->sc_rxd[i].sd_map != NULL)
			bus_dmamap_destroy(sc->sc_dmatag, sc->sc_rxd[i].sd_map);
}

/*
 * Bus-independent detach: stop the chip, destroy the DMA maps, detach
 * the PHYs and the network interface.
 */
void
hme_unconfig(struct hme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int i;

	hme_stop(sc, 1);

	bus_dmamap_destroy(sc->sc_dmatag, sc->sc_rxmap_spare);
	for (i = 0; i < HME_TX_RING_SIZE; i++)
		if (sc->sc_txd[i].sd_map != NULL)
			bus_dmamap_destroy(sc->sc_dmatag, sc->sc_txd[i].sd_map);
	for (i = 0; i < HME_RX_RING_SIZE; i++)
		if (sc->sc_rxd[i].sd_map != NULL)
			bus_dmamap_destroy(sc->sc_dmatag, sc->sc_rxd[i].sd_map);

	/* Detach all PHYs */
	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);

	/* Delete all remaining media. */
	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);

	ether_ifdetach(ifp);
	if_detach(ifp);
}

/*
 * One-second timer: harvest the MAC collision counters, refill the
 * receive ring if it ran dry, and poll the PHY.  Re-arms itself.
 */
void
hme_tick(void *arg)
{
	struct hme_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_mac;
	int s;

	s = splnet();
	/*
	 * Unload collision counters
	 */
	ifp->if_collisions +=
	    bus_space_read_4(t, mac, HME_MACI_NCCNT) +
	    bus_space_read_4(t, mac, HME_MACI_FCCNT) +
	    bus_space_read_4(t, mac, HME_MACI_EXCNT) +
	    bus_space_read_4(t, mac, HME_MACI_LTCNT);

	/*
	 * then clear the hardware counters.
	 */
	bus_space_write_4(t, mac, HME_MACI_NCCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_FCCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_EXCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_LTCNT, 0);

	/*
	 * If buffer allocation fails, the receive ring may become
	 * empty. There is no receive interrupt to recover from that.
	 */
	if (if_rxr_inuse(&sc->sc_rx_ring) == 0)
		hme_fill_rx_ring(sc);

	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add_sec(&sc->sc_tick_ch, 1);
}

/* Re-run the full chip initialization sequence, at splnet. */
void
hme_reset(struct hme_softc *sc)
{
	int s;

	s = splnet();
	hme_init(sc);
	splx(s);
}

/*
 * Stop the interface.  With `softonly' nonzero only the software state
 * is torn down; otherwise the transmitter and receiver are also reset
 * in hardware.  Queued mbufs on both rings are freed in either case.
 */
void
hme_stop(struct hme_softc *sc, int softonly)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_seb;
	int n;

	timeout_del(&sc->sc_tick_ch);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	if (!softonly) {
		mii_down(&sc->sc_mii);

		/* Mask all interrupts */
		bus_space_write_4(t, seb, HME_SEBI_IMASK, 0xffffffff);

		/* Reset transmitter and receiver */
		bus_space_write_4(t, seb, HME_SEBI_RESET,
		    (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX));

		/* Wait up to 20 * 20us for the reset bits to self-clear. */
		for (n = 0; n < 20; n++) {
			u_int32_t v = bus_space_read_4(t, seb, HME_SEBI_RESET);
			if ((v & (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)) == 0)
				break;
			DELAY(20);
		}
		if (n >= 20)
			printf("%s: hme_stop: reset failed\n", sc->sc_dev.dv_xname);
	}

	/* Release any mbufs still held on the transmit ring. */
	for (n = 0; n < HME_TX_RING_SIZE; n++) {
		if (sc->sc_txd[n].sd_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_dmatag, sc->sc_txd[n].sd_map,
			    0, sc->sc_txd[n].sd_map->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmatag, sc->sc_txd[n].sd_map);
			m_freem(sc->sc_txd[n].sd_mbuf);
			sc->sc_txd[n].sd_mbuf = NULL;
		}
	}
	sc->sc_tx_prod = sc->sc_tx_cons = sc->sc_tx_cnt = 0;

	/* Likewise for the receive ring. */
	for (n = 0; n < HME_RX_RING_SIZE; n++) {
		if (sc->sc_rxd[n].sd_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_dmatag, sc->sc_rxd[n].sd_map,
			    0, sc->sc_rxd[n].sd_map->dm_mapsize,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmatag, sc->sc_rxd[n].sd_map);
			m_freem(sc->sc_rxd[n].sd_mbuf);
			sc->sc_rxd[n].sd_mbuf = NULL;
		}
	}
	sc->sc_rx_prod = sc->sc_rx_cons = 0;
}

/*
 * Carve the pre-allocated DMA area into the TX and RX descriptor rings
 * (each 2048-byte aligned), clear every descriptor, and pre-load the
 * receive ring with fresh buffers.
 */
void
hme_meminit(struct hme_softc *sc)
{
	bus_addr_t dma;
	caddr_t p;
	unsigned int i;
	struct hme_ring *hr = &sc->sc_rb;

	p = hr->rb_membase;
	dma = hr->rb_dmabase;

	/*
	 * Allocate transmit descriptors
	 */
	hr->rb_txd = p;
	hr->rb_txddma = dma;
	p += HME_TX_RING_SIZE * HME_XD_SIZE;
	dma += HME_TX_RING_SIZE * HME_XD_SIZE;
	/* We have reserved descriptor space until the next 2048 byte boundary.*/
	dma = (bus_addr_t)roundup((u_long)dma, 2048);
	p = (caddr_t)roundup((u_long)p, 2048);

	/*
	 * Allocate receive descriptors
	 */
	hr->rb_rxd = p;
	hr->rb_rxddma = dma;
	p += HME_RX_RING_SIZE * HME_XD_SIZE;
	dma += HME_RX_RING_SIZE * HME_XD_SIZE;
	/* Again move forward to the next 2048 byte boundary.*/
	dma = (bus_addr_t)roundup((u_long)dma, 2048);
	p = (caddr_t)roundup((u_long)p, 2048);

	/*
	 * Initialize transmit descriptors
	 */
	for (i = 0; i < HME_TX_RING_SIZE; i++) {
		HME_XD_SETADDR(sc->sc_pci, hr->rb_txd, i, 0);
		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, i, 0);
		sc->sc_txd[i].sd_mbuf = NULL;
	}

	/*
	 * Initialize receive descriptors
	 */
	for (i = 0; i < HME_RX_RING_SIZE; i++) {
		HME_XD_SETADDR(sc->sc_pci, hr->rb_rxd, i, 0);
		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_rxd, i, 0);
		sc->sc_rxd[i].sd_mbuf = NULL;
	}

	if_rxr_init(&sc->sc_rx_ring, 2, HME_RX_RING_SIZE);
	hme_fill_rx_ring(sc);
}

/*
 * Initialization of interface; set up initialization block
 * and
transmit/receive descriptor rings.
 */
void
hme_init(struct hme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_seb;
	bus_space_handle_t etx = sc->sc_etx;
	bus_space_handle_t erx = sc->sc_erx;
	bus_space_handle_t mac = sc->sc_mac;
	u_int8_t *ea;
	u_int32_t v;

	/*
	 * Initialization sequence. The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.
	 */

	/* step 1 & 2. Reset the Ethernet Channel */
	hme_stop(sc, 0);

	/* Re-initialize the MIF */
	hme_mifinit(sc);

	/* step 3. Setup data structures in host memory */
	hme_meminit(sc);

	/* step 4. TX MAC registers & counters */
	bus_space_write_4(t, mac, HME_MACI_NCCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_FCCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_EXCNT, 0);
	bus_space_write_4(t, mac, HME_MACI_LTCNT, 0);
	/* Max TX frame size: allow for a VLAN tag on top of a full frame. */
	bus_space_write_4(t, mac, HME_MACI_TXSIZE, ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);

	/* Load station MAC address */
	ea = sc->sc_arpcom.ac_enaddr;
	bus_space_write_4(t, mac, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1]);
	bus_space_write_4(t, mac, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3]);
	bus_space_write_4(t, mac, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5]);

	/*
	 * Init seed for backoff
	 * (source suggested by manual: low 10 bits of MAC address)
	 * NOTE(review): the mask below keeps 14 bits (0x3fff), not 10;
	 * harmless as a seed, but it does not match the comment.
	 */
	v = ((ea[4] << 8) | ea[5]) & 0x3fff;
	bus_space_write_4(t, mac, HME_MACI_RANDSEED, v);


	/* Note: Accepting power-on default for other MAC registers here.. */


	/* step 5. RX MAC registers & counters */
	hme_iff(sc);

	/* step 6 & 7. Program Descriptor Ring Base Addresses */
	bus_space_write_4(t, etx, HME_ETXI_RING, sc->sc_rb.rb_txddma);
	bus_space_write_4(t, etx, HME_ETXI_RSIZE, HME_TX_RING_SIZE);

	bus_space_write_4(t, erx, HME_ERXI_RING, sc->sc_rb.rb_rxddma);
	/* Max RX frame size mirrors the TX limit above. */
	bus_space_write_4(t, mac, HME_MACI_RXSIZE, ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);

	/* step 8. Global Configuration & Interrupt Mask */
	bus_space_write_4(t, seb, HME_SEBI_IMASK,
	    ~(HME_SEB_STAT_HOSTTOTX | HME_SEB_STAT_RXTOHOST |
	      HME_SEB_STAT_TXALL | HME_SEB_STAT_TXPERR |
	      HME_SEB_STAT_RCNTEXP | HME_SEB_STAT_ALL_ERRORS));

	/* Translate the front-end's burst size into the SEB config bits. */
	switch (sc->sc_burst) {
	default:
		v = 0;
		break;
	case 16:
		v = HME_SEB_CFG_BURST16;
		break;
	case 32:
		v = HME_SEB_CFG_BURST32;
		break;
	case 64:
		v = HME_SEB_CFG_BURST64;
		break;
	}
	bus_space_write_4(t, seb, HME_SEBI_CFG, v);

	/* step 9. ETX Configuration: use mostly default values */

	/* Enable DMA */
	v = bus_space_read_4(t, etx, HME_ETXI_CFG);
	v |= HME_ETX_CFG_DMAENABLE;
	bus_space_write_4(t, etx, HME_ETXI_CFG, v);

	/* Transmit Descriptor ring size: in increments of 16 */
	bus_space_write_4(t, etx, HME_ETXI_RSIZE, HME_TX_RING_SIZE / 16 - 1);

	/* step 10. ERX Configuration */
	v = bus_space_read_4(t, erx, HME_ERXI_CFG);
	v &= ~HME_ERX_CFG_RINGSIZE256;
#if HME_RX_RING_SIZE == 32
	v |= HME_ERX_CFG_RINGSIZE32;
#elif HME_RX_RING_SIZE == 64
	v |= HME_ERX_CFG_RINGSIZE64;
#elif HME_RX_RING_SIZE == 128
	v |= HME_ERX_CFG_RINGSIZE128;
#elif HME_RX_RING_SIZE == 256
	v |= HME_ERX_CFG_RINGSIZE256;
#else
# error "RX ring size must be 32, 64, 128, or 256"
#endif
	/* Enable DMA */
	v |= HME_ERX_CFG_DMAENABLE | (HME_RX_OFFSET << 3);
	bus_space_write_4(t, erx, HME_ERXI_CFG, v);

	/* step 11. XIF Configuration */
	v = bus_space_read_4(t, mac, HME_MACI_XIF);
	v |= HME_MAC_XIF_OE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);

	/* step 12. RX_MAC Configuration Register */
	v = bus_space_read_4(t, mac, HME_MACI_RXCFG);
	v |= HME_MAC_RXCFG_ENABLE;
	bus_space_write_4(t, mac, HME_MACI_RXCFG, v);

	/* step 13. TX_MAC Configuration Register */
	v = bus_space_read_4(t, mac, HME_MACI_TXCFG);
	v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP);
	bus_space_write_4(t, mac, HME_MACI_TXCFG, v);

	/* Set the current media. */
	mii_mediachg(&sc->sc_mii);

	/* Start the one second timer. */
	timeout_add_sec(&sc->sc_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	hme_start(ifp);
}

/*
 * Transmit start routine: dequeue packets from the send queue and load
 * them onto the TX descriptor ring until the ring fills or the queue
 * drains.
 */
void
hme_start(struct ifnet *ifp)
{
	struct hme_softc *sc = (struct hme_softc *)ifp->if_softc;
	struct hme_ring *hr = &sc->sc_rb;
	struct mbuf *m;
	u_int32_t flags;
	bus_dmamap_t map;
	u_int32_t frag, cur, i;
	int error;

	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;

	while (sc->sc_txd[sc->sc_tx_prod].sd_mbuf == NULL) {
		m = ifq_deq_begin(&ifp->if_snd);
		if (m == NULL)
			break;

		/*
		 * Encapsulate this packet and start it going...
		 * or fail...
		 */

		cur = frag = sc->sc_tx_prod;
		map = sc->sc_txd[cur].sd_map;

		error = bus_dmamap_load_mbuf(sc->sc_dmatag, map, m,
		    BUS_DMA_NOWAIT);
		if (error != 0 && error != EFBIG)
			goto drop;
		if (error != 0) {
			/* Too many fragments, linearize.
			 */
			if (m_defrag(m, M_DONTWAIT))
				goto drop;
			error = bus_dmamap_load_mbuf(sc->sc_dmatag, map, m,
			    BUS_DMA_NOWAIT);
			if (error != 0)
				goto drop;
		}

		/* Keep some slack in the ring; back off if it is too full. */
		if ((HME_TX_RING_SIZE - (sc->sc_tx_cnt + map->dm_nsegs)) < 5) {
			bus_dmamap_unload(sc->sc_dmatag, map);
			ifq_deq_rollback(&ifp->if_snd, m);
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		/* We are now committed to transmitting the packet. */
		ifq_deq_commit(&ifp->if_snd, m);

#if NBPFILTER > 0
		/*
		 * If BPF is listening on this interface, let it see the
		 * packet before we commit it to the wire.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		bus_dmamap_sync(sc->sc_dmatag, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Fill one descriptor per DMA segment.  The first (SOP)
		 * descriptor is not handed to the hardware (no OWN bit)
		 * until the whole chain is ready, below.
		 */
		for (i = 0; i < map->dm_nsegs; i++) {
			flags = HME_XD_ENCODE_TSIZE(map->dm_segs[i].ds_len);
			if (i == 0)
				flags |= HME_XD_SOP;
			else
				flags |= HME_XD_OWN;

			HME_XD_SETADDR(sc->sc_pci, hr->rb_txd, frag,
			    map->dm_segs[i].ds_addr);
			HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, frag, flags);

			cur = frag;
			if (++frag == HME_TX_RING_SIZE)
				frag = 0;
		}

		/* Set end of packet on last descriptor. */
		flags = HME_XD_GETFLAGS(sc->sc_pci, hr->rb_txd, cur);
		flags |= HME_XD_EOP;
		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, cur, flags);

		sc->sc_tx_cnt += map->dm_nsegs;
		/* The mbuf (and its loaded map) is tracked on the last slot. */
		sc->sc_txd[sc->sc_tx_prod].sd_map = sc->sc_txd[cur].sd_map;
		sc->sc_txd[cur].sd_map = map;
		sc->sc_txd[cur].sd_mbuf = m;

		/* Give first frame over to the hardware. */
		flags = HME_XD_GETFLAGS(sc->sc_pci, hr->rb_txd, sc->sc_tx_prod);
		flags |= HME_XD_OWN;
		HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, sc->sc_tx_prod, flags);

		bus_space_write_4(sc->sc_bustag, sc->sc_etx, HME_ETXI_PENDING,
		    HME_ETX_TP_DMAWAKEUP);
		sc->sc_tx_prod = frag;

		/* Arm the watchdog. */
		ifp->if_timer = 5;
	}

	return;

drop:
	ifq_deq_commit(&ifp->if_snd, m);
	m_freem(m);
	ifp->if_oerrors++;
}

/*
 * Transmit interrupt.
 */
int
hme_tint(struct hme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	unsigned int ri, txflags;
	struct hme_sxd *sd;
	int cnt = sc->sc_tx_cnt;

	/* Fetch current position in the transmit ring */
	ri = sc->sc_tx_cons;
	sd = &sc->sc_txd[ri];

	/* Reclaim every descriptor the hardware has finished with. */
	for (;;) {
		if (cnt <= 0)
			break;

		txflags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri);

		if (txflags & HME_XD_OWN)
			break;

		ifq_clr_oactive(&ifp->if_snd);

		if (sd->sd_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_dmatag, sd->sd_map,
			    0, sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmatag, sd->sd_map);
			m_freem(sd->sd_mbuf);
			sd->sd_mbuf = NULL;
		}

		if (++ri == HME_TX_RING_SIZE) {
			ri = 0;
			sd = sc->sc_txd;
		} else
			sd++;

		--cnt;
	}

	sc->sc_tx_cnt = cnt;
	ifp->if_timer = cnt > 0 ? 5 : 0;

	/* Update ring */
	sc->sc_tx_cons = ri;

	hme_start(ifp);

	return (1);
}

/*
 * Receive interrupt.
 */
int
hme_rint(struct hme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	struct hme_sxd *sd;
	unsigned int ri, len;
	u_int32_t flags;

	ri = sc->sc_rx_cons;
	sd = &sc->sc_rxd[ri];

	/*
	 * Process all buffers with valid data.
	 */
	while (if_rxr_inuse(&sc->sc_rx_ring) > 0) {
		flags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ri);
		/* Descriptor still owned by the hardware: nothing more. */
		if (flags & HME_XD_OWN)
			break;

		bus_dmamap_sync(sc->sc_dmatag, sd->sd_map,
		    0, sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmatag, sd->sd_map);

		m = sd->sd_mbuf;
		sd->sd_mbuf = NULL;

		if (++ri == HME_RX_RING_SIZE) {
			ri = 0;
			sd = sc->sc_rxd;
		} else
			sd++;

		if_rxr_put(&sc->sc_rx_ring, 1);

		if (flags & HME_XD_OFL) {
			ifp->if_ierrors++;
			printf("%s: buffer overflow, ri=%d; flags=0x%x\n",
			    sc->sc_dev.dv_xname, ri, flags);
			m_freem(m);
			continue;
		}

		len = HME_XD_DECODE_RSIZE(flags);
		m->m_pkthdr.len = m->m_len = len;

		ml_enqueue(&ml, m);
	}

	if_input(ifp, &ml);

	sc->sc_rx_cons = ri;
	hme_fill_rx_ring(sc);
	return (1);
}

/*
 * Error interrupt: count/clear the error bits in `status'.
 */
int
hme_eint(struct hme_softc *sc, u_int status)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;

	if (status & HME_SEB_STAT_MIFIRQ) {
		printf("%s: XXXlink status changed\n", sc->sc_dev.dv_xname);
		status &= ~HME_SEB_STAT_MIFIRQ;
	}

	if (status & HME_SEB_STAT_DTIMEXP) {
		ifp->if_oerrors++;
		status &= ~HME_SEB_STAT_DTIMEXP;
	}

	if (status & HME_SEB_STAT_NORXD) {
		ifp->if_ierrors++;
		status &= ~HME_SEB_STAT_NORXD;
	}

	status &= ~(HME_SEB_STAT_RXTOHOST | HME_SEB_STAT_GOTFRAME |
	    HME_SEB_STAT_SENTFRAME | HME_SEB_STAT_HOSTTOTX |
	    HME_SEB_STAT_TXALL);

	if (status == 0)
		return (1);

#ifdef HME_DEBUG
	/*
	 * NOTE(review): the rest of this file gates debug code on
	 * HMEDEBUG, not HME_DEBUG -- confirm which macro is intended.
	 */
	printf("%s: status=%b\n", sc->sc_dev.dv_xname, status, HME_SEB_STAT_BITS);
#endif
	return (1);
}

/*
 * Top-level interrupt handler: read the SEB status register once and
 * fan out to the error, transmit and receive handlers.
 */
int
hme_intr(void *v)
{
	struct hme_softc *sc = (struct hme_softc *)v;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t seb = sc->sc_seb;
	u_int32_t status;
	int r = 0;

	status = bus_space_read_4(t, seb, HME_SEBI_STAT);
	/* All-ones reads back when the device is absent/detached. */
	if (status == 0xffffffff)
		return (0);

	if ((status & HME_SEB_STAT_ALL_ERRORS) != 0)
		r |= hme_eint(sc, status);

	if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0)
		r |= hme_tint(sc);

	if ((status & HME_SEB_STAT_RXTOHOST) != 0)
		r |= hme_rint(sc);

	return (r);
}


/* Watchdog: the transmitter appears wedged; log it and reset the chip. */
void
hme_watchdog(struct ifnet *ifp)
{
	struct hme_softc *sc = ifp->if_softc;

	log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;

	hme_reset(sc);
}

/*
 * Initialize the MII Management Interface
 */
void
hme_mifinit(struct hme_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_mif;
	bus_space_handle_t mac = sc->sc_mac;
	int phy;
	u_int32_t v;

	/* Probe which MDI port has a transceiver attached (MDI1=external,
	 * MDI0=internal); sc_tcvr = -1 means neither was detected. */
	v = bus_space_read_4(t, mif, HME_MIFI_CFG);
	phy = HME_PHYAD_EXTERNAL;
	if (v & HME_MIF_CFG_MDI1)
		phy = sc->sc_tcvr = HME_PHYAD_EXTERNAL;
	else if (v & HME_MIF_CFG_MDI0)
		phy = sc->sc_tcvr = HME_PHYAD_INTERNAL;
	else
		sc->sc_tcvr = -1;

	/* Configure the MIF in frame mode, no poll, current phy select */
	v = 0;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MIF_CFG_PHY;
	bus_space_write_4(t, mif, HME_MIFI_CFG, v);

	/* If an external transceiver is selected, enable its MII drivers */
	v = bus_space_read_4(t, mac, HME_MACI_XIF);
	v &= ~HME_MAC_XIF_MIIENABLE;
	if (phy == HME_PHYAD_EXTERNAL)
		v |= HME_MAC_XIF_MIIENABLE;
	bus_space_write_4(t, mac, HME_MACI_XIF, v);
}

/*
 * MII interface
 */
static int
hme_mii_readreg(struct device *self, int phy, int reg)
{
	struct hme_softc *sc = (struct hme_softc *)self;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mif = sc->sc_mif;
	bus_space_handle_t mac = sc->sc_mac;
	u_int32_t v, xif_cfg, mifi_cfg;
	int n;

	/* We can at most have two PHYs. */
	if (phy != HME_PHYAD_EXTERNAL && phy != HME_PHYAD_INTERNAL)
		return (0);

	/* Select the desired PHY in the MIF
configuration register */ 976 v = mifi_cfg = bus_space_read_4(t, mif, HME_MIFI_CFG); 977 v &= ~HME_MIF_CFG_PHY; 978 if (phy == HME_PHYAD_EXTERNAL) 979 v |= HME_MIF_CFG_PHY; 980 bus_space_write_4(t, mif, HME_MIFI_CFG, v); 981 982 /* Enable MII drivers on external transceiver */ 983 v = xif_cfg = bus_space_read_4(t, mac, HME_MACI_XIF); 984 if (phy == HME_PHYAD_EXTERNAL) 985 v |= HME_MAC_XIF_MIIENABLE; 986 else 987 v &= ~HME_MAC_XIF_MIIENABLE; 988 bus_space_write_4(t, mac, HME_MACI_XIF, v); 989 990 /* Construct the frame command */ 991 v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) | 992 HME_MIF_FO_TAMSB | 993 (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT) | 994 (phy << HME_MIF_FO_PHYAD_SHIFT) | 995 (reg << HME_MIF_FO_REGAD_SHIFT); 996 997 bus_space_write_4(t, mif, HME_MIFI_FO, v); 998 for (n = 0; n < 100; n++) { 999 DELAY(1); 1000 v = bus_space_read_4(t, mif, HME_MIFI_FO); 1001 if (v & HME_MIF_FO_TALSB) { 1002 v &= HME_MIF_FO_DATA; 1003 goto out; 1004 } 1005 } 1006 1007 v = 0; 1008 printf("%s: mii_read timeout\n", sc->sc_dev.dv_xname); 1009 1010 out: 1011 /* Restore MIFI_CFG register */ 1012 bus_space_write_4(t, mif, HME_MIFI_CFG, mifi_cfg); 1013 /* Restore XIF register */ 1014 bus_space_write_4(t, mac, HME_MACI_XIF, xif_cfg); 1015 return (v); 1016 } 1017 1018 static void 1019 hme_mii_writereg(struct device *self, int phy, int reg, int val) 1020 { 1021 struct hme_softc *sc = (void *)self; 1022 bus_space_tag_t t = sc->sc_bustag; 1023 bus_space_handle_t mif = sc->sc_mif; 1024 bus_space_handle_t mac = sc->sc_mac; 1025 u_int32_t v, xif_cfg, mifi_cfg; 1026 int n; 1027 1028 /* We can at most have two PHYs */ 1029 if (phy != HME_PHYAD_EXTERNAL && phy != HME_PHYAD_INTERNAL) 1030 return; 1031 1032 /* Select the desired PHY in the MIF configuration register */ 1033 v = mifi_cfg = bus_space_read_4(t, mif, HME_MIFI_CFG); 1034 v &= ~HME_MIF_CFG_PHY; 1035 if (phy == HME_PHYAD_EXTERNAL) 1036 v |= HME_MIF_CFG_PHY; 1037 bus_space_write_4(t, mif, HME_MIFI_CFG, v); 1038 1039 /* Enable MII 
drivers on external transceiver */ 1040 v = xif_cfg = bus_space_read_4(t, mac, HME_MACI_XIF); 1041 if (phy == HME_PHYAD_EXTERNAL) 1042 v |= HME_MAC_XIF_MIIENABLE; 1043 else 1044 v &= ~HME_MAC_XIF_MIIENABLE; 1045 bus_space_write_4(t, mac, HME_MACI_XIF, v); 1046 1047 /* Construct the frame command */ 1048 v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) | 1049 HME_MIF_FO_TAMSB | 1050 (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT) | 1051 (phy << HME_MIF_FO_PHYAD_SHIFT) | 1052 (reg << HME_MIF_FO_REGAD_SHIFT) | 1053 (val & HME_MIF_FO_DATA); 1054 1055 bus_space_write_4(t, mif, HME_MIFI_FO, v); 1056 for (n = 0; n < 100; n++) { 1057 DELAY(1); 1058 v = bus_space_read_4(t, mif, HME_MIFI_FO); 1059 if (v & HME_MIF_FO_TALSB) 1060 goto out; 1061 } 1062 1063 printf("%s: mii_write timeout\n", sc->sc_dev.dv_xname); 1064 out: 1065 /* Restore MIFI_CFG register */ 1066 bus_space_write_4(t, mif, HME_MIFI_CFG, mifi_cfg); 1067 /* Restore XIF register */ 1068 bus_space_write_4(t, mac, HME_MACI_XIF, xif_cfg); 1069 } 1070 1071 static void 1072 hme_mii_statchg(struct device *dev) 1073 { 1074 struct hme_softc *sc = (void *)dev; 1075 bus_space_tag_t t = sc->sc_bustag; 1076 bus_space_handle_t mac = sc->sc_mac; 1077 u_int32_t v; 1078 1079 #ifdef HMEDEBUG 1080 if (sc->sc_debug) 1081 printf("hme_mii_statchg: status change\n", phy); 1082 #endif 1083 1084 /* Set the MAC Full Duplex bit appropriately */ 1085 /* Apparently the hme chip is SIMPLEX if working in full duplex mode, 1086 but not otherwise. 
*/ 1087 v = bus_space_read_4(t, mac, HME_MACI_TXCFG); 1088 if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0) { 1089 v |= HME_MAC_TXCFG_FULLDPLX; 1090 sc->sc_arpcom.ac_if.if_flags |= IFF_SIMPLEX; 1091 } else { 1092 v &= ~HME_MAC_TXCFG_FULLDPLX; 1093 sc->sc_arpcom.ac_if.if_flags &= ~IFF_SIMPLEX; 1094 } 1095 bus_space_write_4(t, mac, HME_MACI_TXCFG, v); 1096 } 1097 1098 int 1099 hme_mediachange(struct ifnet *ifp) 1100 { 1101 struct hme_softc *sc = ifp->if_softc; 1102 bus_space_tag_t t = sc->sc_bustag; 1103 bus_space_handle_t mif = sc->sc_mif; 1104 bus_space_handle_t mac = sc->sc_mac; 1105 uint64_t instance = IFM_INST(sc->sc_mii.mii_media.ifm_cur->ifm_media); 1106 int phy = sc->sc_phys[instance]; 1107 u_int32_t v; 1108 1109 #ifdef HMEDEBUG 1110 if (sc->sc_debug) 1111 printf("hme_mediachange: phy = %d\n", phy); 1112 #endif 1113 if (IFM_TYPE(sc->sc_media.ifm_media) != IFM_ETHER) 1114 return (EINVAL); 1115 1116 /* Select the current PHY in the MIF configuration register */ 1117 v = bus_space_read_4(t, mif, HME_MIFI_CFG); 1118 v &= ~HME_MIF_CFG_PHY; 1119 if (phy == HME_PHYAD_EXTERNAL) 1120 v |= HME_MIF_CFG_PHY; 1121 bus_space_write_4(t, mif, HME_MIFI_CFG, v); 1122 1123 /* If an external transceiver is selected, enable its MII drivers */ 1124 v = bus_space_read_4(t, mac, HME_MACI_XIF); 1125 v &= ~HME_MAC_XIF_MIIENABLE; 1126 if (phy == HME_PHYAD_EXTERNAL) 1127 v |= HME_MAC_XIF_MIIENABLE; 1128 bus_space_write_4(t, mac, HME_MACI_XIF, v); 1129 1130 return (mii_mediachg(&sc->sc_mii)); 1131 } 1132 1133 void 1134 hme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 1135 { 1136 struct hme_softc *sc = ifp->if_softc; 1137 1138 if ((ifp->if_flags & IFF_UP) == 0) 1139 return; 1140 1141 mii_pollstat(&sc->sc_mii); 1142 ifmr->ifm_active = sc->sc_mii.mii_media_active; 1143 ifmr->ifm_status = sc->sc_mii.mii_media_status; 1144 } 1145 1146 /* 1147 * Process an ioctl request. 
 */
int
hme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct hme_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	/* Block network interrupts while interface state is manipulated. */
	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		/* Assigning an address implies bringing the interface up. */
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			hme_init(sc);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				/* Already running: just reload rx filter below. */
				error = ENETRESET;
			else
				hme_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				hme_stop(sc, 0);
		}
#ifdef HMEDEBUG
		sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
#endif
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	case SIOCGIFRXR:
		/* Export rx ring occupancy (if_rxr accounting) to userland. */
		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
		    NULL, MCLBYTES, &sc->sc_rx_ring);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	/* ENETRESET: multicast/promisc state changed; reprogram rx filter. */
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			hme_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

/*
 * Program the MAC receive filter.  Three modes: promiscuous
 * (HME_MAC_RXCFG_PMISC), accept-all-multicast (hash filter all ones,
 * used when address ranges are present), or a 4x16-bit multicast hash
 * filter built from the top 6 bits of the CRC of each multicast address.
 */
void
hme_iff(struct hme_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct arpcom *ac = &sc->sc_arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mac = sc->sc_mac;
	u_int32_t hash[4];
	u_int32_t rxcfg, crc;

	rxcfg = bus_space_read_4(t, mac, HME_MACI_RXCFG);
	rxcfg &= ~(HME_MAC_RXCFG_HENABLE | HME_MAC_RXCFG_PMISC);
	ifp->if_flags &= ~IFF_ALLMULTI;
	/* Clear hash table */
	hash[0] = hash[1] = hash[2] = hash[3] = 0;

	if (ifp->if_flags & IFF_PROMISC) {
		ifp->if_flags |= IFF_ALLMULTI;
		rxcfg |= HME_MAC_RXCFG_PMISC;
	} else if (ac->ac_multirangecnt > 0) {
		/* Ranges can't be hashed individually: accept all multicast. */
		ifp->if_flags |= IFF_ALLMULTI;
		rxcfg |= HME_MAC_RXCFG_HENABLE;
		hash[0] = hash[1] = hash[2] = hash[3] = 0xffff;
	} else {
		rxcfg |= HME_MAC_RXCFG_HENABLE;

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			/* Top 6 CRC bits select one of 64 filter bits. */
			crc = ether_crc32_le(enm->enm_addrlo,
			    ETHER_ADDR_LEN) >> 26;

			/* Set the corresponding bit in the filter. */
			hash[crc >> 4] |= 1 << (crc & 0xf);

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	/* Now load the hash table into the chip */
	bus_space_write_4(t, mac, HME_MACI_HASHTAB0, hash[0]);
	bus_space_write_4(t, mac, HME_MACI_HASHTAB1, hash[1]);
	bus_space_write_4(t, mac, HME_MACI_HASHTAB2, hash[2]);
	bus_space_write_4(t, mac, HME_MACI_HASHTAB3, hash[3]);
	bus_space_write_4(t, mac, HME_MACI_RXCFG, rxcfg);
}

/*
 * Refill the receive ring: allocate a fresh mbuf cluster for each
 * available slot, record its DMA address in the descriptor, and hand
 * the descriptor to the chip (HME_XD_OWN).  Stops early if mbuf or DMA
 * map allocation fails; unfilled slots are returned to the rxr counter.
 */
void
hme_fill_rx_ring(struct hme_softc *sc)
{
	struct hme_sxd *sd;
	u_int slots;

	for (slots = if_rxr_get(&sc->sc_rx_ring, HME_RX_RING_SIZE);
	    slots > 0; slots--) {
		if (hme_newbuf(sc, &sc->sc_rxd[sc->sc_rx_prod]))
			break;

		sd = &sc->sc_rxd[sc->sc_rx_prod];
		HME_XD_SETADDR(sc->sc_pci, sc->sc_rb.rb_rxd, sc->sc_rx_prod,
		    sd->sd_map->dm_segs[0].ds_addr);
		/* Give the descriptor to the chip. */
		HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, sc->sc_rx_prod,
		    HME_XD_OWN | HME_XD_ENCODE_RSIZE(HME_RX_PKTSIZE));

		/* Producer index wraps around the ring. */
		if (++sc->sc_rx_prod == HME_RX_RING_SIZE)
			sc->sc_rx_prod = 0;
	}
	if_rxr_put(&sc->sc_rx_ring, slots);
}

/*
 * Attach a fresh mbuf cluster to rx descriptor slot `d'.  The new
 * cluster is DMA-loaded into the spare map first; only on success are
 * the slot's map and the spare map swapped and the new mbuf installed,
 * so a failure leaves the old buffer fully intact.  Returns 0 on
 * success, ENOBUFS if the mbuf or the DMA load fails.
 */
int
hme_newbuf(struct hme_softc *sc, struct hme_sxd *d)
{
	struct mbuf *m;
	bus_dmamap_t map;

	/*
	 * All operations should be on local variables and/or rx spare map
	 * until we're sure everything is a success.
	 */

	m = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
	if (!m)
		return (ENOBUFS);

	/* Map the cluster, reserving HME_RX_OFFSET bytes at the end. */
	if (bus_dmamap_load(sc->sc_dmatag, sc->sc_rxmap_spare,
	    mtod(m, caddr_t), MCLBYTES - HME_RX_OFFSET, NULL,
	    BUS_DMA_NOWAIT) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	/*
	 * At this point we have a new buffer loaded into the spare map.
	 * Just need to clear out the old mbuf/map and put the new one
	 * in place.
	 */

	map = d->sd_map;
	d->sd_map = sc->sc_rxmap_spare;
	sc->sc_rxmap_spare = map;

	bus_dmamap_sync(sc->sc_dmatag, d->sd_map, 0, d->sd_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	/*
	 * Advance past the 2-byte pad — presumably so the IP header
	 * lands 4-byte aligned after the 14-byte Ethernet header;
	 * NOTE(review): confirm against the rx-completion path.
	 */
	m->m_data += HME_RX_OFFSET;
	d->sd_mbuf = m;
	return (0);
}