/*	$OpenBSD: if_se.c,v 1.6 2010/09/07 07:54:44 miod Exp $	*/

/*-
 * Copyright (c) 2009, 2010 Christopher Zimmermann <madroach@zakweb.de>
 * Copyright (c) 2008, 2009, 2010 Nikolay Denev <ndenev@gmail.com>
 * Copyright (c) 2007, 2008 Alexander Pohoyda <alexander.pohoyda@gmx.net>
 * Copyright (c) 1997, 1998, 1999
 *	Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL AUTHORS OR
 * THE VOICES IN THEIR HEADS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * SiS 190/191 PCI Ethernet NIC driver.
 *
 * Adapted to SiS 190 NIC by Alexander Pohoyda based on the original
 * SiS 900 driver by Bill Paul, using SiS 190/191 Solaris driver by
 * Masayuki Murayama and SiS 190/191 GNU/Linux driver by K.M. Liu
 * <kmliu@sis.com>.  Thanks to Pyun YongHyeon <pyunyh@gmail.com> for
 * review and very useful comments.
 *
 * Ported to OpenBSD by Christopher Zimmermann 2009/10
 *
 * Adapted to SiS 191 NIC by Nikolay Denev with further ideas from the
 * Linux and Solaris drivers.
 */
#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/ioctl.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/timeout.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/pci/if_sereg.h>

#define SE_RX_RING_CNT		256	/* [8, 1024] */
#define SE_TX_RING_CNT		256	/* [8, 8192] */
#define SE_RX_BUF_ALIGN		sizeof(uint64_t)

#define SE_RX_RING_SZ		(SE_RX_RING_CNT * sizeof(struct se_desc))
#define SE_TX_RING_SZ		(SE_TX_RING_CNT * sizeof(struct se_desc))

struct se_list_data {
	struct se_desc		*se_rx_ring;
	struct se_desc		*se_tx_ring;
	bus_dmamap_t		se_rx_dmamap;
	bus_dmamap_t		se_tx_dmamap;
};

struct se_chain_data {
	struct mbuf		*se_rx_mbuf[SE_RX_RING_CNT];
	struct mbuf		*se_tx_mbuf[SE_TX_RING_CNT];
	bus_dmamap_t		se_rx_map[SE_RX_RING_CNT];
	bus_dmamap_t		se_tx_map[SE_TX_RING_CNT];
	uint			se_rx_prod;
	uint			se_tx_prod;
	uint			se_tx_cons;
	uint			se_tx_cnt;
};

struct se_softc {
	struct device		 sc_dev;
	void			*sc_ih;
	bus_space_tag_t		 sc_iot;
	bus_space_handle_t	 sc_ioh;
	bus_dma_tag_t		 sc_dmat;

	struct mii_data		 sc_mii;
	struct arpcom		 sc_ac;

	struct se_list_data	 se_ldata;
	struct se_chain_data	 se_cdata;

	struct timeout		 sc_tick_tmo;

	int			 sc_flags;
#define SE_FLAG_FASTETHER	0x0001
#define SE_FLAG_RGMII		0x0010
#define SE_FLAG_LINK		0x8000
};
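/*
 * Ring bookkeeping (a summary of how the fields above are used):
 * se_rx_prod is the next RX descriptor to inspect, while se_tx_prod,
 * se_tx_cons and se_tx_cnt track the TX producer index, consumer index
 * and number of descriptors in flight.  Every mbuf is loaded into a
 * single-segment DMA map, so descriptor i always describes exactly the
 * buffer in se_rx_mbuf[i]/se_tx_mbuf[i].
 */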
/*
 * Various supported device vendors/types and their names.
 */
const struct pci_matchid se_devices[] = {
	{ PCI_VENDOR_SIS, PCI_PRODUCT_SIS_190 },
	{ PCI_VENDOR_SIS, PCI_PRODUCT_SIS_191 }
};

int	se_match(struct device *, void *, void *);
void	se_attach(struct device *, struct device *, void *);
int	se_activate(struct device *, int);

const struct cfattach se_ca = {
	sizeof(struct se_softc),
	se_match, se_attach, NULL, se_activate
};

struct cfdriver se_cd = {
	NULL, "se", DV_IFNET
};

uint32_t
	se_miibus_cmd(struct se_softc *, uint32_t);
int	se_miibus_readreg(struct device *, int, int);
void	se_miibus_writereg(struct device *, int, int, int);
void	se_miibus_statchg(struct device *);

int	se_newbuf(struct se_softc *, uint);
void	se_discard_rxbuf(struct se_softc *, uint);
int	se_encap(struct se_softc *, struct mbuf *, uint32_t *);
void	se_rxeof(struct se_softc *);
void	se_txeof(struct se_softc *);
int	se_intr(void *);
void	se_tick(void *);
void	se_start(struct ifnet *);
int	se_ioctl(struct ifnet *, u_long, caddr_t);
int	se_init(struct ifnet *);
void	se_stop(struct se_softc *);
void	se_watchdog(struct ifnet *);
int	se_ifmedia_upd(struct ifnet *);
void	se_ifmedia_sts(struct ifnet *, struct ifmediareq *);

int	se_pcib_match(struct pci_attach_args *);
int	se_get_mac_addr_apc(struct se_softc *, uint8_t *);
int	se_get_mac_addr_eeprom(struct se_softc *, uint8_t *);
uint16_t
	se_read_eeprom(struct se_softc *, int);

void	se_iff(struct se_softc *);
void	se_reset(struct se_softc *);
int	se_list_rx_init(struct se_softc *);
int	se_list_rx_free(struct se_softc *);
int	se_list_tx_init(struct se_softc *);
int	se_list_tx_free(struct se_softc *);

/*
 * Register space access macros.
 */
#define CSR_WRITE_4(sc, reg, val) \
	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, reg, val)
#define CSR_WRITE_2(sc, reg, val) \
	bus_space_write_2((sc)->sc_iot, (sc)->sc_ioh, reg, val)
#define CSR_WRITE_1(sc, reg, val) \
	bus_space_write_1((sc)->sc_iot, (sc)->sc_ioh, reg, val)

#define CSR_READ_4(sc, reg) \
	bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, reg)
#define CSR_READ_2(sc, reg) \
	bus_space_read_2((sc)->sc_iot, (sc)->sc_ioh, reg)
#define CSR_READ_1(sc, reg) \
	bus_space_read_1((sc)->sc_iot, (sc)->sc_ioh, reg)
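/*
 * Both the EEPROM interface (ROMInterface) and the MII interface
 * (GMIIControl) below share the same access pattern: a command word
 * carrying a request bit (EI_REQ/GMI_REQ) is written to the register,
 * which is then polled until the hardware clears the request bit;
 * only then is the data field valid.
 */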
/*
 * Read a sequence of words from the EEPROM.
 */
uint16_t
se_read_eeprom(struct se_softc *sc, int offset)
{
	uint32_t val;
	int i;

	KASSERT(offset <= EI_OFFSET);

	CSR_WRITE_4(sc, ROMInterface,
	    EI_REQ | EI_OP_RD | (offset << EI_OFFSET_SHIFT));
	DELAY(500);
	for (i = 0; i < SE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, ROMInterface);
		if ((val & EI_REQ) == 0)
			break;
		DELAY(100);
	}
	if (i == SE_TIMEOUT) {
		printf("%s: EEPROM read timeout: 0x%08x\n",
		    sc->sc_dev.dv_xname, val);
		return 0xffff;
	}

	return (val & EI_DATA) >> EI_DATA_SHIFT;
}

int
se_get_mac_addr_eeprom(struct se_softc *sc, uint8_t *dest)
{
	uint16_t val;
	int i;

	val = se_read_eeprom(sc, EEPROMSignature);
	if (val == 0xffff || val == 0x0000) {
		printf("%s: invalid EEPROM signature: 0x%04x\n",
		    sc->sc_dev.dv_xname, val);
		return (EINVAL);
	}

	for (i = 0; i < ETHER_ADDR_LEN; i += 2) {
		val = se_read_eeprom(sc, EEPROMMACAddr + i / 2);
		dest[i + 0] = (uint8_t)val;
		dest[i + 1] = (uint8_t)(val >> 8);
	}

	if ((se_read_eeprom(sc, EEPROMInfo) & 0x80) != 0)
		sc->sc_flags |= SE_FLAG_RGMII;
	return (0);
}

/*
 * For SiS96x, APC CMOS RAM is used to store the Ethernet address.
 * APC CMOS RAM is accessed through the ISA bridge.
 */
#if defined(__amd64__) || defined(__i386__)
int
se_pcib_match(struct pci_attach_args *pa)
{
	const struct pci_matchid apc_devices[] = {
		{ PCI_VENDOR_SIS, PCI_PRODUCT_SIS_965 },
		{ PCI_VENDOR_SIS, PCI_PRODUCT_SIS_966 },
		{ PCI_VENDOR_SIS, PCI_PRODUCT_SIS_968 }
	};

	return pci_matchbyid(pa, apc_devices, nitems(apc_devices));
}
#endif

int
se_get_mac_addr_apc(struct se_softc *sc, uint8_t *dest)
{
#if defined(__amd64__) || defined(__i386__)
	struct pci_attach_args pa;
	pcireg_t reg;
	bus_space_handle_t ioh;
	int rc, i;

	if (pci_find_device(&pa, se_pcib_match) == 0) {
		printf("\n%s: couldn't find PCI-ISA bridge\n",
		    sc->sc_dev.dv_xname);
		return EINVAL;
	}

	/* Enable ports 0x78 and 0x79 to access the APC registers. */
	reg = pci_conf_read(pa.pa_pc, pa.pa_tag, 0x48);
	pci_conf_write(pa.pa_pc, pa.pa_tag, 0x48, reg & ~0x02);
	DELAY(50);
	(void)pci_conf_read(pa.pa_pc, pa.pa_tag, 0x48);

	/* XXX this abuses bus_space implementation knowledge */
	rc = _bus_space_map(pa.pa_iot, 0x78, 2, 0, &ioh);
	if (rc == 0) {
		/* Read the stored Ethernet address. */
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			bus_space_write_1(pa.pa_iot, ioh, 0, 0x09 + i);
			dest[i] = bus_space_read_1(pa.pa_iot, ioh, 1);
		}
		bus_space_write_1(pa.pa_iot, ioh, 0, 0x12);
		if ((bus_space_read_1(pa.pa_iot, ioh, 1) & 0x80) != 0)
			sc->sc_flags |= SE_FLAG_RGMII;
		_bus_space_unmap(pa.pa_iot, ioh, 2, NULL);
	} else
		rc = EINVAL;

	/* Restore access to the APC registers. */
	pci_conf_write(pa.pa_pc, pa.pa_tag, 0x48, reg);

	return rc;
#endif
	return EINVAL;
}
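/*
 * Which of the two MAC address sources gets used is decided in
 * se_attach(): if bit 24 of PCI configuration word 0x70 is set, the
 * address is fetched from the southbridge APC CMOS RAM above,
 * otherwise it is read from the NIC's own EEPROM.
 */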
uint32_t
se_miibus_cmd(struct se_softc *sc, uint32_t ctrl)
{
	int i;
	uint32_t val;

	CSR_WRITE_4(sc, GMIIControl, ctrl);
	DELAY(10);
	for (i = 0; i < SE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, GMIIControl);
		if ((val & GMI_REQ) == 0)
			return val;
		DELAY(10);
	}

	return GMI_REQ;
}

int
se_miibus_readreg(struct device *self, int phy, int reg)
{
	struct se_softc *sc = (struct se_softc *)self;
	uint32_t ctrl, val;

	ctrl = (phy << GMI_PHY_SHIFT) | (reg << GMI_REG_SHIFT) |
	    GMI_OP_RD | GMI_REQ;
	val = se_miibus_cmd(sc, ctrl);
	if ((val & GMI_REQ) != 0) {
		printf("%s: PHY read timeout: %d\n",
		    sc->sc_dev.dv_xname, reg);
		return 0;
	}
	return (val & GMI_DATA) >> GMI_DATA_SHIFT;
}

void
se_miibus_writereg(struct device *self, int phy, int reg, int data)
{
	struct se_softc *sc = (struct se_softc *)self;
	uint32_t ctrl, val;

	ctrl = (phy << GMI_PHY_SHIFT) | (reg << GMI_REG_SHIFT) |
	    GMI_OP_WR | (data << GMI_DATA_SHIFT) | GMI_REQ;
	val = se_miibus_cmd(sc, ctrl);
	if ((val & GMI_REQ) != 0) {
		printf("%s: PHY write timeout: %d\n",
		    sc->sc_dev.dv_xname, reg);
	}
}

void
se_miibus_statchg(struct device *self)
{
	struct se_softc *sc = (struct se_softc *)self;
#ifdef SE_DEBUG
	struct ifnet *ifp = &sc->sc_ac.ac_if;
#endif
	struct mii_data *mii = &sc->sc_mii;
	uint32_t ctl, speed;

	speed = 0;
	sc->sc_flags &= ~SE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
#ifdef SE_DEBUG
			if (ifp->if_flags & IFF_DEBUG)
				printf("%s: 10baseT link\n", ifp->if_xname);
#endif
			sc->sc_flags |= SE_FLAG_LINK;
			speed = SC_SPEED_10;
			break;
		case IFM_100_TX:
#ifdef SE_DEBUG
			if (ifp->if_flags & IFF_DEBUG)
				printf("%s: 100baseTX link\n", ifp->if_xname);
#endif
			sc->sc_flags |= SE_FLAG_LINK;
			speed = SC_SPEED_100;
			break;
		case IFM_1000_T:
#ifdef SE_DEBUG
			if (ifp->if_flags & IFF_DEBUG)
				printf("%s: 1000baseT link\n", ifp->if_xname);
#endif
			if ((sc->sc_flags & SE_FLAG_FASTETHER) == 0) {
				sc->sc_flags |= SE_FLAG_LINK;
				speed = SC_SPEED_1000;
			}
			break;
		default:
			break;
		}
	}
	if ((sc->sc_flags & SE_FLAG_LINK) == 0) {
#ifdef SE_DEBUG
		if (ifp->if_flags & IFF_DEBUG)
			printf("%s: no link\n", ifp->if_xname);
#endif
		return;
	}
	/* Reprogram the MAC to the resolved speed/duplex/flow-control
	 * parameters. */
	ctl = CSR_READ_4(sc, StationControl);
	ctl &= ~(0x0f000000 | SC_FDX | SC_SPEED_MASK);
	if (speed == SC_SPEED_1000)
		ctl |= 0x07000000;
	else
		ctl |= 0x04000000;
#ifdef notyet
	if ((sc->sc_flags & SE_FLAG_GMII) != 0)
		ctl |= 0x03000000;
#endif
	ctl |= speed;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
		ctl |= SC_FDX;
	CSR_WRITE_4(sc, StationControl, ctl);
	if ((sc->sc_flags & SE_FLAG_RGMII) != 0) {
		CSR_WRITE_4(sc, RGMIIDelay, 0x0441);
		CSR_WRITE_4(sc, RGMIIDelay, 0x0440);
	}
}
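/*
 * SE_FLAG_LINK, maintained above, gates transmission: se_start()
 * refuses to queue packets while it is clear, and se_tick() keeps
 * calling se_miibus_statchg() until a link shows up, then kicks the
 * transmit queue.
 */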
void
se_iff(struct se_softc *sc)
{
	struct arpcom *ac = &sc->sc_ac;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t crc, hashes[2];
	uint16_t rxfilt;

	rxfilt = CSR_READ_2(sc, RxMacControl);
	rxfilt &= ~(AcceptAllPhys | AcceptBroadcast | AcceptMulticast);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept broadcast frames.
	 * Always accept frames destined to our station address.
	 */
	rxfilt |= AcceptBroadcast | AcceptMyPhys;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= AcceptAllPhys;
		rxfilt |= AcceptMulticast;
		hashes[0] = hashes[1] = 0xffffffff;
	} else {
		rxfilt |= AcceptMulticast;
		hashes[0] = hashes[1] = 0;

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);

			hashes[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	CSR_WRITE_2(sc, RxMacControl, rxfilt);
	CSR_WRITE_4(sc, RxHashTable, hashes[0]);
	CSR_WRITE_4(sc, RxHashTable2, hashes[1]);
}
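/*
 * The multicast filter above is a 64-bit hash table split across
 * RxHashTable/RxHashTable2: bit 31 of the big-endian CRC of the
 * address selects the register and bits 30:26 select the bit within
 * it.  For example, a CRC of 0x84000000 sets bit 1 of RxHashTable2.
 */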
void
se_reset(struct se_softc *sc)
{
	CSR_WRITE_4(sc, IntrMask, 0);
	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);

	/* Soft reset. */
	CSR_WRITE_4(sc, IntrControl, 0x8000);
	CSR_READ_4(sc, IntrControl);
	DELAY(100);
	CSR_WRITE_4(sc, IntrControl, 0);
	/* Stop MAC. */
	CSR_WRITE_4(sc, TX_CTL, 0x1a00);
	CSR_WRITE_4(sc, RX_CTL, 0x1a00);

	CSR_WRITE_4(sc, IntrMask, 0);
	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);

	CSR_WRITE_4(sc, GMIIControl, 0);
}

/*
 * Probe for an SiS chip.  Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
int
se_match(struct device *parent, void *match, void *aux)
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;

	return pci_matchbyid(pa, se_devices, nitems(se_devices));
}

/*
 * Attach the interface.  Do ifmedia setup and ethernet/BPF attach.
 */
void
se_attach(struct device *parent, struct device *self, void *aux)
{
	struct se_softc *sc = (struct se_softc *)self;
	struct arpcom *ac = &sc->sc_ac;
	struct ifnet *ifp = &ac->ac_if;
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	uint8_t eaddr[ETHER_ADDR_LEN];
	const char *intrstr;
	pci_intr_handle_t ih;
	bus_size_t iosize;
	bus_dma_segment_t seg;
	struct se_list_data *ld;
	struct se_chain_data *cd;
	int nseg;
	uint i;
	int rc;

	printf(": ");

	/*
	 * Map control/status registers.
	 */
	rc = pci_mapreg_map(pa, PCI_MAPREG_START, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_iot, &sc->sc_ioh, NULL, &iosize, 0);
	if (rc != 0) {
		printf("can't map mem space\n");
		return;
	}

	if (pci_intr_map(pa, &ih)) {
		printf("can't map interrupt\n");
		goto fail1;
	}
	intrstr = pci_intr_string(pa->pa_pc, ih);
	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_NET, se_intr, sc,
	    self->dv_xname);
	if (sc->sc_ih == NULL) {
		printf("can't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail1;
	}

	printf("%s", intrstr);

	if (pa->pa_id == PCI_ID_CODE(PCI_VENDOR_SIS, PCI_PRODUCT_SIS_190))
		sc->sc_flags |= SE_FLAG_FASTETHER;

	/* Reset the adapter. */
	se_reset(sc);

	/* Get MAC address from the EEPROM. */
	if ((pci_conf_read(pa->pa_pc, pa->pa_tag, 0x70) & (0x01 << 24)) != 0)
		se_get_mac_addr_apc(sc, eaddr);
	else
		se_get_mac_addr_eeprom(sc, eaddr);
	printf(", address %s\n", ether_sprintf(eaddr));
	bcopy(eaddr, ac->ac_enaddr, ETHER_ADDR_LEN);

	/*
	 * Now do all the DMA mapping stuff.
	 */
	sc->sc_dmat = pa->pa_dmat;
	ld = &sc->se_ldata;
	cd = &sc->se_cdata;

	/* First create TX/RX busdma maps. */
	for (i = 0; i < SE_RX_RING_CNT; i++) {
		rc = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT, &cd->se_rx_map[i]);
		if (rc != 0) {
			printf("%s: cannot init the RX map array\n",
			    self->dv_xname);
			goto fail2;
		}
	}

	for (i = 0; i < SE_TX_RING_CNT; i++) {
		rc = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT, &cd->se_tx_map[i]);
		if (rc != 0) {
			printf("%s: cannot init the TX map array\n",
			    self->dv_xname);
			goto fail2;
		}
	}
	/*
	 * Now allocate a chunk of DMA-able memory for RX and TX ring
	 * descriptors, as a contiguous block of memory.
	 * XXX fix deallocation upon error
	 */

	/* RX */
	rc = bus_dmamem_alloc(sc->sc_dmat, SE_RX_RING_SZ, PAGE_SIZE, 0,
	    &seg, 1, &nseg, BUS_DMA_NOWAIT);
	if (rc != 0) {
		printf("%s: no memory for RX descriptors\n", self->dv_xname);
		goto fail2;
	}

	rc = bus_dmamem_map(sc->sc_dmat, &seg, nseg, SE_RX_RING_SZ,
	    (caddr_t *)&ld->se_rx_ring, BUS_DMA_NOWAIT);
	if (rc != 0) {
		printf("%s: can't map RX descriptors\n", self->dv_xname);
		goto fail2;
	}

	rc = bus_dmamap_create(sc->sc_dmat, SE_RX_RING_SZ, 1,
	    SE_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &ld->se_rx_dmamap);
	if (rc != 0) {
		printf("%s: can't alloc RX DMA map\n", self->dv_xname);
		goto fail2;
	}

	rc = bus_dmamap_load(sc->sc_dmat, ld->se_rx_dmamap,
	    (caddr_t)ld->se_rx_ring, SE_RX_RING_SZ, NULL, BUS_DMA_NOWAIT);
	if (rc != 0) {
		printf("%s: can't load RX DMA map\n", self->dv_xname);
		bus_dmamem_unmap(sc->sc_dmat,
		    (caddr_t)ld->se_rx_ring, SE_RX_RING_SZ);
		bus_dmamap_destroy(sc->sc_dmat, ld->se_rx_dmamap);
		bus_dmamem_free(sc->sc_dmat, &seg, nseg);
		goto fail2;
	}

	/* TX */
	rc = bus_dmamem_alloc(sc->sc_dmat, SE_TX_RING_SZ, PAGE_SIZE, 0,
	    &seg, 1, &nseg, BUS_DMA_NOWAIT);
	if (rc != 0) {
		printf("%s: no memory for TX descriptors\n", self->dv_xname);
		goto fail2;
	}

	rc = bus_dmamem_map(sc->sc_dmat, &seg, nseg, SE_TX_RING_SZ,
	    (caddr_t *)&ld->se_tx_ring, BUS_DMA_NOWAIT);
	if (rc != 0) {
		printf("%s: can't map TX descriptors\n", self->dv_xname);
		goto fail2;
	}

	rc = bus_dmamap_create(sc->sc_dmat, SE_TX_RING_SZ, 1,
	    SE_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &ld->se_tx_dmamap);
	if (rc != 0) {
		printf("%s: can't alloc TX DMA map\n", self->dv_xname);
		goto fail2;
	}

	rc = bus_dmamap_load(sc->sc_dmat, ld->se_tx_dmamap,
	    (caddr_t)ld->se_tx_ring, SE_TX_RING_SZ, NULL, BUS_DMA_NOWAIT);
	if (rc != 0) {
		printf("%s: can't load TX DMA map\n", self->dv_xname);
		bus_dmamem_unmap(sc->sc_dmat,
		    (caddr_t)ld->se_tx_ring, SE_TX_RING_SZ);
		bus_dmamap_destroy(sc->sc_dmat, ld->se_tx_dmamap);
		bus_dmamem_free(sc->sc_dmat, &seg, nseg);
		goto fail2;
	}

	timeout_set(&sc->sc_tick_tmo, se_tick, sc);

	ifp = &sc->sc_ac.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = se_ioctl;
	ifp->if_start = se_start;
	ifp->if_watchdog = se_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, SE_TX_RING_CNT - 1);
	IFQ_SET_READY(&ifp->if_snd);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	/*
	 * Do MII setup.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = se_miibus_readreg;
	sc->sc_mii.mii_writereg = se_miibus_writereg;
	sc->sc_mii.mii_statchg = se_miibus_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, se_ifmedia_upd,
	    se_ifmedia_sts);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		/* No PHY attached. */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
	/*
	 * Call MI attach routine.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);

	return;

fail2:
	pci_intr_disestablish(pa->pa_pc, sc->sc_ih);
fail1:
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, iosize);
}

int
se_activate(struct device *self, int act)
{
	struct se_softc *sc = (struct se_softc *)self;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int rc = 0;

	switch (act) {
	case DVACT_SUSPEND:
		if (ifp->if_flags & IFF_RUNNING)
			se_stop(sc);
		rc = config_activate_children(self, act);
		break;
	case DVACT_RESUME:
		rc = config_activate_children(self, act);
		if (ifp->if_flags & IFF_UP)
			(void)se_init(ifp);
		break;
	}

	return rc;
}

/*
 * Initialize the TX descriptors.
 */
int
se_list_tx_init(struct se_softc *sc)
{
	struct se_list_data *ld = &sc->se_ldata;
	struct se_chain_data *cd = &sc->se_cdata;

	bzero(ld->se_tx_ring, SE_TX_RING_SZ);
	ld->se_tx_ring[SE_TX_RING_CNT - 1].se_flags = htole32(RING_END);
	bus_dmamap_sync(sc->sc_dmat, ld->se_tx_dmamap, 0, SE_TX_RING_SZ,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	cd->se_tx_prod = 0;
	cd->se_tx_cons = 0;
	cd->se_tx_cnt = 0;

	return 0;
}

int
se_list_tx_free(struct se_softc *sc)
{
	struct se_chain_data *cd = &sc->se_cdata;
	uint i;

	for (i = 0; i < SE_TX_RING_CNT; i++) {
		if (cd->se_tx_mbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, cd->se_tx_map[i]);
			m_free(cd->se_tx_mbuf[i]);
			cd->se_tx_mbuf[i] = NULL;
		}
	}

	return 0;
}

/*
 * Initialize the RX descriptors and allocate mbufs for them.
 */
int
se_list_rx_init(struct se_softc *sc)
{
	struct se_list_data *ld = &sc->se_ldata;
	struct se_chain_data *cd = &sc->se_cdata;
	uint i;

	bzero(ld->se_rx_ring, SE_RX_RING_SZ);
	bus_dmamap_sync(sc->sc_dmat, ld->se_rx_dmamap, 0, SE_RX_RING_SZ,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	for (i = 0; i < SE_RX_RING_CNT; i++) {
		if (se_newbuf(sc, i) != 0)
			return ENOBUFS;
	}

	cd->se_rx_prod = 0;

	return 0;
}

int
se_list_rx_free(struct se_softc *sc)
{
	struct se_chain_data *cd = &sc->se_cdata;
	uint i;

	for (i = 0; i < SE_RX_RING_CNT; i++) {
		if (cd->se_rx_mbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, cd->se_rx_map[i]);
			m_free(cd->se_rx_mbuf[i]);
			cd->se_rx_mbuf[i] = NULL;
		}
	}

	return 0;
}
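/*
 * Descriptor ownership, as assumed throughout: the driver hands a
 * descriptor to the chip by setting RDC_OWN/TDC_OWN in se_cmdsts and
 * the chip clears the bit once it is done with the buffer, so
 * se_rxeof() and se_txeof() only touch descriptors whose OWN bit
 * reads back clear.
 */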
/*
 * Initialize an RX descriptor and attach an mbuf cluster.
 */
int
se_newbuf(struct se_softc *sc, uint i)
{
#ifdef SE_DEBUG
	struct ifnet *ifp = &sc->sc_ac.ac_if;
#endif
	struct se_list_data *ld = &sc->se_ldata;
	struct se_chain_data *cd = &sc->se_cdata;
	struct se_desc *desc;
	struct mbuf *m;
	int rc;

	m = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES);
	if (m == NULL) {
#ifdef SE_DEBUG
		if (ifp->if_flags & IFF_DEBUG)
			printf("%s: MCLGETI failed\n", ifp->if_xname);
#endif
		return ENOBUFS;
	}
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, SE_RX_BUF_ALIGN);

	rc = bus_dmamap_load_mbuf(sc->sc_dmat, cd->se_rx_map[i],
	    m, BUS_DMA_NOWAIT);
	if (rc != 0) {
		m_freem(m);
		return ENOBUFS;
	}
	KASSERT(cd->se_rx_map[i]->dm_nsegs == 1);
	bus_dmamap_sync(sc->sc_dmat, cd->se_rx_map[i], 0,
	    cd->se_rx_map[i]->dm_mapsize, BUS_DMASYNC_PREREAD);

	cd->se_rx_mbuf[i] = m;
	desc = &ld->se_rx_ring[i];
	desc->se_sts_size = 0;
	desc->se_cmdsts = htole32(RDC_OWN | RDC_INTR);
	desc->se_ptr = htole32((uint32_t)cd->se_rx_map[i]->dm_segs[0].ds_addr);
	desc->se_flags = htole32(cd->se_rx_map[i]->dm_segs[0].ds_len);
	if (i == SE_RX_RING_CNT - 1)
		desc->se_flags |= htole32(RING_END);
	bus_dmamap_sync(sc->sc_dmat, ld->se_rx_dmamap, i * sizeof(*desc),
	    sizeof(*desc), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return 0;
}

void
se_discard_rxbuf(struct se_softc *sc, uint i)
{
	struct se_list_data *ld = &sc->se_ldata;
	struct se_desc *desc;

	desc = &ld->se_rx_ring[i];
	desc->se_sts_size = 0;
	desc->se_cmdsts = htole32(RDC_OWN | RDC_INTR);
	desc->se_flags = htole32(MCLBYTES - SE_RX_BUF_ALIGN);
	if (i == SE_RX_RING_CNT - 1)
		desc->se_flags |= htole32(RING_END);
	bus_dmamap_sync(sc->sc_dmat, ld->se_rx_dmamap, i * sizeof(*desc),
	    sizeof(*desc), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
void
se_rxeof(struct se_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct se_list_data *ld = &sc->se_ldata;
	struct se_chain_data *cd = &sc->se_cdata;
	struct se_desc *cur_rx;
	uint32_t rxinfo, rxstat;
	uint i;

	bus_dmamap_sync(sc->sc_dmat, ld->se_rx_dmamap, 0, SE_RX_RING_SZ,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	for (i = cd->se_rx_prod; ; SE_INC(i, SE_RX_RING_CNT)) {
		cur_rx = &ld->se_rx_ring[i];
		rxinfo = letoh32(cur_rx->se_cmdsts);
		if ((rxinfo & RDC_OWN) != 0)
			break;
		rxstat = letoh32(cur_rx->se_sts_size);

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if ((rxstat & RDS_CRCOK) == 0 || SE_RX_ERROR(rxstat) != 0 ||
		    SE_RX_NSEGS(rxstat) != 1) {
			/* XXX We don't support multi-segment frames yet. */
			if (ifp->if_flags & IFF_DEBUG)
				printf("%s: rx error %b\n",
				    ifp->if_xname, rxstat, RX_ERR_BITS);
			se_discard_rxbuf(sc, i);
			ifp->if_ierrors++;
			continue;
		}

		/* No errors; receive the packet. */
		bus_dmamap_sync(sc->sc_dmat, cd->se_rx_map[i], 0,
		    cd->se_rx_map[i]->dm_mapsize, BUS_DMASYNC_POSTREAD);
		m = cd->se_rx_mbuf[i];
		if (se_newbuf(sc, i) != 0) {
			se_discard_rxbuf(sc, i);
			ifp->if_iqdrops++;
			continue;
		}
		/*
		 * Account for the 10 bytes of auto padding used to align
		 * the IP header on a 32-bit boundary.  Also note that the
		 * CRC bytes are automatically removed by the hardware.
		 */
		m->m_data += SE_RX_PAD_BYTES;
		m->m_pkthdr.len = m->m_len =
		    SE_RX_BYTES(rxstat) - SE_RX_PAD_BYTES;

		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif
		ether_input_mbuf(ifp, m);
	}

	cd->se_rx_prod = i;
}

/*
 * A frame was downloaded to the chip.  It's safe for us to clean up
 * the list buffers.
 */
void
se_txeof(struct se_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct se_list_data *ld = &sc->se_ldata;
	struct se_chain_data *cd = &sc->se_cdata;
	struct se_desc *cur_tx;
	uint32_t txstat;
	uint i;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	bus_dmamap_sync(sc->sc_dmat, ld->se_tx_dmamap, 0, SE_TX_RING_SZ,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	for (i = cd->se_tx_cons; cd->se_tx_cnt > 0;
	    cd->se_tx_cnt--, SE_INC(i, SE_TX_RING_CNT)) {
		cur_tx = &ld->se_tx_ring[i];
		txstat = letoh32(cur_tx->se_cmdsts);
		if ((txstat & TDC_OWN) != 0)
			break;

		ifp->if_flags &= ~IFF_OACTIVE;

		if (SE_TX_ERROR(txstat) != 0) {
			if (ifp->if_flags & IFF_DEBUG)
				printf("%s: tx error %b\n",
				    ifp->if_xname, txstat, TX_ERR_BITS);
			ifp->if_oerrors++;
			/* TODO: better error differentiation */
		} else
			ifp->if_opackets++;

		if (cd->se_tx_mbuf[i] != NULL) {
			bus_dmamap_sync(sc->sc_dmat, cd->se_tx_map[i], 0,
			    cd->se_tx_map[i]->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, cd->se_tx_map[i]);
			m_free(cd->se_tx_mbuf[i]);
			cd->se_tx_mbuf[i] = NULL;
		}

		cur_tx->se_sts_size = 0;
		cur_tx->se_cmdsts = 0;
		cur_tx->se_ptr = 0;
		cur_tx->se_flags &= htole32(RING_END);
		bus_dmamap_sync(sc->sc_dmat, ld->se_tx_dmamap,
		    i * sizeof(*cur_tx), sizeof(*cur_tx),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	cd->se_tx_cons = i;
	if (cd->se_tx_cnt == 0)
		ifp->if_timer = 0;
}

void
se_tick(void *xsc)
{
	struct se_softc *sc = xsc;
	struct mii_data *mii;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int s;

	s = splnet();
	mii = &sc->sc_mii;
	mii_tick(mii);
	if ((sc->sc_flags & SE_FLAG_LINK) == 0) {
		se_miibus_statchg(&sc->sc_dev);
		if ((sc->sc_flags & SE_FLAG_LINK) != 0 &&
		    !IFQ_IS_EMPTY(&ifp->if_snd))
			se_start(ifp);
	}
	splx(s);

	timeout_add_sec(&sc->sc_tick_tmo, 1);
}
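/*
 * Interrupt strategy: se_intr() below acknowledges and masks all
 * interrupts up front, loops while any SE_INTRS cause stays asserted
 * in IntrStatus, and only re-enables the mask (and restarts
 * transmission) once the status register is clean and the interface
 * is still running.
 */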
int
se_intr(void *arg)
{
	struct se_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	uint32_t status;

	status = CSR_READ_4(sc, IntrStatus);
	if (status == 0xffffffff || (status & SE_INTRS) == 0) {
		/* Not ours. */
		return 0;
	}
	/* Ack interrupts. */
	CSR_WRITE_4(sc, IntrStatus, status);
	/* Disable further interrupts. */
	CSR_WRITE_4(sc, IntrMask, 0);

	for (;;) {
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			break;
		if ((status & (INTR_RX_DONE | INTR_RX_IDLE)) != 0) {
			se_rxeof(sc);
			/* Wakeup Rx MAC. */
			if ((status & INTR_RX_IDLE) != 0)
				CSR_WRITE_4(sc, RX_CTL,
				    0x1a00 | 0x000c | RX_CTL_POLL | RX_CTL_ENB);
		}
		if ((status & (INTR_TX_DONE | INTR_TX_IDLE)) != 0)
			se_txeof(sc);
		status = CSR_READ_4(sc, IntrStatus);
		if ((status & SE_INTRS) == 0)
			break;
		/* Ack interrupts. */
		CSR_WRITE_4(sc, IntrStatus, status);
	}

	if ((ifp->if_flags & IFF_RUNNING) != 0) {
		/* Re-enable interrupts. */
		CSR_WRITE_4(sc, IntrMask, SE_INTRS);
		if (!IFQ_IS_EMPTY(&ifp->if_snd))
			se_start(ifp);
	}

	return 1;
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
int
se_encap(struct se_softc *sc, struct mbuf *m_head, uint32_t *txidx)
{
#ifdef SE_DEBUG
	struct ifnet *ifp = &sc->sc_ac.ac_if;
#endif
	struct mbuf *m;
	struct se_list_data *ld = &sc->se_ldata;
	struct se_chain_data *cd = &sc->se_cdata;
	struct se_desc *desc;
	uint i, cnt = 0;
	int rc;

	/*
	 * If there's no way we can send any packets, return now.
	 */
	if (SE_TX_RING_CNT - cd->se_tx_cnt < 2) {
#ifdef SE_DEBUG
		if (ifp->if_flags & IFF_DEBUG)
			printf("%s: encap failed, not enough TX desc\n",
			    ifp->if_xname);
#endif
		return ENOBUFS;
	}

	if (m_defrag(m_head, M_DONTWAIT) != 0) {
#ifdef SE_DEBUG
		if (ifp->if_flags & IFF_DEBUG)
			printf("%s: m_defrag failed\n", ifp->if_xname);
#endif
		return ENOBUFS;	/* XXX should not be fatal */
	}

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers.  Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	i = *txidx;

	for (m = m_head; m != NULL; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		if ((SE_TX_RING_CNT - (cd->se_tx_cnt + cnt)) < 2) {
#ifdef SE_DEBUG
			if (ifp->if_flags & IFF_DEBUG)
				printf("%s: encap failed, not enough TX desc\n",
				    ifp->if_xname);
#endif
			return ENOBUFS;
		}
		cd->se_tx_mbuf[i] = m;
		rc = bus_dmamap_load_mbuf(sc->sc_dmat, cd->se_tx_map[i],
		    m, BUS_DMA_NOWAIT);
		if (rc != 0)
			return ENOBUFS;
		KASSERT(cd->se_tx_map[i]->dm_nsegs == 1);
		bus_dmamap_sync(sc->sc_dmat, cd->se_tx_map[i], 0,
		    cd->se_tx_map[i]->dm_mapsize, BUS_DMASYNC_PREWRITE);

		desc = &ld->se_tx_ring[i];
		desc->se_sts_size = htole32(cd->se_tx_map[i]->dm_segs->ds_len);
		desc->se_ptr =
		    htole32((uint32_t)cd->se_tx_map[i]->dm_segs->ds_addr);
		desc->se_flags = htole32(cd->se_tx_map[i]->dm_segs->ds_len);
		if (i == SE_TX_RING_CNT - 1)
			desc->se_flags |= htole32(RING_END);
		desc->se_cmdsts = htole32(TDC_OWN | TDC_INTR | TDC_DEF |
		    TDC_CRC | TDC_PAD | TDC_BST);
		bus_dmamap_sync(sc->sc_dmat, ld->se_tx_dmamap,
		    i * sizeof(*desc), sizeof(*desc),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		SE_INC(i, SE_TX_RING_CNT);
		cnt++;
	}

	/* can't happen */
	if (m != NULL)
		return ENOBUFS;

	cd->se_tx_cnt += cnt;
	*txidx = i;

	return 0;
}
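/*
 * Note that se_encap() above leans on m_defrag() to produce a chain
 * whose individual mbufs each fit one DMA segment (the TX maps are
 * created with nsegments = 1 in se_attach()), so every mbuf carrying
 * data consumes exactly one TX descriptor.
 */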
/*
 * Main transmit routine.  To avoid having to do mbuf copies, we put
 * pointers to the mbuf data regions directly in the transmit lists.
 * We also save a copy of the pointers since the transmit list fragment
 * pointers are physical addresses.
 */
void
se_start(struct ifnet *ifp)
{
	struct se_softc *sc = ifp->if_softc;
	struct mbuf *m_head = NULL;
	struct se_chain_data *cd = &sc->se_cdata;
	uint32_t i;
	uint queued = 0;

	if ((sc->sc_flags & SE_FLAG_LINK) == 0 ||
	    (ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) {
#ifdef SE_DEBUG
		if (ifp->if_flags & IFF_DEBUG)
			printf("%s: can't tx, flags 0x%x 0x%04x\n",
			    ifp->if_xname, sc->sc_flags, (uint)ifp->if_flags);
#endif
		return;
	}

	i = cd->se_tx_prod;

	while (cd->se_tx_mbuf[i] == NULL) {
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (se_encap(sc, m_head, &i) != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* Now we are committed to transmit the packet. */
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		queued++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
	}

	if (queued > 0) {
		/* Transmit */
		cd->se_tx_prod = i;
		CSR_WRITE_4(sc, TX_CTL, 0x1a00 | TX_CTL_ENB | TX_CTL_POLL);
		ifp->if_timer = 5;
	}
}

int
se_init(struct ifnet *ifp)
{
	struct se_softc *sc = ifp->if_softc;
	uint16_t rxfilt;
	int i;

	splassert(IPL_NET);

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	se_stop(sc);
	se_reset(sc);

	/* Init circular RX list. */
	if (se_list_rx_init(sc) == ENOBUFS) {
		se_stop(sc);	/* XXX necessary? */
		return ENOBUFS;
	}

	/* Init TX descriptors. */
	se_list_tx_init(sc);

	/*
	 * Load the address of the RX and TX lists.
	 */
	CSR_WRITE_4(sc, TX_DESC,
	    (uint32_t)sc->se_ldata.se_tx_dmamap->dm_segs[0].ds_addr);
	CSR_WRITE_4(sc, RX_DESC,
	    (uint32_t)sc->se_ldata.se_rx_dmamap->dm_segs[0].ds_addr);

	CSR_WRITE_4(sc, TxMacControl, 0x60);
	CSR_WRITE_4(sc, RxWakeOnLan, 0);
	CSR_WRITE_4(sc, RxWakeOnLanData, 0);
	CSR_WRITE_2(sc, RxMPSControl, ETHER_MAX_LEN + SE_RX_PAD_BYTES);

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, RxMacAddr + i, sc->sc_ac.ac_enaddr[i]);
	/* Configure RX MAC. */
	rxfilt = RXMAC_STRIP_FCS | RXMAC_PAD_ENB | RXMAC_CSUM_ENB;
	CSR_WRITE_2(sc, RxMacControl, rxfilt);

	/* Program promiscuous mode and multicast filters. */
	se_iff(sc);

	/*
	 * Clear and enable interrupts.
	 */
	CSR_WRITE_4(sc, IntrStatus, 0xFFFFFFFF);
	CSR_WRITE_4(sc, IntrMask, SE_INTRS);

	/* Enable receiver and transmitter. */
	CSR_WRITE_4(sc, TX_CTL, 0x1a00 | TX_CTL_ENB);
	CSR_WRITE_4(sc, RX_CTL, 0x1a00 | 0x000c | RX_CTL_POLL | RX_CTL_ENB);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	sc->sc_flags &= ~SE_FLAG_LINK;
	mii_mediachg(&sc->sc_mii);
	timeout_add_sec(&sc->sc_tick_tmo, 1);

	return 0;
}
/*
 * Set media options.
 */
int
se_ifmedia_upd(struct ifnet *ifp)
{
	struct se_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = &sc->sc_mii;
	sc->sc_flags &= ~SE_FLAG_LINK;
	if (mii->mii_instance) {
		struct mii_softc *miisc;
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	return mii_mediachg(mii);
}

/*
 * Report current media status.
 */
void
se_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct se_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = &sc->sc_mii;
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

int
se_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct se_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
#ifdef INET
	struct ifaddr *ifa = (struct ifaddr *)data;
#endif
	int s, rc = 0;

	s = splnet();

	switch (command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			rc = se_init(ifp);
		if (rc == 0) {
#ifdef INET
			if (ifa->ifa_addr->sa_family == AF_INET)
				arp_ifinit(&sc->sc_ac, ifa);
#endif
		}
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				rc = ENETRESET;
			else
				rc = se_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				se_stop(sc);
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		rc = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
		break;
	default:
		rc = ether_ioctl(ifp, &sc->sc_ac, command, data);
		break;
	}

	if (rc == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			se_iff(sc);
		rc = 0;
	}

	splx(s);
	return rc;
}

void
se_watchdog(struct ifnet *ifp)
{
	struct se_softc *sc = ifp->if_softc;
	int s;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;

	s = splnet();
	se_init(ifp);
	if (!IFQ_IS_EMPTY(&ifp->if_snd))
		se_start(ifp);
	splx(s);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
void
se_stop(struct se_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;

	ifp->if_timer = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	timeout_del(&sc->sc_tick_tmo);
	mii_down(&sc->sc_mii);

	CSR_WRITE_4(sc, IntrMask, 0);
	CSR_READ_4(sc, IntrMask);
	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);
	/* Stop TX/RX MAC. */
	CSR_WRITE_4(sc, TX_CTL, 0x1a00);
	CSR_WRITE_4(sc, RX_CTL, 0x1a00);
	/* XXX Can we assume active DMA cycles are gone? */
	DELAY(2000);
	CSR_WRITE_4(sc, IntrMask, 0);
	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);

	sc->sc_flags &= ~SE_FLAG_LINK;
	se_list_rx_free(sc);
	se_list_tx_free(sc);
}