/*	$OpenBSD: if_se.c,v 1.27 2024/11/05 18:58:59 miod Exp $	*/

/*-
 * Copyright (c) 2009, 2010 Christopher Zimmermann <madroach@zakweb.de>
 * Copyright (c) 2008, 2009, 2010 Nikolay Denev <ndenev@gmail.com>
 * Copyright (c) 2007, 2008 Alexander Pohoyda <alexander.pohoyda@gmx.net>
 * Copyright (c) 1997, 1998, 1999
 *	Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL AUTHORS OR
 * THE VOICES IN THEIR HEADS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * SiS 190/191 PCI Ethernet NIC driver.
 *
 * Adapted to SiS 190 NIC by Alexander Pohoyda based on the original
 * SiS 900 driver by Bill Paul, using SiS 190/191 Solaris driver by
 * Masayuki Murayama and SiS 190/191 GNU/Linux driver by K.M. Liu
 * <kmliu@sis.com>.  Thanks to Pyun YongHyeon <pyunyh@gmail.com> for
 * review and very useful comments.
 *
 * Ported to OpenBSD by Christopher Zimmermann 2009/10
 *
 * Adapted to SiS 191 NIC by Nikolay Denev with further ideas from the
 * Linux and Solaris drivers.
 */
#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/ioctl.h>
#include <sys/mbuf.h>
#include <sys/timeout.h>

#include <net/if.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/pci/if_sereg.h>

#define SE_RX_RING_CNT		256	/* [8, 1024] */
#define SE_TX_RING_CNT		256	/* [8, 8192] */
#define SE_RX_BUF_ALIGN		sizeof(uint64_t)

#define SE_RX_RING_SZ		(SE_RX_RING_CNT * sizeof(struct se_desc))
#define SE_TX_RING_SZ		(SE_TX_RING_CNT * sizeof(struct se_desc))

struct se_list_data {
	struct se_desc		*se_rx_ring;
	struct se_desc		*se_tx_ring;
	bus_dmamap_t		se_rx_dmamap;
	bus_dmamap_t		se_tx_dmamap;
};

struct se_chain_data {
	struct mbuf		*se_rx_mbuf[SE_RX_RING_CNT];
	struct mbuf		*se_tx_mbuf[SE_TX_RING_CNT];
	bus_dmamap_t		se_rx_map[SE_RX_RING_CNT];
	bus_dmamap_t		se_tx_map[SE_TX_RING_CNT];
	uint			se_rx_prod;
	uint			se_tx_prod;
	uint			se_tx_cons;
	uint			se_tx_cnt;
};

struct se_softc {
	struct device		 sc_dev;
	void			*sc_ih;
	bus_space_tag_t		 sc_iot;
	bus_space_handle_t	 sc_ioh;
	bus_dma_tag_t		 sc_dmat;

	struct mii_data		 sc_mii;
	struct arpcom		 sc_ac;

	struct se_list_data	 se_ldata;
	struct se_chain_data	 se_cdata;

	struct timeout		 sc_tick_tmo;

	int			 sc_flags;
#define SE_FLAG_FASTETHER	0x0001
#define SE_FLAG_RGMII		0x0010
#define SE_FLAG_LINK		0x8000
};
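
/*
 * Descriptor ring bookkeeping, as used by the code below: the RX side
 * keeps only a producer index (se_rx_prod), since ownership of each
 * RX descriptor is tracked through the RDC_OWN bit in the descriptor
 * itself.  The TX side keeps a producer index (se_tx_prod) where
 * se_encap() queues frames, a consumer index (se_tx_cons) where
 * se_txeof() reclaims them, and a count of in-flight descriptors
 * (se_tx_cnt) so that full and empty rings can be told apart.
 */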
/*
 * Various supported device vendors/types and their names.
 */
const struct pci_matchid se_devices[] = {
	{ PCI_VENDOR_SIS, PCI_PRODUCT_SIS_190 },
	{ PCI_VENDOR_SIS, PCI_PRODUCT_SIS_191 }
};

int	se_match(struct device *, void *, void *);
void	se_attach(struct device *, struct device *, void *);
int	se_activate(struct device *, int);

const struct cfattach se_ca = {
	sizeof(struct se_softc),
	se_match, se_attach, NULL, se_activate
};

struct cfdriver se_cd = {
	NULL, "se", DV_IFNET
};

uint32_t
	se_miibus_cmd(struct se_softc *, uint32_t);
int	se_miibus_readreg(struct device *, int, int);
void	se_miibus_writereg(struct device *, int, int, int);
void	se_miibus_statchg(struct device *);

int	se_newbuf(struct se_softc *, uint);
void	se_discard_rxbuf(struct se_softc *, uint);
int	se_encap(struct se_softc *, struct mbuf *, uint32_t *);
void	se_rxeof(struct se_softc *);
void	se_txeof(struct se_softc *);
int	se_intr(void *);
void	se_tick(void *);
void	se_start(struct ifnet *);
int	se_ioctl(struct ifnet *, u_long, caddr_t);
int	se_init(struct ifnet *);
void	se_stop(struct se_softc *);
void	se_watchdog(struct ifnet *);
int	se_ifmedia_upd(struct ifnet *);
void	se_ifmedia_sts(struct ifnet *, struct ifmediareq *);

int	se_pcib_match(struct pci_attach_args *);
int	se_get_mac_addr_apc(struct se_softc *, uint8_t *);
int	se_get_mac_addr_eeprom(struct se_softc *, uint8_t *);
uint16_t
	se_read_eeprom(struct se_softc *, int);

void	se_iff(struct se_softc *);
void	se_reset(struct se_softc *);
int	se_list_rx_init(struct se_softc *);
int	se_list_rx_free(struct se_softc *);
int	se_list_tx_init(struct se_softc *);
int	se_list_tx_free(struct se_softc *);

/*
 * Register space access macros.
 */
#define CSR_WRITE_4(sc, reg, val) \
	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, reg, val)
#define CSR_WRITE_2(sc, reg, val) \
	bus_space_write_2((sc)->sc_iot, (sc)->sc_ioh, reg, val)
#define CSR_WRITE_1(sc, reg, val) \
	bus_space_write_1((sc)->sc_iot, (sc)->sc_ioh, reg, val)

#define CSR_READ_4(sc, reg) \
	bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, reg)
#define CSR_READ_2(sc, reg) \
	bus_space_read_2((sc)->sc_iot, (sc)->sc_ioh, reg)
#define CSR_READ_1(sc, reg) \
	bus_space_read_1((sc)->sc_iot, (sc)->sc_ioh, reg)
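
/*
 * For example, CSR_WRITE_4(sc, IntrMask, 0) expands to
 * bus_space_write_4(sc->sc_iot, sc->sc_ioh, IntrMask, 0), i.e. a
 * 32-bit store into the memory-mapped register window that is set up
 * in se_attach().
 */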
/*
 * Read a sequence of words from the EEPROM.
 */
uint16_t
se_read_eeprom(struct se_softc *sc, int offset)
{
	uint32_t val;
	int i;

	KASSERT(offset <= EI_OFFSET);

	CSR_WRITE_4(sc, ROMInterface,
	    EI_REQ | EI_OP_RD | (offset << EI_OFFSET_SHIFT));
	DELAY(500);
	for (i = 0; i < SE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, ROMInterface);
		if ((val & EI_REQ) == 0)
			break;
		DELAY(100);
	}
	if (i == SE_TIMEOUT) {
		printf("%s: EEPROM read timeout: 0x%08x\n",
		    sc->sc_dev.dv_xname, val);
		return 0xffff;
	}

	return (val & EI_DATA) >> EI_DATA_SHIFT;
}

int
se_get_mac_addr_eeprom(struct se_softc *sc, uint8_t *dest)
{
	uint16_t val;
	int i;

	val = se_read_eeprom(sc, EEPROMSignature);
	if (val == 0xffff || val == 0x0000) {
		printf("%s: invalid EEPROM signature: 0x%04x\n",
		    sc->sc_dev.dv_xname, val);
		return (EINVAL);
	}

	for (i = 0; i < ETHER_ADDR_LEN; i += 2) {
		val = se_read_eeprom(sc, EEPROMMACAddr + i / 2);
		dest[i + 0] = (uint8_t)val;
		dest[i + 1] = (uint8_t)(val >> 8);
	}

	if ((se_read_eeprom(sc, EEPROMInfo) & 0x80) != 0)
		sc->sc_flags |= SE_FLAG_RGMII;
	return (0);
}

/*
 * For SiS96x, APC CMOS RAM is used to store the Ethernet address.
 * APC CMOS RAM is accessed through the ISA bridge.
 */
#if defined(__amd64__) || defined(__i386__)
int
se_pcib_match(struct pci_attach_args *pa)
{
	const struct pci_matchid apc_devices[] = {
		{ PCI_VENDOR_SIS, PCI_PRODUCT_SIS_965 },
		{ PCI_VENDOR_SIS, PCI_PRODUCT_SIS_966 },
		{ PCI_VENDOR_SIS, PCI_PRODUCT_SIS_968 }
	};

	return pci_matchbyid(pa, apc_devices, nitems(apc_devices));
}
#endif

int
se_get_mac_addr_apc(struct se_softc *sc, uint8_t *dest)
{
#if defined(__amd64__) || defined(__i386__)
	struct pci_attach_args pa;
	pcireg_t reg;
	bus_space_handle_t ioh;
	int rc, i;

	if (pci_find_device(&pa, se_pcib_match) == 0) {
		printf("\n%s: couldn't find PCI-ISA bridge\n",
		    sc->sc_dev.dv_xname);
		return EINVAL;
	}

	/* Enable ports 0x78 and 0x79 to access the APC registers. */
	reg = pci_conf_read(pa.pa_pc, pa.pa_tag, 0x48);
	pci_conf_write(pa.pa_pc, pa.pa_tag, 0x48, reg & ~0x02);
	DELAY(50);
	(void)pci_conf_read(pa.pa_pc, pa.pa_tag, 0x48);

	/* XXX this abuses bus_space implementation knowledge */
	rc = _bus_space_map(pa.pa_iot, 0x78, 2, 0, &ioh);
	if (rc == 0) {
		/* Read the stored Ethernet address. */
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			bus_space_write_1(pa.pa_iot, ioh, 0, 0x09 + i);
			dest[i] = bus_space_read_1(pa.pa_iot, ioh, 1);
		}
		bus_space_write_1(pa.pa_iot, ioh, 0, 0x12);
		if ((bus_space_read_1(pa.pa_iot, ioh, 1) & 0x80) != 0)
			sc->sc_flags |= SE_FLAG_RGMII;
		_bus_space_unmap(pa.pa_iot, ioh, 2, NULL);
	} else
		rc = EINVAL;

	/* Restore access to the APC registers. */
	pci_conf_write(pa.pa_pc, pa.pa_tag, 0x48, reg);

	return rc;
#endif
	return EINVAL;
}
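
/*
 * PHY access follows the same request/poll pattern as the EEPROM
 * above: se_miibus_cmd() writes a command word with GMI_REQ set to
 * the GMIIControl register and polls until the chip clears GMI_REQ;
 * the returned register value then carries the read data in its
 * GMI_DATA field.  A return value with GMI_REQ still set means the
 * operation timed out.
 */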
uint32_t
se_miibus_cmd(struct se_softc *sc, uint32_t ctrl)
{
	int i;
	uint32_t val;

	CSR_WRITE_4(sc, GMIIControl, ctrl);
	DELAY(10);
	for (i = 0; i < SE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, GMIIControl);
		if ((val & GMI_REQ) == 0)
			return val;
		DELAY(10);
	}

	return GMI_REQ;
}

int
se_miibus_readreg(struct device *self, int phy, int reg)
{
	struct se_softc *sc = (struct se_softc *)self;
	uint32_t ctrl, val;

	ctrl = (phy << GMI_PHY_SHIFT) | (reg << GMI_REG_SHIFT) |
	    GMI_OP_RD | GMI_REQ;
	val = se_miibus_cmd(sc, ctrl);
	if ((val & GMI_REQ) != 0) {
		printf("%s: PHY read timeout: %d\n",
		    sc->sc_dev.dv_xname, reg);
		return 0;
	}
	return (val & GMI_DATA) >> GMI_DATA_SHIFT;
}

void
se_miibus_writereg(struct device *self, int phy, int reg, int data)
{
	struct se_softc *sc = (struct se_softc *)self;
	uint32_t ctrl, val;

	ctrl = (phy << GMI_PHY_SHIFT) | (reg << GMI_REG_SHIFT) |
	    GMI_OP_WR | (data << GMI_DATA_SHIFT) | GMI_REQ;
	val = se_miibus_cmd(sc, ctrl);
	if ((val & GMI_REQ) != 0) {
		printf("%s: PHY write timeout: %d\n",
		    sc->sc_dev.dv_xname, reg);
	}
}

void
se_miibus_statchg(struct device *self)
{
	struct se_softc *sc = (struct se_softc *)self;
#ifdef SE_DEBUG
	struct ifnet *ifp = &sc->sc_ac.ac_if;
#endif
	struct mii_data *mii = &sc->sc_mii;
	uint32_t ctl, speed;

	speed = 0;
	sc->sc_flags &= ~SE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
#ifdef SE_DEBUG
			if (ifp->if_flags & IFF_DEBUG)
				printf("%s: 10baseT link\n", ifp->if_xname);
#endif
			sc->sc_flags |= SE_FLAG_LINK;
			speed = SC_SPEED_10;
			break;
		case IFM_100_TX:
#ifdef SE_DEBUG
			if (ifp->if_flags & IFF_DEBUG)
				printf("%s: 100baseTX link\n", ifp->if_xname);
#endif
			sc->sc_flags |= SE_FLAG_LINK;
			speed = SC_SPEED_100;
			break;
		case IFM_1000_T:
#ifdef SE_DEBUG
			if (ifp->if_flags & IFF_DEBUG)
				printf("%s: 1000baseT link\n", ifp->if_xname);
#endif
			if ((sc->sc_flags & SE_FLAG_FASTETHER) == 0) {
				sc->sc_flags |= SE_FLAG_LINK;
				speed = SC_SPEED_1000;
			}
			break;
		default:
			break;
		}
	}
	if ((sc->sc_flags & SE_FLAG_LINK) == 0) {
#ifdef SE_DEBUG
		if (ifp->if_flags & IFF_DEBUG)
			printf("%s: no link\n", ifp->if_xname);
#endif
		return;
	}
	/* Reprogram MAC to resolved speed/duplex/flow-control parameters. */
	ctl = CSR_READ_4(sc, StationControl);
	ctl &= ~(0x0f000000 | SC_FDX | SC_SPEED_MASK);
	if (speed == SC_SPEED_1000)
		ctl |= 0x07000000;
	else
		ctl |= 0x04000000;
#ifdef notyet
	if ((sc->sc_flags & SE_FLAG_GMII) != 0)
		ctl |= 0x03000000;
#endif
	ctl |= speed;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
		ctl |= SC_FDX;
	CSR_WRITE_4(sc, StationControl, ctl);
	if ((sc->sc_flags & SE_FLAG_RGMII) != 0) {
		CSR_WRITE_4(sc, RGMIIDelay, 0x0441);
		CSR_WRITE_4(sc, RGMIIDelay, 0x0440);
	}
}
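
/*
 * Multicast filtering below uses a 64-bit hash table split across the
 * RxHashTable/RxHashTable2 registers: the top six bits of the
 * big-endian CRC32 of each multicast address select one of the 64
 * bits, with bit 31 of the CRC choosing the register and bits 30-26
 * the bit position within it.
 */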
void
se_iff(struct se_softc *sc)
{
	struct arpcom *ac = &sc->sc_ac;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t crc, hashes[2];
	uint16_t rxfilt;

	rxfilt = CSR_READ_2(sc, RxMacControl);
	rxfilt &= ~(AcceptAllPhys | AcceptBroadcast | AcceptMulticast);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept broadcast frames.
	 * Always accept frames destined to our station address.
	 */
	rxfilt |= AcceptBroadcast | AcceptMyPhys;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= AcceptAllPhys;
		rxfilt |= AcceptMulticast;
		hashes[0] = hashes[1] = 0xffffffff;
	} else {
		rxfilt |= AcceptMulticast;
		hashes[0] = hashes[1] = 0;

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);

			hashes[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	CSR_WRITE_2(sc, RxMacControl, rxfilt);
	CSR_WRITE_4(sc, RxHashTable, hashes[0]);
	CSR_WRITE_4(sc, RxHashTable2, hashes[1]);
}

void
se_reset(struct se_softc *sc)
{
	CSR_WRITE_4(sc, IntrMask, 0);
	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);

	/* Soft reset. */
	CSR_WRITE_4(sc, IntrControl, 0x8000);
	CSR_READ_4(sc, IntrControl);
	DELAY(100);
	CSR_WRITE_4(sc, IntrControl, 0);
	/* Stop MAC. */
	CSR_WRITE_4(sc, TX_CTL, 0x1a00);
	CSR_WRITE_4(sc, RX_CTL, 0x1a00);

	CSR_WRITE_4(sc, IntrMask, 0);
	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);

	CSR_WRITE_4(sc, GMIIControl, 0);
}

/*
 * Probe for an SiS chip.  Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
int
se_match(struct device *parent, void *match, void *aux)
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;

	return pci_matchbyid(pa, se_devices, nitems(se_devices));
}
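
/*
 * Attach order below: map the register window, hook up the interrupt,
 * read the station address (from APC CMOS RAM if bit 24 of PCI config
 * register 0x70 is set, otherwise from the EEPROM), then create the
 * DMA maps and descriptor rings before handing the interface to the
 * network stack.
 */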
/*
 * Attach the interface.  Do ifmedia setup and ethernet/BPF attach.
 */
void
se_attach(struct device *parent, struct device *self, void *aux)
{
	struct se_softc *sc = (struct se_softc *)self;
	struct arpcom *ac = &sc->sc_ac;
	struct ifnet *ifp = &ac->ac_if;
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	uint8_t eaddr[ETHER_ADDR_LEN];
	const char *intrstr;
	pci_intr_handle_t ih;
	bus_size_t iosize;
	bus_dma_segment_t seg;
	struct se_list_data *ld;
	struct se_chain_data *cd;
	int nseg;
	uint i;
	int rc;

	printf(": ");

	/*
	 * Map control/status registers.
	 */
	rc = pci_mapreg_map(pa, PCI_MAPREG_START, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_iot, &sc->sc_ioh, NULL, &iosize, 0);
	if (rc != 0) {
		printf("can't map i/o space\n");
		return;
	}

	if (pci_intr_map(pa, &ih)) {
		printf("can't map interrupt\n");
		goto fail1;
	}
	intrstr = pci_intr_string(pa->pa_pc, ih);
	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_NET, se_intr, sc,
	    self->dv_xname);
	if (sc->sc_ih == NULL) {
		printf("can't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail1;
	}

	printf("%s", intrstr);

	if (pa->pa_id == PCI_ID_CODE(PCI_VENDOR_SIS, PCI_PRODUCT_SIS_190))
		sc->sc_flags |= SE_FLAG_FASTETHER;

	/* Reset the adapter. */
	se_reset(sc);

	/* Get MAC address from the EEPROM. */
	if ((pci_conf_read(pa->pa_pc, pa->pa_tag, 0x70) & (0x01 << 24)) != 0)
		se_get_mac_addr_apc(sc, eaddr);
	else
		se_get_mac_addr_eeprom(sc, eaddr);
	printf(", address %s\n", ether_sprintf(eaddr));
	bcopy(eaddr, ac->ac_enaddr, ETHER_ADDR_LEN);

	/*
	 * Now do all the DMA mapping stuff.
	 */
	sc->sc_dmat = pa->pa_dmat;
	ld = &sc->se_ldata;
	cd = &sc->se_cdata;

	/* First create TX/RX busdma maps. */
	for (i = 0; i < SE_RX_RING_CNT; i++) {
		rc = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT, &cd->se_rx_map[i]);
		if (rc != 0) {
			printf("%s: cannot init the RX map array\n",
			    self->dv_xname);
			goto fail2;
		}
	}

	for (i = 0; i < SE_TX_RING_CNT; i++) {
		rc = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT, &cd->se_tx_map[i]);
		if (rc != 0) {
			printf("%s: cannot init the TX map array\n",
			    self->dv_xname);
			goto fail2;
		}
	}
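
	/*
	 * Two levels of DMA mapping are set up here: the per-buffer maps
	 * above (one single-segment map of up to MCLBYTES per descriptor
	 * slot, loaded and unloaded as mbufs come and go), and, below,
	 * one long-lived map per ring covering the descriptor array
	 * itself.
	 */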
	/*
	 * Now allocate a chunk of DMA-able memory for RX and TX ring
	 * descriptors, as a contiguous block of memory.
	 * XXX fix deallocation upon error
	 */

	/* RX */
	rc = bus_dmamem_alloc(sc->sc_dmat, SE_RX_RING_SZ, PAGE_SIZE, 0,
	    &seg, 1, &nseg, BUS_DMA_NOWAIT);
	if (rc != 0) {
		printf("%s: no memory for RX descriptors\n", self->dv_xname);
		goto fail2;
	}

	rc = bus_dmamem_map(sc->sc_dmat, &seg, nseg, SE_RX_RING_SZ,
	    (caddr_t *)&ld->se_rx_ring, BUS_DMA_NOWAIT);
	if (rc != 0) {
		printf("%s: can't map RX descriptors\n", self->dv_xname);
		goto fail2;
	}

	rc = bus_dmamap_create(sc->sc_dmat, SE_RX_RING_SZ, 1,
	    SE_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &ld->se_rx_dmamap);
	if (rc != 0) {
		printf("%s: can't alloc RX DMA map\n", self->dv_xname);
		goto fail2;
	}

	rc = bus_dmamap_load(sc->sc_dmat, ld->se_rx_dmamap,
	    (caddr_t)ld->se_rx_ring, SE_RX_RING_SZ, NULL, BUS_DMA_NOWAIT);
	if (rc != 0) {
		printf("%s: can't load RX DMA map\n", self->dv_xname);
		bus_dmamem_unmap(sc->sc_dmat,
		    (caddr_t)ld->se_rx_ring, SE_RX_RING_SZ);
		bus_dmamap_destroy(sc->sc_dmat, ld->se_rx_dmamap);
		bus_dmamem_free(sc->sc_dmat, &seg, nseg);
		goto fail2;
	}

	/* TX */
	rc = bus_dmamem_alloc(sc->sc_dmat, SE_TX_RING_SZ, PAGE_SIZE, 0,
	    &seg, 1, &nseg, BUS_DMA_NOWAIT);
	if (rc != 0) {
		printf("%s: no memory for TX descriptors\n", self->dv_xname);
		goto fail2;
	}

	rc = bus_dmamem_map(sc->sc_dmat, &seg, nseg, SE_TX_RING_SZ,
	    (caddr_t *)&ld->se_tx_ring, BUS_DMA_NOWAIT);
	if (rc != 0) {
		printf("%s: can't map TX descriptors\n", self->dv_xname);
		goto fail2;
	}

	rc = bus_dmamap_create(sc->sc_dmat, SE_TX_RING_SZ, 1,
	    SE_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &ld->se_tx_dmamap);
	if (rc != 0) {
		printf("%s: can't alloc TX DMA map\n", self->dv_xname);
		goto fail2;
	}

	rc = bus_dmamap_load(sc->sc_dmat, ld->se_tx_dmamap,
	    (caddr_t)ld->se_tx_ring, SE_TX_RING_SZ, NULL, BUS_DMA_NOWAIT);
	if (rc != 0) {
		printf("%s: can't load TX DMA map\n", self->dv_xname);
		bus_dmamem_unmap(sc->sc_dmat,
		    (caddr_t)ld->se_tx_ring, SE_TX_RING_SZ);
		bus_dmamap_destroy(sc->sc_dmat, ld->se_tx_dmamap);
		bus_dmamem_free(sc->sc_dmat, &seg, nseg);
		goto fail2;
	}

	timeout_set(&sc->sc_tick_tmo, se_tick, sc);

	ifp = &sc->sc_ac.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = se_ioctl;
	ifp->if_start = se_start;
	ifp->if_watchdog = se_watchdog;
	ifq_init_maxlen(&ifp->if_snd, SE_TX_RING_CNT - 1);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	/*
	 * Do MII setup.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = se_miibus_readreg;
	sc->sc_mii.mii_writereg = se_miibus_writereg;
	sc->sc_mii.mii_statchg = se_miibus_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, se_ifmedia_upd,
	    se_ifmedia_sts);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		/* No PHY attached */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
	/*
	 * Call MI attach routine.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);

	return;

fail2:
	pci_intr_disestablish(pa->pa_pc, sc->sc_ih);
fail1:
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, iosize);
}

int
se_activate(struct device *self, int act)
{
	struct se_softc *sc = (struct se_softc *)self;
	struct ifnet *ifp = &sc->sc_ac.ac_if;

	switch (act) {
	case DVACT_SUSPEND:
		if (ifp->if_flags & IFF_RUNNING)
			se_stop(sc);
		break;
	case DVACT_RESUME:
		if (ifp->if_flags & IFF_UP)
			(void)se_init(ifp);
		break;
	}
	return (0);
}

/*
 * Initialize the TX descriptors.
 */
int
se_list_tx_init(struct se_softc *sc)
{
	struct se_list_data *ld = &sc->se_ldata;
	struct se_chain_data *cd = &sc->se_cdata;

	bzero(ld->se_tx_ring, SE_TX_RING_SZ);
	ld->se_tx_ring[SE_TX_RING_CNT - 1].se_flags = htole32(RING_END);
	bus_dmamap_sync(sc->sc_dmat, ld->se_tx_dmamap, 0, SE_TX_RING_SZ,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	cd->se_tx_prod = 0;
	cd->se_tx_cons = 0;
	cd->se_tx_cnt = 0;

	return 0;
}

int
se_list_tx_free(struct se_softc *sc)
{
	struct se_chain_data *cd = &sc->se_cdata;
	uint i;

	for (i = 0; i < SE_TX_RING_CNT; i++) {
		if (cd->se_tx_mbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, cd->se_tx_map[i]);
			m_free(cd->se_tx_mbuf[i]);
			cd->se_tx_mbuf[i] = NULL;
		}
	}

	return 0;
}

/*
 * Initialize the RX descriptors and allocate mbufs for them.
 */
int
se_list_rx_init(struct se_softc *sc)
{
	struct se_list_data *ld = &sc->se_ldata;
	struct se_chain_data *cd = &sc->se_cdata;
	uint i;

	bzero(ld->se_rx_ring, SE_RX_RING_SZ);
	bus_dmamap_sync(sc->sc_dmat, ld->se_rx_dmamap, 0, SE_RX_RING_SZ,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	for (i = 0; i < SE_RX_RING_CNT; i++) {
		if (se_newbuf(sc, i) != 0)
			return ENOBUFS;
	}

	cd->se_rx_prod = 0;

	return 0;
}

int
se_list_rx_free(struct se_softc *sc)
{
	struct se_chain_data *cd = &sc->se_cdata;
	uint i;

	for (i = 0; i < SE_RX_RING_CNT; i++) {
		if (cd->se_rx_mbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, cd->se_rx_map[i]);
			m_free(cd->se_rx_mbuf[i]);
			cd->se_rx_mbuf[i] = NULL;
		}
	}

	return 0;
}
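
/*
 * Note on buffer sizing in se_newbuf() below: the cluster's data
 * pointer is advanced by SE_RX_BUF_ALIGN bytes via m_adj(), so the
 * usable DMA length handed to the chip is MCLBYTES - SE_RX_BUF_ALIGN.
 * se_discard_rxbuf() uses the same constant when it recycles a
 * descriptor without replacing the mbuf.
 */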
/*
 * Initialize an RX descriptor and attach an mbuf cluster.
 */
int
se_newbuf(struct se_softc *sc, uint i)
{
#ifdef SE_DEBUG
	struct ifnet *ifp = &sc->sc_ac.ac_if;
#endif
	struct se_list_data *ld = &sc->se_ldata;
	struct se_chain_data *cd = &sc->se_cdata;
	struct se_desc *desc;
	struct mbuf *m;
	int rc;

	m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
	if (m == NULL) {
#ifdef SE_DEBUG
		if (ifp->if_flags & IFF_DEBUG)
			printf("%s: MCLGETL failed\n", ifp->if_xname);
#endif
		return ENOBUFS;
	}
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, SE_RX_BUF_ALIGN);

	rc = bus_dmamap_load_mbuf(sc->sc_dmat, cd->se_rx_map[i],
	    m, BUS_DMA_NOWAIT);
	if (rc != 0) {
		m_freem(m);
		return ENOBUFS;
	}
	KASSERT(cd->se_rx_map[i]->dm_nsegs == 1);
	bus_dmamap_sync(sc->sc_dmat, cd->se_rx_map[i], 0,
	    cd->se_rx_map[i]->dm_mapsize, BUS_DMASYNC_PREREAD);

	cd->se_rx_mbuf[i] = m;
	desc = &ld->se_rx_ring[i];
	desc->se_sts_size = 0;
	desc->se_cmdsts = htole32(RDC_OWN | RDC_INTR);
	desc->se_ptr = htole32((uint32_t)cd->se_rx_map[i]->dm_segs[0].ds_addr);
	desc->se_flags = htole32(cd->se_rx_map[i]->dm_segs[0].ds_len);
	if (i == SE_RX_RING_CNT - 1)
		desc->se_flags |= htole32(RING_END);
	bus_dmamap_sync(sc->sc_dmat, ld->se_rx_dmamap, i * sizeof(*desc),
	    sizeof(*desc), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return 0;
}

void
se_discard_rxbuf(struct se_softc *sc, uint i)
{
	struct se_list_data *ld = &sc->se_ldata;
	struct se_desc *desc;

	desc = &ld->se_rx_ring[i];
	desc->se_sts_size = 0;
	desc->se_cmdsts = htole32(RDC_OWN | RDC_INTR);
	desc->se_flags = htole32(MCLBYTES - SE_RX_BUF_ALIGN);
	if (i == SE_RX_RING_CNT - 1)
		desc->se_flags |= htole32(RING_END);
	bus_dmamap_sync(sc->sc_dmat, ld->se_rx_dmamap, i * sizeof(*desc),
	    sizeof(*desc), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
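
/*
 * Descriptor ownership works as follows: se_newbuf() and
 * se_discard_rxbuf() hand a slot to the chip by setting RDC_OWN (plus
 * RDC_INTR so its completion raises an interrupt); the chip clears
 * RDC_OWN once it has stored a frame.  se_rxeof() therefore walks the
 * ring from the last producer index and stops at the first descriptor
 * that still has RDC_OWN set.
 */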
/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
void
se_rxeof(struct se_softc *sc)
{
	struct mbuf *m;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct se_list_data *ld = &sc->se_ldata;
	struct se_chain_data *cd = &sc->se_cdata;
	struct se_desc *cur_rx;
	uint32_t rxinfo, rxstat;
	uint i;

	bus_dmamap_sync(sc->sc_dmat, ld->se_rx_dmamap, 0, SE_RX_RING_SZ,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	for (i = cd->se_rx_prod; ; SE_INC(i, SE_RX_RING_CNT)) {
		cur_rx = &ld->se_rx_ring[i];
		rxinfo = letoh32(cur_rx->se_cmdsts);
		if ((rxinfo & RDC_OWN) != 0)
			break;
		rxstat = letoh32(cur_rx->se_sts_size);

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if ((rxstat & RDS_CRCOK) == 0 || SE_RX_ERROR(rxstat) != 0 ||
		    SE_RX_NSEGS(rxstat) != 1) {
			/* XXX We don't support multi-segment frames yet. */
			if (ifp->if_flags & IFF_DEBUG)
				printf("%s: rx error %b\n",
				    ifp->if_xname, rxstat, RX_ERR_BITS);
			se_discard_rxbuf(sc, i);
			ifp->if_ierrors++;
			continue;
		}

		/* No errors; receive the packet. */
		bus_dmamap_sync(sc->sc_dmat, cd->se_rx_map[i], 0,
		    cd->se_rx_map[i]->dm_mapsize, BUS_DMASYNC_POSTREAD);
		m = cd->se_rx_mbuf[i];
		if (se_newbuf(sc, i) != 0) {
			se_discard_rxbuf(sc, i);
			ifp->if_iqdrops++;
			continue;
		}
		/*
		 * Account for 10 bytes auto padding which is used
		 * to align the IP header on a 32-bit boundary.  Also note
		 * that CRC bytes are automatically removed by the hardware.
		 */
		m->m_data += SE_RX_PAD_BYTES;
		m->m_pkthdr.len = m->m_len =
		    SE_RX_BYTES(rxstat) - SE_RX_PAD_BYTES;

		ml_enqueue(&ml, m);
	}

	if_input(ifp, &ml);

	cd->se_rx_prod = i;
}

/*
 * A frame was downloaded to the chip.  It's safe for us to clean up
 * the list buffers.
 */
void
se_txeof(struct se_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct se_list_data *ld = &sc->se_ldata;
	struct se_chain_data *cd = &sc->se_cdata;
	struct se_desc *cur_tx;
	uint32_t txstat;
	uint i;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	bus_dmamap_sync(sc->sc_dmat, ld->se_tx_dmamap, 0, SE_TX_RING_SZ,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	for (i = cd->se_tx_cons; cd->se_tx_cnt > 0;
	    cd->se_tx_cnt--, SE_INC(i, SE_TX_RING_CNT)) {
		cur_tx = &ld->se_tx_ring[i];
		txstat = letoh32(cur_tx->se_cmdsts);
		if ((txstat & TDC_OWN) != 0)
			break;

		ifq_clr_oactive(&ifp->if_snd);

		if (SE_TX_ERROR(txstat) != 0) {
			if (ifp->if_flags & IFF_DEBUG)
				printf("%s: tx error %b\n",
				    ifp->if_xname, txstat, TX_ERR_BITS);
			ifp->if_oerrors++;
			/* TODO: better error differentiation */
		}

		if (cd->se_tx_mbuf[i] != NULL) {
			bus_dmamap_sync(sc->sc_dmat, cd->se_tx_map[i], 0,
			    cd->se_tx_map[i]->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, cd->se_tx_map[i]);
			m_free(cd->se_tx_mbuf[i]);
			cd->se_tx_mbuf[i] = NULL;
		}

		cur_tx->se_sts_size = 0;
		cur_tx->se_cmdsts = 0;
		cur_tx->se_ptr = 0;
		cur_tx->se_flags &= htole32(RING_END);
		bus_dmamap_sync(sc->sc_dmat, ld->se_tx_dmamap,
		    i * sizeof(*cur_tx), sizeof(*cur_tx),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	cd->se_tx_cons = i;
	if (cd->se_tx_cnt == 0)
		ifp->if_timer = 0;
}

void
se_tick(void *xsc)
{
	struct se_softc *sc = xsc;
	struct mii_data *mii;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int s;

	s = splnet();
	mii = &sc->sc_mii;
	mii_tick(mii);
	if ((sc->sc_flags & SE_FLAG_LINK) == 0) {
		se_miibus_statchg(&sc->sc_dev);
		if ((sc->sc_flags & SE_FLAG_LINK) != 0 &&
		    !ifq_empty(&ifp->if_snd))
			se_start(ifp);
	}
	splx(s);

	timeout_add_sec(&sc->sc_tick_tmo, 1);
}
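
/*
 * Interrupt strategy: acknowledge the pending status bits, mask all
 * interrupts for the duration of the handler, service RX/TX
 * completions until no SE_INTRS bits remain set, then unmask.  When
 * the chip reports the RX side idle (INTR_RX_IDLE), the RX MAC has to
 * be kicked with a poll command to resume fetching descriptors.
 */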
int
se_intr(void *arg)
{
	struct se_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	uint32_t status;

	status = CSR_READ_4(sc, IntrStatus);
	if (status == 0xffffffff || (status & SE_INTRS) == 0) {
		/* Not ours. */
		return 0;
	}
	/* Ack interrupts. */
	CSR_WRITE_4(sc, IntrStatus, status);
	/* Disable further interrupts. */
	CSR_WRITE_4(sc, IntrMask, 0);

	for (;;) {
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			break;
		if ((status & (INTR_RX_DONE | INTR_RX_IDLE)) != 0) {
			se_rxeof(sc);
			/* Wakeup Rx MAC. */
			if ((status & INTR_RX_IDLE) != 0)
				CSR_WRITE_4(sc, RX_CTL,
				    0x1a00 | 0x000c | RX_CTL_POLL | RX_CTL_ENB);
		}
		if ((status & (INTR_TX_DONE | INTR_TX_IDLE)) != 0)
			se_txeof(sc);
		status = CSR_READ_4(sc, IntrStatus);
		if ((status & SE_INTRS) == 0)
			break;
		/* Ack interrupts. */
		CSR_WRITE_4(sc, IntrStatus, status);
	}

	if ((ifp->if_flags & IFF_RUNNING) != 0) {
		/* Re-enable interrupts. */
		CSR_WRITE_4(sc, IntrMask, SE_INTRS);
		if (!ifq_empty(&ifp->if_snd))
			se_start(ifp);
	}

	return 1;
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
int
se_encap(struct se_softc *sc, struct mbuf *m_head, uint32_t *txidx)
{
#ifdef SE_DEBUG
	struct ifnet *ifp = &sc->sc_ac.ac_if;
#endif
	struct mbuf *m;
	struct se_list_data *ld = &sc->se_ldata;
	struct se_chain_data *cd = &sc->se_cdata;
	struct se_desc *desc;
	uint i, cnt = 0;
	int rc;

	/*
	 * If there's no way we can send any packets, return now.
	 */
	if (SE_TX_RING_CNT - cd->se_tx_cnt < 2) {
#ifdef SE_DEBUG
		if (ifp->if_flags & IFF_DEBUG)
			printf("%s: encap failed, not enough TX desc\n",
			    ifp->if_xname);
#endif
		return ENOBUFS;
	}

	if (m_defrag(m_head, M_DONTWAIT) != 0) {
#ifdef SE_DEBUG
		if (ifp->if_flags & IFF_DEBUG)
			printf("%s: m_defrag failed\n", ifp->if_xname);
#endif
		return ENOBUFS;	/* XXX should not be fatal */
	}

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers.  Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	i = *txidx;

	for (m = m_head; m != NULL; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		if ((SE_TX_RING_CNT - (cd->se_tx_cnt + cnt)) < 2) {
#ifdef SE_DEBUG
			if (ifp->if_flags & IFF_DEBUG)
				printf("%s: encap failed, not enough TX desc\n",
				    ifp->if_xname);
#endif
			return ENOBUFS;
		}
		cd->se_tx_mbuf[i] = m;
		rc = bus_dmamap_load_mbuf(sc->sc_dmat, cd->se_tx_map[i],
		    m, BUS_DMA_NOWAIT);
		if (rc != 0)
			return ENOBUFS;
		KASSERT(cd->se_tx_map[i]->dm_nsegs == 1);
		bus_dmamap_sync(sc->sc_dmat, cd->se_tx_map[i], 0,
		    cd->se_tx_map[i]->dm_mapsize, BUS_DMASYNC_PREWRITE);

		desc = &ld->se_tx_ring[i];
		desc->se_sts_size = htole32(cd->se_tx_map[i]->dm_segs->ds_len);
		desc->se_ptr =
		    htole32((uint32_t)cd->se_tx_map[i]->dm_segs->ds_addr);
		desc->se_flags = htole32(cd->se_tx_map[i]->dm_segs->ds_len);
		if (i == SE_TX_RING_CNT - 1)
			desc->se_flags |= htole32(RING_END);
		desc->se_cmdsts = htole32(TDC_OWN | TDC_INTR | TDC_DEF |
		    TDC_CRC | TDC_PAD | TDC_BST);
		bus_dmamap_sync(sc->sc_dmat, ld->se_tx_dmamap,
		    i * sizeof(*desc), sizeof(*desc),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		SE_INC(i, SE_TX_RING_CNT);
		cnt++;
	}

	/* can't happen */
	if (m != NULL)
		return ENOBUFS;

	cd->se_tx_cnt += cnt;
	*txidx = i;

	return 0;
}
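
/*
 * Since m_defrag() above collapses every outgoing chain into a single
 * mbuf cluster, each frame ends up occupying exactly one descriptor
 * and one single-segment DMA map; the KASSERT on dm_nsegs documents
 * that assumption.
 */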
/*
 * Main transmit routine.  To avoid having to do mbuf copies, we put
 * pointers to the mbuf data regions directly in the transmit lists.
 * We also save a copy of the pointers since the transmit list
 * fragment pointers are physical addresses.
 */
void
se_start(struct ifnet *ifp)
{
	struct se_softc *sc = ifp->if_softc;
	struct mbuf *m_head = NULL;
	struct se_chain_data *cd = &sc->se_cdata;
	uint i, queued = 0;

	if ((sc->sc_flags & SE_FLAG_LINK) == 0 ||
	    !(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd)) {
#ifdef SE_DEBUG
		if (ifp->if_flags & IFF_DEBUG)
			printf("%s: can't tx, flags 0x%x 0x%04x\n",
			    ifp->if_xname, sc->sc_flags, (uint)ifp->if_flags);
#endif
		return;
	}

	i = cd->se_tx_prod;

	while (cd->se_tx_mbuf[i] == NULL) {
		m_head = ifq_deq_begin(&ifp->if_snd);
		if (m_head == NULL)
			break;

		if (se_encap(sc, m_head, &i) != 0) {
			ifq_deq_rollback(&ifp->if_snd, m_head);
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		/* Now we are committed to transmit the packet. */
		ifq_deq_commit(&ifp->if_snd, m_head);
		queued++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
	}

	if (queued > 0) {
		/* Transmit. */
		cd->se_tx_prod = i;
		CSR_WRITE_4(sc, TX_CTL, 0x1a00 | TX_CTL_ENB | TX_CTL_POLL);
		ifp->if_timer = 5;
	}
}

int
se_init(struct ifnet *ifp)
{
	struct se_softc *sc = ifp->if_softc;
	uint16_t rxfilt;
	int i;

	splassert(IPL_NET);

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	se_stop(sc);
	se_reset(sc);

	/* Init circular RX list. */
	if (se_list_rx_init(sc) == ENOBUFS) {
		se_stop(sc);	/* XXX necessary? */
		return ENOBUFS;
	}

	/* Init TX descriptors. */
	se_list_tx_init(sc);

	/*
	 * Load the addresses of the RX and TX lists.
	 */
	CSR_WRITE_4(sc, TX_DESC,
	    (uint32_t)sc->se_ldata.se_tx_dmamap->dm_segs[0].ds_addr);
	CSR_WRITE_4(sc, RX_DESC,
	    (uint32_t)sc->se_ldata.se_rx_dmamap->dm_segs[0].ds_addr);

	CSR_WRITE_4(sc, TxMacControl, 0x60);
	CSR_WRITE_4(sc, RxWakeOnLan, 0);
	CSR_WRITE_4(sc, RxWakeOnLanData, 0);
	CSR_WRITE_2(sc, RxMPSControl, ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN +
	    SE_RX_PAD_BYTES);

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, RxMacAddr + i, sc->sc_ac.ac_enaddr[i]);
	/* Configure RX MAC. */
	rxfilt = RXMAC_STRIP_FCS | RXMAC_PAD_ENB | RXMAC_CSUM_ENB;
	CSR_WRITE_2(sc, RxMacControl, rxfilt);

	/* Program promiscuous mode and multicast filters. */
	se_iff(sc);

	/*
	 * Clear and enable interrupts.
	 */
	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);
	CSR_WRITE_4(sc, IntrMask, SE_INTRS);

	/* Enable receiver and transmitter. */
	CSR_WRITE_4(sc, TX_CTL, 0x1a00 | TX_CTL_ENB);
	CSR_WRITE_4(sc, RX_CTL, 0x1a00 | 0x000c | RX_CTL_POLL | RX_CTL_ENB);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	sc->sc_flags &= ~SE_FLAG_LINK;
	mii_mediachg(&sc->sc_mii);
	timeout_add_sec(&sc->sc_tick_tmo, 1);

	return 0;
}
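
/*
 * The 0x1a00 value written to TX_CTL/RX_CTL throughout this file is an
 * undocumented base bit pattern; it appears to be carried over from
 * the reference drivers mentioned at the top of this file, and its
 * exact meaning is not documented here.
 */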
/*
 * Set media options.
 */
int
se_ifmedia_upd(struct ifnet *ifp)
{
	struct se_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = &sc->sc_mii;
	sc->sc_flags &= ~SE_FLAG_LINK;
	if (mii->mii_instance) {
		struct mii_softc *miisc;
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	return mii_mediachg(mii);
}

/*
 * Report current media status.
 */
void
se_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct se_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = &sc->sc_mii;
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

int
se_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct se_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, rc = 0;

	s = splnet();

	switch (command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			rc = se_init(ifp);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				rc = ENETRESET;
			else
				rc = se_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				se_stop(sc);
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		rc = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
		break;
	default:
		rc = ether_ioctl(ifp, &sc->sc_ac, command, data);
		break;
	}

	if (rc == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			se_iff(sc);
		rc = 0;
	}

	splx(s);
	return rc;
}

void
se_watchdog(struct ifnet *ifp)
{
	struct se_softc *sc = ifp->if_softc;
	int s;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;

	s = splnet();
	se_init(ifp);
	if (!ifq_empty(&ifp->if_snd))
		se_start(ifp);
	splx(s);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
void
se_stop(struct se_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	timeout_del(&sc->sc_tick_tmo);
	mii_down(&sc->sc_mii);

	CSR_WRITE_4(sc, IntrMask, 0);
	CSR_READ_4(sc, IntrMask);
	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);
	/* Stop TX/RX MAC. */
	CSR_WRITE_4(sc, TX_CTL, 0x1a00);
	CSR_WRITE_4(sc, RX_CTL, 0x1a00);
	/* XXX Can we assume active DMA cycles are gone? */
	DELAY(2000);
	CSR_WRITE_4(sc, IntrMask, 0);
	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);

	sc->sc_flags &= ~SE_FLAG_LINK;
	se_list_rx_free(sc);
	se_list_tx_free(sc);
}