/*	$OpenBSD: if_se.c,v 1.27 2024/11/05 18:58:59 miod Exp $ */

/*-
 * Copyright (c) 2009, 2010 Christopher Zimmermann <madroach@zakweb.de>
 * Copyright (c) 2008, 2009, 2010 Nikolay Denev <ndenev@gmail.com>
 * Copyright (c) 2007, 2008 Alexander Pohoyda <alexander.pohoyda@gmx.net>
 * Copyright (c) 1997, 1998, 1999
 *	Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL AUTHORS OR
 * THE VOICES IN THEIR HEADS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * SiS 190/191 PCI Ethernet NIC driver.
 *
 * Adapted to SiS 190 NIC by Alexander Pohoyda based on the original
 * SiS 900 driver by Bill Paul, using SiS 190/191 Solaris driver by
 * Masayuki Murayama and SiS 190/191 GNU/Linux driver by K.M. Liu
 * <kmliu@sis.com>. Thanks to Pyun YongHyeon <pyunyh@gmail.com> for
 * review and very useful comments.
 *
 * Ported to OpenBSD by Christopher Zimmermann 2009/10
 *
 * Adapted to SiS 191 NIC by Nikolay Denev with further ideas from the
 * Linux and Solaris drivers.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/ioctl.h>
#include <sys/mbuf.h>
#include <sys/timeout.h>

#include <net/if.h>
#include <net/if_media.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <dev/mii/miivar.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/pci/if_sereg.h>

#define SE_RX_RING_CNT		256	/* [8, 1024] */
#define SE_TX_RING_CNT		256	/* [8, 8192] */
#define SE_RX_BUF_ALIGN		sizeof(uint64_t)

#define SE_RX_RING_SZ		(SE_RX_RING_CNT * sizeof(struct se_desc))
#define SE_TX_RING_SZ		(SE_TX_RING_CNT * sizeof(struct se_desc))

struct se_list_data {
	struct se_desc		*se_rx_ring;
	struct se_desc		*se_tx_ring;
	bus_dmamap_t		se_rx_dmamap;
	bus_dmamap_t		se_tx_dmamap;
};

struct se_chain_data {
	struct mbuf		*se_rx_mbuf[SE_RX_RING_CNT];
	struct mbuf		*se_tx_mbuf[SE_TX_RING_CNT];
	bus_dmamap_t		se_rx_map[SE_RX_RING_CNT];
	bus_dmamap_t		se_tx_map[SE_TX_RING_CNT];
	uint			se_rx_prod;
	uint			se_tx_prod;
	uint			se_tx_cons;
	uint			se_tx_cnt;
};

struct se_softc {
	struct device		 sc_dev;
	void			*sc_ih;
	bus_space_tag_t		 sc_iot;
	bus_space_handle_t	 sc_ioh;
	bus_dma_tag_t		 sc_dmat;

	struct mii_data		 sc_mii;
	struct arpcom		 sc_ac;

	struct se_list_data	 se_ldata;
	struct se_chain_data	 se_cdata;

	struct timeout		 sc_tick_tmo;

	int			 sc_flags;
#define SE_FLAG_FASTETHER	0x0001
#define SE_FLAG_RGMII		0x0010
#define SE_FLAG_LINK		0x8000
};

/*
 * Various supported device vendors/types and their names.
 */
const struct pci_matchid se_devices[] = {
	{ PCI_VENDOR_SIS, PCI_PRODUCT_SIS_190 },
	{ PCI_VENDOR_SIS, PCI_PRODUCT_SIS_191 }
};

int	se_match(struct device *, void *, void *);
void	se_attach(struct device *, struct device *, void *);
int	se_activate(struct device *, int);

const struct cfattach se_ca = {
	sizeof(struct se_softc),
	se_match, se_attach, NULL, se_activate
};

struct cfdriver se_cd = {
	NULL, "se", DV_IFNET
};

uint32_t
	se_miibus_cmd(struct se_softc *, uint32_t);
int	se_miibus_readreg(struct device *, int, int);
void	se_miibus_writereg(struct device *, int, int, int);
void	se_miibus_statchg(struct device *);

int	se_newbuf(struct se_softc *, uint);
void	se_discard_rxbuf(struct se_softc *, uint);
int	se_encap(struct se_softc *, struct mbuf *, uint *);
void	se_rxeof(struct se_softc *);
void	se_txeof(struct se_softc *);
int	se_intr(void *);
void	se_tick(void *);
void	se_start(struct ifnet *);
int	se_ioctl(struct ifnet *, u_long, caddr_t);
int	se_init(struct ifnet *);
void	se_stop(struct se_softc *);
void	se_watchdog(struct ifnet *);
int	se_ifmedia_upd(struct ifnet *);
void	se_ifmedia_sts(struct ifnet *, struct ifmediareq *);

int	se_pcib_match(struct pci_attach_args *);
int	se_get_mac_addr_apc(struct se_softc *, uint8_t *);
int	se_get_mac_addr_eeprom(struct se_softc *, uint8_t *);
uint16_t
	se_read_eeprom(struct se_softc *, int);

void	se_iff(struct se_softc *);
void	se_reset(struct se_softc *);
int	se_list_rx_init(struct se_softc *);
int	se_list_rx_free(struct se_softc *);
int	se_list_tx_init(struct se_softc *);
int	se_list_tx_free(struct se_softc *);

/*
 * Register space access macros.
 */

#define CSR_WRITE_4(sc, reg, val) \
	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, reg, val)
#define CSR_WRITE_2(sc, reg, val) \
	bus_space_write_2((sc)->sc_iot, (sc)->sc_ioh, reg, val)
#define CSR_WRITE_1(sc, reg, val) \
	bus_space_write_1((sc)->sc_iot, (sc)->sc_ioh, reg, val)

#define CSR_READ_4(sc, reg) \
	bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, reg)
#define CSR_READ_2(sc, reg) \
	bus_space_read_2((sc)->sc_iot, (sc)->sc_ioh, reg)
#define CSR_READ_1(sc, reg) \
	bus_space_read_1((sc)->sc_iot, (sc)->sc_ioh, reg)

/*
 * Read a sequence of words from the EEPROM.
 */
uint16_t
se_read_eeprom(struct se_softc *sc, int offset)
{
	uint32_t val;
	int i;

	KASSERT(offset <= EI_OFFSET);

	CSR_WRITE_4(sc, ROMInterface,
	    EI_REQ | EI_OP_RD | (offset << EI_OFFSET_SHIFT));
	DELAY(500);
	for (i = 0; i < SE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, ROMInterface);
		if ((val & EI_REQ) == 0)
			break;
		DELAY(100);
	}
	if (i == SE_TIMEOUT) {
		printf("%s: EEPROM read timeout: 0x%08x\n",
		    sc->sc_dev.dv_xname, val);
		return 0xffff;
	}

	return (val & EI_DATA) >> EI_DATA_SHIFT;
}

int
se_get_mac_addr_eeprom(struct se_softc *sc, uint8_t *dest)
{
	uint16_t val;
	int i;

	val = se_read_eeprom(sc, EEPROMSignature);
	if (val == 0xffff || val == 0x0000) {
		printf("%s: invalid EEPROM signature : 0x%04x\n",
		    sc->sc_dev.dv_xname, val);
		return (EINVAL);
	}

	for (i = 0; i < ETHER_ADDR_LEN; i += 2) {
		val = se_read_eeprom(sc, EEPROMMACAddr + i / 2);
		dest[i + 0] = (uint8_t)val;
		dest[i + 1] = (uint8_t)(val >> 8);
	}

	if ((se_read_eeprom(sc, EEPROMInfo) & 0x80) != 0)
		sc->sc_flags |= SE_FLAG_RGMII;
	return (0);
}

/*
 * For SiS96x, APC CMOS RAM is used to store Ethernet address.
 * APC CMOS RAM is accessed through ISA bridge.
 */
#if defined(__amd64__) || defined(__i386__)
int
se_pcib_match(struct pci_attach_args *pa)
{
	const struct pci_matchid apc_devices[] = {
		{ PCI_VENDOR_SIS, PCI_PRODUCT_SIS_965 },
		{ PCI_VENDOR_SIS, PCI_PRODUCT_SIS_966 },
		{ PCI_VENDOR_SIS, PCI_PRODUCT_SIS_968 }
	};

	return pci_matchbyid(pa, apc_devices, nitems(apc_devices));
}
#endif

int
se_get_mac_addr_apc(struct se_softc *sc, uint8_t *dest)
{
#if defined(__amd64__) || defined(__i386__)
	struct pci_attach_args pa;
	pcireg_t reg;
	bus_space_handle_t ioh;
	int rc, i;

	if (pci_find_device(&pa, se_pcib_match) == 0) {
		printf("\n%s: couldn't find PCI-ISA bridge\n",
		    sc->sc_dev.dv_xname);
		return EINVAL;
	}

	/* Enable port 0x78 and 0x79 to access APC registers. */
	reg = pci_conf_read(pa.pa_pc, pa.pa_tag, 0x48);
	pci_conf_write(pa.pa_pc, pa.pa_tag, 0x48, reg & ~0x02);
	DELAY(50);
	(void)pci_conf_read(pa.pa_pc, pa.pa_tag, 0x48);

	/* XXX this abuses bus_space implementation knowledge */
	rc = _bus_space_map(pa.pa_iot, 0x78, 2, 0, &ioh);
	if (rc == 0) {
		/* Read stored Ethernet address. */
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			bus_space_write_1(pa.pa_iot, ioh, 0, 0x09 + i);
			dest[i] = bus_space_read_1(pa.pa_iot, ioh, 1);
		}
		bus_space_write_1(pa.pa_iot, ioh, 0, 0x12);
		if ((bus_space_read_1(pa.pa_iot, ioh, 1) & 0x80) != 0)
			sc->sc_flags |= SE_FLAG_RGMII;
		_bus_space_unmap(pa.pa_iot, ioh, 2, NULL);
	} else
		rc = EINVAL;

	/* Restore access to APC registers. */
	pci_conf_write(pa.pa_pc, pa.pa_tag, 0x48, reg);

	return rc;
#endif
	return EINVAL;
}

uint32_t
se_miibus_cmd(struct se_softc *sc, uint32_t ctrl)
{
	int i;
	uint32_t val;

	CSR_WRITE_4(sc, GMIIControl, ctrl);
	DELAY(10);
	for (i = 0; i < SE_TIMEOUT; i++) {
		val = CSR_READ_4(sc, GMIIControl);
		if ((val & GMI_REQ) == 0)
			return val;
		DELAY(10);
	}

	return GMI_REQ;
}

int
se_miibus_readreg(struct device *self, int phy, int reg)
{
	struct se_softc *sc = (struct se_softc *)self;
	uint32_t ctrl, val;

	ctrl = (phy << GMI_PHY_SHIFT) | (reg << GMI_REG_SHIFT) |
	    GMI_OP_RD | GMI_REQ;
	val = se_miibus_cmd(sc, ctrl);
	if ((val & GMI_REQ) != 0) {
		printf("%s: PHY read timeout : %d\n",
		    sc->sc_dev.dv_xname, reg);
		return 0;
	}
	return (val & GMI_DATA) >> GMI_DATA_SHIFT;
}

void
se_miibus_writereg(struct device *self, int phy, int reg, int data)
{
	struct se_softc *sc = (struct se_softc *)self;
	uint32_t ctrl, val;

	ctrl = (phy << GMI_PHY_SHIFT) | (reg << GMI_REG_SHIFT) |
	    GMI_OP_WR | (data << GMI_DATA_SHIFT) | GMI_REQ;
	val = se_miibus_cmd(sc, ctrl);
	if ((val & GMI_REQ) != 0) {
		printf("%s: PHY write timeout : %d\n",
		    sc->sc_dev.dv_xname, reg);
	}
}

void
se_miibus_statchg(struct device *self)
{
	struct se_softc *sc = (struct se_softc *)self;
#ifdef SE_DEBUG
	struct ifnet *ifp = &sc->sc_ac.ac_if;
#endif
	struct mii_data *mii = &sc->sc_mii;
	uint32_t ctl, speed;

	speed = 0;
	sc->sc_flags &= ~SE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
#ifdef SE_DEBUG
			if (ifp->if_flags & IFF_DEBUG)
				printf("%s: 10baseT link\n", ifp->if_xname);
#endif
			sc->sc_flags |= SE_FLAG_LINK;
			speed = SC_SPEED_10;
			break;
		case IFM_100_TX:
#ifdef SE_DEBUG
			if (ifp->if_flags & IFF_DEBUG)
				printf("%s: 100baseTX link\n", ifp->if_xname);
#endif
			sc->sc_flags |= SE_FLAG_LINK;
			speed = SC_SPEED_100;
			break;
		case IFM_1000_T:
#ifdef SE_DEBUG
			if (ifp->if_flags & IFF_DEBUG)
				printf("%s: 1000baseT link\n", ifp->if_xname);
#endif
			if ((sc->sc_flags & SE_FLAG_FASTETHER) == 0) {
				sc->sc_flags |= SE_FLAG_LINK;
				speed = SC_SPEED_1000;
			}
			break;
		default:
			break;
		}
	}
	if ((sc->sc_flags & SE_FLAG_LINK) == 0) {
#ifdef SE_DEBUG
		if (ifp->if_flags & IFF_DEBUG)
			printf("%s: no link\n", ifp->if_xname);
#endif
		return;
	}
	/* Reprogram MAC to resolved speed/duplex/flow-control parameters. */
	ctl = CSR_READ_4(sc, StationControl);
	ctl &= ~(0x0f000000 | SC_FDX | SC_SPEED_MASK);
	if (speed == SC_SPEED_1000)
		ctl |= 0x07000000;
	else
		ctl |= 0x04000000;
#ifdef notyet
	if ((sc->sc_flags & SE_FLAG_GMII) != 0)
		ctl |= 0x03000000;
#endif
	ctl |= speed;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
		ctl |= SC_FDX;
	CSR_WRITE_4(sc, StationControl, ctl);
	if ((sc->sc_flags & SE_FLAG_RGMII) != 0) {
		CSR_WRITE_4(sc, RGMIIDelay, 0x0441);
		CSR_WRITE_4(sc, RGMIIDelay, 0x0440);
	}
}

void
se_iff(struct se_softc *sc)
{
	struct arpcom *ac = &sc->sc_ac;
	struct ifnet *ifp = &ac->ac_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	uint32_t crc, hashes[2];
	uint16_t rxfilt;

	rxfilt = CSR_READ_2(sc, RxMacControl);
	rxfilt &= ~(AcceptAllPhys | AcceptBroadcast | AcceptMulticast);
	ifp->if_flags &= ~IFF_ALLMULTI;

	/*
	 * Always accept broadcast frames.
	 * Always accept frames destined to our station address.
	 */
	rxfilt |= AcceptBroadcast | AcceptMyPhys;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= AcceptAllPhys;
		rxfilt |= AcceptMulticast;
		hashes[0] = hashes[1] = 0xffffffff;
	} else {
		rxfilt |= AcceptMulticast;
		hashes[0] = hashes[1] = 0;

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);

			hashes[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	CSR_WRITE_2(sc, RxMacControl, rxfilt);
	CSR_WRITE_4(sc, RxHashTable, hashes[0]);
	CSR_WRITE_4(sc, RxHashTable2, hashes[1]);
}

void
se_reset(struct se_softc *sc)
{
	CSR_WRITE_4(sc, IntrMask, 0);
	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);

	/* Soft reset. */
	CSR_WRITE_4(sc, IntrControl, 0x8000);
	CSR_READ_4(sc, IntrControl);
	DELAY(100);
	CSR_WRITE_4(sc, IntrControl, 0);
	/* Stop MAC. */
	CSR_WRITE_4(sc, TX_CTL, 0x1a00);
	CSR_WRITE_4(sc, RX_CTL, 0x1a00);

	CSR_WRITE_4(sc, IntrMask, 0);
	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);

	CSR_WRITE_4(sc, GMIIControl, 0);
}

/*
 * Probe for an SiS chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
int
se_match(struct device *parent, void *match, void *aux)
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;

	return pci_matchbyid(pa, se_devices, nitems(se_devices));
}

/*
 * Attach the interface. Do ifmedia setup and ethernet/BPF attach.
 */
void
se_attach(struct device *parent, struct device *self, void *aux)
{
	struct se_softc *sc = (struct se_softc *)self;
	struct arpcom *ac = &sc->sc_ac;
	struct ifnet *ifp = &ac->ac_if;
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	uint8_t eaddr[ETHER_ADDR_LEN];
	const char *intrstr;
	pci_intr_handle_t ih;
	bus_size_t iosize;
	bus_dma_segment_t seg;
	struct se_list_data *ld;
	struct se_chain_data *cd;
	int nseg;
	uint i;
	int rc;

	printf(": ");

	/*
	 * Map control/status registers.
	 */

	rc = pci_mapreg_map(pa, PCI_MAPREG_START, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_iot, &sc->sc_ioh, NULL, &iosize, 0);
	if (rc != 0) {
		printf("can't map i/o space\n");
		return;
	}

	if (pci_intr_map(pa, &ih)) {
		printf("can't map interrupt\n");
		goto fail1;
	}
	intrstr = pci_intr_string(pa->pa_pc, ih);
	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_NET, se_intr, sc,
	    self->dv_xname);
	if (sc->sc_ih == NULL) {
		printf("can't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail1;
	}

	printf("%s", intrstr);

	if (pa->pa_id == PCI_ID_CODE(PCI_VENDOR_SIS, PCI_PRODUCT_SIS_190))
		sc->sc_flags |= SE_FLAG_FASTETHER;

	/* Reset the adapter. */
	se_reset(sc);

	/* Get MAC address from the EEPROM. */
	if ((pci_conf_read(pa->pa_pc, pa->pa_tag, 0x70) & (0x01 << 24)) != 0)
		se_get_mac_addr_apc(sc, eaddr);
	else
		se_get_mac_addr_eeprom(sc, eaddr);
	printf(", address %s\n", ether_sprintf(eaddr));
	bcopy(eaddr, ac->ac_enaddr, ETHER_ADDR_LEN);

	/*
	 * Now do all the DMA mapping stuff
	 */

	sc->sc_dmat = pa->pa_dmat;
	ld = &sc->se_ldata;
	cd = &sc->se_cdata;

	/* First create TX/RX busdma maps. */
	for (i = 0; i < SE_RX_RING_CNT; i++) {
		rc = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT, &cd->se_rx_map[i]);
		if (rc != 0) {
			printf("%s: cannot init the RX map array\n",
			    self->dv_xname);
			goto fail2;
		}
	}

	for (i = 0; i < SE_TX_RING_CNT; i++) {
		rc = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
		    0, BUS_DMA_NOWAIT, &cd->se_tx_map[i]);
		if (rc != 0) {
			printf("%s: cannot init the TX map array\n",
			    self->dv_xname);
			goto fail2;
		}
	}

	/*
	 * Now allocate a chunk of DMA-able memory for RX and TX ring
	 * descriptors, as a contiguous block of memory.
	 * XXX fix deallocation upon error
	 */

	/* RX */
	rc = bus_dmamem_alloc(sc->sc_dmat, SE_RX_RING_SZ, PAGE_SIZE, 0,
	    &seg, 1, &nseg, BUS_DMA_NOWAIT);
	if (rc != 0) {
		printf("%s: no memory for RX descriptors\n", self->dv_xname);
		goto fail2;
	}

	rc = bus_dmamem_map(sc->sc_dmat, &seg, nseg, SE_RX_RING_SZ,
	    (caddr_t *)&ld->se_rx_ring, BUS_DMA_NOWAIT);
	if (rc != 0) {
		printf("%s: can't map RX descriptors\n", self->dv_xname);
		goto fail2;
	}

	rc = bus_dmamap_create(sc->sc_dmat, SE_RX_RING_SZ, 1,
	    SE_RX_RING_SZ, 0, BUS_DMA_NOWAIT, &ld->se_rx_dmamap);
	if (rc != 0) {
		printf("%s: can't alloc RX DMA map\n", self->dv_xname);
		goto fail2;
	}

	rc = bus_dmamap_load(sc->sc_dmat, ld->se_rx_dmamap,
	    (caddr_t)ld->se_rx_ring, SE_RX_RING_SZ, NULL, BUS_DMA_NOWAIT);
	if (rc != 0) {
		printf("%s: can't load RX DMA map\n", self->dv_xname);
		bus_dmamem_unmap(sc->sc_dmat,
		    (caddr_t)ld->se_rx_ring, SE_RX_RING_SZ);
		bus_dmamap_destroy(sc->sc_dmat, ld->se_rx_dmamap);
		bus_dmamem_free(sc->sc_dmat, &seg, nseg);
		goto fail2;
	}

	/* TX */
	rc = bus_dmamem_alloc(sc->sc_dmat, SE_TX_RING_SZ, PAGE_SIZE, 0,
	    &seg, 1, &nseg, BUS_DMA_NOWAIT);
	if (rc != 0) {
		printf("%s: no memory for TX descriptors\n", self->dv_xname);
		goto fail2;
	}

	rc = bus_dmamem_map(sc->sc_dmat, &seg, nseg, SE_TX_RING_SZ,
	    (caddr_t *)&ld->se_tx_ring, BUS_DMA_NOWAIT);
	if (rc != 0) {
		printf("%s: can't map TX descriptors\n", self->dv_xname);
		goto fail2;
	}

	rc = bus_dmamap_create(sc->sc_dmat, SE_TX_RING_SZ, 1,
	    SE_TX_RING_SZ, 0, BUS_DMA_NOWAIT, &ld->se_tx_dmamap);
	if (rc != 0) {
		printf("%s: can't alloc TX DMA map\n", self->dv_xname);
		goto fail2;
	}

	rc = bus_dmamap_load(sc->sc_dmat, ld->se_tx_dmamap,
	    (caddr_t)ld->se_tx_ring, SE_TX_RING_SZ, NULL, BUS_DMA_NOWAIT);
	if (rc != 0) {
		printf("%s: can't load TX DMA map\n", self->dv_xname);
		bus_dmamem_unmap(sc->sc_dmat,
		    (caddr_t)ld->se_tx_ring, SE_TX_RING_SZ);
		bus_dmamap_destroy(sc->sc_dmat, ld->se_tx_dmamap);
		bus_dmamem_free(sc->sc_dmat, &seg, nseg);
		goto fail2;
	}

	timeout_set(&sc->sc_tick_tmo, se_tick, sc);

	ifp = &sc->sc_ac.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = se_ioctl;
	ifp->if_start = se_start;
	ifp->if_watchdog = se_watchdog;
	ifq_init_maxlen(&ifp->if_snd, SE_TX_RING_CNT - 1);
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	ifp->if_capabilities = IFCAP_VLAN_MTU;

	/*
	 * Do MII setup.
	 */

	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = se_miibus_readreg;
	sc->sc_mii.mii_writereg = se_miibus_writereg;
	sc->sc_mii.mii_statchg = se_miibus_statchg;
	ifmedia_init(&sc->sc_mii.mii_media, 0, se_ifmedia_upd,
	    se_ifmedia_sts);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		/* No PHY attached */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL,
		    0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_MANUAL);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);

	/*
	 * Call MI attach routine.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);

	return;

fail2:
	pci_intr_disestablish(pa->pa_pc, sc->sc_ih);
fail1:
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, iosize);
}

int
se_activate(struct device *self, int act)
{
	struct se_softc *sc = (struct se_softc *)self;
	struct ifnet *ifp = &sc->sc_ac.ac_if;

	switch (act) {
	case DVACT_SUSPEND:
		if (ifp->if_flags & IFF_RUNNING)
			se_stop(sc);
		break;
	case DVACT_RESUME:
		if (ifp->if_flags & IFF_UP)
			(void)se_init(ifp);
		break;
	}
	return (0);
}

/*
 * Initialize the TX descriptors.
 */
int
se_list_tx_init(struct se_softc *sc)
{
	struct se_list_data *ld = &sc->se_ldata;
	struct se_chain_data *cd = &sc->se_cdata;

	bzero(ld->se_tx_ring, SE_TX_RING_SZ);
	ld->se_tx_ring[SE_TX_RING_CNT - 1].se_flags = htole32(RING_END);
	bus_dmamap_sync(sc->sc_dmat, ld->se_tx_dmamap, 0, SE_TX_RING_SZ,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	cd->se_tx_prod = 0;
	cd->se_tx_cons = 0;
	cd->se_tx_cnt = 0;

	return 0;
}

int
se_list_tx_free(struct se_softc *sc)
{
	struct se_chain_data *cd = &sc->se_cdata;
	uint i;

	for (i = 0; i < SE_TX_RING_CNT; i++) {
		if (cd->se_tx_mbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, cd->se_tx_map[i]);
			m_free(cd->se_tx_mbuf[i]);
			cd->se_tx_mbuf[i] = NULL;
		}
	}

	return 0;
}

/*
 * Initialize the RX descriptors and allocate mbufs for them.
 */
int
se_list_rx_init(struct se_softc *sc)
{
	struct se_list_data *ld = &sc->se_ldata;
	struct se_chain_data *cd = &sc->se_cdata;
	uint i;

	bzero(ld->se_rx_ring, SE_RX_RING_SZ);
	bus_dmamap_sync(sc->sc_dmat, ld->se_rx_dmamap, 0, SE_RX_RING_SZ,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	for (i = 0; i < SE_RX_RING_CNT; i++) {
		if (se_newbuf(sc, i) != 0)
			return ENOBUFS;
	}

	cd->se_rx_prod = 0;

	return 0;
}

int
se_list_rx_free(struct se_softc *sc)
{
	struct se_chain_data *cd = &sc->se_cdata;
	uint i;

	for (i = 0; i < SE_RX_RING_CNT; i++) {
		if (cd->se_rx_mbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, cd->se_rx_map[i]);
			m_free(cd->se_rx_mbuf[i]);
			cd->se_rx_mbuf[i] = NULL;
		}
	}

	return 0;
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 */
int
se_newbuf(struct se_softc *sc, uint i)
{
#ifdef SE_DEBUG
	struct ifnet *ifp = &sc->sc_ac.ac_if;
#endif
	struct se_list_data *ld = &sc->se_ldata;
	struct se_chain_data *cd = &sc->se_cdata;
	struct se_desc *desc;
	struct mbuf *m;
	int rc;

	m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
	if (m == NULL) {
#ifdef SE_DEBUG
		if (ifp->if_flags & IFF_DEBUG)
			printf("%s: MCLGETL failed\n", ifp->if_xname);
#endif
		return ENOBUFS;
	}
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, SE_RX_BUF_ALIGN);

	rc = bus_dmamap_load_mbuf(sc->sc_dmat, cd->se_rx_map[i],
	    m, BUS_DMA_NOWAIT);
	KASSERT(cd->se_rx_map[i]->dm_nsegs == 1);
	if (rc != 0) {
		m_freem(m);
		return ENOBUFS;
	}
	bus_dmamap_sync(sc->sc_dmat, cd->se_rx_map[i], 0,
	    cd->se_rx_map[i]->dm_mapsize, BUS_DMASYNC_PREREAD);

	cd->se_rx_mbuf[i] = m;
	desc = &ld->se_rx_ring[i];
	desc->se_sts_size = 0;
	desc->se_cmdsts = htole32(RDC_OWN | RDC_INTR);
	desc->se_ptr = htole32((uint32_t)cd->se_rx_map[i]->dm_segs[0].ds_addr);
	desc->se_flags = htole32(cd->se_rx_map[i]->dm_segs[0].ds_len);
	if (i == SE_RX_RING_CNT - 1)
		desc->se_flags |= htole32(RING_END);
	bus_dmamap_sync(sc->sc_dmat, ld->se_rx_dmamap, i * sizeof(*desc),
	    sizeof(*desc), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return 0;
}

void
se_discard_rxbuf(struct se_softc *sc, uint i)
{
	struct se_list_data *ld = &sc->se_ldata;
	struct se_desc *desc;

	desc = &ld->se_rx_ring[i];
	desc->se_sts_size = 0;
	desc->se_cmdsts = htole32(RDC_OWN | RDC_INTR);
	desc->se_flags = htole32(MCLBYTES - SE_RX_BUF_ALIGN);
	if (i == SE_RX_RING_CNT - 1)
		desc->se_flags |= htole32(RING_END);
	bus_dmamap_sync(sc->sc_dmat, ld->se_rx_dmamap, i * sizeof(*desc),
	    sizeof(*desc), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
void
se_rxeof(struct se_softc *sc)
{
	struct mbuf *m;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct se_list_data *ld = &sc->se_ldata;
	struct se_chain_data *cd = &sc->se_cdata;
	struct se_desc *cur_rx;
	uint32_t rxinfo, rxstat;
	uint i;

	bus_dmamap_sync(sc->sc_dmat, ld->se_rx_dmamap, 0, SE_RX_RING_SZ,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	for (i = cd->se_rx_prod; ; SE_INC(i, SE_RX_RING_CNT)) {
		cur_rx = &ld->se_rx_ring[i];
		rxinfo = letoh32(cur_rx->se_cmdsts);
		if ((rxinfo & RDC_OWN) != 0)
			break;
		rxstat = letoh32(cur_rx->se_sts_size);

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if ((rxstat & RDS_CRCOK) == 0 || SE_RX_ERROR(rxstat) != 0 ||
		    SE_RX_NSEGS(rxstat) != 1) {
			/* XXX We don't support multi-segment frames yet. */
			if (ifp->if_flags & IFF_DEBUG)
				printf("%s: rx error %b\n",
				    ifp->if_xname, rxstat, RX_ERR_BITS);
			se_discard_rxbuf(sc, i);
			ifp->if_ierrors++;
			continue;
		}

		/* No errors; receive the packet. */
		bus_dmamap_sync(sc->sc_dmat, cd->se_rx_map[i], 0,
		    cd->se_rx_map[i]->dm_mapsize, BUS_DMASYNC_POSTREAD);
		m = cd->se_rx_mbuf[i];
		if (se_newbuf(sc, i) != 0) {
			se_discard_rxbuf(sc, i);
			ifp->if_iqdrops++;
			continue;
		}
		/*
		 * Account for 10 bytes auto padding which is used
		 * to align IP header on a 32bit boundary. Also note,
		 * CRC bytes are automatically removed by the hardware.
		 */
		m->m_data += SE_RX_PAD_BYTES;
		m->m_pkthdr.len = m->m_len =
		    SE_RX_BYTES(rxstat) - SE_RX_PAD_BYTES;

		ml_enqueue(&ml, m);
	}

	if_input(ifp, &ml);

	cd->se_rx_prod = i;
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */

void
se_txeof(struct se_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct se_list_data *ld = &sc->se_ldata;
	struct se_chain_data *cd = &sc->se_cdata;
	struct se_desc *cur_tx;
	uint32_t txstat;
	uint i;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	bus_dmamap_sync(sc->sc_dmat, ld->se_tx_dmamap, 0, SE_TX_RING_SZ,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	for (i = cd->se_tx_cons; cd->se_tx_cnt > 0;
	    cd->se_tx_cnt--, SE_INC(i, SE_TX_RING_CNT)) {
		cur_tx = &ld->se_tx_ring[i];
		txstat = letoh32(cur_tx->se_cmdsts);
		if ((txstat & TDC_OWN) != 0)
			break;

		ifq_clr_oactive(&ifp->if_snd);

		if (SE_TX_ERROR(txstat) != 0) {
			if (ifp->if_flags & IFF_DEBUG)
				printf("%s: tx error %b\n",
				    ifp->if_xname, txstat, TX_ERR_BITS);
			ifp->if_oerrors++;
			/* TODO: better error differentiation */
		}

		if (cd->se_tx_mbuf[i] != NULL) {
			bus_dmamap_sync(sc->sc_dmat, cd->se_tx_map[i], 0,
			    cd->se_tx_map[i]->dm_mapsize,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, cd->se_tx_map[i]);
			m_free(cd->se_tx_mbuf[i]);
			cd->se_tx_mbuf[i] = NULL;
		}

		cur_tx->se_sts_size = 0;
		cur_tx->se_cmdsts = 0;
		cur_tx->se_ptr = 0;
		cur_tx->se_flags &= htole32(RING_END);
		bus_dmamap_sync(sc->sc_dmat, ld->se_tx_dmamap,
		    i * sizeof(*cur_tx), sizeof(*cur_tx),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	cd->se_tx_cons = i;
	if (cd->se_tx_cnt == 0)
		ifp->if_timer = 0;
}

void
se_tick(void *xsc)
{
	struct se_softc *sc = xsc;
	struct mii_data *mii;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int s;

	s = splnet();
	mii = &sc->sc_mii;
	mii_tick(mii);
	if ((sc->sc_flags & SE_FLAG_LINK) == 0) {
		se_miibus_statchg(&sc->sc_dev);
		if ((sc->sc_flags & SE_FLAG_LINK) != 0 &&
		    !ifq_empty(&ifp->if_snd))
			se_start(ifp);
	}
	splx(s);

	timeout_add_sec(&sc->sc_tick_tmo, 1);
}

int
se_intr(void *arg)
{
	struct se_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	uint32_t status;

	status = CSR_READ_4(sc, IntrStatus);
	if (status == 0xffffffff || (status & SE_INTRS) == 0) {
		/* Not ours. */
		return 0;
	}
	/* Ack interrupts. */
	CSR_WRITE_4(sc, IntrStatus, status);
	/* Disable further interrupts. */
	CSR_WRITE_4(sc, IntrMask, 0);

	for (;;) {
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			break;
		if ((status & (INTR_RX_DONE | INTR_RX_IDLE)) != 0) {
			se_rxeof(sc);
			/* Wakeup Rx MAC. */
			if ((status & INTR_RX_IDLE) != 0)
				CSR_WRITE_4(sc, RX_CTL,
				    0x1a00 | 0x000c | RX_CTL_POLL | RX_CTL_ENB);
		}
		if ((status & (INTR_TX_DONE | INTR_TX_IDLE)) != 0)
			se_txeof(sc);
		status = CSR_READ_4(sc, IntrStatus);
		if ((status & SE_INTRS) == 0)
			break;
		/* Ack interrupts. */
		CSR_WRITE_4(sc, IntrStatus, status);
	}

	if ((ifp->if_flags & IFF_RUNNING) != 0) {
		/* Re-enable interrupts */
		CSR_WRITE_4(sc, IntrMask, SE_INTRS);
		if (!ifq_empty(&ifp->if_snd))
			se_start(ifp);
	}

	return 1;
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
int
se_encap(struct se_softc *sc, struct mbuf *m_head, uint32_t *txidx)
{
#ifdef SE_DEBUG
	struct ifnet *ifp = &sc->sc_ac.ac_if;
#endif
	struct mbuf *m;
	struct se_list_data *ld = &sc->se_ldata;
	struct se_chain_data *cd = &sc->se_cdata;
	struct se_desc *desc;
	uint i, cnt = 0;
	int rc;

	/*
	 * If there's no way we can send any packets, return now.
	 */
	if (SE_TX_RING_CNT - cd->se_tx_cnt < 2) {
#ifdef SE_DEBUG
		if (ifp->if_flags & IFF_DEBUG)
			printf("%s: encap failed, not enough TX desc\n",
			    ifp->if_xname);
#endif
		return ENOBUFS;
	}

	if (m_defrag(m_head, M_DONTWAIT) != 0) {
#ifdef SE_DEBUG
		if (ifp->if_flags & IFF_DEBUG)
			printf("%s: m_defrag failed\n", ifp->if_xname);
#endif
		return ENOBUFS;	/* XXX should not be fatal */
	}

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	i = *txidx;

	for (m = m_head; m != NULL; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		if ((SE_TX_RING_CNT - (cd->se_tx_cnt + cnt)) < 2) {
#ifdef SE_DEBUG
			if (ifp->if_flags & IFF_DEBUG)
				printf("%s: encap failed, not enough TX desc\n",
				    ifp->if_xname);
#endif
			return ENOBUFS;
		}
		cd->se_tx_mbuf[i] = m;
		rc = bus_dmamap_load_mbuf(sc->sc_dmat, cd->se_tx_map[i],
		    m, BUS_DMA_NOWAIT);
		if (rc != 0)
			return ENOBUFS;
		KASSERT(cd->se_tx_map[i]->dm_nsegs == 1);
		bus_dmamap_sync(sc->sc_dmat, cd->se_tx_map[i], 0,
		    cd->se_tx_map[i]->dm_mapsize, BUS_DMASYNC_PREWRITE);

		desc = &ld->se_tx_ring[i];
		desc->se_sts_size = htole32(cd->se_tx_map[i]->dm_segs->ds_len);
		desc->se_ptr =
		    htole32((uint32_t)cd->se_tx_map[i]->dm_segs->ds_addr);
		desc->se_flags = htole32(cd->se_tx_map[i]->dm_segs->ds_len);
		if (i == SE_TX_RING_CNT - 1)
			desc->se_flags |= htole32(RING_END);
		desc->se_cmdsts = htole32(TDC_OWN | TDC_INTR | TDC_DEF |
		    TDC_CRC | TDC_PAD | TDC_BST);
		bus_dmamap_sync(sc->sc_dmat, ld->se_tx_dmamap,
		    i * sizeof(*desc), sizeof(*desc),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		SE_INC(i, SE_TX_RING_CNT);
		cnt++;
	}

	/* can't happen */
	if (m != NULL)
		return ENOBUFS;

	cd->se_tx_cnt += cnt;
	*txidx = i;

	return 0;
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */
void
se_start(struct ifnet *ifp)
{
	struct se_softc *sc = ifp->if_softc;
	struct mbuf *m_head = NULL;
	struct se_chain_data *cd = &sc->se_cdata;
	uint i, queued = 0;

	if ((sc->sc_flags & SE_FLAG_LINK) == 0 ||
	    !(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd)) {
#ifdef SE_DEBUG
		if (ifp->if_flags & IFF_DEBUG)
			printf("%s: can't tx, flags 0x%x 0x%04x\n",
			    ifp->if_xname, sc->sc_flags, (uint)ifp->if_flags);
#endif
		return;
	}

	i = cd->se_tx_prod;

	while (cd->se_tx_mbuf[i] == NULL) {
		m_head = ifq_deq_begin(&ifp->if_snd);
		if (m_head == NULL)
			break;

		if (se_encap(sc, m_head, &i) != 0) {
			ifq_deq_rollback(&ifp->if_snd, m_head);
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		/* now we are committed to transmit the packet */
		ifq_deq_commit(&ifp->if_snd, m_head);
		queued++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
	}

	if (queued > 0) {
		/* Transmit */
		cd->se_tx_prod = i;
		CSR_WRITE_4(sc, TX_CTL, 0x1a00 | TX_CTL_ENB | TX_CTL_POLL);
		ifp->if_timer = 5;
	}
}

int
se_init(struct ifnet *ifp)
{
	struct se_softc *sc = ifp->if_softc;
	uint16_t rxfilt;
	int i;

	splassert(IPL_NET);

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	se_stop(sc);
	se_reset(sc);

	/* Init circular RX list. */
	if (se_list_rx_init(sc) == ENOBUFS) {
		se_stop(sc);	/* XXX necessary? */
		return ENOBUFS;
	}

	/* Init TX descriptors. */
	se_list_tx_init(sc);

	/*
	 * Load the address of the RX and TX lists.
	 */
	CSR_WRITE_4(sc, TX_DESC,
	    (uint32_t)sc->se_ldata.se_tx_dmamap->dm_segs[0].ds_addr);
	CSR_WRITE_4(sc, RX_DESC,
	    (uint32_t)sc->se_ldata.se_rx_dmamap->dm_segs[0].ds_addr);

	CSR_WRITE_4(sc, TxMacControl, 0x60);
	CSR_WRITE_4(sc, RxWakeOnLan, 0);
	CSR_WRITE_4(sc, RxWakeOnLanData, 0);
	CSR_WRITE_2(sc, RxMPSControl, ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN +
	    SE_RX_PAD_BYTES);

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, RxMacAddr + i, sc->sc_ac.ac_enaddr[i]);
	/* Configure RX MAC. */
	rxfilt = RXMAC_STRIP_FCS | RXMAC_PAD_ENB | RXMAC_CSUM_ENB;
	CSR_WRITE_2(sc, RxMacControl, rxfilt);

	/* Program promiscuous mode and multicast filters. */
	se_iff(sc);

	/*
	 * Clear and enable interrupts.
	 */
	CSR_WRITE_4(sc, IntrStatus, 0xFFFFFFFF);
	CSR_WRITE_4(sc, IntrMask, SE_INTRS);

	/* Enable receiver and transmitter. */
	CSR_WRITE_4(sc, TX_CTL, 0x1a00 | TX_CTL_ENB);
	CSR_WRITE_4(sc, RX_CTL, 0x1a00 | 0x000c | RX_CTL_POLL | RX_CTL_ENB);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	sc->sc_flags &= ~SE_FLAG_LINK;
	mii_mediachg(&sc->sc_mii);
	timeout_add_sec(&sc->sc_tick_tmo, 1);

	return 0;
}

/*
 * Set media options.
 */
int
se_ifmedia_upd(struct ifnet *ifp)
{
	struct se_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = &sc->sc_mii;
	sc->sc_flags &= ~SE_FLAG_LINK;
	if (mii->mii_instance) {
		struct mii_softc *miisc;
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	return mii_mediachg(mii);
}

/*
 * Report current media status.
 */
void
se_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct se_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = &sc->sc_mii;
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

int
se_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct se_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, rc = 0;

	s = splnet();

	switch (command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			rc = se_init(ifp);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				rc = ENETRESET;
			else
				rc = se_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				se_stop(sc);
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		rc = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, command);
		break;
	default:
		rc = ether_ioctl(ifp, &sc->sc_ac, command, data);
		break;
	}

	if (rc == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			se_iff(sc);
		rc = 0;
	}

	splx(s);
	return rc;
}

void
se_watchdog(struct ifnet *ifp)
{
	struct se_softc *sc = ifp->if_softc;
	int s;

	printf("%s: watchdog timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;

	s = splnet();
	se_init(ifp);
	if (!ifq_empty(&ifp->if_snd))
		se_start(ifp);
	splx(s);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
void
se_stop(struct se_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	timeout_del(&sc->sc_tick_tmo);
	mii_down(&sc->sc_mii);

	CSR_WRITE_4(sc, IntrMask, 0);
	CSR_READ_4(sc, IntrMask);
	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);
	/* Stop TX/RX MAC. */
	CSR_WRITE_4(sc, TX_CTL, 0x1a00);
	CSR_WRITE_4(sc, RX_CTL, 0x1a00);
	/* XXX Can we assume active DMA cycles gone? */
	DELAY(2000);
	CSR_WRITE_4(sc, IntrMask, 0);
	CSR_WRITE_4(sc, IntrStatus, 0xffffffff);

	sc->sc_flags &= ~SE_FLAG_LINK;
	se_list_rx_free(sc);
	se_list_tx_free(sc);
}