1*9f9778c7Sandvar /* $NetBSD: pq3etsec.c,v 1.61 2025/01/07 17:39:45 andvar Exp $ */ 2b8ea2c8cSmatt /*- 3b8ea2c8cSmatt * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc. 4b8ea2c8cSmatt * All rights reserved. 5b8ea2c8cSmatt * 6b8ea2c8cSmatt * This code is derived from software contributed to The NetBSD Foundation 7b8ea2c8cSmatt * by Raytheon BBN Technologies Corp and Defense Advanced Research Projects 8b8ea2c8cSmatt * Agency and which was developed by Matt Thomas of 3am Software Foundry. 9b8ea2c8cSmatt * 10b8ea2c8cSmatt * This material is based upon work supported by the Defense Advanced Research 11b8ea2c8cSmatt * Projects Agency and Space and Naval Warfare Systems Center, Pacific, under 12b8ea2c8cSmatt * Contract No. N66001-09-C-2073. 13b8ea2c8cSmatt * Approved for Public Release, Distribution Unlimited 14b8ea2c8cSmatt * 15b8ea2c8cSmatt * Redistribution and use in source and binary forms, with or without 16b8ea2c8cSmatt * modification, are permitted provided that the following conditions 17b8ea2c8cSmatt * are met: 18b8ea2c8cSmatt * 1. Redistributions of source code must retain the above copyright 19b8ea2c8cSmatt * notice, this list of conditions and the following disclaimer. 20b8ea2c8cSmatt * 2. Redistributions in binary form must reproduce the above copyright 21b8ea2c8cSmatt * notice, this list of conditions and the following disclaimer in the 22b8ea2c8cSmatt * documentation and/or other materials provided with the distribution. 23b8ea2c8cSmatt * 24b8ea2c8cSmatt * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 25b8ea2c8cSmatt * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 26b8ea2c8cSmatt * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 27b8ea2c8cSmatt * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 28b8ea2c8cSmatt * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29b8ea2c8cSmatt * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30b8ea2c8cSmatt * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31b8ea2c8cSmatt * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32b8ea2c8cSmatt * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33b8ea2c8cSmatt * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34b8ea2c8cSmatt * POSSIBILITY OF SUCH DAMAGE. 35b8ea2c8cSmatt */ 36b8ea2c8cSmatt 3716031f7dSrin #include <sys/cdefs.h> 38*9f9778c7Sandvar __KERNEL_RCSID(0, "$NetBSD: pq3etsec.c,v 1.61 2025/01/07 17:39:45 andvar Exp $"); 3916031f7dSrin 4016031f7dSrin #ifdef _KERNEL_OPT 41b8ea2c8cSmatt #include "opt_inet.h" 4220c73c17Smatt #include "opt_mpc85xx.h" 435e43e8ebSnonaka #include "opt_multiprocessor.h" 445e43e8ebSnonaka #include "opt_net_mpsafe.h" 4516031f7dSrin #endif 4603c42f4dSmatt 47b8ea2c8cSmatt #include <sys/param.h> 48b8ea2c8cSmatt #include <sys/cpu.h> 49b8ea2c8cSmatt #include <sys/device.h> 50b8ea2c8cSmatt #include <sys/mbuf.h> 51b8ea2c8cSmatt #include <sys/ioctl.h> 52b8ea2c8cSmatt #include <sys/intr.h> 53b8ea2c8cSmatt #include <sys/bus.h> 54b8ea2c8cSmatt #include <sys/kernel.h> 55b8ea2c8cSmatt #include <sys/kmem.h> 56b8ea2c8cSmatt #include <sys/proc.h> 57b8ea2c8cSmatt #include <sys/atomic.h> 58b8ea2c8cSmatt #include <sys/callout.h> 596e15a820Snonaka #include <sys/sysctl.h> 60b8ea2c8cSmatt 6112a7d27eSrin #include <sys/rndsource.h> 6212a7d27eSrin 63b8ea2c8cSmatt #include <net/if.h> 64b8ea2c8cSmatt #include <net/if_dl.h> 65b8ea2c8cSmatt #include <net/if_ether.h> 66b8ea2c8cSmatt #include <net/if_media.h> 6787d4693bSmsaitoh #include <net/bpf.h> 68b8ea2c8cSmatt 69b8ea2c8cSmatt #include <dev/mii/miivar.h> 70b8ea2c8cSmatt 71b8ea2c8cSmatt #ifdef INET 72b8ea2c8cSmatt #include <netinet/in.h> 
73b8ea2c8cSmatt #include <netinet/in_systm.h> 74b8ea2c8cSmatt #include <netinet/ip.h> 75b8ea2c8cSmatt #include <netinet/in_offload.h> 76b8ea2c8cSmatt #endif /* INET */ 77b8ea2c8cSmatt #ifdef INET6 78b8ea2c8cSmatt #include <netinet6/in6.h> 79b8ea2c8cSmatt #include <netinet/ip6.h> 80b8ea2c8cSmatt #endif 81b8ea2c8cSmatt #include <netinet6/in6_offload.h> 82b8ea2c8cSmatt 83b8ea2c8cSmatt #include <powerpc/spr.h> 84b8ea2c8cSmatt #include <powerpc/booke/spr.h> 85b8ea2c8cSmatt #include <powerpc/booke/cpuvar.h> 86b8ea2c8cSmatt #include <powerpc/booke/e500var.h> 87b8ea2c8cSmatt #include <powerpc/booke/e500reg.h> 88b8ea2c8cSmatt #include <powerpc/booke/etsecreg.h> 89b8ea2c8cSmatt 90b8ea2c8cSmatt #define M_HASFCB M_LINK2 /* tx packet has FCB prepended */ 91b8ea2c8cSmatt 92b8ea2c8cSmatt #define ETSEC_MAXTXMBUFS 30 93b8ea2c8cSmatt #define ETSEC_NTXSEGS 30 94b8ea2c8cSmatt #define ETSEC_MAXRXMBUFS 511 95b8ea2c8cSmatt #define ETSEC_MINRXMBUFS 32 96b8ea2c8cSmatt #define ETSEC_NRXSEGS 1 97b8ea2c8cSmatt 98b8ea2c8cSmatt #define IFCAP_RCTRL_IPCSEN IFCAP_CSUM_IPv4_Rx 99b8ea2c8cSmatt #define IFCAP_RCTRL_TUCSEN (IFCAP_CSUM_TCPv4_Rx \ 100b8ea2c8cSmatt | IFCAP_CSUM_UDPv4_Rx \ 101b8ea2c8cSmatt | IFCAP_CSUM_TCPv6_Rx \ 102b8ea2c8cSmatt | IFCAP_CSUM_UDPv6_Rx) 103b8ea2c8cSmatt 104b8ea2c8cSmatt #define IFCAP_TCTRL_IPCSEN IFCAP_CSUM_IPv4_Tx 105b8ea2c8cSmatt #define IFCAP_TCTRL_TUCSEN (IFCAP_CSUM_TCPv4_Tx \ 106b8ea2c8cSmatt | IFCAP_CSUM_UDPv4_Tx \ 107b8ea2c8cSmatt | IFCAP_CSUM_TCPv6_Tx \ 108b8ea2c8cSmatt | IFCAP_CSUM_UDPv6_Tx) 109b8ea2c8cSmatt 110b8ea2c8cSmatt #define IFCAP_ETSEC (IFCAP_RCTRL_IPCSEN | IFCAP_RCTRL_TUCSEN \ 111b8ea2c8cSmatt | IFCAP_TCTRL_IPCSEN | IFCAP_TCTRL_TUCSEN) 112b8ea2c8cSmatt 113b8ea2c8cSmatt #define M_CSUM_IP (M_CSUM_CIP | M_CSUM_CTU) 114b8ea2c8cSmatt #define M_CSUM_IP6 (M_CSUM_TCPv6 | M_CSUM_UDPv6) 115b8ea2c8cSmatt #define M_CSUM_TUP (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv6) 116b8ea2c8cSmatt #define M_CSUM_UDP (M_CSUM_UDPv4 | M_CSUM_UDPv6) 117b8ea2c8cSmatt 
#define M_CSUM_IP4 (M_CSUM_IPv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4) 118b8ea2c8cSmatt #define M_CSUM_CIP (M_CSUM_IPv4) 119b8ea2c8cSmatt #define M_CSUM_CTU (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv6) 120b8ea2c8cSmatt 121b8ea2c8cSmatt struct pq3etsec_txqueue { 122b8ea2c8cSmatt bus_dmamap_t txq_descmap; 123b8ea2c8cSmatt volatile struct txbd *txq_consumer; 124b8ea2c8cSmatt volatile struct txbd *txq_producer; 125b8ea2c8cSmatt volatile struct txbd *txq_first; 126b8ea2c8cSmatt volatile struct txbd *txq_last; 127b8ea2c8cSmatt struct ifqueue txq_mbufs; 128b8ea2c8cSmatt struct mbuf *txq_next; 129b8ea2c8cSmatt #ifdef ETSEC_DEBUG 130b8ea2c8cSmatt struct mbuf *txq_lmbufs[512]; 131b8ea2c8cSmatt #endif 132b8ea2c8cSmatt uint32_t txq_qmask; 133b8ea2c8cSmatt uint32_t txq_free; 134b8ea2c8cSmatt uint32_t txq_threshold; 135b8ea2c8cSmatt uint32_t txq_lastintr; 136b8ea2c8cSmatt bus_size_t txq_reg_tbase; 137b8ea2c8cSmatt bus_dma_segment_t txq_descmap_seg; 138b8ea2c8cSmatt }; 139b8ea2c8cSmatt 140b8ea2c8cSmatt struct pq3etsec_rxqueue { 141b8ea2c8cSmatt bus_dmamap_t rxq_descmap; 142b8ea2c8cSmatt volatile struct rxbd *rxq_consumer; 143b8ea2c8cSmatt volatile struct rxbd *rxq_producer; 144b8ea2c8cSmatt volatile struct rxbd *rxq_first; 145b8ea2c8cSmatt volatile struct rxbd *rxq_last; 146b8ea2c8cSmatt struct mbuf *rxq_mhead; 147b8ea2c8cSmatt struct mbuf **rxq_mtail; 148b8ea2c8cSmatt struct mbuf *rxq_mconsumer; 149b8ea2c8cSmatt #ifdef ETSEC_DEBUG 150b8ea2c8cSmatt struct mbuf *rxq_mbufs[512]; 151b8ea2c8cSmatt #endif 152b8ea2c8cSmatt uint32_t rxq_qmask; 153b8ea2c8cSmatt uint32_t rxq_inuse; 154b8ea2c8cSmatt uint32_t rxq_threshold; 155b8ea2c8cSmatt bus_size_t rxq_reg_rbase; 156b8ea2c8cSmatt bus_size_t rxq_reg_rbptr; 157b8ea2c8cSmatt bus_dma_segment_t rxq_descmap_seg; 158b8ea2c8cSmatt }; 159b8ea2c8cSmatt 160b8ea2c8cSmatt struct pq3etsec_mapcache { 161b8ea2c8cSmatt u_int dmc_nmaps; 162b8ea2c8cSmatt u_int dmc_maxseg; 163b8ea2c8cSmatt u_int dmc_maxmaps; 164b8ea2c8cSmatt u_int dmc_maxmapsize; 
165b8ea2c8cSmatt bus_dmamap_t dmc_maps[0]; 166b8ea2c8cSmatt }; 167b8ea2c8cSmatt 168b8ea2c8cSmatt struct pq3etsec_softc { 169b8ea2c8cSmatt device_t sc_dev; 17020d6accbSmatt device_t sc_mdio_dev; 171b8ea2c8cSmatt struct ethercom sc_ec; 172b8ea2c8cSmatt #define sc_if sc_ec.ec_if 173b8ea2c8cSmatt struct mii_data sc_mii; 174b8ea2c8cSmatt bus_space_tag_t sc_bst; 175b8ea2c8cSmatt bus_space_handle_t sc_bsh; 17620c73c17Smatt bus_space_handle_t sc_mdio_bsh; 177b8ea2c8cSmatt bus_dma_tag_t sc_dmat; 178b8ea2c8cSmatt int sc_phy_addr; 179b8ea2c8cSmatt prop_dictionary_t sc_intrmap; 180b8ea2c8cSmatt uint32_t sc_intrmask; 181b8ea2c8cSmatt 182b8ea2c8cSmatt uint32_t sc_soft_flags; 183b8ea2c8cSmatt #define SOFT_RESET 0x0001 184b8ea2c8cSmatt #define SOFT_RXINTR 0x0010 185b8ea2c8cSmatt #define SOFT_RXBSY 0x0020 186b8ea2c8cSmatt #define SOFT_TXINTR 0x0100 187b8ea2c8cSmatt #define SOFT_TXERROR 0x0200 188b8ea2c8cSmatt 189b8ea2c8cSmatt struct pq3etsec_txqueue sc_txq; 190b8ea2c8cSmatt struct pq3etsec_rxqueue sc_rxq; 191b8ea2c8cSmatt uint32_t sc_txerrors; 192b8ea2c8cSmatt uint32_t sc_rxerrors; 193b8ea2c8cSmatt 194b8ea2c8cSmatt size_t sc_rx_adjlen; 195b8ea2c8cSmatt 196b8ea2c8cSmatt /* 197b8ea2c8cSmatt * Copies of various ETSEC registers. 
198b8ea2c8cSmatt */ 199b8ea2c8cSmatt uint32_t sc_imask; 200b8ea2c8cSmatt uint32_t sc_maccfg1; 201b8ea2c8cSmatt uint32_t sc_maccfg2; 202b8ea2c8cSmatt uint32_t sc_maxfrm; 203b8ea2c8cSmatt uint32_t sc_ecntrl; 204b8ea2c8cSmatt uint32_t sc_dmactrl; 205b8ea2c8cSmatt uint32_t sc_macstnaddr1; 206b8ea2c8cSmatt uint32_t sc_macstnaddr2; 207b8ea2c8cSmatt uint32_t sc_tctrl; 208b8ea2c8cSmatt uint32_t sc_rctrl; 209b8ea2c8cSmatt uint32_t sc_gaddr[16]; 210b8ea2c8cSmatt uint64_t sc_macaddrs[15]; 211b8ea2c8cSmatt 212b8ea2c8cSmatt void *sc_tx_ih; 213b8ea2c8cSmatt void *sc_rx_ih; 214b8ea2c8cSmatt void *sc_error_ih; 215b8ea2c8cSmatt void *sc_soft_ih; 216b8ea2c8cSmatt 217b8ea2c8cSmatt kmutex_t *sc_lock; 2181b7941c2Snonaka kmutex_t *sc_hwlock; 219b8ea2c8cSmatt 220b8ea2c8cSmatt struct evcnt sc_ev_tx_stall; 221b8ea2c8cSmatt struct evcnt sc_ev_tx_intr; 222b8ea2c8cSmatt struct evcnt sc_ev_rx_stall; 223b8ea2c8cSmatt struct evcnt sc_ev_rx_intr; 224b8ea2c8cSmatt struct evcnt sc_ev_error_intr; 225b8ea2c8cSmatt struct evcnt sc_ev_soft_intr; 226b8ea2c8cSmatt struct evcnt sc_ev_tx_pause; 227b8ea2c8cSmatt struct evcnt sc_ev_rx_pause; 228b8ea2c8cSmatt struct evcnt sc_ev_mii_ticks; 229b8ea2c8cSmatt 230b8ea2c8cSmatt struct callout sc_mii_callout; 231b8ea2c8cSmatt uint64_t sc_mii_last_tick; 232b8ea2c8cSmatt 233b8ea2c8cSmatt struct ifqueue sc_rx_bufcache; 234b8ea2c8cSmatt struct pq3etsec_mapcache *sc_rx_mapcache; 235b8ea2c8cSmatt struct pq3etsec_mapcache *sc_tx_mapcache; 2366e15a820Snonaka 2376e15a820Snonaka /* Interrupt Coalescing parameters */ 2386e15a820Snonaka int sc_ic_rx_time; 2396e15a820Snonaka int sc_ic_rx_count; 2406e15a820Snonaka int sc_ic_tx_time; 2416e15a820Snonaka int sc_ic_tx_count; 24212a7d27eSrin 24312a7d27eSrin krndsource_t rnd_source; 244b8ea2c8cSmatt }; 245b8ea2c8cSmatt 2466e15a820Snonaka #define ETSEC_IC_RX_ENABLED(sc) \ 2476e15a820Snonaka ((sc)->sc_ic_rx_time != 0 && (sc)->sc_ic_rx_count != 0) 2486e15a820Snonaka #define ETSEC_IC_TX_ENABLED(sc) \ 2496e15a820Snonaka ((sc)->sc_ic_tx_time 
!= 0 && (sc)->sc_ic_tx_count != 0) 2506e15a820Snonaka 25120d6accbSmatt struct pq3mdio_softc { 25220d6accbSmatt device_t mdio_dev; 25320d6accbSmatt 25420d6accbSmatt bus_space_tag_t mdio_bst; 25520d6accbSmatt bus_space_handle_t mdio_bsh; 25620d6accbSmatt }; 25720d6accbSmatt 258b8ea2c8cSmatt static int pq3etsec_match(device_t, cfdata_t, void *); 259b8ea2c8cSmatt static void pq3etsec_attach(device_t, device_t, void *); 260b8ea2c8cSmatt 26120d6accbSmatt static int pq3mdio_match(device_t, cfdata_t, void *); 26220d6accbSmatt static void pq3mdio_attach(device_t, device_t, void *); 26320d6accbSmatt 264b8ea2c8cSmatt static void pq3etsec_ifstart(struct ifnet *); 265b8ea2c8cSmatt static void pq3etsec_ifwatchdog(struct ifnet *); 266b8ea2c8cSmatt static int pq3etsec_ifinit(struct ifnet *); 267b8ea2c8cSmatt static void pq3etsec_ifstop(struct ifnet *, int); 268b8ea2c8cSmatt static int pq3etsec_ifioctl(struct ifnet *, u_long, void *); 269b8ea2c8cSmatt 270b8ea2c8cSmatt static int pq3etsec_mapcache_create(struct pq3etsec_softc *, 2714fe92317Smatt struct pq3etsec_mapcache **, size_t, size_t, size_t); 272b8ea2c8cSmatt static void pq3etsec_mapcache_destroy(struct pq3etsec_softc *, 273b8ea2c8cSmatt struct pq3etsec_mapcache *); 274b8ea2c8cSmatt static bus_dmamap_t pq3etsec_mapcache_get(struct pq3etsec_softc *, 275b8ea2c8cSmatt struct pq3etsec_mapcache *); 276b8ea2c8cSmatt static void pq3etsec_mapcache_put(struct pq3etsec_softc *, 277b8ea2c8cSmatt struct pq3etsec_mapcache *, bus_dmamap_t); 278b8ea2c8cSmatt 279b8ea2c8cSmatt static int pq3etsec_txq_attach(struct pq3etsec_softc *, 280b8ea2c8cSmatt struct pq3etsec_txqueue *, u_int); 281b8ea2c8cSmatt static void pq3etsec_txq_purge(struct pq3etsec_softc *, 282b8ea2c8cSmatt struct pq3etsec_txqueue *); 283b8ea2c8cSmatt static void pq3etsec_txq_reset(struct pq3etsec_softc *, 284b8ea2c8cSmatt struct pq3etsec_txqueue *); 285b8ea2c8cSmatt static bool pq3etsec_txq_consume(struct pq3etsec_softc *, 286b8ea2c8cSmatt struct pq3etsec_txqueue *); 
287b8ea2c8cSmatt static bool pq3etsec_txq_produce(struct pq3etsec_softc *, 288b8ea2c8cSmatt struct pq3etsec_txqueue *, struct mbuf *m); 289b8ea2c8cSmatt static bool pq3etsec_txq_active_p(struct pq3etsec_softc *, 290b8ea2c8cSmatt struct pq3etsec_txqueue *); 291b8ea2c8cSmatt 292b8ea2c8cSmatt static int pq3etsec_rxq_attach(struct pq3etsec_softc *, 293b8ea2c8cSmatt struct pq3etsec_rxqueue *, u_int); 294b8ea2c8cSmatt static bool pq3etsec_rxq_produce(struct pq3etsec_softc *, 295b8ea2c8cSmatt struct pq3etsec_rxqueue *); 296b8ea2c8cSmatt static void pq3etsec_rxq_purge(struct pq3etsec_softc *, 297b8ea2c8cSmatt struct pq3etsec_rxqueue *, bool); 298b8ea2c8cSmatt static void pq3etsec_rxq_reset(struct pq3etsec_softc *, 299b8ea2c8cSmatt struct pq3etsec_rxqueue *); 300b8ea2c8cSmatt 301b8ea2c8cSmatt static void pq3etsec_mc_setup(struct pq3etsec_softc *); 302b8ea2c8cSmatt 303b8ea2c8cSmatt static void pq3etsec_mii_tick(void *); 304b8ea2c8cSmatt static int pq3etsec_rx_intr(void *); 305b8ea2c8cSmatt static int pq3etsec_tx_intr(void *); 306b8ea2c8cSmatt static int pq3etsec_error_intr(void *); 307b8ea2c8cSmatt static void pq3etsec_soft_intr(void *); 308b8ea2c8cSmatt 3096e15a820Snonaka static void pq3etsec_set_ic_rx(struct pq3etsec_softc *); 3106e15a820Snonaka static void pq3etsec_set_ic_tx(struct pq3etsec_softc *); 3116e15a820Snonaka 3126e15a820Snonaka static void pq3etsec_sysctl_setup(struct sysctllog **, struct pq3etsec_softc *); 3136e15a820Snonaka 314b8ea2c8cSmatt CFATTACH_DECL_NEW(pq3etsec, sizeof(struct pq3etsec_softc), 315b8ea2c8cSmatt pq3etsec_match, pq3etsec_attach, NULL, NULL); 316b8ea2c8cSmatt 31720d6accbSmatt CFATTACH_DECL_NEW(pq3mdio_tsec, sizeof(struct pq3mdio_softc), 31820d6accbSmatt pq3mdio_match, pq3mdio_attach, NULL, NULL); 31920d6accbSmatt 32020d6accbSmatt CFATTACH_DECL_NEW(pq3mdio_cpunode, sizeof(struct pq3mdio_softc), 32120d6accbSmatt pq3mdio_match, pq3mdio_attach, NULL, NULL); 32220d6accbSmatt 32320d6accbSmatt static inline uint32_t 32420d6accbSmatt 
etsec_mdio_read(struct pq3mdio_softc *mdio, bus_size_t off) 325b8ea2c8cSmatt { 32620d6accbSmatt return bus_space_read_4(mdio->mdio_bst, mdio->mdio_bsh, off); 32720d6accbSmatt } 328b8ea2c8cSmatt 32920d6accbSmatt static inline void 33020d6accbSmatt etsec_mdio_write(struct pq3mdio_softc *mdio, bus_size_t off, uint32_t data) 33120d6accbSmatt { 33220d6accbSmatt bus_space_write_4(mdio->mdio_bst, mdio->mdio_bsh, off, data); 333b8ea2c8cSmatt } 334b8ea2c8cSmatt 335b8ea2c8cSmatt static inline uint32_t 336b8ea2c8cSmatt etsec_read(struct pq3etsec_softc *sc, bus_size_t off) 337b8ea2c8cSmatt { 338b8ea2c8cSmatt return bus_space_read_4(sc->sc_bst, sc->sc_bsh, off); 339b8ea2c8cSmatt } 340b8ea2c8cSmatt 34120d6accbSmatt static int 34220d6accbSmatt pq3mdio_find(device_t parent, cfdata_t cf, const int *ldesc, void *aux) 34320d6accbSmatt { 34420d6accbSmatt return strcmp(cf->cf_name, "mdio") == 0; 34520d6accbSmatt } 34620d6accbSmatt 34720d6accbSmatt static int 34820d6accbSmatt pq3mdio_match(device_t parent, cfdata_t cf, void *aux) 34920d6accbSmatt { 35020d6accbSmatt const uint16_t svr = (mfspr(SPR_SVR) & ~0x80000) >> 16; 35120d6accbSmatt const bool p1025_p = (svr == (SVR_P1025v1 >> 16) 35220d6accbSmatt || svr == (SVR_P1016v1 >> 16)); 35320d6accbSmatt 35420d6accbSmatt if (device_is_a(parent, "cpunode")) { 355f3b7cf62Snonaka if (!p1025_p 35620d6accbSmatt || !e500_cpunode_submatch(parent, cf, cf->cf_name, aux)) 35720d6accbSmatt return 0; 35820d6accbSmatt 35920d6accbSmatt return 1; 36020d6accbSmatt } 36120d6accbSmatt 36220d6accbSmatt if (device_is_a(parent, "tsec")) { 363f3b7cf62Snonaka if (p1025_p 36420d6accbSmatt || !e500_cpunode_submatch(parent, cf, cf->cf_name, aux)) 36520d6accbSmatt return 0; 36620d6accbSmatt 36720d6accbSmatt return 1; 36820d6accbSmatt } 36920d6accbSmatt 37020d6accbSmatt return 0; 37120d6accbSmatt } 37220d6accbSmatt 37320d6accbSmatt static void 37420d6accbSmatt pq3mdio_attach(device_t parent, device_t self, void *aux) 37520d6accbSmatt { 37620d6accbSmatt struct 
pq3mdio_softc * const mdio = device_private(self); 37720d6accbSmatt struct cpunode_attach_args * const cna = aux; 37820d6accbSmatt struct cpunode_locators * const cnl = &cna->cna_locs; 37920d6accbSmatt 38020d6accbSmatt mdio->mdio_dev = self; 38120d6accbSmatt 38220d6accbSmatt if (device_is_a(parent, "cpunode")) { 38320d6accbSmatt struct cpunode_softc * const psc = device_private(parent); 38420d6accbSmatt psc->sc_children |= cna->cna_childmask; 38520d6accbSmatt 38620d6accbSmatt mdio->mdio_bst = cna->cna_memt; 38720d6accbSmatt if (bus_space_map(mdio->mdio_bst, cnl->cnl_addr, 38820d6accbSmatt cnl->cnl_size, 0, &mdio->mdio_bsh) != 0) { 38920d6accbSmatt aprint_error(": error mapping registers @ %#x\n", 39020d6accbSmatt cnl->cnl_addr); 39120d6accbSmatt return; 39220d6accbSmatt } 39320d6accbSmatt } else { 39420d6accbSmatt struct pq3etsec_softc * const sc = device_private(parent); 39520d6accbSmatt 39620d6accbSmatt KASSERT(device_is_a(parent, "tsec")); 39720d6accbSmatt KASSERTMSG(cnl->cnl_addr == ETSEC1_BASE 39820d6accbSmatt || cnl->cnl_addr == ETSEC2_BASE 39920d6accbSmatt || cnl->cnl_addr == ETSEC3_BASE 40020d6accbSmatt || cnl->cnl_addr == ETSEC4_BASE, 40120d6accbSmatt "unknown tsec addr %x", cnl->cnl_addr); 40220d6accbSmatt 40320d6accbSmatt mdio->mdio_bst = sc->sc_bst; 40420d6accbSmatt mdio->mdio_bsh = sc->sc_bsh; 40520d6accbSmatt } 40620d6accbSmatt 40720d6accbSmatt aprint_normal("\n"); 40820d6accbSmatt } 40920d6accbSmatt 41020d6accbSmatt static int 411a5cdd4b4Smsaitoh pq3mdio_mii_readreg(device_t self, int phy, int reg, uint16_t *val) 41220d6accbSmatt { 41320d6accbSmatt struct pq3mdio_softc * const mdio = device_private(self); 41420d6accbSmatt uint32_t miimcom = etsec_mdio_read(mdio, MIIMCOM); 41520d6accbSmatt 41620d6accbSmatt etsec_mdio_write(mdio, MIIMADD, 41720d6accbSmatt __SHIFTIN(phy, MIIMADD_PHY) | __SHIFTIN(reg, MIIMADD_REG)); 41820d6accbSmatt 41920d6accbSmatt etsec_mdio_write(mdio, MIIMCOM, 0); /* clear any past bits */ 42020d6accbSmatt etsec_mdio_write(mdio, 
MIIMCOM, MIIMCOM_READ); 42120d6accbSmatt 42220d6accbSmatt while (etsec_mdio_read(mdio, MIIMIND) != 0) { 42320d6accbSmatt delay(1); 42420d6accbSmatt } 425a5cdd4b4Smsaitoh *val = etsec_mdio_read(mdio, MIIMSTAT) &0xffff; 42620d6accbSmatt 42720d6accbSmatt if (miimcom == MIIMCOM_SCAN) 42820d6accbSmatt etsec_mdio_write(mdio, MIIMCOM, miimcom); 42920d6accbSmatt 43020d6accbSmatt #if 0 43120d6accbSmatt aprint_normal_dev(mdio->mdio_dev, "%s: phy %d reg %d: %#x\n", 43220d6accbSmatt __func__, phy, reg, data); 43320d6accbSmatt #endif 434a5cdd4b4Smsaitoh return 0; 43520d6accbSmatt } 43620d6accbSmatt 437a5cdd4b4Smsaitoh static int 438a5cdd4b4Smsaitoh pq3mdio_mii_writereg(device_t self, int phy, int reg, uint16_t data) 43920d6accbSmatt { 44020d6accbSmatt struct pq3mdio_softc * const mdio = device_private(self); 44120d6accbSmatt uint32_t miimcom = etsec_mdio_read(mdio, MIIMCOM); 44220d6accbSmatt 44320d6accbSmatt #if 0 44420d6accbSmatt aprint_normal_dev(mdio->mdio_dev, "%s: phy %d reg %d: %#x\n", 44520d6accbSmatt __func__, phy, reg, data); 44620d6accbSmatt #endif 44720d6accbSmatt 44820d6accbSmatt etsec_mdio_write(mdio, MIIMADD, 44920d6accbSmatt __SHIFTIN(phy, MIIMADD_PHY) | __SHIFTIN(reg, MIIMADD_REG)); 45020d6accbSmatt etsec_mdio_write(mdio, MIIMCOM, 0); /* clear any past bits */ 45120d6accbSmatt etsec_mdio_write(mdio, MIIMCON, data); 45220d6accbSmatt 45320d6accbSmatt int timo = 1000; /* 1ms */ 45420d6accbSmatt while ((etsec_mdio_read(mdio, MIIMIND) & MIIMIND_BUSY) && --timo > 0) { 45520d6accbSmatt delay(1); 45620d6accbSmatt } 45720d6accbSmatt 45820d6accbSmatt if (miimcom == MIIMCOM_SCAN) 45920d6accbSmatt etsec_mdio_write(mdio, MIIMCOM, miimcom); 46020d6accbSmatt 461a5cdd4b4Smsaitoh return 0; 46220d6accbSmatt } 46320d6accbSmatt 464b8ea2c8cSmatt static inline void 465b8ea2c8cSmatt etsec_write(struct pq3etsec_softc *sc, bus_size_t off, uint32_t data) 466b8ea2c8cSmatt { 467b8ea2c8cSmatt bus_space_write_4(sc->sc_bst, sc->sc_bsh, off, data); 468b8ea2c8cSmatt } 469b8ea2c8cSmatt 
470b8ea2c8cSmatt static void 47120d6accbSmatt pq3etsec_mii_statchg(struct ifnet *ifp) 472b8ea2c8cSmatt { 47320d6accbSmatt struct pq3etsec_softc * const sc = ifp->if_softc; 474b8ea2c8cSmatt struct mii_data * const mii = &sc->sc_mii; 475b8ea2c8cSmatt 476b8ea2c8cSmatt uint32_t maccfg1 = sc->sc_maccfg1; 477b8ea2c8cSmatt uint32_t maccfg2 = sc->sc_maccfg2; 478b8ea2c8cSmatt uint32_t ecntrl = sc->sc_ecntrl; 479b8ea2c8cSmatt 480b8ea2c8cSmatt maccfg1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); 481b8ea2c8cSmatt maccfg2 &= ~(MACCFG2_IFMODE | MACCFG2_FD); 482b8ea2c8cSmatt 483b8ea2c8cSmatt if (sc->sc_mii.mii_media_active & IFM_FDX) { 484b8ea2c8cSmatt maccfg2 |= MACCFG2_FD; 485b8ea2c8cSmatt } 486b8ea2c8cSmatt 487b8ea2c8cSmatt /* 488b8ea2c8cSmatt * Now deal with the flow control bits. 489b8ea2c8cSmatt */ 490b8ea2c8cSmatt if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO 491b8ea2c8cSmatt && (mii->mii_media_active & IFM_ETH_FMASK)) { 492b8ea2c8cSmatt if (mii->mii_media_active & IFM_ETH_RXPAUSE) 493b8ea2c8cSmatt maccfg1 |= MACCFG1_RX_FLOW; 494b8ea2c8cSmatt if (mii->mii_media_active & IFM_ETH_TXPAUSE) 495b8ea2c8cSmatt maccfg1 |= MACCFG1_TX_FLOW; 496b8ea2c8cSmatt } 497b8ea2c8cSmatt 498b8ea2c8cSmatt /* 499b8ea2c8cSmatt * Now deal with the speed. 500b8ea2c8cSmatt */ 501b8ea2c8cSmatt if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) { 502b8ea2c8cSmatt maccfg2 |= MACCFG2_IFMODE_GMII; 503b8ea2c8cSmatt } else { 504b8ea2c8cSmatt maccfg2 |= MACCFG2_IFMODE_MII; 505b8ea2c8cSmatt ecntrl &= ~ECNTRL_R100M; 506b8ea2c8cSmatt if (IFM_SUBTYPE(mii->mii_media_active) != IFM_10_T) { 507b8ea2c8cSmatt ecntrl |= ECNTRL_R100M; 508b8ea2c8cSmatt } 509b8ea2c8cSmatt } 510b8ea2c8cSmatt 511b8ea2c8cSmatt /* 512b8ea2c8cSmatt * If things are different, re-init things. 
513b8ea2c8cSmatt */ 514b8ea2c8cSmatt if (maccfg1 != sc->sc_maccfg1 515b8ea2c8cSmatt || maccfg2 != sc->sc_maccfg2 516b8ea2c8cSmatt || ecntrl != sc->sc_ecntrl) { 517b8ea2c8cSmatt if (sc->sc_if.if_flags & IFF_RUNNING) 518b8ea2c8cSmatt atomic_or_uint(&sc->sc_soft_flags, SOFT_RESET); 519b8ea2c8cSmatt sc->sc_maccfg1 = maccfg1; 520b8ea2c8cSmatt sc->sc_maccfg2 = maccfg2; 521b8ea2c8cSmatt sc->sc_ecntrl = ecntrl; 522b8ea2c8cSmatt } 523b8ea2c8cSmatt } 524b8ea2c8cSmatt 525b8ea2c8cSmatt #if 0 526b8ea2c8cSmatt static void 527b8ea2c8cSmatt pq3etsec_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 528b8ea2c8cSmatt { 529b8ea2c8cSmatt struct pq3etsec_softc * const sc = ifp->if_softc; 530b8ea2c8cSmatt 531b8ea2c8cSmatt mii_pollstat(&sc->sc_mii); 532b8ea2c8cSmatt ether_mediastatus(ifp, ifmr); 533b8ea2c8cSmatt ifmr->ifm_status = sc->sc_mii.mii_media_status; 534b8ea2c8cSmatt ifmr->ifm_active = sc->sc_mii.mii_media_active; 535b8ea2c8cSmatt } 536b8ea2c8cSmatt 537b8ea2c8cSmatt static int 538b8ea2c8cSmatt pq3etsec_mediachange(struct ifnet *ifp) 539b8ea2c8cSmatt { 540b8ea2c8cSmatt struct pq3etsec_softc * const sc = ifp->if_softc; 541b8ea2c8cSmatt 542b8ea2c8cSmatt if ((ifp->if_flags & IFF_UP) == 0) 543b8ea2c8cSmatt return 0; 544b8ea2c8cSmatt 545b8ea2c8cSmatt int rv = mii_mediachg(&sc->sc_mii); 546b8ea2c8cSmatt return (rv == ENXIO) ? 
0 : rv; 547b8ea2c8cSmatt } 548b8ea2c8cSmatt #endif 549b8ea2c8cSmatt 55020d6accbSmatt static int 55120d6accbSmatt pq3etsec_match(device_t parent, cfdata_t cf, void *aux) 55220c73c17Smatt { 55320c73c17Smatt 55420d6accbSmatt if (!e500_cpunode_submatch(parent, cf, cf->cf_name, aux)) 55520d6accbSmatt return 0; 55620d6accbSmatt 55720d6accbSmatt return 1; 55820c73c17Smatt } 55920c73c17Smatt 560b8ea2c8cSmatt static void 561b8ea2c8cSmatt pq3etsec_attach(device_t parent, device_t self, void *aux) 562b8ea2c8cSmatt { 563b8ea2c8cSmatt struct cpunode_softc * const psc = device_private(parent); 564b8ea2c8cSmatt struct pq3etsec_softc * const sc = device_private(self); 565811add33Smsaitoh struct mii_data * const mii = &sc->sc_mii; 566b8ea2c8cSmatt struct cpunode_attach_args * const cna = aux; 567b8ea2c8cSmatt struct cpunode_locators * const cnl = &cna->cna_locs; 568036ca983Smatt cfdata_t cf = device_cfdata(self); 569b8ea2c8cSmatt int error; 570b8ea2c8cSmatt 571b8ea2c8cSmatt psc->sc_children |= cna->cna_childmask; 572b8ea2c8cSmatt sc->sc_dev = self; 573b8ea2c8cSmatt sc->sc_bst = cna->cna_memt; 574b8ea2c8cSmatt sc->sc_dmat = &booke_bus_dma_tag; 575b8ea2c8cSmatt 576b8ea2c8cSmatt /* 57720d6accbSmatt * Pull out the mdio bus and phy we are supposed to use. 578b8ea2c8cSmatt */ 57920d6accbSmatt const int mdio = cf->cf_loc[CPUNODECF_MDIO]; 58020d6accbSmatt const int phy = cf->cf_loc[CPUNODECF_PHY]; 58120d6accbSmatt if (mdio != CPUNODECF_MDIO_DEFAULT) 58220d6accbSmatt aprint_normal(" mdio %d", mdio); 583b8ea2c8cSmatt 584b8ea2c8cSmatt /* 585b8ea2c8cSmatt * See if the phy is in the config file... 
586b8ea2c8cSmatt */ 58720d6accbSmatt if (phy != CPUNODECF_PHY_DEFAULT) { 58820d6accbSmatt sc->sc_phy_addr = phy; 589b8ea2c8cSmatt } else { 590b8ea2c8cSmatt unsigned char prop_name[20]; 591b8ea2c8cSmatt snprintf(prop_name, sizeof(prop_name), "tsec%u-phy-addr", 592b8ea2c8cSmatt cnl->cnl_instance); 593b8ea2c8cSmatt sc->sc_phy_addr = board_info_get_number(prop_name); 594b8ea2c8cSmatt } 595e2fc3501Smatt if (sc->sc_phy_addr != MII_PHY_ANY) 596b8ea2c8cSmatt aprint_normal(" phy %d", sc->sc_phy_addr); 597b8ea2c8cSmatt 598b8ea2c8cSmatt error = bus_space_map(sc->sc_bst, cnl->cnl_addr, cnl->cnl_size, 0, 599b8ea2c8cSmatt &sc->sc_bsh); 600b8ea2c8cSmatt if (error) { 601b8ea2c8cSmatt aprint_error(": error mapping registers: %d\n", error); 602b8ea2c8cSmatt return; 603b8ea2c8cSmatt } 604b8ea2c8cSmatt 605b8ea2c8cSmatt /* 606b8ea2c8cSmatt * Assume firmware has aready set the mac address and fetch it 607b8ea2c8cSmatt * before we reinit it. 608b8ea2c8cSmatt */ 609b8ea2c8cSmatt sc->sc_macstnaddr2 = etsec_read(sc, MACSTNADDR2); 610b8ea2c8cSmatt sc->sc_macstnaddr1 = etsec_read(sc, MACSTNADDR1); 611b8ea2c8cSmatt sc->sc_rctrl = RCTRL_DEFAULT; 612ca66ac7aSmatt sc->sc_ecntrl = etsec_read(sc, ECNTRL); 613ca66ac7aSmatt sc->sc_maccfg1 = etsec_read(sc, MACCFG1); 61412ca21c5Smatt sc->sc_maccfg2 = etsec_read(sc, MACCFG2) | MACCFG2_DEFAULT; 615b8ea2c8cSmatt 616b8ea2c8cSmatt if (sc->sc_macstnaddr1 == 0 && sc->sc_macstnaddr2 == 0) { 617b8ea2c8cSmatt size_t len; 618b8ea2c8cSmatt const uint8_t *mac_addr = 619b8ea2c8cSmatt board_info_get_data("tsec-mac-addr-base", &len); 620b8ea2c8cSmatt KASSERT(len == ETHER_ADDR_LEN); 621b8ea2c8cSmatt sc->sc_macstnaddr2 = 622b8ea2c8cSmatt (mac_addr[1] << 24) 623b8ea2c8cSmatt | (mac_addr[0] << 16); 624b8ea2c8cSmatt sc->sc_macstnaddr1 = 625b8ea2c8cSmatt ((mac_addr[5] + cnl->cnl_instance - 1) << 24) 626b8ea2c8cSmatt | (mac_addr[4] << 16) 627b8ea2c8cSmatt | (mac_addr[3] << 8) 628b8ea2c8cSmatt | (mac_addr[2] << 0); 629b8ea2c8cSmatt #if 0 630b8ea2c8cSmatt aprint_error(": 
mac-address unknown\n"); 631b8ea2c8cSmatt return; 632b8ea2c8cSmatt #endif 633b8ea2c8cSmatt } 634b8ea2c8cSmatt 6351b7941c2Snonaka sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET); 6361b7941c2Snonaka sc->sc_hwlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_VM); 6371b7941c2Snonaka 6381b7941c2Snonaka callout_init(&sc->sc_mii_callout, CALLOUT_MPSAFE); 6391b7941c2Snonaka callout_setfunc(&sc->sc_mii_callout, pq3etsec_mii_tick, sc); 6401b7941c2Snonaka 6411b7941c2Snonaka /* Disable interrupts */ 6421b7941c2Snonaka etsec_write(sc, IMASK, 0); 643b8ea2c8cSmatt 644b8ea2c8cSmatt error = pq3etsec_rxq_attach(sc, &sc->sc_rxq, 0); 645b8ea2c8cSmatt if (error) { 646b8ea2c8cSmatt aprint_error(": failed to init rxq: %d\n", error); 647cbaadec7Smsaitoh goto fail_1; 648b8ea2c8cSmatt } 649b8ea2c8cSmatt 650b8ea2c8cSmatt error = pq3etsec_txq_attach(sc, &sc->sc_txq, 0); 651b8ea2c8cSmatt if (error) { 652b8ea2c8cSmatt aprint_error(": failed to init txq: %d\n", error); 653cbaadec7Smsaitoh goto fail_2; 654b8ea2c8cSmatt } 655b8ea2c8cSmatt 656b8ea2c8cSmatt error = pq3etsec_mapcache_create(sc, &sc->sc_rx_mapcache, 6574fe92317Smatt ETSEC_MAXRXMBUFS, MCLBYTES, ETSEC_NRXSEGS); 658b8ea2c8cSmatt if (error) { 659b8ea2c8cSmatt aprint_error(": failed to allocate rx dmamaps: %d\n", error); 660cbaadec7Smsaitoh goto fail_3; 661b8ea2c8cSmatt } 662b8ea2c8cSmatt 663b8ea2c8cSmatt error = pq3etsec_mapcache_create(sc, &sc->sc_tx_mapcache, 6644fe92317Smatt ETSEC_MAXTXMBUFS, MCLBYTES, ETSEC_NTXSEGS); 665b8ea2c8cSmatt if (error) { 666b8ea2c8cSmatt aprint_error(": failed to allocate tx dmamaps: %d\n", error); 667cbaadec7Smsaitoh goto fail_4; 668b8ea2c8cSmatt } 669b8ea2c8cSmatt 670b8ea2c8cSmatt sc->sc_tx_ih = intr_establish(cnl->cnl_intrs[0], IPL_VM, IST_ONCHIP, 671b8ea2c8cSmatt pq3etsec_tx_intr, sc); 672b8ea2c8cSmatt if (sc->sc_tx_ih == NULL) { 673b8ea2c8cSmatt aprint_error(": failed to establish tx interrupt: %d\n", 674b8ea2c8cSmatt cnl->cnl_intrs[0]); 675cbaadec7Smsaitoh goto fail_5; 676b8ea2c8cSmatt } 
677b8ea2c8cSmatt 678b8ea2c8cSmatt sc->sc_rx_ih = intr_establish(cnl->cnl_intrs[1], IPL_VM, IST_ONCHIP, 679b8ea2c8cSmatt pq3etsec_rx_intr, sc); 680b8ea2c8cSmatt if (sc->sc_rx_ih == NULL) { 681b8ea2c8cSmatt aprint_error(": failed to establish rx interrupt: %d\n", 682b8ea2c8cSmatt cnl->cnl_intrs[1]); 683cbaadec7Smsaitoh goto fail_6; 684b8ea2c8cSmatt } 685b8ea2c8cSmatt 686b8ea2c8cSmatt sc->sc_error_ih = intr_establish(cnl->cnl_intrs[2], IPL_VM, IST_ONCHIP, 687b8ea2c8cSmatt pq3etsec_error_intr, sc); 688b8ea2c8cSmatt if (sc->sc_error_ih == NULL) { 689b8ea2c8cSmatt aprint_error(": failed to establish error interrupt: %d\n", 690b8ea2c8cSmatt cnl->cnl_intrs[2]); 691cbaadec7Smsaitoh goto fail_7; 692b8ea2c8cSmatt } 693b8ea2c8cSmatt 6945e43e8ebSnonaka int softint_flags = SOFTINT_NET; 6955e43e8ebSnonaka #if !defined(MULTIPROCESSOR) || defined(NET_MPSAFE) 6965e43e8ebSnonaka softint_flags |= SOFTINT_MPSAFE; 6975e43e8ebSnonaka #endif /* !MULTIPROCESSOR || NET_MPSAFE */ 6985e43e8ebSnonaka sc->sc_soft_ih = softint_establish(softint_flags, 699b8ea2c8cSmatt pq3etsec_soft_intr, sc); 700b8ea2c8cSmatt if (sc->sc_soft_ih == NULL) { 701b8ea2c8cSmatt aprint_error(": failed to establish soft interrupt\n"); 702cbaadec7Smsaitoh goto fail_8; 703b8ea2c8cSmatt } 704b8ea2c8cSmatt 70520d6accbSmatt /* 706836f08a8Snonaka * If there was no MDIO 70720d6accbSmatt */ 70820d6accbSmatt if (mdio == CPUNODECF_MDIO_DEFAULT) { 709b8ea2c8cSmatt aprint_normal("\n"); 7102685996bSthorpej cfdata_t mdio_cf = config_search(self, cna, 711c7fb772bSthorpej CFARGS(.submatch = pq3mdio_find)); 71220d6accbSmatt if (mdio_cf != NULL) { 7132685996bSthorpej sc->sc_mdio_dev = 714c7fb772bSthorpej config_attach(self, mdio_cf, cna, NULL, CFARGS_NONE); 71520d6accbSmatt } 71620d6accbSmatt } else { 71720d6accbSmatt sc->sc_mdio_dev = device_find_by_driver_unit("mdio", mdio); 71820d6accbSmatt if (sc->sc_mdio_dev == NULL) { 71920d6accbSmatt aprint_error(": failed to locate mdio device\n"); 720cbaadec7Smsaitoh goto fail_9; 
72120d6accbSmatt } 72220d6accbSmatt aprint_normal("\n"); 72320d6accbSmatt } 724b8ea2c8cSmatt 7258059ca17Smatt etsec_write(sc, ATTR, ATTR_DEFAULT); 7268059ca17Smatt etsec_write(sc, ATTRELI, ATTRELI_DEFAULT); 7278059ca17Smatt 728100a3398Sandvar /* Enable interrupt coalescing */ 7296e15a820Snonaka sc->sc_ic_rx_time = 768; 7306e15a820Snonaka sc->sc_ic_rx_count = 16; 7316e15a820Snonaka sc->sc_ic_tx_time = 768; 7326e15a820Snonaka sc->sc_ic_tx_count = 16; 7336e15a820Snonaka pq3etsec_set_ic_rx(sc); 7346e15a820Snonaka pq3etsec_set_ic_tx(sc); 7356e15a820Snonaka 7361b7941c2Snonaka char enaddr[ETHER_ADDR_LEN] = { 7371b7941c2Snonaka [0] = sc->sc_macstnaddr2 >> 16, 7381b7941c2Snonaka [1] = sc->sc_macstnaddr2 >> 24, 7391b7941c2Snonaka [2] = sc->sc_macstnaddr1 >> 0, 7401b7941c2Snonaka [3] = sc->sc_macstnaddr1 >> 8, 7411b7941c2Snonaka [4] = sc->sc_macstnaddr1 >> 16, 7421b7941c2Snonaka [5] = sc->sc_macstnaddr1 >> 24, 7431b7941c2Snonaka }; 744b8ea2c8cSmatt aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n", 745b8ea2c8cSmatt ether_sprintf(enaddr)); 746b8ea2c8cSmatt 747b8ea2c8cSmatt const char * const xname = device_xname(sc->sc_dev); 748b8ea2c8cSmatt struct ethercom * const ec = &sc->sc_ec; 749b8ea2c8cSmatt struct ifnet * const ifp = &ec->ec_if; 750b8ea2c8cSmatt 751811add33Smsaitoh ec->ec_mii = mii; 752b8ea2c8cSmatt 753811add33Smsaitoh mii->mii_ifp = ifp; 754811add33Smsaitoh mii->mii_readreg = pq3mdio_mii_readreg; 755811add33Smsaitoh mii->mii_writereg = pq3mdio_mii_writereg; 756811add33Smsaitoh mii->mii_statchg = pq3etsec_mii_statchg; 757b8ea2c8cSmatt 758811add33Smsaitoh ifmedia_init(&mii->mii_media, 0, ether_mediachange, ether_mediastatus); 759b8ea2c8cSmatt 76020d6accbSmatt if (sc->sc_mdio_dev != NULL && sc->sc_phy_addr < 32) { 761811add33Smsaitoh mii_attach(sc->sc_mdio_dev, mii, 0xffffffff, 762b8ea2c8cSmatt sc->sc_phy_addr, MII_OFFSET_ANY, MIIF_DOPAUSE); 763b8ea2c8cSmatt 764811add33Smsaitoh if (LIST_FIRST(&mii->mii_phys) == NULL) { 765811add33Smsaitoh ifmedia_add(&mii->mii_media, 
IFM_ETHER | IFM_NONE, 766811add33Smsaitoh 0, NULL); 767811add33Smsaitoh ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE); 768b8ea2c8cSmatt } else { 769b8ea2c8cSmatt callout_schedule(&sc->sc_mii_callout, hz); 770811add33Smsaitoh ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO); 771b8ea2c8cSmatt } 77291d8d986Smatt } else { 773811add33Smsaitoh ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 774811add33Smsaitoh 0, NULL); 775811add33Smsaitoh ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_1000_T | IFM_FDX); 77691d8d986Smatt } 777b8ea2c8cSmatt 778b8ea2c8cSmatt ec->ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING 779b8ea2c8cSmatt | ETHERCAP_JUMBO_MTU; 780e27c9b11Smsaitoh ec->ec_capenable = ETHERCAP_VLAN_HWTAGGING; 781b8ea2c8cSmatt 782b8ea2c8cSmatt strlcpy(ifp->if_xname, xname, IFNAMSIZ); 783b8ea2c8cSmatt ifp->if_softc = sc; 784b8ea2c8cSmatt ifp->if_capabilities = IFCAP_ETSEC; 785b8ea2c8cSmatt ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 786b8ea2c8cSmatt ifp->if_ioctl = pq3etsec_ifioctl; 787b8ea2c8cSmatt ifp->if_start = pq3etsec_ifstart; 788b8ea2c8cSmatt ifp->if_watchdog = pq3etsec_ifwatchdog; 789b8ea2c8cSmatt ifp->if_init = pq3etsec_ifinit; 790b8ea2c8cSmatt ifp->if_stop = pq3etsec_ifstop; 791b8ea2c8cSmatt IFQ_SET_READY(&ifp->if_snd); 792b8ea2c8cSmatt 793b8ea2c8cSmatt /* 794b8ea2c8cSmatt * Attach the interface. 
795b8ea2c8cSmatt */ 796076e3579Sriastradh if_initialize(ifp); 797cbaadec7Smsaitoh pq3etsec_sysctl_setup(NULL, sc); 79824ff2346Srin if_attach(ifp); 79924ff2346Srin if_deferred_start_init(ifp, NULL); 800b8ea2c8cSmatt ether_ifattach(ifp, enaddr); 801b8ea2c8cSmatt 80212a7d27eSrin rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 80312a7d27eSrin RND_FLAG_DEFAULT); 80412a7d27eSrin 8058c5b560fSnonaka pq3etsec_ifstop(ifp, true); 8068c5b560fSnonaka 807b8ea2c8cSmatt evcnt_attach_dynamic(&sc->sc_ev_rx_stall, EVCNT_TYPE_MISC, 808b8ea2c8cSmatt NULL, xname, "rx stall"); 809b8ea2c8cSmatt evcnt_attach_dynamic(&sc->sc_ev_tx_stall, EVCNT_TYPE_MISC, 810b8ea2c8cSmatt NULL, xname, "tx stall"); 811b8ea2c8cSmatt evcnt_attach_dynamic(&sc->sc_ev_tx_intr, EVCNT_TYPE_INTR, 812b8ea2c8cSmatt NULL, xname, "tx intr"); 813b8ea2c8cSmatt evcnt_attach_dynamic(&sc->sc_ev_rx_intr, EVCNT_TYPE_INTR, 814b8ea2c8cSmatt NULL, xname, "rx intr"); 815b8ea2c8cSmatt evcnt_attach_dynamic(&sc->sc_ev_error_intr, EVCNT_TYPE_INTR, 816b8ea2c8cSmatt NULL, xname, "error intr"); 817b8ea2c8cSmatt evcnt_attach_dynamic(&sc->sc_ev_soft_intr, EVCNT_TYPE_INTR, 818b8ea2c8cSmatt NULL, xname, "soft intr"); 819b8ea2c8cSmatt evcnt_attach_dynamic(&sc->sc_ev_tx_pause, EVCNT_TYPE_MISC, 820b8ea2c8cSmatt NULL, xname, "tx pause"); 821b8ea2c8cSmatt evcnt_attach_dynamic(&sc->sc_ev_rx_pause, EVCNT_TYPE_MISC, 822b8ea2c8cSmatt NULL, xname, "rx pause"); 823b8ea2c8cSmatt evcnt_attach_dynamic(&sc->sc_ev_mii_ticks, EVCNT_TYPE_MISC, 824b8ea2c8cSmatt NULL, xname, "mii ticks"); 825cbaadec7Smsaitoh return; 826cbaadec7Smsaitoh 827cbaadec7Smsaitoh fail_9: 828cbaadec7Smsaitoh softint_disestablish(sc->sc_soft_ih); 829cbaadec7Smsaitoh fail_8: 830cbaadec7Smsaitoh intr_disestablish(sc->sc_error_ih); 831cbaadec7Smsaitoh fail_7: 832cbaadec7Smsaitoh intr_disestablish(sc->sc_rx_ih); 833cbaadec7Smsaitoh fail_6: 834cbaadec7Smsaitoh intr_disestablish(sc->sc_tx_ih); 835cbaadec7Smsaitoh fail_5: 836cbaadec7Smsaitoh pq3etsec_mapcache_destroy(sc, 
sc->sc_tx_mapcache); 837cbaadec7Smsaitoh fail_4: 838cbaadec7Smsaitoh pq3etsec_mapcache_destroy(sc, sc->sc_rx_mapcache); 839cbaadec7Smsaitoh fail_3: 840cbaadec7Smsaitoh #if 0 /* notyet */ 841cbaadec7Smsaitoh pq3etsec_txq_detach(sc); 842cbaadec7Smsaitoh #endif 843cbaadec7Smsaitoh fail_2: 844cbaadec7Smsaitoh #if 0 /* notyet */ 845cbaadec7Smsaitoh pq3etsec_rxq_detach(sc); 846cbaadec7Smsaitoh #endif 847cbaadec7Smsaitoh fail_1: 848cbaadec7Smsaitoh callout_destroy(&sc->sc_mii_callout); 849cbaadec7Smsaitoh mutex_obj_free(sc->sc_lock); 850cbaadec7Smsaitoh mutex_obj_free(sc->sc_hwlock); 851cbaadec7Smsaitoh bus_space_unmap(sc->sc_bst, sc->sc_bsh, cnl->cnl_size); 852b8ea2c8cSmatt } 853b8ea2c8cSmatt 854b8ea2c8cSmatt static uint64_t 855b8ea2c8cSmatt pq3etsec_macaddr_create(const uint8_t *lladdr) 856b8ea2c8cSmatt { 857b8ea2c8cSmatt uint64_t macaddr = 0; 858b8ea2c8cSmatt 859b8ea2c8cSmatt lladdr += ETHER_ADDR_LEN; 860b8ea2c8cSmatt for (u_int i = ETHER_ADDR_LEN; i-- > 0; ) { 861b8ea2c8cSmatt macaddr = (macaddr << 8) | *--lladdr; 862b8ea2c8cSmatt } 863b8ea2c8cSmatt return macaddr << 16; 864b8ea2c8cSmatt } 865b8ea2c8cSmatt 866b8ea2c8cSmatt static int 867b8ea2c8cSmatt pq3etsec_ifinit(struct ifnet *ifp) 868b8ea2c8cSmatt { 869b8ea2c8cSmatt struct pq3etsec_softc * const sc = ifp->if_softc; 870b8ea2c8cSmatt int error = 0; 871b8ea2c8cSmatt 872d1579b2dSriastradh sc->sc_maxfrm = uimax(ifp->if_mtu + 32, MCLBYTES); 873b8ea2c8cSmatt if (ifp->if_mtu > ETHERMTU_JUMBO) 874b8ea2c8cSmatt return error; 875b8ea2c8cSmatt 876b8ea2c8cSmatt KASSERT(ifp->if_flags & IFF_UP); 877b8ea2c8cSmatt 878b8ea2c8cSmatt /* 879b8ea2c8cSmatt * Stop the interface (steps 1 to 4 in the Soft Reset and 880b8ea2c8cSmatt * Reconfigurating Procedure. 881b8ea2c8cSmatt */ 882b8ea2c8cSmatt pq3etsec_ifstop(ifp, 0); 883b8ea2c8cSmatt 884b8ea2c8cSmatt /* 885b8ea2c8cSmatt * If our frame size has changed (or it's our first time through) 886b8ea2c8cSmatt * destroy the existing transmit mapcache. 
887b8ea2c8cSmatt */ 888b8ea2c8cSmatt if (sc->sc_tx_mapcache != NULL 889b8ea2c8cSmatt && sc->sc_maxfrm != sc->sc_tx_mapcache->dmc_maxmapsize) { 890b8ea2c8cSmatt pq3etsec_mapcache_destroy(sc, sc->sc_tx_mapcache); 891b8ea2c8cSmatt sc->sc_tx_mapcache = NULL; 892b8ea2c8cSmatt } 893b8ea2c8cSmatt 894b8ea2c8cSmatt if (sc->sc_tx_mapcache == NULL) { 895b8ea2c8cSmatt error = pq3etsec_mapcache_create(sc, &sc->sc_tx_mapcache, 8964fe92317Smatt ETSEC_MAXTXMBUFS, sc->sc_maxfrm, ETSEC_NTXSEGS); 897b8ea2c8cSmatt if (error) 898b8ea2c8cSmatt return error; 899b8ea2c8cSmatt } 900b8ea2c8cSmatt 901b8ea2c8cSmatt sc->sc_ev_mii_ticks.ev_count++; 902b8ea2c8cSmatt mii_tick(&sc->sc_mii); 903b8ea2c8cSmatt 904b8ea2c8cSmatt if (ifp->if_flags & IFF_PROMISC) { 905b8ea2c8cSmatt sc->sc_rctrl |= RCTRL_PROM; 906b8ea2c8cSmatt } else { 907b8ea2c8cSmatt sc->sc_rctrl &= ~RCTRL_PROM; 908b8ea2c8cSmatt } 909b8ea2c8cSmatt 910b8ea2c8cSmatt uint32_t rctrl_prsdep = 0; 911811add33Smsaitoh sc->sc_rctrl &= 912811add33Smsaitoh ~(RCTRL_IPCSEN | RCTRL_TUCSEN | RCTRL_VLEX | RCTRL_PRSDEP); 913b8ea2c8cSmatt if (VLAN_ATTACHED(&sc->sc_ec)) { 914b8ea2c8cSmatt sc->sc_rctrl |= RCTRL_VLEX; 915b8ea2c8cSmatt rctrl_prsdep = RCTRL_PRSDEP_L2; 916b8ea2c8cSmatt } 917b8ea2c8cSmatt if (ifp->if_capenable & IFCAP_RCTRL_IPCSEN) { 918b8ea2c8cSmatt sc->sc_rctrl |= RCTRL_IPCSEN; 919b8ea2c8cSmatt rctrl_prsdep = RCTRL_PRSDEP_L3; 920b8ea2c8cSmatt } 921b8ea2c8cSmatt if (ifp->if_capenable & IFCAP_RCTRL_TUCSEN) { 922b8ea2c8cSmatt sc->sc_rctrl |= RCTRL_TUCSEN; 923b8ea2c8cSmatt rctrl_prsdep = RCTRL_PRSDEP_L4; 924b8ea2c8cSmatt } 925b8ea2c8cSmatt sc->sc_rctrl |= rctrl_prsdep; 926b8ea2c8cSmatt #if 0 927811add33Smsaitoh if (sc->sc_rctrl 928811add33Smsaitoh & (RCTRL_IPCSEN | RCTRL_TUCSEN | RCTRL_VLEX | RCTRL_PRSDEP)) 929b8ea2c8cSmatt aprint_normal_dev(sc->sc_dev, 930b8ea2c8cSmatt "rctrl=%#x ipcsen=%"PRIuMAX" tucsen=%"PRIuMAX" vlex=%"PRIuMAX" prsdep=%"PRIuMAX"\n", 931b8ea2c8cSmatt sc->sc_rctrl, 932b8ea2c8cSmatt __SHIFTOUT(sc->sc_rctrl, RCTRL_IPCSEN), 
933b8ea2c8cSmatt __SHIFTOUT(sc->sc_rctrl, RCTRL_TUCSEN), 934b8ea2c8cSmatt __SHIFTOUT(sc->sc_rctrl, RCTRL_VLEX), 935b8ea2c8cSmatt __SHIFTOUT(sc->sc_rctrl, RCTRL_PRSDEP)); 936b8ea2c8cSmatt #endif 937b8ea2c8cSmatt 938b8ea2c8cSmatt sc->sc_tctrl &= ~(TCTRL_IPCSEN | TCTRL_TUCSEN | TCTRL_VLINS); 939b8ea2c8cSmatt if (VLAN_ATTACHED(&sc->sc_ec)) /* is this really true */ 940b8ea2c8cSmatt sc->sc_tctrl |= TCTRL_VLINS; 941b8ea2c8cSmatt if (ifp->if_capenable & IFCAP_TCTRL_IPCSEN) 942b8ea2c8cSmatt sc->sc_tctrl |= TCTRL_IPCSEN; 943b8ea2c8cSmatt if (ifp->if_capenable & IFCAP_TCTRL_TUCSEN) 944b8ea2c8cSmatt sc->sc_tctrl |= TCTRL_TUCSEN; 945b8ea2c8cSmatt #if 0 946b8ea2c8cSmatt if (sc->sc_tctrl & (TCTRL_IPCSEN | TCTRL_TUCSEN | TCTRL_VLINS)) 947b8ea2c8cSmatt aprint_normal_dev(sc->sc_dev, 948b8ea2c8cSmatt "tctrl=%#x ipcsen=%"PRIuMAX" tucsen=%"PRIuMAX" vlins=%"PRIuMAX"\n", 949b8ea2c8cSmatt sc->sc_tctrl, 950b8ea2c8cSmatt __SHIFTOUT(sc->sc_tctrl, TCTRL_IPCSEN), 951b8ea2c8cSmatt __SHIFTOUT(sc->sc_tctrl, TCTRL_TUCSEN), 952b8ea2c8cSmatt __SHIFTOUT(sc->sc_tctrl, TCTRL_VLINS)); 953b8ea2c8cSmatt #endif 954b8ea2c8cSmatt 955b8ea2c8cSmatt sc->sc_maccfg1 &= ~(MACCFG1_TX_EN | MACCFG1_RX_EN); 956b8ea2c8cSmatt 957b8ea2c8cSmatt const uint64_t macstnaddr = 958b8ea2c8cSmatt pq3etsec_macaddr_create(CLLADDR(ifp->if_sadl)); 959b8ea2c8cSmatt 960b8ea2c8cSmatt sc->sc_imask = IEVENT_DPE; 961b8ea2c8cSmatt 962b8ea2c8cSmatt /* 5. Load TDBPH, TBASEH, TBASE0-TBASE7 with new Tx BD pointers */ 963b8ea2c8cSmatt pq3etsec_rxq_reset(sc, &sc->sc_rxq); 964b8ea2c8cSmatt pq3etsec_rxq_produce(sc, &sc->sc_rxq); /* fill with rx buffers */ 965b8ea2c8cSmatt 966b8ea2c8cSmatt /* 6. Load RDBPH, RBASEH, RBASE0-RBASE7 with new Rx BD pointers */ 967b8ea2c8cSmatt pq3etsec_txq_reset(sc, &sc->sc_txq); 968b8ea2c8cSmatt 969b8ea2c8cSmatt /* 7. Setup other MAC registers (MACCFG2, MAXFRM, etc.) 
*/ 970b8ea2c8cSmatt KASSERT(MACCFG2_PADCRC & sc->sc_maccfg2); 971b8ea2c8cSmatt etsec_write(sc, MAXFRM, sc->sc_maxfrm); 972b8ea2c8cSmatt etsec_write(sc, MACSTNADDR1, (uint32_t)(macstnaddr >> 32)); 973b8ea2c8cSmatt etsec_write(sc, MACSTNADDR2, (uint32_t)(macstnaddr >> 0)); 974b8ea2c8cSmatt etsec_write(sc, MACCFG1, sc->sc_maccfg1); 975b8ea2c8cSmatt etsec_write(sc, MACCFG2, sc->sc_maccfg2); 976b8ea2c8cSmatt etsec_write(sc, ECNTRL, sc->sc_ecntrl); 977b8ea2c8cSmatt 978b8ea2c8cSmatt /* 8. Setup group address hash table (GADDR0-GADDR15) */ 979b8ea2c8cSmatt pq3etsec_mc_setup(sc); 980b8ea2c8cSmatt 981b8ea2c8cSmatt /* 9. Setup receive frame filer table (via RQFAR, RQFCR, and RQFPR) */ 982b8ea2c8cSmatt etsec_write(sc, MRBLR, MCLBYTES); 983b8ea2c8cSmatt 984b8ea2c8cSmatt /* 10. Setup WWR, WOP, TOD bits in DMACTRL register */ 985b8ea2c8cSmatt sc->sc_dmactrl |= DMACTRL_DEFAULT; 986b8ea2c8cSmatt etsec_write(sc, DMACTRL, sc->sc_dmactrl); 987b8ea2c8cSmatt 988b8ea2c8cSmatt /* 11. Enable transmit queues in TQUEUE, and ensure that the transmit scheduling mode is correctly set in TCTRL. */ 989b8ea2c8cSmatt etsec_write(sc, TQUEUE, TQUEUE_EN0); 990b8ea2c8cSmatt sc->sc_imask |= IEVENT_TXF | IEVENT_TXE | IEVENT_TXC; 991b8ea2c8cSmatt 992b8ea2c8cSmatt etsec_write(sc, TCTRL, sc->sc_tctrl); /* for TOE stuff */ 993b8ea2c8cSmatt 994b8ea2c8cSmatt /* 12. Enable receive queues in RQUEUE, */ 995b8ea2c8cSmatt etsec_write(sc, RQUEUE, RQUEUE_EN0 | RQUEUE_EX0); 996b8ea2c8cSmatt sc->sc_imask |= IEVENT_RXF | IEVENT_BSY | IEVENT_RXC; 997b8ea2c8cSmatt 998b8ea2c8cSmatt /* and optionally set TOE functionality in RCTRL. */ 999b8ea2c8cSmatt etsec_write(sc, RCTRL, sc->sc_rctrl); 1000b8ea2c8cSmatt sc->sc_rx_adjlen = __SHIFTOUT(sc->sc_rctrl, RCTRL_PAL); 1001b8ea2c8cSmatt if ((sc->sc_rctrl & RCTRL_PRSDEP) != RCTRL_PRSDEP_OFF) 1002b8ea2c8cSmatt sc->sc_rx_adjlen += sizeof(struct rxfcb); 1003b8ea2c8cSmatt 1004b8ea2c8cSmatt /* 13. 
Clear THLT and TXF bits in TSTAT register by writing 1 to them */ 1005b8ea2c8cSmatt etsec_write(sc, TSTAT, TSTAT_THLT | TSTAT_TXF); 1006b8ea2c8cSmatt 1007b8ea2c8cSmatt /* 14. Clear QHLT and RXF bits in RSTAT register by writing 1 to them.*/ 1008b8ea2c8cSmatt etsec_write(sc, RSTAT, RSTAT_QHLT | RSTAT_RXF); 1009b8ea2c8cSmatt 1010b8ea2c8cSmatt /* 15. Clear GRS/GTS bits in DMACTRL (do not change other bits) */ 1011b8ea2c8cSmatt sc->sc_dmactrl &= ~(DMACTRL_GRS | DMACTRL_GTS); 1012b8ea2c8cSmatt etsec_write(sc, DMACTRL, sc->sc_dmactrl); 1013b8ea2c8cSmatt 1014b8ea2c8cSmatt /* 16. Enable Tx_EN/Rx_EN in MACCFG1 register */ 1015b8ea2c8cSmatt etsec_write(sc, MACCFG1, sc->sc_maccfg1 | MACCFG1_TX_EN|MACCFG1_RX_EN); 1016b8ea2c8cSmatt etsec_write(sc, MACCFG1, sc->sc_maccfg1 | MACCFG1_TX_EN|MACCFG1_RX_EN); 1017b8ea2c8cSmatt 1018b8ea2c8cSmatt sc->sc_soft_flags = 0; 1019b8ea2c8cSmatt 1020b8ea2c8cSmatt etsec_write(sc, IMASK, sc->sc_imask); 1021b8ea2c8cSmatt 1022b8ea2c8cSmatt ifp->if_flags |= IFF_RUNNING; 1023b8ea2c8cSmatt 1024b8ea2c8cSmatt return error; 1025b8ea2c8cSmatt } 1026b8ea2c8cSmatt 1027b8ea2c8cSmatt static void 1028b8ea2c8cSmatt pq3etsec_ifstop(struct ifnet *ifp, int disable) 1029b8ea2c8cSmatt { 1030b8ea2c8cSmatt struct pq3etsec_softc * const sc = ifp->if_softc; 1031b8ea2c8cSmatt 1032b8ea2c8cSmatt KASSERT(!cpu_intr_p()); 1033b8ea2c8cSmatt const uint32_t imask_gsc_mask = IEVENT_GTSC | IEVENT_GRSC; 1034b8ea2c8cSmatt /* 1035b8ea2c8cSmatt * Clear the GTSC and GRSC from the interrupt mask until 1036b8ea2c8cSmatt * we are ready for them. Then clear them from IEVENT, 1037b8ea2c8cSmatt * request the graceful shutdown, and then enable the 1038b8ea2c8cSmatt * GTSC and GRSC bits in the mask. This should cause the 1039b8ea2c8cSmatt * error interrupt to fire which will issue a wakeup to 1040b8ea2c8cSmatt * allow us to resume. 1041b8ea2c8cSmatt */ 1042b8ea2c8cSmatt 1043b8ea2c8cSmatt /* 1044b8ea2c8cSmatt * 1. 
Set GRS/GTS bits in DMACTRL register 1045b8ea2c8cSmatt */ 1046b8ea2c8cSmatt sc->sc_dmactrl |= DMACTRL_GRS | DMACTRL_GTS; 1047b8ea2c8cSmatt etsec_write(sc, IMASK, sc->sc_imask & ~imask_gsc_mask); 1048b8ea2c8cSmatt etsec_write(sc, IEVENT, imask_gsc_mask); 1049b8ea2c8cSmatt etsec_write(sc, DMACTRL, sc->sc_dmactrl); 1050b8ea2c8cSmatt 1051b8ea2c8cSmatt if (etsec_read(sc, MACCFG1) & (MACCFG1_TX_EN | MACCFG1_RX_EN)) { 1052b8ea2c8cSmatt /* 1053b8ea2c8cSmatt * 2. Poll GRSC/GTSC bits in IEVENT register until both are set 1054b8ea2c8cSmatt */ 1055b8ea2c8cSmatt etsec_write(sc, IMASK, sc->sc_imask | imask_gsc_mask); 1056b8ea2c8cSmatt 1057b8ea2c8cSmatt u_int timo = 1000; 1058b8ea2c8cSmatt uint32_t ievent = etsec_read(sc, IEVENT); 1059b8ea2c8cSmatt while ((ievent & imask_gsc_mask) != imask_gsc_mask) { 1060b8ea2c8cSmatt if (--timo == 0) { 1061b8ea2c8cSmatt aprint_error_dev(sc->sc_dev, 1062b8ea2c8cSmatt "WARNING: " 1063b8ea2c8cSmatt "request to stop failed (IEVENT=%#x)\n", 1064b8ea2c8cSmatt ievent); 1065b8ea2c8cSmatt break; 1066b8ea2c8cSmatt } 1067b8ea2c8cSmatt delay(10); 1068b8ea2c8cSmatt ievent = etsec_read(sc, IEVENT); 1069b8ea2c8cSmatt } 1070b8ea2c8cSmatt } 1071b8ea2c8cSmatt 1072b8ea2c8cSmatt /* 1073b8ea2c8cSmatt * Now reset the controller. 1074b8ea2c8cSmatt * 1075b8ea2c8cSmatt * 3. Set SOFT_RESET bit in MACCFG1 register 1076b8ea2c8cSmatt * 4. 
Clear SOFT_RESET bit in MACCFG1 register 1077b8ea2c8cSmatt */ 1078b8ea2c8cSmatt etsec_write(sc, MACCFG1, MACCFG1_SOFT_RESET); 1079b8ea2c8cSmatt etsec_write(sc, MACCFG1, 0); 1080b8ea2c8cSmatt etsec_write(sc, IMASK, 0); 1081b8ea2c8cSmatt etsec_write(sc, IEVENT, ~0); 1082b8ea2c8cSmatt sc->sc_imask = 0; 1083b8ea2c8cSmatt ifp->if_flags &= ~IFF_RUNNING; 1084b8ea2c8cSmatt 1085b8ea2c8cSmatt uint32_t tbipa = etsec_read(sc, TBIPA); 1086b8ea2c8cSmatt if (tbipa == sc->sc_phy_addr) { 1087b8ea2c8cSmatt aprint_normal_dev(sc->sc_dev, "relocating TBI\n"); 1088b8ea2c8cSmatt etsec_write(sc, TBIPA, 0x1f); 1089b8ea2c8cSmatt } 1090b8ea2c8cSmatt uint32_t miimcfg = etsec_read(sc, MIIMCFG); 1091b8ea2c8cSmatt etsec_write(sc, MIIMCFG, MIIMCFG_RESET); 1092b8ea2c8cSmatt etsec_write(sc, MIIMCFG, miimcfg); 1093b8ea2c8cSmatt 1094b8ea2c8cSmatt /* 1095*9f9778c7Sandvar * Let's consume any remaining transmitted packets. And if we are 1096b8ea2c8cSmatt * disabling the interface, purge ourselves of any untransmitted 1097b8ea2c8cSmatt * packets. But don't consume any received packets, just drop them. 1098b8ea2c8cSmatt * If we aren't disabling the interface, save the mbufs in the 1099b8ea2c8cSmatt * receive queue for reuse. 
1100b8ea2c8cSmatt */ 1101b8ea2c8cSmatt pq3etsec_rxq_purge(sc, &sc->sc_rxq, disable); 1102b8ea2c8cSmatt pq3etsec_txq_consume(sc, &sc->sc_txq); 1103b8ea2c8cSmatt if (disable) { 1104b8ea2c8cSmatt pq3etsec_txq_purge(sc, &sc->sc_txq); 11051296cd79Snonaka IFQ_PURGE(&ifp->if_snd); 1106b8ea2c8cSmatt } 1107b8ea2c8cSmatt } 1108b8ea2c8cSmatt 1109b8ea2c8cSmatt static void 1110b8ea2c8cSmatt pq3etsec_ifwatchdog(struct ifnet *ifp) 1111b8ea2c8cSmatt { 1112b8ea2c8cSmatt } 1113b8ea2c8cSmatt 1114b8ea2c8cSmatt static void 1115b8ea2c8cSmatt pq3etsec_mc_setup( 1116b8ea2c8cSmatt struct pq3etsec_softc *sc) 1117b8ea2c8cSmatt { 1118b8ea2c8cSmatt struct ethercom * const ec = &sc->sc_ec; 1119b8ea2c8cSmatt struct ifnet * const ifp = &sc->sc_if; 1120b8ea2c8cSmatt struct ether_multi *enm; 1121b8ea2c8cSmatt struct ether_multistep step; 1122b8ea2c8cSmatt uint32_t *gaddr = sc->sc_gaddr + ((sc->sc_rctrl & RCTRL_GHTX) ? 0 : 8); 1123b8ea2c8cSmatt const uint32_t crc_shift = 32 - ((sc->sc_rctrl & RCTRL_GHTX) ? 9 : 8); 1124b8ea2c8cSmatt 1125b8ea2c8cSmatt memset(sc->sc_gaddr, 0, sizeof(sc->sc_gaddr)); 1126b8ea2c8cSmatt memset(sc->sc_macaddrs, 0, sizeof(sc->sc_macaddrs)); 1127b8ea2c8cSmatt 1128b8ea2c8cSmatt ifp->if_flags &= ~IFF_ALLMULTI; 1129b8ea2c8cSmatt 113083759283Smsaitoh ETHER_LOCK(ec); 1131b8ea2c8cSmatt ETHER_FIRST_MULTI(step, ec, enm); 1132b8ea2c8cSmatt for (u_int i = 0; enm != NULL; ) { 1133b8ea2c8cSmatt const char *addr = enm->enm_addrlo; 1134b8ea2c8cSmatt if (memcmp(addr, enm->enm_addrhi, ETHER_ADDR_LEN) != 0) { 1135b8ea2c8cSmatt ifp->if_flags |= IFF_ALLMULTI; 1136b8ea2c8cSmatt memset(gaddr, 0xff, 32 << (crc_shift & 1)); 1137b8ea2c8cSmatt memset(sc->sc_macaddrs, 0, sizeof(sc->sc_macaddrs)); 1138b8ea2c8cSmatt break; 1139b8ea2c8cSmatt } 1140b8ea2c8cSmatt if ((sc->sc_rctrl & RCTRL_EMEN) 1141b8ea2c8cSmatt && i < __arraycount(sc->sc_macaddrs)) { 1142b8ea2c8cSmatt sc->sc_macaddrs[i++] = pq3etsec_macaddr_create(addr); 1143b8ea2c8cSmatt } else { 1144b8ea2c8cSmatt uint32_t crc = ether_crc32_be(addr, 
ETHER_ADDR_LEN); 1145b8ea2c8cSmatt #if 0 1146b8ea2c8cSmatt printf("%s: %s: crc=%#x: %#x: [%u,%u]=%#x\n", __func__, 1147b8ea2c8cSmatt ether_sprintf(addr), crc, 1148b8ea2c8cSmatt crc >> crc_shift, 1149b8ea2c8cSmatt crc >> (crc_shift + 5), 1150b8ea2c8cSmatt (crc >> crc_shift) & 31, 1151b8ea2c8cSmatt 1 << (((crc >> crc_shift) & 31) ^ 31)); 1152b8ea2c8cSmatt #endif 1153b8ea2c8cSmatt /* 1154b8ea2c8cSmatt * The documentation doesn't completely follow PowerPC 1155b8ea2c8cSmatt * bit order. The BE crc32 (H) for 01:00:5E:00:00:01 1156b8ea2c8cSmatt * is 0x7fa32d9b. By empirical testing, the 1157b8ea2c8cSmatt * corresponding hash bit is word 3, bit 31 (ppc bit 1158b8ea2c8cSmatt * order). Since 3 << 31 | 31 is 0x7f, we deduce 1159b8ea2c8cSmatt * H[0:2] selects the register while H[3:7] selects 1160b8ea2c8cSmatt * the bit (ppc bit order). 1161b8ea2c8cSmatt */ 1162b8ea2c8cSmatt crc >>= crc_shift; 1163b8ea2c8cSmatt gaddr[crc / 32] |= 1 << ((crc & 31) ^ 31); 1164b8ea2c8cSmatt } 1165b8ea2c8cSmatt ETHER_NEXT_MULTI(step, enm); 1166b8ea2c8cSmatt } 116783759283Smsaitoh ETHER_UNLOCK(ec); 1168b8ea2c8cSmatt for (u_int i = 0; i < 8; i++) { 1169b8ea2c8cSmatt etsec_write(sc, IGADDR(i), sc->sc_gaddr[i]); 1170b8ea2c8cSmatt etsec_write(sc, GADDR(i), sc->sc_gaddr[i+8]); 1171b8ea2c8cSmatt #if 0 1172b8ea2c8cSmatt if (sc->sc_gaddr[i] || sc->sc_gaddr[i+8]) 1173b8ea2c8cSmatt printf("%s: IGADDR%u(%#x)=%#x GADDR%u(%#x)=%#x\n", __func__, 1174b8ea2c8cSmatt i, IGADDR(i), etsec_read(sc, IGADDR(i)), 1175b8ea2c8cSmatt i, GADDR(i), etsec_read(sc, GADDR(i))); 1176b8ea2c8cSmatt #endif 1177b8ea2c8cSmatt } 1178b8ea2c8cSmatt for (u_int i = 0; i < __arraycount(sc->sc_macaddrs); i++) { 1179b8ea2c8cSmatt uint64_t macaddr = sc->sc_macaddrs[i]; 1180b8ea2c8cSmatt etsec_write(sc, MACnADDR1(i), (uint32_t)(macaddr >> 32)); 1181b8ea2c8cSmatt etsec_write(sc, MACnADDR2(i), (uint32_t)(macaddr >> 0)); 1182b8ea2c8cSmatt #if 0 1183b8ea2c8cSmatt if (macaddr) 1184b8ea2c8cSmatt printf("%s: MAC%02uADDR2(%08x)=%#x 
MAC%02uADDR2(%#x)=%08x\n", __func__, 1185b8ea2c8cSmatt i+1, MACnADDR1(i), etsec_read(sc, MACnADDR1(i)), 1186b8ea2c8cSmatt i+1, MACnADDR2(i), etsec_read(sc, MACnADDR2(i))); 1187b8ea2c8cSmatt #endif 1188b8ea2c8cSmatt } 1189b8ea2c8cSmatt } 1190b8ea2c8cSmatt 1191b8ea2c8cSmatt static int 1192b8ea2c8cSmatt pq3etsec_ifioctl(struct ifnet *ifp, u_long cmd, void *data) 1193b8ea2c8cSmatt { 1194b8ea2c8cSmatt struct pq3etsec_softc *sc = ifp->if_softc; 1195b8ea2c8cSmatt struct ifreq * const ifr = data; 1196b8ea2c8cSmatt const int s = splnet(); 1197b8ea2c8cSmatt int error; 1198b8ea2c8cSmatt 1199b8ea2c8cSmatt switch (cmd) { 1200b8ea2c8cSmatt case SIOCSIFMEDIA: 1201b8ea2c8cSmatt /* Flow control requires full-duplex mode. */ 1202b8ea2c8cSmatt if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO || 1203b8ea2c8cSmatt (ifr->ifr_media & IFM_FDX) == 0) 1204b8ea2c8cSmatt ifr->ifr_media &= ~IFM_ETH_FMASK; 1205b8ea2c8cSmatt if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) { 1206b8ea2c8cSmatt if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) { 1207b8ea2c8cSmatt /* We can do both TXPAUSE and RXPAUSE. 
*/ 1208b8ea2c8cSmatt ifr->ifr_media |= 1209b8ea2c8cSmatt IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; 1210b8ea2c8cSmatt } 1211b8ea2c8cSmatt } 1212b8ea2c8cSmatt error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd); 1213b8ea2c8cSmatt break; 1214b8ea2c8cSmatt 1215b8ea2c8cSmatt default: 1216b8ea2c8cSmatt error = ether_ioctl(ifp, cmd, data); 1217b8ea2c8cSmatt if (error != ENETRESET) 1218b8ea2c8cSmatt break; 1219b8ea2c8cSmatt 1220b8ea2c8cSmatt if (cmd == SIOCADDMULTI || cmd == SIOCDELMULTI) { 1221b8ea2c8cSmatt error = 0; 1222b8ea2c8cSmatt if (ifp->if_flags & IFF_RUNNING) 1223b8ea2c8cSmatt pq3etsec_mc_setup(sc); 1224b8ea2c8cSmatt break; 1225b8ea2c8cSmatt } 1226b8ea2c8cSmatt error = pq3etsec_ifinit(ifp); 1227b8ea2c8cSmatt break; 1228b8ea2c8cSmatt } 1229b8ea2c8cSmatt 1230b8ea2c8cSmatt splx(s); 1231b8ea2c8cSmatt return error; 1232b8ea2c8cSmatt } 1233b8ea2c8cSmatt 1234b8ea2c8cSmatt static void 1235b8ea2c8cSmatt pq3etsec_rxq_desc_presync( 1236b8ea2c8cSmatt struct pq3etsec_softc *sc, 1237b8ea2c8cSmatt struct pq3etsec_rxqueue *rxq, 1238b8ea2c8cSmatt volatile struct rxbd *rxbd, 1239b8ea2c8cSmatt size_t count) 1240b8ea2c8cSmatt { 1241b8ea2c8cSmatt bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap, 1242b8ea2c8cSmatt (rxbd - rxq->rxq_first) * sizeof(*rxbd), count * sizeof(*rxbd), 1243b8ea2c8cSmatt BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1244b8ea2c8cSmatt } 1245b8ea2c8cSmatt 1246b8ea2c8cSmatt static void 1247b8ea2c8cSmatt pq3etsec_rxq_desc_postsync( 1248b8ea2c8cSmatt struct pq3etsec_softc *sc, 1249b8ea2c8cSmatt struct pq3etsec_rxqueue *rxq, 1250b8ea2c8cSmatt volatile struct rxbd *rxbd, 1251b8ea2c8cSmatt size_t count) 1252b8ea2c8cSmatt { 1253b8ea2c8cSmatt bus_dmamap_sync(sc->sc_dmat, rxq->rxq_descmap, 1254b8ea2c8cSmatt (rxbd - rxq->rxq_first) * sizeof(*rxbd), count * sizeof(*rxbd), 1255b8ea2c8cSmatt BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1256b8ea2c8cSmatt } 1257b8ea2c8cSmatt 1258b8ea2c8cSmatt static void 1259b8ea2c8cSmatt pq3etsec_txq_desc_presync( 1260b8ea2c8cSmatt struct 
pq3etsec_softc *sc, 1261b8ea2c8cSmatt struct pq3etsec_txqueue *txq, 1262b8ea2c8cSmatt volatile struct txbd *txbd, 1263b8ea2c8cSmatt size_t count) 1264b8ea2c8cSmatt { 1265b8ea2c8cSmatt bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap, 1266b8ea2c8cSmatt (txbd - txq->txq_first) * sizeof(*txbd), count * sizeof(*txbd), 1267b8ea2c8cSmatt BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1268b8ea2c8cSmatt } 1269b8ea2c8cSmatt 1270b8ea2c8cSmatt static void 1271b8ea2c8cSmatt pq3etsec_txq_desc_postsync( 1272b8ea2c8cSmatt struct pq3etsec_softc *sc, 1273b8ea2c8cSmatt struct pq3etsec_txqueue *txq, 1274b8ea2c8cSmatt volatile struct txbd *txbd, 1275b8ea2c8cSmatt size_t count) 1276b8ea2c8cSmatt { 1277b8ea2c8cSmatt bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap, 1278b8ea2c8cSmatt (txbd - txq->txq_first) * sizeof(*txbd), count * sizeof(*txbd), 1279b8ea2c8cSmatt BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1280b8ea2c8cSmatt } 1281b8ea2c8cSmatt 1282b8ea2c8cSmatt static bus_dmamap_t 1283b8ea2c8cSmatt pq3etsec_mapcache_get( 1284b8ea2c8cSmatt struct pq3etsec_softc *sc, 1285b8ea2c8cSmatt struct pq3etsec_mapcache *dmc) 1286b8ea2c8cSmatt { 12874fe92317Smatt KASSERT(dmc->dmc_nmaps > 0); 1288b8ea2c8cSmatt KASSERT(dmc->dmc_maps[dmc->dmc_nmaps-1] != NULL); 1289b8ea2c8cSmatt return dmc->dmc_maps[--dmc->dmc_nmaps]; 1290b8ea2c8cSmatt } 1291b8ea2c8cSmatt 1292b8ea2c8cSmatt static void 1293b8ea2c8cSmatt pq3etsec_mapcache_put( 1294b8ea2c8cSmatt struct pq3etsec_softc *sc, 1295b8ea2c8cSmatt struct pq3etsec_mapcache *dmc, 1296b8ea2c8cSmatt bus_dmamap_t map) 1297b8ea2c8cSmatt { 1298b8ea2c8cSmatt KASSERT(map != NULL); 1299b8ea2c8cSmatt KASSERT(dmc->dmc_nmaps < dmc->dmc_maxmaps); 1300b8ea2c8cSmatt dmc->dmc_maps[dmc->dmc_nmaps++] = map; 1301b8ea2c8cSmatt } 1302b8ea2c8cSmatt 1303b8ea2c8cSmatt static void 1304b8ea2c8cSmatt pq3etsec_mapcache_destroy( 1305b8ea2c8cSmatt struct pq3etsec_softc *sc, 1306b8ea2c8cSmatt struct pq3etsec_mapcache *dmc) 1307b8ea2c8cSmatt { 1308b8ea2c8cSmatt const size_t dmc_size = 
1309b8ea2c8cSmatt offsetof(struct pq3etsec_mapcache, dmc_maps[dmc->dmc_maxmaps]); 1310b8ea2c8cSmatt 1311b8ea2c8cSmatt for (u_int i = 0; i < dmc->dmc_maxmaps; i++) { 1312b8ea2c8cSmatt bus_dmamap_destroy(sc->sc_dmat, dmc->dmc_maps[i]); 1313b8ea2c8cSmatt } 1314a8a82a56Smatt kmem_intr_free(dmc, dmc_size); 1315b8ea2c8cSmatt } 1316b8ea2c8cSmatt 1317b8ea2c8cSmatt static int 1318b8ea2c8cSmatt pq3etsec_mapcache_create( 1319b8ea2c8cSmatt struct pq3etsec_softc *sc, 1320b8ea2c8cSmatt struct pq3etsec_mapcache **dmc_p, 1321b8ea2c8cSmatt size_t maxmaps, 1322b8ea2c8cSmatt size_t maxmapsize, 1323b8ea2c8cSmatt size_t maxseg) 1324b8ea2c8cSmatt { 1325b8ea2c8cSmatt const size_t dmc_size = 1326b8ea2c8cSmatt offsetof(struct pq3etsec_mapcache, dmc_maps[maxmaps]); 1327a8a82a56Smatt struct pq3etsec_mapcache * const dmc = 1328a8a82a56Smatt kmem_intr_zalloc(dmc_size, KM_NOSLEEP); 1329b8ea2c8cSmatt 1330b8ea2c8cSmatt dmc->dmc_maxmaps = maxmaps; 13314fe92317Smatt dmc->dmc_nmaps = maxmaps; 1332b8ea2c8cSmatt dmc->dmc_maxmapsize = maxmapsize; 1333b8ea2c8cSmatt dmc->dmc_maxseg = maxseg; 1334b8ea2c8cSmatt 13354fe92317Smatt for (u_int i = 0; i < maxmaps; i++) { 1336b8ea2c8cSmatt int error = bus_dmamap_create(sc->sc_dmat, dmc->dmc_maxmapsize, 1337b8ea2c8cSmatt dmc->dmc_maxseg, dmc->dmc_maxmapsize, 0, 1338b8ea2c8cSmatt BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &dmc->dmc_maps[i]); 1339b8ea2c8cSmatt if (error) { 1340b8ea2c8cSmatt aprint_error_dev(sc->sc_dev, 1341b8ea2c8cSmatt "failed to creat dma map cache " 13424fe92317Smatt "entry %u of %zu: %d\n", 13434fe92317Smatt i, maxmaps, error); 1344b8ea2c8cSmatt while (i-- > 0) { 1345b8ea2c8cSmatt bus_dmamap_destroy(sc->sc_dmat, 1346b8ea2c8cSmatt dmc->dmc_maps[i]); 1347b8ea2c8cSmatt } 1348a8a82a56Smatt kmem_intr_free(dmc, dmc_size); 1349b8ea2c8cSmatt return error; 1350b8ea2c8cSmatt } 1351b8ea2c8cSmatt KASSERT(dmc->dmc_maps[i] != NULL); 1352b8ea2c8cSmatt } 1353b8ea2c8cSmatt 1354b8ea2c8cSmatt *dmc_p = dmc; 1355b8ea2c8cSmatt 1356b8ea2c8cSmatt return 0; 1357b8ea2c8cSmatt } 
1358b8ea2c8cSmatt 1359b8ea2c8cSmatt #if 0 1360b8ea2c8cSmatt static void 1361b8ea2c8cSmatt pq3etsec_dmamem_free( 1362b8ea2c8cSmatt bus_dma_tag_t dmat, 1363b8ea2c8cSmatt size_t map_size, 1364b8ea2c8cSmatt bus_dma_segment_t *seg, 1365b8ea2c8cSmatt bus_dmamap_t map, 1366b8ea2c8cSmatt void *kvap) 1367b8ea2c8cSmatt { 1368b8ea2c8cSmatt bus_dmamap_destroy(dmat, map); 1369b8ea2c8cSmatt bus_dmamem_unmap(dmat, kvap, map_size); 1370b8ea2c8cSmatt bus_dmamem_free(dmat, seg, 1); 1371b8ea2c8cSmatt } 1372b8ea2c8cSmatt #endif 1373b8ea2c8cSmatt 1374b8ea2c8cSmatt static int 1375b8ea2c8cSmatt pq3etsec_dmamem_alloc( 1376b8ea2c8cSmatt bus_dma_tag_t dmat, 1377b8ea2c8cSmatt size_t map_size, 1378b8ea2c8cSmatt bus_dma_segment_t *seg, 1379b8ea2c8cSmatt bus_dmamap_t *map, 1380b8ea2c8cSmatt void **kvap) 1381b8ea2c8cSmatt { 1382b8ea2c8cSmatt int error; 1383b8ea2c8cSmatt int nseg; 1384b8ea2c8cSmatt 1385b8ea2c8cSmatt *kvap = NULL; 1386b8ea2c8cSmatt *map = NULL; 1387b8ea2c8cSmatt 1388b8ea2c8cSmatt error = bus_dmamem_alloc(dmat, map_size, PAGE_SIZE, 0, 1389b8ea2c8cSmatt seg, 1, &nseg, 0); 1390b8ea2c8cSmatt if (error) 1391b8ea2c8cSmatt return error; 1392b8ea2c8cSmatt 1393b8ea2c8cSmatt KASSERT(nseg == 1); 1394b8ea2c8cSmatt 1395b8ea2c8cSmatt error = bus_dmamem_map(dmat, seg, nseg, map_size, (void **)kvap, 1396b8ea2c8cSmatt BUS_DMA_COHERENT); 1397b8ea2c8cSmatt if (error == 0) { 1398b8ea2c8cSmatt error = bus_dmamap_create(dmat, map_size, 1, map_size, 0, 0, 1399b8ea2c8cSmatt map); 1400b8ea2c8cSmatt if (error == 0) { 1401b8ea2c8cSmatt error = bus_dmamap_load(dmat, *map, *kvap, map_size, 1402b8ea2c8cSmatt NULL, 0); 1403b8ea2c8cSmatt if (error == 0) 1404b8ea2c8cSmatt return 0; 1405b8ea2c8cSmatt bus_dmamap_destroy(dmat, *map); 1406b8ea2c8cSmatt *map = NULL; 1407b8ea2c8cSmatt } 1408b8ea2c8cSmatt bus_dmamem_unmap(dmat, *kvap, map_size); 1409b8ea2c8cSmatt *kvap = NULL; 1410b8ea2c8cSmatt } 1411b8ea2c8cSmatt bus_dmamem_free(dmat, seg, nseg); 1412b8ea2c8cSmatt return 0; 1413b8ea2c8cSmatt } 1414b8ea2c8cSmatt 
/*
 * Allocate one receive buffer: a cluster mbuf paired with a dmamap taken
 * from the rx mapcache.  The dmamap is stashed in the mbuf's packet-header
 * context (M_SETCTX) so it travels with the buffer; the buffer is loaded
 * and pre-synced for a device read.  Returns NULL on any failure with all
 * partially-acquired resources released.
 */
static struct mbuf *
pq3etsec_rx_buf_alloc(
	struct pq3etsec_softc *sc)
{
	struct mbuf *m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		printf("%s:%d: %s\n", __func__, __LINE__, "m_gethdr");
		return NULL;
	}
	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		printf("%s:%d: %s\n", __func__, __LINE__, "MCLGET");
		m_freem(m);
		return NULL;
	}
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

	bus_dmamap_t map = pq3etsec_mapcache_get(sc, sc->sc_rx_mapcache);
	if (map == NULL) {
		printf("%s:%d: %s\n", __func__, __LINE__, "map get");
		m_freem(m);
		return NULL;
	}
	M_SETCTX(m, map);
	/* Load exactly one cluster's worth; KASSERT below relies on this. */
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	int error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
	    BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error) {
		aprint_error_dev(sc->sc_dev, "fail to load rx dmamap: %d\n",
		    error);
		/* Clear the context before freeing so the map isn't freed twice. */
		M_SETCTX(m, NULL);
		m_freem(m);
		pq3etsec_mapcache_put(sc, sc->sc_rx_mapcache, map);
		return NULL;
	}
	KASSERT(map->dm_mapsize == MCLBYTES);
	bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);

	return m;
}

/*
 * Undo the DMA state of every buffer in an rx mbuf chain: post-sync,
 * unload, and return each buffer's dmamap (kept in the mbuf context)
 * to the rx mapcache.  The mbufs themselves are not freed.
 */
static void
pq3etsec_rx_map_unload(
	struct pq3etsec_softc *sc,
	struct mbuf *m)
{
	KASSERT(m);
	for (; m != NULL; m = m->m_next) {
		bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
		KASSERT(map);
		KASSERT(map->dm_mapsize == MCLBYTES);
		bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_len,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, map);
		pq3etsec_mapcache_put(sc, sc->sc_rx_mapcache, map);
		M_SETCTX(m, NULL);
	}
}

/*
 * Refill the receive ring up to rxq_threshold buffers.  Buffers come from
 * the sc_rx_bufcache free list first, then from pq3etsec_rx_buf_alloc().
 * Descriptors handed to the device are published with membar_producer()
 * before setting RXBD_E (empty/device-owned) and pre-synced in batches.
 * If the ETSEC halted the queue for lack of buffers, the halt is cleared
 * by writing the queue's bit to RSTAT.  Always returns true.
 */
static bool
pq3etsec_rxq_produce(
	struct pq3etsec_softc *sc,
	struct pq3etsec_rxqueue *rxq)
{
	volatile struct rxbd *producer = rxq->rxq_producer;
#if 0
	size_t inuse = rxq->rxq_inuse;
#endif
	while (rxq->rxq_inuse < rxq->rxq_threshold) {
		struct mbuf *m;
		IF_DEQUEUE(&sc->sc_rx_bufcache, m);
		if (m == NULL) {
			m = pq3etsec_rx_buf_alloc(sc);
			if (m == NULL) {
				printf("%s: pq3etsec_rx_buf_alloc failed\n", __func__);
				break;
			}
		}
		bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
		KASSERT(map);

#ifdef ETSEC_DEBUG
		KASSERT(rxq->rxq_mbufs[producer-rxq->rxq_first] == NULL);
		rxq->rxq_mbufs[producer-rxq->rxq_first] = m;
#endif

		/* rxbd_len is write-only by the ETSEC */
		producer->rxbd_bufptr = map->dm_segs[0].ds_addr;
		/* bufptr must be visible before the descriptor is marked empty */
		membar_producer();
		producer->rxbd_flags |= RXBD_E;
		if (__predict_false(rxq->rxq_mhead == NULL)) {
			/* Ring was empty: this buffer is the next to consume. */
			KASSERT(producer == rxq->rxq_consumer);
			rxq->rxq_mconsumer = m;
		}
		/* Append to the shadow mbuf list that mirrors the ring order. */
		*rxq->rxq_mtail = m;
		rxq->rxq_mtail = &m->m_next;
		m->m_len = MCLBYTES;
		m->m_next = NULL;
		rxq->rxq_inuse++;
		if (++producer == rxq->rxq_last) {
			/* Wrapped: sync the tail of the ring and restart at first. */
			membar_producer();
			pq3etsec_rxq_desc_presync(sc, rxq, rxq->rxq_producer,
			    rxq->rxq_last - rxq->rxq_producer);
			producer = rxq->rxq_producer = rxq->rxq_first;
		}
	}
	if (producer != rxq->rxq_producer) {
		membar_producer();
		pq3etsec_rxq_desc_presync(sc, rxq, rxq->rxq_producer,
		    producer - rxq->rxq_producer);
		rxq->rxq_producer = producer;
	}
	uint32_t qhlt = etsec_read(sc, RSTAT) & RSTAT_QHLT;
	if (qhlt) {
		KASSERT(qhlt & rxq->rxq_qmask);
		sc->sc_ev_rx_stall.ev_count++;
		/* RSTAT halt bits are write-1-to-clear. */
		etsec_write(sc, RSTAT, RSTAT_QHLT & rxq->rxq_qmask);
	}
#if 0
	aprint_normal_dev(sc->sc_dev,
	    "%s: buffers inuse went from %zu to %zu\n",
	    __func__, inuse, rxq->rxq_inuse);
#endif
	return true;
}

/*
 * Translate the receive frame control block (FCB) the ETSEC prepended to
 * the packet into mbuf metadata: VLAN tag and IP/TCP/UDP checksum result
 * flags (good or _BAD variants).  Always returns true.
 */
static bool
pq3etsec_rx_offload(
	struct pq3etsec_softc *sc,
	struct mbuf *m,
	const struct rxfcb *fcb)
{
	if (fcb->rxfcb_flags & RXFCB_VLN) {
		vlan_set_tag(m, fcb->rxfcb_vlctl);
	}
	/* Nothing more to do unless the hardware parsed and checksummed it. */
	if ((fcb->rxfcb_flags & RXFCB_IP) == 0
	    || (fcb->rxfcb_flags & (RXFCB_CIP | RXFCB_CTU)) == 0)
		return true;
	int csum_flags = 0;
	/* IPv4 header checksum was verified (CIP set, not IPv6). */
	if ((fcb->rxfcb_flags & (RXFCB_IP6 | RXFCB_CIP)) == RXFCB_CIP) {
		csum_flags |= M_CSUM_IPv4;
		if (fcb->rxfcb_flags & RXFCB_EIP)
			csum_flags |= M_CSUM_IPv4_BAD;
	}
	/* TCP/UDP checksum was verified. */
	if ((fcb->rxfcb_flags & RXFCB_CTU) == RXFCB_CTU) {
		int ipv_flags;
		if (fcb->rxfcb_flags & RXFCB_IP6)
			ipv_flags = M_CSUM_TCPv6 | M_CSUM_UDPv6;
		else
			ipv_flags = M_CSUM_TCPv4 | M_CSUM_UDPv4;
		if (fcb->rxfcb_pro == IPPROTO_TCP) {
			csum_flags |= (M_CSUM_TCPv4 |M_CSUM_TCPv6) & ipv_flags;
		} else {
			csum_flags |= (M_CSUM_UDPv4 |M_CSUM_UDPv6) & ipv_flags;
		}
		if (fcb->rxfcb_flags & RXFCB_ETU)
			csum_flags |= M_CSUM_TCP_UDP_BAD;
	}

	m->m_pkthdr.csum_flags = csum_flags;
	return true;
}

/*
 * Final processing of a completed receive packet: release its DMA
 * resources, apply FCB offload results (when the parser is enabled),
 * trim the FCB/alignment padding, set address-type flags from the
 * descriptor, and hand the packet to the network stack.
 */
static void
pq3etsec_rx_input(
	struct pq3etsec_softc *sc,
	struct mbuf *m,
	uint16_t rxbd_flags)
{
	struct ifnet * const ifp = &sc->sc_if;

	pq3etsec_rx_map_unload(sc, m);

	if ((sc->sc_rctrl & RCTRL_PRSDEP) != RCTRL_PRSDEP_OFF) {
		struct rxfcb fcb = *mtod(m, struct rxfcb *);
		if (!pq3etsec_rx_offload(sc, m, &fcb))
			return;
	}
	/* Strip the FCB and/or padding in front of the ethernet header. */
	m_adj(m, sc->sc_rx_adjlen);

	if (rxbd_flags & RXBD_M)
		m->m_flags |= M_PROMISC;
	if (rxbd_flags & RXBD_BC)
		m->m_flags |= M_BCAST;
	if (rxbd_flags & RXBD_MC)
		m->m_flags |= M_MCAST;
	m->m_flags |= M_HASFCS;		/* FCS still present; stack will trim */
	m_set_rcvif(m, &sc->sc_if);

	/*
	 * Let's give it to the network subsystem to deal with.
	 */
	if_percpuq_enqueue(ifp->if_percpuq, m);
}
/*
 * Harvest completed receive descriptors.  Walks the ring from the consumer
 * index until it meets the producer or a descriptor the device still owns
 * (RXBD_E set).  Complete good frames (RXBD_L, no error bits) are passed to
 * pq3etsec_rx_input(); errored frames have their buffers recycled onto
 * sc_rx_bufcache.  The shadow mbuf list (rxq_mhead/rxq_mtail/rxq_mconsumer)
 * is kept in lock-step with the ring.
 */
static void
pq3etsec_rxq_consume(
	struct pq3etsec_softc *sc,
	struct pq3etsec_rxqueue *rxq)
{
	struct ifnet * const ifp = &sc->sc_if;
	volatile struct rxbd *consumer = rxq->rxq_consumer;
	size_t rxconsumed = 0;

	/* Ack the receive-frame event for this queue (write-1-to-clear). */
	etsec_write(sc, RSTAT, RSTAT_RXF & rxq->rxq_qmask);

	for (;;) {
		if (consumer == rxq->rxq_producer) {
			rxq->rxq_consumer = consumer;
			rxq->rxq_inuse -= rxconsumed;
			KASSERT(rxq->rxq_inuse == 0);
			break;
		}
		pq3etsec_rxq_desc_postsync(sc, rxq, consumer, 1);
		const uint16_t rxbd_flags = consumer->rxbd_flags;
		if (rxbd_flags & RXBD_E) {
			/* Device still owns this descriptor; stop here. */
			rxq->rxq_consumer = consumer;
			rxq->rxq_inuse -= rxconsumed;
			break;
		}
		KASSERT(rxq->rxq_mconsumer != NULL);
#ifdef ETSEC_DEBUG
		KASSERT(rxq->rxq_mbufs[consumer - rxq->rxq_first] == rxq->rxq_mconsumer);
#endif
#if 0
		printf("%s: rxdb[%u]: flags=%#x len=%#x: %08x %08x %08x %08x\n",
		    __func__,
		    consumer - rxq->rxq_first, rxbd_flags, consumer->rxbd_len,
		    mtod(rxq->rxq_mconsumer, int *)[0],
		    mtod(rxq->rxq_mconsumer, int *)[1],
		    mtod(rxq->rxq_mconsumer, int *)[2],
		    mtod(rxq->rxq_mconsumer, int *)[3]);
#endif
		/*
		 * We own this packet again.  Clear all flags except wrap
		 * (and interrupt).
		 */
		rxconsumed++;
		consumer->rxbd_flags = rxbd_flags & (RXBD_W | RXBD_I);

		/*
		 * If this descriptor has the LAST bit set and no errors,
		 * it's a valid input packet.
		 */
		if ((rxbd_flags & (RXBD_L | RXBD_ERRORS)) == RXBD_L) {
			/* rxbd_len on the LAST descriptor is the total frame length. */
			size_t rxbd_len = consumer->rxbd_len;
			struct mbuf *m = rxq->rxq_mhead;
			struct mbuf *m_last = rxq->rxq_mconsumer;
			/* Detach the frame's mbuf chain from the shadow list. */
			if ((rxq->rxq_mhead = m_last->m_next) == NULL)
				rxq->rxq_mtail = &rxq->rxq_mhead;
			rxq->rxq_mconsumer = rxq->rxq_mhead;
			m_last->m_next = NULL;
			/* Last buffer holds the remainder modulo a full cluster. */
			m_last->m_len = rxbd_len & (MCLBYTES - 1);
			m->m_pkthdr.len = rxbd_len;
			pq3etsec_rx_input(sc, m, rxbd_flags);
		} else if (rxbd_flags & RXBD_L) {
			KASSERT(rxbd_flags & RXBD_ERRORS);
			struct mbuf *m;
			/*
			 * We encountered an error, take the mbufs and add
			 * them to the rx bufcache so we can reuse them.
			 */
			if_statinc(ifp, if_ierrors);
			for (m = rxq->rxq_mhead;
			     m != rxq->rxq_mconsumer;
			     m = m->m_next) {
				IF_ENQUEUE(&sc->sc_rx_bufcache, m);
			}
			m = rxq->rxq_mconsumer;
			if ((rxq->rxq_mhead = m->m_next) == NULL)
				rxq->rxq_mtail = &rxq->rxq_mhead;
			rxq->rxq_mconsumer = m->m_next;
			IF_ENQUEUE(&sc->sc_rx_bufcache, m);
		} else {
			/* Middle-of-frame descriptor; just advance the shadow list. */
			rxq->rxq_mconsumer = rxq->rxq_mconsumer->m_next;
		}
#ifdef ETSEC_DEBUG
		rxq->rxq_mbufs[consumer - rxq->rxq_first] = NULL;
#endif

		/*
		 * Wrap at the last entry!
		 */
		if (rxbd_flags & RXBD_W) {
			KASSERT(consumer + 1 == rxq->rxq_last);
			consumer = rxq->rxq_first;
		} else {
			consumer++;
		}
#ifdef ETSEC_DEBUG
		KASSERT(rxq->rxq_mbufs[consumer - rxq->rxq_first] == rxq->rxq_mconsumer);
#endif
	}

	/* Feed the entropy pool with receive activity. */
	if (rxconsumed != 0)
		rnd_add_uint32(&sc->rnd_source, rxconsumed);
}

/*
 * Drop or recycle every mbuf tracked by the receive queue and reset the
 * shadow list to empty.  When 'discard' is true the buffers are unloaded
 * from DMA and freed; otherwise they are pushed onto sc_rx_bufcache for
 * reuse (their dmamaps stay loaded in that case).
 */
static void
pq3etsec_rxq_purge(
	struct pq3etsec_softc *sc,
	struct pq3etsec_rxqueue *rxq,
	bool discard)
{
	struct mbuf *m;

	if ((m = rxq->rxq_mhead) != NULL) {
#ifdef ETSEC_DEBUG
		memset(rxq->rxq_mbufs, 0, sizeof(rxq->rxq_mbufs));
#endif

		if (discard) {
			pq3etsec_rx_map_unload(sc, m);
			m_freem(m);
		} else {
			while (m != NULL) {
				struct mbuf *m0 = m->m_next;
				m->m_next = NULL;
				IF_ENQUEUE(&sc->sc_rx_bufcache, m);
				m = m0;
			}
		}
	}

	rxq->rxq_mconsumer = NULL;
	rxq->rxq_mhead = NULL;
	rxq->rxq_mtail = &rxq->rxq_mhead;
	rxq->rxq_inuse = 0;
}

/*
 * Reinitialize a receive ring to its empty power-on state: every
 * descriptor owned by software, wrap flag on the last one, indexes reset,
 * and the ring base programmed into the ETSEC.
 */
static void
pq3etsec_rxq_reset(
	struct pq3etsec_softc *sc,
	struct pq3etsec_rxqueue *rxq)
{
	/*
	 * sync all the descriptors
	 */
	pq3etsec_rxq_desc_postsync(sc, rxq, rxq->rxq_first,
	    rxq->rxq_last - rxq->rxq_first);

	/*
	 * Make sure we own all descriptors in the ring.
	 */
	volatile struct rxbd *rxbd;
	for (rxbd = rxq->rxq_first; rxbd < rxq->rxq_last - 1; rxbd++) {
		rxbd->rxbd_flags = RXBD_I;
	}

	/*
	 * Last descriptor has the wrap flag.
	 */
	rxbd->rxbd_flags = RXBD_W | RXBD_I;

	/*
	 * Reset the producer consumer indexes.
	 */
	rxq->rxq_consumer = rxq->rxq_first;
	rxq->rxq_producer = rxq->rxq_first;
	rxq->rxq_inuse = 0;
	if (rxq->rxq_threshold < ETSEC_MINRXMBUFS)
		rxq->rxq_threshold = ETSEC_MINRXMBUFS;

	sc->sc_imask |= IEVENT_RXF | IEVENT_BSY;

	/*
	 * Restart the receiver at the first descriptor
	 * (rxq_reg_rbase is the receive ring base register).
	 */
	etsec_write(sc, rxq->rxq_reg_rbase, rxq->rxq_descmap->dm_segs->ds_addr);
}

/*
 * One-time attach of receive queue 'qno': allocate one page of DMA-able
 * descriptor memory, initialize the ring bookkeeping, and record the
 * queue's base register and RSTAT bit mask.  Returns 0 or a bus_dma error.
 */
static int
pq3etsec_rxq_attach(
	struct pq3etsec_softc *sc,
	struct pq3etsec_rxqueue *rxq,
	u_int qno)
{
	size_t map_size = PAGE_SIZE;
	size_t desc_count = map_size / sizeof(struct rxbd);
	int error;
	void *descs;

	error = pq3etsec_dmamem_alloc(sc->sc_dmat, map_size,
	    &rxq->rxq_descmap_seg, &rxq->rxq_descmap, &descs);
	if (error)
		return error;

	memset(descs, 0, map_size);
	rxq->rxq_first = descs;
	rxq->rxq_last = rxq->rxq_first + desc_count;
	rxq->rxq_consumer = descs;
	rxq->rxq_producer = descs;

	pq3etsec_rxq_purge(sc, rxq, true);
	pq3etsec_rxq_reset(sc, rxq);

	rxq->rxq_reg_rbase = RBASEn(qno);
	rxq->rxq_qmask = RSTAT_QHLTn(qno) | RSTAT_RXFn(qno);

	return 0;
}
1821b8ea2c8cSmatt { 1822b8ea2c8cSmatt return !IF_IS_EMPTY(&txq->txq_mbufs); 1823b8ea2c8cSmatt } 1824b8ea2c8cSmatt 1825b8ea2c8cSmatt static bool 1826b8ea2c8cSmatt pq3etsec_txq_fillable_p( 1827b8ea2c8cSmatt struct pq3etsec_softc * const sc, 1828b8ea2c8cSmatt struct pq3etsec_txqueue *txq) 1829b8ea2c8cSmatt { 1830b8ea2c8cSmatt return txq->txq_free >= txq->txq_threshold; 1831b8ea2c8cSmatt } 1832b8ea2c8cSmatt 1833b8ea2c8cSmatt static int 1834b8ea2c8cSmatt pq3etsec_txq_attach( 1835b8ea2c8cSmatt struct pq3etsec_softc *sc, 1836b8ea2c8cSmatt struct pq3etsec_txqueue *txq, 1837b8ea2c8cSmatt u_int qno) 1838b8ea2c8cSmatt { 1839b8ea2c8cSmatt size_t map_size = PAGE_SIZE; 1840b8ea2c8cSmatt size_t desc_count = map_size / sizeof(struct txbd); 1841b8ea2c8cSmatt int error; 1842b8ea2c8cSmatt void *descs; 1843b8ea2c8cSmatt 1844b8ea2c8cSmatt error = pq3etsec_dmamem_alloc(sc->sc_dmat, map_size, 1845b8ea2c8cSmatt &txq->txq_descmap_seg, &txq->txq_descmap, &descs); 1846b8ea2c8cSmatt if (error) 1847b8ea2c8cSmatt return error; 1848b8ea2c8cSmatt 1849b8ea2c8cSmatt memset(descs, 0, map_size); 1850b8ea2c8cSmatt txq->txq_first = descs; 1851b8ea2c8cSmatt txq->txq_last = txq->txq_first + desc_count; 1852b8ea2c8cSmatt txq->txq_consumer = descs; 1853b8ea2c8cSmatt txq->txq_producer = descs; 1854b8ea2c8cSmatt 1855b8ea2c8cSmatt IFQ_SET_MAXLEN(&txq->txq_mbufs, ETSEC_MAXTXMBUFS); 1856b8ea2c8cSmatt 1857b8ea2c8cSmatt txq->txq_reg_tbase = TBASEn(qno); 1858b8ea2c8cSmatt txq->txq_qmask = TSTAT_THLTn(qno) | TSTAT_TXFn(qno); 1859b8ea2c8cSmatt 1860b8ea2c8cSmatt pq3etsec_txq_reset(sc, txq); 1861b8ea2c8cSmatt 1862b8ea2c8cSmatt return 0; 1863b8ea2c8cSmatt } 1864b8ea2c8cSmatt 1865b8ea2c8cSmatt static int 1866b8ea2c8cSmatt pq3etsec_txq_map_load( 1867b8ea2c8cSmatt struct pq3etsec_softc *sc, 1868b8ea2c8cSmatt struct pq3etsec_txqueue *txq, 1869b8ea2c8cSmatt struct mbuf *m) 1870b8ea2c8cSmatt { 1871b8ea2c8cSmatt bus_dmamap_t map; 1872b8ea2c8cSmatt int error; 1873b8ea2c8cSmatt 1874b8ea2c8cSmatt map = M_GETCTX(m, bus_dmamap_t); 
1875b8ea2c8cSmatt if (map != NULL) 1876b8ea2c8cSmatt return 0; 1877b8ea2c8cSmatt 1878b8ea2c8cSmatt map = pq3etsec_mapcache_get(sc, sc->sc_tx_mapcache); 1879b8ea2c8cSmatt if (map == NULL) 1880b8ea2c8cSmatt return ENOMEM; 1881b8ea2c8cSmatt 1882b8ea2c8cSmatt error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m, 1883b8ea2c8cSmatt BUS_DMA_WRITE | BUS_DMA_NOWAIT); 1884b8ea2c8cSmatt if (error) 1885b8ea2c8cSmatt return error; 1886b8ea2c8cSmatt 1887b8ea2c8cSmatt bus_dmamap_sync(sc->sc_dmat, map, 0, m->m_pkthdr.len, 1888b8ea2c8cSmatt BUS_DMASYNC_PREWRITE); 1889b8ea2c8cSmatt M_SETCTX(m, map); 1890b8ea2c8cSmatt return 0; 1891b8ea2c8cSmatt } 1892b8ea2c8cSmatt 1893b8ea2c8cSmatt static void 1894b8ea2c8cSmatt pq3etsec_txq_map_unload( 1895b8ea2c8cSmatt struct pq3etsec_softc *sc, 1896b8ea2c8cSmatt struct pq3etsec_txqueue *txq, 1897b8ea2c8cSmatt struct mbuf *m) 1898b8ea2c8cSmatt { 1899b8ea2c8cSmatt KASSERT(m); 1900b8ea2c8cSmatt bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t); 1901b8ea2c8cSmatt bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, 1902b8ea2c8cSmatt BUS_DMASYNC_POSTWRITE); 1903b8ea2c8cSmatt bus_dmamap_unload(sc->sc_dmat, map); 1904b8ea2c8cSmatt pq3etsec_mapcache_put(sc, sc->sc_tx_mapcache, map); 1905b8ea2c8cSmatt } 1906b8ea2c8cSmatt 1907b8ea2c8cSmatt static bool 1908b8ea2c8cSmatt pq3etsec_txq_produce( 1909b8ea2c8cSmatt struct pq3etsec_softc *sc, 1910b8ea2c8cSmatt struct pq3etsec_txqueue *txq, 1911b8ea2c8cSmatt struct mbuf *m) 1912b8ea2c8cSmatt { 1913b8ea2c8cSmatt bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t); 1914b8ea2c8cSmatt 1915b8ea2c8cSmatt if (map->dm_nsegs > txq->txq_free) 1916b8ea2c8cSmatt return false; 1917b8ea2c8cSmatt 1918b8ea2c8cSmatt /* 1919b8ea2c8cSmatt * TCP Offload flag must be set in the first descriptor. 1920b8ea2c8cSmatt */ 1921b8ea2c8cSmatt volatile struct txbd *producer = txq->txq_producer; 1922b8ea2c8cSmatt uint16_t last_flags = TXBD_L; 1923b8ea2c8cSmatt uint16_t first_flags = TXBD_R 1924b8ea2c8cSmatt | ((m->m_flags & M_HASFCB) ? 
TXBD_TOE : 0); 1925b8ea2c8cSmatt 1926b8ea2c8cSmatt /* 1927b8ea2c8cSmatt * If we've produced enough descriptors without consuming any 1928b8ea2c8cSmatt * we need to ask for an interrupt to reclaim some. 1929b8ea2c8cSmatt */ 1930b8ea2c8cSmatt txq->txq_lastintr += map->dm_nsegs; 19316e15a820Snonaka if (ETSEC_IC_TX_ENABLED(sc) 19326e15a820Snonaka || txq->txq_lastintr >= txq->txq_threshold 1933b8ea2c8cSmatt || txq->txq_mbufs.ifq_len + 1 == txq->txq_mbufs.ifq_maxlen) { 1934b8ea2c8cSmatt txq->txq_lastintr = 0; 1935b8ea2c8cSmatt last_flags |= TXBD_I; 1936b8ea2c8cSmatt } 1937b8ea2c8cSmatt 1938b8ea2c8cSmatt #ifdef ETSEC_DEBUG 1939b8ea2c8cSmatt KASSERT(txq->txq_lmbufs[producer - txq->txq_first] == NULL); 1940b8ea2c8cSmatt #endif 1941b8ea2c8cSmatt KASSERT(producer != txq->txq_last); 1942b8ea2c8cSmatt producer->txbd_bufptr = map->dm_segs[0].ds_addr; 1943b8ea2c8cSmatt producer->txbd_len = map->dm_segs[0].ds_len; 1944b8ea2c8cSmatt 1945b8ea2c8cSmatt if (map->dm_nsegs > 1) { 1946b8ea2c8cSmatt volatile struct txbd *start = producer + 1; 1947b8ea2c8cSmatt size_t count = map->dm_nsegs - 1; 1948b8ea2c8cSmatt for (u_int i = 1; i < map->dm_nsegs; i++) { 1949b8ea2c8cSmatt if (__predict_false(++producer == txq->txq_last)) { 1950b8ea2c8cSmatt producer = txq->txq_first; 1951b8ea2c8cSmatt if (start < txq->txq_last) { 1952b8ea2c8cSmatt pq3etsec_txq_desc_presync(sc, txq, 1953b8ea2c8cSmatt start, txq->txq_last - start); 1954b8ea2c8cSmatt count -= txq->txq_last - start; 1955b8ea2c8cSmatt } 1956b8ea2c8cSmatt start = txq->txq_first; 1957b8ea2c8cSmatt } 1958b8ea2c8cSmatt #ifdef ETSEC_DEBUG 1959b8ea2c8cSmatt KASSERT(txq->txq_lmbufs[producer - txq->txq_first] == NULL); 1960b8ea2c8cSmatt #endif 1961b8ea2c8cSmatt producer->txbd_bufptr = map->dm_segs[i].ds_addr; 1962b8ea2c8cSmatt producer->txbd_len = map->dm_segs[i].ds_len; 1963b8ea2c8cSmatt producer->txbd_flags = TXBD_R 1964b8ea2c8cSmatt | (producer->txbd_flags & TXBD_W) 1965b8ea2c8cSmatt | (i == map->dm_nsegs - 1 ? 
last_flags : 0); 1966b8ea2c8cSmatt #if 0 1967b8ea2c8cSmatt printf("%s: txbd[%u]=%#x/%u/%#x\n", __func__, producer - txq->txq_first, 1968b8ea2c8cSmatt producer->txbd_flags, producer->txbd_len, producer->txbd_bufptr); 1969b8ea2c8cSmatt #endif 1970b8ea2c8cSmatt } 1971b8ea2c8cSmatt pq3etsec_txq_desc_presync(sc, txq, start, count); 1972b8ea2c8cSmatt } else { 1973b8ea2c8cSmatt first_flags |= last_flags; 1974b8ea2c8cSmatt } 1975b8ea2c8cSmatt 1976b8ea2c8cSmatt membar_producer(); 1977b8ea2c8cSmatt txq->txq_producer->txbd_flags = 1978b8ea2c8cSmatt first_flags | (txq->txq_producer->txbd_flags & TXBD_W); 1979b8ea2c8cSmatt #if 0 1980b8ea2c8cSmatt printf("%s: txbd[%u]=%#x/%u/%#x\n", __func__, 1981b8ea2c8cSmatt txq->txq_producer - txq->txq_first, txq->txq_producer->txbd_flags, 1982b8ea2c8cSmatt txq->txq_producer->txbd_len, txq->txq_producer->txbd_bufptr); 1983b8ea2c8cSmatt #endif 1984b8ea2c8cSmatt pq3etsec_txq_desc_presync(sc, txq, txq->txq_producer, 1); 1985b8ea2c8cSmatt 1986b8ea2c8cSmatt /* 1987b8ea2c8cSmatt * Reduce free count by the number of segments we consumed. 
/*
 * Build and prepend a transmit frame control block (FCB) describing the
 * VLAN tag and checksum-offload work the ETSEC must perform on *mp.  If
 * no offload is requested, M_HASFCB is cleared and the packet is left
 * untouched.  Space for the FCB is found, in order of preference: in the
 * mbuf's leading space, by shifting data within the packet-header mbuf,
 * or by prepending a new mbuf (which may replace *mp).  If that final
 * allocation fails, any deferred checksums are computed in software
 * instead and the packet is sent without an FCB.
 */
static void
pq3etsec_tx_offload(
	struct pq3etsec_softc *sc,
	struct pq3etsec_txqueue *txq,
	struct mbuf **mp)
{
	struct mbuf *m = *mp;
	u_int csum_flags = m->m_pkthdr.csum_flags;
	bool have_vtag;
	uint16_t vtag;

	KASSERT(m->m_flags & M_PKTHDR);

	have_vtag = vlan_has_tag(m);
	vtag = (have_vtag) ? vlan_get_tag(m) : 0;

	/*
	 * Let see if we are doing any offload first.
	 */
	if (csum_flags == 0 && !have_vtag) {
		m->m_flags &= ~M_HASFCB;
		return;
	}

	/* Map the mbuf checksum requests onto FCB flag bits. */
	uint16_t flags = 0;
	if (csum_flags & M_CSUM_IP) {
		flags |= TXFCB_IP
		    | ((csum_flags & M_CSUM_IP6) ? TXFCB_IP6 : 0)
		    | ((csum_flags & M_CSUM_TUP) ? TXFCB_TUP : 0)
		    | ((csum_flags & M_CSUM_UDP) ? TXFCB_UDP : 0)
		    | ((csum_flags & M_CSUM_CIP) ? TXFCB_CIP : 0)
		    | ((csum_flags & M_CSUM_CTU) ? TXFCB_CTU : 0);
	}
	if (have_vtag) {
		flags |= TXFCB_VLN;
	}
	if (flags == 0) {
		m->m_flags &= ~M_HASFCB;
		return;
	}

	struct txfcb fcb;
	fcb.txfcb_flags = flags;
	/* L4 offset comes from the IP header length encoded in csum_data. */
	if (csum_flags & M_CSUM_IPv4)
		fcb.txfcb_l4os = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data);
	else
		fcb.txfcb_l4os = M_CSUM_DATA_IPv6_IPHL(m->m_pkthdr.csum_data);
	fcb.txfcb_l3os = ETHER_HDR_LEN;
	fcb.txfcb_phcs = 0;
	fcb.txfcb_vlctl = vtag;

#if 0
	printf("%s: csum_flags=%#x: txfcb flags=%#x lsos=%u l4os=%u phcs=%u vlctl=%#x\n",
	    __func__, csum_flags, fcb.txfcb_flags, fcb.txfcb_l3os, fcb.txfcb_l4os,
	    fcb.txfcb_phcs, fcb.txfcb_vlctl);
#endif

	if (M_LEADINGSPACE(m) >= sizeof(fcb)) {
		/* Room before the packet data: just back up m_data. */
		m->m_data -= sizeof(fcb);
		m->m_len += sizeof(fcb);
	} else if (!(m->m_flags & M_EXT) && MHLEN - m->m_len >= sizeof(fcb)) {
		/* Internal storage: shift the data up to make room. */
		memmove(m->m_pktdat + sizeof(fcb), m->m_data, m->m_len);
		m->m_data = m->m_pktdat;
		m->m_len += sizeof(fcb);
	} else {
		/* Need a new mbuf in front to hold the FCB. */
		struct mbuf *mn;
		MGET(mn, M_DONTWAIT, m->m_type);
		if (mn == NULL) {
			/*
			 * No memory for the FCB: fall back to computing the
			 * deferred checksums in software.
			 */
			if (csum_flags & M_CSUM_IP4) {
#ifdef INET
				in_undefer_cksum(m, ETHER_HDR_LEN,
				    csum_flags & M_CSUM_IP4);
#else
				panic("%s: impossible M_CSUM flags %#x",
				    device_xname(sc->sc_dev), csum_flags);
#endif
			} else if (csum_flags & M_CSUM_IP6) {
#ifdef INET6
				in6_undefer_cksum(m, ETHER_HDR_LEN,
				    csum_flags & M_CSUM_IP6);
#else
				panic("%s: impossible M_CSUM flags %#x",
				    device_xname(sc->sc_dev), csum_flags);
#endif
			}

			m->m_flags &= ~M_HASFCB;
			return;
		}

		m_move_pkthdr(mn, m);
		mn->m_next = m;
		m = mn;
		m_align(m, sizeof(fcb));
		m->m_len = sizeof(fcb);
		*mp = m;
	}
	m->m_pkthdr.len += sizeof(fcb);
	m->m_flags |= M_HASFCB;
	*mtod(m, struct txfcb *) = fcb;
	return;
}

/*
 * Drain the interface send queue into the transmit ring.  A packet that
 * could not be produced (ring full) is parked in txq_next — already
 * offload-processed and DMA-loaded — and retried first on the next call.
 * Returns true when the send queue was emptied, false when the ring
 * filled up first.
 */
static bool
pq3etsec_txq_enqueue(
	struct pq3etsec_softc *sc,
	struct pq3etsec_txqueue *txq)
{
	for (;;) {
		if (IF_QFULL(&txq->txq_mbufs))
			return false;
		struct mbuf *m = txq->txq_next;
		if (m == NULL) {
			int s = splnet();
			IFQ_DEQUEUE(&sc->sc_if.if_snd, m);
			splx(s);
			if (m == NULL)
				return true;
			M_SETCTX(m, NULL);
			pq3etsec_tx_offload(sc, txq, &m);
		} else {
			txq->txq_next = NULL;
		}
		int error = pq3etsec_txq_map_load(sc, txq, m);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "discarded packet due to "
			    "dmamap load failure: %d\n", error);
			m_freem(m);
			continue;
		}
		KASSERT(txq->txq_next == NULL);
		if (!pq3etsec_txq_produce(sc, txq, m)) {
			txq->txq_next = m;
			return false;
		}
		KASSERT(txq->txq_next == NULL);
	}
}
/*
 * Reclaim transmit descriptors the ETSEC has finished with.  Walks the
 * ring from the consumer index until it meets the producer or a
 * descriptor still marked Ready (TXBD_R).  On each LAST descriptor the
 * corresponding mbuf is dequeued, its DMA state released, statistics and
 * bpf are updated, and the mbuf freed.  Returns true if the queue is
 * fully drained or fillable again, i.e. whether more packets may now be
 * enqueued.
 */
static bool
pq3etsec_txq_consume(
	struct pq3etsec_softc *sc,
	struct pq3etsec_txqueue *txq)
{
	struct ifnet * const ifp = &sc->sc_if;
	volatile struct txbd *consumer = txq->txq_consumer;
	size_t txfree = 0;
	bool ret;

#if 0
	printf("%s: entry: free=%zu\n", __func__, txq->txq_free);
#endif
	/* Ack the transmit-frame event for this queue (write-1-to-clear). */
	etsec_write(sc, TSTAT, TSTAT_TXF & txq->txq_qmask);

	for (;;) {
		if (consumer == txq->txq_producer) {
			/* Ring fully drained. */
			txq->txq_consumer = consumer;
			txq->txq_free += txfree;
			txq->txq_lastintr -= uimin(txq->txq_lastintr, txfree);
			KASSERT(txq->txq_lastintr == 0);
			KASSERT(txq->txq_free ==
			    txq->txq_last - txq->txq_first - 1);
			ret = true;
			break;
		}
		pq3etsec_txq_desc_postsync(sc, txq, consumer, 1);
		const uint16_t txbd_flags = consumer->txbd_flags;
		if (txbd_flags & TXBD_R) {
			/* Device still owns this descriptor; stop here. */
			txq->txq_consumer = consumer;
			txq->txq_free += txfree;
			txq->txq_lastintr -= uimin(txq->txq_lastintr, txfree);
			ret = pq3etsec_txq_fillable_p(sc, txq);
			break;
		}

		/*
		 * If this is the last descriptor in the chain, get the
		 * mbuf, free its dmamap, and free the mbuf chain itself.
		 */
		if (txbd_flags & TXBD_L) {
			struct mbuf *m;

			IF_DEQUEUE(&txq->txq_mbufs, m);
#ifdef ETSEC_DEBUG
			KASSERTMSG(
			    m == txq->txq_lmbufs[consumer-txq->txq_first],
			    "%s: %p [%u]: flags %#x m (%p) != %p (%p)",
			    __func__, consumer, consumer - txq->txq_first,
			    txbd_flags, m,
			    &txq->txq_lmbufs[consumer-txq->txq_first],
			    txq->txq_lmbufs[consumer-txq->txq_first]);
#endif
			KASSERT(m);
			pq3etsec_txq_map_unload(sc, txq, m);
#if 0
			printf("%s: mbuf %p: consumed a %u byte packet\n",
			    __func__, m, m->m_pkthdr.len);
#endif
			/* Strip the FCB before handing the frame to bpf. */
			if (m->m_flags & M_HASFCB)
				m_adj(m, sizeof(struct txfcb));
			bpf_mtap(ifp, m, BPF_D_OUT);
			net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
			if_statinc_ref(ifp, nsr, if_opackets);
			if_statadd_ref(ifp, nsr, if_obytes, m->m_pkthdr.len);
			if (m->m_flags & M_MCAST)
				if_statinc_ref(ifp, nsr, if_omcasts);
			if (txbd_flags & TXBD_ERRORS)
				if_statinc_ref(ifp, nsr, if_oerrors);
			IF_STAT_PUTREF(ifp);
			m_freem(m);
#ifdef ETSEC_DEBUG
			txq->txq_lmbufs[consumer - txq->txq_first] = NULL;
#endif
		} else {
#ifdef ETSEC_DEBUG
			KASSERT(txq->txq_lmbufs[consumer-txq->txq_first] == NULL);
#endif
		}

		/*
		 * We own this packet again.  Clear all flags except wrap.
		 */
		txfree++;
		//consumer->txbd_flags = txbd_flags & TXBD_W;

		/*
		 * Wrap at the last entry!
		 */
		if (txbd_flags & TXBD_W) {
			KASSERT(consumer + 1 == txq->txq_last);
			consumer = txq->txq_first;
		} else {
			consumer++;
			KASSERT(consumer < txq->txq_last);
		}
	}

	/* Feed the entropy pool with transmit activity. */
	if (txfree != 0)
		rnd_add_uint32(&sc->rnd_source, txfree);
	return ret;
}

/*
 * Discard every packet the transmit queue is tracking, including a parked
 * txq_next packet.  Only legal while the transmitter is disabled.
 */
static void
pq3etsec_txq_purge(
	struct pq3etsec_softc *sc,
	struct pq3etsec_txqueue *txq)
{
	struct mbuf *m;
	KASSERT((etsec_read(sc, MACCFG1) & MACCFG1_TX_EN) == 0);

	for (;;) {
		IF_DEQUEUE(&txq->txq_mbufs, m);
		if (m == NULL)
			break;
		pq3etsec_txq_map_unload(sc, txq, m);
		m_freem(m);
	}
	if ((m = txq->txq_next) != NULL) {
		txq->txq_next = NULL;
		pq3etsec_txq_map_unload(sc, txq, m);
		m_freem(m);
	}
#ifdef ETSEC_DEBUG
	memset(txq->txq_lmbufs, 0, sizeof(txq->txq_lmbufs));
#endif
}

/*
 * Reinitialize a transmit ring to its empty power-on state: every
 * descriptor owned by software, wrap flag on the last one, indexes and
 * counters reset, and the ring base programmed into the ETSEC.
 */
static void
pq3etsec_txq_reset(
	struct pq3etsec_softc *sc,
	struct pq3etsec_txqueue *txq)
{
	/*
	 * sync all the descriptors
	 */
	pq3etsec_txq_desc_postsync(sc, txq, txq->txq_first,
	    txq->txq_last - txq->txq_first);

	/*
	 * Make sure we own all descriptors in the ring.
	 */
	volatile struct txbd *txbd;
	for (txbd = txq->txq_first; txbd < txq->txq_last - 1; txbd++) {
		txbd->txbd_flags = 0;
	}

	/*
	 * Last descriptor has the wrap flag.
	 */
	txbd->txbd_flags = TXBD_W;

	/*
	 * Reset the producer consumer indexes.
	 */
	txq->txq_consumer = txq->txq_first;
	txq->txq_producer = txq->txq_first;
	txq->txq_free = txq->txq_last - txq->txq_first - 1;
	txq->txq_threshold = txq->txq_free / 2;
	txq->txq_lastintr = 0;

	/*
	 * What do we want to get interrupted on?
	 */
	sc->sc_imask |= IEVENT_TXF | IEVENT_TXE;

	/*
	 * Restart the transmit at the first descriptor
	 */
	etsec_write(sc, txq->txq_reg_tbase, txq->txq_descmap->dm_segs->ds_addr);
}
2320b8ea2c8cSmatt */ 2321b8ea2c8cSmatt sc->sc_imask |= IEVENT_TXF | IEVENT_TXE; 2322b8ea2c8cSmatt 2323b8ea2c8cSmatt /* 2324b8ea2c8cSmatt * Restart the transmit at the first descriptor 2325b8ea2c8cSmatt */ 2326b8ea2c8cSmatt etsec_write(sc, txq->txq_reg_tbase, txq->txq_descmap->dm_segs->ds_addr); 2327b8ea2c8cSmatt } 2328b8ea2c8cSmatt 2329b8ea2c8cSmatt static void 2330b8ea2c8cSmatt pq3etsec_ifstart(struct ifnet *ifp) 2331b8ea2c8cSmatt { 2332b8ea2c8cSmatt struct pq3etsec_softc * const sc = ifp->if_softc; 2333b8ea2c8cSmatt 233489856394Sthorpej if (__predict_false((ifp->if_flags & IFF_RUNNING) == 0)) { 23351b7941c2Snonaka return; 23361b7941c2Snonaka } 23371b7941c2Snonaka 2338b8ea2c8cSmatt atomic_or_uint(&sc->sc_soft_flags, SOFT_TXINTR); 2339b8ea2c8cSmatt softint_schedule(sc->sc_soft_ih); 2340b8ea2c8cSmatt } 2341b8ea2c8cSmatt 2342b8ea2c8cSmatt static void 2343b8ea2c8cSmatt pq3etsec_tx_error( 2344b8ea2c8cSmatt struct pq3etsec_softc * const sc) 2345b8ea2c8cSmatt { 2346b8ea2c8cSmatt struct pq3etsec_txqueue * const txq = &sc->sc_txq; 2347b8ea2c8cSmatt 2348b8ea2c8cSmatt pq3etsec_txq_consume(sc, txq); 2349b8ea2c8cSmatt 2350811add33Smsaitoh if (sc->sc_txerrors 2351811add33Smsaitoh & (IEVENT_LC | IEVENT_CRL | IEVENT_XFUN | IEVENT_BABT)) { 2352b8ea2c8cSmatt } else if (sc->sc_txerrors & IEVENT_EBERR) { 2353b8ea2c8cSmatt } 2354b8ea2c8cSmatt 2355b8ea2c8cSmatt if (pq3etsec_txq_active_p(sc, txq)) 2356b8ea2c8cSmatt etsec_write(sc, TSTAT, TSTAT_THLT & txq->txq_qmask); 2357b8ea2c8cSmatt if (!pq3etsec_txq_enqueue(sc, txq)) { 2358b8ea2c8cSmatt sc->sc_ev_tx_stall.ev_count++; 2359b8ea2c8cSmatt } 2360b8ea2c8cSmatt 2361b8ea2c8cSmatt sc->sc_txerrors = 0; 2362b8ea2c8cSmatt } 2363b8ea2c8cSmatt 2364b8ea2c8cSmatt int 2365b8ea2c8cSmatt pq3etsec_tx_intr(void *arg) 2366b8ea2c8cSmatt { 2367b8ea2c8cSmatt struct pq3etsec_softc * const sc = arg; 2368b8ea2c8cSmatt 23691b7941c2Snonaka mutex_enter(sc->sc_hwlock); 23701b7941c2Snonaka 2371b8ea2c8cSmatt sc->sc_ev_tx_intr.ev_count++; 2372b8ea2c8cSmatt 
2373b8ea2c8cSmatt uint32_t ievent = etsec_read(sc, IEVENT); 2374b8ea2c8cSmatt ievent &= IEVENT_TXF | IEVENT_TXB; 2375b8ea2c8cSmatt etsec_write(sc, IEVENT, ievent); /* write 1 to clear */ 2376b8ea2c8cSmatt 2377b8ea2c8cSmatt #if 0 2378b8ea2c8cSmatt aprint_normal_dev(sc->sc_dev, "%s: ievent=%#x imask=%#x\n", 2379b8ea2c8cSmatt __func__, ievent, etsec_read(sc, IMASK)); 2380b8ea2c8cSmatt #endif 2381b8ea2c8cSmatt 23821b7941c2Snonaka if (ievent == 0) { 23831b7941c2Snonaka mutex_exit(sc->sc_hwlock); 2384b8ea2c8cSmatt return 0; 23851b7941c2Snonaka } 2386b8ea2c8cSmatt 2387b8ea2c8cSmatt sc->sc_imask &= ~(IEVENT_TXF | IEVENT_TXB); 2388b8ea2c8cSmatt atomic_or_uint(&sc->sc_soft_flags, SOFT_TXINTR); 2389b8ea2c8cSmatt etsec_write(sc, IMASK, sc->sc_imask); 2390b8ea2c8cSmatt softint_schedule(sc->sc_soft_ih); 23911b7941c2Snonaka 23921b7941c2Snonaka mutex_exit(sc->sc_hwlock); 23931b7941c2Snonaka 2394b8ea2c8cSmatt return 1; 2395b8ea2c8cSmatt } 2396b8ea2c8cSmatt 2397b8ea2c8cSmatt int 2398b8ea2c8cSmatt pq3etsec_rx_intr(void *arg) 2399b8ea2c8cSmatt { 2400b8ea2c8cSmatt struct pq3etsec_softc * const sc = arg; 2401b8ea2c8cSmatt 24021b7941c2Snonaka mutex_enter(sc->sc_hwlock); 24031b7941c2Snonaka 2404b8ea2c8cSmatt sc->sc_ev_rx_intr.ev_count++; 2405b8ea2c8cSmatt 2406b8ea2c8cSmatt uint32_t ievent = etsec_read(sc, IEVENT); 2407b8ea2c8cSmatt ievent &= IEVENT_RXF | IEVENT_RXB; 2408b8ea2c8cSmatt etsec_write(sc, IEVENT, ievent); /* write 1 to clear */ 24091b7941c2Snonaka if (ievent == 0) { 24101b7941c2Snonaka mutex_exit(sc->sc_hwlock); 2411b8ea2c8cSmatt return 0; 24121b7941c2Snonaka } 2413b8ea2c8cSmatt 2414b8ea2c8cSmatt #if 0 2415b8ea2c8cSmatt aprint_normal_dev(sc->sc_dev, "%s: ievent=%#x\n", __func__, ievent); 2416b8ea2c8cSmatt #endif 2417b8ea2c8cSmatt 2418b8ea2c8cSmatt sc->sc_imask &= ~(IEVENT_RXF | IEVENT_RXB); 2419b8ea2c8cSmatt atomic_or_uint(&sc->sc_soft_flags, SOFT_RXINTR); 2420b8ea2c8cSmatt etsec_write(sc, IMASK, sc->sc_imask); 2421b8ea2c8cSmatt softint_schedule(sc->sc_soft_ih); 
24221b7941c2Snonaka 24231b7941c2Snonaka mutex_exit(sc->sc_hwlock); 24241b7941c2Snonaka 2425b8ea2c8cSmatt return 1; 2426b8ea2c8cSmatt } 2427b8ea2c8cSmatt 2428b8ea2c8cSmatt int 2429b8ea2c8cSmatt pq3etsec_error_intr(void *arg) 2430b8ea2c8cSmatt { 2431b8ea2c8cSmatt struct pq3etsec_softc * const sc = arg; 2432b8ea2c8cSmatt 24331b7941c2Snonaka mutex_enter(sc->sc_hwlock); 24341b7941c2Snonaka 2435b8ea2c8cSmatt sc->sc_ev_error_intr.ev_count++; 2436b8ea2c8cSmatt 2437b8ea2c8cSmatt for (int rv = 0, soft_flags = 0;; rv = 1) { 2438b8ea2c8cSmatt uint32_t ievent = etsec_read(sc, IEVENT); 2439b8ea2c8cSmatt ievent &= ~(IEVENT_RXF | IEVENT_RXB | IEVENT_TXF | IEVENT_TXB); 2440b8ea2c8cSmatt etsec_write(sc, IEVENT, ievent); /* write 1 to clear */ 2441b8ea2c8cSmatt if (ievent == 0) { 2442b8ea2c8cSmatt if (soft_flags) { 2443b8ea2c8cSmatt atomic_or_uint(&sc->sc_soft_flags, soft_flags); 2444b8ea2c8cSmatt softint_schedule(sc->sc_soft_ih); 2445b8ea2c8cSmatt } 24461b7941c2Snonaka mutex_exit(sc->sc_hwlock); 2447b8ea2c8cSmatt return rv; 2448b8ea2c8cSmatt } 2449b8ea2c8cSmatt #if 0 2450b8ea2c8cSmatt aprint_normal_dev(sc->sc_dev, "%s: ievent=%#x imask=%#x\n", 2451b8ea2c8cSmatt __func__, ievent, etsec_read(sc, IMASK)); 2452b8ea2c8cSmatt #endif 2453b8ea2c8cSmatt 2454b8ea2c8cSmatt if (ievent & (IEVENT_GRSC | IEVENT_GTSC)) { 2455b8ea2c8cSmatt sc->sc_imask &= ~(IEVENT_GRSC | IEVENT_GTSC); 2456b8ea2c8cSmatt etsec_write(sc, IMASK, sc->sc_imask); 2457b8ea2c8cSmatt wakeup(sc); 2458b8ea2c8cSmatt } 2459b8ea2c8cSmatt if (ievent & (IEVENT_MMRD | IEVENT_MMWR)) { 2460b8ea2c8cSmatt sc->sc_imask &= ~(IEVENT_MMRD | IEVENT_MMWR); 2461b8ea2c8cSmatt etsec_write(sc, IMASK, sc->sc_imask); 2462b8ea2c8cSmatt wakeup(&sc->sc_mii); 2463b8ea2c8cSmatt } 2464b8ea2c8cSmatt if (ievent & IEVENT_BSY) { 2465b8ea2c8cSmatt soft_flags |= SOFT_RXBSY; 2466b8ea2c8cSmatt sc->sc_imask &= ~IEVENT_BSY; 2467b8ea2c8cSmatt etsec_write(sc, IMASK, sc->sc_imask); 2468b8ea2c8cSmatt } 2469b8ea2c8cSmatt if (ievent & IEVENT_TXE) { 2470b8ea2c8cSmatt 
soft_flags |= SOFT_TXERROR; 2471b8ea2c8cSmatt sc->sc_imask &= ~IEVENT_TXE; 2472b8ea2c8cSmatt sc->sc_txerrors |= ievent; 2473b8ea2c8cSmatt } 2474b8ea2c8cSmatt if (ievent & IEVENT_TXC) { 2475b8ea2c8cSmatt sc->sc_ev_tx_pause.ev_count++; 2476b8ea2c8cSmatt } 2477b8ea2c8cSmatt if (ievent & IEVENT_RXC) { 2478b8ea2c8cSmatt sc->sc_ev_rx_pause.ev_count++; 2479b8ea2c8cSmatt } 2480b8ea2c8cSmatt if (ievent & IEVENT_DPE) { 2481b8ea2c8cSmatt soft_flags |= SOFT_RESET; 2482b8ea2c8cSmatt sc->sc_imask &= ~IEVENT_DPE; 2483b8ea2c8cSmatt etsec_write(sc, IMASK, sc->sc_imask); 2484b8ea2c8cSmatt } 2485b8ea2c8cSmatt } 2486b8ea2c8cSmatt } 2487b8ea2c8cSmatt 2488b8ea2c8cSmatt void 2489b8ea2c8cSmatt pq3etsec_soft_intr(void *arg) 2490b8ea2c8cSmatt { 2491b8ea2c8cSmatt struct pq3etsec_softc * const sc = arg; 2492b8ea2c8cSmatt struct ifnet * const ifp = &sc->sc_if; 24931b7941c2Snonaka uint32_t imask = 0; 2494b8ea2c8cSmatt 2495b8ea2c8cSmatt mutex_enter(sc->sc_lock); 2496b8ea2c8cSmatt 2497b8ea2c8cSmatt u_int soft_flags = atomic_swap_uint(&sc->sc_soft_flags, 0); 2498b8ea2c8cSmatt 2499b8ea2c8cSmatt sc->sc_ev_soft_intr.ev_count++; 2500b8ea2c8cSmatt 2501b8ea2c8cSmatt if (soft_flags & SOFT_RESET) { 2502b8ea2c8cSmatt int s = splnet(); 2503b8ea2c8cSmatt pq3etsec_ifinit(ifp); 2504b8ea2c8cSmatt splx(s); 2505b8ea2c8cSmatt soft_flags = 0; 2506b8ea2c8cSmatt } 2507b8ea2c8cSmatt 2508b8ea2c8cSmatt if (soft_flags & SOFT_RXBSY) { 2509b8ea2c8cSmatt struct pq3etsec_rxqueue * const rxq = &sc->sc_rxq; 2510b8ea2c8cSmatt size_t threshold = 5 * rxq->rxq_threshold / 4; 2511b8ea2c8cSmatt if (threshold >= rxq->rxq_last - rxq->rxq_first) { 2512b8ea2c8cSmatt threshold = rxq->rxq_last - rxq->rxq_first - 1; 2513b8ea2c8cSmatt } else { 25141b7941c2Snonaka imask |= IEVENT_BSY; 2515b8ea2c8cSmatt } 2516b8ea2c8cSmatt aprint_normal_dev(sc->sc_dev, 2517b8ea2c8cSmatt "increasing receive buffers from %zu to %zu\n", 2518b8ea2c8cSmatt rxq->rxq_threshold, threshold); 2519b8ea2c8cSmatt rxq->rxq_threshold = threshold; 2520b8ea2c8cSmatt } 
2521b8ea2c8cSmatt 2522b8ea2c8cSmatt if ((soft_flags & SOFT_TXINTR) 2523b8ea2c8cSmatt || pq3etsec_txq_active_p(sc, &sc->sc_txq)) { 2524b8ea2c8cSmatt /* 2525b8ea2c8cSmatt * Let's do what we came here for. Consume transmitted 2526c2f2b1bfSmsaitoh * packets off the transmit ring. 2527b8ea2c8cSmatt */ 2528b8ea2c8cSmatt if (!pq3etsec_txq_consume(sc, &sc->sc_txq) 2529b8ea2c8cSmatt || !pq3etsec_txq_enqueue(sc, &sc->sc_txq)) { 2530b8ea2c8cSmatt sc->sc_ev_tx_stall.ev_count++; 2531b8ea2c8cSmatt } 25321b7941c2Snonaka imask |= IEVENT_TXF; 2533b8ea2c8cSmatt } 2534b8ea2c8cSmatt 2535b8ea2c8cSmatt if (soft_flags & (SOFT_RXINTR | SOFT_RXBSY)) { 253687d4693bSmsaitoh /* Let's consume */ 2537b8ea2c8cSmatt pq3etsec_rxq_consume(sc, &sc->sc_rxq); 25381b7941c2Snonaka imask |= IEVENT_RXF; 2539b8ea2c8cSmatt } 2540b8ea2c8cSmatt 2541b8ea2c8cSmatt if (soft_flags & SOFT_TXERROR) { 2542b8ea2c8cSmatt pq3etsec_tx_error(sc); 25431b7941c2Snonaka imask |= IEVENT_TXE; 2544b8ea2c8cSmatt } 2545b8ea2c8cSmatt 2546b8ea2c8cSmatt if (ifp->if_flags & IFF_RUNNING) { 2547b8ea2c8cSmatt pq3etsec_rxq_produce(sc, &sc->sc_rxq); 25481b7941c2Snonaka mutex_spin_enter(sc->sc_hwlock); 25491b7941c2Snonaka sc->sc_imask |= imask; 2550b8ea2c8cSmatt etsec_write(sc, IMASK, sc->sc_imask); 25511b7941c2Snonaka mutex_spin_exit(sc->sc_hwlock); 2552b8ea2c8cSmatt } else { 2553b8ea2c8cSmatt KASSERT((soft_flags & SOFT_RXBSY) == 0); 2554b8ea2c8cSmatt } 2555b8ea2c8cSmatt 2556b8ea2c8cSmatt mutex_exit(sc->sc_lock); 2557b8ea2c8cSmatt } 2558b8ea2c8cSmatt 2559b8ea2c8cSmatt static void 2560b8ea2c8cSmatt pq3etsec_mii_tick(void *arg) 2561b8ea2c8cSmatt { 2562b8ea2c8cSmatt struct pq3etsec_softc * const sc = arg; 2563b8ea2c8cSmatt mutex_enter(sc->sc_lock); 2564b8ea2c8cSmatt callout_ack(&sc->sc_mii_callout); 2565b8ea2c8cSmatt sc->sc_ev_mii_ticks.ev_count++; 2566b8ea2c8cSmatt #ifdef DEBUG 2567b8ea2c8cSmatt uint64_t now = mftb(); 2568b8ea2c8cSmatt if (now - sc->sc_mii_last_tick < cpu_timebase - 5000) { 2569b8ea2c8cSmatt aprint_debug_dev(sc->sc_dev, 
"%s: diff=%"PRIu64"\n", 2570b8ea2c8cSmatt __func__, now - sc->sc_mii_last_tick); 2571b8ea2c8cSmatt callout_stop(&sc->sc_mii_callout); 2572b8ea2c8cSmatt } 2573b8ea2c8cSmatt #endif 2574b8ea2c8cSmatt mii_tick(&sc->sc_mii); 2575b8ea2c8cSmatt int s = splnet(); 2576b8ea2c8cSmatt if (sc->sc_soft_flags & SOFT_RESET) 2577b8ea2c8cSmatt softint_schedule(sc->sc_soft_ih); 2578b8ea2c8cSmatt splx(s); 2579b8ea2c8cSmatt callout_schedule(&sc->sc_mii_callout, hz); 258068270447Smatt #ifdef DEBUG 2581b8ea2c8cSmatt sc->sc_mii_last_tick = now; 258268270447Smatt #endif 2583b8ea2c8cSmatt mutex_exit(sc->sc_lock); 2584b8ea2c8cSmatt } 25856e15a820Snonaka 25866e15a820Snonaka static void 25876e15a820Snonaka pq3etsec_set_ic_rx(struct pq3etsec_softc *sc) 25886e15a820Snonaka { 25896e15a820Snonaka uint32_t reg; 25906e15a820Snonaka 25916e15a820Snonaka if (ETSEC_IC_RX_ENABLED(sc)) { 25926e15a820Snonaka reg = RXIC_ICEN; 25936e15a820Snonaka reg |= RXIC_ICFT_SET(sc->sc_ic_rx_count); 25946e15a820Snonaka reg |= RXIC_ICTT_SET(sc->sc_ic_rx_time); 25956e15a820Snonaka } else { 25966e15a820Snonaka /* Disable RX interrupt coalescing */ 25976e15a820Snonaka reg = 0; 25986e15a820Snonaka } 25996e15a820Snonaka 26006e15a820Snonaka etsec_write(sc, RXIC, reg); 26016e15a820Snonaka } 26026e15a820Snonaka 26036e15a820Snonaka static void 26046e15a820Snonaka pq3etsec_set_ic_tx(struct pq3etsec_softc *sc) 26056e15a820Snonaka { 26066e15a820Snonaka uint32_t reg; 26076e15a820Snonaka 26086e15a820Snonaka if (ETSEC_IC_TX_ENABLED(sc)) { 26096e15a820Snonaka reg = TXIC_ICEN; 26106e15a820Snonaka reg |= TXIC_ICFT_SET(sc->sc_ic_tx_count); 26116e15a820Snonaka reg |= TXIC_ICTT_SET(sc->sc_ic_tx_time); 26126e15a820Snonaka } else { 26136e15a820Snonaka /* Disable TX interrupt coalescing */ 26146e15a820Snonaka reg = 0; 26156e15a820Snonaka } 26166e15a820Snonaka 26176e15a820Snonaka etsec_write(sc, TXIC, reg); 26186e15a820Snonaka } 26196e15a820Snonaka 26206e15a820Snonaka /* 26216e15a820Snonaka * sysctl 26226e15a820Snonaka */ 26236e15a820Snonaka 
static int 26246e15a820Snonaka pq3etsec_sysctl_ic_time_helper(SYSCTLFN_ARGS, int *valuep) 26256e15a820Snonaka { 26266e15a820Snonaka struct sysctlnode node = *rnode; 26276e15a820Snonaka struct pq3etsec_softc *sc = rnode->sysctl_data; 26286e15a820Snonaka int value = *valuep; 26296e15a820Snonaka int error; 26306e15a820Snonaka 26316e15a820Snonaka node.sysctl_data = &value; 26326e15a820Snonaka error = sysctl_lookup(SYSCTLFN_CALL(&node)); 26336e15a820Snonaka if (error != 0 || newp == NULL) 26346e15a820Snonaka return error; 26356e15a820Snonaka 26366e15a820Snonaka if (value < 0 || value > 65535) 26376e15a820Snonaka return EINVAL; 26386e15a820Snonaka 26396e15a820Snonaka mutex_enter(sc->sc_lock); 26406e15a820Snonaka *valuep = value; 26416e15a820Snonaka if (valuep == &sc->sc_ic_rx_time) 26426e15a820Snonaka pq3etsec_set_ic_rx(sc); 26436e15a820Snonaka else 26446e15a820Snonaka pq3etsec_set_ic_tx(sc); 26456e15a820Snonaka mutex_exit(sc->sc_lock); 26466e15a820Snonaka 26476e15a820Snonaka return 0; 26486e15a820Snonaka } 26496e15a820Snonaka 26506e15a820Snonaka static int 26516e15a820Snonaka pq3etsec_sysctl_ic_count_helper(SYSCTLFN_ARGS, int *valuep) 26526e15a820Snonaka { 26536e15a820Snonaka struct sysctlnode node = *rnode; 26546e15a820Snonaka struct pq3etsec_softc *sc = rnode->sysctl_data; 26556e15a820Snonaka int value = *valuep; 26566e15a820Snonaka int error; 26576e15a820Snonaka 26586e15a820Snonaka node.sysctl_data = &value; 26596e15a820Snonaka error = sysctl_lookup(SYSCTLFN_CALL(&node)); 26606e15a820Snonaka if (error != 0 || newp == NULL) 26616e15a820Snonaka return error; 26626e15a820Snonaka 26636e15a820Snonaka if (value < 0 || value > 255) 26646e15a820Snonaka return EINVAL; 26656e15a820Snonaka 26666e15a820Snonaka mutex_enter(sc->sc_lock); 26676e15a820Snonaka *valuep = value; 26686e15a820Snonaka if (valuep == &sc->sc_ic_rx_count) 26696e15a820Snonaka pq3etsec_set_ic_rx(sc); 26706e15a820Snonaka else 26716e15a820Snonaka pq3etsec_set_ic_tx(sc); 26726e15a820Snonaka 
mutex_exit(sc->sc_lock); 26736e15a820Snonaka 26746e15a820Snonaka return 0; 26756e15a820Snonaka } 26766e15a820Snonaka 26776e15a820Snonaka static int 26786e15a820Snonaka pq3etsec_sysctl_ic_rx_time_helper(SYSCTLFN_ARGS) 26796e15a820Snonaka { 26806e15a820Snonaka struct pq3etsec_softc *sc = rnode->sysctl_data; 26816e15a820Snonaka 26826e15a820Snonaka return pq3etsec_sysctl_ic_time_helper(SYSCTLFN_CALL(rnode), 26836e15a820Snonaka &sc->sc_ic_rx_time); 26846e15a820Snonaka } 26856e15a820Snonaka 26866e15a820Snonaka static int 26876e15a820Snonaka pq3etsec_sysctl_ic_rx_count_helper(SYSCTLFN_ARGS) 26886e15a820Snonaka { 26896e15a820Snonaka struct pq3etsec_softc *sc = rnode->sysctl_data; 26906e15a820Snonaka 26916e15a820Snonaka return pq3etsec_sysctl_ic_count_helper(SYSCTLFN_CALL(rnode), 26926e15a820Snonaka &sc->sc_ic_rx_count); 26936e15a820Snonaka } 26946e15a820Snonaka 26956e15a820Snonaka static int 26966e15a820Snonaka pq3etsec_sysctl_ic_tx_time_helper(SYSCTLFN_ARGS) 26976e15a820Snonaka { 26986e15a820Snonaka struct pq3etsec_softc *sc = rnode->sysctl_data; 26996e15a820Snonaka 27006e15a820Snonaka return pq3etsec_sysctl_ic_time_helper(SYSCTLFN_CALL(rnode), 27016e15a820Snonaka &sc->sc_ic_tx_time); 27026e15a820Snonaka } 27036e15a820Snonaka 27046e15a820Snonaka static int 27056e15a820Snonaka pq3etsec_sysctl_ic_tx_count_helper(SYSCTLFN_ARGS) 27066e15a820Snonaka { 27076e15a820Snonaka struct pq3etsec_softc *sc = rnode->sysctl_data; 27086e15a820Snonaka 27096e15a820Snonaka return pq3etsec_sysctl_ic_count_helper(SYSCTLFN_CALL(rnode), 27106e15a820Snonaka &sc->sc_ic_tx_count); 27116e15a820Snonaka } 27126e15a820Snonaka 27136e15a820Snonaka static void pq3etsec_sysctl_setup(struct sysctllog **clog, 27146e15a820Snonaka struct pq3etsec_softc *sc) 27156e15a820Snonaka { 27166e15a820Snonaka const struct sysctlnode *cnode, *rnode; 27176e15a820Snonaka 27186e15a820Snonaka if (sysctl_createv(clog, 0, NULL, &rnode, 27196e15a820Snonaka CTLFLAG_PERMANENT, 27206e15a820Snonaka CTLTYPE_NODE, 
device_xname(sc->sc_dev), 27216e15a820Snonaka SYSCTL_DESCR("TSEC interface"), 27226e15a820Snonaka NULL, 0, NULL, 0, 27236e15a820Snonaka CTL_HW, CTL_CREATE, CTL_EOL) != 0) 27246e15a820Snonaka goto bad; 27256e15a820Snonaka 27266e15a820Snonaka if (sysctl_createv(clog, 0, &rnode, &rnode, 27276e15a820Snonaka CTLFLAG_PERMANENT, 27286e15a820Snonaka CTLTYPE_NODE, "int_coal", 27296e15a820Snonaka SYSCTL_DESCR("Interrupts coalescing"), 27306e15a820Snonaka NULL, 0, NULL, 0, 27316e15a820Snonaka CTL_CREATE, CTL_EOL) != 0) 27326e15a820Snonaka goto bad; 27336e15a820Snonaka 27346e15a820Snonaka if (sysctl_createv(clog, 0, &rnode, &cnode, 27356e15a820Snonaka CTLFLAG_PERMANENT | CTLFLAG_READWRITE, 27366e15a820Snonaka CTLTYPE_INT, "rx_time", 27376e15a820Snonaka SYSCTL_DESCR("RX time threshold (0-65535)"), 27386e15a820Snonaka pq3etsec_sysctl_ic_rx_time_helper, 0, (void *)sc, 0, 27396e15a820Snonaka CTL_CREATE, CTL_EOL) != 0) 27406e15a820Snonaka goto bad; 27416e15a820Snonaka 27426e15a820Snonaka if (sysctl_createv(clog, 0, &rnode, &cnode, 27436e15a820Snonaka CTLFLAG_PERMANENT | CTLFLAG_READWRITE, 27446e15a820Snonaka CTLTYPE_INT, "rx_count", 27456e15a820Snonaka SYSCTL_DESCR("RX frame count threshold (0-255)"), 27466e15a820Snonaka pq3etsec_sysctl_ic_rx_count_helper, 0, (void *)sc, 0, 27476e15a820Snonaka CTL_CREATE, CTL_EOL) != 0) 27486e15a820Snonaka goto bad; 27496e15a820Snonaka 27506e15a820Snonaka if (sysctl_createv(clog, 0, &rnode, &cnode, 27516e15a820Snonaka CTLFLAG_PERMANENT | CTLFLAG_READWRITE, 27526e15a820Snonaka CTLTYPE_INT, "tx_time", 27536e15a820Snonaka SYSCTL_DESCR("TX time threshold (0-65535)"), 27546e15a820Snonaka pq3etsec_sysctl_ic_tx_time_helper, 0, (void *)sc, 0, 27556e15a820Snonaka CTL_CREATE, CTL_EOL) != 0) 27566e15a820Snonaka goto bad; 27576e15a820Snonaka 27586e15a820Snonaka if (sysctl_createv(clog, 0, &rnode, &cnode, 27596e15a820Snonaka CTLFLAG_PERMANENT | CTLFLAG_READWRITE, 27606e15a820Snonaka CTLTYPE_INT, "tx_count", 27616e15a820Snonaka SYSCTL_DESCR("TX frame count 
threshold (0-255)"), 27626e15a820Snonaka pq3etsec_sysctl_ic_tx_count_helper, 0, (void *)sc, 0, 27636e15a820Snonaka CTL_CREATE, CTL_EOL) != 0) 27646e15a820Snonaka goto bad; 27656e15a820Snonaka 27666e15a820Snonaka return; 27676e15a820Snonaka 27686e15a820Snonaka bad: 27696e15a820Snonaka aprint_error_dev(sc->sc_dev, "could not attach sysctl nodes\n"); 27706e15a820Snonaka } 2771