/*	$NetBSD: gemvar.h,v 1.23 2012/02/02 19:43:03 tls Exp $ */

/*
 *
 * Copyright (C) 2001 Eduardo Horvath.
 * All rights reserved.
 *
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#ifndef _IF_GEMVAR_H
#define	_IF_GEMVAR_H


#include <sys/queue.h>
#include <sys/callout.h>

#include <sys/rnd.h>

/*
 * Misc. definitions for the Sun ``Gem'' Ethernet controller family driver.
 */

/*
 * Transmit descriptor list size.  This is arbitrary, but allocate
 * enough descriptors for 64 pending transmissions and 16 segments
 * per packet.
 */
#define	GEM_NTXSEGS		16

#define	GEM_TXQUEUELEN		64
#define	GEM_NTXDESC		(GEM_TXQUEUELEN * GEM_NTXSEGS)
#define	GEM_NTXDESC_MASK	(GEM_NTXDESC - 1)
#define	GEM_NEXTTX(x)		((x + 1) & GEM_NTXDESC_MASK)

/*
 * Receive descriptor list size.  We have one Rx buffer per incoming
 * packet, so this logic is a little simpler.
 */
#define	GEM_NRXDESC		128
#define	GEM_NRXDESC_MASK	(GEM_NRXDESC - 1)
#define	GEM_PREVRX(x)		((x - 1) & GEM_NRXDESC_MASK)
#define	GEM_NEXTRX(x)		((x + 1) & GEM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the GEM chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct gem_control_data {
	/*
	 * The transmit descriptors.
	 */
	struct gem_desc gcd_txdescs[GEM_NTXDESC];

	/*
	 * The receive descriptors.
	 */
	struct gem_desc gcd_rxdescs[GEM_NRXDESC];
};

#define	GEM_CDOFF(x)	offsetof(struct gem_control_data, x)
#define	GEM_CDTXOFF(x)	GEM_CDOFF(gcd_txdescs[(x)])
#define	GEM_CDRXOFF(x)	GEM_CDOFF(gcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct gem_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndescs;			/* number of descriptors */
	SIMPLEQ_ENTRY(gem_txsoft) txs_q;
};

SIMPLEQ_HEAD(gem_txsq, gem_txsoft);
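
/*
 * Illustrative note (not part of the original code): GEM_NTXDESC and
 * GEM_NRXDESC are powers of two, so GEM_NEXTTX(), GEM_NEXTRX() and
 * GEM_PREVRX() can wrap a ring index with a cheap mask instead of a
 * modulo.  A minimal sketch:
 *
 *	int idx = GEM_NTXDESC - 1;	(last slot in the Tx ring)
 *	idx = GEM_NEXTTX(idx);		(wraps around to slot 0)
 */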

/*
 * Software state for receive jobs.
 */
struct gem_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

enum gem_attach_stage {
	  GEM_ATT_BACKEND_2 = 0
	, GEM_ATT_BACKEND_1
	, GEM_ATT_FINISHED
	, GEM_ATT_MII
	, GEM_ATT_7
	, GEM_ATT_6
	, GEM_ATT_5
	, GEM_ATT_4
	, GEM_ATT_3
	, GEM_ATT_2
	, GEM_ATT_1
	, GEM_ATT_0
	, GEM_ATT_BACKEND_0
};
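
/*
 * Illustrative sketch (an assumption about typical bus_dma(9) usage, not
 * taken from this driver): each gem_rxsoft pairs the receive mbuf for a
 * ring slot with the DMA map it is loaded into, roughly:
 *
 *	struct gem_rxsoft *rxs = &sc->sc_rxsoft[i];
 *	rxs->rxs_mbuf = m;
 *	bus_dmamap_load_mbuf(sc->sc_dmatag, rxs->rxs_dmamap, m,
 *	    BUS_DMA_READ | BUS_DMA_NOWAIT);
 */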

/*
 * Software state per device.
 */
struct gem_softc {
	device_t sc_dev;		/* generic device information */
	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII media control */
	struct callout sc_tick_ch;	/* tick callout */

	/* The following bus handles are to be provided by the bus front-end */
	bus_space_tag_t sc_bustag;	/* bus tag */
	bus_dma_tag_t sc_dmatag;	/* bus dma tag */
	bus_dmamap_t sc_dmamap;		/* bus dma handle */
	bus_space_handle_t sc_h1;	/* bus space handle for bank 1 regs */
	bus_space_handle_t sc_h2;	/* bus space handle for bank 2 regs */
	bus_size_t sc_size;		/* bank 1 size */

	int sc_phys[2];			/* MII instance -> PHY map */

	int sc_mif_config;		/* Selected MII reg setting */
	uint32_t sc_mii_anar;		/* copy of PCS GEM_MII_ANAR register */
	int sc_mii_media;		/* Media selected for PCS MII */

	u_int sc_variant;		/* which GEM are we dealing with? */
#define	GEM_UNKNOWN		0	/* don't know */
#define	GEM_SUN_GEM		1	/* Sun GEM variant */
#define	GEM_SUN_ERI		2	/* Sun ERI variant */
#define	GEM_APPLE_GMAC		3	/* Apple GMAC variant */
#define	GEM_APPLE_K2_GMAC	4	/* Apple K2 GMAC */

#define	GEM_IS_SUN(sc) \
	((sc)->sc_variant == GEM_SUN_GEM || \
	 (sc)->sc_variant == GEM_SUN_ERI)
#define	GEM_IS_APPLE(sc) \
	((sc)->sc_variant == GEM_APPLE_GMAC || \
	 (sc)->sc_variant == GEM_APPLE_K2_GMAC)

	int sc_chiprev;			/* hardware revision */

	u_int sc_flags;			/* GEM_* state flags (below) */
	short sc_if_flags;		/* copy of ifp->if_flags */
#define	GEM_GIGABIT		0x0001	/* has a gigabit PHY */
#define	GEM_LINK		0x0002	/* link is up */
#define	GEM_PCI			0x0004	/* XXX PCI busses are little-endian */
#define	GEM_SERDES		0x0008	/* use the SERDES */
#define	GEM_SERIAL		0x0010	/* use the serial link */

	/*
	 * Ring buffer DMA stuff.
	 */
	bus_dma_segment_t sc_cdseg;	/* control data memory */
	int sc_cdnseg;			/* number of segments */
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	bus_dmamap_t sc_nulldmamap;	/* padding for small packets */

	/*
	 * Software state for transmit and receive descriptors.
	 */
	struct gem_txsoft sc_txsoft[GEM_TXQUEUELEN];
	struct gem_rxsoft sc_rxsoft[GEM_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct gem_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->gcd_txdescs
#define	sc_rxdescs	sc_control_data->gcd_rxdescs

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */
	int sc_txwin;			/* Tx descriptors since last Tx int */

	struct gem_txsq sc_txfreeq;	/* free Tx descsofts */
	struct gem_txsq sc_txdirtyq;	/* dirty Tx descsofts */

	int sc_rxptr;			/* next ready RX descriptor/descsoft */
	int sc_rxfifosize;		/* Rx FIFO size (bytes) */

	/* ========== */
	int sc_inited;
	int sc_meminited;
	int sc_debug;
	void *sc_sh;			/* shutdownhook cookie */

	/* Special hardware hooks */
	void (*sc_hwreset)(struct gem_softc *);
	void (*sc_hwinit)(struct gem_softc *);

	krndsource_t rnd_source;

	struct evcnt sc_ev_intr;
#ifdef GEM_COUNTERS
	struct evcnt sc_ev_txint;
	struct evcnt sc_ev_rxint;
	struct evcnt sc_ev_rxnobuf;
	struct evcnt sc_ev_rxfull;
	struct evcnt sc_ev_rxhist[9];
#endif

	enum gem_attach_stage sc_att_stage;
};

#ifdef GEM_COUNTERS
#define	GEM_COUNTER_INCR(sc, ctr)	((void) (sc->ctr.ev_count++))
#else
#define	GEM_COUNTER_INCR(sc, ctr)	((void) sc)
#endif


#define	GEM_DMA_READ(sc, v) \
	(((sc)->sc_flags & GEM_PCI) ? le64toh(v) : be64toh(v))
#define	GEM_DMA_WRITE(sc, v) \
	(((sc)->sc_flags & GEM_PCI) ? htole64(v) : htobe64(v))

#define	GEM_CDTXADDR(sc, x)	((sc)->sc_cddma + GEM_CDTXOFF((x)))
#define	GEM_CDRXADDR(sc, x)	((sc)->sc_cddma + GEM_CDRXOFF((x)))

#define	GEM_CDADDR(sc)		((sc)->sc_cddma + GEM_CDOFF)
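
/*
 * Illustrative sketch (an assumption, not taken from the driver itself):
 * GEM_DMA_WRITE()/GEM_DMA_READ() convert descriptor words to and from the
 * byte order the chip sees (little-endian behind PCI, big-endian
 * otherwise), and GEM_CDTXADDR()/GEM_CDRXADDR() turn a ring index into
 * the bus address of that descriptor, e.g.:
 *
 *	sc->sc_txdescs[i].gd_addr =
 *	    GEM_DMA_WRITE(sc, txs->txs_dmamap->dm_segs[0].ds_addr);
 *	bus_addr_t da = GEM_CDTXADDR(sc, i);	(where descriptor i lives)
 */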

#define	GEM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > GEM_NTXDESC) {				\
		bus_dmamap_sync((sc)->sc_dmatag, (sc)->sc_cddmamap,	\
		    GEM_CDTXOFF(__x), sizeof(struct gem_desc) *		\
		    (GEM_NTXDESC - __x), (ops));			\
		__n -= (GEM_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmatag, (sc)->sc_cddmamap,		\
	    GEM_CDTXOFF(__x), sizeof(struct gem_desc) * __n, (ops));	\
} while (0)

#define	GEM_CDRXSYNC(sc, x, ops)					\
	bus_dmamap_sync((sc)->sc_dmatag, (sc)->sc_cddmamap,		\
	    GEM_CDRXOFF((x)), sizeof(struct gem_desc), (ops))

#define	GEM_CDSYNC(sc, ops)						\
	bus_dmamap_sync((sc)->sc_dmatag, (sc)->sc_cddmamap,		\
	    0, sizeof(struct gem_control_data), (ops))

#define	GEM_INIT_RXDESC(sc, x)						\
do {									\
	struct gem_rxsoft *__rxs = &sc->sc_rxsoft[(x)];			\
	struct gem_desc *__rxd = &sc->sc_rxdescs[(x)];			\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	__m->m_data = __m->m_ext.ext_buf;				\
	__rxd->gd_addr =						\
	    GEM_DMA_WRITE((sc), __rxs->rxs_dmamap->dm_segs[0].ds_addr);	\
	__rxd->gd_flags =						\
	    GEM_DMA_WRITE((sc),						\
			(((__m->m_ext.ext_size)<<GEM_RD_BUFSHIFT)	\
			 & GEM_RD_BUFSIZE) | GEM_RD_OWN);		\
	GEM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
} while (0)

#define	GEM_UPDATE_RXDESC(sc, x)					\
do {									\
	struct gem_rxsoft *__rxs = &sc->sc_rxsoft[(x)];			\
	struct gem_desc *__rxd = &sc->sc_rxdescs[(x)];			\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	__rxd->gd_flags =						\
	    GEM_DMA_WRITE((sc),						\
			(((__m->m_ext.ext_size)<<GEM_RD_BUFSHIFT)	\
			 & GEM_RD_BUFSIZE) | GEM_RD_OWN);		\
} while (0)

#ifdef _KERNEL
bool	gem_shutdown(device_t, int);
bool	gem_suspend(device_t, const pmf_qual_t *);
bool	gem_resume(device_t, const pmf_qual_t *);
void	gem_attach(struct gem_softc *, const uint8_t *);
int	gem_intr(void *);
int	gem_detach(struct gem_softc *, int);

void	gem_reset(struct gem_softc *);
#endif /* _KERNEL */


#endif
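
/*
 * Illustrative note (an assumption about usage, not from the original
 * source): GEM_INIT_RXDESC() resets the mbuf data pointer, rewrites the
 * descriptor's buffer address and size, and sets GEM_RD_OWN to hand the
 * slot back to the chip, so a receive loop would typically finish a slot
 * with something like:
 *
 *	GEM_INIT_RXDESC(sc, i);
 *	i = GEM_NEXTRX(i);
 */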