/*	$NetBSD: rtl81x9var.h,v 1.40 2007/12/09 20:27:59 jmcneill Exp $	*/

/*
 * Copyright (c) 1997, 1998
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * FreeBSD Id: if_rlreg.h,v 1.9 1999/06/20 18:56:09 wpaul Exp
 */

#include "rnd.h"

#if NRND > 0
#include <sys/rnd.h>
#endif

#define RTK_ETHER_ALIGN	2
#define RTK_RXSTAT_LEN	4

#ifdef __NO_STRICT_ALIGNMENT
/*
 * XXX According to PR kern/33763, some 8168 and variants can't DMA
 * XXX RX packet data into unaligned buffers. This means such chips will
 * XXX never work on !__NO_STRICT_ALIGNMENT hosts without copying the buffer.
 */
#define RE_ETHER_ALIGN	0
#else
#define RE_ETHER_ALIGN	2
#endif

struct rtk_type {
	uint16_t	rtk_vid;
	uint16_t	rtk_did;
	int		rtk_basetype;
#define RTK_8129	1
#define RTK_8139	2
#define RTK_8139CPLUS	3
#define RTK_8169	4
#define RTK_8168	5
#define RTK_8101E	6
	const char	*rtk_name;
};

struct rtk_mii_frame {
	uint8_t		mii_stdelim;
	uint8_t		mii_opcode;
	uint8_t		mii_phyaddr;
	uint8_t		mii_regaddr;
	uint8_t		mii_turnaround;
	uint16_t	mii_data;
};

/*
 * MII constants
 */
#define RTK_MII_STARTDELIM	0x01
#define RTK_MII_READOP		0x02
#define RTK_MII_WRITEOP		0x01
#define RTK_MII_TURNAROUND	0x02


/*
 * The RealTek doesn't use a fragment-based descriptor mechanism.
 * Instead, there are only four register sets, each of which represents
 * one 'descriptor.' Basically, each TX descriptor is just a contiguous
 * packet buffer (32-bit aligned!) and we place the buffer addresses in
 * the registers so the chip knows where they are.
 *
 * We can sort of kludge together the same kind of buffer management
 * used in previous drivers, but we have to do buffer copies almost all
 * the time, so it doesn't really buy us much.
 *
 * For reception, there's just one large buffer where the chip stores
 * all received packets.
 */
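/*
 * Illustrative only: a minimal sketch of how the bit-bang MII constants
 * above might be used to compose a PHY register read frame on the 8129.
 * The rtk_mii_sync()/rtk_mii_send() helpers named here are assumptions
 * (stand-ins for whatever the matching .c file provides), not part of
 * this header.
 */
#if 0	/* example, not compiled */
static uint16_t
example_mii_read(struct rtk_softc *sc, int phy, int reg)
{
	struct rtk_mii_frame frame;

	frame.mii_stdelim = RTK_MII_STARTDELIM;	/* 01: start of frame */
	frame.mii_opcode = RTK_MII_READOP;	/* 10: read opcode */
	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	frame.mii_turnaround = 0;		/* driven by the PHY on a read */
	frame.mii_data = 0;			/* filled in by the PHY */

	rtk_mii_sync(sc);		/* assumed helper: idle the MDIO line */
	rtk_mii_send(sc, &frame);	/* assumed helper: clock the frame out/in */

	return frame.mii_data;
}
#endif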
#ifdef dreamcast
/*
 * XXX dreamcast has only 32KB DMA'able memory on its PCI bridge.
 * XXX Maybe this should be handled by prop_dictionary, or
 * XXX some other new API which returns available DMA resources.
 */
#define RTK_RX_BUF_SZ		RTK_RXBUF_16
#else
#define RTK_RX_BUF_SZ		RTK_RXBUF_64
#endif
#define RTK_RXBUFLEN		RTK_RXBUF_LEN(RTK_RX_BUF_SZ)
#define RTK_TX_LIST_CNT		4

/*
 * The 8139C+ and 8169 gigE chips support descriptor-based TX
 * and RX. In fact, they even support TCP large send. Descriptors
 * must be allocated in contiguous blocks that are aligned on a
 * 256-byte boundary. The RX rings can hold a maximum of 64 descriptors.
 * The TX rings can hold up to 64 descriptors on the 8139C+, and
 * 1024 descriptors on 8169 gigE chips.
 */
#define RE_RING_ALIGN		256

/*
 * Sizes of the descriptor rings and the TX queue.
 * These numbers must be powers of two to simplify the RE_NEXT_*() macros.
 */
#define RE_RX_DESC_CNT		64
#define RE_TX_DESC_CNT_8139	64
#define RE_TX_DESC_CNT_8169	1024
#define RE_TX_QLEN		64

#define RE_NTXDESC_RSVD		4

struct re_rxsoft {
	struct mbuf	*rxs_mbuf;
	bus_dmamap_t	rxs_dmamap;
};

struct re_txq {
	struct mbuf	*txq_mbuf;
	bus_dmamap_t	txq_dmamap;
	int		txq_descidx;
	int		txq_nsegs;
};

struct re_list_data {
	struct re_txq	re_txq[RE_TX_QLEN];
	int		re_txq_considx;
	int		re_txq_prodidx;
	int		re_txq_free;

	bus_dmamap_t	re_tx_list_map;
	struct re_desc	*re_tx_list;
	int		re_tx_free;	/* # of free descriptors */
	int		re_tx_nextfree;	/* next descriptor to use */
	int		re_tx_desc_cnt;	/* # of descriptors */
	bus_dma_segment_t re_tx_listseg;
	int		re_tx_listnseg;

	struct re_rxsoft re_rxsoft[RE_RX_DESC_CNT];
	bus_dmamap_t	re_rx_list_map;
	struct re_desc	*re_rx_list;
	int		re_rx_prodidx;
	bus_dma_segment_t re_rx_listseg;
	int		re_rx_listnseg;
};

struct rtk_tx_desc {
	SIMPLEQ_ENTRY(rtk_tx_desc) txd_q;
	struct mbuf	*txd_mbuf;
	bus_dmamap_t	txd_dmamap;
	bus_addr_t	txd_txaddr;
	bus_addr_t	txd_txstat;
};

struct rtk_softc {
	struct device sc_dev;		/* generic device structure */
	struct ethercom ethercom;	/* interface info */
	struct mii_data mii;
	struct callout rtk_tick_ch;	/* tick callout */
	bus_space_handle_t rtk_bhandle;	/* bus space handle */
	bus_space_tag_t rtk_btag;	/* bus space tag */
	u_int sc_quirk;			/* chip quirks */
#define RTKQ_8129	0x00000001	/* 8129 */
#define RTKQ_8139CPLUS	0x00000002	/* 8139C+ */
#define RTKQ_8169NONS	0x00000004	/* old non-single 8169 */
#define RTKQ_PCIE	0x00000008	/* PCIe variants */

	bus_dma_tag_t sc_dmat;

	bus_dma_segment_t sc_dmaseg;	/* for rtk(4) */
	int sc_dmanseg;			/* for rtk(4) */

	bus_dmamap_t recv_dmamap;	/* for rtk(4) */
	void *rtk_rx_buf;

	struct rtk_tx_desc rtk_tx_descs[RTK_TX_LIST_CNT];
	SIMPLEQ_HEAD(, rtk_tx_desc) rtk_tx_free;
	SIMPLEQ_HEAD(, rtk_tx_desc) rtk_tx_dirty;

	struct re_list_data re_ldata;
	struct mbuf *re_head;
	struct mbuf *re_tail;
	uint32_t re_rxlenmask;
	int re_testmode;

	int sc_flags;			/* misc flags */
#define RTK_ATTACHED	0x00000001	/* attach has succeeded */
#define RTK_ENABLED	0x00000002	/* chip is enabled */
#define RTK_IS_ENABLED(sc)	((sc)->sc_flags & RTK_ENABLED)

	int sc_txthresh;		/* Early tx threshold */
	int sc_rev;			/* MII revision */

	/* Power management hooks. */
	int	(*sc_enable)	(struct rtk_softc *);
	void	(*sc_disable)	(struct rtk_softc *);
#if NRND > 0
	rndsource_element_t	rnd_source;
#endif
};
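/*
 * Illustrative only: because RE_TX_QLEN and the descriptor counts above
 * are powers of two, advancing a ring index wraps with a simple bit mask,
 * which is exactly what the RE_NEXT_*() macros below rely on. A sketch of
 * producer-side bookkeeping on the TX queue, using only fields defined in
 * this header; DMA mapping and descriptor setup are omitted.
 */
#if 0	/* example, not compiled */
static int
example_txq_enqueue(struct rtk_softc *sc, struct mbuf *m)
{
	struct re_list_data *ld = &sc->re_ldata;
	struct re_txq *txq;

	if (ld->re_txq_free == 0)
		return ENOBUFS;		/* producer caught up with consumer */

	txq = &ld->re_txq[ld->re_txq_prodidx];
	txq->txq_mbuf = m;

	/* Power-of-two RE_TX_QLEN lets the wrap be a mask, not a modulo. */
	ld->re_txq_prodidx = (ld->re_txq_prodidx + 1) & (RE_TX_QLEN - 1);
	ld->re_txq_free--;
	return 0;
}
#endif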
#define RE_TX_DESC_CNT(sc)	((sc)->re_ldata.re_tx_desc_cnt)
#define RE_TX_LIST_SZ(sc)	(RE_TX_DESC_CNT(sc) * sizeof(struct re_desc))
#define RE_NEXT_TX_DESC(sc, x)	(((x) + 1) & (RE_TX_DESC_CNT(sc) - 1))

#define RE_RX_LIST_SZ		(RE_RX_DESC_CNT * sizeof(struct re_desc))
#define RE_NEXT_RX_DESC(sc, x)	(((x) + 1) & (RE_RX_DESC_CNT - 1))

#define RE_NEXT_TXQ(sc, x)	(((x) + 1) & (RE_TX_QLEN - 1))

#define RE_TXDESCSYNC(sc, idx, ops)					\
	bus_dmamap_sync((sc)->sc_dmat,					\
	    (sc)->re_ldata.re_tx_list_map,				\
	    sizeof(struct re_desc) * (idx),				\
	    sizeof(struct re_desc),					\
	    (ops))
#define RE_RXDESCSYNC(sc, idx, ops)					\
	bus_dmamap_sync((sc)->sc_dmat,					\
	    (sc)->re_ldata.re_rx_list_map,				\
	    sizeof(struct re_desc) * (idx),				\
	    sizeof(struct re_desc),					\
	    (ops))

/*
 * The re(4) hardware ip4csum-tx checksum could be mangled on IP packets
 * of 28 bytes or less.
 */
#define RE_IP4CSUMTX_MINLEN	28
#define RE_IP4CSUMTX_PADLEN	(ETHER_HDR_LEN + RE_IP4CSUMTX_MINLEN)
/*
 * XXX
 * We allocate the pad DMA buffer after the RX DMA descriptors for now
 * because RE_TX_LIST_SZ(sc) always occupies a whole page while
 * RE_RX_LIST_SZ is less than PAGE_SIZE, so there is some unused region.
 */
#define RE_RX_DMAMEM_SZ		(RE_RX_LIST_SZ + RE_IP4CSUMTX_PADLEN)
#define RE_TXPADOFF		RE_RX_LIST_SZ
#define RE_TXPADDADDR(sc)	\
	((sc)->re_ldata.re_rx_list_map->dm_segs[0].ds_addr + RE_TXPADOFF)


#define RTK_TXTH_MAX	RTK_TXTH_1536

/*
 * register space access macros
 */
#define CSR_WRITE_4(sc, reg, val)	\
	bus_space_write_4(sc->rtk_btag, sc->rtk_bhandle, reg, val)
#define CSR_WRITE_2(sc, reg, val)	\
	bus_space_write_2(sc->rtk_btag, sc->rtk_bhandle, reg, val)
#define CSR_WRITE_1(sc, reg, val)	\
	bus_space_write_1(sc->rtk_btag, sc->rtk_bhandle, reg, val)

#define CSR_READ_4(sc, reg)		\
	bus_space_read_4(sc->rtk_btag, sc->rtk_bhandle, reg)
#define CSR_READ_2(sc, reg)		\
	bus_space_read_2(sc->rtk_btag, sc->rtk_bhandle, reg)
#define CSR_READ_1(sc, reg)		\
	bus_space_read_1(sc->rtk_btag, sc->rtk_bhandle, reg)

#define RTK_TIMEOUT		1000

/*
 * PCI low memory base and low I/O base registers
 */

#define RTK_PCI_LOIO		0x10
#define RTK_PCI_LOMEM		0x14

#ifdef _KERNEL
uint16_t rtk_read_eeprom(struct rtk_softc *, int, int);
void	rtk_setmulti(struct rtk_softc *);
void	rtk_attach(struct rtk_softc *);
int	rtk_detach(struct rtk_softc *);
int	rtk_activate(struct device *, enum devact);
int	rtk_intr(void *);
#endif /* _KERNEL */
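/*
 * Illustrative only: the RE_*DESCSYNC() macros above bracket CPU access
 * to a single descriptor with bus_dmamap_sync(9). A sketch of how a
 * consumer might peek at RX descriptor 'idx' and hand it back to the
 * chip; the actual descriptor contents are not touched here.
 */
#if 0	/* example, not compiled */
static void
example_rxdesc_peek(struct rtk_softc *sc, int idx)
{
	/* Make the chip's writes to this descriptor visible to the CPU. */
	RE_RXDESCSYNC(sc, idx, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* ... inspect sc->re_ldata.re_rx_list[idx] here ... */

	/* Flush CPU updates before the descriptor is reused by the chip. */
	RE_RXDESCSYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
#endif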
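/*
 * Illustrative only: a typical use of the CSR_*() accessors and
 * RTK_TIMEOUT is a bounded polling loop, e.g. waiting for a
 * self-clearing command bit after a software reset. RTK_COMMAND and
 * RTK_CMD_RESET are assumed here to come from rtl81x9reg.h.
 */
#if 0	/* example, not compiled */
static int
example_reset_wait(struct rtk_softc *sc)
{
	int i;

	/* Request a software reset, then poll until the bit self-clears. */
	CSR_WRITE_1(sc, RTK_COMMAND, RTK_CMD_RESET);
	for (i = 0; i < RTK_TIMEOUT; i++) {
		DELAY(10);
		if ((CSR_READ_1(sc, RTK_COMMAND) & RTK_CMD_RESET) == 0)
			return 0;	/* reset completed */
	}
	return ETIMEDOUT;		/* chip never came back */
}
#endif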