/*	$OpenBSD: if_tht.c,v 1.143 2020/12/17 23:36:47 cheloha Exp $ */

/*
 * Copyright (c) 2007 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Driver for the Tehuti TN30xx multi port 10Gb Ethernet chipsets,
 * see http://www.tehutinetworks.net/.
 *
 * This driver was made possible because Tehuti networks provided
 * hardware and documentation. Thanks!
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/timeout.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/time.h>

#include <machine/bus.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <net/if.h>
#include <net/if_media.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>
#include <netinet/if_ether.h>

#ifdef THT_DEBUG
#define THT_D_FIFO		(1<<0)
#define THT_D_TX		(1<<1)
#define THT_D_RX		(1<<2)
#define THT_D_INTR		(1<<3)

int thtdebug = THT_D_TX | THT_D_RX | THT_D_INTR;

#define DPRINTF(l, f...)	do { if (thtdebug & (l)) printf(f); } while (0)
#else
#define DPRINTF(l, f...)
#endif

/* registers */

#define THT_PCI_BAR		0x10

#define _Q(_q)			((_q) * 4)

/* General Configuration */
#define THT_REG_END_SEL		0x5448 /* PCI Endian Select */
#define THT_REG_CLKPLL		0x5000
#define THT_REG_CLKPLL_PLLLK		(1<<9) /* PLL is locked */
#define THT_REG_CLKPLL_RSTEND		(1<<8) /* Reset ended */
#define THT_REG_CLKPLL_TXF_DIS		(1<<3) /* TX Free disabled */
#define THT_REG_CLKPLL_VNT_STOP		(1<<2) /* VENETO Stop */
#define THT_REG_CLKPLL_PLLRST		(1<<1) /* PLL Reset */
#define THT_REG_CLKPLL_SFTRST		(1<<0) /* Software Reset */
/* Descriptors and FIFO Registers */
#define THT_REG_TXT_CFG0(_q)	(0x4040 + _Q(_q)) /* CFG0 TX Task queues */
#define THT_REG_RXF_CFG0(_q)	(0x4050 + _Q(_q)) /* CFG0 RX Free queues */
#define THT_REG_RXD_CFG0(_q)	(0x4060 + _Q(_q)) /* CFG0 RX DSC queues */
#define THT_REG_TXF_CFG0(_q)	(0x4070 + _Q(_q)) /* CFG0 TX Free queues */
#define THT_REG_TXT_CFG1(_q)	(0x4000 + _Q(_q)) /* CFG1 TX Task queues */
#define THT_REG_RXF_CFG1(_q)	(0x4010 + _Q(_q)) /* CFG1 RX Free queues */
#define THT_REG_RXD_CFG1(_q)	(0x4020 + _Q(_q)) /* CFG1 RX DSC queues */
#define THT_REG_TXF_CFG1(_q)	(0x4030 + _Q(_q)) /* CFG1 TX Free queues */
#define THT_REG_TXT_RPTR(_q)	(0x40c0 + _Q(_q)) /* TX Task read ptr */
#define THT_REG_RXF_RPTR(_q)	(0x40d0 + _Q(_q)) /* RX Free read ptr */
#define THT_REG_RXD_RPTR(_q)	(0x40e0 + _Q(_q)) /* RX DSC read ptr */
#define THT_REG_TXF_RPTR(_q)	(0x40f0 + _Q(_q)) /* TX Free read ptr */
#define THT_REG_TXT_WPTR(_q)	(0x4080 + _Q(_q)) /* TX Task write ptr */
#define THT_REG_RXF_WPTR(_q)	(0x4090 + _Q(_q)) /* RX Free write ptr */
#define THT_REG_RXD_WPTR(_q)	(0x40a0 + _Q(_q)) /* RX DSC write ptr */
#define THT_REG_TXF_WPTR(_q)	(0x40b0 + _Q(_q)) /* TX Free write ptr */
#define THT_REG_HTB_ADDR	0x4100 /* HTB Addressing Mechanism enable */
#define THT_REG_HTB_ADDR_HI	0x4110 /* High HTB Address */
#define THT_REG_HTB_ST_TMR	0x3290 /* HTB Timer */
#define THT_REG_RDINTCM(_q)	(0x5120 + _Q(_q)) /* RX DSC Intr Coalescing */
#define THT_REG_RDINTCM_PKT_TH(_c)	((_c)<<20) /* pkt count threshold */
#define THT_REG_RDINTCM_RXF_TH(_c)	((_c)<<16) /* rxf intr req thresh */
#define THT_REG_RDINTCM_COAL_RC		(1<<15) /* coalescing timer recharge */
#define THT_REG_RDINTCM_COAL(_c)	(_c) /* coalescing timer */
#define THT_REG_TDINTCM(_q)	(0x5130 + _Q(_q)) /* TX DSC Intr Coalescing */
#define THT_REG_TDINTCM_PKT_TH(_c)	((_c)<<20) /* pkt count threshold */
#define THT_REG_TDINTCM_COAL_RC		(1<<15) /* coalescing timer recharge */
#define THT_REG_TDINTCM_COAL(_c)	(_c) /* coalescing timer */
/* 10G Ethernet MAC */
#define THT_REG_10G_REV		0x6000 /* Revision */
#define THT_REG_10G_SCR		0x6004 /* Scratch */
#define THT_REG_10G_CTL		0x6008 /* Control/Status */
#define THT_REG_10G_CTL_CMD_FRAME_EN	(1<<13) /* cmd frame enable */
#define THT_REG_10G_CTL_SW_RESET	(1<<12) /* sw reset */
#define THT_REG_10G_CTL_STATS_AUTO_CLR	(1<<11) /* auto clear statistics */
#define THT_REG_10G_CTL_LOOPBACK	(1<<10) /* enable loopback */
#define THT_REG_10G_CTL_TX_ADDR_INS	(1<<9) /* set mac on tx */
#define THT_REG_10G_CTL_PAUSE_IGNORE	(1<<8) /* ignore pause */
#define THT_REG_10G_CTL_PAUSE_FWD	(1<<7) /* forward pause */
#define THT_REG_10G_CTL_CRC_FWD		(1<<6) /* crc forward */
#define THT_REG_10G_CTL_PAD		(1<<5) /* frame padding */
#define THT_REG_10G_CTL_PROMISC		(1<<4) /* promiscuous mode */
#define THT_REG_10G_CTL_WAN_MODE	(1<<3) /* WAN mode */
#define THT_REG_10G_CTL_RX_EN		(1<<1) /* RX enable */
#define THT_REG_10G_CTL_TX_EN		(1<<0) /* TX enable */
#define THT_REG_10G_FRM_LEN	0x6014 /* Frame Length */
#define THT_REG_10G_PAUSE	0x6018 /* Pause Quanta */
#define THT_REG_10G_RX_SEC	0x601c /* RX Section */
#define THT_REG_10G_TX_SEC	0x6020 /* TX Section */
#define THT_REG_10G_SEC_AVAIL(_t)	(_t) /* section available thresh */
#define THT_REG_10G_SEC_EMPTY(_t)	((_t)<<16) /* section empty avail */
#define THT_REG_10G_RFIFO_AEF	0x6024 /* RX FIFO Almost Empty/Full */
#define THT_REG_10G_TFIFO_AEF	0x6028 /* TX FIFO Almost Empty/Full */
#define THT_REG_10G_FIFO_AE(_t)		(_t) /* almost empty */
#define THT_REG_10G_FIFO_AF(_t)		((_t)<<16) /* almost full */
#define THT_REG_10G_SM_STAT	0x6030 /* MDIO Status */
#define THT_REG_10G_SM_CMD	0x6034 /* MDIO Command */
#define THT_REG_10G_SM_DAT	0x6038 /* MDIO Data */
#define THT_REG_10G_SM_ADD	0x603c /* MDIO Address */
#define THT_REG_10G_STAT	0x6040 /* Status */
/* Statistic Counters */
/* XXX todo */
/* Status Registers */
#define THT_REG_MAC_LNK_STAT	0x0200 /* Link Status */
#define THT_REG_MAC_LNK_STAT_DIS	(1<<4) /* Mac Stats read disable */
#define THT_REG_MAC_LNK_STAT_LINK	(1<<2) /* Link State */
#define THT_REG_MAC_LNK_STAT_REM_FAULT	(1<<1) /* Remote Fault */
#define THT_REG_MAC_LNK_STAT_LOC_FAULT	(1<<0) /* Local Fault */
/* Interrupt Registers */
#define THT_REG_ISR		0x5100 /* Interrupt Status */
#define THT_REG_ISR_LINKCHG(_p)		(1<<(27+(_p))) /* link changed */
#define THT_REG_ISR_GPIO		(1<<26) /* GPIO */
#define THT_REG_ISR_RFRSH		(1<<25) /* DDR Refresh */
#define THT_REG_ISR_SWI			(1<<23) /* software interrupt */
#define THT_REG_ISR_RXF(_q)		(1<<(19+(_q))) /* rx free fifo */
#define THT_REG_ISR_TXF(_q)		(1<<(15+(_q))) /* tx free fifo */
#define THT_REG_ISR_RXD(_q)		(1<<(11+(_q))) /* rx desc fifo */
#define THT_REG_ISR_TMR(_t)		(1<<(6+(_t))) /* timer */
#define THT_REG_ISR_VNT			(1<<5) /* optistrata */
#define THT_REG_ISR_RxFL		(1<<4) /* RX Full */
#define THT_REG_ISR_TR			(1<<2) /* table read */
#define THT_REG_ISR_PCIE_LNK_INT	(1<<1) /* pcie link fail */
#define THT_REG_ISR_GPLE_CLR		(1<<0) /* pcie timeout */
#define THT_FMT_ISR	"\020" "\035LINKCHG1" "\034LINKCHG0" \
			    "\033GPIO" "\032RFRSH" "\030SWI" \
			    "\027RXF3" "\026RXF2" "\025RXF1" \
			    "\024RXF0" "\023TXF3" "\022TXF2" \
			    "\021TXF1" "\020TXF0" "\017RXD3" \
			    "\016RXD2" "\015RXD1" "\014RXD0" \
			    "\012TMR3" "\011TMR2" "\010TMR1" \
			    "\007TMR0" "\006VNT" "\005RxFL" \
			    "\003TR" "\002PCI_LNK_INT" \
			    "\001GPLE_CLR"
#define THT_REG_ISR_GTI		0x5080 /* GTI Interrupt Status */
#define THT_REG_IMR		0x5110 /* Interrupt Mask */
#define THT_REG_IMR_LINKCHG(_p)		(1<<(27+(_p))) /* link changed */
#define THT_REG_IMR_GPIO		(1<<26) /* GPIO */
#define THT_REG_IMR_RFRSH		(1<<25) /* DDR Refresh */
#define THT_REG_IMR_SWI			(1<<23) /* software interrupt */
#define THT_REG_IMR_RXF(_q)		(1<<(19+(_q))) /* rx free fifo */
#define THT_REG_IMR_TXF(_q)		(1<<(15+(_q))) /* tx free fifo */
#define THT_REG_IMR_RXD(_q)		(1<<(11+(_q))) /* rx desc fifo */
#define THT_REG_IMR_TMR(_t)		(1<<(6+(_t))) /* timer */
#define THT_REG_IMR_VNT			(1<<5) /* optistrata */
#define THT_REG_IMR_RxFL		(1<<4) /* RX Full */
#define THT_REG_IMR_TR			(1<<2) /* table read */
#define THT_REG_IMR_PCIE_LNK_INT	(1<<1) /* pcie link fail */
#define THT_REG_IMR_GPLE_CLR		(1<<0) /* pcie timeout */
#define THT_REG_IMR_GTI		0x5090 /* GTI Interrupt Mask */
#define THT_REG_ISR_MSK		0x5140 /* ISR Masked */
/* Global Counters */
/* XXX todo */
/* DDR2 SDRAM Controller Registers */
/* XXX TBD */
/* EEPROM Registers */
/* XXX todo */
/* Init arbitration and status registers */
#define THT_REG_INIT_SEMAPHORE	0x5170 /* Init Semaphore */
#define THT_REG_INIT_STATUS	0x5180 /* Init Status */
/* PCI Credits Registers */
/* XXX todo */
/* TX Arbitration Registers */
#define THT_REG_TXTSK_PR(_q)	(0x41b0 + _Q(_q)) /* TX Queue Priority */
/* RX Part Registers */
#define THT_REG_RX_FLT		0x1240 /* RX Filter Configuration */
#define THT_REG_RX_FLT_ATXER		(1<<15) /* accept with xfer err */
#define THT_REG_RX_FLT_ATRM		(1<<14) /* accept with term err */
#define THT_REG_RX_FLT_AFTSQ		(1<<13) /* accept with fault seq */
#define THT_REG_RX_FLT_OSEN		(1<<12) /* enable pkts */
#define THT_REG_RX_FLT_APHER		(1<<11) /* accept with phy err */
#define THT_REG_RX_FLT_TXFC		(1<<10) /* TX flow control */
#define THT_REG_RX_FLT_FDA		(1<<8) /* filter direct address */
#define THT_REG_RX_FLT_AOF		(1<<7) /* accept overflow frame */
#define THT_REG_RX_FLT_ACF		(1<<6) /* accept control frame */
#define THT_REG_RX_FLT_ARUNT		(1<<5) /* accept runt */
#define THT_REG_RX_FLT_ACRC		(1<<4) /* accept crc error */
#define THT_REG_RX_FLT_AM		(1<<3) /* accept multicast */
#define THT_REG_RX_FLT_AB		(1<<2) /* accept broadcast */
#define THT_REG_RX_FLT_PRM_MASK		0x3 /* promiscuous mode */
#define THT_REG_RX_FLT_PRM_NORMAL	0x0 /* normal mode */
#define THT_REG_RX_FLT_PRM_ALL		0x1 /* pass all incoming frames */
#define THT_REG_RX_MAX_FRAME	0x12c0 /* Max Frame Size */
#define THT_REG_RX_UNC_MAC0	0x1250 /* MAC Address low word */
#define THT_REG_RX_UNC_MAC1	0x1260 /* MAC Address mid word */
#define THT_REG_RX_UNC_MAC2	0x1270 /* MAC Address high word */
#define THT_REG_RX_MAC_MCST0(_m) (0x1a80 + (_m)*8)
#define THT_REG_RX_MAC_MCST1(_m) (0x1a84 + (_m)*8)
#define THT_REG_RX_MAC_MCST_CNT	15
#define THT_REG_RX_MCST_HASH	0x1a00 /* imperfect multicast filter hash */
#define THT_REG_RX_MCST_HASH_SIZE	(256 / NBBY)
/* OptiStrata Debug Registers */
#define THT_REG_VPC		0x2300 /* Program Counter */
#define THT_REG_VLI		0x2310 /* Last Interrupt */
#define THT_REG_VIC		0x2320 /* Interrupts Count */
#define THT_REG_VTMR		0x2330 /* Timer */
#define THT_REG_VGLB		0x2340 /* Global */
/* SW Reset Registers */
#define THT_REG_RST_PRT		0x7000 /* Reset Port */
#define THT_REG_RST_PRT_ACTIVE		0x1 /* port reset is active */
#define THT_REG_DIS_PRT		0x7010 /* Disable Port */
#define THT_REG_RST_QU_0	0x7020 /* Reset Queue 0 */
#define THT_REG_RST_QU_1	0x7028 /* Reset Queue 1 */
#define THT_REG_DIS_QU_0	0x7030 /* Disable Queue 0 */
#define THT_REG_DIS_QU_1	0x7038 /* Disable Queue 1 */

#define THT_PORT_SIZE		0x8000
#define THT_PORT_REGION(_p)	((_p) * THT_PORT_SIZE)
#define THT_NQUEUES		4

#define THT_FIFO_ALIGN		4096
#define THT_FIFO_SIZE_4k	0x0
#define THT_FIFO_SIZE_8k	0x1
#define THT_FIFO_SIZE_16k	0x2
#define THT_FIFO_SIZE_32k	0x3
#define THT_FIFO_SIZE(_r)	(4096 * (1<<(_r)))
#define THT_FIFO_GAP		8 /* keep 8 bytes between ptrs */
#define THT_FIFO_PTR_MASK	0x00007ff8 /* rptr/wptr mask */

#define THT_FIFO_DESC_LEN	208 /* a descriptor can't be bigger than this */
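
/*
 * The TXT, RXF, RXD and TXF queues above are rings of descriptors in host
 * memory. The chip and the driver chase each other around each ring via
 * the read/write pointer registers, keeping THT_FIFO_GAP bytes between
 * the pointers so the ring never appears to wrap onto itself.
 */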
#define THT_IMR_DOWN(_p)	(THT_REG_IMR_LINKCHG(_p))
#define THT_IMR_UP(_p)		(THT_REG_IMR_LINKCHG(_p) | \
				    THT_REG_IMR_RXF(0) | THT_REG_IMR_TXF(0) | \
				    THT_REG_IMR_RXD(0))

/* hardware structures (we're using the 64 bit variants) */

/* physical buffer descriptor */
struct tht_pbd {
	u_int32_t		addr_lo;
	u_int32_t		addr_hi;
	u_int32_t		len;
} __packed;
#define THT_PBD_PKTLEN		(64 * 1024)

/* rx free fifo */
struct tht_rx_free {
	u_int16_t		bc; /* buffer count (0:4) */
	u_int16_t		type;

	u_int64_t		uid;

	/* followed by a pbd list */
} __packed;
#define THT_RXF_TYPE		1
#define THT_RXF_1ST_PDB_LEN	128
#define THT_RXF_SGL_LEN		((THT_FIFO_DESC_LEN - \
				    sizeof(struct tht_rx_free)) / \
				    sizeof(struct tht_pbd))
#define THT_RXF_PKT_NUM		128

/* rx descriptor */
struct tht_rx_desc {
	u_int32_t		flags;
#define THT_RXD_FLAGS_BC(_f)		((_f) & 0x1f) /* buffer count */
#define THT_RXD_FLAGS_RXFQ(_f)		(((_f)>>8) & 0x3) /* rxf queue id */
#define THT_RXD_FLAGS_TO		(1<<15)
#define THT_RXD_FLAGS_TYPE(_f)		(((_f)>>16) & 0xf) /* desc type */
#define THT_RXD_FLAGS_OVF		(1<<21) /* overflow error */
#define THT_RXD_FLAGS_RUNT		(1<<22) /* runt error */
#define THT_RXD_FLAGS_CRC		(1<<23) /* crc error */
#define THT_RXD_FLAGS_UDPCS		(1<<24) /* udp checksum error */
#define THT_RXD_FLAGS_TCPCS		(1<<25) /* tcp checksum error */
#define THT_RXD_FLAGS_IPCS		(1<<26) /* ip checksum error */
#define THT_RXD_FLAGS_PKT_ID		0x70000000
#define THT_RXD_FLAGS_PKT_ID_NONIP	0x00000000
#define THT_RXD_FLAGS_PKT_ID_TCP4	0x10000000
#define THT_RXD_FLAGS_PKT_ID_UDP4	0x20000000
#define THT_RXD_FLAGS_PKT_ID_IPV4	0x30000000
#define THT_RXD_FLAGS_PKT_ID_TCP6	0x50000000
#define THT_RXD_FLAGS_PKT_ID_UDP6	0x60000000
#define THT_RXD_FLAGS_PKT_ID_IPV6	0x70000000
#define THT_RXD_FLAGS_VTAG		(1<<31)
	u_int16_t		len;
	u_int16_t		vlan;
#define THT_RXD_VLAN_ID(_v)		((_v) & 0xfff)
#define THT_RXD_VLAN_CFI		(1<<12)
#define THT_RXD_VLAN_PRI(_v)		(((_v) >> 13) & 0x7)

	u_int64_t		uid;
} __packed;
#define THT_RXD_TYPE		2

/* rx descriptor type 3: data chain instruction */
struct tht_rx_desc_dc {
	/* preceded by tht_rx_desc */

	u_int16_t		cd_offset;
	u_int16_t		flags;

	u_int8_t		data[4];
} __packed;
#define THT_RXD_TYPE_DC		3

/* rx descriptor type 4: rss (recv side scaling) information */
struct tht_rx_desc_rss {
	/* preceded by tht_rx_desc */

	u_int8_t		rss_hft;
	u_int8_t		rss_type;
	u_int8_t		rss_tcpu;
	u_int8_t		reserved;

	u_int32_t		rss_hash;
} __packed;
#define THT_RXD_TYPE_RSS	4

/* tx task fifo */
struct tht_tx_task {
	u_int32_t		flags;
#define THT_TXT_FLAGS_BC(_f)	(_f) /* buffer count */
#define THT_TXT_FLAGS_UDPCS	(1<<5) /* udp checksum */
#define THT_TXT_FLAGS_TCPCS	(1<<6) /* tcp checksum */
#define THT_TXT_FLAGS_IPCS	(1<<7) /* ip checksum */
#define THT_TXT_FLAGS_VTAG	(1<<8) /* insert vlan tag */
#define THT_TXT_FLAGS_LGSND	(1<<9) /* tcp large send enabled */
#define THT_TXT_FLAGS_FRAG	(1<<10) /* ip fragmentation enabled */
#define THT_TXT_FLAGS_CFI	(1<<12) /* canonical format indicator */
#define THT_TXT_FLAGS_PRIO(_f)	((_f)<<13) /* vlan priority */
#define THT_TXT_FLAGS_VLAN(_f)	((_f)<<20) /* vlan id */
	u_int16_t		mss_mtu;
	u_int16_t		len;

	u_int64_t		uid;

	/* followed by a pbd list */
} __packed;
#define THT_TXT_TYPE		(3<<16)
#define THT_TXT_SGL_LEN		((THT_FIFO_DESC_LEN - \
				    sizeof(struct tht_tx_task)) / \
				    sizeof(struct tht_pbd))
#define THT_TXT_PKT_NUM		128

/* tx free fifo */
struct tht_tx_free {
	u_int32_t		status;

	u_int64_t		uid;

	u_int32_t		pad;
} __packed;

/* pci controller autoconf glue */

struct thtc_softc {
	struct device		sc_dev;

	bus_dma_tag_t		sc_dmat;

	bus_space_tag_t		sc_memt;
	bus_space_handle_t	sc_memh;
	bus_size_t		sc_mems;
	void			*sc_ih;
};

int			thtc_match(struct device *, void *, void *);
void			thtc_attach(struct device *, struct device *, void *);
int			thtc_print(void *, const char *);

struct cfattach thtc_ca = {
	sizeof(struct thtc_softc), thtc_match, thtc_attach
};

struct cfdriver thtc_cd = {
	NULL, "thtc", DV_DULL
};

/* glue between the controller and the port */

struct tht_attach_args {
	int			taa_port;

	struct pci_attach_args	*taa_pa;
};

/* tht itself */

struct tht_dmamem {
	bus_dmamap_t		tdm_map;
	bus_dma_segment_t	tdm_seg;
	size_t			tdm_size;
	caddr_t			tdm_kva;
};
#define THT_DMA_MAP(_tdm)	((_tdm)->tdm_map)
#define THT_DMA_DVA(_tdm)	((_tdm)->tdm_map->dm_segs[0].ds_addr)
#define THT_DMA_KVA(_tdm)	((void *)(_tdm)->tdm_kva)

struct tht_fifo_desc {
	bus_size_t		tfd_cfg0;
	bus_size_t		tfd_cfg1;
	bus_size_t		tfd_rptr;
	bus_size_t		tfd_wptr;
	u_int32_t		tfd_size;
	int			tfd_write;
};
#define THT_FIFO_PRE_SYNC(_d)	((_d)->tfd_write ? \
				    BUS_DMASYNC_PREWRITE : \
				    BUS_DMASYNC_PREREAD)
#define THT_FIFO_POST_SYNC(_d)	((_d)->tfd_write ? \
				    BUS_DMASYNC_POSTWRITE : \
				    BUS_DMASYNC_POSTREAD)

struct tht_fifo {
	struct tht_fifo_desc	*tf_desc;
	struct tht_dmamem	*tf_mem;
	int			tf_len;
	int			tf_rptr;
	int			tf_wptr;
	int			tf_ready;
};

struct tht_pkt {
	u_int64_t		tp_id;

	bus_dmamap_t		tp_dmap;
	struct mbuf		*tp_m;

	TAILQ_ENTRY(tht_pkt)	tp_link;
};

struct tht_pkt_list {
	struct tht_pkt		*tpl_pkts;
	TAILQ_HEAD(, tht_pkt)	tpl_free;
	TAILQ_HEAD(, tht_pkt)	tpl_used;
};

struct tht_softc {
	struct device		sc_dev;
	struct thtc_softc	*sc_thtc;
	int			sc_port;

	bus_space_handle_t	sc_memh;

	struct arpcom		sc_ac;
	struct ifmedia		sc_media;
	struct timeval		sc_mediacheck;

	u_int16_t		sc_lladdr[3];

	struct tht_pkt_list	sc_tx_list;
	struct tht_pkt_list	sc_rx_list;

	struct tht_fifo		sc_txt;
	struct tht_fifo		sc_rxf;
	struct tht_fifo		sc_rxd;
	struct tht_fifo		sc_txf;

	u_int32_t		sc_imr;

	struct rwlock		sc_lock;
};

int			tht_match(struct device *, void *, void *);
void			tht_attach(struct device *, struct device *, void *);
void			tht_mountroot(struct device *);
int			tht_intr(void *);

struct cfattach tht_ca = {
	sizeof(struct tht_softc), tht_match, tht_attach
};

struct cfdriver tht_cd = {
	NULL, "tht", DV_IFNET
};

/* pkts */
int			tht_pkt_alloc(struct tht_softc *,
			    struct tht_pkt_list *, int, int);
void			tht_pkt_free(struct tht_softc *,
			    struct tht_pkt_list *);
void			tht_pkt_put(struct tht_pkt_list *, struct tht_pkt *);
struct tht_pkt		*tht_pkt_get(struct tht_pkt_list *);
struct tht_pkt		*tht_pkt_used(struct tht_pkt_list *);

/* fifos */

struct tht_fifo_desc tht_txt_desc = {
	THT_REG_TXT_CFG0(0),
	THT_REG_TXT_CFG1(0),
	THT_REG_TXT_RPTR(0),
	THT_REG_TXT_WPTR(0),
	THT_FIFO_SIZE_16k,
	1
};

struct tht_fifo_desc tht_rxf_desc = {
	THT_REG_RXF_CFG0(0),
	THT_REG_RXF_CFG1(0),
	THT_REG_RXF_RPTR(0),
	THT_REG_RXF_WPTR(0),
	THT_FIFO_SIZE_16k,
	1
};

struct tht_fifo_desc tht_rxd_desc = {
	THT_REG_RXD_CFG0(0),
	THT_REG_RXD_CFG1(0),
	THT_REG_RXD_RPTR(0),
	THT_REG_RXD_WPTR(0),
	THT_FIFO_SIZE_16k,
	0
};

struct tht_fifo_desc tht_txf_desc = {
	THT_REG_TXF_CFG0(0),
	THT_REG_TXF_CFG1(0),
	THT_REG_TXF_RPTR(0),
	THT_REG_TXF_WPTR(0),
	THT_FIFO_SIZE_4k,
	0
};

int			tht_fifo_alloc(struct tht_softc *, struct tht_fifo *,
			    struct tht_fifo_desc *);
void			tht_fifo_free(struct tht_softc *, struct tht_fifo *);

size_t			tht_fifo_readable(struct tht_softc *,
			    struct tht_fifo *);
size_t			tht_fifo_writable(struct tht_softc *,
			    struct tht_fifo *);
void			tht_fifo_pre(struct tht_softc *,
			    struct tht_fifo *);
void			tht_fifo_read(struct tht_softc *, struct tht_fifo *,
			    void *, size_t);
void			tht_fifo_write(struct tht_softc *, struct tht_fifo *,
			    void *, size_t);
void			tht_fifo_write_dmap(struct tht_softc *,
			    struct tht_fifo *, bus_dmamap_t);
void			tht_fifo_write_pad(struct tht_softc *,
			    struct tht_fifo *, int);
void			tht_fifo_post(struct tht_softc *,
			    struct tht_fifo *);

/* port operations */
void			tht_lladdr_read(struct tht_softc *);
void			tht_lladdr_write(struct tht_softc *);
int			tht_sw_reset(struct tht_softc *);
int			tht_fw_load(struct tht_softc *);
void			tht_link_state(struct tht_softc *);

/* interface operations */
int			tht_ioctl(struct ifnet *, u_long, caddr_t);
void			tht_watchdog(struct ifnet *);
void			tht_start(struct ifnet *);
int			tht_load_pkt(struct tht_softc *, struct tht_pkt *,
			    struct mbuf *);
void			tht_txf(struct tht_softc *sc);

void			tht_rxf_fill(struct tht_softc *, int);
void			tht_rxf_drain(struct tht_softc *);
void			tht_rxd(struct tht_softc *);

void			tht_up(struct tht_softc *);
void			tht_iff(struct tht_softc *);
void			tht_down(struct tht_softc *);

/* ifmedia operations */
int			tht_media_change(struct ifnet *);
void			tht_media_status(struct ifnet *, struct ifmediareq *);

/* wrapper around dma memory */
struct tht_dmamem	*tht_dmamem_alloc(struct tht_softc *, bus_size_t,
			    bus_size_t);
void			tht_dmamem_free(struct tht_softc *,
			    struct tht_dmamem *);

/* bus space operations */
u_int32_t		tht_read(struct tht_softc *, bus_size_t);
void			tht_write(struct tht_softc *, bus_size_t, u_int32_t);
void			tht_write_region(struct tht_softc *, bus_size_t,
			    void *, size_t);
int			tht_wait_eq(struct tht_softc *, bus_size_t, u_int32_t,
			    u_int32_t, int);
int			tht_wait_ne(struct tht_softc *, bus_size_t, u_int32_t,
			    u_int32_t, int);

#define tht_set(_s, _r, _b)		tht_write((_s), (_r), \
					    tht_read((_s), (_r)) | (_b))
#define tht_clr(_s, _r, _b)		tht_write((_s), (_r), \
					    tht_read((_s), (_r)) & ~(_b))
#define tht_wait_set(_s, _r, _b, _t)	tht_wait_eq((_s), (_r), \
					    (_b), (_b), (_t))


/* misc */
#define DEVNAME(_sc)	((_sc)->sc_dev.dv_xname)
#define LWORDS(_b)	(((_b) + 7) >> 3)


struct thtc_device {
	pci_vendor_id_t		td_vendor;
	pci_vendor_id_t		td_product;
	u_int			td_nports;
};

const struct thtc_device *thtc_lookup(struct pci_attach_args *);
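
/* supported Tehuti controllers and the number of ports each provides */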
static const struct thtc_device thtc_devices[] = {
	{ PCI_VENDOR_TEHUTI,	PCI_PRODUCT_TEHUTI_TN3009, 1 },
	{ PCI_VENDOR_TEHUTI,	PCI_PRODUCT_TEHUTI_TN3010, 1 },
	{ PCI_VENDOR_TEHUTI,	PCI_PRODUCT_TEHUTI_TN3014, 2 }
};

const struct thtc_device *
thtc_lookup(struct pci_attach_args *pa)
{
	int				i;
	const struct thtc_device	*td;

	for (i = 0; i < nitems(thtc_devices); i++) {
		td = &thtc_devices[i];
		if (td->td_vendor == PCI_VENDOR(pa->pa_id) &&
		    td->td_product == PCI_PRODUCT(pa->pa_id))
			return (td);
	}

	return (NULL);
}

int
thtc_match(struct device *parent, void *match, void *aux)
{
	struct pci_attach_args		*pa = aux;

	if (thtc_lookup(pa) != NULL)
		return (1);

	return (0);
}

void
thtc_attach(struct device *parent, struct device *self, void *aux)
{
	struct thtc_softc		*sc = (struct thtc_softc *)self;
	struct pci_attach_args		*pa = aux;
	pcireg_t			memtype;
	const struct thtc_device	*td;
	struct tht_attach_args		taa;
	pci_intr_handle_t		ih;
	int				i;

	bzero(&taa, sizeof(taa));
	td = thtc_lookup(pa);

	sc->sc_dmat = pa->pa_dmat;

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, THT_PCI_BAR);
	if (pci_mapreg_map(pa, THT_PCI_BAR, memtype, 0, &sc->sc_memt,
	    &sc->sc_memh, NULL, &sc->sc_mems, 0) != 0) {
		printf(": unable to map host registers\n");
		return;
	}

	if (pci_intr_map(pa, &ih) != 0) {
		printf(": unable to map interrupt\n");
		goto unmap;
	}

	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih,
	    IPL_NET, tht_intr, sc, DEVNAME(sc));
	if (sc->sc_ih == NULL) {
		printf(": unable to establish interrupt\n");
		return;
	}
	printf(": %s\n", pci_intr_string(pa->pa_pc, ih));

	taa.taa_pa = pa;
	for (i = 0; i < td->td_nports; i++) {
		taa.taa_port = i;

		config_found(self, &taa, thtc_print);
	}

	return;

unmap:
	bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
	sc->sc_mems = 0;
}

int
thtc_print(void *aux, const char *pnp)
{
	struct tht_attach_args		*taa = aux;

	if (pnp != NULL)
		printf("\"%s\" at %s", tht_cd.cd_name, pnp);

	printf(" port %d", taa->taa_port);

	return (UNCONF);
}

int
tht_match(struct device *parent, void *match, void *aux)
{
	return (1);
}

void
tht_attach(struct device *parent, struct device *self, void *aux)
{
	struct thtc_softc		*csc = (struct thtc_softc *)parent;
	struct tht_softc		*sc = (struct tht_softc *)self;
	struct tht_attach_args		*taa = aux;
	struct ifnet			*ifp;

	sc->sc_thtc = csc;
	sc->sc_port = taa->taa_port;
	sc->sc_imr = THT_IMR_DOWN(sc->sc_port);
	rw_init(&sc->sc_lock, "thtioc");

	if (bus_space_subregion(csc->sc_memt, csc->sc_memh,
	    THT_PORT_REGION(sc->sc_port), THT_PORT_SIZE,
	    &sc->sc_memh) != 0) {
		printf(": unable to map port registers\n");
		return;
	}

	if (tht_sw_reset(sc) != 0) {
		printf(": unable to reset port\n");
		/* bus_space(9) says we don't have to free subregions */
		return;
	}

	tht_lladdr_read(sc);
	bcopy(sc->sc_lladdr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN);

	ifp = &sc->sc_ac.ac_if;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_capabilities = IFCAP_VLAN_MTU;
	ifp->if_ioctl = tht_ioctl;
	ifp->if_start = tht_start;
	ifp->if_watchdog = tht_watchdog;
	ifp->if_hardmtu = MCLBYTES - ETHER_HDR_LEN - ETHER_CRC_LEN; /* XXX */
	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
	ifq_set_maxlen(&ifp->if_snd, 400);

	ifmedia_init(&sc->sc_media, 0, tht_media_change, tht_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	printf(": address %s\n", ether_sprintf(sc->sc_ac.ac_enaddr));

	config_mountroot(self, tht_mountroot);
}

void
tht_mountroot(struct device *self)
{
	struct tht_softc		*sc = (struct tht_softc *)self;

	if (tht_fifo_alloc(sc, &sc->sc_txt, &tht_txt_desc) != 0)
		return;

	if (tht_fw_load(sc) != 0)
		printf("%s: firmware load failed\n", DEVNAME(sc));

	tht_sw_reset(sc);

	tht_fifo_free(sc, &sc->sc_txt);

	tht_link_state(sc);
	tht_write(sc, THT_REG_IMR, sc->sc_imr);
}

int
tht_intr(void *arg)
{
	struct thtc_softc		*thtc = arg;
	struct tht_softc		*sc = arg;
	struct device			*d;
	struct ifnet			*ifp;
	u_int32_t			isr;
	int				rv = 0;

	for (d = TAILQ_NEXT(&thtc->sc_dev, dv_list); d != NULL;
	    d = TAILQ_NEXT(d, dv_list)) {
		sc = (struct tht_softc *)d;

		isr = tht_read(sc, THT_REG_ISR);
		if (isr == 0x0) {
			tht_write(sc, THT_REG_IMR, sc->sc_imr);
			continue;
		}
		rv = 1;

		DPRINTF(THT_D_INTR, "%s: isr: 0x%b\n", DEVNAME(sc), isr,
		    THT_FMT_ISR);

		if (ISSET(isr, THT_REG_ISR_LINKCHG(0) | THT_REG_ISR_LINKCHG(1)))
			tht_link_state(sc);

		ifp = &sc->sc_ac.ac_if;
		if (ifp->if_flags & IFF_RUNNING) {
			if (ISSET(isr, THT_REG_ISR_RXD(0)))
				tht_rxd(sc);

			if (ISSET(isr, THT_REG_ISR_RXF(0)))
				tht_rxf_fill(sc, 0);

			if (ISSET(isr, THT_REG_ISR_TXF(0)))
				tht_txf(sc);

			tht_start(ifp);
		}
		tht_write(sc, THT_REG_IMR, sc->sc_imr);
	}
	return (rv);
}

int
tht_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr)
{
	struct tht_softc		*sc = ifp->if_softc;
	struct ifreq			*ifr = (struct ifreq *)addr;
	int				s, error = 0;

	rw_enter_write(&sc->sc_lock);
	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				tht_up(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				tht_down(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, addr);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			tht_iff(sc);
		error = 0;
	}

	splx(s);
	rw_exit_write(&sc->sc_lock);

	return (error);
}

void
tht_up(struct tht_softc *sc)
{
	struct ifnet			*ifp = &sc->sc_ac.ac_if;

	if (ISSET(ifp->if_flags, IFF_RUNNING)) {
		return;
	}

	if (tht_pkt_alloc(sc, &sc->sc_tx_list, THT_TXT_PKT_NUM,
	    THT_TXT_SGL_LEN) != 0)
		return;
	if (tht_pkt_alloc(sc, &sc->sc_rx_list, THT_RXF_PKT_NUM,
	    THT_RXF_SGL_LEN) != 0)
		goto free_tx_list;

	if (tht_fifo_alloc(sc, &sc->sc_txt, &tht_txt_desc) != 0)
		goto free_rx_list;
	if (tht_fifo_alloc(sc, &sc->sc_rxf, &tht_rxf_desc) != 0)
		goto free_txt;
	if (tht_fifo_alloc(sc, &sc->sc_rxd, &tht_rxd_desc) != 0)
		goto free_rxf;
	if (tht_fifo_alloc(sc, &sc->sc_txf, &tht_txf_desc) != 0)
		goto free_rxd;
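
	/* set up the 10G MAC: frame length, pause quanta and fifo thresholds */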
	tht_write(sc, THT_REG_10G_FRM_LEN, MCLBYTES - ETHER_ALIGN);
	tht_write(sc, THT_REG_10G_PAUSE, 0x96);
	tht_write(sc, THT_REG_10G_RX_SEC, THT_REG_10G_SEC_AVAIL(0x10) |
	    THT_REG_10G_SEC_EMPTY(0x80));
	tht_write(sc, THT_REG_10G_TX_SEC, THT_REG_10G_SEC_AVAIL(0x10) |
	    THT_REG_10G_SEC_EMPTY(0xe0));
	tht_write(sc, THT_REG_10G_RFIFO_AEF, THT_REG_10G_FIFO_AE(0x0) |
	    THT_REG_10G_FIFO_AF(0x0));
	tht_write(sc, THT_REG_10G_TFIFO_AEF, THT_REG_10G_FIFO_AE(0x0) |
	    THT_REG_10G_FIFO_AF(0x0));
	tht_write(sc, THT_REG_10G_CTL, THT_REG_10G_CTL_TX_EN |
	    THT_REG_10G_CTL_RX_EN | THT_REG_10G_CTL_PAD |
	    THT_REG_10G_CTL_PROMISC);

	tht_write(sc, THT_REG_VGLB, 0);

	tht_write(sc, THT_REG_RX_MAX_FRAME, MCLBYTES - ETHER_ALIGN);

	tht_write(sc, THT_REG_RDINTCM(0), THT_REG_RDINTCM_PKT_TH(12) |
	    THT_REG_RDINTCM_RXF_TH(4) | THT_REG_RDINTCM_COAL_RC |
	    THT_REG_RDINTCM_COAL(0x20));
	tht_write(sc, THT_REG_TDINTCM(0), THT_REG_TDINTCM_PKT_TH(12) |
	    THT_REG_TDINTCM_COAL_RC | THT_REG_TDINTCM_COAL(0x20));

	bcopy(sc->sc_ac.ac_enaddr, sc->sc_lladdr, ETHER_ADDR_LEN);
	tht_lladdr_write(sc);

	/* populate rxf fifo */
	tht_rxf_fill(sc, 1);

	/* program promiscuous mode and multicast filters */
	tht_iff(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	/* enable interrupts */
	sc->sc_imr = THT_IMR_UP(sc->sc_port);
	tht_write(sc, THT_REG_IMR, sc->sc_imr);

	return;

free_rxd:
	tht_fifo_free(sc, &sc->sc_rxd);
free_rxf:
	tht_fifo_free(sc, &sc->sc_rxf);
free_txt:
	tht_fifo_free(sc, &sc->sc_txt);

	tht_sw_reset(sc);

free_rx_list:
	tht_pkt_free(sc, &sc->sc_rx_list);
free_tx_list:
	tht_pkt_free(sc, &sc->sc_tx_list);
}

void
tht_iff(struct tht_softc *sc)
{
	struct ifnet			*ifp = &sc->sc_ac.ac_if;
	struct ether_multi		*enm;
	struct ether_multistep		step;
	u_int32_t			rxf;
	u_int8_t			imf[THT_REG_RX_MCST_HASH_SIZE];
	u_int8_t			hash;
	int				i;

	ifp->if_flags &= ~IFF_ALLMULTI;

	rxf = THT_REG_RX_FLT_OSEN | THT_REG_RX_FLT_AM | THT_REG_RX_FLT_AB;
	for (i = 0; i < THT_REG_RX_MAC_MCST_CNT; i++) {
		tht_write(sc, THT_REG_RX_MAC_MCST0(i), 0);
		tht_write(sc, THT_REG_RX_MAC_MCST1(i), 0);
	}
	memset(imf, 0x00, sizeof(imf));

	if (ifp->if_flags & IFF_PROMISC) {
		ifp->if_flags |= IFF_ALLMULTI;
		rxf |= THT_REG_RX_FLT_PRM_ALL;
	} else if (sc->sc_ac.ac_multirangecnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		memset(imf, 0xff, sizeof(imf));
	} else {
		ETHER_FIRST_MULTI(step, &sc->sc_ac, enm);

#if 0
		/* fill the perfect multicast filters */
		for (i = 0; i < THT_REG_RX_MAC_MCST_CNT; i++) {
			if (enm == NULL)
				break;

			tht_write(sc, THT_REG_RX_MAC_MCST0(i),
			    (enm->enm_addrlo[0] << 0) |
			    (enm->enm_addrlo[1] << 8) |
			    (enm->enm_addrlo[2] << 16) |
			    (enm->enm_addrlo[3] << 24));
			tht_write(sc, THT_REG_RX_MAC_MCST1(i),
			    (enm->enm_addrlo[4] << 0) |
			    (enm->enm_addrlo[5] << 8));

			ETHER_NEXT_MULTI(step, enm);
		}
#endif

		/* fill the imperfect multicast filter with what's left */
		while (enm != NULL) {
			hash = 0x00;
			for (i = 0; i < ETHER_ADDR_LEN; i++)
				hash ^= enm->enm_addrlo[i];
			setbit(imf, hash);

			ETHER_NEXT_MULTI(step, enm);
		}
	}

	tht_write_region(sc, THT_REG_RX_MCST_HASH, imf, sizeof(imf));
	tht_write(sc, THT_REG_RX_FLT, rxf);
}
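
/*
 * Wait for the tx fifos to drain, then reset the port and release the
 * dma resources set up in tht_up.
 */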
void
tht_down(struct tht_softc *sc)
{
	struct ifnet			*ifp = &sc->sc_ac.ac_if;

	if (!ISSET(ifp->if_flags, IFF_RUNNING)) {
		return;
	}

	ifp->if_flags &= ~(IFF_RUNNING | IFF_ALLMULTI);
	ifq_clr_oactive(&ifp->if_snd);

	while (tht_fifo_writable(sc, &sc->sc_txt) < sc->sc_txt.tf_len &&
	    tht_fifo_readable(sc, &sc->sc_txf) > 0)
		tsleep_nsec(sc, 0, "thtdown", SEC_TO_NSEC(1));

	sc->sc_imr = THT_IMR_DOWN(sc->sc_port);
	tht_write(sc, THT_REG_IMR, sc->sc_imr);

	tht_sw_reset(sc);

	tht_fifo_free(sc, &sc->sc_txf);
	tht_fifo_free(sc, &sc->sc_rxd);
	tht_fifo_free(sc, &sc->sc_rxf);
	tht_fifo_free(sc, &sc->sc_txt);

	/* free mbufs that were on the rxf fifo */
	tht_rxf_drain(sc);

	tht_pkt_free(sc, &sc->sc_rx_list);
	tht_pkt_free(sc, &sc->sc_tx_list);
}

void
tht_start(struct ifnet *ifp)
{
	struct tht_softc		*sc = ifp->if_softc;
	struct tht_pkt			*pkt;
	struct tht_tx_task		txt;
	u_int32_t			flags;
	struct mbuf			*m;
	int				bc;

	if (!(ifp->if_flags & IFF_RUNNING))
		return;
	if (ifq_is_oactive(&ifp->if_snd))
		return;
	if (ifq_empty(&ifp->if_snd))
		return;

	if (tht_fifo_writable(sc, &sc->sc_txt) <= THT_FIFO_DESC_LEN)
		return;

	bzero(&txt, sizeof(txt));

	tht_fifo_pre(sc, &sc->sc_txt);

	do {
		m = ifq_deq_begin(&ifp->if_snd);
		if (m == NULL)
			break;

		pkt = tht_pkt_get(&sc->sc_tx_list);
		if (pkt == NULL) {
			ifq_deq_rollback(&ifp->if_snd, m);
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		ifq_deq_commit(&ifp->if_snd, m);
		if (tht_load_pkt(sc, pkt, m) != 0) {
			m_freem(m);
			tht_pkt_put(&sc->sc_tx_list, pkt);
			ifp->if_oerrors++;
			break;
		}
		/* thou shalt not use m after this point, only pkt->tp_m */

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, pkt->tp_m, BPF_DIRECTION_OUT);
#endif

		bc = sizeof(txt) +
		    sizeof(struct tht_pbd) * pkt->tp_dmap->dm_nsegs;

		flags = THT_TXT_TYPE | LWORDS(bc);
		txt.flags = htole32(flags);
		txt.len = htole16(pkt->tp_m->m_pkthdr.len);
		txt.uid = pkt->tp_id;

		DPRINTF(THT_D_TX, "%s: txt uid 0x%llx flags 0x%08x len %d\n",
		    DEVNAME(sc), pkt->tp_id, flags, pkt->tp_m->m_pkthdr.len);

		tht_fifo_write(sc, &sc->sc_txt, &txt, sizeof(txt));
		tht_fifo_write_dmap(sc, &sc->sc_txt, pkt->tp_dmap);
		tht_fifo_write_pad(sc, &sc->sc_txt, bc);

		bus_dmamap_sync(sc->sc_thtc->sc_dmat, pkt->tp_dmap, 0,
		    pkt->tp_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);

	} while (sc->sc_txt.tf_ready > THT_FIFO_DESC_LEN);

	tht_fifo_post(sc, &sc->sc_txt);
}

int
tht_load_pkt(struct tht_softc *sc, struct tht_pkt *pkt, struct mbuf *m)
{
	bus_dma_tag_t			dmat = sc->sc_thtc->sc_dmat;
	bus_dmamap_t			dmap = pkt->tp_dmap;
	struct mbuf			*m0 = NULL;

	switch (bus_dmamap_load_mbuf(dmat, dmap, m, BUS_DMA_NOWAIT)) {
	case 0:
		pkt->tp_m = m;
		break;

	case EFBIG: /* mbuf chain is too fragmented */
		MGETHDR(m0, M_DONTWAIT, MT_DATA);
		if (m0 == NULL)
			return (ENOBUFS);
		if (m->m_pkthdr.len > MHLEN) {
			MCLGET(m0, M_DONTWAIT);
			if (!(m0->m_flags & M_EXT)) {
				m_freem(m0);
				return (ENOBUFS);
			}
		}
		m_copydata(m, 0, m->m_pkthdr.len, mtod(m0, caddr_t));
		m0->m_pkthdr.len = m0->m_len = m->m_pkthdr.len;
		if (bus_dmamap_load_mbuf(dmat, dmap, m0, BUS_DMA_NOWAIT)) {
			m_freem(m0);
			return (ENOBUFS);
		}

		m_freem(m);
		pkt->tp_m = m0;
		break;

	default:
		return (ENOBUFS);
	}

	return (0);
}

void
tht_txf(struct tht_softc *sc)
{
	struct ifnet			*ifp = &sc->sc_ac.ac_if;
	bus_dma_tag_t			dmat = sc->sc_thtc->sc_dmat;
	bus_dmamap_t			dmap;
	struct tht_tx_free		txf;
	struct tht_pkt			*pkt;

	if (tht_fifo_readable(sc, &sc->sc_txf) < sizeof(txf))
		return;

	tht_fifo_pre(sc, &sc->sc_txf);

	do {
		tht_fifo_read(sc, &sc->sc_txf, &txf, sizeof(txf));

		DPRINTF(THT_D_TX, "%s: txf uid 0x%llx\n", DEVNAME(sc), txf.uid);

		pkt = &sc->sc_tx_list.tpl_pkts[txf.uid];
		dmap = pkt->tp_dmap;

		bus_dmamap_sync(dmat, dmap, 0, dmap->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dmat, dmap);

		m_freem(pkt->tp_m);

		tht_pkt_put(&sc->sc_tx_list, pkt);

	} while (sc->sc_txf.tf_ready >= sizeof(txf));

	ifq_clr_oactive(&ifp->if_snd);

	tht_fifo_post(sc, &sc->sc_txf);
}

void
tht_rxf_fill(struct tht_softc *sc, int wait)
{
	bus_dma_tag_t			dmat = sc->sc_thtc->sc_dmat;
	bus_dmamap_t			dmap;
	struct tht_rx_free		rxf;
	struct tht_pkt			*pkt;
	struct mbuf			*m;
	int				bc;

	if (tht_fifo_writable(sc, &sc->sc_rxf) <= THT_FIFO_DESC_LEN)
		return;

	tht_fifo_pre(sc, &sc->sc_rxf);

	for (;;) {
		if ((pkt = tht_pkt_get(&sc->sc_rx_list)) == NULL)
			goto done;

		MGETHDR(m, wait ? M_WAIT : M_DONTWAIT, MT_DATA);
		if (m == NULL)
			goto put_pkt;

		MCLGET(m, wait ? M_WAIT : M_DONTWAIT);
		if (!ISSET(m->m_flags, M_EXT))
			goto free_m;

		m->m_data += ETHER_ALIGN;
		m->m_len = m->m_pkthdr.len = MCLBYTES - ETHER_ALIGN;

		dmap = pkt->tp_dmap;
		if (bus_dmamap_load_mbuf(dmat, dmap, m,
		    wait ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT) != 0)
			goto free_m;

		pkt->tp_m = m;

		bc = sizeof(rxf) + sizeof(struct tht_pbd) * dmap->dm_nsegs;

		rxf.bc = htole16(LWORDS(bc));
		rxf.type = htole16(THT_RXF_TYPE);
		rxf.uid = pkt->tp_id;

		tht_fifo_write(sc, &sc->sc_rxf, &rxf, sizeof(rxf));
		tht_fifo_write_dmap(sc, &sc->sc_rxf, dmap);
		tht_fifo_write_pad(sc, &sc->sc_rxf, bc);

		bus_dmamap_sync(dmat, dmap, 0, dmap->dm_mapsize,
		    BUS_DMASYNC_PREREAD);

		if (sc->sc_rxf.tf_ready <= THT_FIFO_DESC_LEN)
			goto done;
	}

free_m:
	m_freem(m);
put_pkt:
	tht_pkt_put(&sc->sc_rx_list, pkt);
done:
	tht_fifo_post(sc, &sc->sc_rxf);
}

void
tht_rxf_drain(struct tht_softc *sc)
{
	bus_dma_tag_t			dmat = sc->sc_thtc->sc_dmat;
	bus_dmamap_t			dmap;
	struct tht_pkt			*pkt;

	while ((pkt = tht_pkt_used(&sc->sc_rx_list)) != NULL) {
		dmap = pkt->tp_dmap;

		bus_dmamap_sync(dmat, dmap, 0, dmap->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(dmat, dmap);

		m_freem(pkt->tp_m);

		tht_pkt_put(&sc->sc_rx_list, pkt);
	}
}

void
tht_rxd(struct tht_softc *sc)
{
	struct ifnet			*ifp = &sc->sc_ac.ac_if;
	bus_dma_tag_t			dmat = sc->sc_thtc->sc_dmat;
	bus_dmamap_t			dmap;
	struct tht_rx_desc		rxd;
	struct tht_pkt			*pkt;
	struct mbuf			*m;
	struct mbuf_list		ml = MBUF_LIST_INITIALIZER();
	int				bc;
	u_int32_t			flags;

	if (tht_fifo_readable(sc, &sc->sc_rxd) < sizeof(rxd))
		return;

	tht_fifo_pre(sc, &sc->sc_rxd);

	do {
		tht_fifo_read(sc, &sc->sc_rxd, &rxd, sizeof(rxd));

		flags = letoh32(rxd.flags);
		bc = THT_RXD_FLAGS_BC(flags) * 8;
		bc -= sizeof(rxd);
		pkt = &sc->sc_rx_list.tpl_pkts[rxd.uid];

		dmap = pkt->tp_dmap;

		bus_dmamap_sync(dmat, dmap, 0, dmap->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(dmat, dmap);

		m = pkt->tp_m;
		m->m_pkthdr.len = m->m_len = letoh16(rxd.len);

		/* XXX process type 3 rx descriptors */

		ml_enqueue(&ml, m);

		tht_pkt_put(&sc->sc_rx_list, pkt);

		while (bc > 0) {
			static u_int32_t pad;

			tht_fifo_read(sc, &sc->sc_rxd, &pad, sizeof(pad));
			bc -= sizeof(pad);
		}
	} while (sc->sc_rxd.tf_ready >= sizeof(rxd));

	tht_fifo_post(sc, &sc->sc_rxd);

	if_input(ifp, &ml);

	/* put more pkts on the fifo */
	tht_rxf_fill(sc, 0);
}

void
tht_watchdog(struct ifnet *ifp)
{
	/* do nothing */
}

int
tht_media_change(struct ifnet *ifp)
{
	/* ignore */
	return (0);
}

void
tht_media_status(struct ifnet *ifp, struct ifmediareq *imr)
{
	struct tht_softc		*sc = ifp->if_softc;

	imr->ifm_active = IFM_ETHER | IFM_AUTO;
	imr->ifm_status = IFM_AVALID;

	tht_link_state(sc);

	if (LINK_STATE_IS_UP(ifp->if_link_state))
		imr->ifm_status |= IFM_ACTIVE;
}

int
tht_fifo_alloc(struct tht_softc *sc, struct tht_fifo *tf,
    struct tht_fifo_desc *tfd)
{
	u_int64_t			dva;

	tf->tf_len = THT_FIFO_SIZE(tfd->tfd_size);
	tf->tf_mem = tht_dmamem_alloc(sc, tf->tf_len, THT_FIFO_ALIGN);
	if (tf->tf_mem == NULL)
		return (1);

	tf->tf_desc = tfd;
	tf->tf_rptr = tf->tf_wptr = 0;

	bus_dmamap_sync(sc->sc_thtc->sc_dmat, THT_DMA_MAP(tf->tf_mem),
	    0, tf->tf_len, THT_FIFO_PRE_SYNC(tfd));

	dva = THT_DMA_DVA(tf->tf_mem);
	tht_write(sc, tfd->tfd_cfg0, (u_int32_t)dva | tfd->tfd_size);
	tht_write(sc, tfd->tfd_cfg1, (u_int32_t)(dva >> 32));

	return (0);
}

void
tht_fifo_free(struct tht_softc *sc, struct tht_fifo *tf)
{
	bus_dmamap_sync(sc->sc_thtc->sc_dmat, THT_DMA_MAP(tf->tf_mem),
	    0, tf->tf_len, THT_FIFO_POST_SYNC(tf->tf_desc));
	tht_dmamem_free(sc, tf->tf_mem);
}

size_t
tht_fifo_readable(struct tht_softc *sc, struct tht_fifo *tf)
{
	tf->tf_wptr = tht_read(sc, tf->tf_desc->tfd_wptr);
	tf->tf_wptr &= THT_FIFO_PTR_MASK;
	tf->tf_ready = tf->tf_wptr - tf->tf_rptr;
	if (tf->tf_ready < 0)
		tf->tf_ready += tf->tf_len;

	DPRINTF(THT_D_FIFO, "%s: fifo rdable wptr: %d rptr: %d ready: %d\n",
	    DEVNAME(sc), tf->tf_wptr, tf->tf_rptr, tf->tf_ready);

	return (tf->tf_ready);
}

size_t
tht_fifo_writable(struct tht_softc *sc, struct tht_fifo *tf)
{
	tf->tf_rptr = tht_read(sc, tf->tf_desc->tfd_rptr);
	tf->tf_rptr &= THT_FIFO_PTR_MASK;
	tf->tf_ready = tf->tf_rptr - tf->tf_wptr;
	if (tf->tf_ready <= 0)
		tf->tf_ready += tf->tf_len;

	DPRINTF(THT_D_FIFO, "%s: fifo wrable wptr: %d rptr: %d ready: %d\n",
	    DEVNAME(sc), tf->tf_wptr, tf->tf_rptr, tf->tf_ready);

	return (tf->tf_ready);
}

void
tht_fifo_pre(struct tht_softc *sc, struct tht_fifo *tf)
{
	bus_dmamap_sync(sc->sc_thtc->sc_dmat, THT_DMA_MAP(tf->tf_mem),
	    0, tf->tf_len, THT_FIFO_POST_SYNC(tf->tf_desc));
}

void
tht_fifo_read(struct tht_softc *sc, struct tht_fifo *tf,
    void *buf, size_t buflen)
{
	u_int8_t			*fifo = THT_DMA_KVA(tf->tf_mem);
	u_int8_t			*desc = buf;
	size_t				len;

	tf->tf_ready -= buflen;

	len = tf->tf_len - tf->tf_rptr;

	if (len < buflen) {
		memcpy(desc, fifo + tf->tf_rptr, len);

		buflen -= len;
		desc += len;

		tf->tf_rptr = 0;
	}

	memcpy(desc, fifo + tf->tf_rptr, buflen);
	tf->tf_rptr += buflen;

	DPRINTF(THT_D_FIFO, "%s: fifo rd wptr: %d rptr: %d ready: %d\n",
	    DEVNAME(sc), tf->tf_wptr, tf->tf_rptr, tf->tf_ready);
}

void
tht_fifo_write(struct tht_softc *sc, struct tht_fifo *tf,
    void *buf, size_t buflen)
{
	u_int8_t			*fifo = THT_DMA_KVA(tf->tf_mem);
	u_int8_t			*desc = buf;
	size_t				len;

	tf->tf_ready -= buflen;

	len = tf->tf_len - tf->tf_wptr;

	if (len < buflen) {
		memcpy(fifo + tf->tf_wptr, desc, len);

		buflen -= len;
		desc += len;

		tf->tf_wptr = 0;
	}

	memcpy(fifo + tf->tf_wptr, desc, buflen);
	tf->tf_wptr += buflen;
	tf->tf_wptr %= tf->tf_len;

	DPRINTF(THT_D_FIFO, "%s: fifo wr wptr: %d rptr: %d ready: %d\n",
	    DEVNAME(sc), tf->tf_wptr, tf->tf_rptr, tf->tf_ready);
}

void
tht_fifo_write_dmap(struct tht_softc *sc, struct tht_fifo *tf,
    bus_dmamap_t dmap)
{
	struct tht_pbd			pbd;
	u_int64_t			dva;
	int				i;

	for (i = 0; i < dmap->dm_nsegs; i++) {
		dva = dmap->dm_segs[i].ds_addr;

		pbd.addr_lo = htole32(dva);
		pbd.addr_hi = htole32(dva >> 32);
		pbd.len = htole32(dmap->dm_segs[i].ds_len);

		tht_fifo_write(sc, tf, &pbd, sizeof(pbd));
	}
}

void
tht_fifo_write_pad(struct tht_softc *sc, struct tht_fifo *tf, int bc)
{
	const static u_int32_t pad = 0x0;

	/* this assumes you'll only ever be writing multiples of 4 bytes */
	if (bc % 8)
		tht_fifo_write(sc, tf, (void *)&pad, sizeof(pad));
}

void
tht_fifo_post(struct tht_softc *sc, struct tht_fifo *tf)
{
	bus_dmamap_sync(sc->sc_thtc->sc_dmat, THT_DMA_MAP(tf->tf_mem),
	    0, tf->tf_len, THT_FIFO_PRE_SYNC(tf->tf_desc));
	if (tf->tf_desc->tfd_write)
		tht_write(sc, tf->tf_desc->tfd_wptr, tf->tf_wptr);
	else
		tht_write(sc, tf->tf_desc->tfd_rptr, tf->tf_rptr);

	DPRINTF(THT_D_FIFO, "%s: fifo post wptr: %d rptr: %d\n", DEVNAME(sc),
	    tf->tf_wptr, tf->tf_rptr);
}

const static bus_size_t tht_mac_regs[3] = {
	THT_REG_RX_UNC_MAC2, THT_REG_RX_UNC_MAC1, THT_REG_RX_UNC_MAC0
};

void
tht_lladdr_read(struct tht_softc *sc)
{
	int				i;

	for (i = 0; i < nitems(tht_mac_regs); i++)
		sc->sc_lladdr[i] = betoh16(tht_read(sc, tht_mac_regs[i]));
}

void
tht_lladdr_write(struct tht_softc *sc)
{
	int				i;

	for (i = 0; i < nitems(tht_mac_regs); i++)
		tht_write(sc, tht_mac_regs[i], htobe16(sc->sc_lladdr[i]));
}

#define tht_swrst_set(_s, _r)	tht_write((_s), (_r), 0x1)
#define tht_swrst_clr(_s, _r)	tht_write((_s), (_r), 0x0)
int
tht_sw_reset(struct tht_softc *sc)
{
	int				i;

	/* this follows the SW Reset process in section 8.8 of the doco */

	/* 1. disable rx */
	tht_clr(sc, THT_REG_RX_FLT, THT_REG_RX_FLT_OSEN);

	/* 2. initiate port disable */
	tht_swrst_set(sc, THT_REG_DIS_PRT);

	/* 3. initiate queue disable */
	tht_swrst_set(sc, THT_REG_DIS_QU_0);
	tht_swrst_set(sc, THT_REG_DIS_QU_1);

	/* 4. wait for successful finish of previous tasks */
	if (!tht_wait_set(sc, THT_REG_RST_PRT, THT_REG_RST_PRT_ACTIVE, 1000))
		return (1);

	/* 5. Reset interrupt registers */
	tht_write(sc, THT_REG_IMR, 0x0); /* 5.a */
	tht_read(sc, THT_REG_ISR); /* 5.b */
	for (i = 0; i < THT_NQUEUES; i++) {
		tht_write(sc, THT_REG_RDINTCM(i), 0x0); /* 5.c/5.d */
		tht_write(sc, THT_REG_TDINTCM(i), 0x0); /* 5.e */
	}

	/* 6. initiate queue reset */
	tht_swrst_set(sc, THT_REG_RST_QU_0);
	tht_swrst_set(sc, THT_REG_RST_QU_1);

	/* 7. initiate port reset */
	tht_swrst_set(sc, THT_REG_RST_PRT);

	/* 8. clear txt/rxf/rxd/txf read and write ptrs */
	for (i = 0; i < THT_NQUEUES; i++) {
		tht_write(sc, THT_REG_TXT_RPTR(i), 0);
		tht_write(sc, THT_REG_RXF_RPTR(i), 0);
		tht_write(sc, THT_REG_RXD_RPTR(i), 0);
		tht_write(sc, THT_REG_TXF_RPTR(i), 0);

		tht_write(sc, THT_REG_TXT_WPTR(i), 0);
		tht_write(sc, THT_REG_RXF_WPTR(i), 0);
		tht_write(sc, THT_REG_RXD_WPTR(i), 0);
		tht_write(sc, THT_REG_TXF_WPTR(i), 0);
	}

	/* 9. unset port disable */
	tht_swrst_clr(sc, THT_REG_DIS_PRT);

	/* 10. unset queue disable */
	tht_swrst_clr(sc, THT_REG_DIS_QU_0);
	tht_swrst_clr(sc, THT_REG_DIS_QU_1);

	/* 11. unset queue reset */
	tht_swrst_clr(sc, THT_REG_RST_QU_0);
	tht_swrst_clr(sc, THT_REG_RST_QU_1);

	/* 12. unset port reset */
	tht_swrst_clr(sc, THT_REG_RST_PRT);

	/* 13. enable rx */
	tht_set(sc, THT_REG_RX_FLT, THT_REG_RX_FLT_OSEN);

	return (0);
}

int
tht_fw_load(struct tht_softc *sc)
{
	u_int8_t			*fw, *buf;
	size_t				fwlen, wrlen;
	int				error = 1, msecs, ret;

	if (loadfirmware("tht", &fw, &fwlen) != 0)
		return (1);

	if ((fwlen % 8) != 0)
		goto err;

	buf = fw;
	while (fwlen > 0) {
		while (tht_fifo_writable(sc, &sc->sc_txt) <= THT_FIFO_GAP) {
			ret = tsleep_nsec(sc, PCATCH, "thtfw",
			    MSEC_TO_NSEC(10));
			if (ret == EINTR)
				goto err;
		}

		wrlen = MIN(sc->sc_txt.tf_ready - THT_FIFO_GAP, fwlen);
		tht_fifo_pre(sc, &sc->sc_txt);
		tht_fifo_write(sc, &sc->sc_txt, buf, wrlen);
		tht_fifo_post(sc, &sc->sc_txt);

		fwlen -= wrlen;
		buf += wrlen;
	}

	for (msecs = 0; msecs < 2000; msecs += 10) {
		if (tht_read(sc, THT_REG_INIT_STATUS) != 0) {
			error = 0;
			break;
		}
		ret = tsleep_nsec(sc, PCATCH, "thtinit", MSEC_TO_NSEC(10));
		if (ret == EINTR)
			goto err;
	}

	tht_write(sc, THT_REG_INIT_SEMAPHORE, 0x1);

err:
	free(fw, M_DEVBUF, fwlen);
	return (error);
}

void
tht_link_state(struct tht_softc *sc)
{
	static const struct timeval	interval = { 0, 10000 };
	struct ifnet			*ifp = &sc->sc_ac.ac_if;
	int				link_state = LINK_STATE_DOWN;

	if (!ratecheck(&sc->sc_mediacheck, &interval))
		return;

	if (tht_read(sc, THT_REG_MAC_LNK_STAT) & THT_REG_MAC_LNK_STAT_LINK)
		link_state = LINK_STATE_FULL_DUPLEX;

	if (ifp->if_link_state != link_state) {
		ifp->if_link_state = link_state;
		if_link_state_change(ifp);
	}

	if (LINK_STATE_IS_UP(ifp->if_link_state))
		ifp->if_baudrate = IF_Gbps(10);
	else
		ifp->if_baudrate = 0;
}

u_int32_t
tht_read(struct tht_softc *sc, bus_size_t r)
{
	bus_space_barrier(sc->sc_thtc->sc_memt, sc->sc_memh, r, 4,
	    BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_thtc->sc_memt, sc->sc_memh, r));
}

void
tht_write(struct tht_softc *sc, bus_size_t r, u_int32_t v)
{
	bus_space_write_4(sc->sc_thtc->sc_memt, sc->sc_memh, r, v);
	bus_space_barrier(sc->sc_thtc->sc_memt, sc->sc_memh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

void
tht_write_region(struct tht_softc *sc, bus_size_t r, void *buf, size_t len)
{
	bus_space_write_raw_region_4(sc->sc_thtc->sc_memt, sc->sc_memh, r,
	    buf, len);
	bus_space_barrier(sc->sc_thtc->sc_memt, sc->sc_memh, r, len,
	    BUS_SPACE_BARRIER_WRITE);
}

int
tht_wait_eq(struct tht_softc *sc, bus_size_t r, u_int32_t m, u_int32_t v,
    int timeout)
{
	while ((tht_read(sc, r) & m) != v) {
		if (timeout == 0)
			return (0);

		delay(1000);
		timeout--;
	}

	return (1);
}

int
tht_wait_ne(struct tht_softc *sc, bus_size_t r, u_int32_t m, u_int32_t v,
    int timeout)
{
	while ((tht_read(sc, r) & m) == v) {
		if (timeout == 0)
			return (0);

		delay(1000);
		timeout--;
	}

	return (1);
}

struct tht_dmamem *
tht_dmamem_alloc(struct tht_softc *sc, bus_size_t size, bus_size_t align)
{
	bus_dma_tag_t			dmat = sc->sc_thtc->sc_dmat;
	struct tht_dmamem		*tdm;
	int				nsegs;

	tdm = malloc(sizeof(struct tht_dmamem), M_DEVBUF, M_WAITOK | M_ZERO);
	tdm->tdm_size = size;

	if (bus_dmamap_create(dmat, size, 1, size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &tdm->tdm_map) != 0)
		goto tdmfree;

	if (bus_dmamem_alloc(dmat, size, align, 0, &tdm->tdm_seg, 1, &nsegs,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
		goto destroy;

	if (bus_dmamem_map(dmat, &tdm->tdm_seg, nsegs, size, &tdm->tdm_kva,
	    BUS_DMA_WAITOK) != 0)
		goto free;

	if (bus_dmamap_load(dmat, tdm->tdm_map, tdm->tdm_kva, size,
	    NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	return (tdm);

unmap:
	bus_dmamem_unmap(dmat, tdm->tdm_kva, size);
free:
	bus_dmamem_free(dmat, &tdm->tdm_seg, 1);
destroy:
	bus_dmamap_destroy(dmat, tdm->tdm_map);
tdmfree:
	free(tdm, M_DEVBUF, 0);

	return (NULL);
}

void
tht_dmamem_free(struct tht_softc *sc, struct tht_dmamem *tdm)
{
	bus_dma_tag_t			dmat = sc->sc_thtc->sc_dmat;

	bus_dmamap_unload(dmat, tdm->tdm_map);
	bus_dmamem_unmap(dmat, tdm->tdm_kva, tdm->tdm_size);
	bus_dmamem_free(dmat, &tdm->tdm_seg, 1);
	bus_dmamap_destroy(dmat, tdm->tdm_map);
	free(tdm, M_DEVBUF, 0);
}

int
tht_pkt_alloc(struct tht_softc *sc, struct tht_pkt_list *tpl, int npkts,
    int nsegs)
{
	bus_dma_tag_t			dmat = sc->sc_thtc->sc_dmat;
	struct tht_pkt			*pkt;
	int				i;

	tpl->tpl_pkts = mallocarray(npkts, sizeof(struct tht_pkt),
	    M_DEVBUF, M_WAITOK | M_ZERO);

	TAILQ_INIT(&tpl->tpl_free);
	TAILQ_INIT(&tpl->tpl_used);
	for (i = 0; i < npkts; i++) {
		pkt = &tpl->tpl_pkts[i];

		pkt->tp_id = i;
		if (bus_dmamap_create(dmat, THT_PBD_PKTLEN, nsegs,
		    THT_PBD_PKTLEN, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
		    &pkt->tp_dmap) != 0) {
			tht_pkt_free(sc, tpl);
			return (1);
		}

		TAILQ_INSERT_TAIL(&tpl->tpl_free, pkt, tp_link);
	}

	return (0);
}

void
tht_pkt_free(struct tht_softc *sc, struct tht_pkt_list *tpl)
{
	bus_dma_tag_t			dmat = sc->sc_thtc->sc_dmat;
	struct tht_pkt			*pkt;

	while ((pkt = tht_pkt_get(tpl)) != NULL)
		bus_dmamap_destroy(dmat, pkt->tp_dmap);
	free(tpl->tpl_pkts, M_DEVBUF, 0);
	tpl->tpl_pkts = NULL;
}

void
tht_pkt_put(struct tht_pkt_list *tpl, struct tht_pkt *pkt)
{
	TAILQ_REMOVE(&tpl->tpl_used, pkt, tp_link);
	TAILQ_INSERT_TAIL(&tpl->tpl_free, pkt, tp_link);
}

struct tht_pkt *
tht_pkt_get(struct tht_pkt_list *tpl)
{
	struct tht_pkt			*pkt;

	pkt = TAILQ_FIRST(&tpl->tpl_free);
	if (pkt != NULL) {
		TAILQ_REMOVE(&tpl->tpl_free, pkt, tp_link);
		TAILQ_INSERT_TAIL(&tpl->tpl_used, pkt, tp_link);
	}

	return (pkt);
}

struct tht_pkt *
tht_pkt_used(struct tht_pkt_list *tpl)
{
	return (TAILQ_FIRST(&tpl->tpl_used));
}