1 /*	$OpenBSD: if_tht.c,v 1.142 2020/07/10 13:26:38 patrick Exp $ */
2 
3 /*
4  * Copyright (c) 2007 David Gwynne <dlg@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 /*
20  * Driver for the Tehuti TN30xx multi-port 10Gb Ethernet chipsets,
21  * see http://www.tehutinetworks.net/.
22  *
23  * This driver was made possible because Tehuti Networks provided
24  * hardware and documentation. Thanks!
25  */
26 
27 #include "bpfilter.h"
28 
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/sockio.h>
32 #include <sys/mbuf.h>
33 #include <sys/kernel.h>
34 #include <sys/socket.h>
35 #include <sys/malloc.h>
36 #include <sys/device.h>
37 #include <sys/timeout.h>
38 #include <sys/queue.h>
39 #include <sys/rwlock.h>
40 #include <sys/time.h>
41 
42 #include <machine/bus.h>
43 
44 #include <dev/pci/pcireg.h>
45 #include <dev/pci/pcivar.h>
46 #include <dev/pci/pcidevs.h>
47 
48 #include <net/if.h>
49 #include <net/if_media.h>
50 
51 #if NBPFILTER > 0
52 #include <net/bpf.h>
53 #endif
54 
55 #include <netinet/in.h>
56 #include <netinet/if_ether.h>
57 
58 #ifdef THT_DEBUG
59 #define THT_D_FIFO		(1<<0)
60 #define THT_D_TX		(1<<1)
61 #define THT_D_RX		(1<<2)
62 #define THT_D_INTR		(1<<3)
63 
64 int thtdebug = THT_D_TX | THT_D_RX | THT_D_INTR;
65 
66 #define DPRINTF(l, f...)	do { if (thtdebug & (l)) printf(f); } while (0)
67 #else
68 #define DPRINTF(l, f...)
69 #endif
70 
71 /* registers */
72 
73 #define THT_PCI_BAR		0x10
74 
75 #define _Q(_q)			((_q) * 4)
76 
77 /* General Configuration */
78 #define THT_REG_END_SEL		0x5448 /* PCI Endian Select */
79 #define THT_REG_CLKPLL		0x5000
80 #define  THT_REG_CLKPLL_PLLLK		(1<<9) /* PLL is locked */
81 #define  THT_REG_CLKPLL_RSTEND		(1<<8) /* Reset ended */
82 #define  THT_REG_CLKPLL_TXF_DIS		(1<<3) /* TX Free disabled */
83 #define  THT_REG_CLKPLL_VNT_STOP	(1<<2) /* VENETO Stop */
84 #define  THT_REG_CLKPLL_PLLRST		(1<<1) /* PLL Reset */
85 #define  THT_REG_CLKPLL_SFTRST		(1<<0) /* Software Reset */
86 /* Descriptors and FIFO Registers */
87 #define THT_REG_TXT_CFG0(_q)	(0x4040 + _Q(_q)) /* CFG0 TX Task queues */
88 #define THT_REG_RXF_CFG0(_q)	(0x4050 + _Q(_q)) /* CFG0 RX Free queues */
89 #define THT_REG_RXD_CFG0(_q)	(0x4060 + _Q(_q)) /* CFG0 RX DSC queues */
90 #define THT_REG_TXF_CFG0(_q)	(0x4070 + _Q(_q)) /* CFG0 TX Free queues */
91 #define THT_REG_TXT_CFG1(_q)	(0x4000 + _Q(_q)) /* CFG1 TX Task queues */
92 #define THT_REG_RXF_CFG1(_q)	(0x4010 + _Q(_q)) /* CFG1 RX Free queues */
93 #define THT_REG_RXD_CFG1(_q)	(0x4020 + _Q(_q)) /* CFG1 RX DSC queues */
94 #define THT_REG_TXF_CFG1(_q)	(0x4030 + _Q(_q)) /* CFG1 TX Free queues */
95 #define THT_REG_TXT_RPTR(_q)	(0x40c0 + _Q(_q)) /* TX Task read ptr */
96 #define THT_REG_RXF_RPTR(_q)	(0x40d0 + _Q(_q)) /* RX Free read ptr */
97 #define THT_REG_RXD_RPTR(_q)	(0x40e0 + _Q(_q)) /* RX DSC read ptr */
98 #define THT_REG_TXF_RPTR(_q)	(0x40f0 + _Q(_q)) /* TX Free read ptr */
99 #define THT_REG_TXT_WPTR(_q)	(0x4080 + _Q(_q)) /* TX Task write ptr */
100 #define THT_REG_RXF_WPTR(_q)	(0x4090 + _Q(_q)) /* RX Free write ptr */
101 #define THT_REG_RXD_WPTR(_q)	(0x40a0 + _Q(_q)) /* RX DSC write ptr */
102 #define THT_REG_TXF_WPTR(_q)	(0x40b0 + _Q(_q)) /* TX Free write ptr */
103 #define THT_REG_HTB_ADDR	0x4100 /* HTB Addressing Mechanism enable */
104 #define THT_REG_HTB_ADDR_HI	0x4110 /* High HTB Address */
105 #define THT_REG_HTB_ST_TMR	0x3290 /* HTB Timer */
106 #define THT_REG_RDINTCM(_q)	(0x5120 + _Q(_q)) /* RX DSC Intr Coalescing */
107 #define  THT_REG_RDINTCM_PKT_TH(_c)	((_c)<<20) /* pkt count threshold */
108 #define  THT_REG_RDINTCM_RXF_TH(_c)	((_c)<<16) /* rxf intr req thresh */
109 #define  THT_REG_RDINTCM_COAL_RC	(1<<15) /* coalescing timer recharge */
110 #define  THT_REG_RDINTCM_COAL(_c)	(_c) /* coalescing timer */
111 #define THT_REG_TDINTCM(_q)	(0x5130 + _Q(_q)) /* TX DSC Intr Coalescing */
112 #define  THT_REG_TDINTCM_PKT_TH(_c)	((_c)<<20) /* pkt count threshold */
113 #define  THT_REG_TDINTCM_COAL_RC	(1<<15) /* coalescing timer recharge */
114 #define  THT_REG_TDINTCM_COAL(_c)	(_c) /* coalescing timer */
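/*
 * The coalescing registers pack several fields into one 32 bit word using
 * the shifts above.  A rough sketch of the RX value tht_up() programs below
 * (the timer units are not documented here, so treat 0x20 as an opaque
 * tuning value taken from that code):
 *
 *	THT_REG_RDINTCM_PKT_TH(12) | THT_REG_RDINTCM_RXF_TH(4) |
 *	    THT_REG_RDINTCM_COAL_RC | THT_REG_RDINTCM_COAL(0x20)
 */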
115 /* 10G Ethernet MAC */
116 #define THT_REG_10G_REV		0x6000 /* Revision */
117 #define THT_REG_10G_SCR		0x6004 /* Scratch */
118 #define THT_REG_10G_CTL		0x6008 /* Control/Status */
119 #define  THT_REG_10G_CTL_CMD_FRAME_EN	(1<<13) /* cmd frame enable */
120 #define  THT_REG_10G_CTL_SW_RESET	(1<<12) /* sw reset */
121 #define  THT_REG_10G_CTL_STATS_AUTO_CLR	(1<<11) /* auto clear statistics */
122 #define  THT_REG_10G_CTL_LOOPBACK	(1<<10) /* enable loopback */
123 #define  THT_REG_10G_CTL_TX_ADDR_INS	(1<<9) /* set mac on tx */
124 #define  THT_REG_10G_CTL_PAUSE_IGNORE	(1<<8) /* ignore pause */
125 #define  THT_REG_10G_CTL_PAUSE_FWD	(1<<7) /* forward pause */
126 #define  THT_REG_10G_CTL_CRC_FWD	(1<<6) /* crc forward */
127 #define  THT_REG_10G_CTL_PAD		(1<<5) /* frame padding */
128 #define  THT_REG_10G_CTL_PROMISC	(1<<4) /* promiscuous mode */
129 #define  THT_REG_10G_CTL_WAN_MODE	(1<<3) /* WAN mode */
130 #define  THT_REG_10G_CTL_RX_EN		(1<<1) /* RX enable */
131 #define  THT_REG_10G_CTL_TX_EN		(1<<0) /* TX enable */
132 #define THT_REG_10G_FRM_LEN	0x6014 /* Frame Length */
133 #define THT_REG_10G_PAUSE	0x6018 /* Pause Quanta */
134 #define THT_REG_10G_RX_SEC	0x601c /* RX Section */
135 #define THT_REG_10G_TX_SEC	0x6020 /* TX Section */
136 #define  THT_REG_10G_SEC_AVAIL(_t)	(_t) /* section available thresh */
137 #define  THT_REG_10G_SEC_EMPTY(_t)	((_t)<<16) /* section empty thresh */
138 #define THT_REG_10G_RFIFO_AEF	0x6024 /* RX FIFO Almost Empty/Full */
139 #define THT_REG_10G_TFIFO_AEF	0x6028 /* TX FIFO Almost Empty/Full */
140 #define  THT_REG_10G_FIFO_AE(_t)	(_t) /* almost empty */
141 #define  THT_REG_10G_FIFO_AF(_t)	((_t)<<16) /* almost full */
142 #define THT_REG_10G_SM_STAT	0x6030 /* MDIO Status */
143 #define THT_REG_10G_SM_CMD	0x6034 /* MDIO Command */
144 #define THT_REG_10G_SM_DAT	0x6038 /* MDIO Data */
145 #define THT_REG_10G_SM_ADD	0x603c /* MDIO Address */
146 #define THT_REG_10G_STAT	0x6040 /* Status */
147 /* Statistic Counters */
148 /* XXX todo */
149 /* Status Registers */
150 #define THT_REG_MAC_LNK_STAT	0x0200 /* Link Status */
151 #define  THT_REG_MAC_LNK_STAT_DIS	(1<<4) /* Mac Stats read disable */
152 #define  THT_REG_MAC_LNK_STAT_LINK	(1<<2) /* Link State */
153 #define  THT_REG_MAC_LNK_STAT_REM_FAULT	(1<<1) /* Remote Fault */
154 #define  THT_REG_MAC_LNK_STAT_LOC_FAULT	(1<<0) /* Local Fault */
155 /* Interrupt Registers */
156 #define THT_REG_ISR		0x5100 /* Interrupt Status */
157 #define THT_REG_ISR_LINKCHG(_p)		(1<<(27+(_p))) /* link changed */
158 #define THT_REG_ISR_GPIO		(1<<26) /* GPIO */
159 #define THT_REG_ISR_RFRSH		(1<<25) /* DDR Refresh */
160 #define THT_REG_ISR_SWI			(1<<23) /* software interrupt */
161 #define THT_REG_ISR_RXF(_q)		(1<<(19+(_q))) /* rx free fifo */
162 #define THT_REG_ISR_TXF(_q)		(1<<(15+(_q))) /* tx free fifo */
163 #define THT_REG_ISR_RXD(_q)		(1<<(11+(_q))) /* rx desc fifo */
164 #define THT_REG_ISR_TMR(_t)		(1<<(6+(_t))) /* timer */
165 #define THT_REG_ISR_VNT			(1<<5) /* optistrata */
166 #define THT_REG_ISR_RxFL		(1<<4) /* RX Full */
167 #define THT_REG_ISR_TR			(1<<2) /* table read */
168 #define THT_REG_ISR_PCIE_LNK_INT	(1<<1) /* pcie link fail */
169 #define THT_REG_ISR_GPLE_CLR		(1<<0) /* pcie timeout */
170 #define THT_FMT_ISR		"\020" "\035LINKCHG1" "\034LINKCHG0" \
171 				    "\033GPIO" "\032RFRSH" "\030SWI" \
172 				    "\027RXF3" "\026RXF2" "\025RXF1" \
173 				    "\024RXF0" "\023TXF3" "\022TXF2" \
174 				    "\021TXF1" "\020TXF0" "\017RXD3" \
175 				    "\016RXD2" "\015RXD1" "\014RXD0" \
176 				    "\012TMR3" "\011TMR2" "\010TMR1" \
177 				    "\007TMR0" "\006VNT" "\005RxFL" \
178 				    "\003TR" "\002PCI_LNK_INT" \
179 				    "\001GPLE_CLR"
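/*
 * THT_FMT_ISR is a printf(9) "%b" format string: the leading \020 selects
 * hexadecimal output and each following pair is a 1-based bit number plus
 * the name printed when that bit is set.  For example, an ISR value of
 * (1<<27)|(1<<11) comes out of the DPRINTF in tht_intr() as
 * "isr: 0x8000800<LINKCHG0,RXD0>".
 */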
180 #define THT_REG_ISR_GTI		0x5080 /* GTI Interrupt Status */
181 #define THT_REG_IMR		0x5110 /* Interrupt Mask */
182 #define THT_REG_IMR_LINKCHG(_p)		(1<<(27+(_p))) /* link changed */
183 #define THT_REG_IMR_GPIO		(1<<26) /* GPIO */
184 #define THT_REG_IMR_RFRSH		(1<<25) /* DDR Refresh */
185 #define THT_REG_IMR_SWI			(1<<23) /* software interrupt */
186 #define THT_REG_IMR_RXF(_q)		(1<<(19+(_q))) /* rx free fifo */
187 #define THT_REG_IMR_TXF(_q)		(1<<(15+(_q))) /* tx free fifo */
188 #define THT_REG_IMR_RXD(_q)		(1<<(11+(_q))) /* rx desc fifo */
189 #define THT_REG_IMR_TMR(_t)		(1<<(6+(_t))) /* timer */
190 #define THT_REG_IMR_VNT			(1<<5) /* optistrata */
191 #define THT_REG_IMR_RxFL		(1<<4) /* RX Full */
192 #define THT_REG_IMR_TR			(1<<2) /* table read */
193 #define THT_REG_IMR_PCIE_LNK_INT	(1<<1) /* pcie link fail */
194 #define THT_REG_IMR_GPLE_CLR		(1<<0) /* pcie timeout */
195 #define THT_REG_IMR_GTI		0x5090 /* GTI Interrupt Mask */
196 #define THT_REG_ISR_MSK		0x5140 /* ISR Masked */
197 /* Global Counters */
198 /* XXX todo */
199 /* DDR2 SDRAM Controller Registers */
200 /* XXX TBD */
201 /* EEPROM Registers */
202 /* XXX todo */
203 /* Init arbitration and status registers */
204 #define THT_REG_INIT_SEMAPHORE	0x5170 /* Init Semaphore */
205 #define THT_REG_INIT_STATUS	0x5180 /* Init Status */
206 /* PCI Credits Registers */
207 /* XXX todo */
208 /* TX Arbitration Registers */
209 #define THT_REG_TXTSK_PR(_q)	(0x41b0 + _Q(_q)) /* TX Queue Priority */
210 /* RX Part Registers */
211 #define THT_REG_RX_FLT		0x1240 /* RX Filter Configuration */
212 #define  THT_REG_RX_FLT_ATXER		(1<<15) /* accept with xfer err */
213 #define  THT_REG_RX_FLT_ATRM		(1<<14) /* accept with term err */
214 #define  THT_REG_RX_FLT_AFTSQ		(1<<13) /* accept with fault seq */
215 #define  THT_REG_RX_FLT_OSEN		(1<<12) /* enable pkts */
216 #define  THT_REG_RX_FLT_APHER		(1<<11) /* accept with phy err */
217 #define  THT_REG_RX_FLT_TXFC		(1<<10) /* TX flow control */
218 #define  THT_REG_RX_FLT_FDA		(1<<8) /* filter direct address */
219 #define  THT_REG_RX_FLT_AOF		(1<<7) /* accept overflow frame */
220 #define  THT_REG_RX_FLT_ACF		(1<<6) /* accept control frame */
221 #define  THT_REG_RX_FLT_ARUNT		(1<<5) /* accept runt */
222 #define  THT_REG_RX_FLT_ACRC		(1<<4) /* accept crc error */
223 #define  THT_REG_RX_FLT_AM		(1<<3) /* accept multicast */
224 #define  THT_REG_RX_FLT_AB		(1<<2) /* accept broadcast */
225 #define  THT_REG_RX_FLT_PRM_MASK	0x3 /* promiscuous mode */
226 #define  THT_REG_RX_FLT_PRM_NORMAL	0x0 /* normal mode */
227 #define  THT_REG_RX_FLT_PRM_ALL		0x1 /* pass all incoming frames */
228 #define THT_REG_RX_MAX_FRAME	0x12c0 /* Max Frame Size */
229 #define THT_REG_RX_UNC_MAC0	0x1250 /* MAC Address low word */
230 #define THT_REG_RX_UNC_MAC1	0x1260 /* MAC Address mid word */
231 #define THT_REG_RX_UNC_MAC2	0x1270 /* MAC Address high word */
232 #define THT_REG_RX_MAC_MCST0(_m) (0x1a80 + (_m)*8)
233 #define THT_REG_RX_MAC_MCST1(_m) (0x1a84 + (_m)*8)
234 #define  THT_REG_RX_MAC_MCST_CNT	15
235 #define THT_REG_RX_MCST_HASH	0x1a00 /* imperfect multicast filter hash */
236 #define  THT_REG_RX_MCST_HASH_SIZE	(256 / NBBY)
237 /* OptiStrata Debug Registers */
238 #define THT_REG_VPC		0x2300 /* Program Counter */
239 #define THT_REG_VLI		0x2310 /* Last Interrupt */
240 #define THT_REG_VIC		0x2320 /* Interrupts Count */
241 #define THT_REG_VTMR		0x2330 /* Timer */
242 #define THT_REG_VGLB		0x2340 /* Global */
243 /* SW Reset Registers */
244 #define THT_REG_RST_PRT		0x7000 /* Reset Port */
245 #define  THT_REG_RST_PRT_ACTIVE		0x1 /* port reset is active */
246 #define THT_REG_DIS_PRT		0x7010 /* Disable Port */
247 #define THT_REG_RST_QU_0	0x7020 /* Reset Queue 0 */
248 #define THT_REG_RST_QU_1	0x7028 /* Reset Queue 1 */
249 #define THT_REG_DIS_QU_0	0x7030 /* Disable Queue 0 */
250 #define THT_REG_DIS_QU_1	0x7038 /* Disable Queue 1 */
251 
252 #define THT_PORT_SIZE		0x8000
253 #define THT_PORT_REGION(_p)	((_p) * THT_PORT_SIZE)
254 #define THT_NQUEUES		4
255 
256 #define THT_FIFO_ALIGN		4096
257 #define THT_FIFO_SIZE_4k	0x0
258 #define THT_FIFO_SIZE_8k	0x1
259 #define THT_FIFO_SIZE_16k	0x2
260 #define THT_FIFO_SIZE_32k	0x3
261 #define THT_FIFO_SIZE(_r)	(4096 * (1<<(_r)))
262 #define THT_FIFO_GAP		8 /* keep 8 bytes between ptrs */
263 #define THT_FIFO_PTR_MASK	0x00007ff8 /* rptr/wptr mask */
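/*
 * The size code programmed into a CFG0 register selects a power of two
 * fifo length, e.g. THT_FIFO_SIZE(THT_FIFO_SIZE_16k) == 4096 * (1<<2) ==
 * 16384 bytes.  THT_FIFO_PTR_MASK keeps the hardware read/write pointers
 * 8 byte aligned and within the largest (32k) fifo, and THT_FIFO_GAP is
 * the slack kept between the pointers so a full fifo is never mistaken
 * for an empty one.
 */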
264 
265 #define THT_FIFO_DESC_LEN	208 /* a descriptor can't be bigger than this */
266 
267 #define THT_IMR_DOWN(_p)	(THT_REG_IMR_LINKCHG(_p))
268 #define THT_IMR_UP(_p)		(THT_REG_IMR_LINKCHG(_p) | \
269 				    THT_REG_IMR_RXF(0) | THT_REG_IMR_TXF(0) | \
270 				    THT_REG_IMR_RXD(0))
271 
272 /* hardware structures (we're using the 64 bit variants) */
273 
274 /* physical buffer descriptor */
275 struct tht_pbd {
276 	u_int32_t		addr_lo;
277 	u_int32_t		addr_hi;
278 	u_int32_t		len;
279 } __packed;
280 #define THT_PBD_PKTLEN		(64 * 1024)
281 
282 /* rx free fifo */
283 struct tht_rx_free {
284 	u_int16_t		bc; /* buffer count (0:4) */
285 	u_int16_t		type;
286 
287 	u_int64_t		uid;
288 
289 	/* followed by a pbd list */
290 } __packed;
291 #define THT_RXF_TYPE		1
292 #define THT_RXF_1ST_PDB_LEN	128
293 #define THT_RXF_SGL_LEN		((THT_FIFO_DESC_LEN - \
294 				    sizeof(struct tht_rx_free)) / \
295 				    sizeof(struct tht_pbd))
296 #define THT_RXF_PKT_NUM		128
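/*
 * With the packed layouts above, sizeof(struct tht_rx_free) is 12 bytes and
 * sizeof(struct tht_pbd) is 12 bytes, so THT_RXF_SGL_LEN works out to
 * (208 - 12) / 12 == 16 buffer descriptors per rx free fifo entry.
 */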
297 
298 /* rx descriptor */
299 struct tht_rx_desc {
300 	u_int32_t		flags;
301 #define THT_RXD_FLAGS_BC(_f)		((_f) & 0x1f) /* buffer count */
302 #define THT_RXD_FLAGS_RXFQ(_f)		(((_f)>>8) & 0x3) /* rxf queue id */
303 #define THT_RXD_FLAGS_TO		(1<<15)
304 #define THT_RXD_FLAGS_TYPE(_f)		(((_f)>>16) & 0xf) /* desc type */
305 #define THT_RXD_FLAGS_OVF		(1<<21) /* overflow error */
306 #define THT_RXD_FLAGS_RUNT		(1<<22) /* runt error */
307 #define THT_RXD_FLAGS_CRC		(1<<23) /* crc error */
308 #define THT_RXD_FLAGS_UDPCS		(1<<24) /* udp checksum error */
309 #define THT_RXD_FLAGS_TCPCS		(1<<25) /* tcp checksum error */
310 #define THT_RXD_FLAGS_IPCS		(1<<26) /* ip checksum error */
311 #define THT_RXD_FLAGS_PKT_ID		0x70000000
312 #define THT_RXD_FLAGS_PKT_ID_NONIP	0x00000000
313 #define THT_RXD_FLAGS_PKT_ID_TCP4	0x10000000
314 #define THT_RXD_FLAGS_PKT_ID_UDP4	0x20000000
315 #define THT_RXD_FLAGS_PKT_ID_IPV4	0x30000000
316 #define THT_RXD_FLAGS_PKT_ID_TCP6	0x50000000
317 #define THT_RXD_FLAGS_PKT_ID_UDP6	0x60000000
318 #define THT_RXD_FLAGS_PKT_ID_IPV6	0x70000000
319 #define THT_RXD_FLAGS_VTAG		(1<<31)
320 	u_int16_t		len;
321 	u_int16_t		vlan;
322 #define THT_RXD_VLAN_ID(_v)		((_v) & 0xfff)
323 #define THT_RXD_VLAN_CFI		(1<<12)
324 #define THT_RXD_VLAN_PRI(_v)		(((_v) >> 13) & 0x7)
325 
326 	u_int64_t		uid;
327 } __packed;
328 #define THT_RXD_TYPE		2
329 
330 /* rx descriptor type 3: data chain instruction */
331 struct tht_rx_desc_dc {
332 	/* preceded by tht_rx_desc */
333 
334 	u_int16_t		cd_offset;
335 	u_int16_t		flags;
336 
337 	u_int8_t		data[4];
338 } __packed;
339 #define THT_RXD_TYPE_DC		3
340 
341 /* rx descriptor type 4: rss (recv side scaling) information */
342 struct tht_rx_desc_rss {
343 	/* preceded by tht_rx_desc */
344 
345 	u_int8_t		rss_hft;
346 	u_int8_t		rss_type;
347 	u_int8_t		rss_tcpu;
348 	u_int8_t		reserved;
349 
350 	u_int32_t		rss_hash;
351 } __packed;
352 #define THT_RXD_TYPE_RSS	4
353 
354 /* tx task fifo */
355 struct tht_tx_task {
356 	u_int32_t		flags;
357 #define THT_TXT_FLAGS_BC(_f)	(_f) /* buffer count */
358 #define THT_TXT_FLAGS_UDPCS	(1<<5) /* udp checksum */
359 #define THT_TXT_FLAGS_TCPCS	(1<<6) /* tcp checksum */
360 #define THT_TXT_FLAGS_IPCS	(1<<7) /* ip checksum */
361 #define THT_TXT_FLAGS_VTAG	(1<<8) /* insert vlan tag */
362 #define THT_TXT_FLAGS_LGSND	(1<<9) /* tcp large send enabled */
363 #define THT_TXT_FLAGS_FRAG	(1<<10) /* ip fragmentation enabled */
364 #define THT_TXT_FLAGS_CFI	(1<<12) /* canonical format indicator */
365 #define THT_TXT_FLAGS_PRIO(_f)	((_f)<<13) /* vlan priority */
366 #define THT_TXT_FLAGS_VLAN(_f)	((_f)<<20) /* vlan id */
367 	u_int16_t		mss_mtu;
368 	u_int16_t		len;
369 
370 	u_int64_t		uid;
371 
372 	/* followed by a pbd list */
373 } __packed;
374 #define THT_TXT_TYPE		(3<<16)
375 #define THT_TXT_SGL_LEN		((THT_FIFO_DESC_LEN - \
376 				    sizeof(struct tht_tx_task)) / \
377 				    sizeof(struct tht_pbd))
378 #define THT_TXT_PKT_NUM		128
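/*
 * Likewise sizeof(struct tht_tx_task) is 16 bytes, giving THT_TXT_SGL_LEN
 * of (208 - 16) / 12 == 16 segments per tx task.  THT_TXT_TYPE places the
 * descriptor type (3) in bits 16-19 of the flags word, which appears to be
 * the same field THT_RXD_FLAGS_TYPE() extracts on the rx side.
 */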
379 
380 /* tx free fifo */
381 struct tht_tx_free {
382 	u_int32_t		status;
383 
384 	u_int64_t		uid;
385 
386 	u_int32_t		pad;
387 } __packed;
388 
389 /* pci controller autoconf glue */
390 
391 struct thtc_softc {
392 	struct device		sc_dev;
393 
394 	bus_dma_tag_t		sc_dmat;
395 
396 	bus_space_tag_t		sc_memt;
397 	bus_space_handle_t	sc_memh;
398 	bus_size_t		sc_mems;
399 	void			*sc_ih;
400 };
401 
402 int			thtc_match(struct device *, void *, void *);
403 void			thtc_attach(struct device *, struct device *, void *);
404 int			thtc_print(void *, const char *);
405 
406 struct cfattach thtc_ca = {
407 	sizeof(struct thtc_softc), thtc_match, thtc_attach
408 };
409 
410 struct cfdriver thtc_cd = {
411 	NULL, "thtc", DV_DULL
412 };
413 
414 /* glue between the controller and the port */
415 
416 struct tht_attach_args {
417 	int			taa_port;
418 
419 	struct pci_attach_args	*taa_pa;
420 };
421 
422 /* tht itself */
423 
424 struct tht_dmamem {
425 	bus_dmamap_t		tdm_map;
426 	bus_dma_segment_t	tdm_seg;
427 	size_t			tdm_size;
428 	caddr_t			tdm_kva;
429 };
430 #define THT_DMA_MAP(_tdm)	((_tdm)->tdm_map)
431 #define THT_DMA_DVA(_tdm)	((_tdm)->tdm_map->dm_segs[0].ds_addr)
432 #define THT_DMA_KVA(_tdm)	((void *)(_tdm)->tdm_kva)
433 
434 struct tht_fifo_desc {
435 	bus_size_t		tfd_cfg0;
436 	bus_size_t		tfd_cfg1;
437 	bus_size_t		tfd_rptr;
438 	bus_size_t		tfd_wptr;
439 	u_int32_t		tfd_size;
440 	int			tfd_write;
441 };
442 #define THT_FIFO_PRE_SYNC(_d)	((_d)->tfd_write ? \
443 				    BUS_DMASYNC_PREWRITE : \
444 				    BUS_DMASYNC_PREREAD)
445 #define THT_FIFO_POST_SYNC(_d)	((_d)->tfd_write ? \
446 				    BUS_DMASYNC_POSTWRITE : \
447 				    BUS_DMASYNC_POSTREAD)
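/*
 * tfd_write marks the fifos the host writes and the chip consumes (tx task,
 * rx free), which are synced PREWRITE/POSTWRITE; the fifos the chip fills
 * and the host reads (rx descriptor, tx free) use PREREAD/POSTREAD.
 */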
448 
449 struct tht_fifo {
450 	struct tht_fifo_desc	*tf_desc;
451 	struct tht_dmamem	*tf_mem;
452 	int			tf_len;
453 	int			tf_rptr;
454 	int			tf_wptr;
455 	int			tf_ready;
456 };
457 
458 struct tht_pkt {
459 	u_int64_t		tp_id;
460 
461 	bus_dmamap_t		tp_dmap;
462 	struct mbuf		*tp_m;
463 
464 	TAILQ_ENTRY(tht_pkt)	tp_link;
465 };
466 
467 struct tht_pkt_list {
468 	struct tht_pkt		*tpl_pkts;
469 	TAILQ_HEAD(, tht_pkt)	tpl_free;
470 	TAILQ_HEAD(, tht_pkt)	tpl_used;
471 };
472 
473 struct tht_softc {
474 	struct device		sc_dev;
475 	struct thtc_softc	*sc_thtc;
476 	int			sc_port;
477 
478 	bus_space_handle_t	sc_memh;
479 
480 	struct arpcom		sc_ac;
481 	struct ifmedia		sc_media;
482 	struct timeval		sc_mediacheck;
483 
484 	u_int16_t		sc_lladdr[3];
485 
486 	struct tht_pkt_list	sc_tx_list;
487 	struct tht_pkt_list	sc_rx_list;
488 
489 	struct tht_fifo		sc_txt;
490 	struct tht_fifo		sc_rxf;
491 	struct tht_fifo		sc_rxd;
492 	struct tht_fifo		sc_txf;
493 
494 	u_int32_t		sc_imr;
495 
496 	struct rwlock		sc_lock;
497 };
498 
499 int			tht_match(struct device *, void *, void *);
500 void			tht_attach(struct device *, struct device *, void *);
501 void			tht_mountroot(struct device *);
502 int			tht_intr(void *);
503 
504 struct cfattach tht_ca = {
505 	sizeof(struct tht_softc), tht_match, tht_attach
506 };
507 
508 struct cfdriver tht_cd = {
509 	NULL, "tht", DV_IFNET
510 };
511 
512 /* pkts */
513 int			tht_pkt_alloc(struct tht_softc *,
514 			    struct tht_pkt_list *, int, int);
515 void			tht_pkt_free(struct tht_softc *,
516 			    struct tht_pkt_list *);
517 void			tht_pkt_put(struct tht_pkt_list *, struct tht_pkt *);
518 struct tht_pkt 		*tht_pkt_get(struct tht_pkt_list *);
519 struct tht_pkt		*tht_pkt_used(struct tht_pkt_list *);
520 
521 /* fifos */
522 
523 struct tht_fifo_desc tht_txt_desc = {
524 	THT_REG_TXT_CFG0(0),
525 	THT_REG_TXT_CFG1(0),
526 	THT_REG_TXT_RPTR(0),
527 	THT_REG_TXT_WPTR(0),
528 	THT_FIFO_SIZE_16k,
529 	1
530 };
531 
532 struct tht_fifo_desc tht_rxf_desc = {
533 	THT_REG_RXF_CFG0(0),
534 	THT_REG_RXF_CFG1(0),
535 	THT_REG_RXF_RPTR(0),
536 	THT_REG_RXF_WPTR(0),
537 	THT_FIFO_SIZE_16k,
538 	1
539 };
540 
541 struct tht_fifo_desc tht_rxd_desc = {
542 	THT_REG_RXD_CFG0(0),
543 	THT_REG_RXD_CFG1(0),
544 	THT_REG_RXD_RPTR(0),
545 	THT_REG_RXD_WPTR(0),
546 	THT_FIFO_SIZE_16k,
547 	0
548 };
549 
550 struct tht_fifo_desc tht_txf_desc = {
551 	THT_REG_TXF_CFG0(0),
552 	THT_REG_TXF_CFG1(0),
553 	THT_REG_TXF_RPTR(0),
554 	THT_REG_TXF_WPTR(0),
555 	THT_FIFO_SIZE_4k,
556 	0
557 };
558 
559 int			tht_fifo_alloc(struct tht_softc *, struct tht_fifo *,
560 			    struct tht_fifo_desc *);
561 void			tht_fifo_free(struct tht_softc *, struct tht_fifo *);
562 
563 size_t			tht_fifo_readable(struct tht_softc *,
564 			    struct tht_fifo *);
565 size_t			tht_fifo_writable(struct tht_softc *,
566 			    struct tht_fifo *);
567 void			tht_fifo_pre(struct tht_softc *,
568 			    struct tht_fifo *);
569 void			tht_fifo_read(struct tht_softc *, struct tht_fifo *,
570 			    void *, size_t);
571 void			tht_fifo_write(struct tht_softc *, struct tht_fifo *,
572 			    void *, size_t);
573 void			tht_fifo_write_dmap(struct tht_softc *,
574 			    struct tht_fifo *, bus_dmamap_t);
575 void			tht_fifo_write_pad(struct tht_softc *,
576 			    struct tht_fifo *, int);
577 void			tht_fifo_post(struct tht_softc *,
578 			    struct tht_fifo *);
579 
580 /* port operations */
581 void			tht_lladdr_read(struct tht_softc *);
582 void			tht_lladdr_write(struct tht_softc *);
583 int			tht_sw_reset(struct tht_softc *);
584 int			tht_fw_load(struct tht_softc *);
585 void			tht_fw_tick(void *arg);
586 void			tht_link_state(struct tht_softc *);
587 
588 /* interface operations */
589 int			tht_ioctl(struct ifnet *, u_long, caddr_t);
590 void			tht_watchdog(struct ifnet *);
591 void			tht_start(struct ifnet *);
592 int			tht_load_pkt(struct tht_softc *, struct tht_pkt *,
593 			    struct mbuf *);
594 void			tht_txf(struct tht_softc *sc);
595 
596 void			tht_rxf_fill(struct tht_softc *, int);
597 void			tht_rxf_drain(struct tht_softc *);
598 void			tht_rxd(struct tht_softc *);
599 
600 void			tht_up(struct tht_softc *);
601 void			tht_iff(struct tht_softc *);
602 void			tht_down(struct tht_softc *);
603 
604 /* ifmedia operations */
605 int			tht_media_change(struct ifnet *);
606 void			tht_media_status(struct ifnet *, struct ifmediareq *);
607 
608 /* wrapper around dma memory */
609 struct tht_dmamem	*tht_dmamem_alloc(struct tht_softc *, bus_size_t,
610 			    bus_size_t);
611 void			tht_dmamem_free(struct tht_softc *,
612 			    struct tht_dmamem *);
613 
614 /* bus space operations */
615 u_int32_t		tht_read(struct tht_softc *, bus_size_t);
616 void			tht_write(struct tht_softc *, bus_size_t, u_int32_t);
617 void			tht_write_region(struct tht_softc *, bus_size_t,
618 			    void *, size_t);
619 int			tht_wait_eq(struct tht_softc *, bus_size_t, u_int32_t,
620 			    u_int32_t, int);
621 int			tht_wait_ne(struct tht_softc *, bus_size_t, u_int32_t,
622 			    u_int32_t, int);
623 
624 #define tht_set(_s, _r, _b)		tht_write((_s), (_r), \
625 					    tht_read((_s), (_r)) | (_b))
626 #define tht_clr(_s, _r, _b)		tht_write((_s), (_r), \
627 					    tht_read((_s), (_r)) & ~(_b))
628 #define tht_wait_set(_s, _r, _b, _t)	tht_wait_eq((_s), (_r), \
629 					    (_b), (_b), (_t))
630 
631 
632 /* misc */
633 #define DEVNAME(_sc)	((_sc)->sc_dev.dv_xname)
634 #define LWORDS(_b)	(((_b) + 7) >> 3)
635 
636 
637 struct thtc_device {
638 	pci_vendor_id_t		td_vendor;
639 	pci_vendor_id_t		td_product;
640 	u_int			td_nports;
641 };
642 
643 const struct thtc_device *thtc_lookup(struct pci_attach_args *);
644 
645 static const struct thtc_device thtc_devices[] = {
646 	{ PCI_VENDOR_TEHUTI,	PCI_PRODUCT_TEHUTI_TN3009, 1 },
647 	{ PCI_VENDOR_TEHUTI,	PCI_PRODUCT_TEHUTI_TN3010, 1 },
648 	{ PCI_VENDOR_TEHUTI,	PCI_PRODUCT_TEHUTI_TN3014, 2 }
649 };
650 
651 const struct thtc_device *
652 thtc_lookup(struct pci_attach_args *pa)
653 {
654 	int				i;
655 	const struct thtc_device	*td;
656 
657 	for (i = 0; i < nitems(thtc_devices); i++) {
658 		td = &thtc_devices[i];
659 		if (td->td_vendor == PCI_VENDOR(pa->pa_id) &&
660 		    td->td_product == PCI_PRODUCT(pa->pa_id))
661 			return (td);
662 	}
663 
664 	return (NULL);
665 }
666 
667 int
668 thtc_match(struct device *parent, void *match, void *aux)
669 {
670 	struct pci_attach_args		*pa = aux;
671 
672 	if (thtc_lookup(pa) != NULL)
673 		return (1);
674 
675 	return (0);
676 }
677 
678 void
679 thtc_attach(struct device *parent, struct device *self, void *aux)
680 {
681 	struct thtc_softc		*sc = (struct thtc_softc *)self;
682 	struct pci_attach_args		*pa = aux;
683 	pcireg_t			memtype;
684 	const struct thtc_device	*td;
685 	struct tht_attach_args		taa;
686 	pci_intr_handle_t		ih;
687 	int				i;
688 
689 	bzero(&taa, sizeof(taa));
690 	td = thtc_lookup(pa);
691 
692 	sc->sc_dmat = pa->pa_dmat;
693 
694 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, THT_PCI_BAR);
695 	if (pci_mapreg_map(pa, THT_PCI_BAR, memtype, 0, &sc->sc_memt,
696 	    &sc->sc_memh, NULL, &sc->sc_mems, 0) != 0) {
697 		printf(": unable to map host registers\n");
698 		return;
699 	}
700 
701 	if (pci_intr_map(pa, &ih) != 0) {
702 		printf(": unable to map interrupt\n");
703 		goto unmap;
704 	}
705 
706 	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih,
707 	    IPL_NET, tht_intr, sc, DEVNAME(sc));
708 	if (sc->sc_ih == NULL) {
709 		printf(": unable to establish interrupt\n");
710 		goto unmap;
711 	}
712 	printf(": %s\n", pci_intr_string(pa->pa_pc, ih));
713 
714 	taa.taa_pa = pa;
715 	for (i = 0; i < td->td_nports; i++) {
716 		taa.taa_port = i;
717 
718 		config_found(self, &taa, thtc_print);
719 	}
720 
721 	return;
722 
723 unmap:
724 	bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
725 	sc->sc_mems = 0;
726 }
727 
728 int
729 thtc_print(void *aux, const char *pnp)
730 {
731 	struct tht_attach_args		*taa = aux;
732 
733 	if (pnp != NULL)
734 		printf("\"%s\" at %s", tht_cd.cd_name, pnp);
735 
736 	printf(" port %d", taa->taa_port);
737 
738 	return (UNCONF);
739 }
740 
741 int
742 tht_match(struct device *parent, void *match, void *aux)
743 {
744 	return (1);
745 }
746 
747 void
748 tht_attach(struct device *parent, struct device *self, void *aux)
749 {
750 	struct thtc_softc		*csc = (struct thtc_softc *)parent;
751 	struct tht_softc		*sc = (struct tht_softc *)self;
752 	struct tht_attach_args		*taa = aux;
753 	struct ifnet			*ifp;
754 
755 	sc->sc_thtc = csc;
756 	sc->sc_port = taa->taa_port;
757 	sc->sc_imr = THT_IMR_DOWN(sc->sc_port);
758 	rw_init(&sc->sc_lock, "thtioc");
759 
760 	if (bus_space_subregion(csc->sc_memt, csc->sc_memh,
761 	    THT_PORT_REGION(sc->sc_port), THT_PORT_SIZE,
762 	    &sc->sc_memh) != 0) {
763 		printf(": unable to map port registers\n");
764 		return;
765 	}
766 
767 	if (tht_sw_reset(sc) != 0) {
768 		printf(": unable to reset port\n");
769 		/* bus_space(9) says we dont have to free subregions */
770 		return;
771 	}
772 
773 	tht_lladdr_read(sc);
774 	bcopy(sc->sc_lladdr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN);
775 
776 	ifp = &sc->sc_ac.ac_if;
777 	ifp->if_softc = sc;
778 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
779 	ifp->if_capabilities = IFCAP_VLAN_MTU;
780 	ifp->if_ioctl = tht_ioctl;
781 	ifp->if_start = tht_start;
782 	ifp->if_watchdog = tht_watchdog;
783 	ifp->if_hardmtu = MCLBYTES - ETHER_HDR_LEN - ETHER_CRC_LEN; /* XXX */
784 	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
785 	ifq_set_maxlen(&ifp->if_snd, 400);
786 
787 	ifmedia_init(&sc->sc_media, 0, tht_media_change, tht_media_status);
788 	ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_AUTO, 0, NULL);
789 	ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);
790 
791 	if_attach(ifp);
792 	ether_ifattach(ifp);
793 
794 	printf(": address %s\n", ether_sprintf(sc->sc_ac.ac_enaddr));
795 
796 	config_mountroot(self, tht_mountroot);
797 }
798 
799 void
800 tht_mountroot(struct device *self)
801 {
802 	struct tht_softc		*sc = (struct tht_softc *)self;
803 
804 	if (tht_fifo_alloc(sc, &sc->sc_txt, &tht_txt_desc) != 0)
805 		return;
806 
807 	if (tht_fw_load(sc) != 0)
808 		printf("%s: firmware load failed\n", DEVNAME(sc));
809 
810 	tht_sw_reset(sc);
811 
812 	tht_fifo_free(sc, &sc->sc_txt);
813 
814 	tht_link_state(sc);
815 	tht_write(sc, THT_REG_IMR, sc->sc_imr);
816 }
817 
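/*
 * A single interrupt is established on the controller, but the status and
 * mask registers live in each port's register window.  The handler walks
 * the autoconf device list starting just after the controller, relying on
 * the tht(4) ports having been attached immediately after it, and re-arms
 * each port by rewriting THT_REG_IMR once its work is done.
 */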
818 int
819 tht_intr(void *arg)
820 {
821 	struct thtc_softc		*thtc = arg;
822 	struct tht_softc		*sc;
823 	struct device			*d;
824 	struct ifnet			*ifp;
825 	u_int32_t			isr;
826 	int				rv = 0;
827 
828 	for (d = TAILQ_NEXT(&thtc->sc_dev, dv_list); d != NULL;
829 	    d = TAILQ_NEXT(d, dv_list)) {
830 		sc = (struct tht_softc *)d;
831 
832 		isr = tht_read(sc, THT_REG_ISR);
833 		if (isr == 0x0) {
834 			tht_write(sc, THT_REG_IMR, sc->sc_imr);
835 			continue;
836 		}
837 		rv = 1;
838 
839 		DPRINTF(THT_D_INTR, "%s: isr: 0x%b\n", DEVNAME(sc), isr, THT_FMT_ISR);
840 
841 		if (ISSET(isr, THT_REG_ISR_LINKCHG(0) | THT_REG_ISR_LINKCHG(1)))
842 			tht_link_state(sc);
843 
844 		ifp = &sc->sc_ac.ac_if;
845 		if (ifp->if_flags & IFF_RUNNING) {
846 			if (ISSET(isr, THT_REG_ISR_RXD(0)))
847 				tht_rxd(sc);
848 
849 			if (ISSET(isr, THT_REG_ISR_RXF(0)))
850 				tht_rxf_fill(sc, 0);
851 
852 			if (ISSET(isr, THT_REG_ISR_TXF(0)))
853 				tht_txf(sc);
854 
855 			tht_start(ifp);
856 		}
857 		tht_write(sc, THT_REG_IMR, sc->sc_imr);
858 	}
859 	return (rv);
860 }
861 
862 int
863 tht_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr)
864 {
865 	struct tht_softc		*sc = ifp->if_softc;
866 	struct ifreq			*ifr = (struct ifreq *)addr;
867 	int				s, error = 0;
868 
869 	rw_enter_write(&sc->sc_lock);
870 	s = splnet();
871 
872 	switch (cmd) {
873 	case SIOCSIFADDR:
874 		ifp->if_flags |= IFF_UP;
875 		/* FALLTHROUGH */
876 
877 	case SIOCSIFFLAGS:
878 		if (ifp->if_flags & IFF_UP) {
879 			if (ifp->if_flags & IFF_RUNNING)
880 				error = ENETRESET;
881 			else
882 				tht_up(sc);
883 		} else {
884 			if (ifp->if_flags & IFF_RUNNING)
885 				tht_down(sc);
886 		}
887 		break;
888 
889 	case SIOCGIFMEDIA:
890 	case SIOCSIFMEDIA:
891 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
892 		break;
893 
894 	default:
895 		error = ether_ioctl(ifp, &sc->sc_ac, cmd, addr);
896 	}
897 
898 	if (error == ENETRESET) {
899 		if (ifp->if_flags & IFF_RUNNING)
900 			tht_iff(sc);
901 		error = 0;
902 	}
903 
904 	splx(s);
905 	rw_exit_write(&sc->sc_lock);
906 
907 	return (error);
908 }
909 
910 void
911 tht_up(struct tht_softc *sc)
912 {
913 	struct ifnet			*ifp = &sc->sc_ac.ac_if;
914 
915 	if (ISSET(ifp->if_flags, IFF_RUNNING)) {
916 		return;
917 	}
918 
919 	if (tht_pkt_alloc(sc, &sc->sc_tx_list, THT_TXT_PKT_NUM,
920 	    THT_TXT_SGL_LEN) != 0)
921 		return;
922 	if (tht_pkt_alloc(sc, &sc->sc_rx_list, THT_RXF_PKT_NUM,
923 	    THT_RXF_SGL_LEN) != 0)
924 		goto free_tx_list;
925 
926 	if (tht_fifo_alloc(sc, &sc->sc_txt, &tht_txt_desc) != 0)
927 		goto free_rx_list;
928 	if (tht_fifo_alloc(sc, &sc->sc_rxf, &tht_rxf_desc) != 0)
929 		goto free_txt;
930 	if (tht_fifo_alloc(sc, &sc->sc_rxd, &tht_rxd_desc) != 0)
931 		goto free_rxf;
932 	if (tht_fifo_alloc(sc, &sc->sc_txf, &tht_txf_desc) != 0)
933 		goto free_rxd;
934 
935 	tht_write(sc, THT_REG_10G_FRM_LEN, MCLBYTES - ETHER_ALIGN);
936 	tht_write(sc, THT_REG_10G_PAUSE, 0x96);
937 	tht_write(sc, THT_REG_10G_RX_SEC, THT_REG_10G_SEC_AVAIL(0x10) |
938 	    THT_REG_10G_SEC_EMPTY(0x80));
939 	tht_write(sc, THT_REG_10G_TX_SEC, THT_REG_10G_SEC_AVAIL(0x10) |
940 	    THT_REG_10G_SEC_EMPTY(0xe0));
941 	tht_write(sc, THT_REG_10G_RFIFO_AEF, THT_REG_10G_FIFO_AE(0x0) |
942 	    THT_REG_10G_FIFO_AF(0x0));
943 	tht_write(sc, THT_REG_10G_TFIFO_AEF, THT_REG_10G_FIFO_AE(0x0) |
944 	    THT_REG_10G_FIFO_AF(0x0));
945 	tht_write(sc, THT_REG_10G_CTL, THT_REG_10G_CTL_TX_EN |
946 	    THT_REG_10G_CTL_RX_EN | THT_REG_10G_CTL_PAD |
947 	    THT_REG_10G_CTL_PROMISC);
948 
949 	tht_write(sc, THT_REG_VGLB, 0);
950 
951 	tht_write(sc, THT_REG_RX_MAX_FRAME, MCLBYTES - ETHER_ALIGN);
952 
953 	tht_write(sc, THT_REG_RDINTCM(0), THT_REG_RDINTCM_PKT_TH(12) |
954 	    THT_REG_RDINTCM_RXF_TH(4) | THT_REG_RDINTCM_COAL_RC |
955 	    THT_REG_RDINTCM_COAL(0x20));
956 	tht_write(sc, THT_REG_TDINTCM(0), THT_REG_TDINTCM_PKT_TH(12) |
957 	    THT_REG_TDINTCM_COAL_RC | THT_REG_TDINTCM_COAL(0x20));
958 
959 	bcopy(sc->sc_ac.ac_enaddr, sc->sc_lladdr, ETHER_ADDR_LEN);
960 	tht_lladdr_write(sc);
961 
962 	/* populate rxf fifo */
963 	tht_rxf_fill(sc, 1);
964 
965 	/* program promiscuous mode and multicast filters */
966 	tht_iff(sc);
967 
968 	ifp->if_flags |= IFF_RUNNING;
969 	ifq_clr_oactive(&ifp->if_snd);
970 
971 	/* enable interrupts */
972 	sc->sc_imr = THT_IMR_UP(sc->sc_port);
973 	tht_write(sc, THT_REG_IMR, sc->sc_imr);
974 
975 	return;
976 
977 free_rxd:
978 	tht_fifo_free(sc, &sc->sc_rxd);
979 free_rxf:
980 	tht_fifo_free(sc, &sc->sc_rxf);
981 free_txt:
982 	tht_fifo_free(sc, &sc->sc_txt);
983 
984 	tht_sw_reset(sc);
985 
986 free_rx_list:
987 	tht_pkt_free(sc, &sc->sc_rx_list);
988 free_tx_list:
989 	tht_pkt_free(sc, &sc->sc_tx_list);
990 }
991 
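/*
 * Program the receive filter.  Unless the interface is promiscuous (or a
 * multicast range forces ALLMULTI), every multicast group is folded into
 * the 256 bit imperfect hash: the index is simply the XOR of the six
 * address bytes, e.g. 01:00:5e:00:00:01 hashes to
 * 0x01 ^ 0x00 ^ 0x5e ^ 0x00 ^ 0x00 ^ 0x01 == 0x5e, and that bit is set in
 * the table written to THT_REG_RX_MCST_HASH.
 */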
992 void
993 tht_iff(struct tht_softc *sc)
994 {
995 	struct ifnet			*ifp = &sc->sc_ac.ac_if;
996 	struct ether_multi		*enm;
997 	struct ether_multistep		step;
998 	u_int32_t			rxf;
999 	u_int8_t			imf[THT_REG_RX_MCST_HASH_SIZE];
1000 	u_int8_t			hash;
1001 	int				i;
1002 
1003 	ifp->if_flags &= ~IFF_ALLMULTI;
1004 
1005 	rxf = THT_REG_RX_FLT_OSEN | THT_REG_RX_FLT_AM | THT_REG_RX_FLT_AB;
1006 	for (i = 0; i < THT_REG_RX_MAC_MCST_CNT; i++) {
1007 		tht_write(sc, THT_REG_RX_MAC_MCST0(i), 0);
1008 		tht_write(sc, THT_REG_RX_MAC_MCST1(i), 0);
1009 	}
1010 	memset(imf, 0x00, sizeof(imf));
1011 
1012 	if (ifp->if_flags & IFF_PROMISC) {
1013 		ifp->if_flags |= IFF_ALLMULTI;
1014 		rxf |= THT_REG_RX_FLT_PRM_ALL;
1015 	} else if (sc->sc_ac.ac_multirangecnt > 0) {
1016 		ifp->if_flags |= IFF_ALLMULTI;
1017 		memset(imf, 0xff, sizeof(imf));
1018 	} else {
1019 		ETHER_FIRST_MULTI(step, &sc->sc_ac, enm);
1020 
1021 #if 0
1022 		/* fill the perfect multicast filters */
1023 		for (i = 0; i < THT_REG_RX_MAC_MCST_CNT; i++) {
1024 			if (enm == NULL)
1025 				break;
1026 
1027 			tht_write(sc, THT_REG_RX_MAC_MCST0(i),
1028 			    (enm->enm_addrlo[0] << 0) |
1029 			    (enm->enm_addrlo[1] << 8) |
1030 			    (enm->enm_addrlo[2] << 16) |
1031 			    (enm->enm_addrlo[3] << 24));
1032 			tht_write(sc, THT_REG_RX_MAC_MCST1(i),
1033 			    (enm->enm_addrlo[4] << 0) |
1034 			    (enm->enm_addrlo[5] << 8));
1035 
1036 			ETHER_NEXT_MULTI(step, enm);
1037 		}
1038 #endif
1039 
1040 		/* fill the imperfect multicast filter with what's left */
1041 		while (enm != NULL) {
1042 			hash = 0x00;
1043 			for (i = 0; i < ETHER_ADDR_LEN; i++)
1044 				hash ^= enm->enm_addrlo[i];
1045 			setbit(imf, hash);
1046 
1047 			ETHER_NEXT_MULTI(step, enm);
1048 		}
1049 	}
1050 
1051 	tht_write_region(sc, THT_REG_RX_MCST_HASH, imf, sizeof(imf));
1052 	tht_write(sc, THT_REG_RX_FLT, rxf);
1053 }
1054 
1055 void
1056 tht_down(struct tht_softc *sc)
1057 {
1058 	struct ifnet			*ifp = &sc->sc_ac.ac_if;
1059 
1060 	if (!ISSET(ifp->if_flags, IFF_RUNNING)) {
1061 		return;
1062 	}
1063 
1064 	ifp->if_flags &= ~(IFF_RUNNING | IFF_ALLMULTI);
1065 	ifq_clr_oactive(&ifp->if_snd);
1066 
1067 	while (tht_fifo_writable(sc, &sc->sc_txt) < sc->sc_txt.tf_len &&
1068 	    tht_fifo_readable(sc, &sc->sc_txf) > 0)
1069 		tsleep_nsec(sc, 0, "thtdown", SEC_TO_NSEC(1));
1070 
1071 	sc->sc_imr = THT_IMR_DOWN(sc->sc_port);
1072 	tht_write(sc, THT_REG_IMR, sc->sc_imr);
1073 
1074 	tht_sw_reset(sc);
1075 
1076 	tht_fifo_free(sc, &sc->sc_txf);
1077 	tht_fifo_free(sc, &sc->sc_rxd);
1078 	tht_fifo_free(sc, &sc->sc_rxf);
1079 	tht_fifo_free(sc, &sc->sc_txt);
1080 
1081 	/* free mbufs that were on the rxf fifo */
1082 	tht_rxf_drain(sc);
1083 
1084 	tht_pkt_free(sc, &sc->sc_rx_list);
1085 	tht_pkt_free(sc, &sc->sc_tx_list);
1086 }
1087 
1088 void
1089 tht_start(struct ifnet *ifp)
1090 {
1091 	struct tht_softc		*sc = ifp->if_softc;
1092 	struct tht_pkt			*pkt;
1093 	struct tht_tx_task		txt;
1094 	u_int32_t			flags;
1095 	struct mbuf			*m;
1096 	int				bc;
1097 
1098 	if (!(ifp->if_flags & IFF_RUNNING))
1099 		return;
1100 	if (ifq_is_oactive(&ifp->if_snd))
1101 		return;
1102 	if (ifq_empty(&ifp->if_snd))
1103 		return;
1104 
1105 	if (tht_fifo_writable(sc, &sc->sc_txt) <= THT_FIFO_DESC_LEN)
1106 		return;
1107 
1108 	bzero(&txt, sizeof(txt));
1109 
1110 	tht_fifo_pre(sc, &sc->sc_txt);
1111 
1112 	do {
1113 		m = ifq_deq_begin(&ifp->if_snd);
1114 		if (m == NULL)
1115 			break;
1116 
1117 		pkt = tht_pkt_get(&sc->sc_tx_list);
1118 		if (pkt == NULL) {
1119 			ifq_deq_rollback(&ifp->if_snd, m);
1120 			ifq_set_oactive(&ifp->if_snd);
1121 			break;
1122 		}
1123 
1124 		ifq_deq_commit(&ifp->if_snd, m);
1125 		if (tht_load_pkt(sc, pkt, m) != 0) {
1126 			m_freem(m);
1127 			tht_pkt_put(&sc->sc_tx_list, pkt);
1128 			ifp->if_oerrors++;
1129 			break;
1130 		}
1131 		/* thou shalt not use m after this point, only pkt->tp_m */
1132 
1133 #if NBPFILTER > 0
1134 		if (ifp->if_bpf)
1135 			bpf_mtap(ifp->if_bpf, pkt->tp_m, BPF_DIRECTION_OUT);
1136 #endif
1137 
1138 		bc = sizeof(txt) +
1139 		    sizeof(struct tht_pbd) * pkt->tp_dmap->dm_nsegs;
1140 
1141 		flags = THT_TXT_TYPE | LWORDS(bc);
1142 		txt.flags = htole32(flags);
1143 		txt.len = htole16(pkt->tp_m->m_pkthdr.len);
1144 		txt.uid = pkt->tp_id;
1145 
1146 		DPRINTF(THT_D_TX, "%s: txt uid 0x%llx flags 0x%08x len %d\n",
1147 		    DEVNAME(sc), pkt->tp_id, flags, pkt->tp_m->m_pkthdr.len);
1148 
1149 		tht_fifo_write(sc, &sc->sc_txt, &txt, sizeof(txt));
1150 		tht_fifo_write_dmap(sc, &sc->sc_txt, pkt->tp_dmap);
1151 		tht_fifo_write_pad(sc, &sc->sc_txt, bc);
1152 
1153 		bus_dmamap_sync(sc->sc_thtc->sc_dmat, pkt->tp_dmap, 0,
1154 		    pkt->tp_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1155 
1156 	} while (sc->sc_txt.tf_ready > THT_FIFO_DESC_LEN);
1157 
1158 	tht_fifo_post(sc, &sc->sc_txt);
1159 }
1160 
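/*
 * Map an outgoing mbuf chain for DMA.  If the chain has more segments than
 * the dmamap allows (EFBIG), it is copied into a single cluster mbuf and
 * loaded again; in that case the original chain is freed here and only
 * pkt->tp_m may be used afterwards.
 */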
1161 int
1162 tht_load_pkt(struct tht_softc *sc, struct tht_pkt *pkt, struct mbuf *m)
1163 {
1164 	bus_dma_tag_t			dmat = sc->sc_thtc->sc_dmat;
1165 	bus_dmamap_t			dmap = pkt->tp_dmap;
1166 	struct mbuf			*m0 = NULL;
1167 
1168 	switch (bus_dmamap_load_mbuf(dmat, dmap, m, BUS_DMA_NOWAIT)) {
1169 	case 0:
1170 		pkt->tp_m = m;
1171 		break;
1172 
1173 	case EFBIG: /* mbuf chain is too fragmented */
1174 		MGETHDR(m0, M_DONTWAIT, MT_DATA);
1175 		if (m0 == NULL)
1176 			return (ENOBUFS);
1177 		if (m->m_pkthdr.len > MHLEN) {
1178 			MCLGET(m0, M_DONTWAIT);
1179 			if (!(m0->m_flags & M_EXT)) {
1180 				m_freem(m0);
1181 				return (ENOBUFS);
1182 			}
1183 		}
1184 		m_copydata(m, 0, m->m_pkthdr.len, mtod(m0, caddr_t));
1185 		m0->m_pkthdr.len = m0->m_len = m->m_pkthdr.len;
1186 		if (bus_dmamap_load_mbuf(dmat, dmap, m0, BUS_DMA_NOWAIT)) {
1187 			m_freem(m0);
1188 			return (ENOBUFS);
1189 		}
1190 
1191 		m_freem(m);
1192 		pkt->tp_m = m0;
1193 		break;
1194 
1195 	default:
1196 		return (ENOBUFS);
1197 	}
1198 
1199 	return (0);
1200 }
1201 
1202 void
1203 tht_txf(struct tht_softc *sc)
1204 {
1205 	struct ifnet			*ifp = &sc->sc_ac.ac_if;
1206 	bus_dma_tag_t			dmat = sc->sc_thtc->sc_dmat;
1207 	bus_dmamap_t			dmap;
1208 	struct tht_tx_free		txf;
1209 	struct tht_pkt			*pkt;
1210 
1211 	if (tht_fifo_readable(sc, &sc->sc_txf) < sizeof(txf))
1212 		return;
1213 
1214 	tht_fifo_pre(sc, &sc->sc_txf);
1215 
1216 	do {
1217 		tht_fifo_read(sc, &sc->sc_txf, &txf, sizeof(txf));
1218 
1219 		DPRINTF(THT_D_TX, "%s: txf uid 0x%llx\n", DEVNAME(sc), txf.uid);
1220 
1221 		pkt = &sc->sc_tx_list.tpl_pkts[txf.uid];
1222 		dmap = pkt->tp_dmap;
1223 
1224 		bus_dmamap_sync(dmat, dmap, 0, dmap->dm_mapsize,
1225 		    BUS_DMASYNC_POSTWRITE);
1226 		bus_dmamap_unload(dmat, dmap);
1227 
1228 		m_freem(pkt->tp_m);
1229 
1230 		tht_pkt_put(&sc->sc_tx_list, pkt);
1231 
1232 	} while (sc->sc_txf.tf_ready >= sizeof(txf));
1233 
1234 	ifq_clr_oactive(&ifp->if_snd);
1235 
1236 	tht_fifo_post(sc, &sc->sc_txf);
1237 }
1238 
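/*
 * Post receive buffers to the rx free fifo.  Each entry is a tht_rx_free
 * header followed by one tht_pbd per DMA segment, its length counted in
 * 8 byte words (LWORDS) and padded out by tht_fifo_write_pad().  The
 * cluster data pointer is advanced by ETHER_ALIGN so the IP header ends
 * up 4 byte aligned after the 14 byte ethernet header.
 */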
1239 void
1240 tht_rxf_fill(struct tht_softc *sc, int wait)
1241 {
1242 	bus_dma_tag_t			dmat = sc->sc_thtc->sc_dmat;
1243 	bus_dmamap_t			dmap;
1244 	struct tht_rx_free		rxf;
1245 	struct tht_pkt			*pkt;
1246 	struct mbuf			*m;
1247 	int				bc;
1248 
1249 	if (tht_fifo_writable(sc, &sc->sc_rxf) <= THT_FIFO_DESC_LEN)
1250 		return;
1251 
1252 	tht_fifo_pre(sc, &sc->sc_rxf);
1253 
1254 	for (;;) {
1255 		if ((pkt = tht_pkt_get(&sc->sc_rx_list)) == NULL)
1256 			goto done;
1257 
1258 		MGETHDR(m, wait ? M_WAIT : M_DONTWAIT, MT_DATA);
1259 		if (m == NULL)
1260 			goto put_pkt;
1261 
1262 		MCLGET(m, wait ? M_WAIT : M_DONTWAIT);
1263 		if (!ISSET(m->m_flags, M_EXT))
1264 			goto free_m;
1265 
1266 		m->m_data += ETHER_ALIGN;
1267 		m->m_len = m->m_pkthdr.len = MCLBYTES - ETHER_ALIGN;
1268 
1269 		dmap = pkt->tp_dmap;
1270 		if (bus_dmamap_load_mbuf(dmat, dmap, m,
1271 		    wait ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT) != 0)
1272 			goto free_m;
1273 
1274 		pkt->tp_m = m;
1275 
1276 		bc = sizeof(rxf) + sizeof(struct tht_pbd) * dmap->dm_nsegs;
1277 
1278 		rxf.bc = htole16(LWORDS(bc));
1279 		rxf.type = htole16(THT_RXF_TYPE);
1280 		rxf.uid = pkt->tp_id;
1281 
1282 		tht_fifo_write(sc, &sc->sc_rxf, &rxf, sizeof(rxf));
1283 		tht_fifo_write_dmap(sc, &sc->sc_rxf, dmap);
1284 		tht_fifo_write_pad(sc, &sc->sc_rxf, bc);
1285 
1286 		bus_dmamap_sync(dmat, dmap, 0, dmap->dm_mapsize,
1287 		    BUS_DMASYNC_PREREAD);
1288 
1289 		if (sc->sc_rxf.tf_ready <= THT_FIFO_DESC_LEN)
1290 			goto done;
1291 	}
1292 
1293 free_m:
1294 	m_freem(m);
1295 put_pkt:
1296 	tht_pkt_put(&sc->sc_rx_list, pkt);
1297 done:
1298 	tht_fifo_post(sc, &sc->sc_rxf);
1299 }
1300 
1301 void
1302 tht_rxf_drain(struct tht_softc *sc)
1303 {
1304 	bus_dma_tag_t			dmat = sc->sc_thtc->sc_dmat;
1305 	bus_dmamap_t			dmap;
1306 	struct tht_pkt			*pkt;
1307 
1308 	while ((pkt = tht_pkt_used(&sc->sc_rx_list)) != NULL) {
1309 		dmap = pkt->tp_dmap;
1310 
1311 		bus_dmamap_sync(dmat, dmap, 0, dmap->dm_mapsize,
1312 		    BUS_DMASYNC_POSTREAD);
1313 		bus_dmamap_unload(dmat, dmap);
1314 
1315 		m_freem(pkt->tp_m);
1316 
1317 		tht_pkt_put(&sc->sc_rx_list, pkt);
1318 	}
1319 }
1320 
1321 void
1322 tht_rxd(struct tht_softc *sc)
1323 {
1324 	struct ifnet			*ifp = &sc->sc_ac.ac_if;
1325 	bus_dma_tag_t			dmat = sc->sc_thtc->sc_dmat;
1326 	bus_dmamap_t			dmap;
1327 	struct tht_rx_desc		rxd;
1328 	struct tht_pkt			*pkt;
1329 	struct mbuf			*m;
1330 	struct mbuf_list		ml = MBUF_LIST_INITIALIZER();
1331 	int				bc;
1332 	u_int32_t			flags;
1333 
1334 	if (tht_fifo_readable(sc, &sc->sc_rxd) < sizeof(rxd))
1335 		return;
1336 
1337 	tht_fifo_pre(sc, &sc->sc_rxd);
1338 
1339 	do {
1340 		tht_fifo_read(sc, &sc->sc_rxd, &rxd, sizeof(rxd));
1341 
1342 		flags = letoh32(rxd.flags);
1343 		bc = THT_RXD_FLAGS_BC(flags) * 8;
1344 		bc -= sizeof(rxd);
1345 		pkt = &sc->sc_rx_list.tpl_pkts[rxd.uid];
1346 
1347 		dmap = pkt->tp_dmap;
1348 
1349 		bus_dmamap_sync(dmat, dmap, 0, dmap->dm_mapsize,
1350 		    BUS_DMASYNC_POSTREAD);
1351 		bus_dmamap_unload(dmat, dmap);
1352 
1353 		m = pkt->tp_m;
1354 		m->m_pkthdr.len = m->m_len = letoh16(rxd.len);
1355 
1356 		/* XXX process type 3 rx descriptors */
1357 
1358 		ml_enqueue(&ml, m);
1359 
1360 		tht_pkt_put(&sc->sc_rx_list, pkt);
1361 
1362 		while (bc > 0) {
1363 			static u_int32_t pad;
1364 
1365 			tht_fifo_read(sc, &sc->sc_rxd, &pad, sizeof(pad));
1366 			bc -= sizeof(pad);
1367 		}
1368 	} while (sc->sc_rxd.tf_ready >= sizeof(rxd));
1369 
1370 	tht_fifo_post(sc, &sc->sc_rxd);
1371 
1372 	if_input(ifp, &ml);
1373 
1374 	/* put more pkts on the fifo */
1375 	tht_rxf_fill(sc, 0);
1376 }
1377 
1378 void
1379 tht_watchdog(struct ifnet *ifp)
1380 {
1381 	/* do nothing */
1382 }
1383 
1384 int
1385 tht_media_change(struct ifnet *ifp)
1386 {
1387 	/* ignore */
1388 	return (0);
1389 }
1390 
1391 void
1392 tht_media_status(struct ifnet *ifp, struct ifmediareq *imr)
1393 {
1394 	struct tht_softc		*sc = ifp->if_softc;
1395 
1396 	imr->ifm_active = IFM_ETHER | IFM_AUTO;
1397 	imr->ifm_status = IFM_AVALID;
1398 
1399 	tht_link_state(sc);
1400 
1401 	if (LINK_STATE_IS_UP(ifp->if_link_state))
1402 		imr->ifm_status |= IFM_ACTIVE;
1403 }
1404 
1405 int
1406 tht_fifo_alloc(struct tht_softc *sc, struct tht_fifo *tf,
1407     struct tht_fifo_desc *tfd)
1408 {
1409 	u_int64_t			dva;
1410 
1411 	tf->tf_len = THT_FIFO_SIZE(tfd->tfd_size);
1412 	tf->tf_mem = tht_dmamem_alloc(sc, tf->tf_len, THT_FIFO_ALIGN);
1413 	if (tf->tf_mem == NULL)
1414 		return (1);
1415 
1416 	tf->tf_desc = tfd;
1417 	tf->tf_rptr = tf->tf_wptr = 0;
1418 
1419 	bus_dmamap_sync(sc->sc_thtc->sc_dmat, THT_DMA_MAP(tf->tf_mem),
1420 	    0, tf->tf_len, THT_FIFO_PRE_SYNC(tfd));
1421 
1422 	dva = THT_DMA_DVA(tf->tf_mem);
1423 	tht_write(sc, tfd->tfd_cfg0, (u_int32_t)dva | tfd->tfd_size);
1424 	tht_write(sc, tfd->tfd_cfg1, (u_int32_t)(dva >> 32));
1425 
1426 	return (0);
1427 }
1428 
1429 void
1430 tht_fifo_free(struct tht_softc *sc, struct tht_fifo *tf)
1431 {
1432 	bus_dmamap_sync(sc->sc_thtc->sc_dmat, THT_DMA_MAP(tf->tf_mem),
1433 	    0, tf->tf_len, THT_FIFO_POST_SYNC(tf->tf_desc));
1434 	tht_dmamem_free(sc, tf->tf_mem);
1435 }
1436 
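/*
 * The fifos are plain rings shared with the chip.  Readable space is the
 * distance from our read pointer to the hardware write pointer, writable
 * space the distance from the hardware read pointer to our write pointer,
 * both modulo the fifo length.  Writers compare the free space against
 * THT_FIFO_DESC_LEN or THT_FIFO_GAP with <= so the write pointer never
 * quite catches up with the read pointer.
 */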
1437 size_t
1438 tht_fifo_readable(struct tht_softc *sc, struct tht_fifo *tf)
1439 {
1440 	tf->tf_wptr = tht_read(sc, tf->tf_desc->tfd_wptr);
1441 	tf->tf_wptr &= THT_FIFO_PTR_MASK;
1442 	tf->tf_ready = tf->tf_wptr - tf->tf_rptr;
1443 	if (tf->tf_ready < 0)
1444 		tf->tf_ready += tf->tf_len;
1445 
1446 	DPRINTF(THT_D_FIFO, "%s: fifo rdable wptr: %d rptr: %d ready: %d\n",
1447 	    DEVNAME(sc), tf->tf_wptr, tf->tf_rptr, tf->tf_ready);
1448 
1449 	return (tf->tf_ready);
1450 }
1451 
1452 size_t
1453 tht_fifo_writable(struct tht_softc *sc, struct tht_fifo *tf)
1454 {
1455 	tf->tf_rptr = tht_read(sc, tf->tf_desc->tfd_rptr);
1456 	tf->tf_rptr &= THT_FIFO_PTR_MASK;
1457 	tf->tf_ready = tf->tf_rptr - tf->tf_wptr;
1458 	if (tf->tf_ready <= 0)
1459 		tf->tf_ready += tf->tf_len;
1460 
1461 	DPRINTF(THT_D_FIFO, "%s: fifo wrable wptr: %d rptr: %d ready: %d\n",
1462 	    DEVNAME(sc), tf->tf_wptr, tf->tf_rptr, tf->tf_ready);
1463 
1464 	return (tf->tf_ready);
1465 }
1466 
1467 void
1468 tht_fifo_pre(struct tht_softc *sc, struct tht_fifo *tf)
1469 {
1470 	bus_dmamap_sync(sc->sc_thtc->sc_dmat, THT_DMA_MAP(tf->tf_mem),
1471 	    0, tf->tf_len, THT_FIFO_POST_SYNC(tf->tf_desc));
1472 }
1473 
1474 void
1475 tht_fifo_read(struct tht_softc *sc, struct tht_fifo *tf,
1476     void *buf, size_t buflen)
1477 {
1478 	u_int8_t			*fifo = THT_DMA_KVA(tf->tf_mem);
1479 	u_int8_t			*desc = buf;
1480 	size_t				len;
1481 
1482 	tf->tf_ready -= buflen;
1483 
1484 	len = tf->tf_len - tf->tf_rptr;
1485 
1486 	if (len < buflen) {
1487 		memcpy(desc, fifo + tf->tf_rptr, len);
1488 
1489 		buflen -= len;
1490 		desc += len;
1491 
1492 		tf->tf_rptr = 0;
1493 	}
1494 
1495 	memcpy(desc, fifo + tf->tf_rptr, buflen);
1496 	tf->tf_rptr += buflen;
1497 
1498 	DPRINTF(THT_D_FIFO, "%s: fifo rd wptr: %d rptr: %d ready: %d\n",
1499 	    DEVNAME(sc), tf->tf_wptr, tf->tf_rptr, tf->tf_ready);
1500 }
1501 
1502 void
1503 tht_fifo_write(struct tht_softc *sc, struct tht_fifo *tf,
1504     void *buf, size_t buflen)
1505 {
1506 	u_int8_t			*fifo = THT_DMA_KVA(tf->tf_mem);
1507 	u_int8_t			*desc = buf;
1508 	size_t				len;
1509 
1510 	tf->tf_ready -= buflen;
1511 
1512 	len = tf->tf_len - tf->tf_wptr;
1513 
1514 	if (len < buflen) {
1515 		memcpy(fifo + tf->tf_wptr, desc, len);
1516 
1517 		buflen -= len;
1518 		desc += len;
1519 
1520 		tf->tf_wptr = 0;
1521 	}
1522 
1523 	memcpy(fifo + tf->tf_wptr, desc, buflen);
1524 	tf->tf_wptr += buflen;
1525 	tf->tf_wptr %= tf->tf_len;
1526 
1527 	DPRINTF(THT_D_FIFO, "%s: fifo wr wptr: %d rptr: %d ready: %d\n",
1528 	    DEVNAME(sc), tf->tf_wptr, tf->tf_rptr, tf->tf_ready);
1529 }
1530 
1531 void
1532 tht_fifo_write_dmap(struct tht_softc *sc, struct tht_fifo *tf,
1533     bus_dmamap_t dmap)
1534 {
1535 	struct tht_pbd			pbd;
1536 	u_int64_t			dva;
1537 	int				i;
1538 
1539 	for (i = 0; i < dmap->dm_nsegs; i++) {
1540 		dva = dmap->dm_segs[i].ds_addr;
1541 
1542 		pbd.addr_lo = htole32(dva);
1543 		pbd.addr_hi = htole32(dva >> 32);
1544 		pbd.len = htole32(dmap->dm_segs[i].ds_len);
1545 
1546 		tht_fifo_write(sc, tf, &pbd, sizeof(pbd));
1547 	}
1548 }
1549 
1550 void
1551 tht_fifo_write_pad(struct tht_softc *sc, struct tht_fifo *tf, int bc)
1552 {
1553 	static const u_int32_t pad = 0x0;
1554 
1555 	/* this assumes you'll only ever be writing multiples of 4 bytes */
1556 	if (bc % 8)
1557 		tht_fifo_write(sc, tf, (void *)&pad, sizeof(pad));
1558 }
1559 
1560 void
1561 tht_fifo_post(struct tht_softc *sc, struct tht_fifo *tf)
1562 {
1563 	bus_dmamap_sync(sc->sc_thtc->sc_dmat, THT_DMA_MAP(tf->tf_mem),
1564 	    0, tf->tf_len, THT_FIFO_PRE_SYNC(tf->tf_desc));
1565 	if (tf->tf_desc->tfd_write)
1566 		tht_write(sc, tf->tf_desc->tfd_wptr, tf->tf_wptr);
1567 	else
1568 		tht_write(sc, tf->tf_desc->tfd_rptr, tf->tf_rptr);
1569 
1570 	DPRINTF(THT_D_FIFO, "%s: fifo post wptr: %d rptr: %d\n", DEVNAME(sc),
1571 	    tf->tf_wptr, tf->tf_rptr);
1572 }
1573 
1574 static const bus_size_t tht_mac_regs[3] = {
1575     THT_REG_RX_UNC_MAC2, THT_REG_RX_UNC_MAC1, THT_REG_RX_UNC_MAC0
1576 };
1577 
1578 void
1579 tht_lladdr_read(struct tht_softc *sc)
1580 {
1581 	int				i;
1582 
1583 	for (i = 0; i < nitems(tht_mac_regs); i++)
1584 		sc->sc_lladdr[i] = betoh16(tht_read(sc, tht_mac_regs[i]));
1585 }
1586 
1587 void
1588 tht_lladdr_write(struct tht_softc *sc)
1589 {
1590 	int				i;
1591 
1592 	for (i = 0; i < nitems(tht_mac_regs); i++)
1593 		tht_write(sc, tht_mac_regs[i], htobe16(sc->sc_lladdr[i]));
1594 }
1595 
1596 #define tht_swrst_set(_s, _r) tht_write((_s), (_r), 0x1)
1597 #define tht_swrst_clr(_s, _r) tht_write((_s), (_r), 0x0)
1598 int
1599 tht_sw_reset(struct tht_softc *sc)
1600 {
1601 	int				i;
1602 
1603 	/* this follows SW Reset process in 8.8 of the doco */
1604 
1605 	/* 1. disable rx */
1606 	tht_clr(sc, THT_REG_RX_FLT, THT_REG_RX_FLT_OSEN);
1607 
1608 	/* 2. initiate port disable */
1609 	tht_swrst_set(sc, THT_REG_DIS_PRT);
1610 
1611 	/* 3. initiate queue disable */
1612 	tht_swrst_set(sc, THT_REG_DIS_QU_0);
1613 	tht_swrst_set(sc, THT_REG_DIS_QU_1);
1614 
1615 	/* 4. wait for successful finish of previous tasks */
1616 	if (!tht_wait_set(sc, THT_REG_RST_PRT, THT_REG_RST_PRT_ACTIVE, 1000))
1617 		return (1);
1618 
1619 	/* 5. Reset interrupt registers */
1620 	tht_write(sc, THT_REG_IMR, 0x0); /* 5.a */
1621 	tht_read(sc, THT_REG_ISR); /* 5.b */
1622 	for (i = 0; i < THT_NQUEUES; i++) {
1623 		tht_write(sc, THT_REG_RDINTCM(i), 0x0); /* 5.c/5.d */
1624 		tht_write(sc, THT_REG_TDINTCM(i), 0x0); /* 5.e */
1625 	}
1626 
1627 	/* 6. initiate queue reset */
1628 	tht_swrst_set(sc, THT_REG_RST_QU_0);
1629 	tht_swrst_set(sc, THT_REG_RST_QU_1);
1630 
1631 	/* 7. initiate port reset */
1632 	tht_swrst_set(sc, THT_REG_RST_PRT);
1633 
1634 	/* 8. clear txt/rxf/rxd/txf read and write ptrs */
1635 	for (i = 0; i < THT_NQUEUES; i++) {
1636 		tht_write(sc, THT_REG_TXT_RPTR(i), 0);
1637 		tht_write(sc, THT_REG_RXF_RPTR(i), 0);
1638 		tht_write(sc, THT_REG_RXD_RPTR(i), 0);
1639 		tht_write(sc, THT_REG_TXF_RPTR(i), 0);
1640 
1641 		tht_write(sc, THT_REG_TXT_WPTR(i), 0);
1642 		tht_write(sc, THT_REG_RXF_WPTR(i), 0);
1643 		tht_write(sc, THT_REG_RXD_WPTR(i), 0);
1644 		tht_write(sc, THT_REG_TXF_WPTR(i), 0);
1645 	}
1646 
1647 	/* 9. unset port disable */
1648 	tht_swrst_clr(sc, THT_REG_DIS_PRT);
1649 
1650 	/* 10. unset queue disable */
1651 	tht_swrst_clr(sc, THT_REG_DIS_QU_0);
1652 	tht_swrst_clr(sc, THT_REG_DIS_QU_1);
1653 
1654 	/* 11. unset queue reset */
1655 	tht_swrst_clr(sc, THT_REG_RST_QU_0);
1656 	tht_swrst_clr(sc, THT_REG_RST_QU_1);
1657 
1658 	/* 12. unset port reset */
1659 	tht_swrst_clr(sc, THT_REG_RST_PRT);
1660 
1661 	/* 13. enable rx */
1662 	tht_set(sc, THT_REG_RX_FLT, THT_REG_RX_FLT_OSEN);
1663 
1664 	return (0);
1665 }
1666 
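/*
 * Load the chip firmware.  The image is fetched with loadfirmware(9) (the
 * "tht" file), must be a multiple of 8 bytes, and is streamed into the tx
 * task fifo in chunks bounded by the writable space left after
 * THT_FIFO_GAP.  THT_REG_INIT_STATUS going non zero signals success; the
 * two second ticker bounds how long we poll for it.
 */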
1667 int
1668 tht_fw_load(struct tht_softc *sc)
1669 {
1670 	struct timeout			ticker;
1671 	volatile int			ok = 1;
1672 	u_int8_t			*fw, *buf;
1673 	size_t				fwlen, wrlen;
1674 	int				error = 1;
1675 
1676 	if (loadfirmware("tht", &fw, &fwlen) != 0)
1677 		return (1);
1678 
1679 	if ((fwlen % 8) != 0)
1680 		goto err;
1681 
1682 	buf = fw;
1683 	while (buf < fw + fwlen) {
1684 		while (tht_fifo_writable(sc, &sc->sc_txt) <= THT_FIFO_GAP) {
1685 			if (tsleep(sc, PCATCH, "thtfw", 1) == EINTR)
1686 				goto err;
1687 		}
1688 
1689 		wrlen = MIN(sc->sc_txt.tf_ready - THT_FIFO_GAP, fw + fwlen - buf);
1690 		tht_fifo_pre(sc, &sc->sc_txt);
1691 		tht_fifo_write(sc, &sc->sc_txt, buf, wrlen);
1692 		tht_fifo_post(sc, &sc->sc_txt);
1693 
1695 		buf += wrlen;
1696 	}
1697 
1698 	timeout_set(&ticker, tht_fw_tick, (void *)&ok);
1699 	timeout_add_sec(&ticker, 2);
1700 	while (ok) {
1701 		if (tht_read(sc, THT_REG_INIT_STATUS) != 0) {
1702 			error = 0;
1703 			break;
1704 		}
1705 
1706 		if (tsleep(sc, PCATCH, "thtinit", 1) == EINTR)
1707 			goto err;
1708 	}
1709 	timeout_del(&ticker);
1710 
1711 	tht_write(sc, THT_REG_INIT_SEMAPHORE, 0x1);
1712 
1713 err:
1714 	free(fw, M_DEVBUF, fwlen);
1715 	return (error);
1716 }
1717 
1718 void
1719 tht_fw_tick(void *arg)
1720 {
1721 	volatile int			*ok = arg;
1722 
1723 	*ok = 0;
1724 }
1725 
1726 void
1727 tht_link_state(struct tht_softc *sc)
1728 {
1729 	static const struct timeval	interval = { 0, 10000 };
1730 	struct ifnet			*ifp = &sc->sc_ac.ac_if;
1731 	int				link_state = LINK_STATE_DOWN;
1732 
1733 	if (!ratecheck(&sc->sc_mediacheck, &interval))
1734 		return;
1735 
1736 	if (tht_read(sc, THT_REG_MAC_LNK_STAT) & THT_REG_MAC_LNK_STAT_LINK)
1737 		link_state = LINK_STATE_FULL_DUPLEX;
1738 
1739 	if (ifp->if_link_state != link_state) {
1740 		ifp->if_link_state = link_state;
1741 		if_link_state_change(ifp);
1742 	}
1743 
1744 	if (LINK_STATE_IS_UP(ifp->if_link_state))
1745 		ifp->if_baudrate = IF_Gbps(10);
1746 	else
1747 		ifp->if_baudrate = 0;
1748 }
1749 
1750 u_int32_t
1751 tht_read(struct tht_softc *sc, bus_size_t r)
1752 {
1753 	bus_space_barrier(sc->sc_thtc->sc_memt, sc->sc_memh, r, 4,
1754 	    BUS_SPACE_BARRIER_READ);
1755 	return (bus_space_read_4(sc->sc_thtc->sc_memt, sc->sc_memh, r));
1756 }
1757 
1758 void
1759 tht_write(struct tht_softc *sc, bus_size_t r, u_int32_t v)
1760 {
1761 	bus_space_write_4(sc->sc_thtc->sc_memt, sc->sc_memh, r, v);
1762 	bus_space_barrier(sc->sc_thtc->sc_memt, sc->sc_memh, r, 4,
1763 	    BUS_SPACE_BARRIER_WRITE);
1764 }
1765 
1766 void
1767 tht_write_region(struct tht_softc *sc, bus_size_t r, void *buf, size_t len)
1768 {
1769 	bus_space_write_raw_region_4(sc->sc_thtc->sc_memt, sc->sc_memh, r,
1770 	    buf, len);
1771 	bus_space_barrier(sc->sc_thtc->sc_memt, sc->sc_memh, r, len,
1772 	    BUS_SPACE_BARRIER_WRITE);
1773 }
1774 
1775 int
1776 tht_wait_eq(struct tht_softc *sc, bus_size_t r, u_int32_t m, u_int32_t v,
1777     int timeout)
1778 {
1779 	while ((tht_read(sc, r) & m) != v) {
1780 		if (timeout == 0)
1781 			return (0);
1782 
1783 		delay(1000);
1784 		timeout--;
1785 	}
1786 
1787 	return (1);
1788 }
1789 
1790 int
1791 tht_wait_ne(struct tht_softc *sc, bus_size_t r, u_int32_t m, u_int32_t v,
1792     int timeout)
1793 {
1794 	while ((tht_read(sc, r) & m) == v) {
1795 		if (timeout == 0)
1796 			return (0);
1797 
1798 		delay(1000);
1799 		timeout--;
1800 	}
1801 
1802 	return (1);
1803 }
1804 
1805 struct tht_dmamem *
1806 tht_dmamem_alloc(struct tht_softc *sc, bus_size_t size, bus_size_t align)
1807 {
1808 	bus_dma_tag_t			dmat = sc->sc_thtc->sc_dmat;
1809 	struct tht_dmamem		*tdm;
1810 	int				nsegs;
1811 
1812 	tdm = malloc(sizeof(struct tht_dmamem), M_DEVBUF, M_WAITOK | M_ZERO);
1813 	tdm->tdm_size = size;
1814 
1815 	if (bus_dmamap_create(dmat, size, 1, size, 0,
1816 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &tdm->tdm_map) != 0)
1817 		goto tdmfree;
1818 
1819 	if (bus_dmamem_alloc(dmat, size, align, 0, &tdm->tdm_seg, 1, &nsegs,
1820 	    BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
1821 		goto destroy;
1822 
1823 	if (bus_dmamem_map(dmat, &tdm->tdm_seg, nsegs, size, &tdm->tdm_kva,
1824 	    BUS_DMA_WAITOK) != 0)
1825 		goto free;
1826 
1827 	if (bus_dmamap_load(dmat, tdm->tdm_map, tdm->tdm_kva, size,
1828 	    NULL, BUS_DMA_WAITOK) != 0)
1829 		goto unmap;
1830 
1831 	return (tdm);
1832 
1833 unmap:
1834 	bus_dmamem_unmap(dmat, tdm->tdm_kva, size);
1835 free:
1836 	bus_dmamem_free(dmat, &tdm->tdm_seg, 1);
1837 destroy:
1838 	bus_dmamap_destroy(dmat, tdm->tdm_map);
1839 tdmfree:
1840 	free(tdm, M_DEVBUF, 0);
1841 
1842 	return (NULL);
1843 }
1844 
1845 void
1846 tht_dmamem_free(struct tht_softc *sc, struct tht_dmamem *tdm)
1847 {
1848 	bus_dma_tag_t			dmat = sc->sc_thtc->sc_dmat;
1849 
1850 	bus_dmamap_unload(dmat, tdm->tdm_map);
1851 	bus_dmamem_unmap(dmat, tdm->tdm_kva, tdm->tdm_size);
1852 	bus_dmamem_free(dmat, &tdm->tdm_seg, 1);
1853 	bus_dmamap_destroy(dmat, tdm->tdm_map);
1854 	free(tdm, M_DEVBUF, 0);
1855 }
1856 
1857 int
1858 tht_pkt_alloc(struct tht_softc *sc, struct tht_pkt_list *tpl, int npkts,
1859     int nsegs)
1860 {
1861 	bus_dma_tag_t			dmat = sc->sc_thtc->sc_dmat;
1862 	struct tht_pkt			*pkt;
1863 	int				i;
1864 
1865 	tpl->tpl_pkts = mallocarray(npkts, sizeof(struct tht_pkt),
1866 	    M_DEVBUF, M_WAITOK | M_ZERO);
1867 
1868 	TAILQ_INIT(&tpl->tpl_free);
1869 	TAILQ_INIT(&tpl->tpl_used);
1870 	for (i = 0; i < npkts; i++) {
1871 		pkt = &tpl->tpl_pkts[i];
1872 
1873 		pkt->tp_id = i;
1874 		if (bus_dmamap_create(dmat, THT_PBD_PKTLEN, nsegs,
1875 		    THT_PBD_PKTLEN, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
1876 		    &pkt->tp_dmap) != 0) {
1877 			tht_pkt_free(sc, tpl);
1878 			return (1);
1879 		}
1880 
1881 		TAILQ_INSERT_TAIL(&tpl->tpl_free, pkt, tp_link);
1882 	}
1883 
1884 	return (0);
1885 }
1886 
1887 void
1888 tht_pkt_free(struct tht_softc *sc, struct tht_pkt_list *tpl)
1889 {
1890 	bus_dma_tag_t			dmat = sc->sc_thtc->sc_dmat;
1891 	struct tht_pkt			*pkt;
1892 
1893 	while ((pkt = tht_pkt_get(tpl)) != NULL)
1894 		bus_dmamap_destroy(dmat, pkt->tp_dmap);
1895 	free(tpl->tpl_pkts, M_DEVBUF, 0);
1896 	tpl->tpl_pkts = NULL;
1897 }
1898 
1899 void
1900 tht_pkt_put(struct tht_pkt_list *tpl, struct tht_pkt *pkt)
1901 {
1902 	TAILQ_REMOVE(&tpl->tpl_used, pkt, tp_link);
1903 	TAILQ_INSERT_TAIL(&tpl->tpl_free, pkt, tp_link);
1904 }
1905 
1906 struct tht_pkt *
1907 tht_pkt_get(struct tht_pkt_list *tpl)
1908 {
1909 	struct tht_pkt			*pkt;
1910 
1911 	pkt = TAILQ_FIRST(&tpl->tpl_free);
1912 	if (pkt != NULL) {
1913 		TAILQ_REMOVE(&tpl->tpl_free, pkt, tp_link);
1914 		TAILQ_INSERT_TAIL(&tpl->tpl_used, pkt, tp_link);
1915 
1916 	}
1917 
1918 	return (pkt);
1919 }
1920 
1921 struct tht_pkt *
1922 tht_pkt_used(struct tht_pkt_list *tpl)
1923 {
1924 	return (TAILQ_FIRST(&tpl->tpl_used));
1925 }
1926