xref: /openbsd-src/sys/dev/pci/if_aq_pci.c (revision c1a45aed656e7d5627c30c92421893a76f370ccb)
1 /* $OpenBSD: if_aq_pci.c,v 1.16 2022/04/06 18:59:29 naddy Exp $ */
2 /*	$NetBSD: if_aq.c,v 1.27 2021/06/16 00:21:18 riastradh Exp $	*/
3 
4 /*
5  * Copyright (c) 2021 Jonathan Matthew <jonathan@d14n.org>
6  * Copyright (c) 2021 Mike Larkin <mlarkin@openbsd.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 /**
22  * aQuantia Corporation Network Driver
23  * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
24  *
25  * Redistribution and use in source and binary forms, with or without
26  * modification, are permitted provided that the following conditions
27  * are met:
28  *
29  *   (1) Redistributions of source code must retain the above
30  *   copyright notice, this list of conditions and the following
31  *   disclaimer.
32  *
33  *   (2) Redistributions in binary form must reproduce the above
34  *   copyright notice, this list of conditions and the following
35  *   disclaimer in the documentation and/or other materials provided
36  *   with the distribution.
37  *
38  *   (3) The name of the author may not be used to endorse or promote
39  *   products derived from this software without specific prior
40  *   written permission.
41  *
42  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
43  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
44  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
45  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
46  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
47  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
48  * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
49  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
50  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
51  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
52  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
53  *
54  */
55 
56 /*-
57  * Copyright (c) 2020 Ryo Shimizu <ryo@nerv.org>
58  * All rights reserved.
59  *
60  * Redistribution and use in source and binary forms, with or without
61  * modification, are permitted provided that the following conditions
62  * are met:
63  * 1. Redistributions of source code must retain the above copyright
64  *    notice, this list of conditions and the following disclaimer.
65  * 2. Redistributions in binary form must reproduce the above copyright
66  *    notice, this list of conditions and the following disclaimer in the
67  *    documentation and/or other materials provided with the distribution.
68  *
69  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
70  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
71  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
72  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
73  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
74  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
75  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
76  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
77  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
78  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
79  * POSSIBILITY OF SUCH DAMAGE.
80  */
81 #include "bpfilter.h"
82 #include "vlan.h"
83 
84 #include <sys/types.h>
85 #include <sys/device.h>
86 #include <sys/param.h>
87 #include <sys/kernel.h>
88 #include <sys/sockio.h>
89 #include <sys/systm.h>
90 #include <sys/intrmap.h>
91 
92 #include <net/if.h>
93 #include <net/if_media.h>
94 #include <net/toeplitz.h>
95 
96 #include <netinet/in.h>
97 #include <netinet/if_ether.h>
98 
99 #include <dev/pci/pcireg.h>
100 #include <dev/pci/pcivar.h>
101 #include <dev/pci/pcidevs.h>
102 
103 #if NBPFILTER > 0
104 #include <net/bpf.h>
105 #endif
106 
107 /* #define AQ_DEBUG 1 */
108 #ifdef AQ_DEBUG
109 #define DPRINTF(x) printf x
110 #else
111 #define DPRINTF(x)
112 #endif /* AQ_DEBUG */
113 
114 #define DEVNAME(_s)	((_s)->sc_dev.dv_xname)
115 
116 #define AQ_BAR0 				0x10
117 #define AQ_MAXQ 				8
118 #define AQ_RSS_KEYSIZE				40
119 #define AQ_RSS_REDIR_ENTRIES			12
120 
121 #define AQ_TXD_NUM 				2048
122 #define AQ_RXD_NUM 				2048
123 
124 #define AQ_TX_MAX_SEGMENTS			32
125 
126 #define AQ_LINKSTAT_IRQ				31
127 
128 #define RPF_ACTION_HOST				1
129 
130 #define AQ_FW_SOFTRESET_REG			0x0000
131 #define  AQ_FW_SOFTRESET_DIS			(1 << 14)
132 #define  AQ_FW_SOFTRESET_RESET			(1 << 15)
133 #define AQ_FW_VERSION_REG			0x0018
134 #define AQ_HW_REVISION_REG			0x001c
135 #define AQ_GLB_NVR_INTERFACE1_REG		0x0100
136 #define AQ_FW_MBOX_CMD_REG			0x0200
137 #define  AQ_FW_MBOX_CMD_EXECUTE			0x00008000
138 #define  AQ_FW_MBOX_CMD_BUSY			0x00000100
139 #define AQ_FW_MBOX_ADDR_REG			0x0208
140 #define AQ_FW_MBOX_VAL_REG			0x020C
141 #define AQ_FW_GLB_CPU_SEM_REG(i)		(0x03a0 + (i) * 4)
142 #define AQ_FW_SEM_RAM_REG			AQ_FW_GLB_CPU_SEM_REG(2)
143 #define AQ_FW_GLB_CTL2_REG			0x0404
144 #define AQ_GLB_GENERAL_PROVISIONING9_REG	0x0520
145 #define AQ_GLB_NVR_PROVISIONING2_REG		0x0534
146 #define AQ_INTR_STATUS_REG			0x2000  /* intr status */
147 #define AQ_INTR_STATUS_CLR_REG			0x2050  /* intr status clear */
148 #define AQ_INTR_MASK_REG			0x2060	/* intr mask set */
149 #define AQ_INTR_MASK_CLR_REG			0x2070	/* intr mask clear */
150 #define AQ_INTR_AUTOMASK_REG			0x2090
151 
152 /* AQ_INTR_IRQ_MAP_TXRX_REG 0x2100-0x2140 */
153 #define AQ_INTR_IRQ_MAP_TXRX_REG(i)		(0x2100 + ((i) / 2) * 4)
154 #define AQ_INTR_IRQ_MAP_TX_REG(i)		AQ_INTR_IRQ_MAP_TXRX_REG(i)
155 #define  AQ_INTR_IRQ_MAP_TX_IRQMAP(i)		(0x1FU << (((i) & 1) ? 16 : 24))
156 #define  AQ_INTR_IRQ_MAP_TX_EN(i)		(1U << (((i) & 1) ? 23 : 31))
157 #define AQ_INTR_IRQ_MAP_RX_REG(i)		AQ_INTR_IRQ_MAP_TXRX_REG(i)
158 #define  AQ_INTR_IRQ_MAP_RX_IRQMAP(i)		(0x1FU << (((i) & 1) ? 0 : 8))
159 #define  AQ_INTR_IRQ_MAP_RX_EN(i)		(1U << (((i) & 1) ? 7 : 15))
160 
161 /* AQ_GEN_INTR_MAP_REG[AQ_RINGS_NUM] 0x2180-0x2200 */
162 #define AQ_GEN_INTR_MAP_REG(i)			(0x2180 + (i) * 4)
163 #define  AQ_B0_ERR_INT				8U
164 
165 #define AQ_INTR_CTRL_REG			0x2300
166 #define  AQ_INTR_CTRL_IRQMODE			((1 << 0) | (1 << 1))
167 #define AQ_INTR_CTRL_IRQMODE_LEGACY		0
168 #define AQ_INTR_CTRL_IRQMODE_MSI		1
169 #define AQ_INTR_CTRL_IRQMODE_MSIX		2
170 #define  AQ_INTR_CTRL_MULTIVEC			(1 << 2)
171 #define  AQ_INTR_CTRL_RESET_DIS			(1 << 29)
172 #define  AQ_INTR_CTRL_RESET_IRQ			(1 << 31)
173 #define AQ_MBOXIF_POWER_GATING_CONTROL_REG	0x32a8
174 
175 #define FW_MPI_MBOX_ADDR_REG			0x0360
176 #define FW1X_MPI_INIT1_REG			0x0364
177 #define FW1X_MPI_INIT2_REG			0x0370
178 #define FW1X_MPI_EFUSEADDR_REG			0x0374
179 
#define FW2X_MPI_EFUSEADDR_REG			0x0364
#define FW2X_MPI_CONTROL_REG			0x0368  /* 64bit */
#define FW2X_MPI_STATE_REG			0x0370  /* 64bit */

/*
 * Boot-loader exit status register; the RBL_STATUS_* values below are
 * the codes it reports.  (A redundant duplicate #define of this
 * register has been dropped.)
 */
#define FW_BOOT_EXIT_CODE_REG			0x0388
#define  RBL_STATUS_DEAD			0x0000dead
#define  RBL_STATUS_SUCCESS			0x0000abba
#define  RBL_STATUS_FAILURE			0x00000bad
#define  RBL_STATUS_HOST_BOOT			0x0000f1a7
190 #define FW_MPI_DAISY_CHAIN_STATUS_REG		0x0704
191 #define AQ_PCI_REG_CONTROL_6_REG		0x1014
192 
193 #define FW_MPI_RESETCTRL_REG			0x4000
194 #define  FW_MPI_RESETCTRL_RESET_DIS		(1 << 29)
195 
196 #define RX_SYSCONTROL_REG			0x5000
197 #define  RX_SYSCONTROL_RESET_DIS		(1 << 29)
198 
199 #define RX_TCP_RSS_HASH_REG			0x5040
200 #define  RX_TCP_RSS_HASH_RPF2			(0xf << 16)
201 #define  RX_TCP_RSS_HASH_TYPE			(0xffff)
202 
203 #define RPF_L2BC_REG				0x5100
204 #define  RPF_L2BC_EN				(1 << 0)
205 #define  RPF_L2BC_PROMISC			(1 << 3)
206 #define  RPF_L2BC_ACTION			0x7000
207 #define  RPF_L2BC_THRESHOLD			0xFFFF0000
208 
209 #define AQ_HW_MAC_OWN				0
210 
211 /* RPF_L2UC_*_REG[34] (actual [38]?) */
212 #define RPF_L2UC_LSW_REG(i)                     (0x5110 + (i) * 8)
213 #define RPF_L2UC_MSW_REG(i)                     (0x5114 + (i) * 8)
214 #define  RPF_L2UC_MSW_MACADDR_HI		0xFFFF
215 #define  RPF_L2UC_MSW_ACTION			0x70000
216 #define  RPF_L2UC_MSW_EN			(1 << 31)
217 #define AQ_HW_MAC_NUM				34
218 
219 /* RPF_MCAST_FILTER_REG[8] 0x5250-0x5270 */
220 #define RPF_MCAST_FILTER_REG(i)			(0x5250 + (i) * 4)
221 #define  RPF_MCAST_FILTER_EN			(1 << 31)
222 #define RPF_MCAST_FILTER_MASK_REG		0x5270
223 #define  RPF_MCAST_FILTER_MASK_ALLMULTI		(1 << 14)
224 
225 #define RPF_VLAN_MODE_REG			0x5280
226 #define  RPF_VLAN_MODE_PROMISC			(1 << 1)
227 #define  RPF_VLAN_MODE_ACCEPT_UNTAGGED		(1 << 2)
228 #define  RPF_VLAN_MODE_UNTAGGED_ACTION		0x38
229 
230 #define RPF_VLAN_TPID_REG                       0x5284
231 #define  RPF_VLAN_TPID_OUTER			0xFFFF0000
232 #define  RPF_VLAN_TPID_INNER			0xFFFF
233 
234 /* RPF_ETHERTYPE_FILTER_REG[AQ_RINGS_NUM] 0x5300-0x5380 */
235 #define RPF_ETHERTYPE_FILTER_REG(i)		(0x5300 + (i) * 4)
236 #define  RPF_ETHERTYPE_FILTER_EN		(1 << 31)
237 
238 /* RPF_L3_FILTER_REG[8] 0x5380-0x53a0 */
239 #define RPF_L3_FILTER_REG(i)			(0x5380 + (i) * 4)
240 #define  RPF_L3_FILTER_L4_EN			(1 << 31)
241 
242 #define RX_FLR_RSS_CONTROL1_REG			0x54c0
243 #define  RX_FLR_RSS_CONTROL1_EN			(1 << 31)
244 
245 #define RPF_RPB_RX_TC_UPT_REG                   0x54c4
246 #define  RPF_RPB_RX_TC_UPT_MASK(i)              (0x00000007 << ((i) * 4))
247 
248 #define RPF_RSS_KEY_ADDR_REG			0x54d0
249 #define  RPF_RSS_KEY_ADDR			0x1f
250 #define  RPF_RSS_KEY_WR_EN			(1 << 5)
251 #define RPF_RSS_KEY_WR_DATA_REG			0x54d4
252 #define RPF_RSS_KEY_RD_DATA_REG			0x54d8
253 
254 #define RPF_RSS_REDIR_ADDR_REG			0x54e0
255 #define  RPF_RSS_REDIR_ADDR			0xf
256 #define  RPF_RSS_REDIR_WR_EN			(1 << 4)
257 
258 #define RPF_RSS_REDIR_WR_DATA_REG		0x54e4
259 
260 
261 #define RPO_HWCSUM_REG				0x5580
262 #define  RPO_HWCSUM_L4CSUM_EN			(1 << 0)
263 #define  RPO_HWCSUM_IP4CSUM_EN			(1 << 1)
264 
265 #define RPB_RPF_RX_REG				0x5700
266 #define  RPB_RPF_RX_TC_MODE			(1 << 8)
267 #define  RPB_RPF_RX_FC_MODE			0x30
268 #define  RPB_RPF_RX_BUF_EN			(1 << 0)
269 
270 /* RPB_RXB_BUFSIZE_REG[AQ_TRAFFICCLASS_NUM] 0x5710-0x5790 */
271 #define RPB_RXB_BUFSIZE_REG(i)			(0x5710 + (i) * 0x10)
272 #define  RPB_RXB_BUFSIZE			0x1FF
273 #define RPB_RXB_XOFF_REG(i)			(0x5714 + (i) * 0x10)
274 #define  RPB_RXB_XOFF_EN			(1 << 31)
275 #define  RPB_RXB_XOFF_THRESH_HI                 0x3FFF0000
276 #define  RPB_RXB_XOFF_THRESH_LO                 0x3FFF
277 
278 #define RX_DMA_DESC_CACHE_INIT_REG		0x5a00
279 #define  RX_DMA_DESC_CACHE_INIT			(1 << 0)
280 
281 #define RX_DMA_INT_DESC_WRWB_EN_REG		0x5a30
282 #define  RX_DMA_INT_DESC_WRWB_EN		(1 << 2)
283 #define  RX_DMA_INT_DESC_MODERATE_EN		(1 << 3)
284 
285 #define RX_INTR_MODERATION_CTL_REG(i)		(0x5a40 + (i) * 4)
286 #define  RX_INTR_MODERATION_CTL_EN		(1 << 1)
287 #define  RX_INTR_MODERATION_CTL_MIN		(0xFF << 8)
288 #define  RX_INTR_MODERATION_CTL_MAX		(0x1FF << 16)
289 
290 #define RX_DMA_DESC_BASE_ADDRLSW_REG(i)		(0x5b00 + (i) * 0x20)
291 #define RX_DMA_DESC_BASE_ADDRMSW_REG(i)		(0x5b04 + (i) * 0x20)
292 #define RX_DMA_DESC_REG(i)			(0x5b08 + (i) * 0x20)
293 #define  RX_DMA_DESC_LEN			(0x3FF << 3)
294 #define  RX_DMA_DESC_RESET			(1 << 25)
295 #define  RX_DMA_DESC_HEADER_SPLIT		(1 << 28)
296 #define  RX_DMA_DESC_VLAN_STRIP			(1 << 29)
297 #define  RX_DMA_DESC_EN				(1 << 31)
298 #define RX_DMA_DESC_HEAD_PTR_REG(i)		(0x5b0c + (i) * 0x20)
299 #define  RX_DMA_DESC_HEAD_PTR			0xFFF
300 #define RX_DMA_DESC_TAIL_PTR_REG(i)		(0x5b10 + (i) * 0x20)
301 #define RX_DMA_DESC_BUFSIZE_REG(i)		(0x5b18 + (i) * 0x20)
302 #define  RX_DMA_DESC_BUFSIZE_DATA		0x000F
303 #define  RX_DMA_DESC_BUFSIZE_HDR		0x0FF0
304 
305 #define RX_DMA_DCAD_REG(i)			(0x6100 + (i) * 4)
306 #define  RX_DMA_DCAD_CPUID			0xFF
307 #define  RX_DMA_DCAD_PAYLOAD_EN			(1 << 29)
308 #define  RX_DMA_DCAD_HEADER_EN			(1 << 30)
309 #define  RX_DMA_DCAD_DESC_EN			(1 << 31)
310 
311 #define RX_DMA_DCA_REG				0x6180
312 #define  RX_DMA_DCA_EN				(1 << 31)
313 #define  RX_DMA_DCA_MODE			0xF
314 
315 #define TX_SYSCONTROL_REG			0x7000
316 #define  TX_SYSCONTROL_RESET_DIS		(1 << 29)
317 
318 #define TX_TPO2_REG				0x7040
319 #define  TX_TPO2_EN				(1 << 16)
320 
321 #define TPS_DESC_VM_ARB_MODE_REG		0x7300
322 #define  TPS_DESC_VM_ARB_MODE			(1 << 0)
323 #define TPS_DESC_RATE_REG			0x7310
324 #define  TPS_DESC_RATE_TA_RST			(1 << 31)
325 #define  TPS_DESC_RATE_LIM			0x7FF
326 #define TPS_DESC_TC_ARB_MODE_REG		0x7200
327 #define  TPS_DESC_TC_ARB_MODE			0x3
328 #define TPS_DATA_TC_ARB_MODE_REG		0x7100
329 #define  TPS_DATA_TC_ARB_MODE			(1 << 0)
330 
331 /* TPS_DATA_TCT_REG[AQ_TRAFFICCLASS_NUM] 0x7110-0x7130 */
332 #define TPS_DATA_TCT_REG(i)			(0x7110 + (i) * 4)
333 #define  TPS_DATA_TCT_CREDIT_MAX		0xFFF0000
334 #define  TPS_DATA_TCT_WEIGHT			0x1FF
335 /* TPS_DATA_TCT_REG[AQ_TRAFFICCLASS_NUM] 0x7210-0x7230 */
336 #define TPS_DESC_TCT_REG(i)			(0x7210 + (i) * 4)
337 #define  TPS_DESC_TCT_CREDIT_MAX		0xFFF0000
338 #define  TPS_DESC_TCT_WEIGHT			0x1FF
339 
340 #define AQ_HW_TXBUF_MAX         160
341 #define AQ_HW_RXBUF_MAX         320
342 
343 #define TPO_HWCSUM_REG				0x7800
344 #define  TPO_HWCSUM_L4CSUM_EN			(1 << 0)
345 #define  TPO_HWCSUM_IP4CSUM_EN			(1 << 1)
346 
347 #define THM_LSO_TCP_FLAG1_REG			0x7820
348 #define  THM_LSO_TCP_FLAG1_FIRST		0xFFF
349 #define  THM_LSO_TCP_FLAG1_MID			0xFFF0000
350 #define THM_LSO_TCP_FLAG2_REG			0x7824
351 #define  THM_LSO_TCP_FLAG2_LAST			0xFFF
352 
353 #define TPB_TX_BUF_REG				0x7900
354 #define  TPB_TX_BUF_EN				(1 << 0)
355 #define  TPB_TX_BUF_SCP_INS_EN			(1 << 2)
356 #define  TPB_TX_BUF_TC_MODE_EN			(1 << 8)
357 
358 /* TPB_TXB_BUFSIZE_REG[AQ_TRAFFICCLASS_NUM] 0x7910-7990 */
359 #define TPB_TXB_BUFSIZE_REG(i)			(0x7910 + (i) * 0x10)
360 #define  TPB_TXB_BUFSIZE                        (0xFF)
361 #define TPB_TXB_THRESH_REG(i)                   (0x7914 + (i) * 0x10)
362 #define  TPB_TXB_THRESH_HI                      0x1FFF0000
363 #define  TPB_TXB_THRESH_LO                      0x1FFF
364 
365 #define AQ_HW_TX_DMA_TOTAL_REQ_LIMIT_REG	0x7b20
366 
367 #define TX_DMA_INT_DESC_WRWB_EN_REG		0x7b40
368 #define  TX_DMA_INT_DESC_WRWB_EN		(1 << 1)
369 #define  TX_DMA_INT_DESC_MODERATE_EN		(1 << 4)
370 
371 #define TX_DMA_DESC_BASE_ADDRLSW_REG(i)		(0x7c00 + (i) * 0x40)
372 #define TX_DMA_DESC_BASE_ADDRMSW_REG(i)		(0x7c04 + (i) * 0x40)
373 #define TX_DMA_DESC_REG(i)			(0x7c08 + (i) * 0x40)
374 #define  TX_DMA_DESC_LEN			0x00000FF8
375 #define  TX_DMA_DESC_EN				0x80000000
376 #define TX_DMA_DESC_HEAD_PTR_REG(i)		(0x7c0c + (i) * 0x40)
377 #define  TX_DMA_DESC_HEAD_PTR			0x00000FFF
378 #define TX_DMA_DESC_TAIL_PTR_REG(i)		(0x7c10 + (i) * 0x40)
379 #define TX_DMA_DESC_WRWB_THRESH_REG(i)		(0x7c18 + (i) * 0x40)
380 #define  TX_DMA_DESC_WRWB_THRESH		0x00003F00
381 
382 #define TDM_DCAD_REG(i)				(0x8400 + (i) * 4)
383 #define  TDM_DCAD_CPUID				0x7F
384 #define  TDM_DCAD_CPUID_EN			0x80000000
385 
386 #define TDM_DCA_REG				0x8480
387 #define  TDM_DCA_EN				(1 << 31)
388 #define  TDM_DCA_MODE				0xF
389 
390 #define TX_INTR_MODERATION_CTL_REG(i)		(0x8980 + (i) * 4)
391 #define  TX_INTR_MODERATION_CTL_EN		(1 << 1)
392 #define  TX_INTR_MODERATION_CTL_MIN		(0xFF << 8)
393 #define  TX_INTR_MODERATION_CTL_MAX		(0x1FF << 16)
394 
/*
 * __LOWEST_SET_BIT(m): isolate the least-significant set bit of m
 * (two's-complement identity m & -m; yields 0 for m == 0).
 * __SHIFTIN(x, m): place x into the register field selected by mask m
 * by multiplying by that lowest bit (NetBSD <sys/cdefs.h> idiom).
 */
#define __LOWEST_SET_BIT(__mask) (((uint32_t)(__mask)) & (0u - ((uint32_t)(__mask))))
#define __SHIFTIN(__x, __mask) ((__x) * __LOWEST_SET_BIT(__mask))
397 
/*
 * 32-bit register access helpers.  AQ_READ_REG is required by
 * AQ_WRITE_REG_BIT and AQ_READ64_REG below; it was previously wrapped
 * in "#if 0", which left those macros expanding to an undefined name.
 */
#define AQ_READ_REG(sc, reg) \
	bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (reg))

#define AQ_WRITE_REG(sc, reg, val) \
	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))
405 
/*
 * Read-modify-write of a register field.  `mask` selects the field;
 * `val` is the field value, shifted into position with __SHIFTIN.
 * A zero `val` only clears the field.
 */
#define AQ_WRITE_REG_BIT(sc, reg, mask, val)                    \
	do {                                                    \
		uint32_t _v;                                    \
		_v = AQ_READ_REG((sc), (reg));                  \
		_v &= ~(mask);                                  \
		if ((val) != 0)                                 \
			_v |= __SHIFTIN((val), (mask));         \
		AQ_WRITE_REG((sc), (reg), _v);                  \
	} while (/* CONSTCOND */ 0)
415 
/* 64-bit register access as two 32-bit accesses; low word at `reg`. */
#define AQ_READ64_REG(sc, reg)					\
	((uint64_t)AQ_READ_REG(sc, reg) |			\
	(((uint64_t)AQ_READ_REG(sc, (reg) + 4)) << 32))

/*
 * Macro parameters are fully parenthesized so argument expressions
 * such as AQ_WRITE64_REG(sc, base + off, a | b) expand correctly.
 */
#define AQ_WRITE64_REG(sc, reg, val)				\
	do {							\
		AQ_WRITE_REG(sc, (reg), (uint32_t)(val));	\
		AQ_WRITE_REG(sc, (reg) + 4, (uint32_t)((val) >> 32)); \
	} while (/* CONSTCOND */0)
425 
/*
 * Busy-wait until `expr` becomes true, polling every `us` microseconds,
 * at most `n` times.  If `errp` is non-NULL it is set to ETIMEDOUT when
 * the retry budget is exhausted, 0 otherwise.  `expr` is re-evaluated
 * on every iteration.
 */
#define WAIT_FOR(expr, us, n, errp)                             \
	do {                                                    \
		unsigned int _n;                                \
		for (_n = n; (!(expr)) && _n != 0; --_n) {      \
			delay((us));                            \
		}                                               \
		if ((errp != NULL)) {                           \
			if (_n == 0)                            \
				*(errp) = ETIMEDOUT;            \
			else                                    \
				*(errp) = 0;                    \
		}                                               \
	} while (/* CONSTCOND */ 0)
439 
440 #define FW_VERSION_MAJOR(sc)	(((sc)->sc_fw_version >> 24) & 0xff)
441 #define FW_VERSION_MINOR(sc)	(((sc)->sc_fw_version >> 16) & 0xff)
442 #define FW_VERSION_BUILD(sc)	((sc)->sc_fw_version & 0xffff)
443 
444 #define FEATURES_MIPS		0x00000001
445 #define FEATURES_TPO2		0x00000002
446 #define FEATURES_RPF2		0x00000004
447 #define FEATURES_MPI_AQ		0x00000008
448 #define FEATURES_REV_A0		0x10000000
449 #define FEATURES_REV_A		(FEATURES_REV_A0)
450 #define FEATURES_REV_B0		0x20000000
451 #define FEATURES_REV_B1		0x40000000
452 #define FEATURES_REV_B		(FEATURES_REV_B0|FEATURES_REV_B1)
453 
/* lock for FW2X_MPI_{CONTROL,STATE}_REG read-modify-write */
/*
 * No trailing semicolons here: callers terminate the statement
 * themselves, and a semicolon baked into the macro breaks use in
 * unbraced if/else bodies.
 */
#define AQ_MPI_LOCK(sc)		mtx_enter(&(sc)->sc_mpi_mutex)
#define AQ_MPI_UNLOCK(sc)	mtx_leave(&(sc)->sc_mpi_mutex)
457 
458 #define FW2X_CTRL_10BASET_HD			(1 << 0)
459 #define FW2X_CTRL_10BASET_FD			(1 << 1)
460 #define FW2X_CTRL_100BASETX_HD			(1 << 2)
461 #define FW2X_CTRL_100BASET4_HD			(1 << 3)
462 #define FW2X_CTRL_100BASET2_HD			(1 << 4)
463 #define FW2X_CTRL_100BASETX_FD			(1 << 5)
464 #define FW2X_CTRL_100BASET2_FD			(1 << 6)
465 #define FW2X_CTRL_1000BASET_HD			(1 << 7)
466 #define FW2X_CTRL_1000BASET_FD			(1 << 8)
467 #define FW2X_CTRL_2P5GBASET_FD			(1 << 9)
468 #define FW2X_CTRL_5GBASET_FD			(1 << 10)
469 #define FW2X_CTRL_10GBASET_FD			(1 << 11)
470 #define FW2X_CTRL_RESERVED1			(1ULL << 32)
471 #define FW2X_CTRL_10BASET_EEE			(1ULL << 33)
472 #define FW2X_CTRL_RESERVED2			(1ULL << 34)
473 #define FW2X_CTRL_PAUSE				(1ULL << 35)
474 #define FW2X_CTRL_ASYMMETRIC_PAUSE		(1ULL << 36)
475 #define FW2X_CTRL_100BASETX_EEE			(1ULL << 37)
476 #define FW2X_CTRL_RESERVED3			(1ULL << 38)
477 #define FW2X_CTRL_RESERVED4			(1ULL << 39)
478 #define FW2X_CTRL_1000BASET_FD_EEE		(1ULL << 40)
479 #define FW2X_CTRL_2P5GBASET_FD_EEE		(1ULL << 41)
480 #define FW2X_CTRL_5GBASET_FD_EEE		(1ULL << 42)
481 #define FW2X_CTRL_10GBASET_FD_EEE		(1ULL << 43)
482 #define FW2X_CTRL_RESERVED5			(1ULL << 44)
483 #define FW2X_CTRL_RESERVED6			(1ULL << 45)
484 #define FW2X_CTRL_RESERVED7			(1ULL << 46)
485 #define FW2X_CTRL_RESERVED8			(1ULL << 47)
486 #define FW2X_CTRL_RESERVED9			(1ULL << 48)
487 #define FW2X_CTRL_CABLE_DIAG			(1ULL << 49)
488 #define FW2X_CTRL_TEMPERATURE			(1ULL << 50)
489 #define FW2X_CTRL_DOWNSHIFT			(1ULL << 51)
490 #define FW2X_CTRL_PTP_AVB_EN			(1ULL << 52)
491 #define FW2X_CTRL_MEDIA_DETECT			(1ULL << 53)
492 #define FW2X_CTRL_LINK_DROP			(1ULL << 54)
493 #define FW2X_CTRL_SLEEP_PROXY			(1ULL << 55)
494 #define FW2X_CTRL_WOL				(1ULL << 56)
495 #define FW2X_CTRL_MAC_STOP			(1ULL << 57)
496 #define FW2X_CTRL_EXT_LOOPBACK			(1ULL << 58)
497 #define FW2X_CTRL_INT_LOOPBACK			(1ULL << 59)
498 #define FW2X_CTRL_EFUSE_AGENT			(1ULL << 60)
499 #define FW2X_CTRL_WOL_TIMER			(1ULL << 61)
500 #define FW2X_CTRL_STATISTICS			(1ULL << 62)
501 #define FW2X_CTRL_TRANSACTION_ID		(1ULL << 63)
502 
503 #define FW2X_CTRL_RATE_100M			FW2X_CTRL_100BASETX_FD
504 #define FW2X_CTRL_RATE_1G			FW2X_CTRL_1000BASET_FD
505 #define FW2X_CTRL_RATE_2G5			FW2X_CTRL_2P5GBASET_FD
506 #define FW2X_CTRL_RATE_5G			FW2X_CTRL_5GBASET_FD
507 #define FW2X_CTRL_RATE_10G			FW2X_CTRL_10GBASET_FD
508 #define FW2X_CTRL_RATE_MASK		\
509 	(FW2X_CTRL_RATE_100M |		\
510 	 FW2X_CTRL_RATE_1G |		\
511 	 FW2X_CTRL_RATE_2G5 |		\
512 	 FW2X_CTRL_RATE_5G |		\
513 	 FW2X_CTRL_RATE_10G)
514 #define FW2X_CTRL_EEE_MASK		\
515 	(FW2X_CTRL_10BASET_EEE |	\
516 	 FW2X_CTRL_100BASETX_EEE |	\
517 	 FW2X_CTRL_1000BASET_FD_EEE |	\
518 	 FW2X_CTRL_2P5GBASET_FD_EEE |	\
519 	 FW2X_CTRL_5GBASET_FD_EEE |	\
520 	 FW2X_CTRL_10GBASET_FD_EEE)
521 
/* How the firmware was booted, detected during MAC soft reset. */
enum aq_fw_bootloader_mode {
	FW_BOOT_MODE_UNKNOWN = 0,
	FW_BOOT_MODE_FLB,
	FW_BOOT_MODE_RBL_FLASH,
	FW_BOOT_MODE_RBL_HOST_BOOTLOAD
};

/* Physical medium of the port, from the product table below. */
enum aq_media_type {
	AQ_MEDIA_TYPE_UNKNOWN = 0,
	AQ_MEDIA_TYPE_FIBRE,
	AQ_MEDIA_TYPE_TP
};

/* Link speeds as a bitmask so rate sets can be OR'ed together. */
enum aq_link_speed {
	AQ_LINK_NONE    = 0,
	AQ_LINK_100M    = (1 << 0),
	AQ_LINK_1G      = (1 << 1),
	AQ_LINK_2G5     = (1 << 2),
	AQ_LINK_5G      = (1 << 3),
	AQ_LINK_10G     = (1 << 4)
};

#define AQ_LINK_ALL	(AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5 | \
			    AQ_LINK_5G | AQ_LINK_10G )
#define AQ_LINK_AUTO	AQ_LINK_ALL

/* Energy Efficient Ethernet request passed to the firmware. */
enum aq_link_eee {
	AQ_EEE_DISABLE = 0,
	AQ_EEE_ENABLE = 1
};

/* MPI (firmware interface) state used with set_mode/get_mode. */
enum aq_hw_fw_mpi_state {
	MPI_DEINIT      = 0,
	MPI_RESET       = 1,
	MPI_INIT        = 2,
	MPI_POWER       = 4
};

/* Flow control directions, also a bitmask. */
enum aq_link_fc {
        AQ_FC_NONE = 0,
        AQ_FC_RX = (1 << 0),
        AQ_FC_TX = (1 << 1),
        AQ_FC_ALL = (AQ_FC_RX | AQ_FC_TX)
};
566 
/* One bus_dma allocation: map, backing segment and its mapped KVA. */
struct aq_dmamem {
	bus_dmamap_t		aqm_map;
	bus_dma_segment_t	aqm_seg;
	int			aqm_nsegs;
	size_t			aqm_size;
	caddr_t			aqm_kva;
};

/* Accessors for the device address, kernel VA and length of an aq_dmamem. */
#define AQ_DMA_MAP(_aqm)	((_aqm)->aqm_map)
#define AQ_DMA_DVA(_aqm)	((_aqm)->aqm_map->dm_segs[0].ds_addr)
#define AQ_DMA_KVA(_aqm)	((void *)(_aqm)->aqm_kva)
#define AQ_DMA_LEN(_aqm)	((_aqm)->aqm_size)
579 
580 
/* Common header at the start of every firmware mailbox layout. */
struct aq_mailbox_header {
        uint32_t version;
        uint32_t transaction_id;
        int32_t error;
} __packed __aligned(4);

/* Hardware statistics block (fw1.x layout, superset of the fw2.x MSM one). */
struct aq_hw_stats_s {
        uint32_t uprc;
        uint32_t mprc;
        uint32_t bprc;
        uint32_t erpt;
        uint32_t uptc;
        uint32_t mptc;
        uint32_t bptc;
        uint32_t erpr;
        uint32_t mbtc;
        uint32_t bbtc;
        uint32_t mbrc;
        uint32_t bbrc;
        uint32_t ubrc;
        uint32_t ubtc;
        uint32_t ptc;
        uint32_t prc;
        uint32_t dpc;   /* not exists in fw2x_msm_statistics */
        uint32_t cprc;  /* not exists in fw2x_msm_statistics */
} __packed __aligned(4);

/*
 * 64 bits of firmware capability flags as two 32-bit words —
 * presumably the FW2X_CTRL_* bits above; confirm against the fw2.x code.
 */
struct aq_fw2x_capabilities {
        uint32_t caps_lo;
        uint32_t caps_hi;
} __packed __aligned(4);

/*
 * fw2.x MSM statistics: same leading fields as aq_hw_stats_s but
 * without the trailing dpc/cprc counters.
 */
struct aq_fw2x_msm_statistics {
	uint32_t uprc;
	uint32_t mprc;
	uint32_t bprc;
	uint32_t erpt;
	uint32_t uptc;
	uint32_t mptc;
	uint32_t bptc;
	uint32_t erpr;
	uint32_t mbtc;
	uint32_t bbtc;
	uint32_t mbrc;
	uint32_t bbrc;
	uint32_t ubrc;
	uint32_t ubtc;
	uint32_t ptc;
	uint32_t prc;
} __packed __aligned(4);

/* Per-lane cable diagnostic results reported by the PHY. */
struct aq_fw2x_phy_cable_diag_data {
	uint32_t lane_data[4];
} __packed __aligned(4);
635 
/*
 * fw2.x shared mailbox layout; only the leading portion the driver
 * uses is declared here (the trailing "..." marks omitted fields).
 */
struct aq_fw2x_mailbox {		/* struct fwHostInterface */
	struct aq_mailbox_header header;
	struct aq_fw2x_msm_statistics msm;	/* msmStatistics_t msm; */

	uint32_t phy_info1;
#define PHYINFO1_FAULT_CODE	__BITS(31,16)
#define PHYINFO1_PHY_H_BIT	__BITS(0,15)
	uint32_t phy_info2;
#define PHYINFO2_TEMPERATURE	__BITS(15,0)
#define PHYINFO2_CABLE_LEN	__BITS(23,16)

	struct aq_fw2x_phy_cable_diag_data diag_data;
	uint32_t reserved[8];

	struct aq_fw2x_capabilities caps;

	/* ... */
} __packed __aligned(4);
654 
/* RX descriptor in host-write ("read") format: two DMA buffer pointers. */
struct aq_rx_desc_read {
	uint64_t		buf_addr;
	uint64_t		hdr_addr;
} __packed;

/* RX descriptor after hardware write-back: parsed packet metadata. */
struct aq_rx_desc_wb {
	uint32_t		type;
#define AQ_RXDESC_TYPE_RSSTYPE	0x000f
#define AQ_RXDESC_TYPE_ETHER	0x0030
#define AQ_RXDESC_TYPE_PROTO	0x01c0
#define AQ_RXDESC_TYPE_VLAN	(1 << 9)
#define AQ_RXDESC_TYPE_VLAN2	(1 << 10)
#define AQ_RXDESC_TYPE_DMA_ERR	(1 << 12)
#define AQ_RXDESC_TYPE_V4_SUM	(1 << 19)
#define AQ_RXDESC_TYPE_L4_SUM	(1 << 20)
	uint32_t		rss_hash;
	uint16_t		status;
#define AQ_RXDESC_STATUS_DD	(1 << 0)
#define AQ_RXDESC_STATUS_EOP	(1 << 1)
#define AQ_RXDESC_STATUS_MACERR (1 << 2)
#define AQ_RXDESC_STATUS_V4_SUM_NG (1 << 3)
#define AQ_RXDESC_STATUS_L4_SUM_ERR (1 << 4)
#define AQ_RXDESC_STATUS_L4_SUM_OK (1 << 5)
	uint16_t		pkt_len;
	uint16_t		next_desc_ptr;
	uint16_t		vlan;
} __packed;

/*
 * TX descriptor.  ctl1 distinguishes data (TXD) from context (TXC)
 * descriptors; the *_SHIFT/flag defines below describe both layouts.
 */
struct aq_tx_desc {
	uint64_t		buf_addr;
	uint32_t		ctl1;
#define AQ_TXDESC_CTL1_TYPE_TXD	0x00000001
#define AQ_TXDESC_CTL1_TYPE_TXC	0x00000002
#define AQ_TXDESC_CTL1_BLEN_SHIFT 4
#define AQ_TXDESC_CTL1_VLAN_SHIFT 4
#define AQ_TXDESC_CTL1_DD	(1 << 20)
#define AQ_TXDESC_CTL1_CMD_EOP	(1 << 21)
#define AQ_TXDESC_CTL1_CMD_VLAN	(1 << 22)
#define AQ_TXDESC_CTL1_CMD_FCS	(1 << 23)
#define AQ_TXDESC_CTL1_CMD_IP4CSUM (1 << 24)
#define AQ_TXDESC_CTL1_CMD_L4CSUM (1 << 25)
#define AQ_TXDESC_CTL1_CMD_WB	(1 << 27)

#define AQ_TXDESC_CTL1_VID_SHIFT 4
	uint32_t		ctl2;
#define AQ_TXDESC_CTL2_LEN_SHIFT 14
#define AQ_TXDESC_CTL2_CTX_EN	(1 << 13)
} __packed;
703 
/* One descriptor slot: its DMA map and the mbuf currently attached. */
struct aq_slot {
	bus_dmamap_t		 as_map;
	struct mbuf		*as_m;
};

/* Per-queue receive ring state. */
struct aq_rxring {
	struct ifiqueue		*rx_ifiq;
	struct aq_dmamem	 rx_mem;
	struct aq_slot		*rx_slots;
	int			 rx_q;
	int			 rx_irq;

	struct timeout		 rx_refill;
	struct if_rxring	 rx_rxr;
	uint32_t		 rx_prod;
	uint32_t		 rx_cons;

	/*
	 * NOTE(review): rx_m_head/rx_m_tail appear to accumulate a packet
	 * spread over several descriptors — confirm against the rx path.
	 */
	struct mbuf		*rx_m_head;
	struct mbuf		**rx_m_tail;
	int			 rx_m_error;
};

/* Per-queue transmit ring state. */
struct aq_txring {
	struct ifqueue		*tx_ifq;
	struct aq_dmamem	 tx_mem;
	struct aq_slot		*tx_slots;
	int			 tx_q;
	int			 tx_irq;
	uint32_t		 tx_prod;
	uint32_t		 tx_cons;
};

/* A paired tx/rx ring with its interrupt handle (q_ihc). */
struct aq_queues {
	char			 q_name[16];
	void			*q_ihc;
	struct aq_softc		*q_sc;
	int			 q_index;
	struct aq_rxring 	 q_rx;
	struct aq_txring 	 q_tx;
};
744 
745 
struct aq_softc;
/*
 * Firmware interface operations; fw1.x and fw2.x firmware expose
 * different register protocols, so the implementation is selected
 * through this table (presumably by aq_fw_version_init — confirm).
 */
struct aq_firmware_ops {
	int (*reset)(struct aq_softc *);
	int (*set_mode)(struct aq_softc *, enum aq_hw_fw_mpi_state,
	    enum aq_link_speed, enum aq_link_fc, enum aq_link_eee);
	int (*get_mode)(struct aq_softc *, enum aq_hw_fw_mpi_state *,
	    enum aq_link_speed *, enum aq_link_fc *, enum aq_link_eee *);
	int (*get_stats)(struct aq_softc *, struct aq_hw_stats_s *);
};

/* Per-device driver state. */
struct aq_softc {
	struct device		sc_dev;
	uint16_t		sc_product;
	uint16_t		sc_revision;
	bus_dma_tag_t		sc_dmat;
	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_pcitag;
	int			sc_nqueues;
	struct aq_queues	sc_queues[AQ_MAXQ];
	struct intrmap		*sc_intrmap;
	void			*sc_ih;
	bus_space_handle_t	sc_ioh;
	bus_space_tag_t		sc_iot;

	uint32_t		sc_mbox_addr;
	int			sc_rbl_enabled;
	int			sc_fast_start_enabled;
	int			sc_flash_present;
	uint32_t		sc_fw_version;	/* decoded by FW_VERSION_*() */
	const struct		aq_firmware_ops *sc_fw_ops;
	uint64_t		sc_fw_caps;
	enum aq_media_type	sc_media_type;
	enum aq_link_speed	sc_available_rates;
	uint32_t		sc_features;	/* FEATURES_* flags */
	int			sc_linkstat_irq;
	struct arpcom		sc_arpcom;
	struct ifmedia		sc_media;

	struct ether_addr	sc_enaddr;
	struct mutex		sc_mpi_mutex;	/* FW2X MPI reg RMW lock */
};
787 
/* PCI vendor/product pairs this driver supports. */
const struct pci_matchid aq_devices[] = {
	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC100 },
	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC107 },
	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC108 },
	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC109 },
	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC111 },
	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC112 },
	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC100S },
	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC107S },
	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC108S },
	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC109S },
	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC111S },
	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC112S },
	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_D100 },
	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_D107 },
	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_D108 },
	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_D109 },
};

/*
 * Per-product attributes: physical medium (fibre vs. twisted pair)
 * and the set of link rates the part supports.
 */
const struct aq_product {
	pci_vendor_id_t aq_vendor;
	pci_product_id_t aq_product;
	enum aq_media_type aq_media_type;
	enum aq_link_speed aq_available_rates;
} aq_products[] = {
{	PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC100,
	AQ_MEDIA_TYPE_FIBRE, AQ_LINK_ALL
},
{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC107,
	AQ_MEDIA_TYPE_TP, AQ_LINK_ALL
},
{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC108,
	AQ_MEDIA_TYPE_TP, AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5 | AQ_LINK_5G
},
{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC109,
	AQ_MEDIA_TYPE_TP, AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5
},
{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC111,
	AQ_MEDIA_TYPE_TP, AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5 | AQ_LINK_5G
},
{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC112,
	AQ_MEDIA_TYPE_TP, AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5
},
{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC100S,
	AQ_MEDIA_TYPE_FIBRE, AQ_LINK_ALL
},
{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC107S,
	AQ_MEDIA_TYPE_TP, AQ_LINK_ALL
},
{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC108S,
	AQ_MEDIA_TYPE_TP, AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5 | AQ_LINK_5G
},
{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC109S,
	AQ_MEDIA_TYPE_TP, AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5
},
{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC111S,
	AQ_MEDIA_TYPE_TP, AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5 | AQ_LINK_5G
},
{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC112S,
	AQ_MEDIA_TYPE_TP, AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5
},
{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_D100,
	AQ_MEDIA_TYPE_FIBRE, AQ_LINK_ALL
},
{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_D107,
	AQ_MEDIA_TYPE_TP, AQ_LINK_ALL
},
{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_D108,
	AQ_MEDIA_TYPE_TP, AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5 | AQ_LINK_5G
},
{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_D109,
	AQ_MEDIA_TYPE_TP, AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5
}
};
862 
863 int	aq_match(struct device *, void *, void *);
864 void	aq_attach(struct device *, struct device *, void *);
865 int	aq_detach(struct device *, int);
866 int	aq_activate(struct device *, int);
867 int	aq_intr(void *);
868 int	aq_intr_link(void *);
869 int	aq_intr_queue(void *);
870 void	aq_global_software_reset(struct aq_softc *);
871 int	aq_fw_reset(struct aq_softc *);
872 int	aq_mac_soft_reset(struct aq_softc *, enum aq_fw_bootloader_mode *);
873 int	aq_mac_soft_reset_rbl(struct aq_softc *, enum aq_fw_bootloader_mode *);
874 int	aq_mac_soft_reset_flb(struct aq_softc *);
875 int	aq_fw_read_version(struct aq_softc *);
876 int	aq_fw_version_init(struct aq_softc *);
877 int	aq_hw_init_ucp(struct aq_softc *);
878 int	aq_fw_downld_dwords(struct aq_softc *, uint32_t, uint32_t *, uint32_t);
879 int	aq_get_mac_addr(struct aq_softc *);
880 int	aq_init_rss(struct aq_softc *);
881 int	aq_hw_reset(struct aq_softc *);
882 int	aq_hw_init(struct aq_softc *, int, int);
883 void	aq_hw_qos_set(struct aq_softc *);
884 void	aq_l3_filter_set(struct aq_softc *);
885 void	aq_hw_init_tx_path(struct aq_softc *);
886 void	aq_hw_init_rx_path(struct aq_softc *);
887 int	aq_set_mac_addr(struct aq_softc *, int, uint8_t *);
888 int	aq_set_linkmode(struct aq_softc *, enum aq_link_speed,
889     enum aq_link_fc, enum aq_link_eee);
890 void	aq_watchdog(struct ifnet *);
891 void	aq_enable_intr(struct aq_softc *, int, int);
892 int	aq_rxrinfo(struct aq_softc *, struct if_rxrinfo *);
893 int	aq_ioctl(struct ifnet *, u_long, caddr_t);
894 int	aq_up(struct aq_softc *);
895 void	aq_down(struct aq_softc *);
896 void	aq_iff(struct aq_softc *);
897 void	aq_start(struct ifqueue *);
898 void	aq_ifmedia_status(struct ifnet *, struct ifmediareq *);
899 int	aq_ifmedia_change(struct ifnet *);
900 void	aq_update_link_status(struct aq_softc *);
901 
902 void	aq_refill(void *);
903 int	aq_rx_fill(struct aq_softc *, struct aq_rxring *);
904 static inline unsigned int aq_rx_fill_slots(struct aq_softc *,
905 	    struct aq_rxring *, uint);
906 
907 int	aq_dmamem_alloc(struct aq_softc *, struct aq_dmamem *,
908 	    bus_size_t, u_int);
909 void	aq_dmamem_zero(struct aq_dmamem *);
910 void	aq_dmamem_free(struct aq_softc *, struct aq_dmamem *);
911 
912 int	aq_fw1x_reset(struct aq_softc *);
913 int	aq_fw1x_get_mode(struct aq_softc *, enum aq_hw_fw_mpi_state *,
914     enum aq_link_speed *, enum aq_link_fc *, enum aq_link_eee *);
915 int	aq_fw1x_set_mode(struct aq_softc *, enum aq_hw_fw_mpi_state,
916     enum aq_link_speed, enum aq_link_fc, enum aq_link_eee);
917 int	aq_fw1x_get_stats(struct aq_softc *, struct aq_hw_stats_s *);
918 
919 int	aq_fw2x_reset(struct aq_softc *);
920 int	aq_fw2x_get_mode(struct aq_softc *, enum aq_hw_fw_mpi_state *,
921     enum aq_link_speed *, enum aq_link_fc *, enum aq_link_eee *);
922 int	aq_fw2x_set_mode(struct aq_softc *, enum aq_hw_fw_mpi_state,
923     enum aq_link_speed, enum aq_link_fc, enum aq_link_eee);
924 int	aq_fw2x_get_stats(struct aq_softc *, struct aq_hw_stats_s *);
925 
/*
 * Operations for chips running 1.x firmware; selected by
 * aq_fw_version_init() based on the firmware major version.
 */
const struct aq_firmware_ops aq_fw1x_ops = {
	.reset = aq_fw1x_reset,
	.set_mode = aq_fw1x_set_mode,
	.get_mode = aq_fw1x_get_mode,
	.get_stats = aq_fw1x_get_stats,
};
932 
/*
 * Operations for chips running 2.x or 3.x firmware; selected by
 * aq_fw_version_init() based on the firmware major version.
 */
const struct aq_firmware_ops aq_fw2x_ops = {
	.reset = aq_fw2x_reset,
	.set_mode = aq_fw2x_set_mode,
	.get_mode = aq_fw2x_get_mode,
	.get_stats = aq_fw2x_get_stats,
};
939 
/*
 * Autoconf attachment glue: match/attach entry points, no detach
 * routine (NULL), and aq_activate for power-state transitions.
 */
const struct cfattach aq_ca = {
	sizeof(struct aq_softc), aq_match, aq_attach, NULL,
	aq_activate
};
944 
/* Driver definition: instances are named "aq" and are network devices. */
struct cfdriver aq_cd = {
	NULL, "aq", DV_IFNET
};
948 
949 uint32_t
950 AQ_READ_REG(struct aq_softc *sc, uint32_t reg)
951 {
952 	uint32_t res;
953 
954 	res = bus_space_read_4(sc->sc_iot, sc->sc_ioh, reg);
955 
956 	return res;
957 }
958 
959 
960 int
961 aq_match(struct device *dev, void *match, void *aux)
962 {
963 	return pci_matchbyid((struct pci_attach_args *)aux, aq_devices,
964 	    sizeof(aq_devices) / sizeof(aq_devices[0]));
965 }
966 
967 const struct aq_product *
968 aq_lookup(const struct pci_attach_args *pa)
969 {
970 	unsigned int i;
971 
972 	for (i = 0; i < sizeof(aq_products) / sizeof(aq_products[0]); i++) {
973 	if (PCI_VENDOR(pa->pa_id) == aq_products[i].aq_vendor &&
974 		PCI_PRODUCT(pa->pa_id) == aq_products[i].aq_product) {
975 			return &aq_products[i];
976 		}
977 	}
978 
979 	return NULL;
980 }
981 
982 void
983 aq_attach(struct device *parent, struct device *self, void *aux)
984 {
985 	struct aq_softc *sc = (struct aq_softc *)self;
986 	struct pci_attach_args *pa = aux;
987 	const struct aq_product *aqp;
988 	pcireg_t bar, memtype;
989 	pci_chipset_tag_t pc;
990 	pci_intr_handle_t ih;
991 	int (*isr)(void *);
992 	const char *intrstr;
993 	pcitag_t tag;
994 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
995 	int txmin, txmax, rxmin, rxmax;
996 	int irqmode, irqnum;
997 	int i;
998 
999 	mtx_init(&sc->sc_mpi_mutex, IPL_NET);
1000 
1001 	sc->sc_dmat = pa->pa_dmat;
1002 	sc->sc_pc = pc = pa->pa_pc;
1003 	sc->sc_pcitag = tag = pa->pa_tag;
1004 
1005 	sc->sc_product = PCI_PRODUCT(pa->pa_id);
1006 	sc->sc_revision = PCI_REVISION(pa->pa_class);
1007 
1008 	aqp = aq_lookup(pa);
1009 
1010 	bar = pci_conf_read(pc, tag, AQ_BAR0);
1011 	if (PCI_MAPREG_TYPE(bar) != PCI_MAPREG_TYPE_MEM) {
1012 		printf(": wrong BAR type\n");
1013 		return;
1014 	}
1015 
1016 	memtype = pci_mapreg_type(pc, tag, AQ_BAR0);
1017 	if (pci_mapreg_map(pa, AQ_BAR0, memtype, 0, &sc->sc_iot, &sc->sc_ioh,
1018 	    NULL, NULL, 0)) {
1019 		printf(": failed to map BAR0\n");
1020 		return;
1021 	}
1022 
1023 	sc->sc_nqueues = 1;
1024 	sc->sc_linkstat_irq = AQ_LINKSTAT_IRQ;
1025 	isr = aq_intr;
1026 	irqnum = 0;
1027 
1028 	if (pci_intr_map_msix(pa, 0, &ih) == 0) {
1029 		int nmsix = pci_intr_msix_count(pa);
1030 		if (nmsix > 1) {
1031 			nmsix--;
1032 			sc->sc_intrmap = intrmap_create(&sc->sc_dev,
1033 			    nmsix, AQ_MAXQ, INTRMAP_POWEROF2);
1034 			sc->sc_nqueues = intrmap_count(sc->sc_intrmap);
1035 			KASSERT(sc->sc_nqueues > 0);
1036 			KASSERT(powerof2(sc->sc_nqueues));
1037 
1038 			sc->sc_linkstat_irq = 0;
1039 			isr = aq_intr_link;
1040 			irqnum++;
1041 		}
1042 		irqmode = AQ_INTR_CTRL_IRQMODE_MSIX;
1043 	} else if (pci_intr_map_msi(pa, &ih) == 0) {
1044 		irqmode = AQ_INTR_CTRL_IRQMODE_MSI;
1045 	} else if (pci_intr_map(pa, &ih) == 0) {
1046 		irqmode = AQ_INTR_CTRL_IRQMODE_LEGACY;
1047 	} else {
1048 		printf(": failed to map interrupt\n");
1049 		return;
1050 	}
1051 
1052 	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih,
1053 	    IPL_NET | IPL_MPSAFE, isr, sc, self->dv_xname);
1054 	intrstr = pci_intr_string(pa->pa_pc, ih);
1055 	if (intrstr)
1056 		printf(": %s", intrstr);
1057 
1058 	if (sc->sc_nqueues > 1)
1059 		printf(", %d queues", sc->sc_nqueues);
1060 
1061 	if (aq_fw_reset(sc))
1062 		return;
1063 
1064 	DPRINTF((", FW version 0x%x", sc->sc_fw_version));
1065 
1066 	if (aq_fw_version_init(sc))
1067 		return;
1068 
1069 	if (aq_hw_init_ucp(sc))
1070 		return;
1071 
1072 	if (aq_hw_reset(sc))
1073 		return;
1074 
1075 	if (aq_get_mac_addr(sc))
1076 		return;
1077 
1078 	if (aq_init_rss(sc))
1079 		return;
1080 
1081 	if (aq_hw_init(sc, irqmode, (sc->sc_nqueues > 1)))
1082 		return;
1083 
1084 	sc->sc_media_type = aqp->aq_media_type;
1085 	sc->sc_available_rates = aqp->aq_available_rates;
1086 
1087 	ifmedia_init(&sc->sc_media, IFM_IMASK, aq_ifmedia_change,
1088 	    aq_ifmedia_status);
1089 
1090 	bcopy(sc->sc_enaddr.ether_addr_octet, sc->sc_arpcom.ac_enaddr, 6);
1091 	strlcpy(ifp->if_xname, self->dv_xname, IFNAMSIZ);
1092 	ifp->if_softc = sc;
1093 	ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX;
1094 	ifp->if_xflags = IFXF_MPSAFE;
1095 	ifp->if_ioctl = aq_ioctl;
1096 	ifp->if_qstart = aq_start;
1097 	ifp->if_watchdog = aq_watchdog;
1098 	ifp->if_hardmtu = 9000;
1099 	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
1100 	    IFCAP_CSUM_UDPv4 | IFCAP_CSUM_UDPv6 | IFCAP_CSUM_TCPv4 |
1101 	    IFCAP_CSUM_TCPv6;
1102 #if NVLAN > 0
1103 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
1104 #endif
1105 	ifq_set_maxlen(&ifp->if_snd, AQ_TXD_NUM);
1106 
1107 	ifmedia_init(&sc->sc_media, IFM_IMASK, aq_ifmedia_change,
1108 	    aq_ifmedia_status);
1109 	if (sc->sc_available_rates & AQ_LINK_100M) {
1110 		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_100_TX, 0, NULL);
1111 		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_100_TX|IFM_FDX, 0,
1112 		    NULL);
1113 	}
1114 
1115 	if (sc->sc_available_rates & AQ_LINK_1G) {
1116 		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_1000_T, 0, NULL);
1117 		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_1000_T|IFM_FDX, 0,
1118 		    NULL);
1119 	}
1120 
1121 	if (sc->sc_available_rates & AQ_LINK_2G5) {
1122 		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T, 0, NULL);
1123 		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T | IFM_FDX,
1124 		    0, NULL);
1125 	}
1126 
1127 	if (sc->sc_available_rates & AQ_LINK_5G) {
1128 		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_5000_T, 0, NULL);
1129 		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_5000_T | IFM_FDX,
1130 		    0, NULL);
1131 	}
1132 
1133 	if (sc->sc_available_rates & AQ_LINK_10G) {
1134 		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10G_T, 0, NULL);
1135 		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10G_T | IFM_FDX,
1136 		    0, NULL);
1137 	}
1138 
1139 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1140 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO | IFM_FDX, 0, NULL);
1141 	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
1142 	aq_set_linkmode(sc, AQ_LINK_AUTO, AQ_FC_NONE, AQ_EEE_DISABLE);
1143 
1144         if_attach(ifp);
1145         ether_ifattach(ifp);
1146 
1147 	if_attach_iqueues(ifp, sc->sc_nqueues);
1148 	if_attach_queues(ifp, sc->sc_nqueues);
1149 
1150 	/*
1151 	 * set interrupt moderation for up to 20k interrupts per second,
1152 	 * more rx than tx.  these values are in units of 2us.
1153 	 */
1154 	txmin = 20;
1155 	txmax = 200;
1156 	rxmin = 6;
1157 	rxmax = 60;
1158 
1159 	for (i = 0; i < sc->sc_nqueues; i++) {
1160 		struct aq_queues *aq = &sc->sc_queues[i];
1161 		struct aq_rxring *rx = &aq->q_rx;
1162 		struct aq_txring *tx = &aq->q_tx;
1163 		pci_intr_handle_t ih;
1164 
1165 		aq->q_sc = sc;
1166 		aq->q_index = i;
1167 		rx->rx_q = i;
1168 		rx->rx_ifiq = ifp->if_iqs[i];
1169 		rx->rx_m_head = NULL;
1170 		rx->rx_m_tail = &rx->rx_m_head;
1171 		rx->rx_m_error = 0;
1172 		ifp->if_iqs[i]->ifiq_softc = aq;
1173 		timeout_set(&rx->rx_refill, aq_refill, rx);
1174 
1175 		tx->tx_q = i;
1176 		tx->tx_ifq = ifp->if_ifqs[i];
1177 		ifp->if_ifqs[i]->ifq_softc = aq;
1178 
1179 		snprintf(aq->q_name, sizeof(aq->q_name), "%s:%u",
1180 		    DEVNAME(sc), i);
1181 
1182 		if (sc->sc_nqueues > 1) {
1183 			if (pci_intr_map_msix(pa, irqnum, &ih)) {
1184 				printf("%s: unable to map msi-x vector %d\n",
1185 				    DEVNAME(sc), irqnum);
1186 				return;
1187 			}
1188 
1189 			aq->q_ihc = pci_intr_establish_cpu(sc->sc_pc, ih,
1190 			    IPL_NET | IPL_MPSAFE, intrmap_cpu(sc->sc_intrmap, i),
1191 			    aq_intr_queue, aq, aq->q_name);
1192 			if (aq->q_ihc == NULL) {
1193 				printf("%s: unable to establish interrupt %d\n",
1194 				    DEVNAME(sc), irqnum);
1195 				return;
1196 			}
1197 			rx->rx_irq = irqnum;
1198 			tx->tx_irq = irqnum;
1199 			irqnum++;
1200 		} else {
1201 			rx->rx_irq = irqnum++;
1202 			tx->tx_irq = irqnum++;
1203 		}
1204 
1205 		AQ_WRITE_REG_BIT(sc, TX_INTR_MODERATION_CTL_REG(i),
1206 		    TX_INTR_MODERATION_CTL_MIN, txmin);
1207 		AQ_WRITE_REG_BIT(sc, TX_INTR_MODERATION_CTL_REG(i),
1208 		    TX_INTR_MODERATION_CTL_MAX, txmax);
1209 		AQ_WRITE_REG_BIT(sc, TX_INTR_MODERATION_CTL_REG(i),
1210 		    TX_INTR_MODERATION_CTL_EN, 1);
1211 		AQ_WRITE_REG_BIT(sc, RX_INTR_MODERATION_CTL_REG(i),
1212 		    RX_INTR_MODERATION_CTL_MIN, rxmin);
1213 		AQ_WRITE_REG_BIT(sc, RX_INTR_MODERATION_CTL_REG(i),
1214 		    RX_INTR_MODERATION_CTL_MAX, rxmax);
1215 		AQ_WRITE_REG_BIT(sc, RX_INTR_MODERATION_CTL_REG(i),
1216 		    RX_INTR_MODERATION_CTL_EN, 1);
1217 	}
1218 
1219 	AQ_WRITE_REG_BIT(sc, TX_DMA_INT_DESC_WRWB_EN_REG,
1220 	    TX_DMA_INT_DESC_WRWB_EN, 0);
1221 	AQ_WRITE_REG_BIT(sc, TX_DMA_INT_DESC_WRWB_EN_REG,
1222 	    TX_DMA_INT_DESC_MODERATE_EN, 1);
1223 	AQ_WRITE_REG_BIT(sc, RX_DMA_INT_DESC_WRWB_EN_REG,
1224 	    RX_DMA_INT_DESC_WRWB_EN, 0);
1225 	AQ_WRITE_REG_BIT(sc, RX_DMA_INT_DESC_WRWB_EN_REG,
1226 	    RX_DMA_INT_DESC_MODERATE_EN, 1);
1227 
1228 	aq_enable_intr(sc, 1, 0);
1229 	printf("\n");
1230 }
1231 
1232 int
1233 aq_fw_reset(struct aq_softc *sc)
1234 {
1235 	uint32_t ver, v, boot_exit_code;
1236 	int i, error;
1237 	enum aq_fw_bootloader_mode mode;
1238 
1239 	mode = FW_BOOT_MODE_UNKNOWN;
1240 
1241 	ver = AQ_READ_REG(sc, AQ_FW_VERSION_REG);
1242 
1243 	for (i = 1000; i > 0; i--) {
1244 		v = AQ_READ_REG(sc, FW_MPI_DAISY_CHAIN_STATUS_REG);
1245 		boot_exit_code = AQ_READ_REG(sc, FW_BOOT_EXIT_CODE_REG);
1246 		if (v != 0x06000000 || boot_exit_code != 0)
1247 			break;
1248 	}
1249 
1250 	if (i <= 0) {
1251 		printf("%s: F/W reset failed. Neither RBL nor FLB started",
1252 		    DEVNAME(sc));
1253 		return ETIMEDOUT;
1254 	}
1255 
1256 	sc->sc_rbl_enabled = (boot_exit_code != 0);
1257 
1258 	/*
1259 	 * Having FW version 0 is an indicator that cold start
1260 	 * is in progress. This means two things:
1261 	 * 1) Driver have to wait for FW/HW to finish boot (500ms giveup)
1262 	 * 2) Driver may skip reset sequence and save time.
1263 	 */
1264 	if (sc->sc_fast_start_enabled && (ver != 0)) {
1265 		error = aq_fw_read_version(sc);
1266 		/* Skip reset as it just completed */
1267 		if (error == 0)
1268 			return 0;
1269 	}
1270 
1271 	error = aq_mac_soft_reset(sc, &mode);
1272 	if (error != 0) {
1273 		printf("%s: MAC reset failed: %d\n", DEVNAME(sc), error);
1274 		return error;
1275 	}
1276 
1277 	switch (mode) {
1278 	case FW_BOOT_MODE_FLB:
1279 		DPRINTF(("%s: FLB> F/W successfully loaded from flash.",
1280 		    DEVNAME(sc)));
1281 		sc->sc_flash_present = 1;
1282 		return aq_fw_read_version(sc);
1283 	case FW_BOOT_MODE_RBL_FLASH:
1284 		DPRINTF(("%s: RBL> F/W loaded from flash. Host Bootload "
1285 		    "disabled.", DEVNAME(sc)));
1286 		sc->sc_flash_present = 1;
1287 		return aq_fw_read_version(sc);
1288 	case FW_BOOT_MODE_UNKNOWN:
1289 		printf("%s: F/W bootload error: unknown bootloader type",
1290 		    DEVNAME(sc));
1291 		return ENOTSUP;
1292 	case FW_BOOT_MODE_RBL_HOST_BOOTLOAD:
1293 		printf("%s: RBL> F/W Host Bootload not implemented", DEVNAME(sc));
1294 		return ENOTSUP;
1295 	}
1296 
1297 	return ENOTSUP;
1298 }
1299 
/*
 * Reset the MAC through the RBL (ROM boot loader) and wait for it to
 * finish rebooting.  On success *mode reports whether the firmware was
 * loaded from flash or must be host-bootloaded.  Returns 0 or EBUSY.
 *
 * The 0x40e1/0x40e0 control values are undocumented magic taken from
 * the vendor driver.
 */
int
aq_mac_soft_reset_rbl(struct aq_softc *sc, enum aq_fw_bootloader_mode *mode)
{
	int timo;

	DPRINTF(("%s: RBL> MAC reset STARTED!\n", DEVNAME(sc)));

	AQ_WRITE_REG(sc, AQ_FW_GLB_CTL2_REG, 0x40e1);
	AQ_WRITE_REG(sc, AQ_FW_GLB_CPU_SEM_REG(0), 1);
	AQ_WRITE_REG(sc, AQ_MBOXIF_POWER_GATING_CONTROL_REG, 0);

	/* MAC FW will reload PHY FW if 1E.1000.3 was cleaned - #undone */
	AQ_WRITE_REG(sc, FW_BOOT_EXIT_CODE_REG, RBL_STATUS_DEAD);

	aq_global_software_reset(sc);

	AQ_WRITE_REG(sc, AQ_FW_GLB_CTL2_REG, 0x40e0);

	/* Wait for RBL to finish boot process. */
#define RBL_TIMEOUT_MS	10000
	uint16_t rbl_status;
	for (timo = RBL_TIMEOUT_MS; timo > 0; timo--) {
		rbl_status = AQ_READ_REG(sc, FW_BOOT_EXIT_CODE_REG) & 0xffff;
		/* Anything besides 0/"dead" means the RBL has finished. */
		if (rbl_status != 0 && rbl_status != RBL_STATUS_DEAD)
			break;
		delay(1000);
	}

	if (timo <= 0) {
		printf("%s: RBL> RBL restart failed: timeout\n", DEVNAME(sc));
		return EBUSY;
	}

	switch (rbl_status) {
	case RBL_STATUS_SUCCESS:
		if (mode != NULL)
			*mode = FW_BOOT_MODE_RBL_FLASH;
		DPRINTF(("%s: RBL> reset complete! [Flash]\n", DEVNAME(sc)));
		break;
	case RBL_STATUS_HOST_BOOT:
		if (mode != NULL)
			*mode = FW_BOOT_MODE_RBL_HOST_BOOTLOAD;
		DPRINTF(("%s: RBL> reset complete! [Host Bootload]\n",
		    DEVNAME(sc)));
		break;
	case RBL_STATUS_FAILURE:
	default:
		printf("%s: unknown RBL status 0x%x\n", DEVNAME(sc),
		    rbl_status);
		return EBUSY;
	}

	return 0;
}
1354 
/*
 * Reset the MAC through the FLB (flash boot loader) path: quiesce the
 * SPI/SMBUS interfaces, soft-reset, kickstart the MAC, and wait for
 * the firmware to publish a version register.  Returns 0 or ETIMEDOUT.
 *
 * The AQ_FW_GLB_CTL2_REG values (0x40e1, 0x80e0, 0x180e0) are
 * undocumented magic taken from the vendor driver.
 */
int
aq_mac_soft_reset_flb(struct aq_softc *sc)
{
	uint32_t v;
	int timo;

	AQ_WRITE_REG(sc, AQ_FW_GLB_CTL2_REG, 0x40e1);
	/*
	 * Let Felicity hardware to complete SMBUS transaction before
	 * Global software reset.
	 */
	delay(50000);

	/*
	 * If SPI burst transaction was interrupted(before running the script),
	 * global software reset may not clear SPI interface.
	 * Clean it up manually before global reset.
	 */
	AQ_WRITE_REG(sc, AQ_GLB_NVR_PROVISIONING2_REG, 0x00a0);
	AQ_WRITE_REG(sc, AQ_GLB_NVR_INTERFACE1_REG, 0x009f);
	AQ_WRITE_REG(sc, AQ_GLB_NVR_INTERFACE1_REG, 0x809f);
	delay(50000);

	/* Assert the soft-reset bit directly (read-modify-write). */
	v = AQ_READ_REG(sc, AQ_FW_SOFTRESET_REG);
	v &= ~AQ_FW_SOFTRESET_DIS;
	v |= AQ_FW_SOFTRESET_RESET;
	AQ_WRITE_REG(sc, AQ_FW_SOFTRESET_REG, v);

	/* Kickstart. */
	AQ_WRITE_REG(sc, AQ_FW_GLB_CTL2_REG, 0x80e0);
	AQ_WRITE_REG(sc, AQ_MBOXIF_POWER_GATING_CONTROL_REG, 0);
	if (!sc->sc_fast_start_enabled)
		AQ_WRITE_REG(sc, AQ_GLB_GENERAL_PROVISIONING9_REG, 1);

	/*
	 * For the case SPI burst transaction was interrupted (by MCP reset
	 * above), wait until it is completed by hardware.
	 */
	delay(50000);

	/* MAC Kickstart */
	if (!sc->sc_fast_start_enabled) {
		AQ_WRITE_REG(sc, AQ_FW_GLB_CTL2_REG, 0x180e0);

		/* Poll up to ~1s for the daisy-chain status bit. */
		uint32_t flb_status;
		for (timo = 0; timo < 1000; timo++) {
			flb_status = AQ_READ_REG(sc,
			    FW_MPI_DAISY_CHAIN_STATUS_REG) & 0x10;
			if (flb_status != 0)
				break;
			delay(1000);
		}
		if (flb_status == 0) {
			printf("%s: FLB> MAC kickstart failed: timed out\n",
			    DEVNAME(sc));
			return ETIMEDOUT;
		}
		DPRINTF(("%s: FLB> MAC kickstart done, %d ms\n", DEVNAME(sc),
		    timo));
		/* FW reset */
		AQ_WRITE_REG(sc, AQ_FW_GLB_CTL2_REG, 0x80e0);
		/*
		 * Let Felicity hardware complete SMBUS transaction before
		 * Global software reset.
		 */
		delay(50000);
		sc->sc_fast_start_enabled = true;
	}
	AQ_WRITE_REG(sc, AQ_FW_GLB_CPU_SEM_REG(0), 1);

	/* PHY Kickstart: #undone */
	aq_global_software_reset(sc);

	/* Wait up to ~10s for the firmware to report a version. */
	for (timo = 0; timo < 1000; timo++) {
		if (AQ_READ_REG(sc, AQ_FW_VERSION_REG) != 0)
			break;
		delay(10000);
	}
	if (timo >= 1000) {
		printf("%s: FLB> Global Soft Reset failed\n", DEVNAME(sc));
		return ETIMEDOUT;
	}
	DPRINTF(("%s: FLB> F/W restart: %d ms\n", DEVNAME(sc), timo * 10));

	return 0;

}
1442 
1443 int
1444 aq_mac_soft_reset(struct aq_softc *sc, enum aq_fw_bootloader_mode *mode)
1445 {
1446 	if (sc->sc_rbl_enabled)
1447 		return aq_mac_soft_reset_rbl(sc, mode);
1448 
1449 	if (mode != NULL)
1450 		*mode = FW_BOOT_MODE_FLB;
1451 	return aq_mac_soft_reset_flb(sc);
1452 }
1453 
1454 void
1455 aq_global_software_reset(struct aq_softc *sc)
1456 {
1457         uint32_t v;
1458 
1459         AQ_WRITE_REG_BIT(sc, RX_SYSCONTROL_REG, RX_SYSCONTROL_RESET_DIS, 0);
1460         AQ_WRITE_REG_BIT(sc, TX_SYSCONTROL_REG, TX_SYSCONTROL_RESET_DIS, 0);
1461         AQ_WRITE_REG_BIT(sc, FW_MPI_RESETCTRL_REG,
1462             FW_MPI_RESETCTRL_RESET_DIS, 0);
1463 
1464         v = AQ_READ_REG(sc, AQ_FW_SOFTRESET_REG);
1465         v &= ~AQ_FW_SOFTRESET_DIS;
1466         v |= AQ_FW_SOFTRESET_RESET;
1467         AQ_WRITE_REG(sc, AQ_FW_SOFTRESET_REG, v);
1468 }
1469 
1470 int
1471 aq_fw_read_version(struct aq_softc *sc)
1472 {
1473 	int i, error = EBUSY;
1474 #define MAC_FW_START_TIMEOUT_MS 10000
1475 	for (i = 0; i < MAC_FW_START_TIMEOUT_MS; i++) {
1476 		sc->sc_fw_version = AQ_READ_REG(sc, AQ_FW_VERSION_REG);
1477 		if (sc->sc_fw_version != 0) {
1478 			error = 0;
1479 			break;
1480 		}
1481 		delay(1000);
1482 	}
1483 	return error;
1484 }
1485 
1486 int
1487 aq_fw_version_init(struct aq_softc *sc)
1488 {
1489 	int error = 0;
1490 	char fw_vers[sizeof("F/W version xxxxx.xxxxx.xxxxx")];
1491 
1492 	if (FW_VERSION_MAJOR(sc) == 1) {
1493 		sc->sc_fw_ops = &aq_fw1x_ops;
1494 	} else if ((FW_VERSION_MAJOR(sc) == 2) || (FW_VERSION_MAJOR(sc) == 3)) {
1495 		sc->sc_fw_ops = &aq_fw2x_ops;
1496 	} else {
1497 		printf("%s: Unsupported F/W version %d.%d.%d\n",
1498 		    DEVNAME(sc),
1499 		    FW_VERSION_MAJOR(sc), FW_VERSION_MINOR(sc),
1500 		    FW_VERSION_BUILD(sc));
1501 		return ENOTSUP;
1502 	}
1503 	snprintf(fw_vers, sizeof(fw_vers), "F/W version %d.%d.%d",
1504 	    FW_VERSION_MAJOR(sc), FW_VERSION_MINOR(sc), FW_VERSION_BUILD(sc));
1505 
1506 	/* detect revision */
1507 	uint32_t hwrev = AQ_READ_REG(sc, AQ_HW_REVISION_REG);
1508 	switch (hwrev & 0x0000000f) {
1509 	case 0x01:
1510 		printf(", revision A0, %s", fw_vers);
1511 		sc->sc_features |= FEATURES_REV_A0 |
1512 		    FEATURES_MPI_AQ | FEATURES_MIPS;
1513 		break;
1514 	case 0x02:
1515 		printf(", revision B0, %s", fw_vers);
1516 		sc->sc_features |= FEATURES_REV_B0 |
1517 		    FEATURES_MPI_AQ | FEATURES_MIPS |
1518 		    FEATURES_TPO2 | FEATURES_RPF2;
1519 		break;
1520 	case 0x0A:
1521 		printf(", revision B1, %s", fw_vers);
1522 		sc->sc_features |= FEATURES_REV_B1 |
1523 		    FEATURES_MPI_AQ | FEATURES_MIPS |
1524 		    FEATURES_TPO2 | FEATURES_RPF2;
1525 		break;
1526 	default:
1527 		printf(", Unknown revision (0x%08x)", hwrev);
1528 		error = ENOTSUP;
1529 		break;
1530 	}
1531 	return error;
1532 }
1533 
/*
 * Wait for the firmware to publish its mailbox address and sanity-check
 * the firmware version.  Returns 0 on success or ENOTSUP for firmware
 * older than AQ_FW_MIN_VERSION.
 */
int
aq_hw_init_ucp(struct aq_softc *sc)
{
	int timo;

	if (FW_VERSION_MAJOR(sc) == 1) {
		if (AQ_READ_REG(sc, FW1X_MPI_INIT2_REG) == 0) {
			uint32_t data;
			/*
			 * Write a random pattern with bit 1 set and bit 0
			 * clear in each byte — vendor magic, presumably a
			 * host-unique cookie; exact meaning undocumented.
			 */
			arc4random_buf(&data, sizeof(data));
			data &= 0xfefefefe;
			data |= 0x02020202;
			AQ_WRITE_REG(sc, FW1X_MPI_INIT2_REG, data);
		}
		AQ_WRITE_REG(sc, FW1X_MPI_INIT1_REG, 0);
	}

	/* Poll up to 100ms for the firmware mailbox address. */
	for (timo = 100; timo > 0; timo--) {
		sc->sc_mbox_addr = AQ_READ_REG(sc, FW_MPI_MBOX_ADDR_REG);
		if (sc->sc_mbox_addr != 0)
			break;
		delay(1000);
	}

#define AQ_FW_MIN_VERSION	0x01050006
#define AQ_FW_MIN_VERSION_STR	"1.5.6"
	if (sc->sc_fw_version < AQ_FW_MIN_VERSION) {
		printf("%s: atlantic: wrong FW version: " AQ_FW_MIN_VERSION_STR
		    " or later required, this is %d.%d.%d\n",
		    DEVNAME(sc),
		    FW_VERSION_MAJOR(sc),
		    FW_VERSION_MINOR(sc),
		    FW_VERSION_BUILD(sc));
		return ENOTSUP;
	}

	/* Not fatal here; later mailbox accesses will fail on their own. */
	if (sc->sc_mbox_addr == 0)
		printf("%s: NULL MBOX!!\n", DEVNAME(sc));

	return 0;
}
1574 
/*
 * Reset the interrupt controller, wait for the hardware to acknowledge,
 * then hand off to the firmware-specific reset op (fw1x/fw2x).
 * Returns 0 on success or an errno.
 */
int
aq_hw_reset(struct aq_softc *sc)
{
	int error;

	/* disable irq */
	AQ_WRITE_REG_BIT(sc, AQ_INTR_CTRL_REG, AQ_INTR_CTRL_RESET_DIS, 0);

	/* apply */
	AQ_WRITE_REG_BIT(sc, AQ_INTR_CTRL_REG, AQ_INTR_CTRL_RESET_IRQ, 1);

	/* wait ack 10 times by 1ms */
	WAIT_FOR(
	    (AQ_READ_REG(sc, AQ_INTR_CTRL_REG) & AQ_INTR_CTRL_RESET_IRQ) == 0,
	    1000, 10, &error);
	if (error != 0) {
		printf("%s: atlantic: IRQ reset failed: %d\n", DEVNAME(sc),
		    error);
		return error;
	}

	/* Delegate the rest of the reset to the selected firmware ops. */
	return sc->sc_fw_ops->reset(sc);
}
1598 
1599 int
1600 aq_get_mac_addr(struct aq_softc *sc)
1601 {
1602 	uint32_t mac_addr[2];
1603 	uint32_t efuse_shadow_addr;
1604 	int err;
1605 
1606 	efuse_shadow_addr = 0;
1607 	if (FW_VERSION_MAJOR(sc) >= 2)
1608 		efuse_shadow_addr = AQ_READ_REG(sc, FW2X_MPI_EFUSEADDR_REG);
1609 	else
1610 		efuse_shadow_addr = AQ_READ_REG(sc, FW1X_MPI_EFUSEADDR_REG);
1611 
1612 	if (efuse_shadow_addr == 0) {
1613 		printf("%s: cannot get efuse addr", DEVNAME(sc));
1614 		return ENXIO;
1615 	}
1616 
1617 	DPRINTF(("%s: efuse_shadow_addr = %x\n", DEVNAME(sc), efuse_shadow_addr));
1618 
1619 	memset(mac_addr, 0, sizeof(mac_addr));
1620 	err = aq_fw_downld_dwords(sc, efuse_shadow_addr + (40 * 4),
1621 	    mac_addr, 2);
1622 	if (err < 0)
1623 		return err;
1624 
1625 	if (mac_addr[0] == 0 && mac_addr[1] == 0) {
1626 		printf("%s: mac address not found", DEVNAME(sc));
1627 		return ENXIO;
1628 	}
1629 
1630 	DPRINTF(("%s: mac0 %x mac1 %x\n", DEVNAME(sc), mac_addr[0],
1631 	    mac_addr[1]));
1632 
1633 	mac_addr[0] = htobe32(mac_addr[0]);
1634 	mac_addr[1] = htobe32(mac_addr[1]);
1635 
1636 	DPRINTF(("%s: mac0 %x mac1 %x\n", DEVNAME(sc), mac_addr[0],
1637 	    mac_addr[1]));
1638 
1639 	memcpy(sc->sc_enaddr.ether_addr_octet,
1640 	    (uint8_t *)mac_addr, ETHER_ADDR_LEN);
1641 	DPRINTF((": %s", ether_sprintf(sc->sc_enaddr.ether_addr_octet)));
1642 
1643 	return 0;
1644 }
1645 
/*
 * Power-state hook: no suspend/resume handling is implemented, so
 * every activation request is simply reported as successful.
 */
int
aq_activate(struct device *self, int act)
{
	return 0;
}
1651 
/*
 * Read 'cnt' 32-bit words of firmware memory starting at 'addr' into
 * 'p' through the mailbox interface, holding the firmware RAM
 * semaphore for the duration.  Returns 0 on success or a positive
 * errno (ETIMEDOUT) on failure — note: never a negative value.
 */
int
aq_fw_downld_dwords(struct aq_softc *sc, uint32_t addr, uint32_t *p,
    uint32_t cnt)
{
	uint32_t v;
	int error = 0;

	/* Acquire the RAM semaphore; retry once if the first wait fails. */
	WAIT_FOR(AQ_READ_REG(sc, AQ_FW_SEM_RAM_REG) == 1, 1, 10000, &error);
	if (error != 0) {
		AQ_WRITE_REG(sc, AQ_FW_SEM_RAM_REG, 1);
		v = AQ_READ_REG(sc, AQ_FW_SEM_RAM_REG);
		if (v == 0) {
			printf("%s: %s:%d: timeout\n",
			    DEVNAME(sc), __func__, __LINE__);
			return ETIMEDOUT;
		}
	}

	AQ_WRITE_REG(sc, AQ_FW_MBOX_ADDR_REG, addr);

	error = 0;
	for (; cnt > 0 && error == 0; cnt--) {
		/* execute mailbox interface */
		AQ_WRITE_REG_BIT(sc, AQ_FW_MBOX_CMD_REG,
		    AQ_FW_MBOX_CMD_EXECUTE, 1);
		/*
		 * B1 hardware auto-increments the address register, so
		 * completion is detected differently per revision.
		 */
		if (sc->sc_features & FEATURES_REV_B1) {
			WAIT_FOR(AQ_READ_REG(sc, AQ_FW_MBOX_ADDR_REG) != addr,
			    1, 1000, &error);
		} else {
			WAIT_FOR((AQ_READ_REG(sc, AQ_FW_MBOX_CMD_REG) &
			    AQ_FW_MBOX_CMD_BUSY) == 0,
			    1, 1000, &error);
		}
		*p++ = AQ_READ_REG(sc, AQ_FW_MBOX_VAL_REG);
		addr += sizeof(uint32_t);
	}
	/* Release the RAM semaphore. */
	AQ_WRITE_REG(sc, AQ_FW_SEM_RAM_REG, 1);

	if (error != 0)
		printf("%s: %s:%d: timeout\n",
		    DEVNAME(sc), __func__, __LINE__);

	return error;
}
1696 
/*
 * fw2x reset hook: reads the firmware capability words out of the
 * shared mailbox and caches them in sc_fw_caps.  Returns 0 or the
 * mailbox-read errno.
 */
int
aq_fw2x_reset(struct aq_softc *sc)
{
	struct aq_fw2x_capabilities caps = { 0 };
	int error;

	error = aq_fw_downld_dwords(sc,
	    sc->sc_mbox_addr + offsetof(struct aq_fw2x_mailbox, caps),
	    (uint32_t *)&caps, sizeof caps / sizeof(uint32_t));
	if (error != 0) {
		printf("%s: fw2x> can't get F/W capabilities mask, error %d\n",
		    DEVNAME(sc), error);
		return error;
	}
	/* Combine the two 32-bit capability words into one 64-bit mask. */
	sc->sc_fw_caps = caps.caps_lo | ((uint64_t)caps.caps_hi << 32);

	DPRINTF(("%s: fw2x> F/W capabilities=0x%llx\n", DEVNAME(sc),
	    sc->sc_fw_caps));

	return 0;
}
1718 
/* fw1x reset is not implemented; log it and claim success. */
int
aq_fw1x_reset(struct aq_softc *sc)
{
	printf("%s: unimplemented %s\n", DEVNAME(sc), __func__);
	return 0;
}
1725 
/* Stub: fw1x link-mode set is unimplemented; always reports success. */
int
aq_fw1x_set_mode(struct aq_softc *sc, enum aq_hw_fw_mpi_state w,
    enum aq_link_speed x, enum aq_link_fc y, enum aq_link_eee z)
{
	return 0;
}
1732 
/* Stub: fw1x link-mode query is unimplemented; output args untouched. */
int
aq_fw1x_get_mode(struct aq_softc *sc, enum aq_hw_fw_mpi_state *w,
    enum aq_link_speed *x, enum aq_link_fc *y, enum aq_link_eee *z)
{
	return 0;
}
1739 
/* Stub: fw1x statistics collection is unimplemented. */
int
aq_fw1x_get_stats(struct aq_softc *sc, struct aq_hw_stats_s *w)
{
	return 0;
}
1745 
1746 
1747 int
1748 aq_fw2x_get_mode(struct aq_softc *sc, enum aq_hw_fw_mpi_state *modep,
1749     enum aq_link_speed *speedp, enum aq_link_fc *fcp, enum aq_link_eee *eeep)
1750 {
1751 	uint64_t mpi_state, mpi_ctrl;
1752 	enum aq_link_speed speed;
1753 	enum aq_link_fc fc;
1754 
1755 	AQ_MPI_LOCK(sc);
1756 
1757 	mpi_state = AQ_READ64_REG(sc, FW2X_MPI_STATE_REG);
1758 	if (modep != NULL) {
1759 		mpi_ctrl = AQ_READ64_REG(sc, FW2X_MPI_CONTROL_REG);
1760 		if (mpi_ctrl & FW2X_CTRL_RATE_MASK)
1761 			*modep = MPI_INIT;
1762 		else
1763 			*modep = MPI_DEINIT;
1764 	}
1765 
1766 	AQ_MPI_UNLOCK(sc);
1767 
1768 	if (mpi_state & FW2X_CTRL_RATE_10G)
1769 		speed = AQ_LINK_10G;
1770 	else if (mpi_state & FW2X_CTRL_RATE_5G)
1771 		speed = AQ_LINK_5G;
1772 	else if (mpi_state & FW2X_CTRL_RATE_2G5)
1773 		speed = AQ_LINK_2G5;
1774 	else if (mpi_state & FW2X_CTRL_RATE_1G)
1775 		speed = AQ_LINK_1G;
1776 	else if (mpi_state & FW2X_CTRL_RATE_100M)
1777 		speed = AQ_LINK_100M;
1778 	else
1779 		speed = AQ_LINK_NONE;
1780 	if (speedp != NULL)
1781 		*speedp = speed;
1782 
1783 	fc = AQ_FC_NONE;
1784 	if (mpi_state & FW2X_CTRL_PAUSE)
1785 		fc |= AQ_FC_RX;
1786 	if (mpi_state & FW2X_CTRL_ASYMMETRIC_PAUSE)
1787 		fc |= AQ_FC_TX;
1788 	if (fcp != NULL)
1789 		*fcp = fc;
1790 
1791 	if (eeep != NULL)
1792 		*eeep = AQ_EEE_DISABLE;
1793 
1794 	return 0;
1795 }
1796 
/* Stub: fw2x statistics collection is unimplemented. */
int
aq_fw2x_get_stats(struct aq_softc *sc, struct aq_hw_stats_s *w)
{
	return 0;
}
1802 
1803 void
1804 aq_hw_l3_filter_set(struct aq_softc *sc)
1805 {
1806 	int i;
1807 
1808 	/* clear all filter */
1809 	for (i = 0; i < 8; i++) {
1810 		AQ_WRITE_REG_BIT(sc, RPF_L3_FILTER_REG(i),
1811 		    RPF_L3_FILTER_L4_EN, 0);
1812 	}
1813 }
1814 
/*
 * One-time hardware initialization after firmware reset: DMA request
 * limits, TX/RX datapaths, MAC address, QoS and interrupt routing.
 * 'irqmode' selects MSI-X/MSI/legacy; 'multivec' enables per-queue
 * vectors.  Returns 0 or EINVAL if the MAC address can't be set.
 */
int
aq_hw_init(struct aq_softc *sc, int irqmode, int multivec)
{
	uint32_t v;

	/* Force limit MRRS on RDM/TDM to 2K */
	v = AQ_READ_REG(sc, AQ_PCI_REG_CONTROL_6_REG);
	AQ_WRITE_REG(sc, AQ_PCI_REG_CONTROL_6_REG, (v & ~0x0707) | 0x0404);

	/*
	 * TX DMA total request limit. B0 hardware is not capable to
	 * handle more than (8K-MRRS) incoming DMA data.
	 * Value 24 in 256byte units
	 */
	AQ_WRITE_REG(sc, AQ_HW_TX_DMA_TOTAL_REQ_LIMIT_REG, 24);

	aq_hw_init_tx_path(sc);
	aq_hw_init_rx_path(sc);

	if (aq_set_mac_addr(sc, AQ_HW_MAC_OWN, sc->sc_enaddr.ether_addr_octet))
		return EINVAL;

	/* Start with the link down; aq_attach() selects a mode later. */
	aq_set_linkmode(sc, AQ_LINK_NONE, AQ_FC_NONE, AQ_EEE_DISABLE);

	aq_hw_qos_set(sc);

	/* Enable interrupt */
	AQ_WRITE_REG(sc, AQ_INTR_CTRL_REG, AQ_INTR_CTRL_RESET_DIS);
	AQ_WRITE_REG_BIT(sc, AQ_INTR_CTRL_REG, AQ_INTR_CTRL_MULTIVEC, multivec);

	AQ_WRITE_REG_BIT(sc, AQ_INTR_CTRL_REG, AQ_INTR_CTRL_IRQMODE, irqmode);

	AQ_WRITE_REG(sc, AQ_INTR_AUTOMASK_REG, 0xffffffff);

	/* Route fatal errors to the error interrupt vector. */
	AQ_WRITE_REG(sc, AQ_GEN_INTR_MAP_REG(0),
	    ((AQ_B0_ERR_INT << 24) | (1U << 31)) |
	    ((AQ_B0_ERR_INT << 16) | (1 << 23))
	);

	/* link interrupt */
	AQ_WRITE_REG(sc, AQ_GEN_INTR_MAP_REG(3),
	    (1 << 7) | sc->sc_linkstat_irq);

	return 0;
}
1860 
/*
 * Initialize the transmit datapath: traffic-class mode, LSO TCP flag
 * masks, checksum offload engine (TPO2 on B0/B1) and DCA off.
 */
void
aq_hw_init_tx_path(struct aq_softc *sc)
{
	/* Tx TC/RSS number config */
	AQ_WRITE_REG_BIT(sc, TPB_TX_BUF_REG, TPB_TX_BUF_TC_MODE_EN, 1);

	/* TCP flag masks applied to first/middle/last LSO segments. */
	AQ_WRITE_REG_BIT(sc, THM_LSO_TCP_FLAG1_REG,
	    THM_LSO_TCP_FLAG1_FIRST, 0x0ff6);
	AQ_WRITE_REG_BIT(sc, THM_LSO_TCP_FLAG1_REG,
	    THM_LSO_TCP_FLAG1_MID,   0x0ff6);
	AQ_WRITE_REG_BIT(sc, THM_LSO_TCP_FLAG2_REG,
	   THM_LSO_TCP_FLAG2_LAST,  0x0f7f);

	/* misc */
	AQ_WRITE_REG(sc, TX_TPO2_REG,
	   (sc->sc_features & FEATURES_TPO2) ? TX_TPO2_EN : 0);
	AQ_WRITE_REG_BIT(sc, TDM_DCA_REG, TDM_DCA_EN, 0);
	AQ_WRITE_REG_BIT(sc, TDM_DCA_REG, TDM_DCA_MODE, 0);

	AQ_WRITE_REG_BIT(sc, TPB_TX_BUF_REG, TPB_TX_BUF_SCP_INS_EN, 1);
}
1882 
1883 void
1884 aq_hw_init_rx_path(struct aq_softc *sc)
1885 {
1886 	int i;
1887 
1888 	/* clear setting */
1889 	AQ_WRITE_REG_BIT(sc, RPB_RPF_RX_REG, RPB_RPF_RX_TC_MODE, 0);
1890 	AQ_WRITE_REG_BIT(sc, RPB_RPF_RX_REG, RPB_RPF_RX_FC_MODE, 0);
1891 	AQ_WRITE_REG(sc, RX_FLR_RSS_CONTROL1_REG, 0);
1892 	for (i = 0; i < 32; i++) {
1893 		AQ_WRITE_REG_BIT(sc, RPF_ETHERTYPE_FILTER_REG(i),
1894 		   RPF_ETHERTYPE_FILTER_EN, 0);
1895 	}
1896 
1897 	if (sc->sc_nqueues > 1) {
1898 		uint32_t bits;
1899 
1900 		AQ_WRITE_REG_BIT(sc, RPB_RPF_RX_REG, RPB_RPF_RX_TC_MODE, 1);
1901 		AQ_WRITE_REG_BIT(sc, RPB_RPF_RX_REG, RPB_RPF_RX_FC_MODE, 1);
1902 
1903 		switch (sc->sc_nqueues) {
1904 		case 2:
1905 			bits = 0x11111111;
1906 			break;
1907 		case 4:
1908 			bits = 0x22222222;
1909 			break;
1910 		case 8:
1911 			bits = 0x33333333;
1912 			break;
1913 		}
1914 
1915 		AQ_WRITE_REG(sc, RX_FLR_RSS_CONTROL1_REG,
1916 		    RX_FLR_RSS_CONTROL1_EN | bits);
1917 	}
1918 
1919 	/* L2 and Multicast filters */
1920 	for (i = 0; i < AQ_HW_MAC_NUM; i++) {
1921 		AQ_WRITE_REG_BIT(sc, RPF_L2UC_MSW_REG(i), RPF_L2UC_MSW_EN, 0);
1922 		AQ_WRITE_REG_BIT(sc, RPF_L2UC_MSW_REG(i), RPF_L2UC_MSW_ACTION,
1923 		    RPF_ACTION_HOST);
1924 	}
1925 	AQ_WRITE_REG(sc, RPF_MCAST_FILTER_MASK_REG, 0);
1926 	AQ_WRITE_REG(sc, RPF_MCAST_FILTER_REG(0), 0x00010fff);
1927 
1928 	/* Vlan filters */
1929 	AQ_WRITE_REG_BIT(sc, RPF_VLAN_TPID_REG, RPF_VLAN_TPID_OUTER,
1930 	    ETHERTYPE_QINQ);
1931 	AQ_WRITE_REG_BIT(sc, RPF_VLAN_TPID_REG, RPF_VLAN_TPID_INNER,
1932 	    ETHERTYPE_VLAN);
1933 	AQ_WRITE_REG_BIT(sc, RPF_VLAN_MODE_REG, RPF_VLAN_MODE_PROMISC, 1);
1934 
1935 	if (sc->sc_features & FEATURES_REV_B) {
1936 		AQ_WRITE_REG_BIT(sc, RPF_VLAN_MODE_REG,
1937 		    RPF_VLAN_MODE_ACCEPT_UNTAGGED, 1);
1938 		AQ_WRITE_REG_BIT(sc, RPF_VLAN_MODE_REG,
1939 		    RPF_VLAN_MODE_UNTAGGED_ACTION, RPF_ACTION_HOST);
1940 	}
1941 
1942 	if (sc->sc_features & FEATURES_RPF2)
1943 		AQ_WRITE_REG(sc, RX_TCP_RSS_HASH_REG, RX_TCP_RSS_HASH_RPF2);
1944 	else
1945 		AQ_WRITE_REG(sc, RX_TCP_RSS_HASH_REG, 0);
1946 
1947 	/* we might want to figure out what this magic number does */
1948 	AQ_WRITE_REG_BIT(sc, RX_TCP_RSS_HASH_REG, RX_TCP_RSS_HASH_TYPE,
1949 	    0x001e);
1950 
1951 	AQ_WRITE_REG_BIT(sc, RPF_L2BC_REG, RPF_L2BC_EN, 1);
1952 	AQ_WRITE_REG_BIT(sc, RPF_L2BC_REG, RPF_L2BC_ACTION, RPF_ACTION_HOST);
1953 	AQ_WRITE_REG_BIT(sc, RPF_L2BC_REG, RPF_L2BC_THRESHOLD, 0xffff);
1954 
1955 	AQ_WRITE_REG_BIT(sc, RX_DMA_DCA_REG, RX_DMA_DCA_EN, 0);
1956 	AQ_WRITE_REG_BIT(sc, RX_DMA_DCA_REG, RX_DMA_DCA_MODE, 0);
1957 }
1958 
1959 /* set multicast filter. index 0 for own address */
1960 int
1961 aq_set_mac_addr(struct aq_softc *sc, int index, uint8_t *enaddr)
1962 {
1963 	uint32_t h, l;
1964 
1965 	if (index >= AQ_HW_MAC_NUM)
1966 		return EINVAL;
1967 
1968 	if (enaddr == NULL) {
1969 		/* disable */
1970 		AQ_WRITE_REG_BIT(sc,
1971 		    RPF_L2UC_MSW_REG(index), RPF_L2UC_MSW_EN, 0);
1972 		return 0;
1973 	}
1974 
1975 	h = (enaddr[0] <<  8) | (enaddr[1]);
1976 	l = ((uint32_t)enaddr[2] << 24) | (enaddr[3] << 16) |
1977 	    (enaddr[4] <<  8) | (enaddr[5]);
1978 
1979 	/* disable, set, and enable */
1980 	AQ_WRITE_REG_BIT(sc, RPF_L2UC_MSW_REG(index), RPF_L2UC_MSW_EN, 0);
1981 	AQ_WRITE_REG(sc, RPF_L2UC_LSW_REG(index), l);
1982 	AQ_WRITE_REG_BIT(sc, RPF_L2UC_MSW_REG(index),
1983 	    RPF_L2UC_MSW_MACADDR_HI, h);
1984 	AQ_WRITE_REG_BIT(sc, RPF_L2UC_MSW_REG(index), RPF_L2UC_MSW_ACTION, 1);
1985 	AQ_WRITE_REG_BIT(sc, RPF_L2UC_MSW_REG(index), RPF_L2UC_MSW_EN, 1);
1986 
1987 	return 0;
1988 }
1989 
1990 int
1991 aq_get_linkmode(struct aq_softc *sc, enum aq_link_speed *speed,
1992     enum aq_link_fc *fc, enum aq_link_eee *eee)
1993 {
1994 	enum aq_hw_fw_mpi_state mode;
1995 	int error;
1996 
1997 	error = sc->sc_fw_ops->get_mode(sc, &mode, speed, fc, eee);
1998 	if (error != 0)
1999 		return error;
2000 	if (mode != MPI_INIT)
2001 		return ENXIO;
2002 
2003 	return 0;
2004 }
2005 
/*
 * Ask the firmware to bring the link up with the given speed, flow
 * control and EEE settings (MPI state stays MPI_INIT, i.e. running).
 * Returns 0 on success or an errno from the firmware backend.
 */
int
aq_set_linkmode(struct aq_softc *sc, enum aq_link_speed speed,
    enum aq_link_fc fc, enum aq_link_eee eee)
{
	return sc->sc_fw_ops->set_mode(sc, MPI_INIT, speed, fc, eee);
}
2012 
/*
 * FW 2.x backend for set_mode: rewrite the 64-bit MPI control register
 * with the requested advertised rates, EEE and flow-control settings.
 * MPI_INIT selects the given rate set; MPI_DEINIT clears all rates.
 * Returns 0 on success or EINVAL for an unknown MPI state.
 */
int
aq_fw2x_set_mode(struct aq_softc *sc, enum aq_hw_fw_mpi_state mode,
    enum aq_link_speed speed, enum aq_link_fc fc, enum aq_link_eee eee)
{
	uint64_t mpi_ctrl;
	int error = 0;

	/* serialize access to the firmware mailbox registers */
	AQ_MPI_LOCK(sc);

	mpi_ctrl = AQ_READ64_REG(sc, FW2X_MPI_CONTROL_REG);

	switch (mode) {
	case MPI_INIT:
		/* replace the advertised rate set with the requested one */
		mpi_ctrl &= ~FW2X_CTRL_RATE_MASK;
		if (speed & AQ_LINK_10G)
			mpi_ctrl |= FW2X_CTRL_RATE_10G;
		if (speed & AQ_LINK_5G)
			mpi_ctrl |= FW2X_CTRL_RATE_5G;
		if (speed & AQ_LINK_2G5)
			mpi_ctrl |= FW2X_CTRL_RATE_2G5;
		if (speed & AQ_LINK_1G)
			mpi_ctrl |= FW2X_CTRL_RATE_1G;
		if (speed & AQ_LINK_100M)
			mpi_ctrl |= FW2X_CTRL_RATE_100M;

		mpi_ctrl &= ~FW2X_CTRL_LINK_DROP;

		/* EEE: all-or-nothing via the full mask */
		mpi_ctrl &= ~FW2X_CTRL_EEE_MASK;
		if (eee == AQ_EEE_ENABLE)
			mpi_ctrl |= FW2X_CTRL_EEE_MASK;

		/* PAUSE = rx pause, ASYMMETRIC_PAUSE = tx pause */
		mpi_ctrl &= ~(FW2X_CTRL_PAUSE | FW2X_CTRL_ASYMMETRIC_PAUSE);
		if (fc & AQ_FC_RX)
			mpi_ctrl |= FW2X_CTRL_PAUSE;
		if (fc & AQ_FC_TX)
			mpi_ctrl |= FW2X_CTRL_ASYMMETRIC_PAUSE;
		break;
	case MPI_DEINIT:
		/* drop the link: clear all rates, EEE and pause bits */
		mpi_ctrl &= ~(FW2X_CTRL_RATE_MASK | FW2X_CTRL_EEE_MASK);
		mpi_ctrl &= ~(FW2X_CTRL_PAUSE | FW2X_CTRL_ASYMMETRIC_PAUSE);
		break;
	default:
		printf("%s: fw2x> unknown MPI state %d\n", DEVNAME(sc), mode);
		error =  EINVAL;
		goto failure;
	}
	AQ_WRITE64_REG(sc, FW2X_MPI_CONTROL_REG, mpi_ctrl);

 failure:
	AQ_MPI_UNLOCK(sc);
	return error;
}
2065 
/*
 * QoS initialization: program descriptor/data arbiters and per-TC
 * credits, size the Tx and Rx packet buffers for a single traffic
 * class, and map all 802.1p priorities to TC 0.  The credit/weight
 * constants look like vendor-recommended magic values.
 */
void
aq_hw_qos_set(struct aq_softc *sc)
{
	uint32_t tc = 0;
	uint32_t buff_size;

	/* TPS Descriptor rate init */
	AQ_WRITE_REG_BIT(sc, TPS_DESC_RATE_REG, TPS_DESC_RATE_TA_RST, 0);
	AQ_WRITE_REG_BIT(sc, TPS_DESC_RATE_REG, TPS_DESC_RATE_LIM, 0xa);

	/* TPS VM init */
	AQ_WRITE_REG_BIT(sc, TPS_DESC_VM_ARB_MODE_REG, TPS_DESC_VM_ARB_MODE, 0);

	/* TPS TC credits init */
	AQ_WRITE_REG_BIT(sc, TPS_DESC_TC_ARB_MODE_REG, TPS_DESC_TC_ARB_MODE, 0);
	AQ_WRITE_REG_BIT(sc, TPS_DATA_TC_ARB_MODE_REG, TPS_DATA_TC_ARB_MODE, 0);

	AQ_WRITE_REG_BIT(sc, TPS_DATA_TCT_REG(tc),
	    TPS_DATA_TCT_CREDIT_MAX, 0xfff);
	AQ_WRITE_REG_BIT(sc, TPS_DATA_TCT_REG(tc),
	    TPS_DATA_TCT_WEIGHT, 0x64);
	AQ_WRITE_REG_BIT(sc, TPS_DESC_TCT_REG(tc),
	    TPS_DESC_TCT_CREDIT_MAX, 0x50);
	AQ_WRITE_REG_BIT(sc, TPS_DESC_TCT_REG(tc),
	    TPS_DESC_TCT_WEIGHT, 0x1e);

	/* Tx buf size: whole buffer to TC 0, high/low thresholds at 66%/50% */
	tc = 0;
	buff_size = AQ_HW_TXBUF_MAX;
	AQ_WRITE_REG_BIT(sc, TPB_TXB_BUFSIZE_REG(tc), TPB_TXB_BUFSIZE,
	    buff_size);
	AQ_WRITE_REG_BIT(sc, TPB_TXB_THRESH_REG(tc), TPB_TXB_THRESH_HI,
	    (buff_size * (1024 / 32) * 66) / 100);
	AQ_WRITE_REG_BIT(sc, TPB_TXB_THRESH_REG(tc), TPB_TXB_THRESH_LO,
	    (buff_size * (1024 / 32) * 50) / 100);

	/* QoS Rx buf size per TC; xoff disabled, thresholds at 66%/50% */
	tc = 0;
	buff_size = AQ_HW_RXBUF_MAX;
	AQ_WRITE_REG_BIT(sc, RPB_RXB_BUFSIZE_REG(tc), RPB_RXB_BUFSIZE,
	    buff_size);
	AQ_WRITE_REG_BIT(sc, RPB_RXB_XOFF_REG(tc), RPB_RXB_XOFF_EN, 0);
	AQ_WRITE_REG_BIT(sc, RPB_RXB_XOFF_REG(tc), RPB_RXB_XOFF_THRESH_HI,
	    (buff_size * (1024 / 32) * 66) / 100);
	AQ_WRITE_REG_BIT(sc, RPB_RXB_XOFF_REG(tc), RPB_RXB_XOFF_THRESH_LO,
	    (buff_size * (1024 / 32) * 50) / 100);

	/* QoS 802.1p priority -> TC mapping: everything to TC 0 */
	int i_priority;
	for (i_priority = 0; i_priority < 8; i_priority++) {
		AQ_WRITE_REG_BIT(sc, RPF_RPB_RX_TC_UPT_REG,
		    RPF_RPB_RX_TC_UPT_MASK(i_priority), 0);
	}
}
2120 
/*
 * Program the RSS hash key and the 64-entry redirection table.
 * A no-op when running with a single queue.  Returns 0 on success
 * or ETIMEDOUT-style errno when a register write-enable bit fails
 * to clear.
 */
int
aq_init_rss(struct aq_softc *sc)
{
	uint32_t rss_key[AQ_RSS_KEYSIZE / sizeof(uint32_t)];
	uint32_t redir;
	int bits, queue;
	int error;
	int i;

	if (sc->sc_nqueues == 1)
		return 0;

	/* rss key is composed of 32 bit registers */
	stoeplitz_to_key(rss_key, sizeof(rss_key));
	for (i = 0; i < nitems(rss_key); i++) {
		/* key words are written highest address first */
		AQ_WRITE_REG(sc, RPF_RSS_KEY_WR_DATA_REG, htole32(rss_key[i]));
		AQ_WRITE_REG_BIT(sc, RPF_RSS_KEY_ADDR_REG, RPF_RSS_KEY_ADDR,
		    nitems(rss_key) - 1 - i);
		AQ_WRITE_REG_BIT(sc, RPF_RSS_KEY_ADDR_REG, RPF_RSS_KEY_WR_EN,
		    1);
		/* hardware clears WR_EN when the write has been absorbed */
		WAIT_FOR((AQ_READ_REG(sc, RPF_RSS_KEY_ADDR_REG) &
		    RPF_RSS_KEY_WR_EN) == 0, 1000, 10, &error);
		if (error != 0) {
			printf(": timed out setting rss key\n");
			return error;
		}
	}

	/*
	 * the redirection table has 64 entries, each entry is a 3 bit
	 * queue number, packed into a 16 bit register, so there are 12
	 * registers to program.
	 */
	bits = 0;
	redir = 0;
	queue = 0;
	for (i = 0; i < AQ_RSS_REDIR_ENTRIES; i++) {
		/* accumulate 3-bit queue numbers round-robin into redir */
		while (bits < 16) {
			redir |= (queue << bits);
			bits += 3;
			queue++;
			if (queue == sc->sc_nqueues)
				queue = 0;
		}

		/* low 16 bits are written; leftovers carry to the next reg */
		AQ_WRITE_REG(sc, RPF_RSS_REDIR_WR_DATA_REG, htole16(redir));
		AQ_WRITE_REG_BIT(sc, RPF_RSS_REDIR_ADDR_REG, RPF_RSS_REDIR_ADDR,
		    i);
		AQ_WRITE_REG_BIT(sc, RPF_RSS_REDIR_ADDR_REG,
		    RPF_RSS_REDIR_WR_EN, 1);
		WAIT_FOR((AQ_READ_REG(sc, RPF_RSS_REDIR_ADDR_REG) &
		    RPF_RSS_REDIR_WR_EN) == 0, 1000, 10, &error);
		if (error != 0) {
			printf(": timed out setting rss table\n");
			return error;
		}
		redir >>= 16;
		bits -= 16;
	}

	return 0;
}
2183 
/*
 * Disable a Tx ring; when `start' is set, also reprogram the
 * descriptor base address/length, resync producer/consumer with the
 * hardware tail pointer, map the ring's interrupt vector and
 * re-enable the ring.
 */
void
aq_txring_reset(struct aq_softc *sc, struct aq_txring *tx, int start)
{
	daddr_t paddr;

	tx->tx_prod = 0;
	tx->tx_cons = 0;

	/* empty slots? */

	AQ_WRITE_REG_BIT(sc, TX_DMA_DESC_REG(tx->tx_q), TX_DMA_DESC_EN, 0);

	if (start == 0)
		return;

	/* 64-bit descriptor ring base, split across two registers */
	paddr = AQ_DMA_DVA(&tx->tx_mem);
	AQ_WRITE_REG(sc, TX_DMA_DESC_BASE_ADDRLSW_REG(tx->tx_q), paddr);
	AQ_WRITE_REG(sc, TX_DMA_DESC_BASE_ADDRMSW_REG(tx->tx_q),
	    paddr >> 32);

	/* ring length is programmed in units of 8 descriptors */
	AQ_WRITE_REG_BIT(sc, TX_DMA_DESC_REG(tx->tx_q), TX_DMA_DESC_LEN,
	    AQ_TXD_NUM / 8);

	/* start prod/cons from wherever the hardware tail pointer is */
	tx->tx_prod = AQ_READ_REG(sc, TX_DMA_DESC_TAIL_PTR_REG(tx->tx_q));
	tx->tx_cons = tx->tx_prod;
	AQ_WRITE_REG(sc, TX_DMA_DESC_WRWB_THRESH_REG(tx->tx_q), 0);

	/* route this ring's completions to its interrupt vector */
	AQ_WRITE_REG_BIT(sc, AQ_INTR_IRQ_MAP_TX_REG(tx->tx_q),
	    AQ_INTR_IRQ_MAP_TX_IRQMAP(tx->tx_q), tx->tx_irq);
	AQ_WRITE_REG_BIT(sc, AQ_INTR_IRQ_MAP_TX_REG(tx->tx_q),
	    AQ_INTR_IRQ_MAP_TX_EN(tx->tx_q), 1);

	AQ_WRITE_REG_BIT(sc, TX_DMA_DESC_REG(tx->tx_q), TX_DMA_DESC_EN, 1);

	/* direct cache access is not used */
	AQ_WRITE_REG_BIT(sc, TDM_DCAD_REG(tx->tx_q), TDM_DCAD_CPUID, 0);
	AQ_WRITE_REG_BIT(sc, TDM_DCAD_REG(tx->tx_q), TDM_DCAD_CPUID_EN, 0);
}
2221 
/*
 * Disable an Rx ring; when `start' is set, also reprogram the
 * descriptor base/length, buffer sizes, VLAN stripping, interrupt
 * mapping, resync producer/consumer with the hardware head pointer
 * and re-enable the ring.
 */
void
aq_rxring_reset(struct aq_softc *sc, struct aq_rxring *rx, int start)
{
	daddr_t paddr;
	int strip;

	AQ_WRITE_REG_BIT(sc, RX_DMA_DESC_REG(rx->rx_q), RX_DMA_DESC_EN, 0);
	/* drain */

	if (start == 0)
		return;

	/* 64-bit descriptor ring base, split across two registers */
	paddr = AQ_DMA_DVA(&rx->rx_mem);
	AQ_WRITE_REG(sc, RX_DMA_DESC_BASE_ADDRLSW_REG(rx->rx_q), paddr);
	AQ_WRITE_REG(sc, RX_DMA_DESC_BASE_ADDRMSW_REG(rx->rx_q),
	    paddr >> 32);

	/* ring length is programmed in units of 8 descriptors */
	AQ_WRITE_REG_BIT(sc, RX_DMA_DESC_REG(rx->rx_q), RX_DMA_DESC_LEN,
	    AQ_RXD_NUM / 8);

	/* data buffer size in KB units; header split is not used */
	AQ_WRITE_REG_BIT(sc, RX_DMA_DESC_BUFSIZE_REG(rx->rx_q),
	    RX_DMA_DESC_BUFSIZE_DATA, MCLBYTES / 1024);
	AQ_WRITE_REG_BIT(sc, RX_DMA_DESC_BUFSIZE_REG(rx->rx_q),
	    RX_DMA_DESC_BUFSIZE_HDR, 0);
	AQ_WRITE_REG_BIT(sc, RX_DMA_DESC_REG(rx->rx_q),
	    RX_DMA_DESC_HEADER_SPLIT, 0);

	/* let the hardware strip vlan tags when vlan(4) is configured in */
#if NVLAN > 0
	strip = 1;
#else
	strip = 0;
#endif
	AQ_WRITE_REG_BIT(sc, RX_DMA_DESC_REG(rx->rx_q),
	    RX_DMA_DESC_VLAN_STRIP, strip);

	/* start prod/cons from wherever the hardware head pointer is */
	rx->rx_cons = AQ_READ_REG(sc, RX_DMA_DESC_HEAD_PTR_REG(rx->rx_q)) &
	    RX_DMA_DESC_HEAD_PTR;
	AQ_WRITE_REG(sc, RX_DMA_DESC_TAIL_PTR_REG(rx->rx_q), rx->rx_cons);
	rx->rx_prod = rx->rx_cons;

	/* route this ring's completions to its interrupt vector */
	AQ_WRITE_REG_BIT(sc, AQ_INTR_IRQ_MAP_RX_REG(rx->rx_q),
	    AQ_INTR_IRQ_MAP_RX_IRQMAP(rx->rx_q), rx->rx_irq);
	AQ_WRITE_REG_BIT(sc, AQ_INTR_IRQ_MAP_RX_REG(rx->rx_q),
	    AQ_INTR_IRQ_MAP_RX_EN(rx->rx_q), 1);

	/* direct cache access is not used */
	AQ_WRITE_REG_BIT(sc, RX_DMA_DCAD_REG(rx->rx_q),
	    RX_DMA_DCAD_CPUID, 0);
	AQ_WRITE_REG_BIT(sc, RX_DMA_DCAD_REG(rx->rx_q),
	    RX_DMA_DCAD_DESC_EN, 0);
	AQ_WRITE_REG_BIT(sc, RX_DMA_DCAD_REG(rx->rx_q),
	    RX_DMA_DCAD_HEADER_EN, 0);
	AQ_WRITE_REG_BIT(sc, RX_DMA_DCAD_REG(rx->rx_q),
	    RX_DMA_DCAD_PAYLOAD_EN, 0);

	AQ_WRITE_REG_BIT(sc, RX_DMA_DESC_REG(rx->rx_q), RX_DMA_DESC_EN, 1);
}
2278 
/*
 * Post up to `nslots' receive buffers starting at the ring producer:
 * allocate a cluster mbuf per slot, DMA-load it, write its address
 * into the read-format descriptor and advance the tail pointer.
 * Returns the number of slots that could NOT be filled (allocation or
 * DMA load failure stops early).
 */
static inline unsigned int
aq_rx_fill_slots(struct aq_softc *sc, struct aq_rxring *rx, uint nslots)
{
	struct aq_rx_desc_read *ring, *rd;
	struct aq_slot *as;
	struct mbuf *m;
	uint p, fills;

	ring = AQ_DMA_KVA(&rx->rx_mem);
	p = rx->rx_prod;

	bus_dmamap_sync(sc->sc_dmat, AQ_DMA_MAP(&rx->rx_mem), 0,
	    AQ_DMA_LEN(&rx->rx_mem), BUS_DMASYNC_POSTWRITE);

	for (fills = 0; fills < nslots; fills++) {
		as = &rx->rx_slots[p];
		rd = &ring[p];

		/* extra ETHER_ALIGN so the payload can be realigned below */
		m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES + ETHER_ALIGN);
		if (m == NULL)
			break;

		/* place the MCLBYTES window at the end of the cluster,
		 * then shift by ETHER_ALIGN to align the IP header */
		m->m_data += (m->m_ext.ext_size - (MCLBYTES + ETHER_ALIGN));
		m->m_data += ETHER_ALIGN;
		m->m_len = m->m_pkthdr.len = MCLBYTES;

		if (bus_dmamap_load_mbuf(sc->sc_dmat, as->as_map, m,
		    BUS_DMA_NOWAIT) != 0) {
			m_freem(m);
			break;
		}
		as->as_m = m;

		bus_dmamap_sync(sc->sc_dmat, as->as_map, 0,
		    as->as_map->dm_mapsize, BUS_DMASYNC_PREREAD);
		htolem64(&rd->buf_addr, as->as_map->dm_segs[0].ds_addr);
		rd->hdr_addr = 0;
		p++;
		if (p == AQ_RXD_NUM)
			p = 0;
	}

	bus_dmamap_sync(sc->sc_dmat, AQ_DMA_MAP(&rx->rx_mem), 0,
	    AQ_DMA_LEN(&rx->rx_mem), BUS_DMASYNC_PREWRITE);

	/* tell the hardware about the newly posted buffers */
	rx->rx_prod = p;
	AQ_WRITE_REG(sc, RX_DMA_DESC_TAIL_PTR_REG(rx->rx_q), rx->rx_prod);
	return (nslots - fills);
}
2328 
2329 int
2330 aq_rx_fill(struct aq_softc *sc, struct aq_rxring *rx)
2331 {
2332 	u_int slots;
2333 
2334 	slots = if_rxr_get(&rx->rx_rxr, AQ_RXD_NUM);
2335 	if (slots == 0)
2336 		return 1;
2337 
2338 	slots = aq_rx_fill_slots(sc, rx, slots);
2339 	if_rxr_put(&rx->rx_rxr, slots);
2340 	return 0;
2341 }
2342 
2343 void
2344 aq_refill(void *xq)
2345 {
2346 	struct aq_queues *q = xq;
2347 	struct aq_softc *sc = q->q_sc;
2348 
2349 	aq_rx_fill(sc, &q->q_rx);
2350 
2351 	if (if_rxr_inuse(&q->q_rx.rx_rxr) == 0)
2352 		timeout_add(&q->q_rx.rx_refill, 1);
2353 }
2354 
/*
 * Rx completion processing for one ring.  Walks writeback descriptors
 * from the consumer up to the hardware head pointer, reassembling
 * multi-descriptor packets through the rx_m_head/rx_m_tail chain,
 * applying RSS flowid, vlan tag and checksum-offload results, then
 * hands completed packets to the stack and refills the ring.
 */
void
aq_rxeof(struct aq_softc *sc, struct aq_rxring *rx)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct aq_rx_desc_wb *rxd;
	struct aq_rx_desc_wb *ring;
	struct aq_slot *as;
	uint32_t end, idx;
	uint16_t pktlen, status;
	uint32_t rxd_type;
	struct mbuf *m, *mb;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	int rxfree;

	if (!ISSET(ifp->if_flags, IFF_RUNNING))
		return;

	/* hardware head pointer marks the last completed descriptor */
	end = AQ_READ_REG(sc, RX_DMA_DESC_HEAD_PTR_REG(rx->rx_q)) &
	    RX_DMA_DESC_HEAD_PTR;

	bus_dmamap_sync(sc->sc_dmat, AQ_DMA_MAP(&rx->rx_mem), 0,
	    AQ_DMA_LEN(&rx->rx_mem), BUS_DMASYNC_POSTREAD);

	rxfree = 0;
	idx = rx->rx_cons;
	ring = AQ_DMA_KVA(&rx->rx_mem);
	while (idx != end) {
		rxd = &ring[idx];
		as = &rx->rx_slots[idx];

		/*
		 * NOTE(review): the slot's map is unloaded before the DD
		 * check below; presumably idx != end already guarantees
		 * the descriptor is done, so the break is not expected
		 * to be taken -- confirm against the hardware docs.
		 */
		bus_dmamap_sync(sc->sc_dmat, as->as_map, 0,
		    as->as_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, as->as_map);

		status = lemtoh16(&rxd->status);
		if ((status & AQ_RXDESC_STATUS_DD) == 0)
			break;

		rxfree++;
		mb = as->as_m;
		as->as_m = NULL;

		pktlen = lemtoh16(&rxd->pkt_len);
		rxd_type = lemtoh32(&rxd->type);
		/* propagate the RSS hash as the packet's flow id */
		if ((rxd_type & AQ_RXDESC_TYPE_RSSTYPE) != 0) {
			mb->m_pkthdr.ph_flowid = lemtoh32(&rxd->rss_hash);
			mb->m_pkthdr.csum_flags |= M_FLOWID;
		}

		/* append this fragment to the per-ring partial packet */
		mb->m_pkthdr.len = 0;
		mb->m_next = NULL;
		*rx->rx_m_tail = mb;
		rx->rx_m_tail = &mb->m_next;

		/* m is the head mbuf carrying the packet header fields */
		m = rx->rx_m_head;

#if NVLAN > 0
		if (rxd_type & (AQ_RXDESC_TYPE_VLAN | AQ_RXDESC_TYPE_VLAN2)) {
			m->m_pkthdr.ether_vtag = lemtoh16(&rxd->vlan);
			m->m_flags |= M_VLANTAG;
		}
#endif

		/* checksum offload results: ok only when checked and !NG */
		if ((rxd_type & AQ_RXDESC_TYPE_V4_SUM) &&
		    ((status & AQ_RXDESC_STATUS_V4_SUM_NG) == 0))
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

		if ((rxd_type & AQ_RXDESC_TYPE_L4_SUM) &&
		   (status & AQ_RXDESC_STATUS_L4_SUM_OK))
			m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK |
			    M_TCP_CSUM_IN_OK;

		/* errors poison the whole packet; dropped at EOP below */
		if ((status & AQ_RXDESC_STATUS_MACERR) ||
		    (rxd_type & AQ_RXDESC_TYPE_DMA_ERR)) {
			printf("%s:rx: rx error (status %x type %x)\n",
			    DEVNAME(sc), status, rxd_type);
			rx->rx_m_error = 1;
		}

		if (status & AQ_RXDESC_STATUS_EOP) {
			/* pkt_len is the total; trim the last fragment */
			mb->m_len = pktlen - m->m_pkthdr.len;
			m->m_pkthdr.len = pktlen;
			if (rx->rx_m_error != 0) {
				ifp->if_ierrors++;
				m_freem(m);
			} else {
				ml_enqueue(&ml, m);
			}

			rx->rx_m_head = NULL;
			rx->rx_m_tail = &rx->rx_m_head;
			rx->rx_m_error = 0;
		} else {
			/* intermediate fragments are always full clusters */
			mb->m_len = MCLBYTES;
			m->m_pkthdr.len += mb->m_len;
		}

		idx++;
		if (idx == AQ_RXD_NUM)
			idx = 0;
	}

	bus_dmamap_sync(sc->sc_dmat, AQ_DMA_MAP(&rx->rx_mem), 0,
	    AQ_DMA_LEN(&rx->rx_mem), BUS_DMASYNC_PREREAD);

	rx->rx_cons = idx;

	if (rxfree > 0) {
		if_rxr_put(&rx->rx_rxr, rxfree);
		if (ifiq_input(rx->rx_ifiq, &ml))
			if_rxr_livelocked(&rx->rx_rxr);

		/* replenish; fall back to the timeout if the ring is dry */
		aq_rx_fill(sc, rx);
		if (if_rxr_inuse(&rx->rx_rxr) == 0)
			timeout_add(&rx->rx_refill, 1);
	}
}
2472 
/*
 * Tx completion processing for one ring.  Reclaims descriptors from
 * the consumer up to the hardware head pointer, unloading DMA maps
 * and freeing transmitted mbufs, then restarts the ifq if it had
 * gone oactive.
 */
void
aq_txeof(struct aq_softc *sc, struct aq_txring *tx)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct aq_slot *as;
	uint32_t idx, end, free;

	if (!ISSET(ifp->if_flags, IFF_RUNNING))
		return;

	idx = tx->tx_cons;
	/* hardware head pointer marks the last completed descriptor */
	end = AQ_READ_REG(sc, TX_DMA_DESC_HEAD_PTR_REG(tx->tx_q)) &
	    TX_DMA_DESC_HEAD_PTR;
	free = 0;

	bus_dmamap_sync(sc->sc_dmat, AQ_DMA_MAP(&tx->tx_mem), 0,
	    AQ_DMA_LEN(&tx->tx_mem), BUS_DMASYNC_POSTREAD);

	while (idx != end) {
		as = &tx->tx_slots[idx];

		/* context descriptors have no mbuf attached to the slot */
		if (as->as_m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, as->as_map, 0,
			    as->as_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, as->as_map);

			m_freem(as->as_m);
			as->as_m = NULL;
		}

		idx++;
		if (idx == AQ_TXD_NUM)
			idx = 0;
		free++;
	}

	bus_dmamap_sync(sc->sc_dmat, AQ_DMA_MAP(&tx->tx_mem), 0,
	    AQ_DMA_LEN(&tx->tx_mem), BUS_DMASYNC_PREREAD);

	tx->tx_cons = idx;

	/* descriptors were reclaimed; let the stack queue more packets */
	if (free != 0) {
		if (ifq_is_oactive(tx->tx_ifq))
			ifq_restart(tx->tx_ifq);
	}
}
2519 
2520 void
2521 aq_start(struct ifqueue *ifq)
2522 {
2523 	struct aq_queues *aq = ifq->ifq_softc;
2524 	struct aq_softc *sc = aq->q_sc;
2525 	struct aq_txring *tx = &aq->q_tx;
2526 	struct aq_tx_desc *ring, *txd;
2527 	struct aq_slot *as;
2528 	struct mbuf *m;
2529 	uint32_t idx, free, used, ctl1, ctl2;
2530 	int error, i;
2531 
2532 	idx = tx->tx_prod;
2533 	free = tx->tx_cons + AQ_TXD_NUM - tx->tx_prod;
2534 	used = 0;
2535 
2536 	bus_dmamap_sync(sc->sc_dmat, AQ_DMA_MAP(&tx->tx_mem), 0,
2537 	    AQ_DMA_LEN(&tx->tx_mem), BUS_DMASYNC_POSTWRITE);
2538 	ring = (struct aq_tx_desc *)AQ_DMA_KVA(&tx->tx_mem);
2539 
2540 	for (;;) {
2541 		if (used + AQ_TX_MAX_SEGMENTS + 1 >= free) {
2542 			ifq_set_oactive(ifq);
2543 			break;
2544 		}
2545 
2546 		m = ifq_dequeue(ifq);
2547 		if (m == NULL)
2548 			break;
2549 
2550 		as = &tx->tx_slots[idx];
2551 
2552 		error = bus_dmamap_load_mbuf(sc->sc_dmat, as->as_map, m,
2553 		    BUS_DMA_STREAMING | BUS_DMA_NOWAIT);
2554 		if (error == EFBIG) {
2555 			if (m_defrag(m, M_DONTWAIT)) {
2556 				m_freem(m);
2557 				break;
2558 			}
2559 
2560 			error = bus_dmamap_load_mbuf(sc->sc_dmat, as->as_map,
2561 			    m, BUS_DMA_STREAMING | BUS_DMA_NOWAIT);
2562 		}
2563 		if (error != 0) {
2564 			m_freem(m);
2565 			break;
2566 		}
2567 
2568 		as->as_m = m;
2569 
2570 #if NBPFILTER > 0
2571 		if (ifq->ifq_if->if_bpf)
2572 			bpf_mtap_ether(ifq->ifq_if->if_bpf, m, BPF_DIRECTION_OUT);
2573 #endif
2574 		bus_dmamap_sync(sc->sc_dmat, as->as_map, 0,
2575 		    as->as_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2576 
2577 		ctl2 = m->m_pkthdr.len << AQ_TXDESC_CTL2_LEN_SHIFT;
2578 		ctl1 = AQ_TXDESC_CTL1_TYPE_TXD | AQ_TXDESC_CTL1_CMD_FCS;
2579 #if NVLAN > 0
2580 		if (m->m_flags & M_VLANTAG) {
2581 			txd = ring + idx;
2582 			txd->buf_addr = 0;
2583 			txd->ctl1 = htole32(AQ_TXDESC_CTL1_TYPE_TXC |
2584 			    (m->m_pkthdr.ether_vtag << AQ_TXDESC_CTL1_VLAN_SHIFT));
2585 			txd->ctl2 = 0;
2586 
2587 			ctl1 |= AQ_TXDESC_CTL1_CMD_VLAN;
2588 			ctl2 |= AQ_TXDESC_CTL2_CTX_EN;
2589 
2590 			idx++;
2591 			if (idx == AQ_TXD_NUM)
2592 				idx = 0;
2593 			used++;
2594 		}
2595 #endif
2596 
2597 		if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
2598 			ctl1 |= AQ_TXDESC_CTL1_CMD_IP4CSUM;
2599 		if (m->m_pkthdr.csum_flags & (M_TCP_CSUM_OUT | M_UDP_CSUM_OUT))
2600 			ctl1 |= AQ_TXDESC_CTL1_CMD_L4CSUM;
2601 
2602 		for (i = 0; i < as->as_map->dm_nsegs; i++) {
2603 
2604 			if (i == as->as_map->dm_nsegs - 1)
2605 				ctl1 |= AQ_TXDESC_CTL1_CMD_EOP |
2606 				    AQ_TXDESC_CTL1_CMD_WB;
2607 
2608 			txd = ring + idx;
2609 			txd->buf_addr = htole64(as->as_map->dm_segs[i].ds_addr);
2610 			txd->ctl1 = htole32(ctl1 |
2611 			    (as->as_map->dm_segs[i].ds_len <<
2612 			    AQ_TXDESC_CTL1_BLEN_SHIFT));
2613 			txd->ctl2 = htole32(ctl2);
2614 
2615 			idx++;
2616 			if (idx == AQ_TXD_NUM)
2617 				idx = 0;
2618 			used++;
2619 		}
2620 	}
2621 
2622 	bus_dmamap_sync(sc->sc_dmat, AQ_DMA_MAP(&tx->tx_mem), 0,
2623 	    AQ_DMA_LEN(&tx->tx_mem), BUS_DMASYNC_PREWRITE);
2624 
2625 	if (used != 0) {
2626 		tx->tx_prod = idx;
2627 		AQ_WRITE_REG(sc, TX_DMA_DESC_TAIL_PTR_REG(tx->tx_q),
2628 		    tx->tx_prod);
2629 	}
2630 }
2631 
2632 int
2633 aq_intr_queue(void *arg)
2634 {
2635 	struct aq_queues *aq = arg;
2636 	struct aq_softc *sc = aq->q_sc;
2637 	uint32_t status;
2638 	uint32_t clear;
2639 
2640 	status = AQ_READ_REG(sc, AQ_INTR_STATUS_REG);
2641 	clear = 0;
2642 	if (status & (1 << aq->q_tx.tx_irq)) {
2643 		clear |= (1 << aq->q_tx.tx_irq);
2644 		aq_txeof(sc, &aq->q_tx);
2645 	}
2646 
2647 	if (status & (1 << aq->q_rx.rx_irq)) {
2648 		clear |= (1 << aq->q_rx.rx_irq);
2649 		aq_rxeof(sc, &aq->q_rx);
2650 	}
2651 
2652 	AQ_WRITE_REG(sc, AQ_INTR_STATUS_CLR_REG, clear);
2653 	return (clear != 0);
2654 }
2655 
2656 int
2657 aq_intr_link(void *arg)
2658 {
2659 	struct aq_softc *sc = arg;
2660 	uint32_t status;
2661 
2662 	status = AQ_READ_REG(sc, AQ_INTR_STATUS_REG);
2663 	if (status & (1 << sc->sc_linkstat_irq)) {
2664 		aq_update_link_status(sc);
2665 		AQ_WRITE_REG(sc, AQ_INTR_STATUS_REG, (1 << sc->sc_linkstat_irq));
2666 		return 1;
2667 	}
2668 
2669 	return 0;
2670 }
2671 
/*
 * Single-vector (INTx/MSI) interrupt handler: everything shares one
 * vector, so service link state plus queue 0's Tx and Rx completions.
 * Always claims the interrupt.
 */
int
aq_intr(void *arg)
{
	struct aq_softc *sc = arg;
	struct aq_queues *aq = &sc->sc_queues[0];
	uint32_t status;

	status = AQ_READ_REG(sc, AQ_INTR_STATUS_REG);
	/* ack everything up front; individual bits are re-acked below */
	AQ_WRITE_REG(sc, AQ_INTR_STATUS_CLR_REG, 0xffffffff);

	if (status & (1 << sc->sc_linkstat_irq))
		aq_update_link_status(sc);

	if (status & (1 << aq->q_tx.tx_irq)) {
		aq_txeof(sc, &aq->q_tx);
		AQ_WRITE_REG(sc, AQ_INTR_STATUS_CLR_REG,
		    (1 << aq->q_tx.tx_irq));
	}
	if (status & (1 << aq->q_rx.rx_irq)) {
		aq_rxeof(sc, &aq->q_rx);
		AQ_WRITE_REG(sc, AQ_INTR_STATUS_CLR_REG,
		    (1 << aq->q_rx.rx_irq));
	}

	return 1;
}
2698 
/*
 * Interface watchdog hook; intentionally empty — no tx timeout
 * recovery is implemented for this driver.
 */
void
aq_watchdog(struct ifnet *ifp)
{

}
2704 
2705 void
2706 aq_free_slots(struct aq_softc *sc, struct aq_slot *slots, int allocated,
2707     int total)
2708 {
2709 	struct aq_slot *as;
2710 
2711 	int i = allocated;
2712 	while (i-- > 0) {
2713 		as = &slots[i];
2714 		bus_dmamap_destroy(sc->sc_dmat, as->as_map);
2715 		if (as->as_m != NULL)
2716 			m_freem(as->as_m);
2717 	}
2718 	free(slots, M_DEVBUF, total * sizeof(*as));
2719 }
2720 
/*
 * Allocate and initialize one queue pair: slot arrays and per-slot
 * DMA maps for Rx and Tx, descriptor ring memory for both, then reset
 * and enable the hardware rings.  On failure everything allocated so
 * far is unwound via the goto chain.  Returns 0 or ENOMEM.
 */
int
aq_queue_up(struct aq_softc *sc, struct aq_queues *aq)
{
	struct aq_rxring *rx;
	struct aq_txring *tx;
	struct aq_slot *as;
	int i, mtu;

	rx = &aq->q_rx;
	/* M_WAITOK should not fail; the check is defensive */
	rx->rx_slots = mallocarray(sizeof(*as), AQ_RXD_NUM, M_DEVBUF,
	    M_WAITOK | M_ZERO);
	if (rx->rx_slots == NULL) {
		printf("%s: failed to allocate rx slots %d\n", DEVNAME(sc),
		    aq->q_index);
		return ENOMEM;
	}

	/* one single-segment cluster-sized map per rx slot */
	for (i = 0; i < AQ_RXD_NUM; i++) {
		as = &rx->rx_slots[i];
		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
		    &as->as_map) != 0) {
			printf("%s: failed to allocate rx dma maps %d\n",
			    DEVNAME(sc), aq->q_index);
			goto destroy_rx_slots;
		}
	}

	if (aq_dmamem_alloc(sc, &rx->rx_mem, AQ_RXD_NUM *
	    sizeof(struct aq_rx_desc_read), PAGE_SIZE) != 0) {
		printf("%s: unable to allocate rx ring %d\n", DEVNAME(sc),
		    aq->q_index);
		goto destroy_rx_slots;
	}

	tx = &aq->q_tx;
	tx->tx_slots = mallocarray(sizeof(*as), AQ_TXD_NUM, M_DEVBUF,
	    M_WAITOK | M_ZERO);
	if (tx->tx_slots == NULL) {
		printf("%s: failed to allocate tx slots %d\n", DEVNAME(sc),
		    aq->q_index);
		goto destroy_rx_ring;
	}

	/* tx maps sized for a full MTU across up to AQ_TX_MAX_SEGMENTS */
	mtu = sc->sc_arpcom.ac_if.if_hardmtu;
	for (i = 0; i < AQ_TXD_NUM; i++) {
		as = &tx->tx_slots[i];
		if (bus_dmamap_create(sc->sc_dmat, mtu, AQ_TX_MAX_SEGMENTS,
		    MCLBYTES, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
		    &as->as_map) != 0) {
			printf("%s: failed to allocated tx dma maps %d\n",
			    DEVNAME(sc), aq->q_index);
			goto destroy_tx_slots;
		}
	}

	if (aq_dmamem_alloc(sc, &tx->tx_mem, AQ_TXD_NUM *
	    sizeof(struct aq_tx_desc), PAGE_SIZE) != 0) {
		printf("%s: unable to allocate tx ring %d\n", DEVNAME(sc),
		    aq->q_index);
		goto destroy_tx_slots;
	}

	bus_dmamap_sync(sc->sc_dmat, AQ_DMA_MAP(&tx->tx_mem),
	    0, AQ_DMA_LEN(&tx->tx_mem),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	bus_dmamap_sync(sc->sc_dmat, AQ_DMA_MAP(&rx->rx_mem),
	    0, AQ_DMA_LEN(&rx->rx_mem),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* program and enable the hardware rings */
	aq_txring_reset(sc, tx, 1);
	aq_rxring_reset(sc, rx, 1);
	return 0;

destroy_tx_slots:
	aq_free_slots(sc, tx->tx_slots, i, AQ_TXD_NUM);
	tx->tx_slots = NULL;
	/* fall through: all AQ_RXD_NUM rx maps were created above */
	i = AQ_RXD_NUM;

destroy_rx_ring:
	aq_dmamem_free(sc, &rx->rx_mem);
destroy_rx_slots:
	/* i is the number of rx maps successfully created */
	aq_free_slots(sc, rx->rx_slots, i, AQ_RXD_NUM);
	rx->rx_slots = NULL;
	return ENOMEM;
}
2808 
/*
 * Tear down one queue pair: disable the hardware rings, free all
 * slot arrays and DMA maps, drop any partially reassembled rx packet
 * and release the descriptor ring memory.
 */
void
aq_queue_down(struct aq_softc *sc, struct aq_queues *aq)
{
	struct aq_txring *tx;
	struct aq_rxring *rx;

	tx = &aq->q_tx;
	/* disable the tx ring without reprogramming it */
	aq_txring_reset(sc, &aq->q_tx, 0);
	if (tx->tx_slots != NULL) {
		aq_free_slots(sc, tx->tx_slots, AQ_TXD_NUM, AQ_TXD_NUM);
		tx->tx_slots = NULL;
	}

	bus_dmamap_sync(sc->sc_dmat, AQ_DMA_MAP(&tx->tx_mem),
	    0, AQ_DMA_LEN(&tx->tx_mem),
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	aq_dmamem_free(sc, &tx->tx_mem);

	rx = &aq->q_rx;
	/* discard any half-reassembled multi-descriptor packet */
	m_freem(rx->rx_m_head);
	rx->rx_m_head = NULL;
	rx->rx_m_tail = &rx->rx_m_head;
	rx->rx_m_error = 0;
	aq_rxring_reset(sc, &aq->q_rx, 0);
	if (rx->rx_slots != NULL) {
		aq_free_slots(sc, rx->rx_slots, AQ_RXD_NUM, AQ_RXD_NUM);
		rx->rx_slots = NULL;
	}

	bus_dmamap_sync(sc->sc_dmat, AQ_DMA_MAP(&rx->rx_mem),
	    0, AQ_DMA_LEN(&rx->rx_mem),
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	aq_dmamem_free(sc, &rx->rx_mem);
}
2845 
/*
 * Invalidate the hardware's Rx descriptor cache by toggling the
 * cache-init bit: `(cache & BIT) ^ BIT' yields the inverted bit
 * value, so each call flips it.
 */
void
aq_invalidate_rx_desc_cache(struct aq_softc *sc)
{
	uint32_t cache;

	cache = AQ_READ_REG(sc, RX_DMA_DESC_CACHE_INIT_REG);
	AQ_WRITE_REG_BIT(sc, RX_DMA_DESC_CACHE_INIT_REG, RX_DMA_DESC_CACHE_INIT,
	    (cache & RX_DMA_DESC_CACHE_INIT) ^ RX_DMA_DESC_CACHE_INIT);
}
2855 
/*
 * Bring the interface up: allocate and enable every queue pair,
 * program the station address and checksum offload, enable
 * interrupts and the packet buffers, then seed each rx ring.
 * Returns ENETRESET on success (the ioctl path treats it as
 * "reinitialized") or ENOMEM with all queues torn down.
 */
int
aq_up(struct aq_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int i;

	aq_invalidate_rx_desc_cache(sc);

	for (i = 0; i < sc->sc_nqueues; i++) {
		if (aq_queue_up(sc, &sc->sc_queues[i]) != 0)
			goto downqueues;
	}

	aq_set_mac_addr(sc, AQ_HW_MAC_OWN, sc->sc_arpcom.ac_enaddr);

	/* enable tx/rx IPv4 and L4 checksum offload engines */
	AQ_WRITE_REG_BIT(sc, TPO_HWCSUM_REG, TPO_HWCSUM_IP4CSUM_EN, 1);
	AQ_WRITE_REG_BIT(sc, TPO_HWCSUM_REG, TPO_HWCSUM_L4CSUM_EN, 1);

	AQ_WRITE_REG_BIT(sc, RPO_HWCSUM_REG, RPO_HWCSUM_IP4CSUM_EN, 1);
	AQ_WRITE_REG_BIT(sc, RPO_HWCSUM_REG, RPO_HWCSUM_L4CSUM_EN, 1);

	SET(ifp->if_flags, IFF_RUNNING);
	aq_enable_intr(sc, 1, 1);
	AQ_WRITE_REG_BIT(sc, TPB_TX_BUF_REG, TPB_TX_BUF_EN, 1);
	AQ_WRITE_REG_BIT(sc, RPB_RPF_RX_REG, RPB_RPF_RX_BUF_EN, 1);

	for (i = 0; i < sc->sc_nqueues; i++) {
		struct aq_queues *aq = &sc->sc_queues[i];

		/* post rx buffers, keeping one descriptor slot in reserve */
		if_rxr_init(&aq->q_rx.rx_rxr, howmany(ifp->if_hardmtu, MCLBYTES),
		    AQ_RXD_NUM - 1);
		aq_rx_fill(sc, &aq->q_rx);

		ifq_clr_oactive(aq->q_tx.tx_ifq);
	}

	return ENETRESET;

downqueues:
	/* unwind every queue, including ones that were never set up */
	for (i = 0; i < sc->sc_nqueues; i++)
		aq_queue_down(sc, &sc->sc_queues[i]);
	return ENOMEM;
}
2899 
/*
 * Bring the interface down: clear RUNNING, mask tx/rx interrupts
 * (link stays enabled), wait for in-flight handlers, disable the rx
 * buffer and tear down every queue pair.
 */
void
aq_down(struct aq_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int i;

	CLR(ifp->if_flags, IFF_RUNNING);

	/* keep link interrupts; drop tx/rx interrupts */
	aq_enable_intr(sc, 1, 0);
	intr_barrier(sc->sc_ih);

	AQ_WRITE_REG_BIT(sc, RPB_RPF_RX_REG, RPB_RPF_RX_BUF_EN, 0);
	for (i = 0; i < sc->sc_nqueues; i++) {
		/* queue intr barrier? */
		aq_queue_down(sc, &sc->sc_queues[i]);
	}

	aq_invalidate_rx_desc_cache(sc);
}
2919 
2920 void
2921 aq_enable_intr(struct aq_softc *sc, int link, int txrx)
2922 {
2923 	uint32_t imask = 0;
2924 	int i;
2925 
2926 	if (txrx) {
2927 		for (i = 0; i < sc->sc_nqueues; i++) {
2928 			imask |= (1 << sc->sc_queues[i].q_tx.tx_irq);
2929 			imask |= (1 << sc->sc_queues[i].q_rx.rx_irq);
2930 		}
2931 	}
2932 
2933 	if (link)
2934 		imask |= (1 << sc->sc_linkstat_irq);
2935 
2936 	AQ_WRITE_REG(sc, AQ_INTR_MASK_REG, imask);
2937 	AQ_WRITE_REG(sc, AQ_INTR_STATUS_CLR_REG, 0xffffffff);
2938 }
2939 
/*
 * ifmedia status callback: report the current firmware-negotiated
 * link speed and flow-control state.  The firmware only reports a
 * speed, so the media is always presented as autoselected.
 */
void
aq_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct aq_softc *aq = ifp->if_softc;
	enum aq_link_speed speed;
	enum aq_link_fc fc;
	int media;
	int flow;

	if (aq_get_linkmode(aq, &speed, &fc, NULL) != 0)
		return;

	/* every enum aq_link_speed value is covered, so media is set */
	switch (speed) {
	case AQ_LINK_10G:
		media = IFM_10G_T;
		break;
	case AQ_LINK_5G:
		media = IFM_5000_T;
		break;
	case AQ_LINK_2G5:
		media = IFM_2500_T;
		break;
	case AQ_LINK_1G:
		media = IFM_1000_T;
		break;
	case AQ_LINK_100M:
		media = IFM_100_TX;
		break;
	case AQ_LINK_NONE:
		media = 0;
		break;
	}

	flow = 0;
	if (fc & AQ_FC_RX)
		flow |= IFM_ETH_RXPAUSE;
	if (fc & AQ_FC_TX)
		flow |= IFM_ETH_TXPAUSE;

	ifmr->ifm_status = IFM_AVALID;
	if (speed != AQ_LINK_NONE) {
		ifmr->ifm_status |= IFM_ACTIVE;
		ifmr->ifm_active = IFM_ETHER | IFM_AUTO | media | flow;
	}
}
2985 
2986 int
2987 aq_ifmedia_change(struct ifnet *ifp)
2988 {
2989 	struct aq_softc *sc = ifp->if_softc;
2990 	enum aq_link_speed rate = AQ_LINK_NONE;
2991 	enum aq_link_fc fc = AQ_FC_NONE;
2992 
2993 	if (IFM_TYPE(sc->sc_media.ifm_media) != IFM_ETHER)
2994 		return EINVAL;
2995 
2996 	switch (IFM_SUBTYPE(sc->sc_media.ifm_media)) {
2997 	case IFM_AUTO:
2998 		rate = AQ_LINK_AUTO;
2999 		break;
3000 	case IFM_NONE:
3001 		rate = AQ_LINK_NONE;
3002 		break;
3003 	case IFM_100_TX:
3004 		rate = AQ_LINK_100M;
3005 		break;
3006 	case IFM_1000_T:
3007 		rate = AQ_LINK_1G;
3008 		break;
3009 	case IFM_2500_T:
3010 		rate = AQ_LINK_2G5;
3011 		break;
3012 	case IFM_5000_T:
3013 		rate = AQ_LINK_5G;
3014 		break;
3015 	case IFM_10G_T:
3016 		rate = AQ_LINK_10G;
3017 		break;
3018 	default:
3019 		return ENODEV;
3020 	}
3021 
3022 	if (sc->sc_media.ifm_media & IFM_FLOW)
3023 		fc = AQ_FC_ALL;
3024 
3025 	return aq_set_linkmode(sc, rate, fc, AQ_EEE_DISABLE);
3026 }
3027 
3028 void
3029 aq_update_link_status(struct aq_softc *sc)
3030 {
3031 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
3032 	enum aq_link_speed speed;
3033 	enum aq_link_fc fc;
3034 
3035 	if (aq_get_linkmode(sc, &speed, &fc, NULL) != 0)
3036 		return;
3037 
3038 	if (speed == AQ_LINK_NONE) {
3039 		if (ifp->if_link_state != LINK_STATE_DOWN) {
3040 			ifp->if_link_state = LINK_STATE_DOWN;
3041 			if_link_state_change(ifp);
3042 		}
3043 	} else {
3044 		if (ifp->if_link_state != LINK_STATE_FULL_DUPLEX) {
3045 			ifp->if_link_state = LINK_STATE_FULL_DUPLEX;
3046 			if_link_state_change(ifp);
3047 		}
3048 	}
3049 }
3050 
3051 int
3052 aq_rxrinfo(struct aq_softc *sc, struct if_rxrinfo *ifri)
3053 {
3054 	struct if_rxring_info *ifr;
3055 	int i;
3056 	int error;
3057 
3058 	ifr = mallocarray(sc->sc_nqueues, sizeof(*ifr), M_TEMP,
3059 	    M_WAITOK | M_ZERO | M_CANFAIL);
3060 	if (ifr == NULL)
3061 		return (ENOMEM);
3062 
3063 	for (i = 0; i < sc->sc_nqueues; i++) {
3064 		ifr[i].ifr_size = MCLBYTES;
3065 		ifr[i].ifr_info = sc->sc_queues[i].q_rx.rx_rxr;
3066 	}
3067 
3068 	error = if_rxr_info_ioctl(ifri, sc->sc_nqueues, ifr);
3069 	free(ifr, M_TEMP, sc->sc_nqueues * sizeof(*ifr));
3070 
3071 	return (error);
3072 }
3073 
/*
 * Interface ioctl handler.  Runs at splnet; ENETRESET from any case
 * (including ether_ioctl()) means "reprogram the rx filter" and is
 * handled once at the bottom.
 */
int
aq_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct aq_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, s;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			error = aq_up(sc);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				/* already running: just refresh the filter */
				error = ENETRESET;
			else
				error = aq_up(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				aq_down(sc);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	case SIOCGIFRXR:
		error = aq_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		/* Address lists or flags changed; reprogram the rx filter. */
		if (ifp->if_flags & IFF_RUNNING)
			aq_iff(sc);
		error = 0;
	}

	splx(s);
	return error;
}
3122 
/*
 * Program the hardware rx filter to match the interface state:
 * promiscuous, all-multicast, or an exact-match MAC address table.
 */
void
aq_iff(struct aq_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct arpcom *ac = &sc->sc_arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	int idx;

	if (ifp->if_flags & IFF_PROMISC) {
		/* Promiscuous implies all-multicast. */
		ifp->if_flags |= IFF_ALLMULTI;
		AQ_WRITE_REG_BIT(sc, RPF_L2BC_REG, RPF_L2BC_PROMISC, 1);
	} else if (ac->ac_multirangecnt > 0 ||
	    ac->ac_multicnt >= AQ_HW_MAC_NUM) {
		/*
		 * Address ranges, or more groups than the hardware has
		 * exact-match slots: fall back to accepting all multicast.
		 */
		ifp->if_flags |= IFF_ALLMULTI;
		AQ_WRITE_REG_BIT(sc, RPF_L2BC_REG, RPF_L2BC_PROMISC, 0);
		AQ_WRITE_REG_BIT(sc, RPF_MCAST_FILTER_MASK_REG,
		    RPF_MCAST_FILTER_MASK_ALLMULTI, 1);
		AQ_WRITE_REG_BIT(sc, RPF_MCAST_FILTER_REG(0),
		    RPF_MCAST_FILTER_EN, 1);
	} else {
		/* Exact filtering: slots after the own address are free. */
		ifp->if_flags &= ~IFF_ALLMULTI;
		idx = AQ_HW_MAC_OWN + 1;

		AQ_WRITE_REG_BIT(sc, RPF_L2BC_REG, RPF_L2BC_PROMISC, 0);

		/* Load each multicast group address into its own slot. */
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			aq_set_mac_addr(sc, idx++, enm->enm_addrlo);
			ETHER_NEXT_MULTI(step, enm);
		}

		/* Clear out any stale entries in the remaining slots. */
		for (; idx < AQ_HW_MAC_NUM; idx++)
			aq_set_mac_addr(sc, idx, NULL);

		AQ_WRITE_REG_BIT(sc, RPF_MCAST_FILTER_MASK_REG,
		    RPF_MCAST_FILTER_MASK_ALLMULTI, 0);
		AQ_WRITE_REG_BIT(sc, RPF_MCAST_FILTER_REG(0),
		    RPF_MCAST_FILTER_EN, 0);
	}
}
3164 
/*
 * Allocate, map, and load a contiguous DMA-safe memory region of
 * `size` bytes (used for descriptor rings).  Returns 0 on success,
 * 1 on failure with all partial resources released.
 */
int
aq_dmamem_alloc(struct aq_softc *sc, struct aq_dmamem *aqm,
    bus_size_t size, u_int align)
{
	aqm->aqm_size = size;

	/* One map with a single segment covering the whole region. */
	if (bus_dmamap_create(sc->sc_dmat, aqm->aqm_size, 1,
	    aqm->aqm_size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
	    &aqm->aqm_map) != 0)
		return (1);
	if (bus_dmamem_alloc(sc->sc_dmat, aqm->aqm_size,
	    align, 0, &aqm->aqm_seg, 1, &aqm->aqm_nsegs,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
		goto destroy;
	if (bus_dmamem_map(sc->sc_dmat, &aqm->aqm_seg, aqm->aqm_nsegs,
	    aqm->aqm_size, &aqm->aqm_kva, BUS_DMA_WAITOK) != 0)
		goto free;
	if (bus_dmamap_load(sc->sc_dmat, aqm->aqm_map, aqm->aqm_kva,
	    aqm->aqm_size, NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	return (0);
	/* Unwind in reverse order of acquisition. */
unmap:
	bus_dmamem_unmap(sc->sc_dmat, aqm->aqm_kva, aqm->aqm_size);
free:
	bus_dmamem_free(sc->sc_dmat, &aqm->aqm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, aqm->aqm_map);
	return (1);
}
3196 
/*
 * Release a region obtained with aq_dmamem_alloc(); teardown runs in
 * the exact reverse order of the allocation steps.
 */
void
aq_dmamem_free(struct aq_softc *sc, struct aq_dmamem *aqm)
{
	bus_dmamap_unload(sc->sc_dmat, aqm->aqm_map);
	bus_dmamem_unmap(sc->sc_dmat, aqm->aqm_kva, aqm->aqm_size);
	bus_dmamem_free(sc->sc_dmat, &aqm->aqm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, aqm->aqm_map);
}
3205