xref: /openbsd-src/sys/dev/pci/if_aq_pci.c (revision 198f0b5dccae76a18ee7603263e9fb6884a167ec)
1 /* $OpenBSD: if_aq_pci.c,v 1.13 2022/03/30 00:25:27 jmatthew Exp $ */
2 /*	$NetBSD: if_aq.c,v 1.27 2021/06/16 00:21:18 riastradh Exp $	*/
3 
4 /*
5  * Copyright (c) 2021 Jonathan Matthew <jonathan@d14n.org>
6  * Copyright (c) 2021 Mike Larkin <mlarkin@openbsd.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 /**
22  * aQuantia Corporation Network Driver
23  * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
24  *
25  * Redistribution and use in source and binary forms, with or without
26  * modification, are permitted provided that the following conditions
27  * are met:
28  *
29  *   (1) Redistributions of source code must retain the above
30  *   copyright notice, this list of conditions and the following
31  *   disclaimer.
32  *
33  *   (2) Redistributions in binary form must reproduce the above
34  *   copyright notice, this list of conditions and the following
35  *   disclaimer in the documentation and/or other materials provided
36  *   with the distribution.
37  *
38  *   (3) The name of the author may not be used to endorse or promote
39  *   products derived from this software without specific prior
40  *   written permission.
41  *
42  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
43  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
44  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
45  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
46  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
47  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
48  * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
49  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
50  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
51  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
52  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
53  *
54  */
55 
56 /*-
57  * Copyright (c) 2020 Ryo Shimizu <ryo@nerv.org>
58  * All rights reserved.
59  *
60  * Redistribution and use in source and binary forms, with or without
61  * modification, are permitted provided that the following conditions
62  * are met:
63  * 1. Redistributions of source code must retain the above copyright
64  *    notice, this list of conditions and the following disclaimer.
65  * 2. Redistributions in binary form must reproduce the above copyright
66  *    notice, this list of conditions and the following disclaimer in the
67  *    documentation and/or other materials provided with the distribution.
68  *
69  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
70  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
71  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
72  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
73  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
74  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
75  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
76  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
77  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
78  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
79  * POSSIBILITY OF SUCH DAMAGE.
80  */
81 #include "bpfilter.h"
82 #include "vlan.h"
83 
84 #include <sys/types.h>
85 #include <sys/device.h>
86 #include <sys/param.h>
87 #include <sys/kernel.h>
88 #include <sys/sockio.h>
89 #include <sys/systm.h>
90 #include <sys/intrmap.h>
91 
92 #include <net/if.h>
93 #include <net/if_media.h>
94 
95 #include <netinet/in.h>
96 #include <netinet/if_ether.h>
97 
98 #include <dev/pci/pcireg.h>
99 #include <dev/pci/pcivar.h>
100 #include <dev/pci/pcidevs.h>
101 
102 #if NBPFILTER > 0
103 #include <net/bpf.h>
104 #endif
105 
/* #define AQ_DEBUG 1 */
#ifdef AQ_DEBUG
/* Debug logging: call as DPRINTF(("fmt", args)) — double parens. */
#define DPRINTF(x) printf x
#else
/* Compiled out entirely when AQ_DEBUG is not defined. */
#define DPRINTF(x)
#endif /* AQ_DEBUG */

/* Autoconf device name of a softc, for printf prefixes. */
#define DEVNAME(_s)	((_s)->sc_dev.dv_xname)
114 
115 #define AQ_BAR0 				0x10
116 #define AQ_MAXQ 				8
117 
118 #define AQ_TXD_NUM 				2048
119 #define AQ_RXD_NUM 				2048
120 
121 #define AQ_TX_MAX_SEGMENTS			32
122 
123 #define AQ_LINKSTAT_IRQ				31
124 
125 #define RPF_ACTION_HOST				1
126 
127 #define AQ_FW_SOFTRESET_REG			0x0000
128 #define  AQ_FW_SOFTRESET_DIS			(1 << 14)
129 #define  AQ_FW_SOFTRESET_RESET			(1 << 15)
130 #define AQ_FW_VERSION_REG			0x0018
131 #define AQ_HW_REVISION_REG			0x001c
132 #define AQ_GLB_NVR_INTERFACE1_REG		0x0100
133 #define AQ_FW_MBOX_CMD_REG			0x0200
134 #define  AQ_FW_MBOX_CMD_EXECUTE			0x00008000
135 #define  AQ_FW_MBOX_CMD_BUSY			0x00000100
136 #define AQ_FW_MBOX_ADDR_REG			0x0208
137 #define AQ_FW_MBOX_VAL_REG			0x020C
138 #define AQ_FW_GLB_CPU_SEM_REG(i)		(0x03a0 + (i) * 4)
139 #define AQ_FW_SEM_RAM_REG			AQ_FW_GLB_CPU_SEM_REG(2)
140 #define AQ_FW_GLB_CTL2_REG			0x0404
141 #define AQ_GLB_GENERAL_PROVISIONING9_REG	0x0520
142 #define AQ_GLB_NVR_PROVISIONING2_REG		0x0534
143 #define AQ_INTR_STATUS_REG			0x2000  /* intr status */
144 #define AQ_INTR_STATUS_CLR_REG			0x2050  /* intr status clear */
145 #define AQ_INTR_MASK_REG			0x2060	/* intr mask set */
146 #define AQ_INTR_MASK_CLR_REG			0x2070	/* intr mask clear */
147 #define AQ_INTR_AUTOMASK_REG			0x2090
148 
149 /* AQ_INTR_IRQ_MAP_TXRX_REG 0x2100-0x2140 */
150 #define AQ_INTR_IRQ_MAP_TXRX_REG(i)		(0x2100 + ((i) / 2) * 4)
151 #define AQ_INTR_IRQ_MAP_TX_REG(i)		AQ_INTR_IRQ_MAP_TXRX_REG(i)
152 #define  AQ_INTR_IRQ_MAP_TX_IRQMAP(i)		(0x1FU << (((i) & 1) ? 16 : 24))
153 #define  AQ_INTR_IRQ_MAP_TX_EN(i)		(1U << (((i) & 1) ? 23 : 31))
154 #define AQ_INTR_IRQ_MAP_RX_REG(i)		AQ_INTR_IRQ_MAP_TXRX_REG(i)
155 #define  AQ_INTR_IRQ_MAP_RX_IRQMAP(i)		(0x1FU << (((i) & 1) ? 0 : 8))
156 #define  AQ_INTR_IRQ_MAP_RX_EN(i)		(1U << (((i) & 1) ? 7 : 15))
157 
158 /* AQ_GEN_INTR_MAP_REG[AQ_RINGS_NUM] 0x2180-0x2200 */
159 #define AQ_GEN_INTR_MAP_REG(i)			(0x2180 + (i) * 4)
160 #define  AQ_B0_ERR_INT				8U
161 
162 #define AQ_INTR_CTRL_REG			0x2300
163 #define  AQ_INTR_CTRL_IRQMODE			((1 << 0) | (1 << 1))
164 #define AQ_INTR_CTRL_IRQMODE_LEGACY		0
165 #define AQ_INTR_CTRL_IRQMODE_MSI		1
166 #define AQ_INTR_CTRL_IRQMODE_MSIX		2
167 #define  AQ_INTR_CTRL_MULTIVEC			(1 << 2)
168 #define  AQ_INTR_CTRL_RESET_DIS			(1 << 29)
169 #define  AQ_INTR_CTRL_RESET_IRQ			(1 << 31)
170 #define AQ_MBOXIF_POWER_GATING_CONTROL_REG	0x32a8
171 
172 #define FW_MPI_MBOX_ADDR_REG			0x0360
173 #define FW1X_MPI_INIT1_REG			0x0364
174 #define FW1X_MPI_INIT2_REG			0x0370
175 #define FW1X_MPI_EFUSEADDR_REG			0x0374
176 
177 #define FW2X_MPI_EFUSEADDR_REG			0x0364
178 #define FW2X_MPI_CONTROL_REG			0x0368  /* 64bit */
179 #define FW2X_MPI_STATE_REG			0x0370  /* 64bit */
/*
 * RBL (ROM boot loader) exit status register and its status codes.
 * (This register was previously defined twice; keep one definition.)
 */
#define FW_BOOT_EXIT_CODE_REG			0x0388
#define  RBL_STATUS_DEAD			0x0000dead
#define  RBL_STATUS_SUCCESS			0x0000abba
#define  RBL_STATUS_FAILURE			0x00000bad
#define  RBL_STATUS_HOST_BOOT			0x0000f1a7
187 #define FW_MPI_DAISY_CHAIN_STATUS_REG		0x0704
188 #define AQ_PCI_REG_CONTROL_6_REG		0x1014
189 
190 #define FW_MPI_RESETCTRL_REG			0x4000
191 #define  FW_MPI_RESETCTRL_RESET_DIS		(1 << 29)
192 
193 #define RX_SYSCONTROL_REG			0x5000
194 #define  RX_SYSCONTROL_RESET_DIS		(1 << 29)
195 
196 #define RX_TCP_RSS_HASH_REG			0x5040
197 
198 #define RPF_L2BC_REG				0x5100
199 #define  RPF_L2BC_EN				(1 << 0)
200 #define  RPF_L2BC_PROMISC			(1 << 3)
201 #define  RPF_L2BC_ACTION			0x7000
202 #define  RPF_L2BC_THRESHOLD			0xFFFF0000
203 
204 #define AQ_HW_MAC_OWN				0
205 
206 /* RPF_L2UC_*_REG[34] (actual [38]?) */
207 #define RPF_L2UC_LSW_REG(i)                     (0x5110 + (i) * 8)
208 #define RPF_L2UC_MSW_REG(i)                     (0x5114 + (i) * 8)
209 #define  RPF_L2UC_MSW_MACADDR_HI		0xFFFF
210 #define  RPF_L2UC_MSW_ACTION			0x70000
211 #define  RPF_L2UC_MSW_EN			(1 << 31)
212 #define AQ_HW_MAC_NUM				34
213 
214 /* RPF_MCAST_FILTER_REG[8] 0x5250-0x5270 */
215 #define RPF_MCAST_FILTER_REG(i)			(0x5250 + (i) * 4)
216 #define  RPF_MCAST_FILTER_EN			(1 << 31)
217 #define RPF_MCAST_FILTER_MASK_REG		0x5270
218 #define  RPF_MCAST_FILTER_MASK_ALLMULTI		(1 << 14)
219 
220 #define RPF_VLAN_MODE_REG			0x5280
221 #define  RPF_VLAN_MODE_PROMISC			(1 << 1)
222 #define  RPF_VLAN_MODE_ACCEPT_UNTAGGED		(1 << 2)
223 #define  RPF_VLAN_MODE_UNTAGGED_ACTION		0x38
224 
225 #define RPF_VLAN_TPID_REG                       0x5284
226 #define  RPF_VLAN_TPID_OUTER			0xFFFF0000
227 #define  RPF_VLAN_TPID_INNER			0xFFFF
228 
229 /* RPF_ETHERTYPE_FILTER_REG[AQ_RINGS_NUM] 0x5300-0x5380 */
230 #define RPF_ETHERTYPE_FILTER_REG(i)		(0x5300 + (i) * 4)
231 #define  RPF_ETHERTYPE_FILTER_EN		(1 << 31)
232 
233 /* RPF_L3_FILTER_REG[8] 0x5380-0x53a0 */
234 #define RPF_L3_FILTER_REG(i)			(0x5380 + (i) * 4)
235 #define  RPF_L3_FILTER_L4_EN			(1 << 31)
236 
237 #define RX_FLR_RSS_CONTROL1_REG			0x54c0
238 #define  RX_FLR_RSS_CONTROL1_EN			(1 << 31)
239 
240 #define RPF_RPB_RX_TC_UPT_REG                   0x54c4
241 #define  RPF_RPB_RX_TC_UPT_MASK(i)              (0x00000007 << ((i) * 4))
242 
243 #define RPO_HWCSUM_REG				0x5580
244 #define  RPO_HWCSUM_L4CSUM_EN			(1 << 0)
245 #define  RPO_HWCSUM_IP4CSUM_EN			(1 << 1)
246 
247 #define RPB_RPF_RX_REG				0x5700
248 #define  RPB_RPF_RX_TC_MODE			(1 << 8)
249 #define  RPB_RPF_RX_FC_MODE			0x30
250 #define  RPB_RPF_RX_BUF_EN			(1 << 0)
251 
252 /* RPB_RXB_BUFSIZE_REG[AQ_TRAFFICCLASS_NUM] 0x5710-0x5790 */
253 #define RPB_RXB_BUFSIZE_REG(i)			(0x5710 + (i) * 0x10)
254 #define  RPB_RXB_BUFSIZE			0x1FF
255 #define RPB_RXB_XOFF_REG(i)			(0x5714 + (i) * 0x10)
256 #define  RPB_RXB_XOFF_EN			(1 << 31)
257 #define  RPB_RXB_XOFF_THRESH_HI                 0x3FFF0000
258 #define  RPB_RXB_XOFF_THRESH_LO                 0x3FFF
259 
260 #define RX_DMA_DESC_CACHE_INIT_REG		0x5a00
261 #define  RX_DMA_DESC_CACHE_INIT			(1 << 0)
262 
263 #define RX_DMA_INT_DESC_WRWB_EN_REG		0x5a30
264 #define  RX_DMA_INT_DESC_WRWB_EN		(1 << 2)
265 #define  RX_DMA_INT_DESC_MODERATE_EN		(1 << 3)
266 
267 #define RX_INTR_MODERATION_CTL_REG(i)		(0x5a40 + (i) * 4)
268 #define  RX_INTR_MODERATION_CTL_EN		(1 << 1)
269 #define  RX_INTR_MODERATION_CTL_MIN		(0xFF << 8)
270 #define  RX_INTR_MODERATION_CTL_MAX		(0x1FF << 16)
271 
272 #define RX_DMA_DESC_BASE_ADDRLSW_REG(i)		(0x5b00 + (i) * 0x20)
273 #define RX_DMA_DESC_BASE_ADDRMSW_REG(i)		(0x5b04 + (i) * 0x20)
274 #define RX_DMA_DESC_REG(i)			(0x5b08 + (i) * 0x20)
275 #define  RX_DMA_DESC_LEN			(0x3FF << 3)
276 #define  RX_DMA_DESC_RESET			(1 << 25)
277 #define  RX_DMA_DESC_HEADER_SPLIT		(1 << 28)
278 #define  RX_DMA_DESC_VLAN_STRIP			(1 << 29)
279 #define  RX_DMA_DESC_EN				(1 << 31)
280 #define RX_DMA_DESC_HEAD_PTR_REG(i)		(0x5b0c + (i) * 0x20)
281 #define  RX_DMA_DESC_HEAD_PTR			0xFFF
282 #define RX_DMA_DESC_TAIL_PTR_REG(i)		(0x5b10 + (i) * 0x20)
283 #define RX_DMA_DESC_BUFSIZE_REG(i)		(0x5b18 + (i) * 0x20)
284 #define  RX_DMA_DESC_BUFSIZE_DATA		0x000F
285 #define  RX_DMA_DESC_BUFSIZE_HDR		0x0FF0
286 
287 #define RX_DMA_DCAD_REG(i)			(0x6100 + (i) * 4)
288 #define  RX_DMA_DCAD_CPUID			0xFF
289 #define  RX_DMA_DCAD_PAYLOAD_EN			(1 << 29)
290 #define  RX_DMA_DCAD_HEADER_EN			(1 << 30)
291 #define  RX_DMA_DCAD_DESC_EN			(1 << 31)
292 
293 #define RX_DMA_DCA_REG				0x6180
294 #define  RX_DMA_DCA_EN				(1 << 31)
295 #define  RX_DMA_DCA_MODE			0xF
296 
297 #define TX_SYSCONTROL_REG			0x7000
298 #define  TX_SYSCONTROL_RESET_DIS		(1 << 29)
299 
300 #define TX_TPO2_REG				0x7040
301 #define  TX_TPO2_EN				(1 << 16)
302 
303 #define TPS_DESC_VM_ARB_MODE_REG		0x7300
304 #define  TPS_DESC_VM_ARB_MODE			(1 << 0)
305 #define TPS_DESC_RATE_REG			0x7310
306 #define  TPS_DESC_RATE_TA_RST			(1 << 31)
307 #define  TPS_DESC_RATE_LIM			0x7FF
308 #define TPS_DESC_TC_ARB_MODE_REG		0x7200
309 #define  TPS_DESC_TC_ARB_MODE			0x3
310 #define TPS_DATA_TC_ARB_MODE_REG		0x7100
311 #define  TPS_DATA_TC_ARB_MODE			(1 << 0)
312 
313 /* TPS_DATA_TCT_REG[AQ_TRAFFICCLASS_NUM] 0x7110-0x7130 */
314 #define TPS_DATA_TCT_REG(i)			(0x7110 + (i) * 4)
315 #define  TPS_DATA_TCT_CREDIT_MAX		0xFFF0000
316 #define  TPS_DATA_TCT_WEIGHT			0x1FF
317 /* TPS_DATA_TCT_REG[AQ_TRAFFICCLASS_NUM] 0x7210-0x7230 */
318 #define TPS_DESC_TCT_REG(i)			(0x7210 + (i) * 4)
319 #define  TPS_DESC_TCT_CREDIT_MAX		0xFFF0000
320 #define  TPS_DESC_TCT_WEIGHT			0x1FF
321 
322 #define AQ_HW_TXBUF_MAX         160
323 #define AQ_HW_RXBUF_MAX         320
324 
325 #define TPO_HWCSUM_REG				0x7800
326 #define  TPO_HWCSUM_L4CSUM_EN			(1 << 0)
327 #define  TPO_HWCSUM_IP4CSUM_EN			(1 << 1)
328 
329 #define THM_LSO_TCP_FLAG1_REG			0x7820
330 #define  THM_LSO_TCP_FLAG1_FIRST		0xFFF
331 #define  THM_LSO_TCP_FLAG1_MID			0xFFF0000
332 #define THM_LSO_TCP_FLAG2_REG			0x7824
333 #define  THM_LSO_TCP_FLAG2_LAST			0xFFF
334 
335 #define TPB_TX_BUF_REG				0x7900
336 #define  TPB_TX_BUF_EN				(1 << 0)
337 #define  TPB_TX_BUF_SCP_INS_EN			(1 << 2)
338 #define  TPB_TX_BUF_TC_MODE_EN			(1 << 8)
339 
340 /* TPB_TXB_BUFSIZE_REG[AQ_TRAFFICCLASS_NUM] 0x7910-7990 */
341 #define TPB_TXB_BUFSIZE_REG(i)			(0x7910 + (i) * 0x10)
342 #define  TPB_TXB_BUFSIZE                        (0xFF)
343 #define TPB_TXB_THRESH_REG(i)                   (0x7914 + (i) * 0x10)
344 #define  TPB_TXB_THRESH_HI                      0x1FFF0000
345 #define  TPB_TXB_THRESH_LO                      0x1FFF
346 
347 #define AQ_HW_TX_DMA_TOTAL_REQ_LIMIT_REG	0x7b20
348 
349 #define TX_DMA_INT_DESC_WRWB_EN_REG		0x7b40
350 #define  TX_DMA_INT_DESC_WRWB_EN		(1 << 1)
351 #define  TX_DMA_INT_DESC_MODERATE_EN		(1 << 4)
352 
353 #define TX_DMA_DESC_BASE_ADDRLSW_REG(i)		(0x7c00 + (i) * 0x40)
354 #define TX_DMA_DESC_BASE_ADDRMSW_REG(i)		(0x7c04 + (i) * 0x40)
355 #define TX_DMA_DESC_REG(i)			(0x7c08 + (i) * 0x40)
356 #define  TX_DMA_DESC_LEN			0x00000FF8
357 #define  TX_DMA_DESC_EN				0x80000000
358 #define TX_DMA_DESC_HEAD_PTR_REG(i)		(0x7c0c + (i) * 0x40)
359 #define  TX_DMA_DESC_HEAD_PTR			0x00000FFF
360 #define TX_DMA_DESC_TAIL_PTR_REG(i)		(0x7c10 + (i) * 0x40)
361 #define TX_DMA_DESC_WRWB_THRESH_REG(i)		(0x7c18 + (i) * 0x40)
362 #define  TX_DMA_DESC_WRWB_THRESH		0x00003F00
363 
364 #define TDM_DCAD_REG(i)				(0x8400 + (i) * 4)
365 #define  TDM_DCAD_CPUID				0x7F
366 #define  TDM_DCAD_CPUID_EN			0x80000000
367 
368 #define TDM_DCA_REG				0x8480
369 #define  TDM_DCA_EN				(1 << 31)
370 #define  TDM_DCA_MODE				0xF
371 
372 #define TX_INTR_MODERATION_CTL_REG(i)		(0x8980 + (i) * 4)
373 #define  TX_INTR_MODERATION_CTL_EN		(1 << 1)
374 #define  TX_INTR_MODERATION_CTL_MIN		(0xFF << 8)
375 #define  TX_INTR_MODERATION_CTL_MAX		(0x1FF << 16)
376 
/*
 * __LOWEST_SET_BIT(m): isolate the least significant set bit of m.
 * __SHIFTIN(x, m): shift x into the register field whose bits are
 * given by mask m (multiplying by the lowest set bit of the mask
 * shifts x left into position).  Parameters are fully parenthesized
 * so expression arguments expand correctly (CERT PRE01-C); the
 * original applied the (uint32_t) cast to an unparenthesized __mask.
 */
#define __LOWEST_SET_BIT(__mask) \
	((((uint32_t)(__mask) - 1) & ((uint32_t)(__mask))) ^ ((uint32_t)(__mask)))
#define __SHIFTIN(__x, __mask) ((__x) * __LOWEST_SET_BIT(__mask))
379 
#if 0
/*
 * NOTE(review): this macro form of AQ_READ_REG is compiled out;
 * presumably a function of the same name is defined later in the
 * file — confirm before re-enabling.
 */
#define AQ_READ_REG(sc, reg) \
	bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (reg))

#endif
/* Write a 32-bit device register through the mapped BAR. */
#define AQ_WRITE_REG(sc, reg, val) \
	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))

/*
 * Read-modify-write a register field: clear the bits in "mask" and,
 * if "val" is non-zero, shift it into the field position described
 * by the mask (see __SHIFTIN) before writing back.
 */
#define AQ_WRITE_REG_BIT(sc, reg, mask, val)                    \
	do {                                                    \
		uint32_t _v;                                    \
		_v = AQ_READ_REG((sc), (reg));                  \
		_v &= ~(mask);                                  \
		if ((val) != 0)                                 \
			_v |= __SHIFTIN((val), (mask));         \
		AQ_WRITE_REG((sc), (reg), _v);                  \
	} while (/* CONSTCOND */ 0)
397 
/*
 * 64-bit register access composed of two 32-bit accesses: "reg" is
 * the low word, "reg + 4" the high word.  Parameters are
 * parenthesized so expression arguments expand correctly — the
 * original AQ_WRITE64_REG expanded "(uint32_t)val", "val >> 32" and
 * "reg + 4" without parenthesizing val/reg (CERT PRE01-C hazard for
 * callers passing e.g. "a | b").
 */
#define AQ_READ64_REG(sc, reg)					\
	((uint64_t)AQ_READ_REG(sc, reg) |			\
	(((uint64_t)AQ_READ_REG(sc, (reg) + 4)) << 32))

#define AQ_WRITE64_REG(sc, reg, val)				\
	do {							\
		AQ_WRITE_REG(sc, (reg), (uint32_t)(val));	\
		AQ_WRITE_REG(sc, (reg) + 4,			\
		    (uint32_t)((uint64_t)(val) >> 32));		\
	} while (/* CONSTCOND */0)
407 
/*
 * Poll until "expr" becomes true, delaying "us" microseconds between
 * attempts, for at most "n" attempts.  If errp is non-NULL, *errp is
 * set to ETIMEDOUT when the retry budget was exhausted and 0
 * otherwise.  All parameters are parenthesized at expansion
 * (CERT PRE01-C); behavior is unchanged from the original.
 */
#define WAIT_FOR(expr, us, n, errp)                             \
	do {                                                    \
		unsigned int _n;                                \
		for (_n = (n); !(expr) && _n != 0; --_n)        \
			delay((us));                            \
		if ((errp) != NULL)                             \
			*(errp) = (_n == 0) ? ETIMEDOUT : 0;    \
	} while (/* CONSTCOND */ 0)
421 
/* sc_fw_version layout: major in bits 31-24, minor 23-16, build 15-0. */
#define FW_VERSION_MAJOR(sc)	(((sc)->sc_fw_version >> 24) & 0xff)
#define FW_VERSION_MINOR(sc)	(((sc)->sc_fw_version >> 16) & 0xff)
#define FW_VERSION_BUILD(sc)	((sc)->sc_fw_version & 0xffff)

/* Flag bits stored in sc_features. */
#define FEATURES_MIPS		0x00000001
#define FEATURES_TPO2		0x00000002
#define FEATURES_RPF2		0x00000004
#define FEATURES_MPI_AQ		0x00000008
/* Hardware revision, derived at attach; REV_A/REV_B group the steppings. */
#define FEATURES_REV_A0		0x10000000
#define FEATURES_REV_A		(FEATURES_REV_A0)
#define FEATURES_REV_B0		0x20000000
#define FEATURES_REV_B1		0x40000000
#define FEATURES_REV_B		(FEATURES_REV_B0|FEATURES_REV_B1)
435 
/*
 * Lock around FW2X_MPI_{CONTROL,STATE}_REG read-modify-write cycles.
 * The expansions carry no trailing semicolon (CERT PRE11-C), so each
 * macro behaves as a single statement — safe in an unbraced
 * if/else — and call sites keep supplying their own ";".
 */
#define AQ_MPI_LOCK(sc)		mtx_enter(&(sc)->sc_mpi_mutex)
#define AQ_MPI_UNLOCK(sc)	mtx_leave(&(sc)->sc_mpi_mutex)
439 
440 #define FW2X_CTRL_10BASET_HD			(1 << 0)
441 #define FW2X_CTRL_10BASET_FD			(1 << 1)
442 #define FW2X_CTRL_100BASETX_HD			(1 << 2)
443 #define FW2X_CTRL_100BASET4_HD			(1 << 3)
444 #define FW2X_CTRL_100BASET2_HD			(1 << 4)
445 #define FW2X_CTRL_100BASETX_FD			(1 << 5)
446 #define FW2X_CTRL_100BASET2_FD			(1 << 6)
447 #define FW2X_CTRL_1000BASET_HD			(1 << 7)
448 #define FW2X_CTRL_1000BASET_FD			(1 << 8)
449 #define FW2X_CTRL_2P5GBASET_FD			(1 << 9)
450 #define FW2X_CTRL_5GBASET_FD			(1 << 10)
451 #define FW2X_CTRL_10GBASET_FD			(1 << 11)
452 #define FW2X_CTRL_RESERVED1			(1ULL << 32)
453 #define FW2X_CTRL_10BASET_EEE			(1ULL << 33)
454 #define FW2X_CTRL_RESERVED2			(1ULL << 34)
455 #define FW2X_CTRL_PAUSE				(1ULL << 35)
456 #define FW2X_CTRL_ASYMMETRIC_PAUSE		(1ULL << 36)
457 #define FW2X_CTRL_100BASETX_EEE			(1ULL << 37)
458 #define FW2X_CTRL_RESERVED3			(1ULL << 38)
459 #define FW2X_CTRL_RESERVED4			(1ULL << 39)
460 #define FW2X_CTRL_1000BASET_FD_EEE		(1ULL << 40)
461 #define FW2X_CTRL_2P5GBASET_FD_EEE		(1ULL << 41)
462 #define FW2X_CTRL_5GBASET_FD_EEE		(1ULL << 42)
463 #define FW2X_CTRL_10GBASET_FD_EEE		(1ULL << 43)
464 #define FW2X_CTRL_RESERVED5			(1ULL << 44)
465 #define FW2X_CTRL_RESERVED6			(1ULL << 45)
466 #define FW2X_CTRL_RESERVED7			(1ULL << 46)
467 #define FW2X_CTRL_RESERVED8			(1ULL << 47)
468 #define FW2X_CTRL_RESERVED9			(1ULL << 48)
469 #define FW2X_CTRL_CABLE_DIAG			(1ULL << 49)
470 #define FW2X_CTRL_TEMPERATURE			(1ULL << 50)
471 #define FW2X_CTRL_DOWNSHIFT			(1ULL << 51)
472 #define FW2X_CTRL_PTP_AVB_EN			(1ULL << 52)
473 #define FW2X_CTRL_MEDIA_DETECT			(1ULL << 53)
474 #define FW2X_CTRL_LINK_DROP			(1ULL << 54)
475 #define FW2X_CTRL_SLEEP_PROXY			(1ULL << 55)
476 #define FW2X_CTRL_WOL				(1ULL << 56)
477 #define FW2X_CTRL_MAC_STOP			(1ULL << 57)
478 #define FW2X_CTRL_EXT_LOOPBACK			(1ULL << 58)
479 #define FW2X_CTRL_INT_LOOPBACK			(1ULL << 59)
480 #define FW2X_CTRL_EFUSE_AGENT			(1ULL << 60)
481 #define FW2X_CTRL_WOL_TIMER			(1ULL << 61)
482 #define FW2X_CTRL_STATISTICS			(1ULL << 62)
483 #define FW2X_CTRL_TRANSACTION_ID		(1ULL << 63)
484 
485 #define FW2X_CTRL_RATE_100M			FW2X_CTRL_100BASETX_FD
486 #define FW2X_CTRL_RATE_1G			FW2X_CTRL_1000BASET_FD
487 #define FW2X_CTRL_RATE_2G5			FW2X_CTRL_2P5GBASET_FD
488 #define FW2X_CTRL_RATE_5G			FW2X_CTRL_5GBASET_FD
489 #define FW2X_CTRL_RATE_10G			FW2X_CTRL_10GBASET_FD
490 #define FW2X_CTRL_RATE_MASK		\
491 	(FW2X_CTRL_RATE_100M |		\
492 	 FW2X_CTRL_RATE_1G |		\
493 	 FW2X_CTRL_RATE_2G5 |		\
494 	 FW2X_CTRL_RATE_5G |		\
495 	 FW2X_CTRL_RATE_10G)
496 #define FW2X_CTRL_EEE_MASK		\
497 	(FW2X_CTRL_10BASET_EEE |	\
498 	 FW2X_CTRL_100BASETX_EEE |	\
499 	 FW2X_CTRL_1000BASET_FD_EEE |	\
500 	 FW2X_CTRL_2P5GBASET_FD_EEE |	\
501 	 FW2X_CTRL_5GBASET_FD_EEE |	\
502 	 FW2X_CTRL_10GBASET_FD_EEE)
503 
/* How the MAC firmware was (or will be) booted, detected during reset. */
enum aq_fw_bootloader_mode {
	FW_BOOT_MODE_UNKNOWN = 0,
	FW_BOOT_MODE_FLB,
	FW_BOOT_MODE_RBL_FLASH,
	FW_BOOT_MODE_RBL_HOST_BOOTLOAD
};

/* Physical medium of the port (see aq_products table). */
enum aq_media_type {
	AQ_MEDIA_TYPE_UNKNOWN = 0,
	AQ_MEDIA_TYPE_FIBRE,
	AQ_MEDIA_TYPE_TP
};

/* Link rates as a bitmask so several rates can be advertised at once. */
enum aq_link_speed {
	AQ_LINK_NONE    = 0,
	AQ_LINK_100M    = (1 << 0),
	AQ_LINK_1G      = (1 << 1),
	AQ_LINK_2G5     = (1 << 2),
	AQ_LINK_5G      = (1 << 3),
	AQ_LINK_10G     = (1 << 4)
};

/* All rates at once; autonegotiation advertises everything. */
#define AQ_LINK_ALL	(AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5 | \
			    AQ_LINK_5G | AQ_LINK_10G )
#define AQ_LINK_AUTO	AQ_LINK_ALL

/* Energy Efficient Ethernet on/off. */
enum aq_link_eee {
	AQ_EEE_DISABLE = 0,
	AQ_EEE_ENABLE = 1
};

/* Firmware MPI (management processor interface) states. */
enum aq_hw_fw_mpi_state {
	MPI_DEINIT      = 0,
	MPI_RESET       = 1,
	MPI_INIT        = 2,
	MPI_POWER       = 4
};

/* Flow-control directions, combinable as a bitmask. */
enum aq_link_fc {
        AQ_FC_NONE = 0,
        AQ_FC_RX = (1 << 0),
        AQ_FC_TX = (1 << 1),
        AQ_FC_ALL = (AQ_FC_RX | AQ_FC_TX)
};
548 
/* One DMA-safe allocation: dmamap, backing segment, size and KVA. */
struct aq_dmamem {
	bus_dmamap_t		aqm_map;
	bus_dma_segment_t	aqm_seg;
	int			aqm_nsegs;
	size_t			aqm_size;
	caddr_t			aqm_kva;
};

/* Accessors: dmamap, device (bus) address, kernel address, length. */
#define AQ_DMA_MAP(_aqm)	((_aqm)->aqm_map)
#define AQ_DMA_DVA(_aqm)	((_aqm)->aqm_map->dm_segs[0].ds_addr)
#define AQ_DMA_KVA(_aqm)	((void *)(_aqm)->aqm_kva)
#define AQ_DMA_LEN(_aqm)	((_aqm)->aqm_size)
561 
562 
/*
 * Firmware mailbox wire formats.  These are packed, 4-byte aligned
 * layouts shared with the device firmware — do not reorder or resize
 * members.
 */

/* Header common to mailbox messages. */
struct aq_mailbox_header {
        uint32_t version;
        uint32_t transaction_id;
        int32_t error;
} __packed __aligned(4);

/* MAC statistics block as reported through the mailbox. */
struct aq_hw_stats_s {
        uint32_t uprc;
        uint32_t mprc;
        uint32_t bprc;
        uint32_t erpt;
        uint32_t uptc;
        uint32_t mptc;
        uint32_t bptc;
        uint32_t erpr;
        uint32_t mbtc;
        uint32_t bbtc;
        uint32_t mbrc;
        uint32_t bbrc;
        uint32_t ubrc;
        uint32_t ubtc;
        uint32_t ptc;
        uint32_t prc;
        uint32_t dpc;   /* not exists in fw2x_msm_statistics */
        uint32_t cprc;  /* not exists in fw2x_msm_statistics */
} __packed __aligned(4);

/* FW 2.x capability words (low/high halves of a 64-bit caps value). */
struct aq_fw2x_capabilities {
        uint32_t caps_lo;
        uint32_t caps_hi;
} __packed __aligned(4);

/* FW 2.x statistics; same leading fields as aq_hw_stats_s, minus dpc/cprc. */
struct aq_fw2x_msm_statistics {
	uint32_t uprc;
	uint32_t mprc;
	uint32_t bprc;
	uint32_t erpt;
	uint32_t uptc;
	uint32_t mptc;
	uint32_t bptc;
	uint32_t erpr;
	uint32_t mbtc;
	uint32_t bbtc;
	uint32_t mbrc;
	uint32_t bbrc;
	uint32_t ubrc;
	uint32_t ubtc;
	uint32_t ptc;
	uint32_t prc;
} __packed __aligned(4);

/* Cable diagnostics result, one word per lane. */
struct aq_fw2x_phy_cable_diag_data {
	uint32_t lane_data[4];
} __packed __aligned(4);

/* FW 2.x mailbox image read from the device (leading portion only). */
struct aq_fw2x_mailbox {		/* struct fwHostInterface */
	struct aq_mailbox_header header;
	struct aq_fw2x_msm_statistics msm;	/* msmStatistics_t msm; */

	uint32_t phy_info1;
#define PHYINFO1_FAULT_CODE	__BITS(31,16)
#define PHYINFO1_PHY_H_BIT	__BITS(0,15)
	uint32_t phy_info2;
#define PHYINFO2_TEMPERATURE	__BITS(15,0)
#define PHYINFO2_CABLE_LEN	__BITS(23,16)

	struct aq_fw2x_phy_cable_diag_data diag_data;
	uint32_t reserved[8];

	struct aq_fw2x_capabilities caps;

	/* ... */
} __packed __aligned(4);
636 
/*
 * DMA descriptor layouts shared with the hardware — packed, exact
 * sizes; do not reorder or resize members.
 */

/* RX descriptor as posted by the host: packet and header buffer addresses. */
struct aq_rx_desc_read {
	uint64_t		buf_addr;
	uint64_t		hdr_addr;
} __packed;

/* RX descriptor completion (write-back) format filled in by the device. */
struct aq_rx_desc_wb {
	uint32_t		type;
#define AQ_RXDESC_TYPE_RSSTYPE	0x000f
#define AQ_RXDESC_TYPE_ETHER	0x0030
#define AQ_RXDESC_TYPE_PROTO	0x01c0
#define AQ_RXDESC_TYPE_VLAN	(1 << 9)
#define AQ_RXDESC_TYPE_VLAN2	(1 << 10)
#define AQ_RXDESC_TYPE_DMA_ERR	(1 << 12)
#define AQ_RXDESC_TYPE_V4_SUM	(1 << 19)
#define AQ_RXDESC_TYPE_L4_SUM	(1 << 20)
	uint32_t		rss_hash;
	uint16_t		status;
#define AQ_RXDESC_STATUS_DD	(1 << 0)
#define AQ_RXDESC_STATUS_EOP	(1 << 1)
#define AQ_RXDESC_STATUS_MACERR (1 << 2)
#define AQ_RXDESC_STATUS_V4_SUM_NG (1 << 3)
#define AQ_RXDESC_STATUS_L4_SUM_ERR (1 << 4)
#define AQ_RXDESC_STATUS_L4_SUM_OK (1 << 5)
	uint16_t		pkt_len;
	uint16_t		next_desc_ptr;
	uint16_t		vlan;
} __packed;

/* TX descriptor: buffer address plus two control words. */
struct aq_tx_desc {
	uint64_t		buf_addr;
	uint32_t		ctl1;
#define AQ_TXDESC_CTL1_TYPE_TXD	0x00000001
#define AQ_TXDESC_CTL1_TYPE_TXC	0x00000002
#define AQ_TXDESC_CTL1_BLEN_SHIFT 4
#define AQ_TXDESC_CTL1_VLAN_SHIFT 4
#define AQ_TXDESC_CTL1_DD	(1 << 20)
#define AQ_TXDESC_CTL1_CMD_EOP	(1 << 21)
#define AQ_TXDESC_CTL1_CMD_VLAN	(1 << 22)
#define AQ_TXDESC_CTL1_CMD_FCS	(1 << 23)
#define AQ_TXDESC_CTL1_CMD_IP4CSUM (1 << 24)
#define AQ_TXDESC_CTL1_CMD_L4CSUM (1 << 25)
#define AQ_TXDESC_CTL1_CMD_WB	(1 << 27)

#define AQ_TXDESC_CTL1_VID_SHIFT 4
	uint32_t		ctl2;
#define AQ_TXDESC_CTL2_LEN_SHIFT 14
#define AQ_TXDESC_CTL2_CTX_EN	(1 << 13)
} __packed;
685 
/* One ring slot: the dmamap and the mbuf currently mapped into it. */
struct aq_slot {
	bus_dmamap_t		 as_map;
	struct mbuf		*as_m;
};

/* Per-queue receive ring state. */
struct aq_rxring {
	struct ifiqueue		*rx_ifiq;
	struct aq_dmamem	 rx_mem;	/* descriptor memory */
	struct aq_slot		*rx_slots;	/* one per descriptor */
	int			 rx_q;		/* ring index */
	int			 rx_irq;	/* interrupt vector */

	struct timeout		 rx_refill;
	struct if_rxring	 rx_rxr;
	uint32_t		 rx_prod;
	uint32_t		 rx_cons;

	/* chain being assembled across multi-descriptor packets */
	struct mbuf		*rx_m_head;
	struct mbuf		**rx_m_tail;
	int			 rx_m_error;
};

/* Per-queue transmit ring state. */
struct aq_txring {
	struct ifqueue		*tx_ifq;
	struct aq_dmamem	 tx_mem;	/* descriptor memory */
	struct aq_slot		*tx_slots;	/* one per descriptor */
	int			 tx_q;		/* ring index */
	int			 tx_irq;	/* interrupt vector */
	uint32_t		 tx_prod;
	uint32_t		 tx_cons;
};

/* An RX/TX ring pair sharing one interrupt. */
struct aq_queues {
	char			 q_name[16];
	void			*q_ihc;
	struct aq_softc		*q_sc;
	int			 q_index;
	struct aq_rxring 	 q_rx;
	struct aq_txring 	 q_tx;
};


struct aq_softc;
/* Dispatch table abstracting the FW 1.x vs 2.x mailbox protocols. */
struct aq_firmware_ops {
	int (*reset)(struct aq_softc *);
	int (*set_mode)(struct aq_softc *, enum aq_hw_fw_mpi_state,
	    enum aq_link_speed, enum aq_link_fc, enum aq_link_eee);
	int (*get_mode)(struct aq_softc *, enum aq_hw_fw_mpi_state *,
	    enum aq_link_speed *, enum aq_link_fc *, enum aq_link_eee *);
	int (*get_stats)(struct aq_softc *, struct aq_hw_stats_s *);
};
737 
/* Per-device driver state. */
struct aq_softc {
	struct device		sc_dev;
	uint16_t		sc_product;
	uint16_t		sc_revision;
	bus_dma_tag_t		sc_dmat;
	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_pcitag;
	int			sc_nqueues;
	struct aq_queues	sc_queues[AQ_MAXQ];
	struct intrmap		*sc_intrmap;
	void			*sc_ih;		/* link interrupt handle */
	bus_space_handle_t	sc_ioh;
	bus_space_tag_t		sc_iot;

	/* firmware state discovered at attach */
	uint32_t		sc_mbox_addr;
	int			sc_rbl_enabled;
	int			sc_fast_start_enabled;
	int			sc_flash_present;
	uint32_t		sc_fw_version;	/* see FW_VERSION_* macros */
	const struct		aq_firmware_ops *sc_fw_ops;
	uint64_t		sc_fw_caps;
	enum aq_media_type	sc_media_type;
	enum aq_link_speed	sc_available_rates;
	uint32_t		sc_features;	/* FEATURES_* bits */
	int			sc_linkstat_irq;
	struct arpcom		sc_arpcom;
	struct ifmedia		sc_media;

	struct ether_addr	sc_enaddr;
	struct mutex		sc_mpi_mutex;	/* see AQ_MPI_LOCK() */
};
769 
/* PCI IDs this driver attaches to (see also aq_products for rates). */
const struct pci_matchid aq_devices[] = {
	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC100 },
	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC107 },
	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC108 },
	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC109 },
	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC111 },
	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC112 },
	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC100S },
	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC107S },
	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC108S },
	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC109S },
	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC111S },
	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC112S },
	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_D100 },
	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_D107 },
	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_D108 },
	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_D109 },
};
788 
789 const struct aq_product {
790 	pci_vendor_id_t aq_vendor;
791 	pci_product_id_t aq_product;
792 	enum aq_media_type aq_media_type;
793 	enum aq_link_speed aq_available_rates;
794 } aq_products[] = {
795 {	PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC100,
796 	AQ_MEDIA_TYPE_FIBRE, AQ_LINK_ALL
797 },
798 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC107,
799 	AQ_MEDIA_TYPE_TP, AQ_LINK_ALL
800 },
801 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC108,
802 	AQ_MEDIA_TYPE_TP, AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5 | AQ_LINK_5G
803 },
804 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC109,
805 	AQ_MEDIA_TYPE_TP, AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5
806 },
807 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC111,
808 	AQ_MEDIA_TYPE_TP, AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5 | AQ_LINK_5G
809 },
810 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC112,
811 	AQ_MEDIA_TYPE_TP, AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5
812 },
813 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC100S,
814 	AQ_MEDIA_TYPE_FIBRE, AQ_LINK_ALL
815 },
816 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC107S,
817 	AQ_MEDIA_TYPE_TP, AQ_LINK_ALL
818 },
819 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC108S,
820 	AQ_MEDIA_TYPE_TP, AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5 | AQ_LINK_5G
821 },
822 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC109S,
823 	AQ_MEDIA_TYPE_TP, AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5
824 },
825 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC111S,
826 	AQ_MEDIA_TYPE_TP, AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5 | AQ_LINK_5G
827 },
828 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC112S,
829 	AQ_MEDIA_TYPE_TP, AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5
830 },
831 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_D100,
832 	AQ_MEDIA_TYPE_FIBRE, AQ_LINK_ALL
833 },
834 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_D107,
835 	AQ_MEDIA_TYPE_TP, AQ_LINK_ALL
836 },
837 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_D108,
838 	AQ_MEDIA_TYPE_TP, AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5 | AQ_LINK_5G
839 },
840 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_D109,
841 	AQ_MEDIA_TYPE_TP, AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5
842 }
843 };
844 
845 int	aq_match(struct device *, void *, void *);
846 void	aq_attach(struct device *, struct device *, void *);
847 int	aq_detach(struct device *, int);
848 int	aq_activate(struct device *, int);
849 int	aq_intr(void *);
850 int	aq_intr_link(void *);
851 int	aq_intr_queue(void *);
852 void	aq_global_software_reset(struct aq_softc *);
853 int	aq_fw_reset(struct aq_softc *);
854 int	aq_mac_soft_reset(struct aq_softc *, enum aq_fw_bootloader_mode *);
855 int	aq_mac_soft_reset_rbl(struct aq_softc *, enum aq_fw_bootloader_mode *);
856 int	aq_mac_soft_reset_flb(struct aq_softc *);
857 int	aq_fw_read_version(struct aq_softc *);
858 int	aq_fw_version_init(struct aq_softc *);
859 int	aq_hw_init_ucp(struct aq_softc *);
860 int	aq_fw_downld_dwords(struct aq_softc *, uint32_t, uint32_t *, uint32_t);
861 int	aq_get_mac_addr(struct aq_softc *);
862 int	aq_hw_reset(struct aq_softc *);
863 int	aq_hw_init(struct aq_softc *, int, int);
864 void	aq_hw_qos_set(struct aq_softc *);
865 void	aq_l3_filter_set(struct aq_softc *);
866 void	aq_hw_init_tx_path(struct aq_softc *);
867 void	aq_hw_init_rx_path(struct aq_softc *);
868 int	aq_set_mac_addr(struct aq_softc *, int, uint8_t *);
869 int	aq_set_linkmode(struct aq_softc *, enum aq_link_speed,
870     enum aq_link_fc, enum aq_link_eee);
871 void	aq_watchdog(struct ifnet *);
872 void	aq_enable_intr(struct aq_softc *, int, int);
873 int	aq_rxrinfo(struct aq_softc *, struct if_rxrinfo *);
874 int	aq_ioctl(struct ifnet *, u_long, caddr_t);
875 int	aq_up(struct aq_softc *);
876 void	aq_down(struct aq_softc *);
877 void	aq_iff(struct aq_softc *);
878 void	aq_start(struct ifqueue *);
879 void	aq_ifmedia_status(struct ifnet *, struct ifmediareq *);
880 int	aq_ifmedia_change(struct ifnet *);
881 void	aq_update_link_status(struct aq_softc *);
882 
883 void	aq_refill(void *);
884 int	aq_rx_fill(struct aq_softc *, struct aq_rxring *);
885 static inline unsigned int aq_rx_fill_slots(struct aq_softc *,
886 	    struct aq_rxring *, uint);
887 
888 int	aq_dmamem_alloc(struct aq_softc *, struct aq_dmamem *,
889 	    bus_size_t, u_int);
890 void	aq_dmamem_zero(struct aq_dmamem *);
891 void	aq_dmamem_free(struct aq_softc *, struct aq_dmamem *);
892 
893 int	aq_fw1x_reset(struct aq_softc *);
894 int	aq_fw1x_get_mode(struct aq_softc *, enum aq_hw_fw_mpi_state *,
895     enum aq_link_speed *, enum aq_link_fc *, enum aq_link_eee *);
896 int	aq_fw1x_set_mode(struct aq_softc *, enum aq_hw_fw_mpi_state,
897     enum aq_link_speed, enum aq_link_fc, enum aq_link_eee);
898 int	aq_fw1x_get_stats(struct aq_softc *, struct aq_hw_stats_s *);
899 
900 int	aq_fw2x_reset(struct aq_softc *);
901 int	aq_fw2x_get_mode(struct aq_softc *, enum aq_hw_fw_mpi_state *,
902     enum aq_link_speed *, enum aq_link_fc *, enum aq_link_eee *);
903 int	aq_fw2x_set_mode(struct aq_softc *, enum aq_hw_fw_mpi_state,
904     enum aq_link_speed, enum aq_link_fc, enum aq_link_eee);
905 int	aq_fw2x_get_stats(struct aq_softc *, struct aq_hw_stats_s *);
906 
/*
 * Dispatch table for 1.x firmware.  Selected by aq_fw_version_init()
 * from the major version read out of AQ_FW_VERSION_REG.
 */
const struct aq_firmware_ops aq_fw1x_ops = {
	.reset = aq_fw1x_reset,
	.set_mode = aq_fw1x_set_mode,
	.get_mode = aq_fw1x_get_mode,
	.get_stats = aq_fw1x_get_stats,
};
913 
/*
 * Dispatch table for 2.x/3.x firmware.  Selected by aq_fw_version_init()
 * from the major version read out of AQ_FW_VERSION_REG.
 */
const struct aq_firmware_ops aq_fw2x_ops = {
	.reset = aq_fw2x_reset,
	.set_mode = aq_fw2x_set_mode,
	.get_mode = aq_fw2x_get_mode,
	.get_stats = aq_fw2x_get_stats,
};
920 
/*
 * Autoconf attachment glue: match/attach/activate.  No detach routine
 * is provided (NULL), even though aq_detach() is prototyped above.
 */
struct cfattach aq_ca = {
	sizeof(struct aq_softc), aq_match, aq_attach, NULL,
	aq_activate
};
925 
/* Driver definition: devices attach as "aq0", "aq1", ... (network class). */
struct cfdriver aq_cd = {
	NULL, "aq", DV_IFNET
};
929 
930 uint32_t
931 AQ_READ_REG(struct aq_softc *sc, uint32_t reg)
932 {
933 	uint32_t res;
934 
935 	res = bus_space_read_4(sc->sc_iot, sc->sc_ioh, reg);
936 
937 	return res;
938 }
939 
940 
941 int
942 aq_match(struct device *dev, void *match, void *aux)
943 {
944 	return pci_matchbyid((struct pci_attach_args *)aux, aq_devices,
945 	    sizeof(aq_devices) / sizeof(aq_devices[0]));
946 }
947 
948 const struct aq_product *
949 aq_lookup(const struct pci_attach_args *pa)
950 {
951 	unsigned int i;
952 
953 	for (i = 0; i < sizeof(aq_products) / sizeof(aq_products[0]); i++) {
954 	if (PCI_VENDOR(pa->pa_id) == aq_products[i].aq_vendor &&
955 		PCI_PRODUCT(pa->pa_id) == aq_products[i].aq_product) {
956 			return &aq_products[i];
957 		}
958 	}
959 
960 	return NULL;
961 }
962 
963 void
964 aq_attach(struct device *parent, struct device *self, void *aux)
965 {
966 	struct aq_softc *sc = (struct aq_softc *)self;
967 	struct pci_attach_args *pa = aux;
968 	const struct aq_product *aqp;
969 	pcireg_t bar, memtype;
970 	pci_chipset_tag_t pc;
971 	pci_intr_handle_t ih;
972 	int (*isr)(void *);
973 	const char *intrstr;
974 	pcitag_t tag;
975 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
976 	int txmin, txmax, rxmin, rxmax;
977 	int irqmode, irqnum;
978 	int i;
979 
980 	mtx_init(&sc->sc_mpi_mutex, IPL_NET);
981 
982 	sc->sc_dmat = pa->pa_dmat;
983 	sc->sc_pc = pc = pa->pa_pc;
984 	sc->sc_pcitag = tag = pa->pa_tag;
985 
986 	sc->sc_product = PCI_PRODUCT(pa->pa_id);
987 	sc->sc_revision = PCI_REVISION(pa->pa_class);
988 
989 	aqp = aq_lookup(pa);
990 
991 	bar = pci_conf_read(pc, tag, AQ_BAR0);
992 	if (PCI_MAPREG_TYPE(bar) != PCI_MAPREG_TYPE_MEM) {
993 		printf(": wrong BAR type\n");
994 		return;
995 	}
996 
997 	memtype = pci_mapreg_type(pc, tag, AQ_BAR0);
998 	if (pci_mapreg_map(pa, AQ_BAR0, memtype, 0, &sc->sc_iot, &sc->sc_ioh,
999 	    NULL, NULL, 0)) {
1000 		printf(": failed to map BAR0\n");
1001 		return;
1002 	}
1003 
1004 	sc->sc_nqueues = 1;
1005 	sc->sc_linkstat_irq = AQ_LINKSTAT_IRQ;
1006 	isr = aq_intr;
1007 	irqnum = 0;
1008 
1009 	if (pci_intr_map_msix(pa, 0, &ih) == 0) {
1010 		int nmsix = pci_intr_msix_count(pa);
1011 		if (nmsix > 1) {
1012 			nmsix--;
1013 			sc->sc_intrmap = intrmap_create(&sc->sc_dev,
1014 			    nmsix, AQ_MAXQ, INTRMAP_POWEROF2);
1015 			sc->sc_nqueues = intrmap_count(sc->sc_intrmap);
1016 			KASSERT(sc->sc_nqueues > 0);
1017 			KASSERT(powerof2(sc->sc_nqueues));
1018 
1019 			sc->sc_linkstat_irq = 0;
1020 			isr = aq_intr_link;
1021 			irqnum++;
1022 		}
1023 		irqmode = AQ_INTR_CTRL_IRQMODE_MSIX;
1024 	} else if (pci_intr_map_msi(pa, &ih) == 0) {
1025 		irqmode = AQ_INTR_CTRL_IRQMODE_MSI;
1026 	} else if (pci_intr_map(pa, &ih) == 0) {
1027 		irqmode = AQ_INTR_CTRL_IRQMODE_LEGACY;
1028 	} else {
1029 		printf(": failed to map interrupt\n");
1030 		return;
1031 	}
1032 
1033 	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih,
1034 	    IPL_NET | IPL_MPSAFE, isr, sc, self->dv_xname);
1035 	intrstr = pci_intr_string(pa->pa_pc, ih);
1036 	if (intrstr)
1037 		printf(": %s", intrstr);
1038 
1039 	if (sc->sc_nqueues > 1)
1040 		printf(", %d queues", sc->sc_nqueues);
1041 
1042 	if (aq_fw_reset(sc))
1043 		return;
1044 
1045 	DPRINTF((", FW version 0x%x", sc->sc_fw_version));
1046 
1047 	if (aq_fw_version_init(sc))
1048 		return;
1049 
1050 	if (aq_hw_init_ucp(sc))
1051 		return;
1052 
1053 	if (aq_hw_reset(sc))
1054 		return;
1055 
1056 	if (aq_get_mac_addr(sc))
1057 		return;
1058 
1059 	if (aq_hw_init(sc, irqmode, (sc->sc_nqueues > 1)))
1060 		return;
1061 
1062 	sc->sc_media_type = aqp->aq_media_type;
1063 	sc->sc_available_rates = aqp->aq_available_rates;
1064 
1065 	ifmedia_init(&sc->sc_media, IFM_IMASK, aq_ifmedia_change,
1066 	    aq_ifmedia_status);
1067 
1068 	bcopy(sc->sc_enaddr.ether_addr_octet, sc->sc_arpcom.ac_enaddr, 6);
1069 	strlcpy(ifp->if_xname, self->dv_xname, IFNAMSIZ);
1070 	ifp->if_softc = sc;
1071 	ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX;
1072 	ifp->if_xflags = IFXF_MPSAFE;
1073 	ifp->if_ioctl = aq_ioctl;
1074 	ifp->if_qstart = aq_start;
1075 	ifp->if_watchdog = aq_watchdog;
1076 	ifp->if_hardmtu = 9000;
1077 	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
1078 	    IFCAP_CSUM_UDPv4 | IFCAP_CSUM_UDPv6 | IFCAP_CSUM_TCPv4 |
1079 	    IFCAP_CSUM_TCPv6;
1080 #if NVLAN > 0
1081 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
1082 #endif
1083 	ifq_set_maxlen(&ifp->if_snd, AQ_TXD_NUM);
1084 
1085 	ifmedia_init(&sc->sc_media, IFM_IMASK, aq_ifmedia_change,
1086 	    aq_ifmedia_status);
1087 	if (sc->sc_available_rates & AQ_LINK_100M) {
1088 		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_100_TX, 0, NULL);
1089 		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_100_TX|IFM_FDX, 0,
1090 		    NULL);
1091 	}
1092 
1093 	if (sc->sc_available_rates & AQ_LINK_1G) {
1094 		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_1000_T, 0, NULL);
1095 		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_1000_T|IFM_FDX, 0,
1096 		    NULL);
1097 	}
1098 
1099 	if (sc->sc_available_rates & AQ_LINK_2G5) {
1100 		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T, 0, NULL);
1101 		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T | IFM_FDX,
1102 		    0, NULL);
1103 	}
1104 
1105 	if (sc->sc_available_rates & AQ_LINK_5G) {
1106 		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_5000_T, 0, NULL);
1107 		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_5000_T | IFM_FDX,
1108 		    0, NULL);
1109 	}
1110 
1111 	if (sc->sc_available_rates & AQ_LINK_10G) {
1112 		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10G_T, 0, NULL);
1113 		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10G_T | IFM_FDX,
1114 		    0, NULL);
1115 	}
1116 
1117 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1118 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO | IFM_FDX, 0, NULL);
1119 	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
1120 	aq_set_linkmode(sc, AQ_LINK_AUTO, AQ_FC_NONE, AQ_EEE_DISABLE);
1121 
1122         if_attach(ifp);
1123         ether_ifattach(ifp);
1124 
1125 	if_attach_iqueues(ifp, sc->sc_nqueues);
1126 	if_attach_queues(ifp, sc->sc_nqueues);
1127 
1128 	/*
1129 	 * set interrupt moderation for up to 20k interrupts per second,
1130 	 * more rx than tx.  these values are in units of 2us.
1131 	 */
1132 	txmin = 20;
1133 	txmax = 200;
1134 	rxmin = 6;
1135 	rxmax = 60;
1136 
1137 	for (i = 0; i < sc->sc_nqueues; i++) {
1138 		struct aq_queues *aq = &sc->sc_queues[i];
1139 		struct aq_rxring *rx = &aq->q_rx;
1140 		struct aq_txring *tx = &aq->q_tx;
1141 		pci_intr_handle_t ih;
1142 
1143 		aq->q_sc = sc;
1144 		aq->q_index = i;
1145 		rx->rx_q = i;
1146 		rx->rx_ifiq = ifp->if_iqs[i];
1147 		rx->rx_m_head = NULL;
1148 		rx->rx_m_tail = &rx->rx_m_head;
1149 		rx->rx_m_error = 0;
1150 		ifp->if_iqs[i]->ifiq_softc = aq;
1151 		timeout_set(&rx->rx_refill, aq_refill, rx);
1152 
1153 		tx->tx_q = i;
1154 		tx->tx_ifq = ifp->if_ifqs[i];
1155 		ifp->if_ifqs[i]->ifq_softc = aq;
1156 
1157 		snprintf(aq->q_name, sizeof(aq->q_name), "%s:%u",
1158 		    DEVNAME(sc), i);
1159 
1160 		if (sc->sc_nqueues > 1) {
1161 			if (pci_intr_map_msix(pa, irqnum, &ih)) {
1162 				printf("%s: unable to map msi-x vector %d\n",
1163 				    DEVNAME(sc), irqnum);
1164 				return;
1165 			}
1166 
1167 			aq->q_ihc = pci_intr_establish_cpu(sc->sc_pc, ih,
1168 			    IPL_NET | IPL_MPSAFE, intrmap_cpu(sc->sc_intrmap, i),
1169 			    aq_intr_queue, aq, aq->q_name);
1170 			if (aq->q_ihc == NULL) {
1171 				printf("%s: unable to establish interrupt %d\n",
1172 				    DEVNAME(sc), irqnum);
1173 				return;
1174 			}
1175 			rx->rx_irq = irqnum;
1176 			tx->tx_irq = irqnum;
1177 			irqnum++;
1178 		} else {
1179 			rx->rx_irq = irqnum++;
1180 			tx->tx_irq = irqnum++;
1181 		}
1182 
1183 		AQ_WRITE_REG_BIT(sc, TX_INTR_MODERATION_CTL_REG(i),
1184 		    TX_INTR_MODERATION_CTL_MIN, txmin);
1185 		AQ_WRITE_REG_BIT(sc, TX_INTR_MODERATION_CTL_REG(i),
1186 		    TX_INTR_MODERATION_CTL_MAX, txmax);
1187 		AQ_WRITE_REG_BIT(sc, TX_INTR_MODERATION_CTL_REG(i),
1188 		    TX_INTR_MODERATION_CTL_EN, 1);
1189 		AQ_WRITE_REG_BIT(sc, RX_INTR_MODERATION_CTL_REG(i),
1190 		    RX_INTR_MODERATION_CTL_MIN, rxmin);
1191 		AQ_WRITE_REG_BIT(sc, RX_INTR_MODERATION_CTL_REG(i),
1192 		    RX_INTR_MODERATION_CTL_MAX, rxmax);
1193 		AQ_WRITE_REG_BIT(sc, RX_INTR_MODERATION_CTL_REG(i),
1194 		    RX_INTR_MODERATION_CTL_EN, 1);
1195 	}
1196 
1197 	AQ_WRITE_REG_BIT(sc, TX_DMA_INT_DESC_WRWB_EN_REG,
1198 	    TX_DMA_INT_DESC_WRWB_EN, 0);
1199 	AQ_WRITE_REG_BIT(sc, TX_DMA_INT_DESC_WRWB_EN_REG,
1200 	    TX_DMA_INT_DESC_MODERATE_EN, 1);
1201 	AQ_WRITE_REG_BIT(sc, RX_DMA_INT_DESC_WRWB_EN_REG,
1202 	    RX_DMA_INT_DESC_WRWB_EN, 0);
1203 	AQ_WRITE_REG_BIT(sc, RX_DMA_INT_DESC_WRWB_EN_REG,
1204 	    RX_DMA_INT_DESC_MODERATE_EN, 1);
1205 
1206 	aq_enable_intr(sc, 1, 0);
1207 	printf("\n");
1208 }
1209 
1210 int
1211 aq_fw_reset(struct aq_softc *sc)
1212 {
1213 	uint32_t ver, v, boot_exit_code;
1214 	int i, error;
1215 	enum aq_fw_bootloader_mode mode;
1216 
1217 	mode = FW_BOOT_MODE_UNKNOWN;
1218 
1219 	ver = AQ_READ_REG(sc, AQ_FW_VERSION_REG);
1220 
1221 	for (i = 1000; i > 0; i--) {
1222 		v = AQ_READ_REG(sc, FW_MPI_DAISY_CHAIN_STATUS_REG);
1223 		boot_exit_code = AQ_READ_REG(sc, FW_BOOT_EXIT_CODE_REG);
1224 		if (v != 0x06000000 || boot_exit_code != 0)
1225 			break;
1226 	}
1227 
1228 	if (i <= 0) {
1229 		printf("%s: F/W reset failed. Neither RBL nor FLB started",
1230 		    DEVNAME(sc));
1231 		return ETIMEDOUT;
1232 	}
1233 
1234 	sc->sc_rbl_enabled = (boot_exit_code != 0);
1235 
1236 	/*
1237 	 * Having FW version 0 is an indicator that cold start
1238 	 * is in progress. This means two things:
1239 	 * 1) Driver have to wait for FW/HW to finish boot (500ms giveup)
1240 	 * 2) Driver may skip reset sequence and save time.
1241 	 */
1242 	if (sc->sc_fast_start_enabled && (ver != 0)) {
1243 		error = aq_fw_read_version(sc);
1244 		/* Skip reset as it just completed */
1245 		if (error == 0)
1246 			return 0;
1247 	}
1248 
1249 	error = aq_mac_soft_reset(sc, &mode);
1250 	if (error != 0) {
1251 		printf("%s: MAC reset failed: %d\n", DEVNAME(sc), error);
1252 		return error;
1253 	}
1254 
1255 	switch (mode) {
1256 	case FW_BOOT_MODE_FLB:
1257 		DPRINTF(("%s: FLB> F/W successfully loaded from flash.",
1258 		    DEVNAME(sc)));
1259 		sc->sc_flash_present = 1;
1260 		return aq_fw_read_version(sc);
1261 	case FW_BOOT_MODE_RBL_FLASH:
1262 		DPRINTF(("%s: RBL> F/W loaded from flash. Host Bootload "
1263 		    "disabled.", DEVNAME(sc)));
1264 		sc->sc_flash_present = 1;
1265 		return aq_fw_read_version(sc);
1266 	case FW_BOOT_MODE_UNKNOWN:
1267 		printf("%s: F/W bootload error: unknown bootloader type",
1268 		    DEVNAME(sc));
1269 		return ENOTSUP;
1270 	case FW_BOOT_MODE_RBL_HOST_BOOTLOAD:
1271 		printf("%s: RBL> F/W Host Bootload not implemented", DEVNAME(sc));
1272 		return ENOTSUP;
1273 	}
1274 
1275 	return ENOTSUP;
1276 }
1277 
/*
 * MAC soft reset via the RBL (ROM bootloader) path.  Issues a global
 * software reset, restarts the RBL and waits (up to 10s) for it to
 * report a boot result via FW_BOOT_EXIT_CODE_REG.  On success *mode is
 * set to the boot mode the RBL reported.  Returns 0 or EBUSY.
 * The register write sequence is order-sensitive; do not reorder.
 */
int
aq_mac_soft_reset_rbl(struct aq_softc *sc, enum aq_fw_bootloader_mode *mode)
{
	int timo;

	DPRINTF(("%s: RBL> MAC reset STARTED!\n", DEVNAME(sc)));

	AQ_WRITE_REG(sc, AQ_FW_GLB_CTL2_REG, 0x40e1);
	AQ_WRITE_REG(sc, AQ_FW_GLB_CPU_SEM_REG(0), 1);
	AQ_WRITE_REG(sc, AQ_MBOXIF_POWER_GATING_CONTROL_REG, 0);

	/* MAC FW will reload PHY FW if 1E.1000.3 was cleaned - #undone */
	AQ_WRITE_REG(sc, FW_BOOT_EXIT_CODE_REG, RBL_STATUS_DEAD);

	aq_global_software_reset(sc);

	AQ_WRITE_REG(sc, AQ_FW_GLB_CTL2_REG, 0x40e0);

	/* Wait for RBL to finish boot process. */
#define RBL_TIMEOUT_MS	10000
	uint16_t rbl_status;
	for (timo = RBL_TIMEOUT_MS; timo > 0; timo--) {
		rbl_status = AQ_READ_REG(sc, FW_BOOT_EXIT_CODE_REG) & 0xffff;
		/* RBL_STATUS_DEAD is the sentinel we wrote above */
		if (rbl_status != 0 && rbl_status != RBL_STATUS_DEAD)
			break;
		delay(1000);
	}

	if (timo <= 0) {
		printf("%s: RBL> RBL restart failed: timeout\n", DEVNAME(sc));
		return EBUSY;
	}

	switch (rbl_status) {
	case RBL_STATUS_SUCCESS:
		if (mode != NULL)
			*mode = FW_BOOT_MODE_RBL_FLASH;
		DPRINTF(("%s: RBL> reset complete! [Flash]\n", DEVNAME(sc)));
		break;
	case RBL_STATUS_HOST_BOOT:
		if (mode != NULL)
			*mode = FW_BOOT_MODE_RBL_HOST_BOOTLOAD;
		DPRINTF(("%s: RBL> reset complete! [Host Bootload]\n",
		    DEVNAME(sc)));
		break;
	case RBL_STATUS_FAILURE:
	default:
		printf("%s: unknown RBL status 0x%x\n", DEVNAME(sc),
		    rbl_status);
		return EBUSY;
	}

	return 0;
}
1332 
/*
 * MAC soft reset via the FLB (flash bootloader) path, used when RBL is
 * not available.  Kickstarts the MAC (skipped once fast start has been
 * established), performs a global software reset, and waits for the
 * firmware version register to become non-zero.  Returns 0 or
 * ETIMEDOUT.  The magic register values and delays follow the vendor
 * reset sequence; do not reorder.
 */
int
aq_mac_soft_reset_flb(struct aq_softc *sc)
{
	uint32_t v;
	int timo;

	AQ_WRITE_REG(sc, AQ_FW_GLB_CTL2_REG, 0x40e1);
	/*
	 * Let Felicity hardware to complete SMBUS transaction before
	 * Global software reset.
	 */
	delay(50000);

	/*
	 * If SPI burst transaction was interrupted(before running the script),
	 * global software reset may not clear SPI interface.
	 * Clean it up manually before global reset.
	 */
	AQ_WRITE_REG(sc, AQ_GLB_NVR_PROVISIONING2_REG, 0x00a0);
	AQ_WRITE_REG(sc, AQ_GLB_NVR_INTERFACE1_REG, 0x009f);
	AQ_WRITE_REG(sc, AQ_GLB_NVR_INTERFACE1_REG, 0x809f);
	delay(50000);

	/* request a soft reset with the reset-disable bit cleared */
	v = AQ_READ_REG(sc, AQ_FW_SOFTRESET_REG);
	v &= ~AQ_FW_SOFTRESET_DIS;
	v |= AQ_FW_SOFTRESET_RESET;
	AQ_WRITE_REG(sc, AQ_FW_SOFTRESET_REG, v);

	/* Kickstart. */
	AQ_WRITE_REG(sc, AQ_FW_GLB_CTL2_REG, 0x80e0);
	AQ_WRITE_REG(sc, AQ_MBOXIF_POWER_GATING_CONTROL_REG, 0);
	if (!sc->sc_fast_start_enabled)
		AQ_WRITE_REG(sc, AQ_GLB_GENERAL_PROVISIONING9_REG, 1);

	/*
	 * For the case SPI burst transaction was interrupted (by MCP reset
	 * above), wait until it is completed by hardware.
	 */
	delay(50000);

	/* MAC Kickstart */
	if (!sc->sc_fast_start_enabled) {
		AQ_WRITE_REG(sc, AQ_FW_GLB_CTL2_REG, 0x180e0);

		/* poll (up to ~1s) for the daisy chain status bit */
		uint32_t flb_status;
		for (timo = 0; timo < 1000; timo++) {
			flb_status = AQ_READ_REG(sc,
			    FW_MPI_DAISY_CHAIN_STATUS_REG) & 0x10;
			if (flb_status != 0)
				break;
			delay(1000);
		}
		if (flb_status == 0) {
			printf("%s: FLB> MAC kickstart failed: timed out\n",
			    DEVNAME(sc));
			return ETIMEDOUT;
		}
		DPRINTF(("%s: FLB> MAC kickstart done, %d ms\n", DEVNAME(sc),
		    timo));
		/* FW reset */
		AQ_WRITE_REG(sc, AQ_FW_GLB_CTL2_REG, 0x80e0);
		/*
		 * Let Felicity hardware complete SMBUS transaction before
		 * Global software reset.
		 */
		delay(50000);
		/* subsequent resets may take the short path */
		sc->sc_fast_start_enabled = true;
	}
	AQ_WRITE_REG(sc, AQ_FW_GLB_CPU_SEM_REG(0), 1);

	/* PHY Kickstart: #undone */
	aq_global_software_reset(sc);

	/* wait (up to ~10s) for the firmware to publish its version */
	for (timo = 0; timo < 1000; timo++) {
		if (AQ_READ_REG(sc, AQ_FW_VERSION_REG) != 0)
			break;
		delay(10000);
	}
	if (timo >= 1000) {
		printf("%s: FLB> Global Soft Reset failed\n", DEVNAME(sc));
		return ETIMEDOUT;
	}
	DPRINTF(("%s: FLB> F/W restart: %d ms\n", DEVNAME(sc), timo * 10));

	return 0;

}
1420 
1421 int
1422 aq_mac_soft_reset(struct aq_softc *sc, enum aq_fw_bootloader_mode *mode)
1423 {
1424 	if (sc->sc_rbl_enabled)
1425 		return aq_mac_soft_reset_rbl(sc, mode);
1426 
1427 	if (mode != NULL)
1428 		*mode = FW_BOOT_MODE_FLB;
1429 	return aq_mac_soft_reset_flb(sc);
1430 }
1431 
1432 void
1433 aq_global_software_reset(struct aq_softc *sc)
1434 {
1435         uint32_t v;
1436 
1437         AQ_WRITE_REG_BIT(sc, RX_SYSCONTROL_REG, RX_SYSCONTROL_RESET_DIS, 0);
1438         AQ_WRITE_REG_BIT(sc, TX_SYSCONTROL_REG, TX_SYSCONTROL_RESET_DIS, 0);
1439         AQ_WRITE_REG_BIT(sc, FW_MPI_RESETCTRL_REG,
1440             FW_MPI_RESETCTRL_RESET_DIS, 0);
1441 
1442         v = AQ_READ_REG(sc, AQ_FW_SOFTRESET_REG);
1443         v &= ~AQ_FW_SOFTRESET_DIS;
1444         v |= AQ_FW_SOFTRESET_RESET;
1445         AQ_WRITE_REG(sc, AQ_FW_SOFTRESET_REG, v);
1446 }
1447 
1448 int
1449 aq_fw_read_version(struct aq_softc *sc)
1450 {
1451 	int i, error = EBUSY;
1452 #define MAC_FW_START_TIMEOUT_MS 10000
1453 	for (i = 0; i < MAC_FW_START_TIMEOUT_MS; i++) {
1454 		sc->sc_fw_version = AQ_READ_REG(sc, AQ_FW_VERSION_REG);
1455 		if (sc->sc_fw_version != 0) {
1456 			error = 0;
1457 			break;
1458 		}
1459 		delay(1000);
1460 	}
1461 	return error;
1462 }
1463 
/*
 * Select the firmware ops table from the firmware major version and
 * detect the hardware revision, accumulating feature flags in
 * sc_features.  Called during attach, so printf output continues the
 * dmesg attach line.  Returns 0 or ENOTSUP.
 */
int
aq_fw_version_init(struct aq_softc *sc)
{
	int error = 0;
	char fw_vers[sizeof("F/W version xxxxx.xxxxx.xxxxx")];

	/* FW 1.x and FW 2.x/3.x use different mailbox protocols */
	if (FW_VERSION_MAJOR(sc) == 1) {
		sc->sc_fw_ops = &aq_fw1x_ops;
	} else if ((FW_VERSION_MAJOR(sc) == 2) || (FW_VERSION_MAJOR(sc) == 3)) {
		sc->sc_fw_ops = &aq_fw2x_ops;
	} else {
		printf("%s: Unsupported F/W version %d.%d.%d\n",
		    DEVNAME(sc),
		    FW_VERSION_MAJOR(sc), FW_VERSION_MINOR(sc),
		    FW_VERSION_BUILD(sc));
		return ENOTSUP;
	}
	snprintf(fw_vers, sizeof(fw_vers), "F/W version %d.%d.%d",
	    FW_VERSION_MAJOR(sc), FW_VERSION_MINOR(sc), FW_VERSION_BUILD(sc));

	/* detect revision from the low nibble of the hw revision register */
	uint32_t hwrev = AQ_READ_REG(sc, AQ_HW_REVISION_REG);
	switch (hwrev & 0x0000000f) {
	case 0x01:
		printf(", revision A0, %s", fw_vers);
		sc->sc_features |= FEATURES_REV_A0 |
		    FEATURES_MPI_AQ | FEATURES_MIPS;
		break;
	case 0x02:
		printf(", revision B0, %s", fw_vers);
		sc->sc_features |= FEATURES_REV_B0 |
		    FEATURES_MPI_AQ | FEATURES_MIPS |
		    FEATURES_TPO2 | FEATURES_RPF2;
		break;
	case 0x0A:
		printf(", revision B1, %s", fw_vers);
		sc->sc_features |= FEATURES_REV_B1 |
		    FEATURES_MPI_AQ | FEATURES_MIPS |
		    FEATURES_TPO2 | FEATURES_RPF2;
		break;
	default:
		printf(", Unknown revision (0x%08x)", hwrev);
		error = ENOTSUP;
		break;
	}
	return error;
}
1511 
/*
 * Initialize the firmware/host communication state: seed the FW 1.x
 * init registers if needed, wait for the firmware to publish its
 * mailbox address, and reject firmware older than 1.5.6.
 * Returns 0 or ENOTSUP.
 */
int
aq_hw_init_ucp(struct aq_softc *sc)
{
	int timo;

	if (FW_VERSION_MAJOR(sc) == 1) {
		if (AQ_READ_REG(sc, FW1X_MPI_INIT2_REG) == 0) {
			/*
			 * Seed with a random value whose every byte is
			 * non-zero (clear bit0, set bit1 of each byte).
			 */
			uint32_t data;
			arc4random_buf(&data, sizeof(data));
			data &= 0xfefefefe;
			data |= 0x02020202;
			AQ_WRITE_REG(sc, FW1X_MPI_INIT2_REG, data);
		}
		AQ_WRITE_REG(sc, FW1X_MPI_INIT1_REG, 0);
	}

	/* wait up to ~100ms for the firmware mailbox address */
	for (timo = 100; timo > 0; timo--) {
		sc->sc_mbox_addr = AQ_READ_REG(sc, FW_MPI_MBOX_ADDR_REG);
		if (sc->sc_mbox_addr != 0)
			break;
		delay(1000);
	}

#define AQ_FW_MIN_VERSION	0x01050006
#define AQ_FW_MIN_VERSION_STR	"1.5.6"
	if (sc->sc_fw_version < AQ_FW_MIN_VERSION) {
		printf("%s: atlantic: wrong FW version: " AQ_FW_MIN_VERSION_STR
		    " or later required, this is %d.%d.%d\n",
		    DEVNAME(sc),
		    FW_VERSION_MAJOR(sc),
		    FW_VERSION_MINOR(sc),
		    FW_VERSION_BUILD(sc));
		return ENOTSUP;
	}

	/* warn but continue; later mailbox reads will fail loudly */
	if (sc->sc_mbox_addr == 0)
		printf("%s: NULL MBOX!!\n", DEVNAME(sc));

	return 0;
}
1552 
/*
 * Reset the device interrupt block, then perform the firmware-specific
 * reset via the selected ops table.  Returns 0 or an errno.
 */
int
aq_hw_reset(struct aq_softc *sc)
{
	int error;

	/* disable irq */
	AQ_WRITE_REG_BIT(sc, AQ_INTR_CTRL_REG, AQ_INTR_CTRL_RESET_DIS, 0);

	/* apply */
	AQ_WRITE_REG_BIT(sc, AQ_INTR_CTRL_REG, AQ_INTR_CTRL_RESET_IRQ, 1);

	/* wait ack 10 times by 1ms */
	WAIT_FOR(
	    (AQ_READ_REG(sc, AQ_INTR_CTRL_REG) & AQ_INTR_CTRL_RESET_IRQ) == 0,
	    1000, 10, &error);
	if (error != 0) {
		printf("%s: atlantic: IRQ reset failed: %d\n", DEVNAME(sc),
		    error);
		return error;
	}

	/* dispatch to aq_fw1x_reset() or aq_fw2x_reset() */
	return sc->sc_fw_ops->reset(sc);
}
1576 
1577 int
1578 aq_get_mac_addr(struct aq_softc *sc)
1579 {
1580 	uint32_t mac_addr[2];
1581 	uint32_t efuse_shadow_addr;
1582 	int err;
1583 
1584 	efuse_shadow_addr = 0;
1585 	if (FW_VERSION_MAJOR(sc) >= 2)
1586 		efuse_shadow_addr = AQ_READ_REG(sc, FW2X_MPI_EFUSEADDR_REG);
1587 	else
1588 		efuse_shadow_addr = AQ_READ_REG(sc, FW1X_MPI_EFUSEADDR_REG);
1589 
1590 	if (efuse_shadow_addr == 0) {
1591 		printf("%s: cannot get efuse addr", DEVNAME(sc));
1592 		return ENXIO;
1593 	}
1594 
1595 	DPRINTF(("%s: efuse_shadow_addr = %x\n", DEVNAME(sc), efuse_shadow_addr));
1596 
1597 	memset(mac_addr, 0, sizeof(mac_addr));
1598 	err = aq_fw_downld_dwords(sc, efuse_shadow_addr + (40 * 4),
1599 	    mac_addr, 2);
1600 	if (err < 0)
1601 		return err;
1602 
1603 	if (mac_addr[0] == 0 && mac_addr[1] == 0) {
1604 		printf("%s: mac address not found", DEVNAME(sc));
1605 		return ENXIO;
1606 	}
1607 
1608 	DPRINTF(("%s: mac0 %x mac1 %x\n", DEVNAME(sc), mac_addr[0],
1609 	    mac_addr[1]));
1610 
1611 	mac_addr[0] = htobe32(mac_addr[0]);
1612 	mac_addr[1] = htobe32(mac_addr[1]);
1613 
1614 	DPRINTF(("%s: mac0 %x mac1 %x\n", DEVNAME(sc), mac_addr[0],
1615 	    mac_addr[1]));
1616 
1617 	memcpy(sc->sc_enaddr.ether_addr_octet,
1618 	    (uint8_t *)mac_addr, ETHER_ADDR_LEN);
1619 	DPRINTF((": %s", ether_sprintf(sc->sc_enaddr.ether_addr_octet)));
1620 
1621 	return 0;
1622 }
1623 
/*
 * Power-management hook; suspend/resume is not implemented, so every
 * action is accepted as a no-op.
 */
int
aq_activate(struct device *self, int act)
{
	return 0;
}
1629 
/*
 * Copy `cnt' 32-bit words from firmware memory at `addr' into `p'
 * through the mailbox interface, guarded by the RAM semaphore.
 * Returns 0 or ETIMEDOUT.
 */
int
aq_fw_downld_dwords(struct aq_softc *sc, uint32_t addr, uint32_t *p,
    uint32_t cnt)
{
	uint32_t v;
	int error = 0;

	/* acquire the RAM semaphore; on timeout, try once to reclaim it */
	WAIT_FOR(AQ_READ_REG(sc, AQ_FW_SEM_RAM_REG) == 1, 1, 10000, &error);
	if (error != 0) {
		AQ_WRITE_REG(sc, AQ_FW_SEM_RAM_REG, 1);
		v = AQ_READ_REG(sc, AQ_FW_SEM_RAM_REG);
		if (v == 0) {
			printf("%s: %s:%d: timeout\n",
			    DEVNAME(sc), __func__, __LINE__);
			return ETIMEDOUT;
		}
	}

	AQ_WRITE_REG(sc, AQ_FW_MBOX_ADDR_REG, addr);

	error = 0;
	for (; cnt > 0 && error == 0; cnt--) {
		/* execute mailbox interface */
		AQ_WRITE_REG_BIT(sc, AQ_FW_MBOX_CMD_REG,
		    AQ_FW_MBOX_CMD_EXECUTE, 1);
		/*
		 * B1 hardware signals completion by advancing the mailbox
		 * address register; older revisions expose a busy bit.
		 */
		if (sc->sc_features & FEATURES_REV_B1) {
			WAIT_FOR(AQ_READ_REG(sc, AQ_FW_MBOX_ADDR_REG) != addr,
			    1, 1000, &error);
		} else {
			WAIT_FOR((AQ_READ_REG(sc, AQ_FW_MBOX_CMD_REG) &
			    AQ_FW_MBOX_CMD_BUSY) == 0,
			    1, 1000, &error);
		}
		*p++ = AQ_READ_REG(sc, AQ_FW_MBOX_VAL_REG);
		addr += sizeof(uint32_t);
	}
	/* release the RAM semaphore */
	AQ_WRITE_REG(sc, AQ_FW_SEM_RAM_REG, 1);

	if (error != 0)
		printf("%s: %s:%d: timeout\n",
		    DEVNAME(sc), __func__, __LINE__);

	return error;
}
1674 
/*
 * FW 2.x "reset": read the capabilities words from the firmware
 * mailbox and cache them in sc_fw_caps.  Returns 0 or an errno from
 * the mailbox transfer.
 */
int
aq_fw2x_reset(struct aq_softc *sc)
{
	struct aq_fw2x_capabilities caps = { 0 };
	int error;

	error = aq_fw_downld_dwords(sc,
	    sc->sc_mbox_addr + offsetof(struct aq_fw2x_mailbox, caps),
	    (uint32_t *)&caps, sizeof caps / sizeof(uint32_t));
	if (error != 0) {
		printf("%s: fw2x> can't get F/W capabilities mask, error %d\n",
		    DEVNAME(sc), error);
		return error;
	}
	/* combine the two 32-bit halves into the 64-bit capability mask */
	sc->sc_fw_caps = caps.caps_lo | ((uint64_t)caps.caps_hi << 32);

	DPRINTF(("%s: fw2x> F/W capabilities=0x%llx\n", DEVNAME(sc),
	    sc->sc_fw_caps));

	return 0;
}
1696 
/*
 * FW 1.x reset is not implemented; logs a notice and reports success
 * so attach can proceed.
 */
int
aq_fw1x_reset(struct aq_softc *sc)
{
	printf("%s: unimplemented %s\n", DEVNAME(sc), __func__);
	return 0;
}
1703 
/* FW 1.x set_mode stub: silently pretends success. */
int
aq_fw1x_set_mode(struct aq_softc *sc, enum aq_hw_fw_mpi_state w,
    enum aq_link_speed x, enum aq_link_fc y, enum aq_link_eee z)
{
	return 0;
}
1710 
/*
 * FW 1.x get_mode stub.
 * NOTE(review): the output pointers are never written, so callers see
 * whatever was in those variables beforehand — confirm before relying
 * on FW 1.x link state reporting.
 */
int
aq_fw1x_get_mode(struct aq_softc *sc, enum aq_hw_fw_mpi_state *w,
    enum aq_link_speed *x, enum aq_link_fc *y, enum aq_link_eee *z)
{
	return 0;
}
1717 
/* FW 1.x get_stats stub: no statistics are collected. */
int
aq_fw1x_get_stats(struct aq_softc *sc, struct aq_hw_stats_s *w)
{
	return 0;
}
1723 
1724 
/*
 * FW 2.x get_mode: decode MPI state/control registers into mode,
 * negotiated speed, flow control and EEE status.  Any output pointer
 * may be NULL.  Always returns 0.
 */
int
aq_fw2x_get_mode(struct aq_softc *sc, enum aq_hw_fw_mpi_state *modep,
    enum aq_link_speed *speedp, enum aq_link_fc *fcp, enum aq_link_eee *eeep)
{
	uint64_t mpi_state, mpi_ctrl;
	enum aq_link_speed speed;
	enum aq_link_fc fc;

	/* register reads are serialized against other MPI users */
	AQ_MPI_LOCK(sc);

	mpi_state = AQ_READ64_REG(sc, FW2X_MPI_STATE_REG);
	if (modep != NULL) {
		/* any requested rate in the control register means "up" */
		mpi_ctrl = AQ_READ64_REG(sc, FW2X_MPI_CONTROL_REG);
		if (mpi_ctrl & FW2X_CTRL_RATE_MASK)
			*modep = MPI_INIT;
		else
			*modep = MPI_DEINIT;
	}

	AQ_MPI_UNLOCK(sc);

	/* report the highest rate bit set in the state register */
	if (mpi_state & FW2X_CTRL_RATE_10G)
		speed = AQ_LINK_10G;
	else if (mpi_state & FW2X_CTRL_RATE_5G)
		speed = AQ_LINK_5G;
	else if (mpi_state & FW2X_CTRL_RATE_2G5)
		speed = AQ_LINK_2G5;
	else if (mpi_state & FW2X_CTRL_RATE_1G)
		speed = AQ_LINK_1G;
	else if (mpi_state & FW2X_CTRL_RATE_100M)
		speed = AQ_LINK_100M;
	else
		speed = AQ_LINK_NONE;
	if (speedp != NULL)
		*speedp = speed;

	fc = AQ_FC_NONE;
	if (mpi_state & FW2X_CTRL_PAUSE)
		fc |= AQ_FC_RX;
	if (mpi_state & FW2X_CTRL_ASYMMETRIC_PAUSE)
		fc |= AQ_FC_TX;
	if (fcp != NULL)
		*fcp = fc;

	/* EEE state is not decoded; always reported as disabled */
	if (eeep != NULL)
		*eeep = AQ_EEE_DISABLE;

	return 0;
}
1774 
/* FW 2.x get_stats stub: no statistics are collected. */
int
aq_fw2x_get_stats(struct aq_softc *sc, struct aq_hw_stats_s *w)
{
	return 0;
}
1780 
1781 void
1782 aq_hw_l3_filter_set(struct aq_softc *sc)
1783 {
1784 	int i;
1785 
1786 	/* clear all filter */
1787 	for (i = 0; i < 8; i++) {
1788 		AQ_WRITE_REG_BIT(sc, RPF_L3_FILTER_REG(i),
1789 		    RPF_L3_FILTER_L4_EN, 0);
1790 	}
1791 }
1792 
/*
 * One-time hardware initialization after reset: PCI request-size
 * limits, TX/RX datapaths, MAC address, QoS, and the interrupt
 * controller (mode, auto-mask, error and link interrupt mapping).
 * `irqmode' is one of the AQ_INTR_CTRL_IRQMODE_* values and
 * `multivec' is non-zero when MSI-X vectors are per-queue.
 * Returns 0 or EINVAL.
 */
int
aq_hw_init(struct aq_softc *sc, int irqmode, int multivec)
{
	uint32_t v;

	/* Force limit MRRS on RDM/TDM to 2K */
	v = AQ_READ_REG(sc, AQ_PCI_REG_CONTROL_6_REG);
	AQ_WRITE_REG(sc, AQ_PCI_REG_CONTROL_6_REG, (v & ~0x0707) | 0x0404);

	/*
	 * TX DMA total request limit. B0 hardware is not capable to
	 * handle more than (8K-MRRS) incoming DMA data.
	 * Value 24 in 256byte units
	 */
	AQ_WRITE_REG(sc, AQ_HW_TX_DMA_TOTAL_REQ_LIMIT_REG, 24);

	aq_hw_init_tx_path(sc);
	aq_hw_init_rx_path(sc);

	if (aq_set_mac_addr(sc, AQ_HW_MAC_OWN, sc->sc_enaddr.ether_addr_octet))
		return EINVAL;

	/* link stays down until aq_ifmedia_change()/aq_up() request a rate */
	aq_set_linkmode(sc, AQ_LINK_NONE, AQ_FC_NONE, AQ_EEE_DISABLE);

	aq_hw_qos_set(sc);

	/* Enable interrupt */
	AQ_WRITE_REG(sc, AQ_INTR_CTRL_REG, AQ_INTR_CTRL_RESET_DIS);
	AQ_WRITE_REG_BIT(sc, AQ_INTR_CTRL_REG, AQ_INTR_CTRL_MULTIVEC, multivec);

	AQ_WRITE_REG_BIT(sc, AQ_INTR_CTRL_REG, AQ_INTR_CTRL_IRQMODE, irqmode);

	AQ_WRITE_REG(sc, AQ_INTR_AUTOMASK_REG, 0xffffffff);

	/* map both hardware error sources to the AQ_B0_ERR_INT vector */
	AQ_WRITE_REG(sc, AQ_GEN_INTR_MAP_REG(0),
	    ((AQ_B0_ERR_INT << 24) | (1U << 31)) |
	    ((AQ_B0_ERR_INT << 16) | (1 << 23))
	);

	/* link interrupt */
	AQ_WRITE_REG(sc, AQ_GEN_INTR_MAP_REG(3),
	    (1 << 7) | sc->sc_linkstat_irq);

	return 0;
}
1838 
/*
 * Initialize the transmit datapath: traffic-class mode, the TCP flag
 * masks used by LSO segmentation (first/middle/last segment), TPO2
 * checksum offload engine (B0 and later), and disable DCA.
 */
void
aq_hw_init_tx_path(struct aq_softc *sc)
{
	/* Tx TC/RSS number config */
	AQ_WRITE_REG_BIT(sc, TPB_TX_BUF_REG, TPB_TX_BUF_TC_MODE_EN, 1);

	AQ_WRITE_REG_BIT(sc, THM_LSO_TCP_FLAG1_REG,
	    THM_LSO_TCP_FLAG1_FIRST, 0x0ff6);
	AQ_WRITE_REG_BIT(sc, THM_LSO_TCP_FLAG1_REG,
	    THM_LSO_TCP_FLAG1_MID,   0x0ff6);
	AQ_WRITE_REG_BIT(sc, THM_LSO_TCP_FLAG2_REG,
	   THM_LSO_TCP_FLAG2_LAST,  0x0f7f);

	/* misc */
	AQ_WRITE_REG(sc, TX_TPO2_REG,
	   (sc->sc_features & FEATURES_TPO2) ? TX_TPO2_EN : 0);
	AQ_WRITE_REG_BIT(sc, TDM_DCA_REG, TDM_DCA_EN, 0);
	AQ_WRITE_REG_BIT(sc, TDM_DCA_REG, TDM_DCA_MODE, 0);

	AQ_WRITE_REG_BIT(sc, TPB_TX_BUF_REG, TPB_TX_BUF_SCP_INS_EN, 1);
}
1860 
/*
 * Initialize the receive datapath: clear RSS and ethertype filters,
 * reset the unicast/multicast filter slots, program the VLAN TPIDs
 * (promiscuous VLAN mode; tagging handled per-packet), accept
 * broadcast, and disable DCA.
 */
void
aq_hw_init_rx_path(struct aq_softc *sc)
{
	int i;

	/* clear setting */
	AQ_WRITE_REG_BIT(sc, RPB_RPF_RX_REG, RPB_RPF_RX_TC_MODE, 0);
	AQ_WRITE_REG_BIT(sc, RPB_RPF_RX_REG, RPB_RPF_RX_FC_MODE, 0);
	AQ_WRITE_REG(sc, RX_FLR_RSS_CONTROL1_REG, 0);
	for (i = 0; i < 32; i++) {
		AQ_WRITE_REG_BIT(sc, RPF_ETHERTYPE_FILTER_REG(i),
		   RPF_ETHERTYPE_FILTER_EN, 0);
	}

	/* L2 and Multicast filters */
	for (i = 0; i < AQ_HW_MAC_NUM; i++) {
		AQ_WRITE_REG_BIT(sc, RPF_L2UC_MSW_REG(i), RPF_L2UC_MSW_EN, 0);
		AQ_WRITE_REG_BIT(sc, RPF_L2UC_MSW_REG(i), RPF_L2UC_MSW_ACTION,
		    RPF_ACTION_HOST);
	}
	AQ_WRITE_REG(sc, RPF_MCAST_FILTER_MASK_REG, 0);
	AQ_WRITE_REG(sc, RPF_MCAST_FILTER_REG(0), 0x00010fff);

	/* Vlan filters */
	AQ_WRITE_REG_BIT(sc, RPF_VLAN_TPID_REG, RPF_VLAN_TPID_OUTER,
	    ETHERTYPE_QINQ);
	AQ_WRITE_REG_BIT(sc, RPF_VLAN_TPID_REG, RPF_VLAN_TPID_INNER,
	    ETHERTYPE_VLAN);
	AQ_WRITE_REG_BIT(sc, RPF_VLAN_MODE_REG, RPF_VLAN_MODE_PROMISC, 1);

	/* B0/B1 additionally pass untagged frames to the host */
	if (sc->sc_features & FEATURES_REV_B) {
		AQ_WRITE_REG_BIT(sc, RPF_VLAN_MODE_REG,
		    RPF_VLAN_MODE_ACCEPT_UNTAGGED, 1);
		AQ_WRITE_REG_BIT(sc, RPF_VLAN_MODE_REG,
		    RPF_VLAN_MODE_UNTAGGED_ACTION, RPF_ACTION_HOST);
	}

	AQ_WRITE_REG(sc, RX_TCP_RSS_HASH_REG, 0);

	/* accept broadcast */
	AQ_WRITE_REG_BIT(sc, RPF_L2BC_REG, RPF_L2BC_EN, 1);
	AQ_WRITE_REG_BIT(sc, RPF_L2BC_REG, RPF_L2BC_ACTION, RPF_ACTION_HOST);
	AQ_WRITE_REG_BIT(sc, RPF_L2BC_REG, RPF_L2BC_THRESHOLD, 0xffff);

	AQ_WRITE_REG_BIT(sc, RX_DMA_DCA_REG, RX_DMA_DCA_EN, 0);
	AQ_WRITE_REG_BIT(sc, RX_DMA_DCA_REG, RX_DMA_DCA_MODE, 0);
}
1907 
/*
 * Program L2 filter slot `index` with `enaddr`, or disable the slot if
 * enaddr is NULL.  Slot 0 (AQ_HW_MAC_OWN) holds the interface's own
 * address; the remaining slots are used for multicast (see aq_iff()).
 * Returns EINVAL if the index is out of range.
 */
int
aq_set_mac_addr(struct aq_softc *sc, int index, uint8_t *enaddr)
{
	uint32_t h, l;

	if (index >= AQ_HW_MAC_NUM)
		return EINVAL;

	if (enaddr == NULL) {
		/* disable */
		AQ_WRITE_REG_BIT(sc,
		    RPF_L2UC_MSW_REG(index), RPF_L2UC_MSW_EN, 0);
		return 0;
	}

	/* split the address: high 16 bits and low 32 bits */
	h = (enaddr[0] <<  8) | (enaddr[1]);
	l = ((uint32_t)enaddr[2] << 24) | (enaddr[3] << 16) |
	    (enaddr[4] <<  8) | (enaddr[5]);

	/* disable, set, and enable */
	AQ_WRITE_REG_BIT(sc, RPF_L2UC_MSW_REG(index), RPF_L2UC_MSW_EN, 0);
	AQ_WRITE_REG(sc, RPF_L2UC_LSW_REG(index), l);
	AQ_WRITE_REG_BIT(sc, RPF_L2UC_MSW_REG(index),
	    RPF_L2UC_MSW_MACADDR_HI, h);
	AQ_WRITE_REG_BIT(sc, RPF_L2UC_MSW_REG(index), RPF_L2UC_MSW_ACTION, 1);
	AQ_WRITE_REG_BIT(sc, RPF_L2UC_MSW_REG(index), RPF_L2UC_MSW_EN, 1);

	return 0;
}
1938 
1939 int
1940 aq_get_linkmode(struct aq_softc *sc, enum aq_link_speed *speed,
1941     enum aq_link_fc *fc, enum aq_link_eee *eee)
1942 {
1943 	enum aq_hw_fw_mpi_state mode;
1944 	int error;
1945 
1946 	error = sc->sc_fw_ops->get_mode(sc, &mode, speed, fc, eee);
1947 	if (error != 0)
1948 		return error;
1949 	if (mode != MPI_INIT)
1950 		return ENXIO;
1951 
1952 	return 0;
1953 }
1954 
1955 int
1956 aq_set_linkmode(struct aq_softc *sc, enum aq_link_speed speed,
1957     enum aq_link_fc fc, enum aq_link_eee eee)
1958 {
1959 	return sc->sc_fw_ops->set_mode(sc, MPI_INIT, speed, fc, eee);
1960 }
1961 
/*
 * FW2.x firmware backend for aq_set_linkmode()/aq_set_mode.  Performs a
 * read-modify-write of the 64-bit MPI control word: MPI_INIT sets the
 * advertised rates, EEE and pause bits from the arguments, MPI_DEINIT
 * clears them all.  Serialized by the MPI lock.
 */
int
aq_fw2x_set_mode(struct aq_softc *sc, enum aq_hw_fw_mpi_state mode,
    enum aq_link_speed speed, enum aq_link_fc fc, enum aq_link_eee eee)
{
	uint64_t mpi_ctrl;
	int error = 0;

	AQ_MPI_LOCK(sc);

	mpi_ctrl = AQ_READ64_REG(sc, FW2X_MPI_CONTROL_REG);

	switch (mode) {
	case MPI_INIT:
		/* advertise each requested rate */
		mpi_ctrl &= ~FW2X_CTRL_RATE_MASK;
		if (speed & AQ_LINK_10G)
			mpi_ctrl |= FW2X_CTRL_RATE_10G;
		if (speed & AQ_LINK_5G)
			mpi_ctrl |= FW2X_CTRL_RATE_5G;
		if (speed & AQ_LINK_2G5)
			mpi_ctrl |= FW2X_CTRL_RATE_2G5;
		if (speed & AQ_LINK_1G)
			mpi_ctrl |= FW2X_CTRL_RATE_1G;
		if (speed & AQ_LINK_100M)
			mpi_ctrl |= FW2X_CTRL_RATE_100M;

		mpi_ctrl &= ~FW2X_CTRL_LINK_DROP;

		/* EEE is all-or-nothing */
		mpi_ctrl &= ~FW2X_CTRL_EEE_MASK;
		if (eee == AQ_EEE_ENABLE)
			mpi_ctrl |= FW2X_CTRL_EEE_MASK;

		/* PAUSE <- rx flow control, ASYMMETRIC_PAUSE <- tx */
		mpi_ctrl &= ~(FW2X_CTRL_PAUSE | FW2X_CTRL_ASYMMETRIC_PAUSE);
		if (fc & AQ_FC_RX)
			mpi_ctrl |= FW2X_CTRL_PAUSE;
		if (fc & AQ_FC_TX)
			mpi_ctrl |= FW2X_CTRL_ASYMMETRIC_PAUSE;
		break;
	case MPI_DEINIT:
		/* clear all rates, EEE and pause bits to drop the link */
		mpi_ctrl &= ~(FW2X_CTRL_RATE_MASK | FW2X_CTRL_EEE_MASK);
		mpi_ctrl &= ~(FW2X_CTRL_PAUSE | FW2X_CTRL_ASYMMETRIC_PAUSE);
		break;
	default:
		printf("%s: fw2x> unknown MPI state %d\n", DEVNAME(sc), mode);
		error =  EINVAL;
		goto failure;
	}
	AQ_WRITE64_REG(sc, FW2X_MPI_CONTROL_REG, mpi_ctrl);

 failure:
	AQ_MPI_UNLOCK(sc);
	return error;
}
2014 
/*
 * Configure transmit scheduling and packet buffer sizes.  Only traffic
 * class 0 is used; the whole tx and rx packet buffers are assigned to
 * it with high/low watermarks at 66%/50% of the buffer size, and all
 * 802.1p priorities map to TC 0.
 */
void
aq_hw_qos_set(struct aq_softc *sc)
{
	uint32_t tc = 0;
	uint32_t buff_size;

	/* TPS Descriptor rate init */
	AQ_WRITE_REG_BIT(sc, TPS_DESC_RATE_REG, TPS_DESC_RATE_TA_RST, 0);
	AQ_WRITE_REG_BIT(sc, TPS_DESC_RATE_REG, TPS_DESC_RATE_LIM, 0xa);

	/* TPS VM init */
	AQ_WRITE_REG_BIT(sc, TPS_DESC_VM_ARB_MODE_REG, TPS_DESC_VM_ARB_MODE, 0);

	/* TPS TC credits init */
	AQ_WRITE_REG_BIT(sc, TPS_DESC_TC_ARB_MODE_REG, TPS_DESC_TC_ARB_MODE, 0);
	AQ_WRITE_REG_BIT(sc, TPS_DATA_TC_ARB_MODE_REG, TPS_DATA_TC_ARB_MODE, 0);

	/* arbiter credits/weights for TC 0 */
	AQ_WRITE_REG_BIT(sc, TPS_DATA_TCT_REG(tc),
	    TPS_DATA_TCT_CREDIT_MAX, 0xfff);
	AQ_WRITE_REG_BIT(sc, TPS_DATA_TCT_REG(tc),
	    TPS_DATA_TCT_WEIGHT, 0x64);
	AQ_WRITE_REG_BIT(sc, TPS_DESC_TCT_REG(tc),
	    TPS_DESC_TCT_CREDIT_MAX, 0x50);
	AQ_WRITE_REG_BIT(sc, TPS_DESC_TCT_REG(tc),
	    TPS_DESC_TCT_WEIGHT, 0x1e);

	/* Tx buf size: all of it to TC 0, watermarks at 66%/50% */
	tc = 0;
	buff_size = AQ_HW_TXBUF_MAX;
	AQ_WRITE_REG_BIT(sc, TPB_TXB_BUFSIZE_REG(tc), TPB_TXB_BUFSIZE,
	    buff_size);
	AQ_WRITE_REG_BIT(sc, TPB_TXB_THRESH_REG(tc), TPB_TXB_THRESH_HI,
	    (buff_size * (1024 / 32) * 66) / 100);
	AQ_WRITE_REG_BIT(sc, TPB_TXB_THRESH_REG(tc), TPB_TXB_THRESH_LO,
	    (buff_size * (1024 / 32) * 50) / 100);

	/* QoS Rx buf size per TC; XOFF (flow control) disabled */
	tc = 0;
	buff_size = AQ_HW_RXBUF_MAX;
	AQ_WRITE_REG_BIT(sc, RPB_RXB_BUFSIZE_REG(tc), RPB_RXB_BUFSIZE,
	    buff_size);
	AQ_WRITE_REG_BIT(sc, RPB_RXB_XOFF_REG(tc), RPB_RXB_XOFF_EN, 0);
	AQ_WRITE_REG_BIT(sc, RPB_RXB_XOFF_REG(tc), RPB_RXB_XOFF_THRESH_HI,
	    (buff_size * (1024 / 32) * 66) / 100);
	AQ_WRITE_REG_BIT(sc, RPB_RXB_XOFF_REG(tc), RPB_RXB_XOFF_THRESH_LO,
	    (buff_size * (1024 / 32) * 50) / 100);

	/* QoS 802.1p priority -> TC mapping: everything to TC 0 */
	int i_priority;
	for (i_priority = 0; i_priority < 8; i_priority++) {
		AQ_WRITE_REG_BIT(sc, RPF_RPB_RX_TC_UPT_REG,
		    RPF_RPB_RX_TC_UPT_MASK(i_priority), 0);
	}
}
2069 
/*
 * Disable a tx ring and reset its software state.  If `start` is set,
 * also program the descriptor base/length, resync the producer and
 * consumer indexes with the hardware tail pointer, route the ring's
 * interrupt and re-enable the ring.
 */
void
aq_txring_reset(struct aq_softc *sc, struct aq_txring *tx, int start)
{
	daddr_t paddr;

	tx->tx_prod = 0;
	tx->tx_cons = 0;

	/* empty slots? */

	AQ_WRITE_REG_BIT(sc, TX_DMA_DESC_REG(tx->tx_q), TX_DMA_DESC_EN, 0);

	if (start == 0)
		return;

	/* descriptor ring base address, split across two registers */
	paddr = AQ_DMA_DVA(&tx->tx_mem);
	AQ_WRITE_REG(sc, TX_DMA_DESC_BASE_ADDRLSW_REG(tx->tx_q), paddr);
	AQ_WRITE_REG(sc, TX_DMA_DESC_BASE_ADDRMSW_REG(tx->tx_q),
	    paddr >> 32);

	/* ring length is programmed in units of 8 descriptors */
	AQ_WRITE_REG_BIT(sc, TX_DMA_DESC_REG(tx->tx_q), TX_DMA_DESC_LEN,
	    AQ_TXD_NUM / 8);

	/* start software indexes from the hardware tail pointer */
	tx->tx_prod = AQ_READ_REG(sc, TX_DMA_DESC_TAIL_PTR_REG(tx->tx_q));
	tx->tx_cons = tx->tx_prod;
	AQ_WRITE_REG(sc, TX_DMA_DESC_WRWB_THRESH_REG(tx->tx_q), 0);

	/* route this ring's completion interrupt and enable it */
	AQ_WRITE_REG_BIT(sc, AQ_INTR_IRQ_MAP_TX_REG(tx->tx_q),
	    AQ_INTR_IRQ_MAP_TX_IRQMAP(tx->tx_q), tx->tx_irq);
	AQ_WRITE_REG_BIT(sc, AQ_INTR_IRQ_MAP_TX_REG(tx->tx_q),
	    AQ_INTR_IRQ_MAP_TX_EN(tx->tx_q), 1);

	AQ_WRITE_REG_BIT(sc, TX_DMA_DESC_REG(tx->tx_q), TX_DMA_DESC_EN, 1);

	/* tx DCA off */
	AQ_WRITE_REG_BIT(sc, TDM_DCAD_REG(tx->tx_q), TDM_DCAD_CPUID, 0);
	AQ_WRITE_REG_BIT(sc, TDM_DCAD_REG(tx->tx_q), TDM_DCAD_CPUID_EN, 0);
}
2107 
/*
 * Disable an rx ring.  If `start` is set, also program the descriptor
 * base/length and buffer sizes, enable VLAN stripping when the kernel
 * has vlan(4), resync the software indexes with the hardware head
 * pointer, route the ring's interrupt and re-enable the ring.
 */
void
aq_rxring_reset(struct aq_softc *sc, struct aq_rxring *rx, int start)
{
	daddr_t paddr;
	int strip;

	AQ_WRITE_REG_BIT(sc, RX_DMA_DESC_REG(rx->rx_q), RX_DMA_DESC_EN, 0);
	/* drain */

	if (start == 0)
		return;

	/* descriptor ring base address, split across two registers */
	paddr = AQ_DMA_DVA(&rx->rx_mem);
	AQ_WRITE_REG(sc, RX_DMA_DESC_BASE_ADDRLSW_REG(rx->rx_q), paddr);
	AQ_WRITE_REG(sc, RX_DMA_DESC_BASE_ADDRMSW_REG(rx->rx_q),
	    paddr >> 32);

	/* ring length is programmed in units of 8 descriptors */
	AQ_WRITE_REG_BIT(sc, RX_DMA_DESC_REG(rx->rx_q), RX_DMA_DESC_LEN,
	    AQ_RXD_NUM / 8);

	/* buffer size in 1KB units; header splitting disabled */
	AQ_WRITE_REG_BIT(sc, RX_DMA_DESC_BUFSIZE_REG(rx->rx_q),
	    RX_DMA_DESC_BUFSIZE_DATA, MCLBYTES / 1024);
	AQ_WRITE_REG_BIT(sc, RX_DMA_DESC_BUFSIZE_REG(rx->rx_q),
	    RX_DMA_DESC_BUFSIZE_HDR, 0);
	AQ_WRITE_REG_BIT(sc, RX_DMA_DESC_REG(rx->rx_q),
	    RX_DMA_DESC_HEADER_SPLIT, 0);

#if NVLAN > 0
	strip = 1;
#else
	strip = 0;
#endif
	AQ_WRITE_REG_BIT(sc, RX_DMA_DESC_REG(rx->rx_q),
	    RX_DMA_DESC_VLAN_STRIP, strip);

	/* start software indexes where the hardware currently is */
	rx->rx_cons = AQ_READ_REG(sc, RX_DMA_DESC_HEAD_PTR_REG(rx->rx_q)) &
	    RX_DMA_DESC_HEAD_PTR;
	AQ_WRITE_REG(sc, RX_DMA_DESC_TAIL_PTR_REG(rx->rx_q), rx->rx_cons);
	rx->rx_prod = rx->rx_cons;

	/* route this ring's interrupt and enable it */
	AQ_WRITE_REG_BIT(sc, AQ_INTR_IRQ_MAP_RX_REG(rx->rx_q),
	    AQ_INTR_IRQ_MAP_RX_IRQMAP(rx->rx_q), rx->rx_irq);
	AQ_WRITE_REG_BIT(sc, AQ_INTR_IRQ_MAP_RX_REG(rx->rx_q),
	    AQ_INTR_IRQ_MAP_RX_EN(rx->rx_q), 1);

	/* rx DCA off */
	AQ_WRITE_REG_BIT(sc, RX_DMA_DCAD_REG(rx->rx_q),
	    RX_DMA_DCAD_CPUID, 0);
	AQ_WRITE_REG_BIT(sc, RX_DMA_DCAD_REG(rx->rx_q),
	    RX_DMA_DCAD_DESC_EN, 0);
	AQ_WRITE_REG_BIT(sc, RX_DMA_DCAD_REG(rx->rx_q),
	    RX_DMA_DCAD_HEADER_EN, 0);
	AQ_WRITE_REG_BIT(sc, RX_DMA_DCAD_REG(rx->rx_q),
	    RX_DMA_DCAD_PAYLOAD_EN, 0);

	AQ_WRITE_REG_BIT(sc, RX_DMA_DESC_REG(rx->rx_q), RX_DMA_DESC_EN, 1);
}
2164 
/*
 * Attach up to `nslots` fresh mbuf clusters to the rx ring starting at
 * the software producer index, write the matching read descriptors and
 * notify hardware via the tail pointer.  Returns how many of the
 * requested slots could NOT be filled (0 on complete success).
 */
static inline unsigned int
aq_rx_fill_slots(struct aq_softc *sc, struct aq_rxring *rx, uint nslots)
{
	struct aq_rx_desc_read *ring, *rd;
	struct aq_slot *as;
	struct mbuf *m;
	uint p, fills;

	ring = AQ_DMA_KVA(&rx->rx_mem);
	p = rx->rx_prod;

	bus_dmamap_sync(sc->sc_dmat, AQ_DMA_MAP(&rx->rx_mem), 0,
	    AQ_DMA_LEN(&rx->rx_mem), BUS_DMASYNC_POSTWRITE);

	for (fills = 0; fills < nslots; fills++) {
		as = &rx->rx_slots[p];
		rd = &ring[p];

		m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES + ETHER_ALIGN);
		if (m == NULL)
			break;

		/*
		 * Position the data pointer so that exactly MCLBYTES of
		 * space remain, offset by ETHER_ALIGN so the payload
		 * following the ethernet header is aligned.
		 */
		m->m_data += (m->m_ext.ext_size - (MCLBYTES + ETHER_ALIGN));
		m->m_data += ETHER_ALIGN;
		m->m_len = m->m_pkthdr.len = MCLBYTES;

		if (bus_dmamap_load_mbuf(sc->sc_dmat, as->as_map, m,
		    BUS_DMA_NOWAIT) != 0) {
			m_freem(m);
			break;
		}
		as->as_m = m;

		/* single data buffer per descriptor, no header buffer */
		htolem64(&rd->buf_addr, as->as_map->dm_segs[0].ds_addr);
		rd->hdr_addr = 0;
		p++;
		if (p == AQ_RXD_NUM)
			p = 0;
	}

	bus_dmamap_sync(sc->sc_dmat, AQ_DMA_MAP(&rx->rx_mem), 0,
	    AQ_DMA_LEN(&rx->rx_mem), BUS_DMASYNC_PREWRITE);

	/* expose the new descriptors to hardware */
	rx->rx_prod = p;
	AQ_WRITE_REG(sc, RX_DMA_DESC_TAIL_PTR_REG(rx->rx_q), rx->rx_prod);
	return (nslots - fills);
}
2212 
2213 int
2214 aq_rx_fill(struct aq_softc *sc, struct aq_rxring *rx)
2215 {
2216 	u_int slots;
2217 
2218 	slots = if_rxr_get(&rx->rx_rxr, AQ_RXD_NUM);
2219 	if (slots == 0)
2220 		return 1;
2221 
2222 	slots = aq_rx_fill_slots(sc, rx, slots);
2223 	if_rxr_put(&rx->rx_rxr, slots);
2224 	return 0;
2225 }
2226 
2227 void
2228 aq_refill(void *xq)
2229 {
2230 	struct aq_queues *q = xq;
2231 	struct aq_softc *sc = q->q_sc;
2232 
2233 	aq_rx_fill(sc, &q->q_rx);
2234 
2235 	if (if_rxr_inuse(&q->q_rx.rx_rxr) == 0)
2236 		timeout_add(&q->q_rx.rx_refill, 1);
2237 }
2238 
/*
 * Process received packets on a ring up to the current hardware head
 * pointer.  A packet larger than one cluster spans several descriptors;
 * fragments are chained at rx_m_tail until a descriptor carrying the
 * EOP status bit completes the packet, which is then delivered to the
 * stack or dropped if any fragment reported an error.  Consumed slots
 * are refilled at the end.
 */
void
aq_rxeof(struct aq_softc *sc, struct aq_rxring *rx)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct aq_rx_desc_wb *rxd;
	struct aq_rx_desc_wb *ring;
	struct aq_slot *as;
	uint32_t end, idx;
	uint16_t pktlen, status;
	uint32_t rxd_type;
	struct mbuf *m, *mb;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	int rxfree;

	if (!ISSET(ifp->if_flags, IFF_RUNNING))
		return;

	/* hardware's current write position in the ring */
	end = AQ_READ_REG(sc, RX_DMA_DESC_HEAD_PTR_REG(rx->rx_q)) &
	    RX_DMA_DESC_HEAD_PTR;

	bus_dmamap_sync(sc->sc_dmat, AQ_DMA_MAP(&rx->rx_mem), 0,
	    AQ_DMA_LEN(&rx->rx_mem), BUS_DMASYNC_POSTREAD);

	rxfree = 0;
	idx = rx->rx_cons;
	ring = AQ_DMA_KVA(&rx->rx_mem);
	while (idx != end) {
		rxd = &ring[idx];
		as = &rx->rx_slots[idx];

		bus_dmamap_sync(sc->sc_dmat, as->as_map, 0,
		    as->as_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, as->as_map);

		/* stop at the first descriptor hardware hasn't finished */
		status = lemtoh16(&rxd->status);
		if ((status & AQ_RXDESC_STATUS_DD) == 0)
			break;

		rxfree++;
		mb = as->as_m;
		as->as_m = NULL;

		pktlen = lemtoh16(&rxd->pkt_len);
		rxd_type = lemtoh32(&rxd->type);
		/* rss hash */

		/* append this fragment to the packet under assembly */
		mb->m_pkthdr.len = 0;
		mb->m_next = NULL;
		*rx->rx_m_tail = mb;
		rx->rx_m_tail = &mb->m_next;

		/* m is the head mbuf of the (possibly multi-mbuf) packet */
		m = rx->rx_m_head;

#if NVLAN > 0
		if (rxd_type & (AQ_RXDESC_TYPE_VLAN | AQ_RXDESC_TYPE_VLAN2)) {
			m->m_pkthdr.ether_vtag = lemtoh16(&rxd->vlan);
			m->m_flags |= M_VLANTAG;
		}
#endif

		/* propagate hardware checksum verdicts to the stack */
		if ((rxd_type & AQ_RXDESC_TYPE_V4_SUM) &&
		    ((status & AQ_RXDESC_STATUS_V4_SUM_NG) == 0))
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

		if ((rxd_type & AQ_RXDESC_TYPE_L4_SUM) &&
		   (status & AQ_RXDESC_STATUS_L4_SUM_OK))
			m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK |
			    M_TCP_CSUM_IN_OK;

		/* one bad fragment poisons the whole packet */
		if ((status & AQ_RXDESC_STATUS_MACERR) ||
		    (rxd_type & AQ_RXDESC_TYPE_DMA_ERR)) {
			printf("%s:rx: rx error (status %x type %x)\n",
			    DEVNAME(sc), status, rxd_type);
			rx->rx_m_error = 1;
		}

		if (status & AQ_RXDESC_STATUS_EOP) {
			/* pkt_len is the total length of the whole packet */
			mb->m_len = pktlen - m->m_pkthdr.len;
			m->m_pkthdr.len = pktlen;
			if (rx->rx_m_error != 0) {
				ifp->if_ierrors++;
				m_freem(m);
			} else {
				ml_enqueue(&ml, m);
			}

			/* reset assembly state for the next packet */
			rx->rx_m_head = NULL;
			rx->rx_m_tail = &rx->rx_m_head;
			rx->rx_m_error = 0;
		} else {
			/* intermediate fragments always fill a cluster */
			mb->m_len = MCLBYTES;
			m->m_pkthdr.len += mb->m_len;
		}

		idx++;
		if (idx == AQ_RXD_NUM)
			idx = 0;
	}
	rx->rx_cons = idx;

	if (rxfree > 0) {
		if_rxr_put(&rx->rx_rxr, rxfree);
		if (ifiq_input(rx->rx_ifiq, &ml))
			if_rxr_livelocked(&rx->rx_rxr);

		/* replace the consumed buffers */
		aq_rx_fill(sc, rx);
		if (if_rxr_inuse(&rx->rx_rxr) == 0)
			timeout_add(&rx->rx_refill, 1);
	}
}
2349 
/*
 * Reclaim completed tx descriptors up to the hardware head pointer:
 * unload the dma map and free the mbuf of each transmitted packet,
 * advance the consumer index, and restart the ifqueue if it had been
 * marked oactive.
 */
void
aq_txeof(struct aq_softc *sc, struct aq_txring *tx)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct aq_slot *as;
	uint32_t idx, end, free;

	if (!ISSET(ifp->if_flags, IFF_RUNNING))
		return;

	idx = tx->tx_cons;
	end = AQ_READ_REG(sc, TX_DMA_DESC_HEAD_PTR_REG(tx->tx_q)) &
	    TX_DMA_DESC_HEAD_PTR;
	free = 0;

	bus_dmamap_sync(sc->sc_dmat, AQ_DMA_MAP(&tx->tx_mem), 0,
	    AQ_DMA_LEN(&tx->tx_mem), BUS_DMASYNC_POSTREAD);

	while (idx != end) {
		as = &tx->tx_slots[idx];

		/* only the slot at which a packet started holds its mbuf */
		if (as->as_m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, as->as_map);

			m_freem(as->as_m);
			as->as_m = NULL;
		}

		idx++;
		if (idx == AQ_TXD_NUM)
			idx = 0;
		free++;
	}

	tx->tx_cons = idx;

	if (free != 0) {
		/* descriptors became available; unstall the queue */
		if (ifq_is_oactive(tx->tx_ifq))
			ifq_restart(tx->tx_ifq);
	}
}
2391 
/*
 * Transmit start routine for an ifqueue.  Each packet is encoded as an
 * optional VLAN context descriptor followed by one data descriptor per
 * dma segment, with EOP and writeback requested on the last segment.
 * The hardware tail pointer is written once at the end to kick off
 * transmission of everything queued in this call.
 */
void
aq_start(struct ifqueue *ifq)
{
	struct aq_queues *aq = ifq->ifq_softc;
	struct aq_softc *sc = aq->q_sc;
	struct aq_txring *tx = &aq->q_tx;
	struct aq_tx_desc *ring, *txd;
	struct aq_slot *as;
	struct mbuf *m;
	uint32_t idx, free, used, ctl1, ctl2;
	int error, i;

	idx = tx->tx_prod;
	free = tx->tx_cons + AQ_TXD_NUM - tx->tx_prod;
	used = 0;

	bus_dmamap_sync(sc->sc_dmat, AQ_DMA_MAP(&tx->tx_mem), 0,
	    AQ_DMA_LEN(&tx->tx_mem), BUS_DMASYNC_POSTWRITE);
	ring = (struct aq_tx_desc *)AQ_DMA_KVA(&tx->tx_mem);

	for (;;) {
		/*
		 * Stop if the worst case (max segments plus a context
		 * descriptor) might not fit in the remaining space.
		 */
		if (used + AQ_TX_MAX_SEGMENTS + 1 >= free) {
			ifq_set_oactive(ifq);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		as = &tx->tx_slots[idx];

		error = bus_dmamap_load_mbuf(sc->sc_dmat, as->as_map, m,
		    BUS_DMA_STREAMING | BUS_DMA_NOWAIT);
		if (error == EFBIG) {
			/* too many segments: compact the chain and retry */
			if (m_defrag(m, M_DONTWAIT)) {
				m_freem(m);
				break;
			}

			error = bus_dmamap_load_mbuf(sc->sc_dmat, as->as_map,
			    m, BUS_DMA_STREAMING | BUS_DMA_NOWAIT);
		}
		if (error != 0) {
			m_freem(m);
			break;
		}

		/* the packet's mbuf lives in its first slot */
		as->as_m = m;

#if NBPFILTER > 0
		if (ifq->ifq_if->if_bpf)
			bpf_mtap_ether(ifq->ifq_if->if_bpf, m, BPF_DIRECTION_OUT);
#endif
		bus_dmamap_sync(sc->sc_dmat, as->as_map, 0,
		    as->as_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

		ctl2 = m->m_pkthdr.len << AQ_TXDESC_CTL2_LEN_SHIFT;
		ctl1 = AQ_TXDESC_CTL1_TYPE_TXD | AQ_TXDESC_CTL1_CMD_FCS;
#if NVLAN > 0
		if (m->m_flags & M_VLANTAG) {
			/* emit a context descriptor carrying the VLAN tag */
			txd = ring + idx;
			txd->buf_addr = 0;
			txd->ctl1 = htole32(AQ_TXDESC_CTL1_TYPE_TXC |
			    (m->m_pkthdr.ether_vtag << AQ_TXDESC_CTL1_VLAN_SHIFT));
			txd->ctl2 = 0;

			ctl1 |= AQ_TXDESC_CTL1_CMD_VLAN;
			ctl2 |= AQ_TXDESC_CTL2_CTX_EN;

			idx++;
			if (idx == AQ_TXD_NUM)
				idx = 0;
			used++;
		}
#endif

		/* request hardware checksum offload as flagged */
		if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
			ctl1 |= AQ_TXDESC_CTL1_CMD_IP4CSUM;
		if (m->m_pkthdr.csum_flags & (M_TCP_CSUM_OUT | M_UDP_CSUM_OUT))
			ctl1 |= AQ_TXDESC_CTL1_CMD_L4CSUM;

		/* one data descriptor per dma segment */
		for (i = 0; i < as->as_map->dm_nsegs; i++) {

			/* last segment: end of packet, request writeback */
			if (i == as->as_map->dm_nsegs - 1)
				ctl1 |= AQ_TXDESC_CTL1_CMD_EOP |
				    AQ_TXDESC_CTL1_CMD_WB;

			txd = ring + idx;
			txd->buf_addr = htole64(as->as_map->dm_segs[i].ds_addr);
			txd->ctl1 = htole32(ctl1 |
			    (as->as_map->dm_segs[i].ds_len <<
			    AQ_TXDESC_CTL1_BLEN_SHIFT));
			txd->ctl2 = htole32(ctl2);

			idx++;
			if (idx == AQ_TXD_NUM)
				idx = 0;
			used++;
		}
	}

	bus_dmamap_sync(sc->sc_dmat, AQ_DMA_MAP(&tx->tx_mem), 0,
	    AQ_DMA_LEN(&tx->tx_mem), BUS_DMASYNC_PREWRITE);

	if (used != 0) {
		/* publish the new producer index to hardware */
		tx->tx_prod = idx;
		AQ_WRITE_REG(sc, TX_DMA_DESC_TAIL_PTR_REG(tx->tx_q),
		    tx->tx_prod);
	}
}
2503 
2504 int
2505 aq_intr_queue(void *arg)
2506 {
2507 	struct aq_queues *aq = arg;
2508 	struct aq_softc *sc = aq->q_sc;
2509 	uint32_t status;
2510 	uint32_t clear;
2511 
2512 	status = AQ_READ_REG(sc, AQ_INTR_STATUS_REG);
2513 	clear = 0;
2514 	if (status & (1 << aq->q_tx.tx_irq)) {
2515 		clear |= (1 << aq->q_tx.tx_irq);
2516 		aq_txeof(sc, &aq->q_tx);
2517 	}
2518 
2519 	if (status & (1 << aq->q_rx.rx_irq)) {
2520 		clear |= (1 << aq->q_rx.rx_irq);
2521 		aq_rxeof(sc, &aq->q_rx);
2522 	}
2523 
2524 	AQ_WRITE_REG(sc, AQ_INTR_STATUS_CLR_REG, clear);
2525 	return (clear != 0);
2526 }
2527 
2528 int
2529 aq_intr_link(void *arg)
2530 {
2531 	struct aq_softc *sc = arg;
2532 	uint32_t status;
2533 
2534 	status = AQ_READ_REG(sc, AQ_INTR_STATUS_REG);
2535 	if (status & (1 << sc->sc_linkstat_irq)) {
2536 		aq_update_link_status(sc);
2537 		AQ_WRITE_REG(sc, AQ_INTR_STATUS_REG, (1 << sc->sc_linkstat_irq));
2538 		return 1;
2539 	}
2540 
2541 	return 0;
2542 }
2543 
2544 int
2545 aq_intr(void *arg)
2546 {
2547 	struct aq_softc *sc = arg;
2548 	struct aq_queues *aq = &sc->sc_queues[0];
2549 	uint32_t status;
2550 
2551 	status = AQ_READ_REG(sc, AQ_INTR_STATUS_REG);
2552 	AQ_WRITE_REG(sc, AQ_INTR_STATUS_CLR_REG, 0xffffffff);
2553 
2554 	if (status & (1 << sc->sc_linkstat_irq))
2555 		aq_update_link_status(sc);
2556 
2557 	if (status & (1 << aq->q_tx.tx_irq)) {
2558 		aq_txeof(sc, &aq->q_tx);
2559 		AQ_WRITE_REG(sc, AQ_INTR_STATUS_CLR_REG,
2560 		    (1 << aq->q_tx.tx_irq));
2561 	}
2562 	if (status & (1 << aq->q_rx.rx_irq)) {
2563 		aq_rxeof(sc, &aq->q_rx);
2564 		AQ_WRITE_REG(sc, AQ_INTR_STATUS_CLR_REG,
2565 		    (1 << aq->q_rx.rx_irq));
2566 	}
2567 
2568 	return 1;
2569 }
2570 
/*
 * Interface watchdog hook; intentionally a no-op — no tx timeout
 * recovery is implemented.
 */
void
aq_watchdog(struct ifnet *ifp)
{

}
2576 
2577 void
2578 aq_free_slots(struct aq_softc *sc, struct aq_slot *slots, int allocated,
2579     int total)
2580 {
2581 	struct aq_slot *as;
2582 
2583 	int i = allocated;
2584 	while (i-- > 0) {
2585 		as = &slots[i];
2586 		bus_dmamap_destroy(sc->sc_dmat, as->as_map);
2587 		if (as->as_m != NULL)
2588 			m_freem(as->as_m);
2589 	}
2590 	free(slots, M_DEVBUF, total * sizeof(*as));
2591 }
2592 
/*
 * Allocate everything a queue pair needs — rx/tx slot arrays, a dma
 * map per slot and the descriptor rings — then program the rings into
 * hardware.  On failure every partial allocation is unwound and ENOMEM
 * is returned.
 */
int
aq_queue_up(struct aq_softc *sc, struct aq_queues *aq)
{
	struct aq_rxring *rx;
	struct aq_txring *tx;
	struct aq_slot *as;
	int i, mtu;

	rx = &aq->q_rx;
	rx->rx_slots = mallocarray(sizeof(*as), AQ_RXD_NUM, M_DEVBUF,
	    M_WAITOK | M_ZERO);
	if (rx->rx_slots == NULL) {
		printf("%s: failed to allocate rx slots %d\n", DEVNAME(sc),
		    aq->q_index);
		return ENOMEM;
	}

	/* one single-segment, cluster-sized map per rx slot */
	for (i = 0; i < AQ_RXD_NUM; i++) {
		as = &rx->rx_slots[i];
		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
		    &as->as_map) != 0) {
			printf("%s: failed to allocate rx dma maps %d\n",
			    DEVNAME(sc), aq->q_index);
			goto destroy_rx_slots;
		}
	}

	if (aq_dmamem_alloc(sc, &rx->rx_mem, AQ_RXD_NUM *
	    sizeof(struct aq_rx_desc_read), PAGE_SIZE) != 0) {
		printf("%s: unable to allocate rx ring %d\n", DEVNAME(sc),
		    aq->q_index);
		goto destroy_rx_slots;
	}

	tx = &aq->q_tx;
	tx->tx_slots = mallocarray(sizeof(*as), AQ_TXD_NUM, M_DEVBUF,
	    M_WAITOK | M_ZERO);
	if (tx->tx_slots == NULL) {
		printf("%s: failed to allocate tx slots %d\n", DEVNAME(sc),
		    aq->q_index);
		goto destroy_rx_ring;
	}

	/* tx maps can take a full-MTU packet split over many segments */
	mtu = sc->sc_arpcom.ac_if.if_hardmtu;
	for (i = 0; i < AQ_TXD_NUM; i++) {
		as = &tx->tx_slots[i];
		if (bus_dmamap_create(sc->sc_dmat, mtu, AQ_TX_MAX_SEGMENTS,
		    MCLBYTES, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
		    &as->as_map) != 0) {
			printf("%s: failed to allocated tx dma maps %d\n",
			    DEVNAME(sc), aq->q_index);
			goto destroy_tx_slots;
		}
	}

	if (aq_dmamem_alloc(sc, &tx->tx_mem, AQ_TXD_NUM *
	    sizeof(struct aq_tx_desc), PAGE_SIZE) != 0) {
		printf("%s: unable to allocate tx ring %d\n", DEVNAME(sc),
		    aq->q_index);
		goto destroy_tx_slots;
	}

	aq_txring_reset(sc, tx, 1);
	aq_rxring_reset(sc, rx, 1);
	return 0;

destroy_tx_slots:
	/* i is the number of tx maps successfully created so far */
	aq_free_slots(sc, tx->tx_slots, i, AQ_TXD_NUM);
	tx->tx_slots = NULL;
	/* all rx maps exist here; deliberately fall through to free them */
	i = AQ_RXD_NUM;

destroy_rx_ring:
	aq_dmamem_free(sc, &rx->rx_mem);
destroy_rx_slots:
	aq_free_slots(sc, rx->rx_slots, i, AQ_RXD_NUM);
	rx->rx_slots = NULL;
	return ENOMEM;
}
2672 
/*
 * Tear down a queue pair: stop both rings, free the slot arrays along
 * with any mbufs still attached, discard a partially reassembled rx
 * packet, and release the descriptor ring memory.
 * NOTE(review): the aq_dmamem_free() calls are unconditional — this
 * relies on the ring memory having been allocated; verify that the
 * aq_up() failure path never reaches here with an untouched queue.
 */
void
aq_queue_down(struct aq_softc *sc, struct aq_queues *aq)
{
	struct aq_txring *tx;
	struct aq_rxring *rx;

	tx = &aq->q_tx;
	aq_txring_reset(sc, &aq->q_tx, 0);
	if (tx->tx_slots != NULL) {
		aq_free_slots(sc, tx->tx_slots, AQ_TXD_NUM, AQ_TXD_NUM);
		tx->tx_slots = NULL;
	}

	aq_dmamem_free(sc, &tx->tx_mem);

	rx = &aq->q_rx;
	/* drop any partially reassembled packet */
	m_freem(rx->rx_m_head);
	rx->rx_m_head = NULL;
	rx->rx_m_tail = &rx->rx_m_head;
	rx->rx_m_error = 0;
	aq_rxring_reset(sc, &aq->q_rx, 0);
	if (rx->rx_slots != NULL) {
		aq_free_slots(sc, rx->rx_slots, AQ_RXD_NUM, AQ_RXD_NUM);
		rx->rx_slots = NULL;
	}

	aq_dmamem_free(sc, &rx->rx_mem);
}
2701 
2702 void
2703 aq_invalidate_rx_desc_cache(struct aq_softc *sc)
2704 {
2705 	uint32_t cache;
2706 
2707 	cache = AQ_READ_REG(sc, RX_DMA_DESC_CACHE_INIT_REG);
2708 	AQ_WRITE_REG_BIT(sc, RX_DMA_DESC_CACHE_INIT_REG, RX_DMA_DESC_CACHE_INIT,
2709 	    (cache & RX_DMA_DESC_CACHE_INIT) ^ RX_DMA_DESC_CACHE_INIT);
2710 }
2711 
/*
 * Bring the interface up: set up every queue pair, program the MAC
 * address and checksum offload, enable interrupts and the packet
 * buffers, then fill the rx rings.  Returns ENETRESET on success so
 * that aq_ioctl() goes on to program the rx filters via aq_iff().
 */
int
aq_up(struct aq_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int i;

	/* discard any descriptors the hardware still has cached */
	aq_invalidate_rx_desc_cache(sc);

	for (i = 0; i < sc->sc_nqueues; i++) {
		if (aq_queue_up(sc, &sc->sc_queues[i]) != 0)
			goto downqueues;
	}

	aq_set_mac_addr(sc, AQ_HW_MAC_OWN, sc->sc_arpcom.ac_enaddr);

	/* enable tx and rx IPv4/L4 checksum offload */
	AQ_WRITE_REG_BIT(sc, TPO_HWCSUM_REG, TPO_HWCSUM_IP4CSUM_EN, 1);
	AQ_WRITE_REG_BIT(sc, TPO_HWCSUM_REG, TPO_HWCSUM_L4CSUM_EN, 1);

	AQ_WRITE_REG_BIT(sc, RPO_HWCSUM_REG, RPO_HWCSUM_IP4CSUM_EN, 1);
	AQ_WRITE_REG_BIT(sc, RPO_HWCSUM_REG, RPO_HWCSUM_L4CSUM_EN, 1);

	SET(ifp->if_flags, IFF_RUNNING);
	aq_enable_intr(sc, 1, 1);
	AQ_WRITE_REG_BIT(sc, TPB_TX_BUF_REG, TPB_TX_BUF_EN, 1);
	AQ_WRITE_REG_BIT(sc, RPB_RPF_RX_REG, RPB_RPF_RX_BUF_EN, 1);

	for (i = 0; i < sc->sc_nqueues; i++) {
		struct aq_queues *aq = &sc->sc_queues[i];

		if_rxr_init(&aq->q_rx.rx_rxr, howmany(ifp->if_hardmtu, MCLBYTES),
		    AQ_RXD_NUM - 1);
		aq_rx_fill(sc, &aq->q_rx);

		ifq_clr_oactive(aq->q_tx.tx_ifq);
	}

	return ENETRESET;

downqueues:
	/* unwind every queue */
	for (i = 0; i < sc->sc_nqueues; i++)
		aq_queue_down(sc, &sc->sc_queues[i]);
	return ENOMEM;
}
2755 
/*
 * Stop the interface: clear RUNNING, mask the queue interrupts (link
 * stays enabled), wait for any in-flight handler, disable the rx
 * buffer, tear down every queue pair and invalidate the hardware
 * descriptor cache.
 */
void
aq_down(struct aq_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int i;

	CLR(ifp->if_flags, IFF_RUNNING);

	aq_enable_intr(sc, 1, 0);
	intr_barrier(sc->sc_ih);

	AQ_WRITE_REG_BIT(sc, RPB_RPF_RX_REG, RPB_RPF_RX_BUF_EN, 0);
	for (i = 0; i < sc->sc_nqueues; i++) {
		/* queue intr barrier? */
		aq_queue_down(sc, &sc->sc_queues[i]);
	}

	aq_invalidate_rx_desc_cache(sc);
}
2775 
2776 void
2777 aq_enable_intr(struct aq_softc *sc, int link, int txrx)
2778 {
2779 	uint32_t imask = 0;
2780 	int i;
2781 
2782 	if (txrx) {
2783 		for (i = 0; i < sc->sc_nqueues; i++) {
2784 			imask |= (1 << sc->sc_queues[i].q_tx.tx_irq);
2785 			imask |= (1 << sc->sc_queues[i].q_rx.rx_irq);
2786 		}
2787 	}
2788 
2789 	if (link)
2790 		imask |= (1 << sc->sc_linkstat_irq);
2791 
2792 	AQ_WRITE_REG(sc, AQ_INTR_MASK_REG, imask);
2793 	AQ_WRITE_REG(sc, AQ_INTR_STATUS_CLR_REG, 0xffffffff);
2794 }
2795 
2796 void
2797 aq_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2798 {
2799 	struct aq_softc *aq = ifp->if_softc;
2800 	enum aq_link_speed speed;
2801 	enum aq_link_fc fc;
2802 	int media;
2803 	int flow;
2804 
2805 	if (aq_get_linkmode(aq, &speed, &fc, NULL) != 0)
2806 		return;
2807 
2808 	switch (speed) {
2809 	case AQ_LINK_10G:
2810 		media = IFM_10G_T;
2811 		break;
2812 	case AQ_LINK_5G:
2813 		media = IFM_5000_T;
2814 		break;
2815 	case AQ_LINK_2G5:
2816 		media = IFM_2500_T;
2817 		break;
2818 	case AQ_LINK_1G:
2819 		media = IFM_1000_T;
2820 		break;
2821 	case AQ_LINK_100M:
2822 		media = IFM_100_TX;
2823 		break;
2824 	case AQ_LINK_NONE:
2825 		media = 0;
2826 		break;
2827 	}
2828 
2829 	flow = 0;
2830 	if (fc & AQ_FC_RX)
2831 		flow |= IFM_ETH_RXPAUSE;
2832 	if (fc & AQ_FC_TX)
2833 		flow |= IFM_ETH_TXPAUSE;
2834 
2835 	ifmr->ifm_status = IFM_AVALID;
2836 	if (speed != AQ_LINK_NONE) {
2837 		ifmr->ifm_status |= IFM_ACTIVE;
2838 		ifmr->ifm_active = IFM_ETHER | IFM_AUTO | media | flow;
2839 	}
2840 }
2841 
2842 int
2843 aq_ifmedia_change(struct ifnet *ifp)
2844 {
2845 	struct aq_softc *sc = ifp->if_softc;
2846 	enum aq_link_speed rate = AQ_LINK_NONE;
2847 	enum aq_link_fc fc = AQ_FC_NONE;
2848 
2849 	if (IFM_TYPE(sc->sc_media.ifm_media) != IFM_ETHER)
2850 		return EINVAL;
2851 
2852 	switch (IFM_SUBTYPE(sc->sc_media.ifm_media)) {
2853 	case IFM_AUTO:
2854 		rate = AQ_LINK_AUTO;
2855 		break;
2856 	case IFM_NONE:
2857 		rate = AQ_LINK_NONE;
2858 		break;
2859 	case IFM_100_TX:
2860 		rate = AQ_LINK_100M;
2861 		break;
2862 	case IFM_1000_T:
2863 		rate = AQ_LINK_1G;
2864 		break;
2865 	case IFM_2500_T:
2866 		rate = AQ_LINK_2G5;
2867 		break;
2868 	case IFM_5000_T:
2869 		rate = AQ_LINK_5G;
2870 		break;
2871 	case IFM_10G_T:
2872 		rate = AQ_LINK_10G;
2873 		break;
2874 	default:
2875 		return ENODEV;
2876 	}
2877 
2878 	if (sc->sc_media.ifm_media & IFM_FLOW)
2879 		fc = AQ_FC_ALL;
2880 
2881 	return aq_set_linkmode(sc, rate, fc, AQ_EEE_DISABLE);
2882 }
2883 
2884 void
2885 aq_update_link_status(struct aq_softc *sc)
2886 {
2887 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2888 	enum aq_link_speed speed;
2889 	enum aq_link_fc fc;
2890 
2891 	if (aq_get_linkmode(sc, &speed, &fc, NULL) != 0)
2892 		return;
2893 
2894 	if (speed == AQ_LINK_NONE) {
2895 		if (ifp->if_link_state != LINK_STATE_DOWN) {
2896 			ifp->if_link_state = LINK_STATE_DOWN;
2897 			if_link_state_change(ifp);
2898 		}
2899 	} else {
2900 		if (ifp->if_link_state != LINK_STATE_FULL_DUPLEX) {
2901 			ifp->if_link_state = LINK_STATE_FULL_DUPLEX;
2902 			if_link_state_change(ifp);
2903 		}
2904 	}
2905 }
2906 
2907 int
2908 aq_rxrinfo(struct aq_softc *sc, struct if_rxrinfo *ifri)
2909 {
2910 	struct if_rxring_info *ifr;
2911 	int i;
2912 	int error;
2913 
2914 	ifr = mallocarray(sc->sc_nqueues, sizeof(*ifr), M_TEMP,
2915 	    M_WAITOK | M_ZERO | M_CANFAIL);
2916 	if (ifr == NULL)
2917 		return (ENOMEM);
2918 
2919 	for (i = 0; i < sc->sc_nqueues; i++) {
2920 		ifr[i].ifr_size = MCLBYTES;
2921 		ifr[i].ifr_info = sc->sc_queues[i].q_rx.rx_rxr;
2922 	}
2923 
2924 	error = if_rxr_info_ioctl(ifri, sc->sc_nqueues, ifr);
2925 	free(ifr, M_TEMP, sc->sc_nqueues * sizeof(*ifr));
2926 
2927 	return (error);
2928 }
2929 
/*
 * Interface ioctl handler, run at splnet.  ENETRESET from any branch
 * (including aq_up(), which returns it on success) causes the rx
 * filters to be reprogrammed via aq_iff() while the interface is
 * running.
 */
int
aq_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct aq_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0, s;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			error = aq_up(sc);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/* already running: just refresh the filters below */
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				error = aq_up(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				aq_down(sc);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	case SIOCGIFRXR:
		error = aq_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		/* reprogram promisc/multicast filters */
		if (ifp->if_flags & IFF_RUNNING)
			aq_iff(sc);
		error = 0;
	}

	splx(s);
	return error;
}
2978 
/*
 * Program the hardware rx filters from the interface state.
 * IFF_PROMISC enables hardware promiscuous mode; too many multicast
 * groups (or any address range) falls back to hardware all-multicast;
 * otherwise each group address gets its own L2 filter slot after the
 * interface's own address, and unused slots are disabled.
 */
void
aq_iff(struct aq_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct arpcom *ac = &sc->sc_arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	int idx;

	if (ifp->if_flags & IFF_PROMISC) {
		ifp->if_flags |= IFF_ALLMULTI;
		AQ_WRITE_REG_BIT(sc, RPF_L2BC_REG, RPF_L2BC_PROMISC, 1);
	} else if (ac->ac_multirangecnt > 0 ||
	    ac->ac_multicnt >= AQ_HW_MAC_NUM) {
		/* not enough filter slots: accept all multicast */
		ifp->if_flags |= IFF_ALLMULTI;
		AQ_WRITE_REG_BIT(sc, RPF_L2BC_REG, RPF_L2BC_PROMISC, 0);
		AQ_WRITE_REG_BIT(sc, RPF_MCAST_FILTER_MASK_REG,
		    RPF_MCAST_FILTER_MASK_ALLMULTI, 1);
		AQ_WRITE_REG_BIT(sc, RPF_MCAST_FILTER_REG(0),
		    RPF_MCAST_FILTER_EN, 1);
	} else {
		ifp->if_flags &= ~IFF_ALLMULTI;
		/* slot AQ_HW_MAC_OWN holds the interface's own address */
		idx = AQ_HW_MAC_OWN + 1;

		AQ_WRITE_REG_BIT(sc, RPF_L2BC_REG, RPF_L2BC_PROMISC, 0);

		/* one filter slot per multicast group */
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			aq_set_mac_addr(sc, idx++, enm->enm_addrlo);
			ETHER_NEXT_MULTI(step, enm);
		}

		/* disable the remaining slots */
		for (; idx < AQ_HW_MAC_NUM; idx++)
			aq_set_mac_addr(sc, idx, NULL);

		AQ_WRITE_REG_BIT(sc, RPF_MCAST_FILTER_MASK_REG,
		    RPF_MCAST_FILTER_MASK_ALLMULTI, 0);
		AQ_WRITE_REG_BIT(sc, RPF_MCAST_FILTER_REG(0),
		    RPF_MCAST_FILTER_EN, 0);
	}
}
3020 
/*
 * Allocate a dma-safe memory area of `size` bytes: create a map,
 * allocate a single zeroed segment aligned to `align`, map it into
 * kernel virtual memory and load it into the map.  Returns 0 on
 * success, 1 on failure with all intermediate resources released.
 */
int
aq_dmamem_alloc(struct aq_softc *sc, struct aq_dmamem *aqm,
    bus_size_t size, u_int align)
{
	aqm->aqm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, aqm->aqm_size, 1,
	    aqm->aqm_size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
	    &aqm->aqm_map) != 0)
		return (1);
	if (bus_dmamem_alloc(sc->sc_dmat, aqm->aqm_size,
	    align, 0, &aqm->aqm_seg, 1, &aqm->aqm_nsegs,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
		goto destroy;
	if (bus_dmamem_map(sc->sc_dmat, &aqm->aqm_seg, aqm->aqm_nsegs,
	    aqm->aqm_size, &aqm->aqm_kva, BUS_DMA_WAITOK) != 0)
		goto free;
	if (bus_dmamap_load(sc->sc_dmat, aqm->aqm_map, aqm->aqm_kva,
	    aqm->aqm_size, NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	return (0);
unmap:
	bus_dmamem_unmap(sc->sc_dmat, aqm->aqm_kva, aqm->aqm_size);
free:
	bus_dmamem_free(sc->sc_dmat, &aqm->aqm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, aqm->aqm_map);
	return (1);
}
3052 
/*
 * Release a dma area set up by aq_dmamem_alloc(): unload the map,
 * unmap the kva, free the segment and destroy the map — the reverse of
 * the allocation order.
 */
void
aq_dmamem_free(struct aq_softc *sc, struct aq_dmamem *aqm)
{
	bus_dmamap_unload(sc->sc_dmat, aqm->aqm_map);
	bus_dmamem_unmap(sc->sc_dmat, aqm->aqm_kva, aqm->aqm_size);
	bus_dmamem_free(sc->sc_dmat, &aqm->aqm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, aqm->aqm_map);
}
3061