1 /* $OpenBSD: if_aq_pci.c,v 1.3 2021/09/20 01:27:23 jmatthew Exp $ */
2 /*	$NetBSD: if_aq.c,v 1.27 2021/06/16 00:21:18 riastradh Exp $	*/
3 
4 /*
5  * Copyright (c) 2021 Jonathan Matthew <jonathan@d14n.org>
6  * Copyright (c) 2021 Mike Larkin <mlarkin@openbsd.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 /**
22  * aQuantia Corporation Network Driver
23  * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
24  *
25  * Redistribution and use in source and binary forms, with or without
26  * modification, are permitted provided that the following conditions
27  * are met:
28  *
29  *   (1) Redistributions of source code must retain the above
30  *   copyright notice, this list of conditions and the following
31  *   disclaimer.
32  *
33  *   (2) Redistributions in binary form must reproduce the above
34  *   copyright notice, this list of conditions and the following
35  *   disclaimer in the documentation and/or other materials provided
36  *   with the distribution.
37  *
38  *   (3) The name of the author may not be used to endorse or promote
39  *   products derived from this software without specific prior
40  *   written permission.
41  *
42  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
43  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
44  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
45  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
46  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
47  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
48  * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
49  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
50  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
51  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
52  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
53  *
54  */
55 
56 /*-
57  * Copyright (c) 2020 Ryo Shimizu <ryo@nerv.org>
58  * All rights reserved.
59  *
60  * Redistribution and use in source and binary forms, with or without
61  * modification, are permitted provided that the following conditions
62  * are met:
63  * 1. Redistributions of source code must retain the above copyright
64  *    notice, this list of conditions and the following disclaimer.
65  * 2. Redistributions in binary form must reproduce the above copyright
66  *    notice, this list of conditions and the following disclaimer in the
67  *    documentation and/or other materials provided with the distribution.
68  *
69  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
70  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
71  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
72  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
73  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
74  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
75  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
76  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
77  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
78  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
79  * POSSIBILITY OF SUCH DAMAGE.
80  */
81 #include "bpfilter.h"
82 
83 #include <sys/types.h>
84 #include <sys/device.h>
85 #include <sys/param.h>
86 #include <sys/kernel.h>
87 #include <sys/sockio.h>
88 #include <sys/systm.h>
89 
90 #include <net/if.h>
91 #include <net/if_media.h>
92 
93 #include <netinet/in.h>
94 #include <netinet/if_ether.h>
95 
96 #include <dev/pci/pcireg.h>
97 #include <dev/pci/pcivar.h>
98 #include <dev/pci/pcidevs.h>
99 
100 #if NBPFILTER > 0
101 #include <net/bpf.h>
102 #endif
103 
104 /* #define AQ_DEBUG 1 */
105 #ifdef AQ_DEBUG
106 #define DPRINTF(x) printf x
107 #else
108 #define DPRINTF(x)
109 #endif /* AQ_DEBUG */
110 
111 #define DEVNAME(_s)	((_s)->sc_dev.dv_xname)
112 
113 #define AQ_BAR0 				0x10
114 #define AQ_MAXQ 				8
115 
116 #define AQ_TXD_NUM 				2048
117 #define AQ_RXD_NUM 				2048
118 
119 #define AQ_TX_MAX_SEGMENTS			1	/* XXX */
120 
121 #define AQ_LINKSTAT_IRQ				31
122 
123 #define RPF_ACTION_HOST				1
124 
125 #define AQ_FW_SOFTRESET_REG			0x0000
126 #define  AQ_FW_SOFTRESET_DIS			(1 << 14)
127 #define  AQ_FW_SOFTRESET_RESET			(1 << 15)
128 #define AQ_FW_VERSION_REG			0x0018
129 #define AQ_HW_REVISION_REG			0x001c
130 #define AQ_GLB_NVR_INTERFACE1_REG		0x0100
131 #define AQ_FW_MBOX_CMD_REG			0x0200
132 #define  AQ_FW_MBOX_CMD_EXECUTE			0x00008000
133 #define  AQ_FW_MBOX_CMD_BUSY			0x00000100
134 #define AQ_FW_MBOX_ADDR_REG			0x0208
135 #define AQ_FW_MBOX_VAL_REG			0x020C
136 #define AQ_FW_GLB_CPU_SEM_REG(i)		(0x03a0 + (i) * 4)
137 #define AQ_FW_SEM_RAM_REG			AQ_FW_GLB_CPU_SEM_REG(2)
138 #define AQ_FW_GLB_CTL2_REG			0x0404
139 #define AQ_GLB_GENERAL_PROVISIONING9_REG	0x0520
140 #define AQ_GLB_NVR_PROVISIONING2_REG		0x0534
141 #define AQ_INTR_STATUS_REG			0x2000  /* intr status */
142 #define AQ_INTR_STATUS_CLR_REG			0x2050  /* intr status clear */
143 #define AQ_INTR_MASK_REG			0x2060	/* intr mask set */
144 #define AQ_INTR_MASK_CLR_REG			0x2070	/* intr mask clear */
145 #define AQ_INTR_AUTOMASK_REG			0x2090
146 
147 /* AQ_INTR_IRQ_MAP_TXRX_REG 0x2100-0x2140 */
148 #define AQ_INTR_IRQ_MAP_TXRX_REG(i)		(0x2100 + ((i) / 2) * 4)
149 #define AQ_INTR_IRQ_MAP_TX_REG(i)		AQ_INTR_IRQ_MAP_TXRX_REG(i)
150 #define  AQ_INTR_IRQ_MAP_TX_IRQMAP(i)		(0x1FU << (((i) & 1) ? 16 : 24))
151 #define  AQ_INTR_IRQ_MAP_TX_EN(i)		(1U << (((i) & 1) ? 23 : 31))
152 #define AQ_INTR_IRQ_MAP_RX_REG(i)		AQ_INTR_IRQ_MAP_TXRX_REG(i)
153 #define  AQ_INTR_IRQ_MAP_RX_IRQMAP(i)		(0x1FU << (((i) & 1) ? 0 : 8))
154 #define  AQ_INTR_IRQ_MAP_RX_EN(i)		(1U << (((i) & 1) ? 7 : 15))
155 
156 /* AQ_GEN_INTR_MAP_REG[AQ_RINGS_NUM] 0x2180-0x2200 */
157 #define AQ_GEN_INTR_MAP_REG(i)			(0x2180 + (i) * 4)
158 #define  AQ_B0_ERR_INT				8U
159 
160 #define AQ_INTR_CTRL_REG			0x2300
161 #define  AQ_INTR_CTRL_IRQMODE			((1 << 0) | (1 << 1))
162 #define AQ_INTR_CTRL_IRQMODE_LEGACY		0
163 #define AQ_INTR_CTRL_IRQMODE_MSI		1
164 #define AQ_INTR_CTRL_IRQMODE_MSIX		2
165 #define  AQ_INTR_CTRL_MULTIVEC			(1 << 2)
166 #define  AQ_INTR_CTRL_RESET_DIS			(1 << 29)
167 #define  AQ_INTR_CTRL_RESET_IRQ			(1 << 31)
168 #define AQ_MBOXIF_POWER_GATING_CONTROL_REG	0x32a8
169 
170 #define FW_MPI_MBOX_ADDR_REG			0x0360
171 #define FW1X_MPI_INIT1_REG			0x0364
172 #define FW1X_MPI_INIT2_REG			0x0370
173 #define FW1X_MPI_EFUSEADDR_REG			0x0374
174 
175 #define FW2X_MPI_EFUSEADDR_REG			0x0364
176 #define FW2X_MPI_CONTROL_REG			0x0368  /* 64bit */
177 #define FW2X_MPI_STATE_REG			0x0370  /* 64bit */
180 #define FW_BOOT_EXIT_CODE_REG			0x0388
181 #define  RBL_STATUS_DEAD			0x0000dead
182 #define  RBL_STATUS_SUCCESS			0x0000abba
183 #define  RBL_STATUS_FAILURE			0x00000bad
184 #define  RBL_STATUS_HOST_BOOT			0x0000f1a7
185 #define FW_MPI_DAISY_CHAIN_STATUS_REG		0x0704
186 #define AQ_PCI_REG_CONTROL_6_REG		0x1014
187 
188 #define FW_MPI_RESETCTRL_REG			0x4000
189 #define  FW_MPI_RESETCTRL_RESET_DIS		(1 << 29)
190 
191 #define RX_SYSCONTROL_REG			0x5000
192 #define  RX_SYSCONTROL_RESET_DIS		(1 << 29)
193 
194 #define RX_TCP_RSS_HASH_REG			0x5040
195 
196 #define RPF_L2BC_REG				0x5100
197 #define  RPF_L2BC_EN				(1 << 0)
198 #define  RPF_L2BC_PROMISC			(1 << 3)
199 #define  RPF_L2BC_ACTION			0x7000
200 #define  RPF_L2BC_THRESHOLD			0xFFFF0000
201 
202 #define AQ_HW_MAC_OWN				0
203 
204 /* RPF_L2UC_*_REG[34] (actual [38]?) */
205 #define RPF_L2UC_LSW_REG(i)                     (0x5110 + (i) * 8)
206 #define RPF_L2UC_MSW_REG(i)                     (0x5114 + (i) * 8)
207 #define  RPF_L2UC_MSW_MACADDR_HI		0xFFFF
208 #define  RPF_L2UC_MSW_ACTION			0x70000
209 #define  RPF_L2UC_MSW_EN			(1 << 31)
210 #define AQ_HW_MAC_NUM				34
211 
212 /* RPF_MCAST_FILTER_REG[8] 0x5250-0x5270 */
213 #define RPF_MCAST_FILTER_REG(i)			(0x5250 + (i) * 4)
214 #define  RPF_MCAST_FILTER_EN			(1 << 31)
215 #define RPF_MCAST_FILTER_MASK_REG		0x5270
216 #define  RPF_MCAST_FILTER_MASK_ALLMULTI		(1 << 14)
217 
218 #define RPF_VLAN_MODE_REG			0x5280
219 #define  RPF_VLAN_MODE_PROMISC			(1 << 1)
220 #define  RPF_VLAN_MODE_ACCEPT_UNTAGGED		(1 << 2)
221 #define  RPF_VLAN_MODE_UNTAGGED_ACTION		0x38
222 
223 #define RPF_VLAN_TPID_REG                       0x5284
224 #define  RPF_VLAN_TPID_OUTER			0xFFFF0000
225 #define  RPF_VLAN_TPID_INNER			0xFFFF
226 
227 /* RPF_ETHERTYPE_FILTER_REG[AQ_RINGS_NUM] 0x5300-0x5380 */
228 #define RPF_ETHERTYPE_FILTER_REG(i)		(0x5300 + (i) * 4)
229 #define  RPF_ETHERTYPE_FILTER_EN		(1 << 31)
230 
231 /* RPF_L3_FILTER_REG[8] 0x5380-0x53a0 */
232 #define RPF_L3_FILTER_REG(i)			(0x5380 + (i) * 4)
233 #define  RPF_L3_FILTER_L4_EN			(1 << 31)
234 
235 #define RX_FLR_RSS_CONTROL1_REG			0x54c0
236 #define  RX_FLR_RSS_CONTROL1_EN			(1 << 31)
237 
238 #define RPF_RPB_RX_TC_UPT_REG                   0x54c4
239 #define  RPF_RPB_RX_TC_UPT_MASK(i)              (0x00000007 << ((i) * 4))
240 
241 #define RPB_RPF_RX_REG				0x5700
242 #define  RPB_RPF_RX_TC_MODE			(1 << 8)
243 #define  RPB_RPF_RX_FC_MODE			0x30
244 #define  RPB_RPF_RX_BUF_EN			(1 << 0)
245 
246 /* RPB_RXB_BUFSIZE_REG[AQ_TRAFFICCLASS_NUM] 0x5710-0x5790 */
247 #define RPB_RXB_BUFSIZE_REG(i)			(0x5710 + (i) * 0x10)
248 #define  RPB_RXB_BUFSIZE			0x1FF
249 #define RPB_RXB_XOFF_REG(i)			(0x5714 + (i) * 0x10)
250 #define  RPB_RXB_XOFF_EN			(1 << 31)
251 #define  RPB_RXB_XOFF_THRESH_HI                 0x3FFF0000
252 #define  RPB_RXB_XOFF_THRESH_LO                 0x3FFF
253 
254 #define RX_DMA_INT_DESC_WRWB_EN_REG		0x5a30
255 #define  RX_DMA_INT_DESC_WRWB_EN		(1 << 2)
256 #define  RX_DMA_INT_DESC_MODERATE_EN		(1 << 3)
257 
258 #define RX_INTR_MODERATION_CTL_REG(i)		(0x5a40 + (i) * 4)
259 #define  RX_INTR_MODERATION_CTL_EN		(1 << 1)
260 
261 #define RX_DMA_DESC_BASE_ADDRLSW_REG(i)		(0x5b00 + (i) * 0x20)
262 #define RX_DMA_DESC_BASE_ADDRMSW_REG(i)		(0x5b04 + (i) * 0x20)
263 #define RX_DMA_DESC_REG(i)			(0x5b08 + (i) * 0x20)
264 #define  RX_DMA_DESC_LEN			(0x3FF << 3)
265 #define  RX_DMA_DESC_RESET			(1 << 25)
266 #define  RX_DMA_DESC_HEADER_SPLIT		(1 << 28)
267 #define  RX_DMA_DESC_VLAN_STRIP			(1 << 29)
268 #define  RX_DMA_DESC_EN				(1 << 31)
269 #define RX_DMA_DESC_HEAD_PTR_REG(i)		(0x5b0c + (i) * 0x20)
270 #define  RX_DMA_DESC_HEAD_PTR			0xFFF
271 #define RX_DMA_DESC_TAIL_PTR_REG(i)		(0x5b10 + (i) * 0x20)
272 #define RX_DMA_DESC_BUFSIZE_REG(i)		(0x5b18 + (i) * 0x20)
273 #define  RX_DMA_DESC_BUFSIZE_DATA		0x000F
274 #define  RX_DMA_DESC_BUFSIZE_HDR		0x0FF0
275 
276 #define RX_DMA_DCAD_REG(i)			(0x6100 + (i) * 4)
277 #define  RX_DMA_DCAD_CPUID			0xFF
278 #define  RX_DMA_DCAD_PAYLOAD_EN			(1 << 29)
279 #define  RX_DMA_DCAD_HEADER_EN			(1 << 30)
280 #define  RX_DMA_DCAD_DESC_EN			(1 << 31)
281 
282 #define RX_DMA_DCA_REG				0x6180
283 #define  RX_DMA_DCA_EN				(1 << 31)
284 #define  RX_DMA_DCA_MODE			0xF
285 
286 #define TX_SYSCONTROL_REG			0x7000
287 #define  TX_SYSCONTROL_RESET_DIS		(1 << 29)
288 
289 #define TX_TPO2_REG				0x7040
290 #define  TX_TPO2_EN				(1 << 16)
291 
292 #define TPS_DESC_VM_ARB_MODE_REG		0x7300
293 #define  TPS_DESC_VM_ARB_MODE			(1 << 0)
294 #define TPS_DESC_RATE_REG			0x7310
295 #define  TPS_DESC_RATE_TA_RST			(1 << 31)
296 #define  TPS_DESC_RATE_LIM			0x7FF
297 #define TPS_DESC_TC_ARB_MODE_REG		0x7200
298 #define  TPS_DESC_TC_ARB_MODE			0x3
299 #define TPS_DATA_TC_ARB_MODE_REG		0x7100
300 #define  TPS_DATA_TC_ARB_MODE			(1 << 0)
301 
302 /* TPS_DATA_TCT_REG[AQ_TRAFFICCLASS_NUM] 0x7110-0x7130 */
303 #define TPS_DATA_TCT_REG(i)			(0x7110 + (i) * 4)
304 #define  TPS_DATA_TCT_CREDIT_MAX		0xFFF0000
305 #define  TPS_DATA_TCT_WEIGHT			0x1FF
/* TPS_DESC_TCT_REG[AQ_TRAFFICCLASS_NUM] 0x7210-0x7230 */
307 #define TPS_DESC_TCT_REG(i)			(0x7210 + (i) * 4)
308 #define  TPS_DESC_TCT_CREDIT_MAX		0xFFF0000
309 #define  TPS_DESC_TCT_WEIGHT			0x1FF
310 
311 #define AQ_HW_TXBUF_MAX         160
312 #define AQ_HW_RXBUF_MAX         320
313 
314 #define THM_LSO_TCP_FLAG1_REG			0x7820
315 #define  THM_LSO_TCP_FLAG1_FIRST		0xFFF
316 #define  THM_LSO_TCP_FLAG1_MID			0xFFF0000
317 #define THM_LSO_TCP_FLAG2_REG			0x7824
318 #define  THM_LSO_TCP_FLAG2_LAST			0xFFF
319 
320 #define TPB_TX_BUF_REG				0x7900
321 #define  TPB_TX_BUF_EN				(1 << 0)
322 #define  TPB_TX_BUF_SCP_INS_EN			(1 << 2)
323 #define  TPB_TX_BUF_TC_MODE_EN			(1 << 8)
324 
325 /* TPB_TXB_BUFSIZE_REG[AQ_TRAFFICCLASS_NUM] 0x7910-7990 */
326 #define TPB_TXB_BUFSIZE_REG(i)			(0x7910 + (i) * 0x10)
327 #define  TPB_TXB_BUFSIZE                        (0xFF)
328 #define TPB_TXB_THRESH_REG(i)                   (0x7914 + (i) * 0x10)
329 #define  TPB_TXB_THRESH_HI                      0x1FFF0000
330 #define  TPB_TXB_THRESH_LO                      0x1FFF
331 
332 #define AQ_HW_TX_DMA_TOTAL_REQ_LIMIT_REG	0x7b20
333 
334 #define TX_DMA_INT_DESC_WRWB_EN_REG		0x7b40
335 #define  TX_DMA_INT_DESC_WRWB_EN		(1 << 1)
336 #define  TX_DMA_INT_DESC_MODERATE_EN		(1 << 4)
337 
338 #define TX_DMA_DESC_BASE_ADDRLSW_REG(i)		(0x7c00 + (i) * 0x40)
339 #define TX_DMA_DESC_BASE_ADDRMSW_REG(i)		(0x7c04 + (i) * 0x40)
340 #define TX_DMA_DESC_REG(i)			(0x7c08 + (i) * 0x40)
341 #define  TX_DMA_DESC_LEN			0x00000FF8
342 #define  TX_DMA_DESC_EN				0x80000000
343 #define TX_DMA_DESC_HEAD_PTR_REG(i)		(0x7c0c + (i) * 0x40)
344 #define  TX_DMA_DESC_HEAD_PTR			0x00000FFF
345 #define TX_DMA_DESC_TAIL_PTR_REG(i)		(0x7c10 + (i) * 0x40)
346 #define TX_DMA_DESC_WRWB_THRESH_REG(i)		(0x7c18 + (i) * 0x40)
347 #define  TX_DMA_DESC_WRWB_THRESH		0x00003F00
348 
349 #define TDM_DCAD_REG(i)				(0x8400 + (i) * 4)
350 #define  TDM_DCAD_CPUID				0x7F
351 #define  TDM_DCAD_CPUID_EN			0x80000000
352 
353 #define TDM_DCA_REG				0x8480
354 #define  TDM_DCA_EN				(1 << 31)
355 #define  TDM_DCA_MODE				0xF
356 
357 #define TX_INTR_MODERATION_CTL_REG(i)		(0x8980 + (i) * 4)
358 #define  TX_INTR_MODERATION_CTL_EN		(1 << 1)
359 
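/*
 * Field helpers: __LOWEST_SET_BIT() isolates the lowest set bit of a mask
 * and __SHIFTIN() multiplies by it, i.e. shifts a value into the mask's
 * field position.  For example, with a mask of 0x0000f000 the lowest set
 * bit is 0x1000, so __SHIFTIN(0x5, 0x0000f000) == 0x00005000.
 */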
360 #define __LOWEST_SET_BIT(__mask) (((((uint32_t)__mask) - 1) & ((uint32_t)__mask)) ^ ((uint32_t)__mask))
361 #define __SHIFTIN(__x, __mask) ((__x) * __LOWEST_SET_BIT(__mask))
362 
363 #if 0
364 #define AQ_READ_REG(sc, reg) \
365 	bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (reg))
366 
367 #endif
368 #define AQ_WRITE_REG(sc, reg, val) \
369 	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))
370 
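/*
 * Read-modify-write helper: clears the bits covered by "mask" and, if "val"
 * is non-zero, ORs in __SHIFTIN(val, mask).  For example,
 * AQ_WRITE_REG_BIT(sc, AQ_INTR_CTRL_REG, AQ_INTR_CTRL_IRQMODE,
 * AQ_INTR_CTRL_IRQMODE_MSIX) updates only the two low IRQ-mode bits.
 */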
371 #define AQ_WRITE_REG_BIT(sc, reg, mask, val)                    \
372 	do {                                                    \
373 		uint32_t _v;                                    \
374 		_v = AQ_READ_REG((sc), (reg));                  \
375 		_v &= ~(mask);                                  \
376 		if ((val) != 0)                                 \
377 			_v |= __SHIFTIN((val), (mask));         \
378 		AQ_WRITE_REG((sc), (reg), _v);                  \
379 	} while (/* CONSTCOND */ 0)
380 
381 #define AQ_READ64_REG(sc, reg)					\
382 	((uint64_t)AQ_READ_REG(sc, reg) |			\
383 	(((uint64_t)AQ_READ_REG(sc, (reg) + 4)) << 32))
384 
385 #define AQ_WRITE64_REG(sc, reg, val)				\
386 	do {							\
387 		AQ_WRITE_REG(sc, reg, (uint32_t)val);		\
388 		AQ_WRITE_REG(sc, reg + 4, (uint32_t)(val >> 32)); \
389 	} while (/* CONSTCOND */0)
390 
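/*
 * WAIT_FOR() polls "expr" up to "n" times, delaying "us" microseconds
 * between attempts, and stores 0 or ETIMEDOUT through "errp" (if non-NULL).
 * For example, WAIT_FOR(cond, 1000, 10, &error) waits at most ~10ms.
 */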
391 #define WAIT_FOR(expr, us, n, errp)                             \
392 	do {                                                    \
393 		unsigned int _n;                                \
394 		for (_n = n; (!(expr)) && _n != 0; --_n) {      \
395 			delay((us));                            \
396 		}                                               \
397 		if ((errp != NULL)) {                           \
398 			if (_n == 0)                            \
399 				*(errp) = ETIMEDOUT;            \
400 			else                                    \
401 				*(errp) = 0;                    \
402 		}                                               \
403 	} while (/* CONSTCOND */ 0)
404 
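/*
 * The firmware version register packs major.minor.build into one 32-bit
 * word: a value of 0x0301000a, for example, decodes as F/W version 3.1.10.
 */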
405 #define FW_VERSION_MAJOR(sc)	(((sc)->sc_fw_version >> 24) & 0xff)
406 #define FW_VERSION_MINOR(sc)	(((sc)->sc_fw_version >> 16) & 0xff)
407 #define FW_VERSION_BUILD(sc)	((sc)->sc_fw_version & 0xffff)
408 
409 #define FEATURES_MIPS		0x00000001
410 #define FEATURES_TPO2		0x00000002
411 #define FEATURES_RPF2		0x00000004
412 #define FEATURES_MPI_AQ		0x00000008
413 #define FEATURES_REV_A0		0x10000000
414 #define FEATURES_REV_A		(FEATURES_REV_A0)
415 #define FEATURES_REV_B0		0x20000000
416 #define FEATURES_REV_B1		0x40000000
417 #define FEATURES_REV_B		(FEATURES_REV_B0|FEATURES_REV_B1)
418 
/* lock for FW2X_MPI_{CONTROL,STATE}_REG read-modify-write */
420 #define AQ_MPI_LOCK(sc)		mtx_enter(&(sc)->sc_mpi_mutex);
421 #define AQ_MPI_UNLOCK(sc)	mtx_leave(&(sc)->sc_mpi_mutex);
422 
423 #define FW2X_CTRL_10BASET_HD			(1 << 0)
424 #define FW2X_CTRL_10BASET_FD			(1 << 1)
425 #define FW2X_CTRL_100BASETX_HD			(1 << 2)
426 #define FW2X_CTRL_100BASET4_HD			(1 << 3)
427 #define FW2X_CTRL_100BASET2_HD			(1 << 4)
428 #define FW2X_CTRL_100BASETX_FD			(1 << 5)
429 #define FW2X_CTRL_100BASET2_FD			(1 << 6)
430 #define FW2X_CTRL_1000BASET_HD			(1 << 7)
431 #define FW2X_CTRL_1000BASET_FD			(1 << 8)
432 #define FW2X_CTRL_2P5GBASET_FD			(1 << 9)
433 #define FW2X_CTRL_5GBASET_FD			(1 << 10)
434 #define FW2X_CTRL_10GBASET_FD			(1 << 11)
435 #define FW2X_CTRL_RESERVED1			(1ULL << 32)
436 #define FW2X_CTRL_10BASET_EEE			(1ULL << 33)
437 #define FW2X_CTRL_RESERVED2			(1ULL << 34)
438 #define FW2X_CTRL_PAUSE				(1ULL << 35)
439 #define FW2X_CTRL_ASYMMETRIC_PAUSE		(1ULL << 36)
440 #define FW2X_CTRL_100BASETX_EEE			(1ULL << 37)
441 #define FW2X_CTRL_RESERVED3			(1ULL << 38)
442 #define FW2X_CTRL_RESERVED4			(1ULL << 39)
443 #define FW2X_CTRL_1000BASET_FD_EEE		(1ULL << 40)
444 #define FW2X_CTRL_2P5GBASET_FD_EEE		(1ULL << 41)
445 #define FW2X_CTRL_5GBASET_FD_EEE		(1ULL << 42)
446 #define FW2X_CTRL_10GBASET_FD_EEE		(1ULL << 43)
447 #define FW2X_CTRL_RESERVED5			(1ULL << 44)
448 #define FW2X_CTRL_RESERVED6			(1ULL << 45)
449 #define FW2X_CTRL_RESERVED7			(1ULL << 46)
450 #define FW2X_CTRL_RESERVED8			(1ULL << 47)
451 #define FW2X_CTRL_RESERVED9			(1ULL << 48)
452 #define FW2X_CTRL_CABLE_DIAG			(1ULL << 49)
453 #define FW2X_CTRL_TEMPERATURE			(1ULL << 50)
454 #define FW2X_CTRL_DOWNSHIFT			(1ULL << 51)
455 #define FW2X_CTRL_PTP_AVB_EN			(1ULL << 52)
456 #define FW2X_CTRL_MEDIA_DETECT			(1ULL << 53)
457 #define FW2X_CTRL_LINK_DROP			(1ULL << 54)
458 #define FW2X_CTRL_SLEEP_PROXY			(1ULL << 55)
459 #define FW2X_CTRL_WOL				(1ULL << 56)
460 #define FW2X_CTRL_MAC_STOP			(1ULL << 57)
461 #define FW2X_CTRL_EXT_LOOPBACK			(1ULL << 58)
462 #define FW2X_CTRL_INT_LOOPBACK			(1ULL << 59)
463 #define FW2X_CTRL_EFUSE_AGENT			(1ULL << 60)
464 #define FW2X_CTRL_WOL_TIMER			(1ULL << 61)
465 #define FW2X_CTRL_STATISTICS			(1ULL << 62)
466 #define FW2X_CTRL_TRANSACTION_ID		(1ULL << 63)
467 
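/*
 * The low control/state bits double as the advertised link rates;
 * aq_fw2x_get_mode() below maps the FW2X_CTRL_RATE_* bits reported in
 * FW2X_MPI_STATE_REG back to an enum aq_link_speed value.
 */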
468 #define FW2X_CTRL_RATE_100M			FW2X_CTRL_100BASETX_FD
469 #define FW2X_CTRL_RATE_1G			FW2X_CTRL_1000BASET_FD
470 #define FW2X_CTRL_RATE_2G5			FW2X_CTRL_2P5GBASET_FD
471 #define FW2X_CTRL_RATE_5G			FW2X_CTRL_5GBASET_FD
472 #define FW2X_CTRL_RATE_10G			FW2X_CTRL_10GBASET_FD
473 #define FW2X_CTRL_RATE_MASK		\
474 	(FW2X_CTRL_RATE_100M |		\
475 	 FW2X_CTRL_RATE_1G |		\
476 	 FW2X_CTRL_RATE_2G5 |		\
477 	 FW2X_CTRL_RATE_5G |		\
478 	 FW2X_CTRL_RATE_10G)
479 #define FW2X_CTRL_EEE_MASK		\
480 	(FW2X_CTRL_10BASET_EEE |	\
481 	 FW2X_CTRL_100BASETX_EEE |	\
482 	 FW2X_CTRL_1000BASET_FD_EEE |	\
483 	 FW2X_CTRL_2P5GBASET_FD_EEE |	\
484 	 FW2X_CTRL_5GBASET_FD_EEE |	\
485 	 FW2X_CTRL_10GBASET_FD_EEE)
486 
487 enum aq_fw_bootloader_mode {
488 	FW_BOOT_MODE_UNKNOWN = 0,
489 	FW_BOOT_MODE_FLB,
490 	FW_BOOT_MODE_RBL_FLASH,
491 	FW_BOOT_MODE_RBL_HOST_BOOTLOAD
492 };
493 
494 enum aq_media_type {
495 	AQ_MEDIA_TYPE_UNKNOWN = 0,
496 	AQ_MEDIA_TYPE_FIBRE,
497 	AQ_MEDIA_TYPE_TP
498 };
499 
500 enum aq_link_speed {
501 	AQ_LINK_NONE    = 0,
502 	AQ_LINK_100M    = (1 << 0),
503 	AQ_LINK_1G      = (1 << 1),
504 	AQ_LINK_2G5     = (1 << 2),
505 	AQ_LINK_5G      = (1 << 3),
506 	AQ_LINK_10G     = (1 << 4)
507 };
508 
509 #define AQ_LINK_ALL	(AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5 | \
510 			    AQ_LINK_5G | AQ_LINK_10G )
511 #define AQ_LINK_AUTO	AQ_LINK_ALL
512 
513 enum aq_link_eee {
514 	AQ_EEE_DISABLE = 0,
515 	AQ_EEE_ENABLE = 1
516 };
517 
518 enum aq_hw_fw_mpi_state {
519 	MPI_DEINIT      = 0,
520 	MPI_RESET       = 1,
521 	MPI_INIT        = 2,
522 	MPI_POWER       = 4
523 };
524 
525 enum aq_link_fc {
526         AQ_FC_NONE = 0,
527         AQ_FC_RX = (1 << 0),
528         AQ_FC_TX = (1 << 1),
529         AQ_FC_ALL = (AQ_FC_RX | AQ_FC_TX)
530 };
531 
532 struct aq_dmamem {
533 	bus_dmamap_t		aqm_map;
534 	bus_dma_segment_t	aqm_seg;
535 	int			aqm_nsegs;
536 	size_t			aqm_size;
537 	caddr_t			aqm_kva;
538 };
539 
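/*
 * Accessors for an aq_dmamem: AQ_DMA_DVA() is the bus address of the first
 * DMA segment, AQ_DMA_KVA() the kernel mapping and AQ_DMA_LEN() the
 * allocated size.
 */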
540 #define AQ_DMA_MAP(_aqm)	((_aqm)->aqm_map)
541 #define AQ_DMA_DVA(_aqm)	((_aqm)->aqm_map->dm_segs[0].ds_addr)
542 #define AQ_DMA_KVA(_aqm)	((void *)(_aqm)->aqm_kva)
543 #define AQ_DMA_LEN(_aqm)	((_aqm)->aqm_size)
544 
545 
546 struct aq_mailbox_header {
547         uint32_t version;
548         uint32_t transaction_id;
549         int32_t error;
550 } __packed __aligned(4);
551 
552 struct aq_hw_stats_s {
553         uint32_t uprc;
554         uint32_t mprc;
555         uint32_t bprc;
556         uint32_t erpt;
557         uint32_t uptc;
558         uint32_t mptc;
559         uint32_t bptc;
560         uint32_t erpr;
561         uint32_t mbtc;
562         uint32_t bbtc;
563         uint32_t mbrc;
564         uint32_t bbrc;
565         uint32_t ubrc;
566         uint32_t ubtc;
567         uint32_t ptc;
568         uint32_t prc;
        uint32_t dpc;   /* not present in fw2x_msm_statistics */
        uint32_t cprc;  /* not present in fw2x_msm_statistics */
571 } __packed __aligned(4);
572 
573 struct aq_fw2x_capabilities {
574         uint32_t caps_lo;
575         uint32_t caps_hi;
576 } __packed __aligned(4);
577 
578 struct aq_fw2x_msm_statistics {
579 	uint32_t uprc;
580 	uint32_t mprc;
581 	uint32_t bprc;
582 	uint32_t erpt;
583 	uint32_t uptc;
584 	uint32_t mptc;
585 	uint32_t bptc;
586 	uint32_t erpr;
587 	uint32_t mbtc;
588 	uint32_t bbtc;
589 	uint32_t mbrc;
590 	uint32_t bbrc;
591 	uint32_t ubrc;
592 	uint32_t ubtc;
593 	uint32_t ptc;
594 	uint32_t prc;
595 } __packed __aligned(4);
596 
597 struct aq_fw2x_phy_cable_diag_data {
598 	uint32_t lane_data[4];
599 } __packed __aligned(4);
600 
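/*
 * The FW 2.x mailbox is read out of firmware RAM at sc_mbox_addr with
 * aq_fw_downld_dwords(); see aq_fw2x_reset(), which fetches the "caps"
 * member using offsetof() into this layout.
 */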
601 struct aq_fw2x_mailbox {		/* struct fwHostInterface */
602 	struct aq_mailbox_header header;
603 	struct aq_fw2x_msm_statistics msm;	/* msmStatistics_t msm; */
604 
605 	uint32_t phy_info1;
606 #define PHYINFO1_FAULT_CODE	__BITS(31,16)
607 #define PHYINFO1_PHY_H_BIT	__BITS(0,15)
608 	uint32_t phy_info2;
609 #define PHYINFO2_TEMPERATURE	__BITS(15,0)
610 #define PHYINFO2_CABLE_LEN	__BITS(23,16)
611 
612 	struct aq_fw2x_phy_cable_diag_data diag_data;
613 	uint32_t reserved[8];
614 
615 	struct aq_fw2x_capabilities caps;
616 
617 	/* ... */
618 } __packed __aligned(4);
619 
620 struct aq_rx_desc_read {
621 	uint64_t		buf_addr;
622 	uint64_t		hdr_addr;
623 } __packed;
624 
625 struct aq_rx_desc_wb {
626 	uint32_t		type;
627 #define AQ_RXDESC_TYPE_RSSTYPE	0x000f
628 #define AQ_RXDESC_TYPE_ETHER	0x0030
629 #define AQ_RXDESC_TYPE_PROTO	0x01c0
630 #define AQ_RXDESC_TYPE_VLAN	(1 << 9)
631 #define AQ_RXDESC_TYPE_VLAN2	(1 << 10)
632 #define AQ_RXDESC_TYPE_DMA_ERR	(1 << 12)
633 #define AQ_RXDESC_TYPE_V4_SUM	(1 << 19)
634 #define AQ_RXDESC_TYPE_TCP_SUM	(1 << 20)
635 	uint32_t		rss_hash;
636 	uint16_t		status;
637 #define AQ_RXDESC_STATUS_DD	(1 << 0)
638 #define AQ_RXDESC_STATUS_EOP	(1 << 1)
639 #define AQ_RXDESC_STATUS_MACERR (1 << 2)
640 #define AQ_RXDESC_STATUS_V4_SUM (1 << 3)
641 #define AQ_RXDESC_STATUS_L4_SUM_ERR (1 << 4)
642 #define AQ_RXDESC_STATUS_L4_SUM_OK (1 << 5)
643 	uint16_t		pkt_len;
644 	uint16_t		next_desc_ptr;
645 	uint16_t		vlan;
646 } __packed;
647 
648 struct aq_tx_desc {
649 	uint64_t		buf_addr;
650 	uint32_t		ctl1;
651 #define AQ_TXDESC_CTL1_TYPE_TXD	0x00000001
652 #define AQ_TXDESC_CTL1_TYPE_TXC	0x00000002
653 #define AQ_TXDESC_CTL1_BLEN_SHIFT 4
654 #define AQ_TXDESC_CTL1_DD	(1 << 20)
655 #define AQ_TXDESC_CTL1_CMD_EOP	(1 << 21)
656 #define AQ_TXDESC_CTL1_CMD_VLAN	(1 << 22)
657 #define AQ_TXDESC_CTL1_CMD_FCS	(1 << 23)
658 #define AQ_TXDESC_CTL1_CMD_IP4CSUM (1 << 24)
659 #define AQ_TXDESC_CTL1_CMD_L4CSUM (1 << 25)
660 #define AQ_TXDESC_CTL1_CMD_WB	(1 << 27)
661 
662 #define AQ_TXDESC_CTL1_VID_SHIFT 4
663 	uint32_t		ctl2;
664 #define AQ_TXDESC_CTL2_LEN_SHIFT 14
665 #define AQ_TXDESC_CTL2_CTX_EN	(1 << 13)
666 } __packed;
667 
668 struct aq_slot {
669 	bus_dmamap_t		 as_map;
670 	struct mbuf		*as_m;
671 };
672 
673 struct aq_rxring {
674 	struct ifiqueue		*rx_ifiq;
675 	struct aq_dmamem	 rx_mem;
676 	struct aq_slot		*rx_slots;
677 	int			 rx_q;
678 	int			 rx_irq;
679 
680 	struct timeout		 rx_refill;
681 	struct if_rxring	 rx_rxr;
682 	uint32_t		 rx_prod;
683 	uint32_t		 rx_cons;
684 };
685 
686 struct aq_txring {
687 	struct ifqueue		*tx_ifq;
688 	struct aq_dmamem	 tx_mem;
689 	struct aq_slot		*tx_slots;
690 	int			 tx_q;
691 	int			 tx_irq;
692 	uint32_t		 tx_prod;
693 	uint32_t		 tx_cons;
694 };
695 
696 struct aq_queues {
697 	char			 q_name[16];
698 	void			*q_ihc;
699 	struct aq_softc		*q_sc;
700 	int			 q_index;
701 	struct aq_rxring 	 q_rx;
702 	struct aq_txring 	 q_tx;
703 };
704 
705 
706 struct aq_softc;
707 struct aq_firmware_ops {
708 	int (*reset)(struct aq_softc *);
709 	int (*set_mode)(struct aq_softc *, enum aq_hw_fw_mpi_state,
710 	    enum aq_link_speed, enum aq_link_fc, enum aq_link_eee);
711 	int (*get_mode)(struct aq_softc *, enum aq_hw_fw_mpi_state *,
712 	    enum aq_link_speed *, enum aq_link_fc *, enum aq_link_eee *);
713 	int (*get_stats)(struct aq_softc *, struct aq_hw_stats_s *);
714 };
715 
716 struct aq_softc {
717 	struct device		sc_dev;
718 	uint16_t		sc_product;
719 	uint16_t		sc_revision;
720 	bus_dma_tag_t		sc_dmat;
721 	pci_chipset_tag_t	sc_pc;
722 	pcitag_t		sc_pcitag;
723 	int			sc_nqueues;
724 	struct aq_queues	sc_queues[AQ_MAXQ];
725 	struct intrmap		*sc_intrmap;
726 	void			*sc_ih;
727 	bus_space_handle_t	sc_ioh;
728 	bus_space_tag_t		sc_iot;
729 
730 	uint32_t		sc_mbox_addr;
731 	int			sc_rbl_enabled;
732 	int			sc_fast_start_enabled;
733 	int			sc_flash_present;
734 	uint32_t		sc_fw_version;
735 	const struct		aq_firmware_ops *sc_fw_ops;
736 	uint64_t		sc_fw_caps;
737 	enum aq_media_type	sc_media_type;
738 	enum aq_link_speed	sc_available_rates;
739 	uint32_t		sc_features;
740 	int			sc_linkstat_irq;
741 	struct arpcom		sc_arpcom;
742 	struct ifmedia		sc_media;
743 
744 	struct ether_addr	sc_enaddr;
745 	struct mutex		sc_mpi_mutex;
746 };
747 
748 const struct pci_matchid aq_devices[] = {
749 	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC100 },
750 	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC107 },
751 	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC108 },
752 	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC109 },
753 	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC111 },
754 	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC112 },
755 	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC100S },
756 	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC107S },
757 	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC108S },
758 	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC109S },
759 	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC111S },
760 	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC112S },
761 	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_D100 },
762 	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_D107 },
763 	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_D108 },
764 	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_D109 },
765 };
766 
767 const struct aq_product {
768 	pci_vendor_id_t aq_vendor;
769 	pci_product_id_t aq_product;
770 	enum aq_media_type aq_media_type;
771 	enum aq_link_speed aq_available_rates;
772 } aq_products[] = {
{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC100,
774 	AQ_MEDIA_TYPE_FIBRE, AQ_LINK_ALL
775 },
776 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC107,
777 	AQ_MEDIA_TYPE_TP, AQ_LINK_ALL
778 },
779 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC108,
780 	AQ_MEDIA_TYPE_TP, AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5 | AQ_LINK_5G
781 },
782 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC109,
783 	AQ_MEDIA_TYPE_TP, AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5
784 },
785 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC111,
786 	AQ_MEDIA_TYPE_TP, AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5 | AQ_LINK_5G
787 },
788 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC112,
789 	AQ_MEDIA_TYPE_TP, AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5
790 },
791 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC100S,
792 	AQ_MEDIA_TYPE_FIBRE, AQ_LINK_ALL
793 },
794 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC107S,
795 	AQ_MEDIA_TYPE_TP, AQ_LINK_ALL
796 },
797 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC108S,
798 	AQ_MEDIA_TYPE_TP, AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5 | AQ_LINK_5G
799 },
800 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC109S,
801 	AQ_MEDIA_TYPE_TP, AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5
802 },
803 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC111S,
804 	AQ_MEDIA_TYPE_TP, AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5 | AQ_LINK_5G
805 },
806 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC112S,
807 	AQ_MEDIA_TYPE_TP, AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5
808 },
809 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_D100,
810 	AQ_MEDIA_TYPE_FIBRE, AQ_LINK_ALL
811 },
812 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_D107,
813 	AQ_MEDIA_TYPE_TP, AQ_LINK_ALL
814 },
815 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_D108,
816 	AQ_MEDIA_TYPE_TP, AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5 | AQ_LINK_5G
817 },
818 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_D109,
819 	AQ_MEDIA_TYPE_TP, AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5
820 }
821 };
822 
823 int	aq_match(struct device *, void *, void *);
824 void	aq_attach(struct device *, struct device *, void *);
825 int	aq_detach(struct device *, int);
826 int	aq_activate(struct device *, int);
827 int	aq_intr(void *);
828 void	aq_global_software_reset(struct aq_softc *);
829 int	aq_fw_reset(struct aq_softc *);
830 int	aq_mac_soft_reset(struct aq_softc *, enum aq_fw_bootloader_mode *);
831 int	aq_mac_soft_reset_rbl(struct aq_softc *, enum aq_fw_bootloader_mode *);
832 int	aq_mac_soft_reset_flb(struct aq_softc *);
833 int	aq_fw_read_version(struct aq_softc *);
834 int	aq_fw_version_init(struct aq_softc *);
835 int	aq_hw_init_ucp(struct aq_softc *);
836 int	aq_fw_downld_dwords(struct aq_softc *, uint32_t, uint32_t *, uint32_t);
837 int	aq_get_mac_addr(struct aq_softc *);
838 int	aq_hw_reset(struct aq_softc *);
839 int	aq_hw_init(struct aq_softc *, int);
840 void	aq_hw_qos_set(struct aq_softc *);
void	aq_hw_l3_filter_set(struct aq_softc *);
842 void	aq_hw_init_tx_path(struct aq_softc *);
843 void	aq_hw_init_rx_path(struct aq_softc *);
844 int	aq_set_mac_addr(struct aq_softc *, int, uint8_t *);
845 int	aq_set_linkmode(struct aq_softc *, enum aq_link_speed,
846     enum aq_link_fc, enum aq_link_eee);
847 void	aq_watchdog(struct ifnet *);
848 void	aq_enable_intr(struct aq_softc *, int, int);
849 int	aq_ioctl(struct ifnet *, u_long, caddr_t);
850 int	aq_up(struct aq_softc *);
851 void	aq_down(struct aq_softc *);
852 void	aq_iff(struct aq_softc *);
853 void	aq_start(struct ifqueue *);
854 void	aq_ifmedia_status(struct ifnet *, struct ifmediareq *);
855 int	aq_ifmedia_change(struct ifnet *);
856 void	aq_update_link_status(struct aq_softc *);
857 
858 void	aq_refill(void *);
859 int	aq_rx_fill(struct aq_softc *, struct aq_rxring *);
860 static inline unsigned int aq_rx_fill_slots(struct aq_softc *,
861 	    struct aq_rxring *, uint);
862 
863 int	aq_dmamem_alloc(struct aq_softc *, struct aq_dmamem *,
864 	    bus_size_t, u_int);
865 void	aq_dmamem_zero(struct aq_dmamem *);
866 void	aq_dmamem_free(struct aq_softc *, struct aq_dmamem *);
867 
868 int	aq_fw1x_reset(struct aq_softc *);
869 int	aq_fw1x_get_mode(struct aq_softc *, enum aq_hw_fw_mpi_state *,
870     enum aq_link_speed *, enum aq_link_fc *, enum aq_link_eee *);
871 int	aq_fw1x_set_mode(struct aq_softc *, enum aq_hw_fw_mpi_state,
872     enum aq_link_speed, enum aq_link_fc, enum aq_link_eee);
873 int	aq_fw1x_get_stats(struct aq_softc *, struct aq_hw_stats_s *);
874 
875 int	aq_fw2x_reset(struct aq_softc *);
876 int	aq_fw2x_get_mode(struct aq_softc *, enum aq_hw_fw_mpi_state *,
877     enum aq_link_speed *, enum aq_link_fc *, enum aq_link_eee *);
878 int	aq_fw2x_set_mode(struct aq_softc *, enum aq_hw_fw_mpi_state,
879     enum aq_link_speed, enum aq_link_fc, enum aq_link_eee);
880 int	aq_fw2x_get_stats(struct aq_softc *, struct aq_hw_stats_s *);
881 
882 const struct aq_firmware_ops aq_fw1x_ops = {
883 	.reset = aq_fw1x_reset,
884 	.set_mode = aq_fw1x_set_mode,
885 	.get_mode = aq_fw1x_get_mode,
886 	.get_stats = aq_fw1x_get_stats,
887 };
888 
889 const struct aq_firmware_ops aq_fw2x_ops = {
890 	.reset = aq_fw2x_reset,
891 	.set_mode = aq_fw2x_set_mode,
892 	.get_mode = aq_fw2x_get_mode,
893 	.get_stats = aq_fw2x_get_stats,
894 };
895 
896 struct cfattach aq_ca = {
897 	sizeof(struct aq_softc), aq_match, aq_attach, NULL,
898 	aq_activate
899 };
900 
901 struct cfdriver aq_cd = {
902 	NULL, "aq", DV_IFNET
903 };
904 
905 uint32_t
906 AQ_READ_REG(struct aq_softc *sc, uint32_t reg)
907 {
908 	uint32_t res;
909 
910 	res = bus_space_read_4(sc->sc_iot, sc->sc_ioh, reg);
911 
912 	return res;
913 }
914 
915 
916 int
917 aq_match(struct device *dev, void *match, void *aux)
918 {
919 	return pci_matchbyid((struct pci_attach_args *)aux, aq_devices,
920 	    sizeof(aq_devices) / sizeof(aq_devices[0]));
921 }
922 
923 const struct aq_product *
924 aq_lookup(const struct pci_attach_args *pa)
925 {
926 	unsigned int i;
927 
	for (i = 0; i < sizeof(aq_products) / sizeof(aq_products[0]); i++) {
		if (PCI_VENDOR(pa->pa_id) == aq_products[i].aq_vendor &&
		    PCI_PRODUCT(pa->pa_id) == aq_products[i].aq_product) {
			return &aq_products[i];
		}
	}
934 
935 	return NULL;
936 }
937 
938 void
939 aq_attach(struct device *parent, struct device *self, void *aux)
940 {
941 	struct aq_softc *sc = (struct aq_softc *)self;
942 	struct pci_attach_args *pa = aux;
943 	const struct aq_product *aqp;
944 	pcireg_t command, bar, memtype;
945 	pci_chipset_tag_t pc;
946 	pci_intr_handle_t ih;
947 	int (*isr)(void *);
948 	const char *intrstr;
949 	pcitag_t tag;
950 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
951 	int irqmode;
952 	int i;
953 
954 	mtx_init(&sc->sc_mpi_mutex, IPL_NET);
955 
956 	sc->sc_dmat = pa->pa_dmat;
957 	sc->sc_pc = pc = pa->pa_pc;
958 	sc->sc_pcitag = tag = pa->pa_tag;
959 
960 	command = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
961 	command |= PCI_COMMAND_MASTER_ENABLE;
962 	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
963 
964 	sc->sc_product = PCI_PRODUCT(pa->pa_id);
965 	sc->sc_revision = PCI_REVISION(pa->pa_class);
966 
967 	aqp = aq_lookup(pa);
968 
969 	bar = pci_conf_read(pc, tag, AQ_BAR0);
970 	if (PCI_MAPREG_TYPE(bar) != PCI_MAPREG_TYPE_MEM) {
971 		printf(": wrong BAR type\n");
972 		return;
973 	}
974 
975 	memtype = pci_mapreg_type(pc, tag, AQ_BAR0);
976 	if (pci_mapreg_map(pa, AQ_BAR0, memtype, 0, &sc->sc_iot, &sc->sc_ioh,
977 	    NULL, NULL, 0)) {
978 		printf(": failed to map BAR0\n");
979 		return;
980 	}
981 
982 	sc->sc_nqueues = 1;
983 
984 	if (pci_intr_map_msix(pa, 0, &ih) == 0) {
985 		irqmode = AQ_INTR_CTRL_IRQMODE_MSIX;
986 	} else if (pci_intr_map_msi(pa, &ih) == 0) {
987 		irqmode = AQ_INTR_CTRL_IRQMODE_MSI;
988 	} else if (pci_intr_map(pa, &ih) == 0) {
989 		irqmode = AQ_INTR_CTRL_IRQMODE_LEGACY;
990 	} else {
991 		printf(": failed to map interrupt\n");
992 		return;
993 	}
994 
995 	isr = aq_intr;
996 	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih,
997 	    IPL_NET | IPL_MPSAFE, isr, sc, self->dv_xname);
998 	intrstr = pci_intr_string(pa->pa_pc, ih);
999 	if (intrstr)
1000 		printf(": %s", intrstr);
1001 
1002 	if (aq_fw_reset(sc))
1003 		return;
1004 
1005 	DPRINTF((", FW version 0x%x", sc->sc_fw_version));
1006 
1007 	if (aq_fw_version_init(sc))
1008 		return;
1009 
1010 	if (aq_hw_init_ucp(sc))
1011 		return;
1012 
1013 	if (aq_hw_reset(sc))
1014 		return;
1015 
1016 	if (aq_get_mac_addr(sc))
1017 		return;
1018 
1019 	if (aq_hw_init(sc, irqmode))
1020 		return;
1021 
1022 	sc->sc_media_type = aqp->aq_media_type;
1023 	sc->sc_available_rates = aqp->aq_available_rates;
1024 
1025 	ifmedia_init(&sc->sc_media, IFM_IMASK, aq_ifmedia_change,
1026 	    aq_ifmedia_status);
1027 
1028 	bcopy(sc->sc_enaddr.ether_addr_octet, sc->sc_arpcom.ac_enaddr, 6);
1029 	strlcpy(ifp->if_xname, self->dv_xname, IFNAMSIZ);
1030 	ifp->if_softc = sc;
1031 	ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX;
1032 	ifp->if_xflags = IFXF_MPSAFE;
1033 	ifp->if_ioctl = aq_ioctl;
1034 	ifp->if_qstart = aq_start;
1035 	ifp->if_watchdog = aq_watchdog;
1036 	ifp->if_hardmtu = 9000;
1037 	ifp->if_capabilities = IFCAP_VLAN_MTU;
1038 	ifq_set_maxlen(&ifp->if_snd, AQ_TXD_NUM);
1039 
1042 	if (sc->sc_available_rates & AQ_LINK_100M) {
1043 		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_100_TX, 0, NULL);
1044 		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_100_TX|IFM_FDX, 0,
1045 		    NULL);
1046 	}
1047 
1048 	if (sc->sc_available_rates & AQ_LINK_1G) {
1049 		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_1000_T, 0, NULL);
1050 		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_1000_T|IFM_FDX, 0,
1051 		    NULL);
1052 	}
1053 
1054 	if (sc->sc_available_rates & AQ_LINK_2G5) {
1055 		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T, 0, NULL);
1056 		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T | IFM_FDX,
1057 		    0, NULL);
1058 	}
1059 
1060 	if (sc->sc_available_rates & AQ_LINK_5G) {
1061 		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_5000_T, 0, NULL);
1062 		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_5000_T | IFM_FDX,
1063 		    0, NULL);
1064 	}
1065 
1066 	if (sc->sc_available_rates & AQ_LINK_10G) {
1067 		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10G_T, 0, NULL);
1068 		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10G_T | IFM_FDX,
1069 		    0, NULL);
1070 	}
1071 
1072 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1073 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO | IFM_FDX, 0, NULL);
1074 	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
1075 	aq_set_linkmode(sc, AQ_LINK_AUTO, AQ_FC_NONE, AQ_EEE_DISABLE);
1076 
	if_attach(ifp);
	ether_ifattach(ifp);
1079 
1080 	if_attach_iqueues(ifp, sc->sc_nqueues);
1081 	if_attach_queues(ifp, sc->sc_nqueues);
1082 
1083 	for (i = 0; i < sc->sc_nqueues; i++) {
1084 		struct aq_queues *aq = &sc->sc_queues[i];
1085 		struct aq_rxring *rx = &aq->q_rx;
1086 		struct aq_txring *tx = &aq->q_tx;
1087 
1088 		aq->q_sc = sc;
1089 		aq->q_index = i;
1090 		rx->rx_q = i;
1091 		rx->rx_irq = i * 2;
1092 		rx->rx_ifiq = ifp->if_iqs[i];
1093 		ifp->if_iqs[i]->ifiq_softc = aq;
1094 		timeout_set(&rx->rx_refill, aq_refill, rx);
1095 
1096 		tx->tx_q = i;
1097 		tx->tx_irq = rx->rx_irq + 1;
1098 		tx->tx_ifq = ifp->if_ifqs[i];
1099 		ifp->if_ifqs[i]->ifq_softc = aq;
1100 
1101 		if (sc->sc_nqueues > 1) {
1102 			/* map msix */
1103 		}
1104 
1105 		AQ_WRITE_REG(sc, TX_INTR_MODERATION_CTL_REG(i), 0);
1106 		AQ_WRITE_REG(sc, RX_INTR_MODERATION_CTL_REG(i), 0);
1107 	}
1108 
1109 	AQ_WRITE_REG_BIT(sc, TX_DMA_INT_DESC_WRWB_EN_REG,
1110 	    TX_DMA_INT_DESC_WRWB_EN, 1);
1111 	AQ_WRITE_REG_BIT(sc, TX_DMA_INT_DESC_WRWB_EN_REG,
1112 	    TX_DMA_INT_DESC_MODERATE_EN, 0);
1113 	AQ_WRITE_REG_BIT(sc, RX_DMA_INT_DESC_WRWB_EN_REG,
1114 	    RX_DMA_INT_DESC_WRWB_EN, 1);
1115 	AQ_WRITE_REG_BIT(sc, RX_DMA_INT_DESC_WRWB_EN_REG,
1116 	    RX_DMA_INT_DESC_MODERATE_EN, 0);
1117 
1118 	aq_enable_intr(sc, 1, 0);
1119 	printf("\n");
1120 }
1121 
1122 int
1123 aq_fw_reset(struct aq_softc *sc)
1124 {
1125 	uint32_t ver, v, boot_exit_code;
1126 	int i, error;
1127 	enum aq_fw_bootloader_mode mode;
1128 
1129 	mode = FW_BOOT_MODE_UNKNOWN;
1130 
1131 	ver = AQ_READ_REG(sc, AQ_FW_VERSION_REG);
1132 
1133 	for (i = 1000; i > 0; i--) {
1134 		v = AQ_READ_REG(sc, FW_MPI_DAISY_CHAIN_STATUS_REG);
1135 		boot_exit_code = AQ_READ_REG(sc, FW_BOOT_EXIT_CODE_REG);
1136 		if (v != 0x06000000 || boot_exit_code != 0)
1137 			break;
1138 	}
1139 
1140 	if (i <= 0) {
		printf("%s: F/W reset failed. Neither RBL nor FLB started\n",
		    DEVNAME(sc));
1143 		return ETIMEDOUT;
1144 	}
1145 
1146 	sc->sc_rbl_enabled = (boot_exit_code != 0);
1147 
	/*
	 * An FW version of 0 indicates that a cold start is in progress.
	 * This means two things:
	 * 1) The driver has to wait for FW/HW to finish booting
	 *    (give up after 500ms).
	 * 2) The driver may skip the reset sequence and save time.
	 */
1154 	if (sc->sc_fast_start_enabled && (ver != 0)) {
1155 		error = aq_fw_read_version(sc);
1156 		/* Skip reset as it just completed */
1157 		if (error == 0)
1158 			return 0;
1159 	}
1160 
1161 	error = aq_mac_soft_reset(sc, &mode);
1162 	if (error != 0) {
1163 		printf("%s: MAC reset failed: %d\n", DEVNAME(sc), error);
1164 		return error;
1165 	}
1166 
1167 	switch (mode) {
1168 	case FW_BOOT_MODE_FLB:
1169 		DPRINTF(("%s: FLB> F/W successfully loaded from flash.",
1170 		    DEVNAME(sc)));
1171 		sc->sc_flash_present = 1;
1172 		return aq_fw_read_version(sc);
1173 	case FW_BOOT_MODE_RBL_FLASH:
1174 		DPRINTF(("%s: RBL> F/W loaded from flash. Host Bootload "
1175 		    "disabled.", DEVNAME(sc)));
1176 		sc->sc_flash_present = 1;
1177 		return aq_fw_read_version(sc);
1178 	case FW_BOOT_MODE_UNKNOWN:
		printf("%s: F/W bootload error: unknown bootloader type\n",
		    DEVNAME(sc));
1181 		return ENOTSUP;
1182 	case FW_BOOT_MODE_RBL_HOST_BOOTLOAD:
		printf("%s: RBL> F/W Host Bootload not implemented\n",
		    DEVNAME(sc));
1184 		return ENOTSUP;
1185 	}
1186 
1187 	return ENOTSUP;
1188 }
1189 
1190 int
1191 aq_mac_soft_reset_rbl(struct aq_softc *sc, enum aq_fw_bootloader_mode *mode)
1192 {
1193 	int timo;
1194 
1195 	DPRINTF(("%s: RBL> MAC reset STARTED!\n", DEVNAME(sc)));
1196 
1197 	AQ_WRITE_REG(sc, AQ_FW_GLB_CTL2_REG, 0x40e1);
1198 	AQ_WRITE_REG(sc, AQ_FW_GLB_CPU_SEM_REG(0), 1);
1199 	AQ_WRITE_REG(sc, AQ_MBOXIF_POWER_GATING_CONTROL_REG, 0);
1200 
1201 	/* MAC FW will reload PHY FW if 1E.1000.3 was cleaned - #undone */
1202 	AQ_WRITE_REG(sc, FW_BOOT_EXIT_CODE_REG, RBL_STATUS_DEAD);
1203 
1204 	aq_global_software_reset(sc);
1205 
1206 	AQ_WRITE_REG(sc, AQ_FW_GLB_CTL2_REG, 0x40e0);
1207 
1208 	/* Wait for RBL to finish boot process. */
1209 #define RBL_TIMEOUT_MS	10000
1210 	uint16_t rbl_status;
1211 	for (timo = RBL_TIMEOUT_MS; timo > 0; timo--) {
1212 		rbl_status = AQ_READ_REG(sc, FW_BOOT_EXIT_CODE_REG) & 0xffff;
1213 		if (rbl_status != 0 && rbl_status != RBL_STATUS_DEAD)
1214 			break;
1215 		delay(1000);
1216 	}
1217 
1218 	if (timo <= 0) {
1219 		printf("%s: RBL> RBL restart failed: timeout\n", DEVNAME(sc));
1220 		return EBUSY;
1221 	}
1222 
1223 	switch (rbl_status) {
1224 	case RBL_STATUS_SUCCESS:
1225 		if (mode != NULL)
1226 			*mode = FW_BOOT_MODE_RBL_FLASH;
1227 		DPRINTF(("%s: RBL> reset complete! [Flash]\n", DEVNAME(sc)));
1228 		break;
1229 	case RBL_STATUS_HOST_BOOT:
1230 		if (mode != NULL)
1231 			*mode = FW_BOOT_MODE_RBL_HOST_BOOTLOAD;
1232 		DPRINTF(("%s: RBL> reset complete! [Host Bootload]\n",
1233 		    DEVNAME(sc)));
1234 		break;
1235 	case RBL_STATUS_FAILURE:
1236 	default:
1237 		printf("%s: unknown RBL status 0x%x\n", DEVNAME(sc),
1238 		    rbl_status);
1239 		return EBUSY;
1240 	}
1241 
1242 	return 0;
1243 }
1244 
1245 int
1246 aq_mac_soft_reset_flb(struct aq_softc *sc)
1247 {
1248 	uint32_t v;
1249 	int timo;
1250 
1251 	AQ_WRITE_REG(sc, AQ_FW_GLB_CTL2_REG, 0x40e1);
1252 	/*
	 * Let the Felicity hardware complete the SMBUS transaction before
	 * the global software reset.
1255 	 */
1256 	delay(50000);
1257 
1258 	/*
	 * If an SPI burst transaction was interrupted (before running the
	 * script), a global software reset may not clear the SPI interface.
	 * Clean it up manually before the global reset.
1262 	 */
1263 	AQ_WRITE_REG(sc, AQ_GLB_NVR_PROVISIONING2_REG, 0x00a0);
1264 	AQ_WRITE_REG(sc, AQ_GLB_NVR_INTERFACE1_REG, 0x009f);
1265 	AQ_WRITE_REG(sc, AQ_GLB_NVR_INTERFACE1_REG, 0x809f);
1266 	delay(50000);
1267 
1268 	v = AQ_READ_REG(sc, AQ_FW_SOFTRESET_REG);
1269 	v &= ~AQ_FW_SOFTRESET_DIS;
1270 	v |= AQ_FW_SOFTRESET_RESET;
1271 	AQ_WRITE_REG(sc, AQ_FW_SOFTRESET_REG, v);
1272 
1273 	/* Kickstart. */
1274 	AQ_WRITE_REG(sc, AQ_FW_GLB_CTL2_REG, 0x80e0);
1275 	AQ_WRITE_REG(sc, AQ_MBOXIF_POWER_GATING_CONTROL_REG, 0);
1276 	if (!sc->sc_fast_start_enabled)
1277 		AQ_WRITE_REG(sc, AQ_GLB_GENERAL_PROVISIONING9_REG, 1);
1278 
1279 	/*
	 * In case the SPI burst transaction was interrupted (by the MCP
	 * reset above), wait until the hardware completes it.
1282 	 */
1283 	delay(50000);
1284 
1285 	/* MAC Kickstart */
1286 	if (!sc->sc_fast_start_enabled) {
1287 		AQ_WRITE_REG(sc, AQ_FW_GLB_CTL2_REG, 0x180e0);
1288 
1289 		uint32_t flb_status;
1290 		for (timo = 0; timo < 1000; timo++) {
1291 			flb_status = AQ_READ_REG(sc,
1292 			    FW_MPI_DAISY_CHAIN_STATUS_REG) & 0x10;
1293 			if (flb_status != 0)
1294 				break;
1295 			delay(1000);
1296 		}
1297 		if (flb_status == 0) {
1298 			printf("%s: FLB> MAC kickstart failed: timed out\n",
1299 			    DEVNAME(sc));
1300 			return ETIMEDOUT;
1301 		}
1302 		DPRINTF(("%s: FLB> MAC kickstart done, %d ms\n", DEVNAME(sc),
1303 		    timo));
1304 		/* FW reset */
1305 		AQ_WRITE_REG(sc, AQ_FW_GLB_CTL2_REG, 0x80e0);
1306 		/*
		 * Let the Felicity hardware complete the SMBUS transaction
		 * before the global software reset.
1309 		 */
1310 		delay(50000);
1311 		sc->sc_fast_start_enabled = true;
1312 	}
1313 	AQ_WRITE_REG(sc, AQ_FW_GLB_CPU_SEM_REG(0), 1);
1314 
1315 	/* PHY Kickstart: #undone */
1316 	aq_global_software_reset(sc);
1317 
1318 	for (timo = 0; timo < 1000; timo++) {
1319 		if (AQ_READ_REG(sc, AQ_FW_VERSION_REG) != 0)
1320 			break;
1321 		delay(10000);
1322 	}
1323 	if (timo >= 1000) {
1324 		printf("%s: FLB> Global Soft Reset failed\n", DEVNAME(sc));
1325 		return ETIMEDOUT;
1326 	}
1327 	DPRINTF(("%s: FLB> F/W restart: %d ms\n", DEVNAME(sc), timo * 10));
1328 
	return 0;
}
1332 
1333 int
1334 aq_mac_soft_reset(struct aq_softc *sc, enum aq_fw_bootloader_mode *mode)
1335 {
1336 	if (sc->sc_rbl_enabled)
1337 		return aq_mac_soft_reset_rbl(sc, mode);
1338 
1339 	if (mode != NULL)
1340 		*mode = FW_BOOT_MODE_FLB;
1341 	return aq_mac_soft_reset_flb(sc);
1342 }
1343 
1344 void
1345 aq_global_software_reset(struct aq_softc *sc)
1346 {
1347         uint32_t v;
1348 
1349         AQ_WRITE_REG_BIT(sc, RX_SYSCONTROL_REG, RX_SYSCONTROL_RESET_DIS, 0);
1350         AQ_WRITE_REG_BIT(sc, TX_SYSCONTROL_REG, TX_SYSCONTROL_RESET_DIS, 0);
1351         AQ_WRITE_REG_BIT(sc, FW_MPI_RESETCTRL_REG,
1352             FW_MPI_RESETCTRL_RESET_DIS, 0);
1353 
1354         v = AQ_READ_REG(sc, AQ_FW_SOFTRESET_REG);
1355         v &= ~AQ_FW_SOFTRESET_DIS;
1356         v |= AQ_FW_SOFTRESET_RESET;
1357         AQ_WRITE_REG(sc, AQ_FW_SOFTRESET_REG, v);
1358 }
1359 
1360 int
1361 aq_fw_read_version(struct aq_softc *sc)
1362 {
1363 	int i, error = EBUSY;
1364 #define MAC_FW_START_TIMEOUT_MS 10000
1365 	for (i = 0; i < MAC_FW_START_TIMEOUT_MS; i++) {
1366 		sc->sc_fw_version = AQ_READ_REG(sc, AQ_FW_VERSION_REG);
1367 		if (sc->sc_fw_version != 0) {
1368 			error = 0;
1369 			break;
1370 		}
1371 		delay(1000);
1372 	}
1373 	return error;
1374 }
1375 
1376 int
1377 aq_fw_version_init(struct aq_softc *sc)
1378 {
1379 	int error = 0;
1380 	char fw_vers[sizeof("F/W version xxxxx.xxxxx.xxxxx")];
1381 
1382 	if (FW_VERSION_MAJOR(sc) == 1) {
1383 		sc->sc_fw_ops = &aq_fw1x_ops;
1384 	} else if ((FW_VERSION_MAJOR(sc) == 2) || (FW_VERSION_MAJOR(sc) == 3)) {
1385 		sc->sc_fw_ops = &aq_fw2x_ops;
1386 	} else {
1387 		printf("%s: Unsupported F/W version %d.%d.%d\n",
1388 		    DEVNAME(sc),
1389 		    FW_VERSION_MAJOR(sc), FW_VERSION_MINOR(sc),
1390 		    FW_VERSION_BUILD(sc));
1391 		return ENOTSUP;
1392 	}
1393 	snprintf(fw_vers, sizeof(fw_vers), "F/W version %d.%d.%d",
1394 	    FW_VERSION_MAJOR(sc), FW_VERSION_MINOR(sc), FW_VERSION_BUILD(sc));
1395 
1396 	/* detect revision */
1397 	uint32_t hwrev = AQ_READ_REG(sc, AQ_HW_REVISION_REG);
1398 	switch (hwrev & 0x0000000f) {
1399 	case 0x01:
1400 		printf(", revision A0, %s", fw_vers);
1401 		sc->sc_features |= FEATURES_REV_A0 |
1402 		    FEATURES_MPI_AQ | FEATURES_MIPS;
1403 		break;
1404 	case 0x02:
1405 		printf(", revision B0, %s", fw_vers);
1406 		sc->sc_features |= FEATURES_REV_B0 |
1407 		    FEATURES_MPI_AQ | FEATURES_MIPS |
1408 		    FEATURES_TPO2 | FEATURES_RPF2;
1409 		break;
1410 	case 0x0A:
1411 		printf(", revision B1, %s", fw_vers);
1412 		sc->sc_features |= FEATURES_REV_B1 |
1413 		    FEATURES_MPI_AQ | FEATURES_MIPS |
1414 		    FEATURES_TPO2 | FEATURES_RPF2;
1415 		break;
1416 	default:
1417 		printf(", Unknown revision (0x%08x)", hwrev);
1418 		error = ENOTSUP;
1419 		break;
1420 	}
1421 	return error;
1422 }
1423 
1424 int
1425 aq_hw_init_ucp(struct aq_softc *sc)
1426 {
1427 	int timo;
1428 
1429 	if (FW_VERSION_MAJOR(sc) == 1) {
1430 		if (AQ_READ_REG(sc, FW1X_MPI_INIT2_REG) == 0) {
1431 			uint32_t data;
1432 			arc4random_buf(&data, sizeof(data));
1433 			data &= 0xfefefefe;
1434 			data |= 0x02020202;
1435 			AQ_WRITE_REG(sc, FW1X_MPI_INIT2_REG, data);
1436 		}
1437 		AQ_WRITE_REG(sc, FW1X_MPI_INIT1_REG, 0);
1438 	}
1439 
1440 	for (timo = 100; timo > 0; timo--) {
1441 		sc->sc_mbox_addr = AQ_READ_REG(sc, FW_MPI_MBOX_ADDR_REG);
1442 		if (sc->sc_mbox_addr != 0)
1443 			break;
1444 		delay(1000);
1445 	}
1446 
1447 #define AQ_FW_MIN_VERSION	0x01050006
1448 #define AQ_FW_MIN_VERSION_STR	"1.5.6"
1449 	if (sc->sc_fw_version < AQ_FW_MIN_VERSION) {
1450 		printf("%s: atlantic: wrong FW version: " AQ_FW_MIN_VERSION_STR
1451 		    " or later required, this is %d.%d.%d\n",
1452 		    DEVNAME(sc),
1453 		    FW_VERSION_MAJOR(sc),
1454 		    FW_VERSION_MINOR(sc),
1455 		    FW_VERSION_BUILD(sc));
1456 		return ENOTSUP;
1457 	}
1458 
1459 	if (sc->sc_mbox_addr == 0)
1460 		printf("%s: NULL MBOX!!\n", DEVNAME(sc));
1461 
1462 	return 0;
1463 }
1464 
1465 int
1466 aq_hw_reset(struct aq_softc *sc)
1467 {
1468 	int error;
1469 
1470 	/* disable irq */
1471 	AQ_WRITE_REG_BIT(sc, AQ_INTR_CTRL_REG, AQ_INTR_CTRL_RESET_DIS, 0);
1472 
1473 	/* apply */
1474 	AQ_WRITE_REG_BIT(sc, AQ_INTR_CTRL_REG, AQ_INTR_CTRL_RESET_IRQ, 1);
1475 
1476 	/* wait ack 10 times by 1ms */
1477 	WAIT_FOR(
1478 	    (AQ_READ_REG(sc, AQ_INTR_CTRL_REG) & AQ_INTR_CTRL_RESET_IRQ) == 0,
1479 	    1000, 10, &error);
1480 	if (error != 0) {
1481 		printf("%s: atlantic: IRQ reset failed: %d\n", DEVNAME(sc),
1482 		    error);
1483 		return error;
1484 	}
1485 
1486 	return sc->sc_fw_ops->reset(sc);
1487 }
1488 
1489 int
1490 aq_get_mac_addr(struct aq_softc *sc)
1491 {
1492 	uint32_t mac_addr[2];
1493 	uint32_t efuse_shadow_addr;
1494 	int err;
1495 
1496 	efuse_shadow_addr = 0;
1497 	if (FW_VERSION_MAJOR(sc) >= 2)
1498 		efuse_shadow_addr = AQ_READ_REG(sc, FW2X_MPI_EFUSEADDR_REG);
1499 	else
1500 		efuse_shadow_addr = AQ_READ_REG(sc, FW1X_MPI_EFUSEADDR_REG);
1501 
1502 	if (efuse_shadow_addr == 0) {
		printf("%s: cannot get efuse addr\n", DEVNAME(sc));
1504 		return ENXIO;
1505 	}
1506 
1507 	DPRINTF(("%s: efuse_shadow_addr = %x\n", DEVNAME(sc), efuse_shadow_addr));
1508 
1509 	memset(mac_addr, 0, sizeof(mac_addr));
1510 	err = aq_fw_downld_dwords(sc, efuse_shadow_addr + (40 * 4),
1511 	    mac_addr, 2);
	if (err != 0)
1513 		return err;
1514 
1515 	if (mac_addr[0] == 0 && mac_addr[1] == 0) {
		printf("%s: mac address not found\n", DEVNAME(sc));
1517 		return ENXIO;
1518 	}
1519 
1520 	DPRINTF(("%s: mac0 %x mac1 %x\n", DEVNAME(sc), mac_addr[0],
1521 	    mac_addr[1]));
1522 
1523 	mac_addr[0] = htobe32(mac_addr[0]);
1524 	mac_addr[1] = htobe32(mac_addr[1]);
1525 
1526 	DPRINTF(("%s: mac0 %x mac1 %x\n", DEVNAME(sc), mac_addr[0],
1527 	    mac_addr[1]));
1528 
1529 	memcpy(sc->sc_enaddr.ether_addr_octet,
1530 	    (uint8_t *)mac_addr, ETHER_ADDR_LEN);
1531 	DPRINTF((": %s", ether_sprintf(sc->sc_enaddr.ether_addr_octet)));
1532 
1533 	return 0;
1534 }
1535 
1536 int
1537 aq_activate(struct device *self, int act)
1538 {
1539 	return 0;
1540 }
1541 
1542 int
1543 aq_fw_downld_dwords(struct aq_softc *sc, uint32_t addr, uint32_t *p,
1544     uint32_t cnt)
1545 {
1546 	uint32_t v;
1547 	int error = 0;
1548 
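	/*
	 * The FW RAM semaphore appears to be acquired by reading
	 * AQ_FW_SEM_RAM_REG (a read of 1 means it was taken) and released
	 * by writing 1, which is done once the copy loop below finishes.
	 */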
1549 	WAIT_FOR(AQ_READ_REG(sc, AQ_FW_SEM_RAM_REG) == 1, 1, 10000, &error);
1550 	if (error != 0) {
1551 		AQ_WRITE_REG(sc, AQ_FW_SEM_RAM_REG, 1);
1552 		v = AQ_READ_REG(sc, AQ_FW_SEM_RAM_REG);
1553 		if (v == 0) {
1554 			printf("%s: %s:%d: timeout\n",
1555 			    DEVNAME(sc), __func__, __LINE__);
1556 			return ETIMEDOUT;
1557 		}
1558 	}
1559 
1560 	AQ_WRITE_REG(sc, AQ_FW_MBOX_ADDR_REG, addr);
1561 
1562 	error = 0;
1563 	for (; cnt > 0 && error == 0; cnt--) {
1564 		/* execute mailbox interface */
1565 		AQ_WRITE_REG_BIT(sc, AQ_FW_MBOX_CMD_REG,
1566 		    AQ_FW_MBOX_CMD_EXECUTE, 1);
1567 		if (sc->sc_features & FEATURES_REV_B1) {
1568 			WAIT_FOR(AQ_READ_REG(sc, AQ_FW_MBOX_ADDR_REG) != addr,
1569 			    1, 1000, &error);
1570 		} else {
1571 			WAIT_FOR((AQ_READ_REG(sc, AQ_FW_MBOX_CMD_REG) &
1572 			    AQ_FW_MBOX_CMD_BUSY) == 0,
1573 			    1, 1000, &error);
1574 		}
1575 		*p++ = AQ_READ_REG(sc, AQ_FW_MBOX_VAL_REG);
1576 		addr += sizeof(uint32_t);
1577 	}
1578 	AQ_WRITE_REG(sc, AQ_FW_SEM_RAM_REG, 1);
1579 
1580 	if (error != 0)
1581 		printf("%s: %s:%d: timeout\n",
1582 		    DEVNAME(sc), __func__, __LINE__);
1583 
1584 	return error;
1585 }
1586 
1587 int
1588 aq_fw2x_reset(struct aq_softc *sc)
1589 {
1590 	struct aq_fw2x_capabilities caps = { 0 };
1591 	int error;
1592 
1593 	error = aq_fw_downld_dwords(sc,
1594 	    sc->sc_mbox_addr + offsetof(struct aq_fw2x_mailbox, caps),
1595 	    (uint32_t *)&caps, sizeof caps / sizeof(uint32_t));
1596 	if (error != 0) {
1597 		printf("%s: fw2x> can't get F/W capabilities mask, error %d\n",
1598 		    DEVNAME(sc), error);
1599 		return error;
1600 	}
1601 	sc->sc_fw_caps = caps.caps_lo | ((uint64_t)caps.caps_hi << 32);
1602 
1603 	DPRINTF(("%s: fw2x> F/W capabilities=0x%llx\n", DEVNAME(sc),
1604 	    sc->sc_fw_caps));
1605 
1606 	return 0;
1607 }
1608 
1609 int
1610 aq_fw1x_reset(struct aq_softc *sc)
1611 {
1612 	printf("%s: unimplemented %s\n", DEVNAME(sc), __func__);
1613 	return 0;
1614 }
1615 
1616 int
1617 aq_fw1x_set_mode(struct aq_softc *sc, enum aq_hw_fw_mpi_state w,
1618     enum aq_link_speed x, enum aq_link_fc y, enum aq_link_eee z)
1619 {
1620 	return 0;
1621 }
1622 
1623 int
1624 aq_fw1x_get_mode(struct aq_softc *sc, enum aq_hw_fw_mpi_state *w,
1625     enum aq_link_speed *x, enum aq_link_fc *y, enum aq_link_eee *z)
1626 {
1627 	return 0;
1628 }
1629 
1630 int
1631 aq_fw1x_get_stats(struct aq_softc *sc, struct aq_hw_stats_s *w)
1632 {
1633 	return 0;
1634 }
1635 
1636 
1637 int
1638 aq_fw2x_get_mode(struct aq_softc *sc, enum aq_hw_fw_mpi_state *modep,
1639     enum aq_link_speed *speedp, enum aq_link_fc *fcp, enum aq_link_eee *eeep)
1640 {
1641 	uint64_t mpi_state, mpi_ctrl;
1642 	enum aq_link_speed speed;
1643 	enum aq_link_fc fc;
1644 
1645 	AQ_MPI_LOCK(sc);
1646 
1647 	mpi_state = AQ_READ64_REG(sc, FW2X_MPI_STATE_REG);
1648 	if (modep != NULL) {
1649 		mpi_ctrl = AQ_READ64_REG(sc, FW2X_MPI_CONTROL_REG);
1650 		if (mpi_ctrl & FW2X_CTRL_RATE_MASK)
1651 			*modep = MPI_INIT;
1652 		else
1653 			*modep = MPI_DEINIT;
1654 	}
1655 
1656 	AQ_MPI_UNLOCK(sc);
1657 
1658 	if (mpi_state & FW2X_CTRL_RATE_10G)
1659 		speed = AQ_LINK_10G;
1660 	else if (mpi_state & FW2X_CTRL_RATE_5G)
1661 		speed = AQ_LINK_5G;
1662 	else if (mpi_state & FW2X_CTRL_RATE_2G5)
1663 		speed = AQ_LINK_2G5;
1664 	else if (mpi_state & FW2X_CTRL_RATE_1G)
1665 		speed = AQ_LINK_1G;
1666 	else if (mpi_state & FW2X_CTRL_RATE_100M)
1667 		speed = AQ_LINK_100M;
1668 	else
1669 		speed = AQ_LINK_NONE;
1670 	if (speedp != NULL)
1671 		*speedp = speed;
1672 
1673 	fc = AQ_FC_NONE;
1674 	if (mpi_state & FW2X_CTRL_PAUSE)
1675 		fc |= AQ_FC_RX;
1676 	if (mpi_state & FW2X_CTRL_ASYMMETRIC_PAUSE)
1677 		fc |= AQ_FC_TX;
1678 	if (fcp != NULL)
1679 		*fcp = fc;
1680 
1681 	if (eeep != NULL)
1682 		*eeep = AQ_EEE_DISABLE;
1683 
1684 	return 0;
1685 }
1686 
1687 int
1688 aq_fw2x_get_stats(struct aq_softc *sc, struct aq_hw_stats_s *w)
1689 {
1690 	return 0;
1691 }
1692 
1693 void
1694 aq_hw_l3_filter_set(struct aq_softc *sc)
1695 {
1696 	int i;
1697 
1698 	/* clear all filters */
1699 	for (i = 0; i < 8; i++) {
1700 		AQ_WRITE_REG_BIT(sc, RPF_L3_FILTER_REG(i),
1701 		    RPF_L3_FILTER_L4_EN, 0);
1702 	}
1703 }
1704 
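/*
 * One-time hardware setup: constrain the PCIe/DMA request sizes,
 * initialize the tx and rx datapaths, program the station address,
 * start with the link forced down, configure the QoS buffers, and set
 * up the interrupt control mode plus the error and link interrupt
 * mappings.
 */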
1705 int
1706 aq_hw_init(struct aq_softc *sc, int irqmode)
1707 {
1708 	uint32_t v;
1709 
1710 	/* Force limit MRRS on RDM/TDM to 2K */
1711 	v = AQ_READ_REG(sc, AQ_PCI_REG_CONTROL_6_REG);
1712 	AQ_WRITE_REG(sc, AQ_PCI_REG_CONTROL_6_REG, (v & ~0x0707) | 0x0404);
1713 
1714 	/*
1715 	 * TX DMA total request limit. B0 hardware is not capable to
1716 	 * handle more than (8K-MRRS) incoming DMA data.
1717 	 * Value 24 in 256byte units
1718 	 */
1719 	AQ_WRITE_REG(sc, AQ_HW_TX_DMA_TOTAL_REQ_LIMIT_REG, 24);
1720 
1721 	aq_hw_init_tx_path(sc);
1722 	aq_hw_init_rx_path(sc);
1723 
1724 	if (aq_set_mac_addr(sc, AQ_HW_MAC_OWN, sc->sc_enaddr.ether_addr_octet))
1725 		return EINVAL;
1726 
1727 	aq_set_linkmode(sc, AQ_LINK_NONE, AQ_FC_NONE, AQ_EEE_DISABLE);
1728 
1729 	aq_hw_qos_set(sc);
1730 
1731 	/* Enable interrupt */
1732 	AQ_WRITE_REG(sc, AQ_INTR_CTRL_REG, AQ_INTR_CTRL_RESET_DIS);
1733 	AQ_WRITE_REG_BIT(sc, AQ_INTR_CTRL_REG, AQ_INTR_CTRL_MULTIVEC, 0);
1734 
1735 	AQ_WRITE_REG_BIT(sc, AQ_INTR_CTRL_REG, AQ_INTR_CTRL_IRQMODE, irqmode);
1736 
1737 	AQ_WRITE_REG(sc, AQ_INTR_AUTOMASK_REG, 0xffffffff);
1738 
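	/*
	 * Map the hardware error interrupt sources to vector
	 * AQ_B0_ERR_INT; the high bit in each half appears to be the
	 * per-entry enable bit for the mapping.
	 */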
1739 	AQ_WRITE_REG(sc, AQ_GEN_INTR_MAP_REG(0),
1740 	    ((AQ_B0_ERR_INT << 24) | (1U << 31)) |
1741 	    ((AQ_B0_ERR_INT << 16) | (1 << 23))
1742 	);
1743 
1744 	/* link interrupt */
1745 	sc->sc_linkstat_irq = AQ_LINKSTAT_IRQ;
1746 	AQ_WRITE_REG(sc, AQ_GEN_INTR_MAP_REG(3),
1747 	    (1 << 7) | sc->sc_linkstat_irq);
1748 
1749 	return 0;
1750 }
1751 
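/*
 * Transmit datapath defaults: enable TC mode on the tx buffer, load the
 * TCP flag masks applied to the first/middle/last segments of an LSO
 * packet, enable the TPO2 offload engine when the hardware supports it,
 * and leave DCA disabled.
 */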
1752 void
1753 aq_hw_init_tx_path(struct aq_softc *sc)
1754 {
1755 	/* Tx TC/RSS number config */
1756 	AQ_WRITE_REG_BIT(sc, TPB_TX_BUF_REG, TPB_TX_BUF_TC_MODE_EN, 1);
1757 
1758 	AQ_WRITE_REG_BIT(sc, THM_LSO_TCP_FLAG1_REG,
1759 	    THM_LSO_TCP_FLAG1_FIRST, 0x0ff6);
1760 	AQ_WRITE_REG_BIT(sc, THM_LSO_TCP_FLAG1_REG,
1761 	    THM_LSO_TCP_FLAG1_MID,   0x0ff6);
1762 	AQ_WRITE_REG_BIT(sc, THM_LSO_TCP_FLAG2_REG,
1763 	    THM_LSO_TCP_FLAG2_LAST,  0x0f7f);
1764 
1765 	/* misc */
1766 	AQ_WRITE_REG(sc, TX_TPO2_REG,
1767 	    (sc->sc_features & FEATURES_TPO2) ? TX_TPO2_EN : 0);
1768 	AQ_WRITE_REG_BIT(sc, TDM_DCA_REG, TDM_DCA_EN, 0);
1769 	AQ_WRITE_REG_BIT(sc, TDM_DCA_REG, TDM_DCA_MODE, 0);
1770 
1771 	AQ_WRITE_REG_BIT(sc, TPB_TX_BUF_REG, TPB_TX_BUF_SCP_INS_EN, 1);
1772 }
1773 
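/*
 * Receive datapath defaults: clear the TC/RSS and flow control modes,
 * disable all ethertype and L2 unicast filters, accept broadcast, set
 * the VLAN TPIDs, and (on rev B parts) let untagged frames through to
 * the host.
 */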
1774 void
1775 aq_hw_init_rx_path(struct aq_softc *sc)
1776 {
1777 	int i;
1778 
1779 	/* clear setting */
1780 	AQ_WRITE_REG_BIT(sc, RPB_RPF_RX_REG, RPB_RPF_RX_TC_MODE, 0);
1781 	AQ_WRITE_REG_BIT(sc, RPB_RPF_RX_REG, RPB_RPF_RX_FC_MODE, 0);
1782 	AQ_WRITE_REG(sc, RX_FLR_RSS_CONTROL1_REG, 0);
1783 	for (i = 0; i < 32; i++) {
1784 		AQ_WRITE_REG_BIT(sc, RPF_ETHERTYPE_FILTER_REG(i),
1785 		    RPF_ETHERTYPE_FILTER_EN, 0);
1786 	}
1787 
1788 	/* L2 and Multicast filters */
1789 	for (i = 0; i < AQ_HW_MAC_NUM; i++) {
1790 		AQ_WRITE_REG_BIT(sc, RPF_L2UC_MSW_REG(i), RPF_L2UC_MSW_EN, 0);
1791 		AQ_WRITE_REG_BIT(sc, RPF_L2UC_MSW_REG(i), RPF_L2UC_MSW_ACTION,
1792 		    RPF_ACTION_HOST);
1793 	}
1794 	AQ_WRITE_REG(sc, RPF_MCAST_FILTER_MASK_REG, 0);
1795 	AQ_WRITE_REG(sc, RPF_MCAST_FILTER_REG(0), 0x00010fff);
1796 
1797 	/* Vlan filters */
1798 	AQ_WRITE_REG_BIT(sc, RPF_VLAN_TPID_REG, RPF_VLAN_TPID_OUTER,
1799 	    ETHERTYPE_QINQ);
1800 	AQ_WRITE_REG_BIT(sc, RPF_VLAN_TPID_REG, RPF_VLAN_TPID_INNER,
1801 	    ETHERTYPE_VLAN);
1802 	AQ_WRITE_REG_BIT(sc, RPF_VLAN_MODE_REG, RPF_VLAN_MODE_PROMISC, 0);
1803 
1804 	if (sc->sc_features & FEATURES_REV_B) {
1805 		AQ_WRITE_REG_BIT(sc, RPF_VLAN_MODE_REG,
1806 		    RPF_VLAN_MODE_ACCEPT_UNTAGGED, 1);
1807 		AQ_WRITE_REG_BIT(sc, RPF_VLAN_MODE_REG,
1808 		    RPF_VLAN_MODE_UNTAGGED_ACTION, RPF_ACTION_HOST);
1809 	}
1810 
1811 	AQ_WRITE_REG(sc, RX_TCP_RSS_HASH_REG, 0);
1812 
1813 	AQ_WRITE_REG_BIT(sc, RPF_L2BC_REG, RPF_L2BC_EN, 1);
1814 	AQ_WRITE_REG_BIT(sc, RPF_L2BC_REG, RPF_L2BC_ACTION, RPF_ACTION_HOST);
1815 	AQ_WRITE_REG_BIT(sc, RPF_L2BC_REG, RPF_L2BC_THRESHOLD, 0xffff);
1816 
1817 	AQ_WRITE_REG_BIT(sc, RX_DMA_DCA_REG, RX_DMA_DCA_EN, 0);
1818 	AQ_WRITE_REG_BIT(sc, RX_DMA_DCA_REG, RX_DMA_DCA_MODE, 0);
1819 }
1820 
1821 /*
 * set an L2 MAC address filter slot. index 0 holds the interface's own
 * address; aq_iff() uses the remaining slots for multicast addresses.
 */
1822 int
1823 aq_set_mac_addr(struct aq_softc *sc, int index, uint8_t *enaddr)
1824 {
1825 	uint32_t h, l;
1826 
1827 	if (index >= AQ_HW_MAC_NUM)
1828 		return EINVAL;
1829 
1830 	if (enaddr == NULL) {
1831 		/* disable */
1832 		AQ_WRITE_REG_BIT(sc,
1833 		    RPF_L2UC_MSW_REG(index), RPF_L2UC_MSW_EN, 0);
1834 		return 0;
1835 	}
1836 
1837 	h = (enaddr[0] <<  8) | (enaddr[1]);
1838 	l = ((uint32_t)enaddr[2] << 24) | (enaddr[3] << 16) |
1839 	    (enaddr[4] <<  8) | (enaddr[5]);
1840 
1841 	/* disable, set, and enable */
1842 	AQ_WRITE_REG_BIT(sc, RPF_L2UC_MSW_REG(index), RPF_L2UC_MSW_EN, 0);
1843 	AQ_WRITE_REG(sc, RPF_L2UC_LSW_REG(index), l);
1844 	AQ_WRITE_REG_BIT(sc, RPF_L2UC_MSW_REG(index),
1845 	    RPF_L2UC_MSW_MACADDR_HI, h);
1846 	AQ_WRITE_REG_BIT(sc, RPF_L2UC_MSW_REG(index), RPF_L2UC_MSW_ACTION, 1);
1847 	AQ_WRITE_REG_BIT(sc, RPF_L2UC_MSW_REG(index), RPF_L2UC_MSW_EN, 1);
1848 
1849 	return 0;
1850 }
1851 
1852 int
1853 aq_get_linkmode(struct aq_softc *sc, enum aq_link_speed *speed,
1854     enum aq_link_fc *fc, enum aq_link_eee *eee)
1855 {
1856 	enum aq_hw_fw_mpi_state mode;
1857 	int error;
1858 
1859 	error = sc->sc_fw_ops->get_mode(sc, &mode, speed, fc, eee);
1860 	if (error != 0)
1861 		return error;
1862 	if (mode != MPI_INIT)
1863 		return ENXIO;
1864 
1865 	return 0;
1866 }
1867 
1868 int
1869 aq_set_linkmode(struct aq_softc *sc, enum aq_link_speed speed,
1870     enum aq_link_fc fc, enum aq_link_eee eee)
1871 {
1872 	return sc->sc_fw_ops->set_mode(sc, MPI_INIT, speed, fc, eee);
1873 }
1874 
1875 int
1876 aq_fw2x_set_mode(struct aq_softc *sc, enum aq_hw_fw_mpi_state mode,
1877     enum aq_link_speed speed, enum aq_link_fc fc, enum aq_link_eee eee)
1878 {
1879 	uint64_t mpi_ctrl;
1880 	int error = 0;
1881 
1882 	AQ_MPI_LOCK(sc);
1883 
1884 	mpi_ctrl = AQ_READ64_REG(sc, FW2X_MPI_CONTROL_REG);
1885 
1886 	switch (mode) {
1887 	case MPI_INIT:
1888 		mpi_ctrl &= ~FW2X_CTRL_RATE_MASK;
1889 		if (speed & AQ_LINK_10G)
1890 			mpi_ctrl |= FW2X_CTRL_RATE_10G;
1891 		if (speed & AQ_LINK_5G)
1892 			mpi_ctrl |= FW2X_CTRL_RATE_5G;
1893 		if (speed & AQ_LINK_2G5)
1894 			mpi_ctrl |= FW2X_CTRL_RATE_2G5;
1895 		if (speed & AQ_LINK_1G)
1896 			mpi_ctrl |= FW2X_CTRL_RATE_1G;
1897 		if (speed & AQ_LINK_100M)
1898 			mpi_ctrl |= FW2X_CTRL_RATE_100M;
1899 
1900 		mpi_ctrl &= ~FW2X_CTRL_LINK_DROP;
1901 
1902 		mpi_ctrl &= ~FW2X_CTRL_EEE_MASK;
1903 		if (eee == AQ_EEE_ENABLE)
1904 			mpi_ctrl |= FW2X_CTRL_EEE_MASK;
1905 
1906 		mpi_ctrl &= ~(FW2X_CTRL_PAUSE | FW2X_CTRL_ASYMMETRIC_PAUSE);
1907 		if (fc & AQ_FC_RX)
1908 			mpi_ctrl |= FW2X_CTRL_PAUSE;
1909 		if (fc & AQ_FC_TX)
1910 			mpi_ctrl |= FW2X_CTRL_ASYMMETRIC_PAUSE;
1911 		break;
1912 	case MPI_DEINIT:
1913 		mpi_ctrl &= ~(FW2X_CTRL_RATE_MASK | FW2X_CTRL_EEE_MASK);
1914 		mpi_ctrl &= ~(FW2X_CTRL_PAUSE | FW2X_CTRL_ASYMMETRIC_PAUSE);
1915 		break;
1916 	default:
1917 		printf("%s: fw2x> unknown MPI state %d\n", DEVNAME(sc), mode);
1918 		error = EINVAL;
1919 		goto failure;
1920 	}
1921 	AQ_WRITE64_REG(sc, FW2X_MPI_CONTROL_REG, mpi_ctrl);
1922 
1923  failure:
1924 	AQ_MPI_UNLOCK(sc);
1925 	return error;
1926 }
1927 
1928 void
1929 aq_hw_qos_set(struct aq_softc *sc)
1930 {
1931 	uint32_t tc = 0;
1932 	uint32_t buff_size;
1933 
1934 	/* TPS Descriptor rate init */
1935 	AQ_WRITE_REG_BIT(sc, TPS_DESC_RATE_REG, TPS_DESC_RATE_TA_RST, 0);
1936 	AQ_WRITE_REG_BIT(sc, TPS_DESC_RATE_REG, TPS_DESC_RATE_LIM, 0xa);
1937 
1938 	/* TPS VM init */
1939 	AQ_WRITE_REG_BIT(sc, TPS_DESC_VM_ARB_MODE_REG, TPS_DESC_VM_ARB_MODE, 0);
1940 
1941 	/* TPS TC credits init */
1942 	AQ_WRITE_REG_BIT(sc, TPS_DESC_TC_ARB_MODE_REG, TPS_DESC_TC_ARB_MODE, 0);
1943 	AQ_WRITE_REG_BIT(sc, TPS_DATA_TC_ARB_MODE_REG, TPS_DATA_TC_ARB_MODE, 0);
1944 
1945 	AQ_WRITE_REG_BIT(sc, TPS_DATA_TCT_REG(tc),
1946 	    TPS_DATA_TCT_CREDIT_MAX, 0xfff);
1947 	AQ_WRITE_REG_BIT(sc, TPS_DATA_TCT_REG(tc),
1948 	    TPS_DATA_TCT_WEIGHT, 0x64);
1949 	AQ_WRITE_REG_BIT(sc, TPS_DESC_TCT_REG(tc),
1950 	    TPS_DESC_TCT_CREDIT_MAX, 0x50);
1951 	AQ_WRITE_REG_BIT(sc, TPS_DESC_TCT_REG(tc),
1952 	    TPS_DESC_TCT_WEIGHT, 0x1e);
1953 
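	/*
	 * The buffer sizes below are programmed in kilobytes, while the
	 * watermark fields appear to be in 32-byte units, hence the
	 * buff_size * (1024 / 32) conversion; the high and low thresholds
	 * are set to 66% and 50% of the buffer.
	 */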
1954 	/* Tx buf size */
1955 	tc = 0;
1956 	buff_size = AQ_HW_TXBUF_MAX;
1957 	AQ_WRITE_REG_BIT(sc, TPB_TXB_BUFSIZE_REG(tc), TPB_TXB_BUFSIZE,
1958 	    buff_size);
1959 	AQ_WRITE_REG_BIT(sc, TPB_TXB_THRESH_REG(tc), TPB_TXB_THRESH_HI,
1960 	    (buff_size * (1024 / 32) * 66) / 100);
1961 	AQ_WRITE_REG_BIT(sc, TPB_TXB_THRESH_REG(tc), TPB_TXB_THRESH_LO,
1962 	    (buff_size * (1024 / 32) * 50) / 100);
1963 
1964 	/* QoS Rx buf size per TC */
1965 	tc = 0;
1966 	buff_size = AQ_HW_RXBUF_MAX;
1967 	AQ_WRITE_REG_BIT(sc, RPB_RXB_BUFSIZE_REG(tc), RPB_RXB_BUFSIZE,
1968 	    buff_size);
1969 	AQ_WRITE_REG_BIT(sc, RPB_RXB_XOFF_REG(tc), RPB_RXB_XOFF_EN, 0);
1970 	AQ_WRITE_REG_BIT(sc, RPB_RXB_XOFF_REG(tc), RPB_RXB_XOFF_THRESH_HI,
1971 	    (buff_size * (1024 / 32) * 66) / 100);
1972 	AQ_WRITE_REG_BIT(sc, RPB_RXB_XOFF_REG(tc), RPB_RXB_XOFF_THRESH_LO,
1973 	    (buff_size * (1024 / 32) * 50) / 100);
1974 
1975 	/* QoS 802.1p priority -> TC mapping */
1976 	int i_priority;
1977 	for (i_priority = 0; i_priority < 8; i_priority++) {
1978 		AQ_WRITE_REG_BIT(sc, RPF_RPB_RX_TC_UPT_REG,
1979 		    RPF_RPB_RX_TC_UPT_MASK(i_priority), 0);
1980 	}
1981 }
1982 
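/*
 * Stop a tx ring and, when (re)starting it, program the descriptor base
 * address and ring length (the length register seems to count in units
 * of 8 descriptors), resynchronize the software producer/consumer with
 * the hardware tail pointer, and map the ring to its interrupt vector.
 */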
1983 void
1984 aq_txring_reset(struct aq_softc *sc, struct aq_txring *tx, int start)
1985 {
1986 	daddr_t paddr;
1987 
1988 	tx->tx_prod = 0;
1989 	tx->tx_cons = 0;
1990 
1991 	/* empty slots? */
1992 
1993 	AQ_WRITE_REG_BIT(sc, TX_DMA_DESC_REG(tx->tx_q), TX_DMA_DESC_EN, 0);
1994 
1995 	if (start == 0)
1996 		return;
1997 
1998 	paddr = AQ_DMA_DVA(&tx->tx_mem);
1999 	AQ_WRITE_REG(sc, TX_DMA_DESC_BASE_ADDRLSW_REG(tx->tx_q), paddr);
2000 	AQ_WRITE_REG(sc, TX_DMA_DESC_BASE_ADDRMSW_REG(tx->tx_q),
2001 	    paddr >> 32);
2002 
2003 	AQ_WRITE_REG_BIT(sc, TX_DMA_DESC_REG(tx->tx_q), TX_DMA_DESC_LEN,
2004 	    AQ_TXD_NUM / 8);
2005 
2006 	tx->tx_prod = AQ_READ_REG(sc, TX_DMA_DESC_TAIL_PTR_REG(tx->tx_q));
2007 	tx->tx_cons = tx->tx_prod;
2008 	AQ_WRITE_REG(sc, TX_DMA_DESC_WRWB_THRESH_REG(tx->tx_q), 0);
2009 
2010 	AQ_WRITE_REG_BIT(sc, AQ_INTR_IRQ_MAP_TX_REG(tx->tx_q),
2011 	    AQ_INTR_IRQ_MAP_TX_IRQMAP(tx->tx_q), tx->tx_irq);
2012 	AQ_WRITE_REG_BIT(sc, AQ_INTR_IRQ_MAP_TX_REG(tx->tx_q),
2013 	    AQ_INTR_IRQ_MAP_TX_EN(tx->tx_q), 1);
2014 
2015 	AQ_WRITE_REG_BIT(sc, TX_DMA_DESC_REG(tx->tx_q), TX_DMA_DESC_EN, 1);
2016 
2017 	AQ_WRITE_REG_BIT(sc, TDM_DCAD_REG(tx->tx_q), TDM_DCAD_CPUID, 0);
2018 	AQ_WRITE_REG_BIT(sc, TDM_DCAD_REG(tx->tx_q), TDM_DCAD_CPUID_EN, 0);
2019 }
2020 
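/*
 * Stop an rx ring and, when (re)starting it, program the descriptor base
 * address, ring length and receive buffer size (the buffer size field
 * presumably counts 1 KB units, hence MCLBYTES / 1024), disable header
 * split and VLAN stripping, sync the software pointers with the hardware
 * head pointer, and map the ring to its interrupt vector.
 */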
2021 void
2022 aq_rxring_reset(struct aq_softc *sc, struct aq_rxring *rx, int start)
2023 {
2024 	daddr_t paddr;
2025 
2026 	AQ_WRITE_REG_BIT(sc, RX_DMA_DESC_REG(rx->rx_q), RX_DMA_DESC_EN, 0);
2027 	/* drain */
2028 
2029 	if (start == 0)
2030 		return;
2031 
2032 	paddr = AQ_DMA_DVA(&rx->rx_mem);
2033 	AQ_WRITE_REG(sc, RX_DMA_DESC_BASE_ADDRLSW_REG(rx->rx_q), paddr);
2034 	AQ_WRITE_REG(sc, RX_DMA_DESC_BASE_ADDRMSW_REG(rx->rx_q),
2035 	    paddr >> 32);
2036 
2037 	AQ_WRITE_REG_BIT(sc, RX_DMA_DESC_REG(rx->rx_q), RX_DMA_DESC_LEN,
2038 	    AQ_RXD_NUM / 8);
2039 
2040 	AQ_WRITE_REG_BIT(sc, RX_DMA_DESC_BUFSIZE_REG(rx->rx_q),
2041 	    RX_DMA_DESC_BUFSIZE_DATA, MCLBYTES / 1024);
2042 	AQ_WRITE_REG_BIT(sc, RX_DMA_DESC_BUFSIZE_REG(rx->rx_q),
2043 	    RX_DMA_DESC_BUFSIZE_HDR, 0);
2044 	AQ_WRITE_REG_BIT(sc, RX_DMA_DESC_REG(rx->rx_q),
2045 	    RX_DMA_DESC_HEADER_SPLIT, 0);
2046 	AQ_WRITE_REG_BIT(sc, RX_DMA_DESC_REG(rx->rx_q),
2047 	    RX_DMA_DESC_VLAN_STRIP, 0);
2048 
2049 	rx->rx_cons = AQ_READ_REG(sc, RX_DMA_DESC_HEAD_PTR_REG(rx->rx_q)) &
2050 	    RX_DMA_DESC_HEAD_PTR;
2051 	AQ_WRITE_REG(sc, RX_DMA_DESC_TAIL_PTR_REG(rx->rx_q), rx->rx_cons);
2052 	rx->rx_prod = rx->rx_cons;
2053 
2054 	AQ_WRITE_REG_BIT(sc, AQ_INTR_IRQ_MAP_RX_REG(rx->rx_q),
2055 	    AQ_INTR_IRQ_MAP_RX_IRQMAP(rx->rx_q), rx->rx_irq);
2056 	AQ_WRITE_REG_BIT(sc, AQ_INTR_IRQ_MAP_RX_REG(rx->rx_q),
2057 	    AQ_INTR_IRQ_MAP_RX_EN(rx->rx_q), 1);
2058 
2059 	AQ_WRITE_REG_BIT(sc, RX_DMA_DCAD_REG(rx->rx_q),
2060 	    RX_DMA_DCAD_CPUID, 0);
2061 	AQ_WRITE_REG_BIT(sc, RX_DMA_DCAD_REG(rx->rx_q),
2062 	    RX_DMA_DCAD_DESC_EN, 0);
2063 	AQ_WRITE_REG_BIT(sc, RX_DMA_DCAD_REG(rx->rx_q),
2064 	    RX_DMA_DCAD_HEADER_EN, 0);
2065 	AQ_WRITE_REG_BIT(sc, RX_DMA_DCAD_REG(rx->rx_q),
2066 	    RX_DMA_DCAD_PAYLOAD_EN, 0);
2067 
2068 	AQ_WRITE_REG_BIT(sc, RX_DMA_DESC_REG(rx->rx_q), RX_DMA_DESC_EN, 1);
2069 }
2070 
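/*
 * Fill up to nslots rx descriptors with fresh cluster mbufs.  The mbuf
 * data pointer is pushed towards the end of the cluster and offset by
 * ETHER_ALIGN so the IP header ends up aligned.  Returns the number of
 * slots that could not be filled, which the caller hands back to
 * if_rxr_put().
 */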
2071 static inline unsigned int
2072 aq_rx_fill_slots(struct aq_softc *sc, struct aq_rxring *rx, uint nslots)
2073 {
2074 	struct aq_rx_desc_read *ring, *rd;
2075 	struct aq_slot *as;
2076 	struct mbuf *m;
2077 	uint p, fills;
2078 
2079 	ring = AQ_DMA_KVA(&rx->rx_mem);
2080 	p = rx->rx_prod;
2081 
2082 	bus_dmamap_sync(sc->sc_dmat, AQ_DMA_MAP(&rx->rx_mem), 0,
2083 	    AQ_DMA_LEN(&rx->rx_mem), BUS_DMASYNC_POSTWRITE);
2084 
2085 	for (fills = 0; fills < nslots; fills++) {
2086 		as = &rx->rx_slots[p];
2087 		rd = &ring[p];
2088 
2089 		m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES + ETHER_ALIGN);
2090 		if (m == NULL)
2091 			break;
2092 
2093 		m->m_data += (m->m_ext.ext_size - MCLBYTES);
2094 		m->m_data += ETHER_ALIGN;
2095 		m->m_len = m->m_pkthdr.len = MCLBYTES;
2096 
2097 		if (bus_dmamap_load_mbuf(sc->sc_dmat, as->as_map, m,
2098 		    BUS_DMA_NOWAIT) != 0) {
2099 			m_freem(m);
2100 			break;
2101 		}
2102 		as->as_m = m;
2103 
2104 		htolem64(&rd->buf_addr, as->as_map->dm_segs[0].ds_addr);
2105 		rd->hdr_addr = 0;
2106 		p++;
2107 		if (p == AQ_RXD_NUM)
2108 			p = 0;
2109 	}
2110 
2111 	bus_dmamap_sync(sc->sc_dmat, AQ_DMA_MAP(&rx->rx_mem), 0,
2112 	    AQ_DMA_LEN(&rx->rx_mem), BUS_DMASYNC_PREWRITE);
2113 
2114 	rx->rx_prod = p;
2115 	AQ_WRITE_REG(sc, RX_DMA_DESC_TAIL_PTR_REG(rx->rx_q), rx->rx_prod);
2116 	return (nslots - fills);
2117 }
2118 
2119 int
2120 aq_rx_fill(struct aq_softc *sc, struct aq_rxring *rx)
2121 {
2122 	u_int slots;
2123 
2124 	slots = if_rxr_get(&rx->rx_rxr, AQ_RXD_NUM);
2125 	if (slots == 0)
2126 		return 1;
2127 
2128 	slots = aq_rx_fill_slots(sc, rx, slots);
2129 	if_rxr_put(&rx->rx_rxr, slots);
2130 	return 0;
2131 }
2132 
2133 void
2134 aq_refill(void *xq)
2135 {
2136 	struct aq_queues *q = xq;
2137 	struct aq_softc *sc = q->q_sc;
2138 
2139 	aq_rx_fill(sc, &q->q_rx);
2140 
2141 	if (if_rxr_inuse(&q->q_rx.rx_rxr) == 0)
2142 		timeout_add(&q->q_rx.rx_refill, 1);
2143 }
2144 
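/*
 * Harvest received packets: walk the ring from the software consumer up
 * to the hardware head pointer, stop at the first descriptor without the
 * DD (done) status bit, pass good packets to the stack via ifiq_input(),
 * and refill the slots that were consumed.
 */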
2145 void
2146 aq_rxeof(struct aq_softc *sc, struct aq_rxring *rx)
2147 {
2148 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2149 	struct aq_rx_desc_wb *rxd;
2150 	struct aq_rx_desc_wb *ring;
2151 	struct aq_slot *as;
2152 	uint32_t end, idx;
2153 	uint16_t pktlen, status;
2154 	uint32_t rxd_type;
2155 	struct mbuf *m;
2156 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
2157 	int rxfree;
2158 
2159 	if (!ISSET(ifp->if_flags, IFF_RUNNING))
2160 		return;
2161 
2162 	end = AQ_READ_REG(sc, RX_DMA_DESC_HEAD_PTR_REG(rx->rx_q)) &
2163 	    RX_DMA_DESC_HEAD_PTR;
2164 
2165 	bus_dmamap_sync(sc->sc_dmat, AQ_DMA_MAP(&rx->rx_mem), 0,
2166 	    AQ_DMA_LEN(&rx->rx_mem), BUS_DMASYNC_POSTREAD);
2167 
2168 	rxfree = 0;
2169 	idx = rx->rx_cons;
2170 	ring = AQ_DMA_KVA(&rx->rx_mem);
2171 	while (idx != end) {
2172 		rxd = &ring[idx];
2173 		as = &rx->rx_slots[idx];
2174 
2175 		bus_dmamap_sync(sc->sc_dmat, as->as_map, 0,
2176 		    as->as_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
2177 		bus_dmamap_unload(sc->sc_dmat, as->as_map);
2178 
2179 		status = lemtoh16(&rxd->status);
2180 		if ((status & AQ_RXDESC_STATUS_DD) == 0)
2181 			break;
2182 
2183 		rxfree++;
2184 		m = as->as_m;
2185 		as->as_m = NULL;
2186 
2187 		pktlen = lemtoh16(&rxd->pkt_len);
2188 		rxd_type = lemtoh32(&rxd->type);
2189 		/* rss hash, vlan */
2190 
2191 		if ((status & AQ_RXDESC_STATUS_MACERR) ||
2192 		    (rxd_type & AQ_RXDESC_TYPE_DMA_ERR)) {
2193 			printf("%s: rx error (status %x type %x)\n",
2194 			    DEVNAME(sc), status, rxd_type);
2195 			m_freem(m);
2196 		} else {
2197 			m->m_pkthdr.len = m->m_len = pktlen;
2198 			ml_enqueue(&ml, m);
2199 		}
2200 
2201 		idx++;
2202 		if (idx == AQ_RXD_NUM)
2203 			idx = 0;
2204 	}
2205 	rx->rx_cons = idx;
2206 
2207 	if (rxfree > 0) {
2208 		if_rxr_put(&rx->rx_rxr, rxfree);
2209 		if (ifiq_input(rx->rx_ifiq, &ml))
2210 			if_rxr_livelocked(&rx->rx_rxr);
2211 
2212 		aq_rx_fill(sc, rx);
2213 		if (if_rxr_inuse(&rx->rx_rxr) == 0)
2214 			timeout_add(&rx->rx_refill, 1);
2215 	}
2216 }
2217 
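/*
 * Reclaim completed tx descriptors between the software consumer and the
 * hardware head pointer, free the transmitted mbufs, and restart the
 * ifqueue if it had been marked oactive.
 */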
2218 void
2219 aq_txeof(struct aq_softc *sc, struct aq_txring *tx)
2220 {
2221 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2222 	struct aq_slot *as;
2223 	uint32_t idx, end, free;
2224 
2225 	if (!ISSET(ifp->if_flags, IFF_RUNNING))
2226 		return;
2227 
2228 	idx = tx->tx_cons;
2229 	end = AQ_READ_REG(sc, TX_DMA_DESC_HEAD_PTR_REG(tx->tx_q)) &
2230 	    TX_DMA_DESC_HEAD_PTR;
2231 	free = 0;
2232 
2233 	bus_dmamap_sync(sc->sc_dmat, AQ_DMA_MAP(&tx->tx_mem), 0,
2234 	    AQ_DMA_LEN(&tx->tx_mem), BUS_DMASYNC_POSTREAD);
2235 
2236 	while (idx != end) {
2237 		as = &tx->tx_slots[idx];
2238 		bus_dmamap_unload(sc->sc_dmat, as->as_map);
2239 
2240 		m_freem(as->as_m);
2241 		as->as_m = NULL;
2242 
2243 		idx++;
2244 		if (idx == AQ_TXD_NUM)
2245 			idx = 0;
2246 		free++;
2247 	}
2248 
2249 	tx->tx_cons = idx;
2250 
2251 	if (free != 0) {
2252 		if (ifq_is_oactive(tx->tx_ifq))
2253 			ifq_restart(tx->tx_ifq);
2254 	}
2255 }
2256 
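/*
 * Transmit start routine.  Each packet is defragmented into a single
 * contiguous mbuf so it fits in one descriptor, which is filled in with
 * the FCS/EOP/writeback commands; the hardware is kicked by writing the
 * new producer index to the ring's tail pointer register.  When there is
 * no longer room for a full packet's worth of descriptors the queue is
 * marked oactive until aq_txeof() frees space.
 */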
2257 void
2258 aq_start(struct ifqueue *ifq)
2259 {
2260 	struct aq_queues *aq = ifq->ifq_softc;
2261 	struct aq_softc *sc = aq->q_sc;
2262 	struct aq_txring *tx = &aq->q_tx;
2263 	struct aq_tx_desc *ring, *txd;
2264 	struct aq_slot *as;
2265 	struct mbuf *m;
2266 	uint32_t idx, free, used, ctl1, ctl2;
2267 
2268 	idx = tx->tx_prod;
2269 	free = tx->tx_cons + AQ_TXD_NUM - tx->tx_prod;
2270 	used = 0;
2271 
2272 	bus_dmamap_sync(sc->sc_dmat, AQ_DMA_MAP(&tx->tx_mem), 0,
2273 	    AQ_DMA_LEN(&tx->tx_mem), BUS_DMASYNC_POSTWRITE);
2274 	ring = (struct aq_tx_desc *)AQ_DMA_KVA(&tx->tx_mem);
2275 
2276 	for (;;) {
2277 		if (used + AQ_TX_MAX_SEGMENTS >= free) {
2278 			ifq_set_oactive(ifq);
2279 			break;
2280 		}
2281 
2282 		m = ifq_dequeue(ifq);
2283 		if (m == NULL)
2284 			break;
2285 
2286 		txd = ring + idx;
2287 		as = &tx->tx_slots[idx];
2288 
2289 		if (m_defrag(m, M_DONTWAIT) != 0) {
2290 			m_freem(m);
2291 			break;
2292 		}
2293 
2294 		if (bus_dmamap_load_mbuf(sc->sc_dmat, as->as_map, m,
2295 		    BUS_DMA_STREAMING | BUS_DMA_NOWAIT) != 0) {
2296 			m_freem(m);
2297 			break;
2298 		}
2299 
2300 		as->as_m = m;
2301 
2302 #if NBPFILTER > 0
2303 		if (ifq->ifq_if->if_bpf)
2304 			bpf_mtap_ether(ifq->ifq_if->if_bpf, m, BPF_DIRECTION_OUT);
2305 #endif
2306 		bus_dmamap_sync(sc->sc_dmat, as->as_map, 0,
2307 		    as->as_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2308 
2309 		ctl1 = AQ_TXDESC_CTL1_TYPE_TXD | (as->as_map->dm_segs[0].ds_len <<
2310 		    AQ_TXDESC_CTL1_BLEN_SHIFT) | AQ_TXDESC_CTL1_CMD_FCS |
2311 		    AQ_TXDESC_CTL1_CMD_EOP | AQ_TXDESC_CTL1_CMD_WB;
2312 		ctl2 = m->m_pkthdr.len << AQ_TXDESC_CTL2_LEN_SHIFT;
2313 
2314 		txd->buf_addr = htole64(as->as_map->dm_segs[0].ds_addr);
2315 		txd->ctl1 = htole32(ctl1);
2316 		txd->ctl2 = htole32(ctl2);
2317 
2318 		idx++;
2319 		if (idx == AQ_TXD_NUM)
2320 			idx = 0;
2321 		used++;
2322 	}
2323 
2324 	bus_dmamap_sync(sc->sc_dmat, AQ_DMA_MAP(&tx->tx_mem), 0,
2325 	    AQ_DMA_LEN(&tx->tx_mem), BUS_DMASYNC_PREWRITE);
2326 
2327 	if (used != 0) {
2328 		tx->tx_prod = idx;
2329 		AQ_WRITE_REG(sc, TX_DMA_DESC_TAIL_PTR_REG(tx->tx_q),
2330 		    tx->tx_prod);
2331 	}
2332 }
2333 
2334 int
2335 aq_intr(void *arg)
2336 {
2337 	struct aq_softc *sc = arg;
2338 	struct aq_queues *aq = &sc->sc_queues[0];
2339 	uint32_t status;
2340 
2341 	status = AQ_READ_REG(sc, AQ_INTR_STATUS_REG);
2342 	AQ_WRITE_REG(sc, AQ_INTR_STATUS_CLR_REG, 0xffffffff);
2343 
2344 	if (status & (1 << sc->sc_linkstat_irq))
2345 		aq_update_link_status(sc);
2346 
2347 	if (status & (1 << aq->q_tx.tx_irq)) {
2348 		aq_txeof(sc, &aq->q_tx);
2349 		AQ_WRITE_REG(sc, AQ_INTR_STATUS_CLR_REG,
2350 		    (1 << aq->q_tx.tx_irq));
2351 	}
2352 	if (status & (1 << aq->q_rx.rx_irq)) {
2353 		aq_rxeof(sc, &aq->q_rx);
2354 		AQ_WRITE_REG(sc, AQ_INTR_STATUS_CLR_REG,
2355 		    (1 << aq->q_rx.rx_irq));
2356 	}
2357 
2358 	return 1;
2359 }
2360 
2361 void
2362 aq_watchdog(struct ifnet *ifp)
2363 {
2364 
2365 }
2366 
2367 void
2368 aq_free_slots(struct aq_softc *sc, struct aq_slot *slots, int allocated,
2369     int total)
2370 {
2371 	struct aq_slot *as;
2372 
2373 	int i = allocated;
2374 	while (i-- > 0) {
2375 		as = &slots[i];
2376 		bus_dmamap_destroy(sc->sc_dmat, as->as_map);
2377 		if (as->as_m != NULL)
2378 			m_freem(as->as_m);
2379 	}
2380 	free(slots, M_DEVBUF, total * sizeof(*as));
2381 }
2382 
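/*
 * Allocate the per-queue rx/tx slot arrays, DMA maps and descriptor
 * rings, then reset both rings into the running state.  On failure the
 * unwind path falls through from destroy_tx_slots into the rx cleanup,
 * resetting i to AQ_RXD_NUM so all rx DMA maps get destroyed.
 */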
2383 int
2384 aq_queue_up(struct aq_softc *sc, struct aq_queues *aq)
2385 {
2386 	struct aq_rxring *rx;
2387 	struct aq_txring *tx;
2388 	struct aq_slot *as;
2389 	int i;
2390 
2391 	rx = &aq->q_rx;
2392 	rx->rx_slots = mallocarray(sizeof(*as), AQ_RXD_NUM, M_DEVBUF,
2393 	    M_WAITOK | M_ZERO);
2394 	if (rx->rx_slots == NULL) {
2395 		printf("%s: failed to allocate rx slots %d\n", DEVNAME(sc),
2396 		    aq->q_index);
2397 		return ENOMEM;
2398 	}
2399 
2400 	for (i = 0; i < AQ_RXD_NUM; i++) {
2401 		as = &rx->rx_slots[i];
2402 		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
2403 		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
2404 		    &as->as_map) != 0) {
2405 			printf("%s: failed to allocate rx dma maps %d\n",
2406 			    DEVNAME(sc), aq->q_index);
2407 			goto destroy_rx_slots;
2408 		}
2409 	}
2410 
2411 	if (aq_dmamem_alloc(sc, &rx->rx_mem, AQ_RXD_NUM *
2412 	    sizeof(struct aq_rx_desc_read), PAGE_SIZE) != 0) {
2413 		printf("%s: unable to allocate rx ring %d\n", DEVNAME(sc),
2414 		    aq->q_index);
2415 		goto destroy_rx_slots;
2416 	}
2417 
2418 	tx = &aq->q_tx;
2419 	tx->tx_slots = mallocarray(sizeof(*as), AQ_TXD_NUM, M_DEVBUF,
2420 	    M_WAITOK | M_ZERO);
2421 	if (tx->tx_slots == NULL) {
2422 		printf("%s: failed to allocate tx slots %d\n", DEVNAME(sc),
2423 		    aq->q_index);
2424 		goto destroy_rx_ring;
2425 	}
2426 
2427 	for (i = 0; i < AQ_TXD_NUM; i++) {
2428 		as = &tx->tx_slots[i];
2429 		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES,
2430 		    AQ_TX_MAX_SEGMENTS, MCLBYTES, 0,
2431 		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
2432 		    &as->as_map) != 0) {
2433 			printf("%s: failed to allocate tx dma maps %d\n",
2434 			    DEVNAME(sc), aq->q_index);
2435 			goto destroy_tx_slots;
2436 		}
2437 	}
2438 
2439 	if (aq_dmamem_alloc(sc, &tx->tx_mem, AQ_TXD_NUM *
2440 	    sizeof(struct aq_tx_desc), PAGE_SIZE) != 0) {
2441 		printf("%s: unable to allocate tx ring %d\n", DEVNAME(sc),
2442 		    aq->q_index);
2443 		goto destroy_tx_slots;
2444 	}
2445 
2446 	aq_txring_reset(sc, tx, 1);
2447 	aq_rxring_reset(sc, rx, 1);
2448 	return 0;
2449 
2450 destroy_tx_slots:
2451 	aq_free_slots(sc, tx->tx_slots, i, AQ_TXD_NUM);
2452 	tx->tx_slots = NULL;
2453 	i = AQ_RXD_NUM;
2454 
2455 destroy_rx_ring:
2456 	aq_dmamem_free(sc, &rx->rx_mem);
2457 destroy_rx_slots:
2458 	aq_free_slots(sc, rx->rx_slots, i, AQ_RXD_NUM);
2459 	rx->rx_slots = NULL;
2460 	return ENOMEM;
2461 }
2462 
2463 void
2464 aq_queue_down(struct aq_softc *sc, struct aq_queues *aq)
2465 {
2466 	struct aq_txring *tx;
2467 	struct aq_rxring *rx;
2468 
2469 	tx = &aq->q_tx;
2470 	aq_txring_reset(sc, &aq->q_tx, 0);
2471 	if (tx->tx_slots != NULL) {
2472 		aq_free_slots(sc, tx->tx_slots, AQ_TXD_NUM, AQ_TXD_NUM);
2473 		tx->tx_slots = NULL;
2474 	}
2475 
2476 	aq_dmamem_free(sc, &tx->tx_mem);
2477 
2478 	rx = &aq->q_rx;
2479 	aq_rxring_reset(sc, &aq->q_rx, 0);
2480 	if (rx->rx_slots != NULL) {
2481 		aq_free_slots(sc, rx->rx_slots, AQ_RXD_NUM, AQ_RXD_NUM);
2482 		rx->rx_slots = NULL;
2483 	}
2484 
2485 	aq_dmamem_free(sc, &rx->rx_mem);
2486 }
2487 
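/*
 * Bring the interface up: set up every queue, enable interrupts and the
 * tx/rx packet buffers, and prime the rx rings.  Returns ENETRESET so
 * that the caller (aq_ioctl) reprograms the receive filters via aq_iff().
 */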
2488 int
2489 aq_up(struct aq_softc *sc)
2490 {
2491 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2492 	int i;
2493 
2494 	for (i = 0; i < sc->sc_nqueues; i++) {
2495 		if (aq_queue_up(sc, &sc->sc_queues[i]) != 0)
2496 			goto downqueues;
2497 	}
2498 
2499 	/* filters? */
2500 	/* enable checksum offload */
2501 
2502 	SET(ifp->if_flags, IFF_RUNNING);
2503 	aq_enable_intr(sc, 1, 1);
2504 	AQ_WRITE_REG_BIT(sc, TPB_TX_BUF_REG, TPB_TX_BUF_EN, 1);
2505 	AQ_WRITE_REG_BIT(sc, RPB_RPF_RX_REG, RPB_RPF_RX_BUF_EN, 1);
2506 
2507 	for (i = 0; i < sc->sc_nqueues; i++) {
2508 		struct aq_queues *aq = &sc->sc_queues[i];
2509 
2510 		if_rxr_init(&aq->q_rx.rx_rxr, 1, AQ_RXD_NUM - 1);
2511 		aq_rx_fill(sc, &aq->q_rx);
2512 
2513 		ifq_clr_oactive(aq->q_tx.tx_ifq);
2514 	}
2515 
2516 	return ENETRESET;
2517 
2518 downqueues:
2519 	for (i = 0; i < sc->sc_nqueues; i++)
2520 		aq_queue_down(sc, &sc->sc_queues[i]);
2521 	return ENOMEM;
2522 }
2523 
2524 void
2525 aq_down(struct aq_softc *sc)
2526 {
2527 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2528 	int i;
2529 
2530 	CLR(ifp->if_flags, IFF_RUNNING);
2531 
2532 	aq_enable_intr(sc, 1, 0);
2533 	intr_barrier(sc->sc_ih);
2534 
2535 	for (i = 0; i < sc->sc_nqueues; i++) {
2536 		/* queue intr barrier? */
2537 		aq_queue_down(sc, &sc->sc_queues[i]);
2538 	}
2539 }
2540 
2541 void
2542 aq_enable_intr(struct aq_softc *sc, int link, int txrx)
2543 {
2544 	uint32_t imask = 0;
2545 	int i;
2546 
2547 	if (txrx) {
2548 		for (i = 0; i < sc->sc_nqueues; i++) {
2549 			imask |= (1 << sc->sc_queues[i].q_tx.tx_irq);
2550 			imask |= (1 << sc->sc_queues[i].q_rx.rx_irq);
2551 		}
2552 	}
2553 
2554 	if (link)
2555 		imask |= (1 << sc->sc_linkstat_irq);
2556 
2557 	AQ_WRITE_REG(sc, AQ_INTR_MASK_REG, imask);
2558 	AQ_WRITE_REG(sc, AQ_INTR_STATUS_CLR_REG, 0xffffffff);
2559 }
2560 
2561 void
2562 aq_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2563 {
2564 	struct aq_softc *aq = ifp->if_softc;
2565 	enum aq_link_speed speed;
2566 	enum aq_link_fc fc;
2567 	int media;
2568 	int flow;
2569 
2570 	if (aq_get_linkmode(aq, &speed, &fc, NULL) != 0)
2571 		return;
2572 
2573 	switch (speed) {
2574 	case AQ_LINK_10G:
2575 		media = IFM_10G_T;
2576 		break;
2577 	case AQ_LINK_5G:
2578 		media = IFM_5000_T;
2579 		break;
2580 	case AQ_LINK_2G5:
2581 		media = IFM_2500_T;
2582 		break;
2583 	case AQ_LINK_1G:
2584 		media = IFM_1000_T;
2585 		break;
2586 	case AQ_LINK_100M:
2587 		media = IFM_100_TX;
2588 		break;
2589 	case AQ_LINK_NONE:
2590 		media = 0;
2591 		break;
2592 	}
2593 
2594 	flow = 0;
2595 	if (fc & AQ_FC_RX)
2596 		flow |= IFM_ETH_RXPAUSE;
2597 	if (fc & AQ_FC_TX)
2598 		flow |= IFM_ETH_TXPAUSE;
2599 
2600 	ifmr->ifm_status = IFM_AVALID;
2601 	if (speed != AQ_LINK_NONE) {
2602 		ifmr->ifm_status |= IFM_ACTIVE;
2603 		ifmr->ifm_active = IFM_ETHER | IFM_AUTO | media | flow;
2604 	}
2605 }
2606 
2607 int
2608 aq_ifmedia_change(struct ifnet *ifp)
2609 {
2610 	struct aq_softc *sc = ifp->if_softc;
2611 	enum aq_link_speed rate = AQ_LINK_NONE;
2612 	enum aq_link_fc fc = AQ_FC_NONE;
2613 
2614 	if (IFM_TYPE(sc->sc_media.ifm_media) != IFM_ETHER)
2615 		return EINVAL;
2616 
2617 	switch (IFM_SUBTYPE(sc->sc_media.ifm_media)) {
2618 	case IFM_AUTO:
2619 		rate = AQ_LINK_AUTO;
2620 		break;
2621 	case IFM_NONE:
2622 		rate = AQ_LINK_NONE;
2623 		break;
2624 	case IFM_100_TX:
2625 		rate = AQ_LINK_100M;
2626 		break;
2627 	case IFM_1000_T:
2628 		rate = AQ_LINK_1G;
2629 		break;
2630 	case IFM_2500_T:
2631 		rate = AQ_LINK_2G5;
2632 		break;
2633 	case IFM_5000_T:
2634 		rate = AQ_LINK_5G;
2635 		break;
2636 	case IFM_10G_T:
2637 		rate = AQ_LINK_10G;
2638 		break;
2639 	default:
2640 		return ENODEV;
2641 	}
2642 
2643 	if (sc->sc_media.ifm_media & IFM_FLOW)
2644 		fc = AQ_FC_ALL;
2645 
2646 	return aq_set_linkmode(sc, rate, fc, AQ_EEE_DISABLE);
2647 }
2648 
2649 void
2650 aq_update_link_status(struct aq_softc *sc)
2651 {
2652 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2653 	enum aq_link_speed speed;
2654 	enum aq_link_fc fc;
2655 
2656 	if (aq_get_linkmode(sc, &speed, &fc, NULL) != 0)
2657 		return;
2658 
2659 	if (speed == AQ_LINK_NONE) {
2660 		if (ifp->if_link_state != LINK_STATE_DOWN) {
2661 			ifp->if_link_state = LINK_STATE_DOWN;
2662 			if_link_state_change(ifp);
2663 		}
2664 	} else {
2665 		if (ifp->if_link_state != LINK_STATE_FULL_DUPLEX) {
2666 			ifp->if_link_state = LINK_STATE_FULL_DUPLEX;
2667 			if_link_state_change(ifp);
2668 		}
2669 	}
2670 }
2671 
2672 
2673 int
2674 aq_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2675 {
2676 	struct aq_softc *sc = ifp->if_softc;
2677 	struct ifreq *ifr = (struct ifreq *)data;
2678 	int error = 0, s;
2679 
2680 	s = splnet();
2681 
2682 	switch (cmd) {
2683 	case SIOCSIFADDR:
2684 		ifp->if_flags |= IFF_UP;
2685 		if ((ifp->if_flags & IFF_RUNNING) == 0)
2686 			error = aq_up(sc);
2687 		break;
2688 	case SIOCSIFFLAGS:
2689 		if (ifp->if_flags & IFF_UP) {
2690 			if (ifp->if_flags & IFF_RUNNING)
2691 				error = ENETRESET;
2692 			else
2693 				error = aq_up(sc);
2694 		} else {
2695 			if (ifp->if_flags & IFF_RUNNING)
2696 				aq_down(sc);
2697 		}
2698 		break;
2699 	case SIOCSIFMEDIA:
2700 	case SIOCGIFMEDIA:
2701 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
2702 		break;
2703 	default:
2704 		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
2705 	}
2706 
2707 	if (error == ENETRESET) {
2708 		if (ifp->if_flags & IFF_RUNNING)
2709 			aq_iff(sc);
2710 		error = 0;
2711 	}
2712 
2713 	splx(s);
2714 	return error;
2715 }
2716 
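/*
 * Program the receive filters: promiscuous mode uses the L2 promiscuous
 * bit, too many groups (or IFF_ALLMULTI) falls back to the all-multicast
 * filter, and otherwise each multicast address gets its own MAC filter
 * slot starting after the interface's own address.
 */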
2717 void
2718 aq_iff(struct aq_softc *sc)
2719 {
2720 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2721 	struct arpcom *ac = &sc->sc_arpcom;
2722 	struct ether_multi *enm;
2723 	struct ether_multistep step;
2724 	int idx;
2725 
2726 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
2727 		ifp->if_flags |= IFF_ALLMULTI;
2728 		AQ_WRITE_REG_BIT(sc, RPF_L2BC_REG, RPF_L2BC_PROMISC, 1);
2729 	} else if (ac->ac_multicnt >= AQ_HW_MAC_NUM ||
2730 	    ISSET(ifp->if_flags, IFF_ALLMULTI)) {
2731 		AQ_WRITE_REG_BIT(sc, RPF_L2BC_REG, RPF_L2BC_PROMISC, 0);
2732 		AQ_WRITE_REG_BIT(sc, RPF_MCAST_FILTER_MASK_REG,
2733 		    RPF_MCAST_FILTER_MASK_ALLMULTI, 1);
2734 		AQ_WRITE_REG_BIT(sc, RPF_MCAST_FILTER_REG(0),
2735 		    RPF_MCAST_FILTER_EN, 1);
2736 	} else if (ac->ac_multicnt == 0) {
2737 		AQ_WRITE_REG_BIT(sc, RPF_L2BC_REG, RPF_L2BC_PROMISC, 0);
2738 		AQ_WRITE_REG_BIT(sc, RPF_MCAST_FILTER_REG(0),
2739 		    RPF_MCAST_FILTER_EN, 0);
2740 	} else {
2741 		idx = AQ_HW_MAC_OWN + 1;
2742 
2743 		/* turn on allmulti while we're rewriting? */
2744 		AQ_WRITE_REG_BIT(sc, RPF_L2BC_REG, RPF_L2BC_PROMISC, 0);
2745 
2746 		ETHER_FIRST_MULTI(step, ac, enm);
2747 		while (enm != NULL) {
2748 			aq_set_mac_addr(sc, idx++, enm->enm_addrlo);
2749 			ETHER_NEXT_MULTI(step, enm);
2750 		}
2751 
2752 		for (; idx < AQ_HW_MAC_NUM; idx++)
2753 			aq_set_mac_addr(sc, idx, NULL);
2754 
2755 		AQ_WRITE_REG_BIT(sc, RPF_MCAST_FILTER_MASK_REG,
2756 		    RPF_MCAST_FILTER_MASK_ALLMULTI, 0);
2757 		AQ_WRITE_REG_BIT(sc, RPF_MCAST_FILTER_REG(0),
2758 		    RPF_MCAST_FILTER_EN, 1);
2759 	}
2760 }
2761 
2762 int
2763 aq_dmamem_alloc(struct aq_softc *sc, struct aq_dmamem *aqm,
2764     bus_size_t size, u_int align)
2765 {
2766 	aqm->aqm_size = size;
2767 
2768 	if (bus_dmamap_create(sc->sc_dmat, aqm->aqm_size, 1,
2769 	    aqm->aqm_size, 0,
2770 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
2771 	    &aqm->aqm_map) != 0)
2772 		return (1);
2773 	if (bus_dmamem_alloc(sc->sc_dmat, aqm->aqm_size,
2774 	    align, 0, &aqm->aqm_seg, 1, &aqm->aqm_nsegs,
2775 	    BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
2776 		goto destroy;
2777 	if (bus_dmamem_map(sc->sc_dmat, &aqm->aqm_seg, aqm->aqm_nsegs,
2778 	    aqm->aqm_size, &aqm->aqm_kva, BUS_DMA_WAITOK) != 0)
2779 		goto free;
2780 	if (bus_dmamap_load(sc->sc_dmat, aqm->aqm_map, aqm->aqm_kva,
2781 	    aqm->aqm_size, NULL, BUS_DMA_WAITOK) != 0)
2782 		goto unmap;
2783 
2784 	return (0);
2785 unmap:
2786 	bus_dmamem_unmap(sc->sc_dmat, aqm->aqm_kva, aqm->aqm_size);
2787 free:
2788 	bus_dmamem_free(sc->sc_dmat, &aqm->aqm_seg, 1);
2789 destroy:
2790 	bus_dmamap_destroy(sc->sc_dmat, aqm->aqm_map);
2791 	return (1);
2792 }
2793 
2794 void
2795 aq_dmamem_free(struct aq_softc *sc, struct aq_dmamem *aqm)
2796 {
2797 	bus_dmamap_unload(sc->sc_dmat, aqm->aqm_map);
2798 	bus_dmamem_unmap(sc->sc_dmat, aqm->aqm_kva, aqm->aqm_size);
2799 	bus_dmamem_free(sc->sc_dmat, &aqm->aqm_seg, 1);
2800 	bus_dmamap_destroy(sc->sc_dmat, aqm->aqm_map);
2801 }
2802