xref: /openbsd-src/sys/dev/pci/if_aq_pci.c (revision f0cad5c99b2bb814c8d45ed0f44a97bdf676a560)
1 /* $OpenBSD: if_aq_pci.c,v 1.1 2021/09/02 10:11:21 mlarkin Exp $ */
2 /*	$NetBSD: if_aq.c,v 1.27 2021/06/16 00:21:18 riastradh Exp $	*/
3 
4 /*
5  * Copyright (c) 2021 Jonathan Matthew <jonathan@d14n.org>
6  * Copyright (c) 2021 Mike Larkin <mlarkin@openbsd.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 /**
22  * aQuantia Corporation Network Driver
23  * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
24  *
25  * Redistribution and use in source and binary forms, with or without
26  * modification, are permitted provided that the following conditions
27  * are met:
28  *
29  *   (1) Redistributions of source code must retain the above
30  *   copyright notice, this list of conditions and the following
31  *   disclaimer.
32  *
33  *   (2) Redistributions in binary form must reproduce the above
34  *   copyright notice, this list of conditions and the following
35  *   disclaimer in the documentation and/or other materials provided
36  *   with the distribution.
37  *
38  *   (3) The name of the author may not be used to endorse or promote
39  *   products derived from this software without specific prior
40  *   written permission.
41  *
42  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
43  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
44  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
45  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
46  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
47  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
48  * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
49  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
50  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
51  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
52  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
53  *
54  */
55 
56 /*-
57  * Copyright (c) 2020 Ryo Shimizu <ryo@nerv.org>
58  * All rights reserved.
59  *
60  * Redistribution and use in source and binary forms, with or without
61  * modification, are permitted provided that the following conditions
62  * are met:
63  * 1. Redistributions of source code must retain the above copyright
64  *    notice, this list of conditions and the following disclaimer.
65  * 2. Redistributions in binary form must reproduce the above copyright
66  *    notice, this list of conditions and the following disclaimer in the
67  *    documentation and/or other materials provided with the distribution.
68  *
69  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
70  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
71  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
72  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
73  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
74  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
75  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
76  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
77  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
78  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
79  * POSSIBILITY OF SUCH DAMAGE.
80  */
81 #include <sys/types.h>
82 #include <sys/device.h>
83 #include <sys/param.h>
84 #include <sys/kernel.h>
85 #include <sys/sockio.h>
86 #include <sys/systm.h>
87 
88 #include <net/if.h>
89 #include <net/if_media.h>
90 
91 #include <netinet/in.h>
92 #include <netinet/if_ether.h>
93 
94 #include <dev/pci/pcireg.h>
95 #include <dev/pci/pcivar.h>
96 #include <dev/pci/pcidevs.h>
97 
98 /* #define AQ_DEBUG 1 */
99 #ifdef AQ_DEBUG
100 #define DPRINTF(x) printf x
101 #else
102 #define DPRINTF(x)
103 #endif /* AQ_DEBUG */
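/*
 * Note on usage: DPRINTF takes a single parenthesized argument list, so
 * calls need double parentheses, e.g.
 *
 *	DPRINTF(("%s: efuse_shadow_addr = %x\n", DEVNAME(sc), addr));
 *
 * With AQ_DEBUG undefined the whole argument list compiles away.
 */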
104 
105 #define DEVNAME(_s)	((_s)->sc_dev.dv_xname)
106 
107 #define AQ_BAR0 				0x10
108 #define AQ_MAXQ 				8
109 
110 #define AQ_TXD_NUM 				2048
111 #define AQ_RXD_NUM 				2048
112 
113 #define AQ_TX_MAX_SEGMENTS			1	/* XXX */
114 
115 #define AQ_LINKSTAT_IRQ				31
116 
117 #define RPF_ACTION_HOST				1
118 
119 #define AQ_FW_SOFTRESET_REG			0x0000
120 #define  AQ_FW_SOFTRESET_DIS			(1 << 14)
121 #define  AQ_FW_SOFTRESET_RESET			(1 << 15)
122 #define AQ_FW_VERSION_REG			0x0018
123 #define AQ_HW_REVISION_REG			0x001c
124 #define AQ_GLB_NVR_INTERFACE1_REG		0x0100
125 #define AQ_FW_MBOX_CMD_REG			0x0200
126 #define  AQ_FW_MBOX_CMD_EXECUTE			0x00008000
127 #define  AQ_FW_MBOX_CMD_BUSY			0x00000100
128 #define AQ_FW_MBOX_ADDR_REG			0x0208
129 #define AQ_FW_MBOX_VAL_REG			0x020C
130 #define AQ_FW_GLB_CPU_SEM_REG(i)		(0x03a0 + (i) * 4)
131 #define AQ_FW_SEM_RAM_REG			AQ_FW_GLB_CPU_SEM_REG(2)
132 #define AQ_FW_GLB_CTL2_REG			0x0404
133 #define AQ_GLB_GENERAL_PROVISIONING9_REG	0x0520
134 #define AQ_GLB_NVR_PROVISIONING2_REG		0x0534
135 #define AQ_INTR_STATUS_REG			0x2000  /* intr status */
136 #define AQ_INTR_STATUS_CLR_REG			0x2050  /* intr status clear */
137 #define AQ_INTR_MASK_REG			0x2060	/* intr mask set */
138 #define AQ_INTR_MASK_CLR_REG			0x2070	/* intr mask clear */
139 #define AQ_INTR_AUTOMASK_REG			0x2090
140 
141 /* AQ_INTR_IRQ_MAP_TXRX_REG 0x2100-0x2140 */
142 #define AQ_INTR_IRQ_MAP_TXRX_REG(i)		(0x2100 + ((i) / 2) * 4)
143 #define AQ_INTR_IRQ_MAP_TX_REG(i)		AQ_INTR_IRQ_MAP_TXRX_REG(i)
144 #define  AQ_INTR_IRQ_MAP_TX_IRQMAP(i)		(0x1FU << (((i) & 1) ? 16 : 24))
145 #define  AQ_INTR_IRQ_MAP_TX_EN(i)		(1U << (((i) & 1) ? 23 : 31))
146 #define AQ_INTR_IRQ_MAP_RX_REG(i)		AQ_INTR_IRQ_MAP_TXRX_REG(i)
147 #define  AQ_INTR_IRQ_MAP_RX_IRQMAP(i)		(0x1FU << (((i) & 1) ? 0 : 8))
148 #define  AQ_INTR_IRQ_MAP_RX_EN(i)		(1U << (((i) & 1) ? 7 : 15))
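/*
 * Each 32-bit map register covers two rings, as encoded by the macros
 * above: for an even ring index the TX vector number sits in bits 28:24
 * (enable bit 31) and the RX vector in bits 12:8 (enable bit 15); for an
 * odd index the TX fields move to bits 20:16/23 and the RX fields to
 * bits 4:0/7.
 */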
149 
150 /* AQ_GEN_INTR_MAP_REG[AQ_RINGS_NUM] 0x2180-0x2200 */
151 #define AQ_GEN_INTR_MAP_REG(i)			(0x2180 + (i) * 4)
152 #define  AQ_B0_ERR_INT				8U
153 
154 #define AQ_INTR_CTRL_REG			0x2300
155 #define  AQ_INTR_CTRL_IRQMODE			((1 << 0) | (1 << 1))
156 #define AQ_INTR_CTRL_IRQMODE_LEGACY		0
157 #define AQ_INTR_CTRL_IRQMODE_MSI		1
158 #define AQ_INTR_CTRL_IRQMODE_MSIX		2
159 #define  AQ_INTR_CTRL_MULTIVEC			(1 << 2)
160 #define  AQ_INTR_CTRL_RESET_DIS			(1 << 29)
161 #define  AQ_INTR_CTRL_RESET_IRQ			(1 << 31)
162 #define AQ_MBOXIF_POWER_GATING_CONTROL_REG	0x32a8
163 
164 #define FW_MPI_MBOX_ADDR_REG			0x0360
165 #define FW1X_MPI_INIT1_REG			0x0364
166 #define FW1X_MPI_INIT2_REG			0x0370
167 #define FW1X_MPI_EFUSEADDR_REG			0x0374
168 
169 #define FW2X_MPI_EFUSEADDR_REG			0x0364
170 #define FW2X_MPI_CONTROL_REG			0x0368  /* 64bit */
171 #define FW2X_MPI_STATE_REG			0x0370  /* 64bit */
173 
174 #define FW_BOOT_EXIT_CODE_REG			0x0388
175 #define  RBL_STATUS_DEAD			0x0000dead
176 #define  RBL_STATUS_SUCCESS			0x0000abba
177 #define  RBL_STATUS_FAILURE			0x00000bad
178 #define  RBL_STATUS_HOST_BOOT			0x0000f1a7
179 #define FW_MPI_DAISY_CHAIN_STATUS_REG		0x0704
180 #define AQ_PCI_REG_CONTROL_6_REG		0x1014
181 
182 #define FW_MPI_RESETCTRL_REG			0x4000
183 #define  FW_MPI_RESETCTRL_RESET_DIS		(1 << 29)
184 
185 #define RX_SYSCONTROL_REG			0x5000
186 #define  RX_SYSCONTROL_RESET_DIS		(1 << 29)
187 
188 #define RX_TCP_RSS_HASH_REG			0x5040
189 
190 #define RPF_L2BC_REG				0x5100
191 #define  RPF_L2BC_EN				(1 << 0)
192 #define  RPF_L2BC_PROMISC			(1 << 3)
193 #define  RPF_L2BC_ACTION			0x7000
194 #define  RPF_L2BC_THRESHOLD			0xFFFF0000
195 
196 #define AQ_HW_MAC_OWN				0
197 
198 /* RPF_L2UC_*_REG[34] (actual [38]?) */
199 #define RPF_L2UC_LSW_REG(i)                     (0x5110 + (i) * 8)
200 #define RPF_L2UC_MSW_REG(i)                     (0x5114 + (i) * 8)
201 #define  RPF_L2UC_MSW_MACADDR_HI		0xFFFF
202 #define  RPF_L2UC_MSW_ACTION			0x70000
203 #define  RPF_L2UC_MSW_EN			(1 << 31)
204 #define AQ_HW_MAC_NUM				34
205 
206 /* RPF_MCAST_FILTER_REG[8] 0x5250-0x5270 */
207 #define RPF_MCAST_FILTER_REG(i)			(0x5250 + (i) * 4)
208 #define  RPF_MCAST_FILTER_EN			(1 << 31)
209 #define RPF_MCAST_FILTER_MASK_REG		0x5270
210 #define  RPF_MCAST_FILTER_MASK_ALLMULTI		(1 << 14)
211 
212 #define RPF_VLAN_MODE_REG			0x5280
213 #define  RPF_VLAN_MODE_PROMISC			(1 << 1)
214 #define  RPF_VLAN_MODE_ACCEPT_UNTAGGED		(1 << 2)
215 #define  RPF_VLAN_MODE_UNTAGGED_ACTION		0x38
216 
217 #define RPF_VLAN_TPID_REG                       0x5284
218 #define  RPF_VLAN_TPID_OUTER			0xFFFF0000
219 #define  RPF_VLAN_TPID_INNER			0xFFFF
220 
221 /* RPF_ETHERTYPE_FILTER_REG[AQ_RINGS_NUM] 0x5300-0x5380 */
222 #define RPF_ETHERTYPE_FILTER_REG(i)		(0x5300 + (i) * 4)
223 #define  RPF_ETHERTYPE_FILTER_EN		(1 << 31)
224 
225 /* RPF_L3_FILTER_REG[8] 0x5380-0x53a0 */
226 #define RPF_L3_FILTER_REG(i)			(0x5380 + (i) * 4)
227 #define  RPF_L3_FILTER_L4_EN			(1 << 31)
228 
229 #define RX_FLR_RSS_CONTROL1_REG			0x54c0
230 #define  RX_FLR_RSS_CONTROL1_EN			(1 << 31)
231 
232 #define RPF_RPB_RX_TC_UPT_REG                   0x54c4
233 #define  RPF_RPB_RX_TC_UPT_MASK(i)              (0x00000007 << ((i) * 4))
234 
235 #define RPB_RPF_RX_REG				0x5700
236 #define  RPB_RPF_RX_TC_MODE			(1 << 8)
237 #define  RPB_RPF_RX_FC_MODE			0x30
238 #define  RPB_RPF_RX_BUF_EN			(1 << 0)
239 
240 /* RPB_RXB_BUFSIZE_REG[AQ_TRAFFICCLASS_NUM] 0x5710-0x5790 */
241 #define RPB_RXB_BUFSIZE_REG(i)			(0x5710 + (i) * 0x10)
242 #define  RPB_RXB_BUFSIZE			0x1FF
243 #define RPB_RXB_XOFF_REG(i)			(0x5714 + (i) * 0x10)
244 #define  RPB_RXB_XOFF_EN			(1 << 31)
245 #define  RPB_RXB_XOFF_THRESH_HI                 0x3FFF0000
246 #define  RPB_RXB_XOFF_THRESH_LO                 0x3FFF
247 
248 #define RX_DMA_INT_DESC_WRWB_EN_REG		0x5a30
249 #define  RX_DMA_INT_DESC_WRWB_EN		(1 << 2)
250 #define  RX_DMA_INT_DESC_MODERATE_EN		(1 << 3)
251 
252 #define RX_INTR_MODERATION_CTL_REG(i)		(0x5a40 + (i) * 4)
253 #define  RX_INTR_MODERATION_CTL_EN		(1 << 1)
254 
255 #define RX_DMA_DESC_BASE_ADDRLSW_REG(i)		(0x5b00 + (i) * 0x20)
256 #define RX_DMA_DESC_BASE_ADDRMSW_REG(i)		(0x5b04 + (i) * 0x20)
257 #define RX_DMA_DESC_REG(i)			(0x5b08 + (i) * 0x20)
258 #define  RX_DMA_DESC_LEN			(0x3FF << 3)
259 #define  RX_DMA_DESC_RESET			(1 << 25)
260 #define  RX_DMA_DESC_HEADER_SPLIT		(1 << 28)
261 #define  RX_DMA_DESC_VLAN_STRIP			(1 << 29)
262 #define  RX_DMA_DESC_EN				(1 << 31)
263 #define RX_DMA_DESC_HEAD_PTR_REG(i)		(0x5b0c + (i) * 0x20)
264 #define  RX_DMA_DESC_HEAD_PTR			0xFFF
265 #define RX_DMA_DESC_TAIL_PTR_REG(i)		(0x5b10 + (i) * 0x20)
266 #define RX_DMA_DESC_BUFSIZE_REG(i)		(0x5b18 + (i) * 0x20)
267 #define  RX_DMA_DESC_BUFSIZE_DATA		0x000F
268 #define  RX_DMA_DESC_BUFSIZE_HDR		0x0FF0
269 
270 #define RX_DMA_DCAD_REG(i)			(0x6100 + (i) * 4)
271 #define  RX_DMA_DCAD_CPUID			0xFF
272 #define  RX_DMA_DCAD_PAYLOAD_EN			(1 << 29)
273 #define  RX_DMA_DCAD_HEADER_EN			(1 << 30)
274 #define  RX_DMA_DCAD_DESC_EN			(1 << 31)
275 
276 #define RX_DMA_DCA_REG				0x6180
277 #define  RX_DMA_DCA_EN				(1 << 31)
278 #define  RX_DMA_DCA_MODE			0xF
279 
280 #define TX_SYSCONTROL_REG			0x7000
281 #define  TX_SYSCONTROL_RESET_DIS		(1 << 29)
282 
283 #define TX_TPO2_REG				0x7040
284 #define  TX_TPO2_EN				(1 << 16)
285 
286 #define TPS_DESC_VM_ARB_MODE_REG		0x7300
287 #define  TPS_DESC_VM_ARB_MODE			(1 << 0)
288 #define TPS_DESC_RATE_REG			0x7310
289 #define  TPS_DESC_RATE_TA_RST			(1 << 31)
290 #define  TPS_DESC_RATE_LIM			0x7FF
291 #define TPS_DESC_TC_ARB_MODE_REG		0x7200
292 #define  TPS_DESC_TC_ARB_MODE			0x3
293 #define TPS_DATA_TC_ARB_MODE_REG		0x7100
294 #define  TPS_DATA_TC_ARB_MODE			(1 << 0)
295 
296 /* TPS_DATA_TCT_REG[AQ_TRAFFICCLASS_NUM] 0x7110-0x7130 */
297 #define TPS_DATA_TCT_REG(i)			(0x7110 + (i) * 4)
298 #define  TPS_DATA_TCT_CREDIT_MAX		0xFFF0000
299 #define  TPS_DATA_TCT_WEIGHT			0x1FF
300 /* TPS_DESC_TCT_REG[AQ_TRAFFICCLASS_NUM] 0x7210-0x7230 */
301 #define TPS_DESC_TCT_REG(i)			(0x7210 + (i) * 4)
302 #define  TPS_DESC_TCT_CREDIT_MAX		0xFFF0000
303 #define  TPS_DESC_TCT_WEIGHT			0x1FF
304 
305 #define AQ_HW_TXBUF_MAX         160
306 #define AQ_HW_RXBUF_MAX         320
307 
308 #define THM_LSO_TCP_FLAG1_REG			0x7820
309 #define  THM_LSO_TCP_FLAG1_FIRST		0xFFF
310 #define  THM_LSO_TCP_FLAG1_MID			0xFFF0000
311 #define THM_LSO_TCP_FLAG2_REG			0x7824
312 #define  THM_LSO_TCP_FLAG2_LAST			0xFFF
313 
314 #define TPB_TX_BUF_REG				0x7900
315 #define  TPB_TX_BUF_EN				(1 << 0)
316 #define  TPB_TX_BUF_SCP_INS_EN			(1 << 2)
317 #define  TPB_TX_BUF_TC_MODE_EN			(1 << 8)
318 
319 /* TPB_TXB_BUFSIZE_REG[AQ_TRAFFICCLASS_NUM] 0x7910-7990 */
320 #define TPB_TXB_BUFSIZE_REG(i)			(0x7910 + (i) * 0x10)
321 #define  TPB_TXB_BUFSIZE                        (0xFF)
322 #define TPB_TXB_THRESH_REG(i)                   (0x7914 + (i) * 0x10)
323 #define  TPB_TXB_THRESH_HI                      0x1FFF0000
324 #define  TPB_TXB_THRESH_LO                      0x1FFF
325 
326 #define AQ_HW_TX_DMA_TOTAL_REQ_LIMIT_REG	0x7b20
327 
328 #define TX_DMA_INT_DESC_WRWB_EN_REG		0x7b40
329 #define  TX_DMA_INT_DESC_WRWB_EN		(1 << 1)
330 #define  TX_DMA_INT_DESC_MODERATE_EN		(1 << 4)
331 
332 #define TX_DMA_DESC_BASE_ADDRLSW_REG(i)		(0x7c00 + (i) * 0x40)
333 #define TX_DMA_DESC_BASE_ADDRMSW_REG(i)		(0x7c04 + (i) * 0x40)
334 #define TX_DMA_DESC_REG(i)			(0x7c08 + (i) * 0x40)
335 #define  TX_DMA_DESC_LEN			0x00000FF8
336 #define  TX_DMA_DESC_EN				0x80000000
337 #define TX_DMA_DESC_HEAD_PTR_REG(i)		(0x7c0c + (i) * 0x40)
338 #define  TX_DMA_DESC_HEAD_PTR			0x00000FFF
339 #define TX_DMA_DESC_TAIL_PTR_REG(i)		(0x7c10 + (i) * 0x40)
340 #define TX_DMA_DESC_WRWB_THRESH_REG(i)		(0x7c18 + (i) * 0x40)
341 #define  TX_DMA_DESC_WRWB_THRESH		0x00003F00
342 
343 #define TDM_DCAD_REG(i)				(0x8400 + (i) * 4)
344 #define  TDM_DCAD_CPUID				0x7F
345 #define  TDM_DCAD_CPUID_EN			0x80000000
346 
347 #define TDM_DCA_REG				0x8480
348 #define  TDM_DCA_EN				(1 << 31)
349 #define  TDM_DCA_MODE				0xF
350 
351 #define TX_INTR_MODERATION_CTL_REG(i)		(0x8980 + (i) * 4)
352 #define  TX_INTR_MODERATION_CTL_EN		(1 << 1)
353 
354 #define __LOWEST_SET_BIT(__mask) (((((uint32_t)__mask) - 1) & ((uint32_t)__mask)) ^ ((uint32_t)__mask))
355 #define __SHIFTIN(__x, __mask) ((__x) * __LOWEST_SET_BIT(__mask))
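/*
 * __SHIFTIN() shifts a value into the field described by a mask, e.g.
 * __LOWEST_SET_BIT(0x00f0) is 0x10, so __SHIFTIN(0x3, 0x00f0) yields
 * 0x30.  This mirrors the NetBSD __SHIFTIN() helper, open-coded here.
 */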
356 
357 #if 0
358 #define AQ_READ_REG(sc, reg) \
359 	bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (reg))
360 
361 #endif
362 #define AQ_WRITE_REG(sc, reg, val) \
363 	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))
364 
365 #define AQ_WRITE_REG_BIT(sc, reg, mask, val)                    \
366 	do {                                                    \
367 		uint32_t _v;                                    \
368 		_v = AQ_READ_REG((sc), (reg));                  \
369 		_v &= ~(mask);                                  \
370 		if ((val) != 0)                                 \
371 			_v |= __SHIFTIN((val), (mask));         \
372 		AQ_WRITE_REG((sc), (reg), _v);                  \
373 	} while (/* CONSTCOND */ 0)
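/*
 * AQ_WRITE_REG_BIT() does a read-modify-write of one register field: the
 * masked bits are cleared and, for a non-zero value, __SHIFTIN() places
 * the value at the field's bit position.  For example,
 * AQ_WRITE_REG_BIT(sc, AQ_INTR_CTRL_REG, AQ_INTR_CTRL_IRQMODE,
 * AQ_INTR_CTRL_IRQMODE_MSIX) would write 2 into bits 1:0 (cf. the
 * irqmode setup in aq_hw_init() below).
 */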
374 
375 #define AQ_READ64_REG(sc, reg)					\
376 	((uint64_t)AQ_READ_REG(sc, reg) |			\
377 	(((uint64_t)AQ_READ_REG(sc, (reg) + 4)) << 32))
378 
379 #define AQ_WRITE64_REG(sc, reg, val)				\
380 	do {							\
381 		AQ_WRITE_REG(sc, reg, (uint32_t)val);		\
382 		AQ_WRITE_REG(sc, reg + 4, (uint32_t)(val >> 32)); \
383 	} while (/* CONSTCOND */0)
384 
385 #define WAIT_FOR(expr, us, n, errp)                             \
386 	do {                                                    \
387 		unsigned int _n;                                \
388 		for (_n = n; (!(expr)) && _n != 0; --_n) {      \
389 			delay((us));                            \
390 		}                                               \
391 		if ((errp != NULL)) {                           \
392 			if (_n == 0)                            \
393 				*(errp) = ETIMEDOUT;            \
394 			else                                    \
395 				*(errp) = 0;                    \
396 		}                                               \
397 	} while (/* CONSTCOND */ 0)
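/*
 * WAIT_FOR() polls "expr" up to "n" times, delaying "us" microseconds
 * between polls, and stores 0 or ETIMEDOUT through "errp".  E.g. the
 * WAIT_FOR(..., 1000, 10, &error) in aq_hw_reset() waits at most about
 * 10ms for the IRQ reset bit to clear.  Note that exhausting the poll
 * count is reported as ETIMEDOUT even if the condition became true on
 * the very last poll.
 */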
398 
399 #define FW_VERSION_MAJOR(sc)	(((sc)->sc_fw_version >> 24) & 0xff)
400 #define FW_VERSION_MINOR(sc)	(((sc)->sc_fw_version >> 16) & 0xff)
401 #define FW_VERSION_BUILD(sc)	((sc)->sc_fw_version & 0xffff)
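/*
 * Firmware version words pack major.minor.build as 8/8/16 bits, so e.g.
 * 0x01050006 (AQ_FW_MIN_VERSION later in this file) decodes to 1.5.6.
 */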
402 
403 #define FEATURES_MIPS		0x00000001
404 #define FEATURES_TPO2		0x00000002
405 #define FEATURES_RPF2		0x00000004
406 #define FEATURES_MPI_AQ		0x00000008
407 #define FEATURES_REV_A0		0x10000000
408 #define FEATURES_REV_A		(FEATURES_REV_A0)
409 #define FEATURES_REV_B0		0x20000000
410 #define FEATURES_REV_B1		0x40000000
411 #define FEATURES_REV_B		(FEATURES_REV_B0|FEATURES_REV_B1)
412 
413 /* lock for FW2X_MPI_{CONTROL,STATE}_REG read-modify-write */
414 #define AQ_MPI_LOCK(sc)		mtx_enter(&(sc)->sc_mpi_mutex);
415 #define AQ_MPI_UNLOCK(sc)	mtx_leave(&(sc)->sc_mpi_mutex);
416 
417 #define FW2X_CTRL_10BASET_HD			(1 << 0)
418 #define FW2X_CTRL_10BASET_FD			(1 << 1)
419 #define FW2X_CTRL_100BASETX_HD			(1 << 2)
420 #define FW2X_CTRL_100BASET4_HD			(1 << 3)
421 #define FW2X_CTRL_100BASET2_HD			(1 << 4)
422 #define FW2X_CTRL_100BASETX_FD			(1 << 5)
423 #define FW2X_CTRL_100BASET2_FD			(1 << 6)
424 #define FW2X_CTRL_1000BASET_HD			(1 << 7)
425 #define FW2X_CTRL_1000BASET_FD			(1 << 8)
426 #define FW2X_CTRL_2P5GBASET_FD			(1 << 9)
427 #define FW2X_CTRL_5GBASET_FD			(1 << 10)
428 #define FW2X_CTRL_10GBASET_FD			(1 << 11)
429 #define FW2X_CTRL_RESERVED1			(1ULL << 32)
430 #define FW2X_CTRL_10BASET_EEE			(1ULL << 33)
431 #define FW2X_CTRL_RESERVED2			(1ULL << 34)
432 #define FW2X_CTRL_PAUSE				(1ULL << 35)
433 #define FW2X_CTRL_ASYMMETRIC_PAUSE		(1ULL << 36)
434 #define FW2X_CTRL_100BASETX_EEE			(1ULL << 37)
435 #define FW2X_CTRL_RESERVED3			(1ULL << 38)
436 #define FW2X_CTRL_RESERVED4			(1ULL << 39)
437 #define FW2X_CTRL_1000BASET_FD_EEE		(1ULL << 40)
438 #define FW2X_CTRL_2P5GBASET_FD_EEE		(1ULL << 41)
439 #define FW2X_CTRL_5GBASET_FD_EEE		(1ULL << 42)
440 #define FW2X_CTRL_10GBASET_FD_EEE		(1ULL << 43)
441 #define FW2X_CTRL_RESERVED5			(1ULL << 44)
442 #define FW2X_CTRL_RESERVED6			(1ULL << 45)
443 #define FW2X_CTRL_RESERVED7			(1ULL << 46)
444 #define FW2X_CTRL_RESERVED8			(1ULL << 47)
445 #define FW2X_CTRL_RESERVED9			(1ULL << 48)
446 #define FW2X_CTRL_CABLE_DIAG			(1ULL << 49)
447 #define FW2X_CTRL_TEMPERATURE			(1ULL << 50)
448 #define FW2X_CTRL_DOWNSHIFT			(1ULL << 51)
449 #define FW2X_CTRL_PTP_AVB_EN			(1ULL << 52)
450 #define FW2X_CTRL_MEDIA_DETECT			(1ULL << 53)
451 #define FW2X_CTRL_LINK_DROP			(1ULL << 54)
452 #define FW2X_CTRL_SLEEP_PROXY			(1ULL << 55)
453 #define FW2X_CTRL_WOL				(1ULL << 56)
454 #define FW2X_CTRL_MAC_STOP			(1ULL << 57)
455 #define FW2X_CTRL_EXT_LOOPBACK			(1ULL << 58)
456 #define FW2X_CTRL_INT_LOOPBACK			(1ULL << 59)
457 #define FW2X_CTRL_EFUSE_AGENT			(1ULL << 60)
458 #define FW2X_CTRL_WOL_TIMER			(1ULL << 61)
459 #define FW2X_CTRL_STATISTICS			(1ULL << 62)
460 #define FW2X_CTRL_TRANSACTION_ID		(1ULL << 63)
461 
462 #define FW2X_CTRL_RATE_100M			FW2X_CTRL_100BASETX_FD
463 #define FW2X_CTRL_RATE_1G			FW2X_CTRL_1000BASET_FD
464 #define FW2X_CTRL_RATE_2G5			FW2X_CTRL_2P5GBASET_FD
465 #define FW2X_CTRL_RATE_5G			FW2X_CTRL_5GBASET_FD
466 #define FW2X_CTRL_RATE_10G			FW2X_CTRL_10GBASET_FD
467 #define FW2X_CTRL_RATE_MASK		\
468 	(FW2X_CTRL_RATE_100M |		\
469 	 FW2X_CTRL_RATE_1G |		\
470 	 FW2X_CTRL_RATE_2G5 |		\
471 	 FW2X_CTRL_RATE_5G |		\
472 	 FW2X_CTRL_RATE_10G)
473 #define FW2X_CTRL_EEE_MASK		\
474 	(FW2X_CTRL_10BASET_EEE |	\
475 	 FW2X_CTRL_100BASETX_EEE |	\
476 	 FW2X_CTRL_1000BASET_FD_EEE |	\
477 	 FW2X_CTRL_2P5GBASET_FD_EEE |	\
478 	 FW2X_CTRL_5GBASET_FD_EEE |	\
479 	 FW2X_CTRL_10GBASET_FD_EEE)
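/*
 * These are bit positions shared by the 64-bit FW2X_MPI_CONTROL/STATE
 * register pair and the capabilities words exported by the firmware:
 * bits 0-31 correspond to caps_lo and bits 32-63 to caps_hi of struct
 * aq_fw2x_capabilities (see how aq_fw2x_reset() assembles sc_fw_caps).
 */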
480 
481 enum aq_fw_bootloader_mode {
482 	FW_BOOT_MODE_UNKNOWN = 0,
483 	FW_BOOT_MODE_FLB,
484 	FW_BOOT_MODE_RBL_FLASH,
485 	FW_BOOT_MODE_RBL_HOST_BOOTLOAD
486 };
487 
488 enum aq_media_type {
489 	AQ_MEDIA_TYPE_UNKNOWN = 0,
490 	AQ_MEDIA_TYPE_FIBRE,
491 	AQ_MEDIA_TYPE_TP
492 };
493 
494 enum aq_link_speed {
495 	AQ_LINK_NONE    = 0,
496 	AQ_LINK_100M    = (1 << 0),
497 	AQ_LINK_1G      = (1 << 1),
498 	AQ_LINK_2G5     = (1 << 2),
499 	AQ_LINK_5G      = (1 << 3),
500 	AQ_LINK_10G     = (1 << 4)
501 };
502 
503 #define AQ_LINK_ALL	(AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5 | \
504 			    AQ_LINK_5G | AQ_LINK_10G )
505 #define AQ_LINK_AUTO	AQ_LINK_ALL
506 
507 enum aq_link_eee {
508 	AQ_EEE_DISABLE = 0,
509 	AQ_EEE_ENABLE = 1
510 };
511 
512 enum aq_hw_fw_mpi_state {
513 	MPI_DEINIT      = 0,
514 	MPI_RESET       = 1,
515 	MPI_INIT        = 2,
516 	MPI_POWER       = 4
517 };
518 
519 enum aq_link_fc {
520         AQ_FC_NONE = 0,
521         AQ_FC_RX = (1 << 0),
522         AQ_FC_TX = (1 << 1),
523         AQ_FC_ALL = (AQ_FC_RX | AQ_FC_TX)
524 };
525 
526 struct aq_dmamem {
527 	bus_dmamap_t		aqm_map;
528 	bus_dma_segment_t	aqm_seg;
529 	int			aqm_nsegs;
530 	size_t			aqm_size;
531 	caddr_t			aqm_kva;
532 };
533 
534 #define AQ_DMA_MAP(_aqm)	((_aqm)->aqm_map)
535 #define AQ_DMA_DVA(_aqm)	((_aqm)->aqm_map->dm_segs[0].ds_addr)
536 #define AQ_DMA_KVA(_aqm)	((void *)(_aqm)->aqm_kva)
537 #define AQ_DMA_LEN(_aqm)	((_aqm)->aqm_size)
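/*
 * Convenience accessors for a DMA allocation: AQ_DMA_KVA() is the
 * kernel-visible mapping, AQ_DMA_DVA() the device (bus) address of the
 * first segment, AQ_DMA_MAP()/AQ_DMA_LEN() the dmamap and the requested
 * size.
 */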
538 
539 
540 struct aq_mailbox_header {
541         uint32_t version;
542         uint32_t transaction_id;
543         int32_t error;
544 } __packed __aligned(4);
545 
546 struct aq_hw_stats_s {
547         uint32_t uprc;
548         uint32_t mprc;
549         uint32_t bprc;
550         uint32_t erpt;
551         uint32_t uptc;
552         uint32_t mptc;
553         uint32_t bptc;
554         uint32_t erpr;
555         uint32_t mbtc;
556         uint32_t bbtc;
557         uint32_t mbrc;
558         uint32_t bbrc;
559         uint32_t ubrc;
560         uint32_t ubtc;
561         uint32_t ptc;
562         uint32_t prc;
563         uint32_t dpc;   /* not exists in fw2x_msm_statistics */
564         uint32_t cprc;  /* not exists in fw2x_msm_statistics */
565 } __packed __aligned(4);
566 
567 struct aq_fw2x_capabilities {
568         uint32_t caps_lo;
569         uint32_t caps_hi;
570 } __packed __aligned(4);
571 
572 struct aq_fw2x_msm_statistics {
573 	uint32_t uprc;
574 	uint32_t mprc;
575 	uint32_t bprc;
576 	uint32_t erpt;
577 	uint32_t uptc;
578 	uint32_t mptc;
579 	uint32_t bptc;
580 	uint32_t erpr;
581 	uint32_t mbtc;
582 	uint32_t bbtc;
583 	uint32_t mbrc;
584 	uint32_t bbrc;
585 	uint32_t ubrc;
586 	uint32_t ubtc;
587 	uint32_t ptc;
588 	uint32_t prc;
589 } __packed __aligned(4);
590 
591 struct aq_fw2x_phy_cable_diag_data {
592 	uint32_t lane_data[4];
593 } __packed __aligned(4);
594 
595 struct aq_fw2x_mailbox {		/* struct fwHostInterface */
596 	struct aq_mailbox_header header;
597 	struct aq_fw2x_msm_statistics msm;	/* msmStatistics_t msm; */
598 
599 	uint32_t phy_info1;
600 #define PHYINFO1_FAULT_CODE	__BITS(31,16)
601 #define PHYINFO1_PHY_H_BIT	__BITS(0,15)
602 	uint32_t phy_info2;
603 #define PHYINFO2_TEMPERATURE	__BITS(15,0)
604 #define PHYINFO2_CABLE_LEN	__BITS(23,16)
605 
606 	struct aq_fw2x_phy_cable_diag_data diag_data;
607 	uint32_t reserved[8];
608 
609 	struct aq_fw2x_capabilities caps;
610 
611 	/* ... */
612 } __packed __aligned(4);
613 
614 struct aq_rx_desc_read {
615 	uint64_t		buf_addr;
616 	uint64_t		hdr_addr;
617 } __packed;
618 
619 struct aq_rx_desc_wb {
620 	uint32_t		type;
621 #define AQ_RXDESC_TYPE_RSSTYPE	0x000f
622 #define AQ_RXDESC_TYPE_ETHER	0x0030
623 #define AQ_RXDESC_TYPE_PROTO	0x01c0
624 #define AQ_RXDESC_TYPE_VLAN	(1 << 9)
625 #define AQ_RXDESC_TYPE_VLAN2	(1 << 10)
626 #define AQ_RXDESC_TYPE_DMA_ERR	(1 << 12)
627 #define AQ_RXDESC_TYPE_V4_SUM	(1 << 19)
628 #define AQ_RXDESC_TYPE_TCP_SUM	(1 << 20)
629 	uint32_t		rss_hash;
630 	uint16_t		status;
631 #define AQ_RXDESC_STATUS_DD	(1 << 0)
632 #define AQ_RXDESC_STATUS_EOP	(1 << 1)
633 #define AQ_RXDESC_STATUS_MACERR (1 << 2)
634 #define AQ_RXDESC_STATUS_V4_SUM (1 << 3)
635 #define AQ_RXDESC_STATUS_L4_SUM_ERR (1 << 4)
636 #define AQ_RXDESC_STATUS_L4_SUM_OK (1 << 5)
637 	uint16_t		pkt_len;
638 	uint16_t		next_desc_ptr;
639 	uint16_t		vlan;
640 } __packed;
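/*
 * RX ring entries are posted in aq_rx_desc_read form (buffer and header
 * DMA addresses) and overwritten in place by the hardware with the
 * same-sized aq_rx_desc_wb layout on completion; AQ_RXDESC_STATUS_DD
 * ("descriptor done") marks a completed entry and AQ_RXDESC_STATUS_EOP
 * the last descriptor of a packet.
 */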
641 
642 struct aq_tx_desc {
643 	uint64_t		buf_addr;
644 	uint32_t		ctl1;
645 #define AQ_TXDESC_CTL1_TYPE_TXD	0x00000001
646 #define AQ_TXDESC_CTL1_TYPE_TXC	0x00000002
647 #define AQ_TXDESC_CTL1_BLEN_SHIFT 4
648 #define AQ_TXDESC_CTL1_DD	(1 << 20)
649 #define AQ_TXDESC_CTL1_CMD_EOP	(1 << 21)
650 #define AQ_TXDESC_CTL1_CMD_VLAN	(1 << 22)
651 #define AQ_TXDESC_CTL1_CMD_FCS	(1 << 23)
652 #define AQ_TXDESC_CTL1_CMD_IP4CSUM (1 << 24)
653 #define AQ_TXDESC_CTL1_CMD_L4CSUM (1 << 25)
654 #define AQ_TXDESC_CTL1_CMD_WB	(1 << 27)
655 
656 #define AQ_TXDESC_CTL1_VID_SHIFT 4
657 	uint32_t		ctl2;
658 #define AQ_TXDESC_CTL2_LEN_SHIFT 14
659 #define AQ_TXDESC_CTL2_CTX_EN	(1 << 13)
660 } __packed;
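/*
 * TX descriptor layout, judging by the field names: ctl1 bits 1:0 select
 * a data (TXD) or context (TXC) descriptor, with the buffer length at
 * AQ_TXDESC_CTL1_BLEN_SHIFT and the EOP/VLAN/FCS/checksum/WB command
 * bits above it; ctl2 carries the total packet length at
 * AQ_TXDESC_CTL2_LEN_SHIFT.  AQ_TXDESC_CTL1_DD is set by the hardware on
 * write-back.
 */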
661 
662 struct aq_slot {
663 	bus_dmamap_t		 as_map;
664 	struct mbuf		*as_m;
665 };
666 
667 struct aq_rxring {
668 	struct ifiqueue		*rx_ifiq;
669 	struct aq_dmamem	 rx_mem;
670 	struct aq_slot		*rx_slots;
671 	int			 rx_q;
672 	int			 rx_irq;
673 
674 	struct timeout		 rx_refill;
675 	struct if_rxring	 rx_rxr;
676 	uint32_t		 rx_prod;
677 	uint32_t		 rx_cons;
678 };
679 
680 struct aq_txring {
681 	struct ifqueue		*tx_ifq;
682 	struct aq_dmamem	 tx_mem;
683 	struct aq_slot		*tx_slots;
684 	int			 tx_q;
685 	int			 tx_irq;
686 	uint32_t		 tx_prod;
687 	uint32_t		 tx_cons;
688 };
689 
690 struct aq_queues {
691 	char			 q_name[16];
692 	void			*q_ihc;
693 	struct aq_softc		*q_sc;
694 	int			 q_index;
695 	struct aq_rxring 	 q_rx;
696 	struct aq_txring 	 q_tx;
697 };
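/*
 * One aq_queues bundles an RX and a TX ring; rx_irq/tx_irq are the
 * interrupt mapping slots, assigned as 2 * i and 2 * i + 1 in
 * aq_attach().
 */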
698 
699 
700 struct aq_softc;
701 struct aq_firmware_ops {
702 	int (*reset)(struct aq_softc *);
703 	int (*set_mode)(struct aq_softc *, enum aq_hw_fw_mpi_state,
704 	    enum aq_link_speed, enum aq_link_fc, enum aq_link_eee);
705 	int (*get_mode)(struct aq_softc *, enum aq_hw_fw_mpi_state *,
706 	    enum aq_link_speed *, enum aq_link_fc *, enum aq_link_eee *);
707 	int (*get_stats)(struct aq_softc *, struct aq_hw_stats_s *);
708 };
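/*
 * Two firmware ABIs are supported: aq_fw1x_ops and aq_fw2x_ops below,
 * selected on FW_VERSION_MAJOR() in aq_fw_version_init().  The 1.x ops
 * are still mostly stubs in this version of the driver.
 */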
709 
710 struct aq_softc {
711 	struct device		sc_dev;
712 	uint16_t		sc_product;
713 	uint16_t		sc_revision;
714 	bus_dma_tag_t		sc_dmat;
715 	pci_chipset_tag_t	sc_pc;
716 	pcitag_t		sc_pcitag;
717 	int			sc_nqueues;
718 	struct aq_queues	sc_queues[AQ_MAXQ];
719 	struct intrmap		*sc_intrmap;
720 	void			*sc_ih;
721 	bus_space_handle_t	sc_ioh;
722 	bus_space_tag_t		sc_iot;
723 
724 	uint32_t		sc_mbox_addr;
725 	int			sc_rbl_enabled;
726 	int			sc_fast_start_enabled;
727 	int			sc_flash_present;
728 	uint32_t		sc_fw_version;
729 	const struct		aq_firmware_ops *sc_fw_ops;
730 	uint64_t		sc_fw_caps;
731 	enum aq_media_type	sc_media_type;
732 	enum aq_link_speed	sc_available_rates;
733 	uint32_t		sc_features;
734 	int			sc_linkstat_irq;
735 	struct arpcom		sc_arpcom;
736 	struct ifmedia		sc_media;
737 
738 	struct ether_addr	sc_enaddr;
739 	struct mutex		sc_mpi_mutex;
740 };
741 
742 const struct pci_matchid aq_devices[] = {
743 	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC100 },
744 	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC107 },
745 	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC108 },
746 	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC109 },
747 	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC111 },
748 	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC112 },
749 	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC100S },
750 	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC107S },
751 	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC108S },
752 	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC109S },
753 	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC111S },
754 	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC112S },
755 	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_D100 },
756 	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_D107 },
757 	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_D108 },
758 	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_D109 },
759 };
760 
761 const struct aq_product {
762 	pci_vendor_id_t aq_vendor;
763 	pci_product_id_t aq_product;
764 	enum aq_media_type aq_media_type;
765 	enum aq_link_speed aq_available_rates;
766 } aq_products[] = {
767 {	PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC100,
768 	AQ_MEDIA_TYPE_FIBRE, AQ_LINK_ALL
769 },
770 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC107,
771 	AQ_MEDIA_TYPE_TP, AQ_LINK_ALL
772 },
773 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC108,
774 	AQ_MEDIA_TYPE_TP, AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5 | AQ_LINK_5G
775 },
776 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC109,
777 	AQ_MEDIA_TYPE_TP, AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5
778 },
779 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC111,
780 	AQ_MEDIA_TYPE_TP, AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5 | AQ_LINK_5G
781 },
782 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC112,
783 	AQ_MEDIA_TYPE_TP, AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5
784 },
785 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC100S,
786 	AQ_MEDIA_TYPE_FIBRE, AQ_LINK_ALL
787 },
788 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC107S,
789 	AQ_MEDIA_TYPE_TP, AQ_LINK_ALL
790 },
791 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC108S,
792 	AQ_MEDIA_TYPE_TP, AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5 | AQ_LINK_5G
793 },
794 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC109S,
795 	AQ_MEDIA_TYPE_TP, AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5
796 },
797 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC111S,
798 	AQ_MEDIA_TYPE_TP, AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5 | AQ_LINK_5G
799 },
800 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC112S,
801 	AQ_MEDIA_TYPE_TP, AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5
802 },
803 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_D100,
804 	AQ_MEDIA_TYPE_FIBRE, AQ_LINK_ALL
805 },
806 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_D107,
807 	AQ_MEDIA_TYPE_TP, AQ_LINK_ALL
808 },
809 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_D108,
810 	AQ_MEDIA_TYPE_TP, AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5 | AQ_LINK_5G
811 },
812 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_D109,
813 	AQ_MEDIA_TYPE_TP, AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5
814 }
815 };
816 
817 int	aq_match(struct device *, void *, void *);
818 void	aq_attach(struct device *, struct device *, void *);
819 int	aq_detach(struct device *, int);
820 int	aq_activate(struct device *, int);
821 int	aq_intr(void *);
822 void	aq_global_software_reset(struct aq_softc *);
823 int	aq_fw_reset(struct aq_softc *);
824 int	aq_mac_soft_reset(struct aq_softc *, enum aq_fw_bootloader_mode *);
825 int	aq_mac_soft_reset_rbl(struct aq_softc *, enum aq_fw_bootloader_mode *);
826 int	aq_mac_soft_reset_flb(struct aq_softc *);
827 int	aq_fw_read_version(struct aq_softc *);
828 int	aq_fw_version_init(struct aq_softc *);
829 int	aq_hw_init_ucp(struct aq_softc *);
830 int	aq_fw_downld_dwords(struct aq_softc *, uint32_t, uint32_t *, uint32_t);
831 int	aq_get_mac_addr(struct aq_softc *);
832 int	aq_hw_reset(struct aq_softc *);
833 int	aq_hw_init(struct aq_softc *, int);
834 void	aq_hw_qos_set(struct aq_softc *);
835 void	aq_l3_filter_set(struct aq_softc *);
836 void	aq_hw_init_tx_path(struct aq_softc *);
837 void	aq_hw_init_rx_path(struct aq_softc *);
838 int	aq_set_mac_addr(struct aq_softc *, int, uint8_t *);
839 int	aq_set_linkmode(struct aq_softc *, enum aq_link_speed,
840     enum aq_link_fc, enum aq_link_eee);
841 void	aq_watchdog(struct ifnet *);
842 void	aq_enable_intr(struct aq_softc *, int, int);
843 int	aq_ioctl(struct ifnet *, u_long, caddr_t);
844 int	aq_up(struct aq_softc *);
845 void	aq_down(struct aq_softc *);
846 void	aq_iff(struct aq_softc *);
847 void	aq_start(struct ifqueue *);
848 void	aq_ifmedia_status(struct ifnet *, struct ifmediareq *);
849 int	aq_ifmedia_change(struct ifnet *);
850 void	aq_update_link_status(struct aq_softc *);
851 
852 void	aq_refill(void *);
853 int	aq_rx_fill(struct aq_softc *, struct aq_rxring *);
854 static inline unsigned int aq_rx_fill_slots(struct aq_softc *,
855 	    struct aq_rxring *, uint);
856 
857 int	aq_dmamem_alloc(struct aq_softc *, struct aq_dmamem *,
858 	    bus_size_t, u_int);
859 void	aq_dmamem_zero(struct aq_dmamem *);
860 void	aq_dmamem_free(struct aq_softc *, struct aq_dmamem *);
861 
862 int	aq_fw1x_reset(struct aq_softc *);
863 int	aq_fw1x_get_mode(struct aq_softc *, enum aq_hw_fw_mpi_state *,
864     enum aq_link_speed *, enum aq_link_fc *, enum aq_link_eee *);
865 int	aq_fw1x_set_mode(struct aq_softc *, enum aq_hw_fw_mpi_state,
866     enum aq_link_speed, enum aq_link_fc, enum aq_link_eee);
867 int	aq_fw1x_get_stats(struct aq_softc *, struct aq_hw_stats_s *);
868 
869 int	aq_fw2x_reset(struct aq_softc *);
870 int	aq_fw2x_get_mode(struct aq_softc *, enum aq_hw_fw_mpi_state *,
871     enum aq_link_speed *, enum aq_link_fc *, enum aq_link_eee *);
872 int	aq_fw2x_set_mode(struct aq_softc *, enum aq_hw_fw_mpi_state,
873     enum aq_link_speed, enum aq_link_fc, enum aq_link_eee);
874 int	aq_fw2x_get_stats(struct aq_softc *, struct aq_hw_stats_s *);
875 
876 const struct aq_firmware_ops aq_fw1x_ops = {
877 	.reset = aq_fw1x_reset,
878 	.set_mode = aq_fw1x_set_mode,
879 	.get_mode = aq_fw1x_get_mode,
880 	.get_stats = aq_fw1x_get_stats,
881 };
882 
883 const struct aq_firmware_ops aq_fw2x_ops = {
884 	.reset = aq_fw2x_reset,
885 	.set_mode = aq_fw2x_set_mode,
886 	.get_mode = aq_fw2x_get_mode,
887 	.get_stats = aq_fw2x_get_stats,
888 };
889 
890 struct cfattach aq_ca = {
891 	sizeof(struct aq_softc), aq_match, aq_attach, NULL,
892 	aq_activate
893 };
894 
895 struct cfdriver aq_cd = {
896 	NULL, "aq", DV_IFNET
897 };
898 
899 uint32_t
900 AQ_READ_REG(struct aq_softc *sc, uint32_t reg)
901 {
902 	uint32_t res;
903 
904 	res = bus_space_read_4(sc->sc_iot, sc->sc_ioh, reg);
905 
906 	return res;
907 }
908 
909 
910 int
911 aq_match(struct device *dev, void *match, void *aux)
912 {
913 	return pci_matchbyid((struct pci_attach_args *)aux, aq_devices,
914 	    sizeof(aq_devices) / sizeof(aq_devices[0]));
915 }
916 
917 const struct aq_product *
918 aq_lookup(const struct pci_attach_args *pa)
919 {
920 	unsigned int i;
921 
922 	for (i = 0; i < sizeof(aq_products) / sizeof(aq_products[0]); i++) {
923 		if (PCI_VENDOR(pa->pa_id) == aq_products[i].aq_vendor &&
924 		    PCI_PRODUCT(pa->pa_id) == aq_products[i].aq_product) {
925 			return &aq_products[i];
926 		}
927 	}
928 
929 	return NULL;
930 }
931 
932 void
933 aq_attach(struct device *parent, struct device *self, void *aux)
934 {
935 	struct aq_softc *sc = (struct aq_softc *)self;
936 	struct pci_attach_args *pa = aux;
937 	const struct aq_product *aqp;
938 	pcireg_t command, bar, memtype;
939 	pci_chipset_tag_t pc;
940 	pci_intr_handle_t ih;
941 	int (*isr)(void *);
942 	const char *intrstr;
943 	pcitag_t tag;
944 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
945 	int irqmode;
946 	int i;
947 
948 	mtx_init(&sc->sc_mpi_mutex, IPL_NET);
949 
950 	sc->sc_dmat = pa->pa_dmat;
951 	sc->sc_pc = pc = pa->pa_pc;
952 	sc->sc_pcitag = tag = pa->pa_tag;
953 
954 	command = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
955 	command |= PCI_COMMAND_MASTER_ENABLE;
956 	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command);
957 
958 	sc->sc_product = PCI_PRODUCT(pa->pa_id);
959 	sc->sc_revision = PCI_REVISION(pa->pa_class);
960 
961 	aqp = aq_lookup(pa);
962 
963 	bar = pci_conf_read(pc, tag, AQ_BAR0);
964 	if ((PCI_MAPREG_MEM_ADDR(bar) == 0) ||
965 	    (PCI_MAPREG_TYPE(bar) != PCI_MAPREG_TYPE_MEM)) {
966 		printf("%s: wrong BAR type\n", DEVNAME(sc));
967 		return;
968 	}
969 
970 	memtype = pci_mapreg_type(pc, tag, AQ_BAR0);
971 	if (pci_mapreg_map(pa, AQ_BAR0, memtype, 0, &sc->sc_iot, &sc->sc_ioh,
972 	    NULL, NULL, 0)) {
973 		printf(": failed to map BAR0\n");
974 		return;
975 	}
976 
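	/*
	 * Only a single queue pair is used for now; the per-queue MSI-X
	 * mapping further down is still a placeholder.
	 */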
977 	sc->sc_nqueues = 1;
978 
979 	if (pci_intr_map_msix(pa, 0, &ih) == 0) {
980 		irqmode = AQ_INTR_CTRL_IRQMODE_MSIX;
981 	} else if (pci_intr_map_msi(pa, &ih) == 0) {
982 		irqmode = AQ_INTR_CTRL_IRQMODE_MSI;
983 	} else if (pci_intr_map(pa, &ih) == 0) {
984 		irqmode = AQ_INTR_CTRL_IRQMODE_LEGACY;
985 	} else {
986 		printf(": failed to map interrupt\n");
987 		return;
988 	}
989 
990 	isr = aq_intr;
991 	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih,
992 	    IPL_NET | IPL_MPSAFE, isr, sc, self->dv_xname);
993 	intrstr = pci_intr_string(pa->pa_pc, ih);
994 	if (intrstr)
995 		printf(": %s", intrstr);
996 
997 	if (aq_fw_reset(sc))
998 		return;
999 
1000 	DPRINTF((", FW version 0x%x", sc->sc_fw_version));
1001 
1002 	if (aq_fw_version_init(sc))
1003 		return;
1004 
1005 	if (aq_hw_init_ucp(sc))
1006 		return;
1007 
1008 	if (aq_hw_reset(sc))
1009 		return;
1010 
1011 	if (aq_get_mac_addr(sc))
1012 		return;
1013 
1014 	if (aq_hw_init(sc, irqmode))
1015 		return;
1016 
1017 	sc->sc_media_type = aqp->aq_media_type;
1018 	sc->sc_available_rates = aqp->aq_available_rates;
1019 
1023 	bcopy(sc->sc_enaddr.ether_addr_octet, sc->sc_arpcom.ac_enaddr, 6);
1024 	strlcpy(ifp->if_xname, self->dv_xname, IFNAMSIZ);
1025 	ifp->if_softc = sc;
1026 	ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX;
1027 	ifp->if_xflags = IFXF_MPSAFE;
1028 	ifp->if_ioctl = aq_ioctl;
1029 	ifp->if_qstart = aq_start;
1030 	ifp->if_watchdog = aq_watchdog;
1031 	ifp->if_hardmtu = 9000;
1032 	ifp->if_capabilities = IFCAP_VLAN_MTU;
1033 	ifq_set_maxlen(&ifp->if_snd, AQ_TXD_NUM);
1034 
1035 	ifmedia_init(&sc->sc_media, IFM_IMASK, aq_ifmedia_change,
1036 	    aq_ifmedia_status);
1037 	if (sc->sc_available_rates & AQ_LINK_100M) {
1038 		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_100_TX, 0, NULL);
1039 		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_100_TX|IFM_FDX, 0,
1040 		    NULL);
1041 	}
1042 
1043 	if (sc->sc_available_rates & AQ_LINK_1G) {
1044 		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_1000_T, 0, NULL);
1045 		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_1000_T|IFM_FDX, 0,
1046 		    NULL);
1047 	}
1048 
1049 	if (sc->sc_available_rates & AQ_LINK_2G5) {
1050 		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T, 0, NULL);
1051 		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T | IFM_FDX,
1052 		    0, NULL);
1053 	}
1054 
1055 	if (sc->sc_available_rates & AQ_LINK_5G) {
1056 		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_5000_T, 0, NULL);
1057 		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_5000_T | IFM_FDX,
1058 		    0, NULL);
1059 	}
1060 
1061 	if (sc->sc_available_rates & AQ_LINK_10G) {
1062 		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10G_T, 0, NULL);
1063 		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10G_T | IFM_FDX,
1064 		    0, NULL);
1065 	}
1066 
1067 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1068 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO | IFM_FDX, 0, NULL);
1069 	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
1070 	aq_set_linkmode(sc, AQ_LINK_AUTO, AQ_FC_NONE, AQ_EEE_DISABLE);
1071 
1072 	if_attach(ifp);
1073 	ether_ifattach(ifp);
1074 
1075 	if_attach_iqueues(ifp, sc->sc_nqueues);
1076 	if_attach_queues(ifp, sc->sc_nqueues);
1077 
1078 	for (i = 0; i < sc->sc_nqueues; i++) {
1079 		struct aq_queues *aq = &sc->sc_queues[i];
1080 		struct aq_rxring *rx = &aq->q_rx;
1081 		struct aq_txring *tx = &aq->q_tx;
1082 
1083 		aq->q_sc = sc;
1084 		aq->q_index = i;
1085 		rx->rx_q = i;
1086 		rx->rx_irq = i * 2;
1087 		rx->rx_ifiq = ifp->if_iqs[i];
1088 		ifp->if_iqs[i]->ifiq_softc = aq;
1089 		timeout_set(&rx->rx_refill, aq_refill, rx);
1090 
1091 		tx->tx_q = i;
1092 		tx->tx_irq = rx->rx_irq + 1;
1093 		tx->tx_ifq = ifp->if_ifqs[i];
1094 		ifp->if_ifqs[i]->ifq_softc = aq;
1095 
1096 		if (sc->sc_nqueues > 1) {
1097 			/* map msix */
1098 		}
1099 
1100 		AQ_WRITE_REG(sc, TX_INTR_MODERATION_CTL_REG(i), 0);
1101 		AQ_WRITE_REG(sc, RX_INTR_MODERATION_CTL_REG(i), 0);
1102 	}
1103 
1104 	AQ_WRITE_REG_BIT(sc, TX_DMA_INT_DESC_WRWB_EN_REG,
1105 	    TX_DMA_INT_DESC_WRWB_EN, 1);
1106 	AQ_WRITE_REG_BIT(sc, TX_DMA_INT_DESC_WRWB_EN_REG,
1107 	    TX_DMA_INT_DESC_MODERATE_EN, 0);
1108 	AQ_WRITE_REG_BIT(sc, RX_DMA_INT_DESC_WRWB_EN_REG,
1109 	    RX_DMA_INT_DESC_WRWB_EN, 1);
1110 	AQ_WRITE_REG_BIT(sc, RX_DMA_INT_DESC_WRWB_EN_REG,
1111 	    RX_DMA_INT_DESC_MODERATE_EN, 0);
1112 
1113 	aq_enable_intr(sc, 1, 0);
1114 	printf("\n");
1115 }
1116 
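/*
 * Detect how the MAC firmware boots (RBL if FW_BOOT_EXIT_CODE_REG is
 * non-zero, FLB otherwise), perform the matching soft reset and wait for
 * the firmware to come back up.
 */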
1117 int
1118 aq_fw_reset(struct aq_softc *sc)
1119 {
1120 	uint32_t ver, v, boot_exit_code;
1121 	int i, error;
1122 	enum aq_fw_bootloader_mode mode;
1123 
1124 	mode = FW_BOOT_MODE_UNKNOWN;
1125 
1126 	ver = AQ_READ_REG(sc, AQ_FW_VERSION_REG);
1127 
1128 	for (i = 1000; i > 0; i--) {
1129 		v = AQ_READ_REG(sc, FW_MPI_DAISY_CHAIN_STATUS_REG);
1130 		boot_exit_code = AQ_READ_REG(sc, FW_BOOT_EXIT_CODE_REG);
1131 		if (v != 0x06000000 || boot_exit_code != 0)
1132 			break;
1133 	}
1134 
1135 	if (i <= 0) {
1136 		printf("%s: F/W reset failed. Neither RBL nor FLB started\n",
1137 		    DEVNAME(sc));
1138 		return ETIMEDOUT;
1139 	}
1140 
1141 	sc->sc_rbl_enabled = (boot_exit_code != 0);
1142 
1143 	/*
1144 	 * Having FW version 0 is an indicator that cold start
1145 	 * is in progress. This means two things:
1146 	 * 1) Driver has to wait for FW/HW to finish boot (give up after 500ms)
1147 	 * 2) Driver may skip reset sequence and save time.
1148 	 */
1149 	if (sc->sc_fast_start_enabled && (ver != 0)) {
1150 		error = aq_fw_read_version(sc);
1151 		/* Skip reset as it just completed */
1152 		if (error == 0)
1153 			return 0;
1154 	}
1155 
1156 	error = aq_mac_soft_reset(sc, &mode);
1157 	if (error != 0) {
1158 		printf("%s: MAC reset failed: %d\n", DEVNAME(sc), error);
1159 		return error;
1160 	}
1161 
1162 	switch (mode) {
1163 	case FW_BOOT_MODE_FLB:
1164 		DPRINTF(("%s: FLB> F/W successfully loaded from flash.",
1165 		    DEVNAME(sc)));
1166 		sc->sc_flash_present = 1;
1167 		return aq_fw_read_version(sc);
1168 	case FW_BOOT_MODE_RBL_FLASH:
1169 		DPRINTF(("%s: RBL> F/W loaded from flash. Host Bootload "
1170 		    "disabled.", DEVNAME(sc)));
1171 		sc->sc_flash_present = 1;
1172 		return aq_fw_read_version(sc);
1173 	case FW_BOOT_MODE_UNKNOWN:
1174 		printf("%s: F/W bootload error: unknown bootloader type\n",
1175 		    DEVNAME(sc));
1176 		return ENOTSUP;
1177 	case FW_BOOT_MODE_RBL_HOST_BOOTLOAD:
1178 		printf("%s: RBL> F/W Host Bootload not implemented\n", DEVNAME(sc));
1179 		return ENOTSUP;
1180 	}
1181 
1182 	return ENOTSUP;
1183 }
1184 
1185 int
1186 aq_mac_soft_reset_rbl(struct aq_softc *sc, enum aq_fw_bootloader_mode *mode)
1187 {
1188 	int timo;
1189 
1190 	DPRINTF(("%s: RBL> MAC reset STARTED!\n", DEVNAME(sc)));
1191 
1192 	AQ_WRITE_REG(sc, AQ_FW_GLB_CTL2_REG, 0x40e1);
1193 	AQ_WRITE_REG(sc, AQ_FW_GLB_CPU_SEM_REG(0), 1);
1194 	AQ_WRITE_REG(sc, AQ_MBOXIF_POWER_GATING_CONTROL_REG, 0);
1195 
1196 	/* MAC FW will reload PHY FW if 1E.1000.3 was cleaned - #undone */
1197 	AQ_WRITE_REG(sc, FW_BOOT_EXIT_CODE_REG, RBL_STATUS_DEAD);
1198 
1199 	aq_global_software_reset(sc);
1200 
1201 	AQ_WRITE_REG(sc, AQ_FW_GLB_CTL2_REG, 0x40e0);
1202 
1203 	/* Wait for RBL to finish boot process. */
1204 #define RBL_TIMEOUT_MS	10000
1205 	uint16_t rbl_status;
1206 	for (timo = RBL_TIMEOUT_MS; timo > 0; timo--) {
1207 		rbl_status = AQ_READ_REG(sc, FW_BOOT_EXIT_CODE_REG) & 0xffff;
1208 		if (rbl_status != 0 && rbl_status != RBL_STATUS_DEAD)
1209 			break;
1210 		delay(1000);
1211 	}
1212 
1213 	if (timo <= 0) {
1214 		printf("%s: RBL> RBL restart failed: timeout\n", DEVNAME(sc));
1215 		return EBUSY;
1216 	}
1217 
1218 	switch (rbl_status) {
1219 	case RBL_STATUS_SUCCESS:
1220 		if (mode != NULL)
1221 			*mode = FW_BOOT_MODE_RBL_FLASH;
1222 		DPRINTF(("%s: RBL> reset complete! [Flash]\n", DEVNAME(sc)));
1223 		break;
1224 	case RBL_STATUS_HOST_BOOT:
1225 		if (mode != NULL)
1226 			*mode = FW_BOOT_MODE_RBL_HOST_BOOTLOAD;
1227 		DPRINTF(("%s: RBL> reset complete! [Host Bootload]\n",
1228 		    DEVNAME(sc)));
1229 		break;
1230 	case RBL_STATUS_FAILURE:
1231 	default:
1232 		printf("%s: unknown RBL status 0x%x\n", DEVNAME(sc),
1233 		    rbl_status);
1234 		return EBUSY;
1235 	}
1236 
1237 	return 0;
1238 }
1239 
1240 int
1241 aq_mac_soft_reset_flb(struct aq_softc *sc)
1242 {
1243 	uint32_t v;
1244 	int timo;
1245 
1246 	AQ_WRITE_REG(sc, AQ_FW_GLB_CTL2_REG, 0x40e1);
1247 	/*
1248 	 * Let Felicity hardware complete the SMBUS transaction before
1249 	 * the global software reset.
1250 	 */
1251 	delay(50000);
1252 
1253 	/*
1254 	 * If the SPI burst transaction was interrupted (before running the
1255 	 * script), the global software reset may not clear the SPI interface.
1256 	 * Clean it up manually before the global reset.
1257 	 */
1258 	AQ_WRITE_REG(sc, AQ_GLB_NVR_PROVISIONING2_REG, 0x00a0);
1259 	AQ_WRITE_REG(sc, AQ_GLB_NVR_INTERFACE1_REG, 0x009f);
1260 	AQ_WRITE_REG(sc, AQ_GLB_NVR_INTERFACE1_REG, 0x809f);
1261 	delay(50000);
1262 
1263 	v = AQ_READ_REG(sc, AQ_FW_SOFTRESET_REG);
1264 	v &= ~AQ_FW_SOFTRESET_DIS;
1265 	v |= AQ_FW_SOFTRESET_RESET;
1266 	AQ_WRITE_REG(sc, AQ_FW_SOFTRESET_REG, v);
1267 
1268 	/* Kickstart. */
1269 	AQ_WRITE_REG(sc, AQ_FW_GLB_CTL2_REG, 0x80e0);
1270 	AQ_WRITE_REG(sc, AQ_MBOXIF_POWER_GATING_CONTROL_REG, 0);
1271 	if (!sc->sc_fast_start_enabled)
1272 		AQ_WRITE_REG(sc, AQ_GLB_GENERAL_PROVISIONING9_REG, 1);
1273 
1274 	/*
1275 	 * In case the SPI burst transaction was interrupted (by the MCP reset
1276 	 * above), wait until the hardware completes it.
1277 	 */
1278 	delay(50000);
1279 
1280 	/* MAC Kickstart */
1281 	if (!sc->sc_fast_start_enabled) {
1282 		AQ_WRITE_REG(sc, AQ_FW_GLB_CTL2_REG, 0x180e0);
1283 
1284 		uint32_t flb_status;
1285 		for (timo = 0; timo < 1000; timo++) {
1286 			flb_status = AQ_READ_REG(sc,
1287 			    FW_MPI_DAISY_CHAIN_STATUS_REG) & 0x10;
1288 			if (flb_status != 0)
1289 				break;
1290 			delay(1000);
1291 		}
1292 		if (flb_status == 0) {
1293 			printf("%s: FLB> MAC kickstart failed: timed out\n",
1294 			    DEVNAME(sc));
1295 			return ETIMEDOUT;
1296 		}
1297 		DPRINTF(("%s: FLB> MAC kickstart done, %d ms\n", DEVNAME(sc),
1298 		    timo));
1299 		/* FW reset */
1300 		AQ_WRITE_REG(sc, AQ_FW_GLB_CTL2_REG, 0x80e0);
1301 		/*
1302 		 * Let Felicity hardware complete the SMBUS transaction
1303 		 * before the global software reset.
1304 		 */
1305 		delay(50000);
1306 		sc->sc_fast_start_enabled = true;
1307 	}
1308 	AQ_WRITE_REG(sc, AQ_FW_GLB_CPU_SEM_REG(0), 1);
1309 
1310 	/* PHY Kickstart: #undone */
1311 	aq_global_software_reset(sc);
1312 
1313 	for (timo = 0; timo < 1000; timo++) {
1314 		if (AQ_READ_REG(sc, AQ_FW_VERSION_REG) != 0)
1315 			break;
1316 		delay(10000);
1317 	}
1318 	if (timo >= 1000) {
1319 		printf("%s: FLB> Global Soft Reset failed\n", DEVNAME(sc));
1320 		return ETIMEDOUT;
1321 	}
1322 	DPRINTF(("%s: FLB> F/W restart: %d ms\n", DEVNAME(sc), timo * 10));
1323 
1324 	return 0;
1325 
1326 }
1327 
1328 int
1329 aq_mac_soft_reset(struct aq_softc *sc, enum aq_fw_bootloader_mode *mode)
1330 {
1331 	if (sc->sc_rbl_enabled)
1332 		return aq_mac_soft_reset_rbl(sc, mode);
1333 
1334 	if (mode != NULL)
1335 		*mode = FW_BOOT_MODE_FLB;
1336 	return aq_mac_soft_reset_flb(sc);
1337 }
1338 
1339 void
1340 aq_global_software_reset(struct aq_softc *sc)
1341 {
1342         uint32_t v;
1343 
1344         AQ_WRITE_REG_BIT(sc, RX_SYSCONTROL_REG, RX_SYSCONTROL_RESET_DIS, 0);
1345         AQ_WRITE_REG_BIT(sc, TX_SYSCONTROL_REG, TX_SYSCONTROL_RESET_DIS, 0);
1346         AQ_WRITE_REG_BIT(sc, FW_MPI_RESETCTRL_REG,
1347             FW_MPI_RESETCTRL_RESET_DIS, 0);
1348 
1349         v = AQ_READ_REG(sc, AQ_FW_SOFTRESET_REG);
1350         v &= ~AQ_FW_SOFTRESET_DIS;
1351         v |= AQ_FW_SOFTRESET_RESET;
1352         AQ_WRITE_REG(sc, AQ_FW_SOFTRESET_REG, v);
1353 }
1354 
1355 int
1356 aq_fw_read_version(struct aq_softc *sc)
1357 {
1358 	int i, error = EBUSY;
1359 #define MAC_FW_START_TIMEOUT_MS 10000
1360 	for (i = 0; i < MAC_FW_START_TIMEOUT_MS; i++) {
1361 		sc->sc_fw_version = AQ_READ_REG(sc, AQ_FW_VERSION_REG);
1362 		if (sc->sc_fw_version != 0) {
1363 			error = 0;
1364 			break;
1365 		}
1366 		delay(1000);
1367 	}
1368 	return error;
1369 }
1370 
1371 int
1372 aq_fw_version_init(struct aq_softc *sc)
1373 {
1374 	int error = 0;
1375 	char fw_vers[sizeof("F/W version xxxxx.xxxxx.xxxxx")];
1376 
1377 	if (FW_VERSION_MAJOR(sc) == 1) {
1378 		sc->sc_fw_ops = &aq_fw1x_ops;
1379 	} else if ((FW_VERSION_MAJOR(sc) == 2) || (FW_VERSION_MAJOR(sc) == 3)) {
1380 		sc->sc_fw_ops = &aq_fw2x_ops;
1381 	} else {
1382 		printf("%s: Unsupported F/W version %d.%d.%d\n",
1383 		    DEVNAME(sc),
1384 		    FW_VERSION_MAJOR(sc), FW_VERSION_MINOR(sc),
1385 		    FW_VERSION_BUILD(sc));
1386 		return ENOTSUP;
1387 	}
1388 	snprintf(fw_vers, sizeof(fw_vers), "F/W version %d.%d.%d",
1389 	    FW_VERSION_MAJOR(sc), FW_VERSION_MINOR(sc), FW_VERSION_BUILD(sc));
1390 
1391 	/* detect revision */
1392 	uint32_t hwrev = AQ_READ_REG(sc, AQ_HW_REVISION_REG);
1393 	switch (hwrev & 0x0000000f) {
1394 	case 0x01:
1395 		printf(", revision A0, %s", fw_vers);
1396 		sc->sc_features |= FEATURES_REV_A0 |
1397 		    FEATURES_MPI_AQ | FEATURES_MIPS;
1398 		break;
1399 	case 0x02:
1400 		printf(", revision B0, %s", fw_vers);
1401 		sc->sc_features |= FEATURES_REV_B0 |
1402 		    FEATURES_MPI_AQ | FEATURES_MIPS |
1403 		    FEATURES_TPO2 | FEATURES_RPF2;
1404 		break;
1405 	case 0x0A:
1406 		printf(", revision B1, %s", fw_vers);
1407 		sc->sc_features |= FEATURES_REV_B1 |
1408 		    FEATURES_MPI_AQ | FEATURES_MIPS |
1409 		    FEATURES_TPO2 | FEATURES_RPF2;
1410 		break;
1411 	default:
1412 		printf(", Unknown revision (0x%08x)", hwrev);
1413 		error = ENOTSUP;
1414 		break;
1415 	}
1416 	return error;
1417 }
1418 
1419 int
1420 aq_hw_init_ucp(struct aq_softc *sc)
1421 {
1422 	int timo;
1423 
1424 	if (FW_VERSION_MAJOR(sc) == 1) {
1425 		if (AQ_READ_REG(sc, FW1X_MPI_INIT2_REG) == 0) {
1426 			uint32_t data;
1427 			arc4random_buf(&data, sizeof(data));
1428 			data &= 0xfefefefe;
1429 			data |= 0x02020202;
1430 			AQ_WRITE_REG(sc, FW1X_MPI_INIT2_REG, data);
1431 		}
1432 		AQ_WRITE_REG(sc, FW1X_MPI_INIT1_REG, 0);
1433 	}
1434 
1435 	for (timo = 100; timo > 0; timo--) {
1436 		sc->sc_mbox_addr = AQ_READ_REG(sc, FW_MPI_MBOX_ADDR_REG);
1437 		if (sc->sc_mbox_addr != 0)
1438 			break;
1439 		delay(1000);
1440 	}
1441 
1442 #define AQ_FW_MIN_VERSION	0x01050006
1443 #define AQ_FW_MIN_VERSION_STR	"1.5.6"
1444 	if (sc->sc_fw_version < AQ_FW_MIN_VERSION) {
1445 		printf("%s: atlantic: wrong FW version: " AQ_FW_MIN_VERSION_STR
1446 		    " or later required, this is %d.%d.%d\n",
1447 		    DEVNAME(sc),
1448 		    FW_VERSION_MAJOR(sc),
1449 		    FW_VERSION_MINOR(sc),
1450 		    FW_VERSION_BUILD(sc));
1451 		return ENOTSUP;
1452 	}
1453 
1454 	if (sc->sc_mbox_addr == 0)
1455 		printf("%s: NULL MBOX!!\n", DEVNAME(sc));
1456 
1457 	return 0;
1458 }
1459 
1460 int
1461 aq_hw_reset(struct aq_softc *sc)
1462 {
1463 	int error;
1464 
1465 	/* disable irq */
1466 	AQ_WRITE_REG_BIT(sc, AQ_INTR_CTRL_REG, AQ_INTR_CTRL_RESET_DIS, 0);
1467 
1468 	/* apply */
1469 	AQ_WRITE_REG_BIT(sc, AQ_INTR_CTRL_REG, AQ_INTR_CTRL_RESET_IRQ, 1);
1470 
1471 	/* wait ack 10 times by 1ms */
1472 	WAIT_FOR(
1473 	    (AQ_READ_REG(sc, AQ_INTR_CTRL_REG) & AQ_INTR_CTRL_RESET_IRQ) == 0,
1474 	    1000, 10, &error);
1475 	if (error != 0) {
1476 		printf("%s: atlantic: IRQ reset failed: %d\n", DEVNAME(sc),
1477 		    error);
1478 		return error;
1479 	}
1480 
1481 	return sc->sc_fw_ops->reset(sc);
1482 }
1483 
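/*
 * The MAC address lives in the eFUSE shadow area: it is read through the
 * firmware mailbox as two 32-bit words at dword offset 40, converted to
 * big-endian (network) byte order and copied into sc_enaddr.
 */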
1484 int
1485 aq_get_mac_addr(struct aq_softc *sc)
1486 {
1487 	uint32_t mac_addr[2];
1488 	uint32_t efuse_shadow_addr;
1489 	int err;
1490 
1491 	efuse_shadow_addr = 0;
1492 	if (FW_VERSION_MAJOR(sc) >= 2)
1493 		efuse_shadow_addr = AQ_READ_REG(sc, FW2X_MPI_EFUSEADDR_REG);
1494 	else
1495 		efuse_shadow_addr = AQ_READ_REG(sc, FW1X_MPI_EFUSEADDR_REG);
1496 
1497 	if (efuse_shadow_addr == 0) {
1498 		printf("%s: cannot get efuse addr\n", DEVNAME(sc));
1499 		return ENXIO;
1500 	}
1501 
1502 	DPRINTF(("%s: efuse_shadow_addr = %x\n", DEVNAME(sc), efuse_shadow_addr));
1503 
1504 	memset(mac_addr, 0, sizeof(mac_addr));
1505 	err = aq_fw_downld_dwords(sc, efuse_shadow_addr + (40 * 4),
1506 	    mac_addr, 2);
1507 	if (err < 0)
1508 		return err;
1509 
1510 	if (mac_addr[0] == 0 && mac_addr[1] == 0) {
1511 		printf("%s: mac address not found\n", DEVNAME(sc));
1512 		return ENXIO;
1513 	}
1514 
1515 	DPRINTF(("%s: mac0 %x mac1 %x\n", DEVNAME(sc), mac_addr[0],
1516 	    mac_addr[1]));
1517 
1518 	mac_addr[0] = htobe32(mac_addr[0]);
1519 	mac_addr[1] = htobe32(mac_addr[1]);
1520 
1521 	DPRINTF(("%s: mac0 %x mac1 %x\n", DEVNAME(sc), mac_addr[0],
1522 	    mac_addr[1]));
1523 
1524 	memcpy(sc->sc_enaddr.ether_addr_octet,
1525 	    (uint8_t *)mac_addr, ETHER_ADDR_LEN);
1526 	DPRINTF((": %s", ether_sprintf(sc->sc_enaddr.ether_addr_octet)));
1527 
1528 	return 0;
1529 }
1530 
1531 int
1532 aq_activate(struct device *self, int act)
1533 {
1534 	return 0;
1535 }
1536 
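/*
 * Read "cnt" dwords of firmware/device memory at "addr" via the mailbox
 * interface: take the RAM semaphore, set the source address, then for
 * each dword trigger AQ_FW_MBOX_CMD_EXECUTE and wait for completion (the
 * address register advancing on B1 silicon, the BUSY bit clearing on
 * earlier revisions) before reading AQ_FW_MBOX_VAL_REG.  The semaphore
 * is released when done.
 */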
1537 int
1538 aq_fw_downld_dwords(struct aq_softc *sc, uint32_t addr, uint32_t *p,
1539     uint32_t cnt)
1540 {
1541 	uint32_t v;
1542 	int error = 0;
1543 
1544 	WAIT_FOR(AQ_READ_REG(sc, AQ_FW_SEM_RAM_REG) == 1, 1, 10000, &error);
1545 	if (error != 0) {
1546 		AQ_WRITE_REG(sc, AQ_FW_SEM_RAM_REG, 1);
1547 		v = AQ_READ_REG(sc, AQ_FW_SEM_RAM_REG);
1548 		if (v == 0) {
1549 			printf("%s: %s:%d: timeout\n",
1550 			    DEVNAME(sc), __func__, __LINE__);
1551 			return ETIMEDOUT;
1552 		}
1553 	}
1554 
1555 	AQ_WRITE_REG(sc, AQ_FW_MBOX_ADDR_REG, addr);
1556 
1557 	error = 0;
1558 	for (; cnt > 0 && error == 0; cnt--) {
1559 		/* execute mailbox interface */
1560 		AQ_WRITE_REG_BIT(sc, AQ_FW_MBOX_CMD_REG,
1561 		    AQ_FW_MBOX_CMD_EXECUTE, 1);
1562 		if (sc->sc_features & FEATURES_REV_B1) {
1563 			WAIT_FOR(AQ_READ_REG(sc, AQ_FW_MBOX_ADDR_REG) != addr,
1564 			    1, 1000, &error);
1565 		} else {
1566 			WAIT_FOR((AQ_READ_REG(sc, AQ_FW_MBOX_CMD_REG) &
1567 			    AQ_FW_MBOX_CMD_BUSY) == 0,
1568 			    1, 1000, &error);
1569 		}
1570 		*p++ = AQ_READ_REG(sc, AQ_FW_MBOX_VAL_REG);
1571 		addr += sizeof(uint32_t);
1572 	}
1573 	AQ_WRITE_REG(sc, AQ_FW_SEM_RAM_REG, 1);
1574 
1575 	if (error != 0)
1576 		printf("%s: %s:%d: timeout\n",
1577 		    DEVNAME(sc), __func__, __LINE__);
1578 
1579 	return error;
1580 }
1581 
1582 int
1583 aq_fw2x_reset(struct aq_softc *sc)
1584 {
1585 	struct aq_fw2x_capabilities caps = { 0 };
1586 	int error;
1587 
1588 	error = aq_fw_downld_dwords(sc,
1589 	    sc->sc_mbox_addr + offsetof(struct aq_fw2x_mailbox, caps),
1590 	    (uint32_t *)&caps, sizeof caps / sizeof(uint32_t));
1591 	if (error != 0) {
1592 		printf("%s: fw2x> can't get F/W capabilities mask, error %d\n",
1593 		    DEVNAME(sc), error);
1594 		return error;
1595 	}
1596 	sc->sc_fw_caps = caps.caps_lo | ((uint64_t)caps.caps_hi << 32);
1597 
1598 	DPRINTF(("%s: fw2x> F/W capabilities=0x%llx\n", DEVNAME(sc),
1599 	    sc->sc_fw_caps));
1600 
1601 	return 0;
1602 }
1603 
1604 int
1605 aq_fw1x_reset(struct aq_softc *sc)
1606 {
1607 	printf("%s: unimplemented %s\n", DEVNAME(sc), __func__);
1608 	return 0;
1609 }
1610 
1611 int
1612 aq_fw1x_set_mode(struct aq_softc *sc, enum aq_hw_fw_mpi_state w,
1613     enum aq_link_speed x, enum aq_link_fc y, enum aq_link_eee z)
1614 {
1615 	return 0;
1616 }
1617 
1618 int
1619 aq_fw1x_get_mode(struct aq_softc *sc, enum aq_hw_fw_mpi_state *w,
1620     enum aq_link_speed *x, enum aq_link_fc *y, enum aq_link_eee *z)
1621 {
1622 	return 0;
1623 }
1624 
1625 int
1626 aq_fw1x_get_stats(struct aq_softc *sc, struct aq_hw_stats_s *w)
1627 {
1628 	return 0;
1629 }
1630 
1631 
1632 int
1633 aq_fw2x_get_mode(struct aq_softc *sc, enum aq_hw_fw_mpi_state *modep,
1634     enum aq_link_speed *speedp, enum aq_link_fc *fcp, enum aq_link_eee *eeep)
1635 {
1636 	uint64_t mpi_state, mpi_ctrl;
1637 	enum aq_link_speed speed;
1638 	enum aq_link_fc fc;
1639 
1640 	AQ_MPI_LOCK(sc);
1641 
1642 	mpi_state = AQ_READ64_REG(sc, FW2X_MPI_STATE_REG);
1643 	if (modep != NULL) {
1644 		mpi_ctrl = AQ_READ64_REG(sc, FW2X_MPI_CONTROL_REG);
1645 		if (mpi_ctrl & FW2X_CTRL_RATE_MASK)
1646 			*modep = MPI_INIT;
1647 		else
1648 			*modep = MPI_DEINIT;
1649 	}
1650 
1651 	AQ_MPI_UNLOCK(sc);
1652 
1653 	if (mpi_state & FW2X_CTRL_RATE_10G)
1654 		speed = AQ_LINK_10G;
1655 	else if (mpi_state & FW2X_CTRL_RATE_5G)
1656 		speed = AQ_LINK_5G;
1657 	else if (mpi_state & FW2X_CTRL_RATE_2G5)
1658 		speed = AQ_LINK_2G5;
1659 	else if (mpi_state & FW2X_CTRL_RATE_1G)
1660 		speed = AQ_LINK_1G;
1661 	else if (mpi_state & FW2X_CTRL_RATE_100M)
1662 		speed = AQ_LINK_100M;
1663 	else
1664 		speed = AQ_LINK_NONE;
1665 	if (speedp != NULL)
1666 		*speedp = speed;
1667 
1668 	fc = AQ_FC_NONE;
1669 	if (mpi_state & FW2X_CTRL_PAUSE)
1670 		fc |= AQ_FC_RX;
1671 	if (mpi_state & FW2X_CTRL_ASYMMETRIC_PAUSE)
1672 		fc |= AQ_FC_TX;
1673 	if (fcp != NULL)
1674 		*fcp = fc;
1675 
1676 	if (eeep != NULL)
1677 		*eeep = AQ_EEE_DISABLE;
1678 
1679 	return 0;
1680 }
1681 
1682 int
1683 aq_fw2x_get_stats(struct aq_softc *sc, struct aq_hw_stats_s *w)
1684 {
1685 	return 0;
1686 }
1687 
1688 void
1689 aq_hw_l3_filter_set(struct aq_softc *sc)
1690 {
1691 	int i;
1692 
1693 	/* clear all L3/L4 filters */
1694 	for (i = 0; i < 8; i++) {
1695 		AQ_WRITE_REG_BIT(sc, RPF_L3_FILTER_REG(i),
1696 		    RPF_L3_FILTER_L4_EN, 0);
1697 	}
1698 }
1699 
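/*
 * One-time datapath setup: PCI request limits, TX/RX path defaults,
 * our MAC address, QoS buffers, and the interrupt mode and mappings.
 */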
1700 int
1701 aq_hw_init(struct aq_softc *sc, int irqmode)
1702 {
1703 	uint32_t v;
1704 
1705 	/* Force limit MRRS on RDM/TDM to 2K */
1706 	v = AQ_READ_REG(sc, AQ_PCI_REG_CONTROL_6_REG);
1707 	AQ_WRITE_REG(sc, AQ_PCI_REG_CONTROL_6_REG, (v & ~0x0707) | 0x0404);
1708 
1709 	/*
1710 	 * TX DMA total request limit.  B0 hardware cannot handle more
1711 	 * than (8K - MRRS) of incoming DMA data.
1712 	 * The value 24 is in 256-byte units.
1713 	 */
1714 	AQ_WRITE_REG(sc, AQ_HW_TX_DMA_TOTAL_REQ_LIMIT_REG, 24);
1715 
1716 	aq_hw_init_tx_path(sc);
1717 	aq_hw_init_rx_path(sc);
1718 
1719 	if (aq_set_mac_addr(sc, AQ_HW_MAC_OWN, sc->sc_enaddr.ether_addr_octet))
1720 		return EINVAL;
1721 
1722 	aq_set_linkmode(sc, AQ_LINK_NONE, AQ_FC_NONE, AQ_EEE_DISABLE);
1723 
1724 	aq_hw_qos_set(sc);
1725 
1726 	/* Enable interrupt */
1727 	AQ_WRITE_REG(sc, AQ_INTR_CTRL_REG, AQ_INTR_CTRL_RESET_DIS);
1728 	AQ_WRITE_REG_BIT(sc, AQ_INTR_CTRL_REG, AQ_INTR_CTRL_MULTIVEC, 0);
1729 
1730 	AQ_WRITE_REG_BIT(sc, AQ_INTR_CTRL_REG, AQ_INTR_CTRL_IRQMODE, irqmode);
1731 
1732 	AQ_WRITE_REG(sc, AQ_INTR_AUTOMASK_REG, 0xffffffff);
1733 
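	/* map the hardware error interrupt causes and enable them */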
1734 	AQ_WRITE_REG(sc, AQ_GEN_INTR_MAP_REG(0),
1735 	    ((AQ_B0_ERR_INT << 24) | (1U << 31)) |
1736 	    ((AQ_B0_ERR_INT << 16) | (1 << 23))
1737 	);
1738 
1739 	/* link interrupt */
1740 	sc->sc_linkstat_irq = AQ_LINKSTAT_IRQ;
1741 	AQ_WRITE_REG(sc, AQ_GEN_INTR_MAP_REG(3),
1742 	    (1 << 7) | sc->sc_linkstat_irq);
1743 
1744 	return 0;
1745 }
1746 
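/* Static TX path defaults: TC mode, LSO TCP flags, DCA disabled. */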
1747 void
1748 aq_hw_init_tx_path(struct aq_softc *sc)
1749 {
1750 	/* Tx TC/RSS number config */
1751 	AQ_WRITE_REG_BIT(sc, TPB_TX_BUF_REG, TPB_TX_BUF_TC_MODE_EN, 1);
1752 
1753 	AQ_WRITE_REG_BIT(sc, THM_LSO_TCP_FLAG1_REG,
1754 	    THM_LSO_TCP_FLAG1_FIRST, 0x0ff6);
1755 	AQ_WRITE_REG_BIT(sc, THM_LSO_TCP_FLAG1_REG,
1756 	    THM_LSO_TCP_FLAG1_MID,   0x0ff6);
1757 	AQ_WRITE_REG_BIT(sc, THM_LSO_TCP_FLAG2_REG,
1758 	    THM_LSO_TCP_FLAG2_LAST,  0x0f7f);
1759 
1760 	/* misc */
1761 	AQ_WRITE_REG(sc, TX_TPO2_REG,
1762 	    (sc->sc_features & FEATURES_TPO2) ? TX_TPO2_EN : 0);
1763 	AQ_WRITE_REG_BIT(sc, TDM_DCA_REG, TDM_DCA_EN, 0);
1764 	AQ_WRITE_REG_BIT(sc, TDM_DCA_REG, TDM_DCA_MODE, 0);
1765 
1766 	AQ_WRITE_REG_BIT(sc, TPB_TX_BUF_REG, TPB_TX_BUF_SCP_INS_EN, 1);
1767 }
1768 
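/*
 * Static RX path defaults: clear the RSS, ethertype and L2 filters,
 * accept broadcast (and untagged frames on rev B), DCA disabled.
 */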
1769 void
1770 aq_hw_init_rx_path(struct aq_softc *sc)
1771 {
1772 	int i;
1773 
1774 	/* clear settings */
1775 	AQ_WRITE_REG_BIT(sc, RPB_RPF_RX_REG, RPB_RPF_RX_TC_MODE, 0);
1776 	AQ_WRITE_REG_BIT(sc, RPB_RPF_RX_REG, RPB_RPF_RX_FC_MODE, 0);
1777 	AQ_WRITE_REG(sc, RX_FLR_RSS_CONTROL1_REG, 0);
1778 	for (i = 0; i < 32; i++) {
1779 		AQ_WRITE_REG_BIT(sc, RPF_ETHERTYPE_FILTER_REG(i),
1780 		    RPF_ETHERTYPE_FILTER_EN, 0);
1781 	}
1782 
1783 	/* L2 and Multicast filters */
1784 	for (i = 0; i < AQ_HW_MAC_NUM; i++) {
1785 		AQ_WRITE_REG_BIT(sc, RPF_L2UC_MSW_REG(i), RPF_L2UC_MSW_EN, 0);
1786 		AQ_WRITE_REG_BIT(sc, RPF_L2UC_MSW_REG(i), RPF_L2UC_MSW_ACTION,
1787 		    RPF_ACTION_HOST);
1788 	}
1789 	AQ_WRITE_REG(sc, RPF_MCAST_FILTER_MASK_REG, 0);
1790 	AQ_WRITE_REG(sc, RPF_MCAST_FILTER_REG(0), 0x00010fff);
1791 
1792 	/* Vlan filters */
1793 	AQ_WRITE_REG_BIT(sc, RPF_VLAN_TPID_REG, RPF_VLAN_TPID_OUTER,
1794 	    ETHERTYPE_QINQ);
1795 	AQ_WRITE_REG_BIT(sc, RPF_VLAN_TPID_REG, RPF_VLAN_TPID_INNER,
1796 	    ETHERTYPE_VLAN);
1797 	AQ_WRITE_REG_BIT(sc, RPF_VLAN_MODE_REG, RPF_VLAN_MODE_PROMISC, 0);
1798 
1799 	if (sc->sc_features & FEATURES_REV_B) {
1800 		AQ_WRITE_REG_BIT(sc, RPF_VLAN_MODE_REG,
1801 		    RPF_VLAN_MODE_ACCEPT_UNTAGGED, 1);
1802 		AQ_WRITE_REG_BIT(sc, RPF_VLAN_MODE_REG,
1803 		    RPF_VLAN_MODE_UNTAGGED_ACTION, RPF_ACTION_HOST);
1804 	}
1805 
1806 	AQ_WRITE_REG(sc, RX_TCP_RSS_HASH_REG, 0);
1807 
1808 	AQ_WRITE_REG_BIT(sc, RPF_L2BC_REG, RPF_L2BC_EN, 1);
1809 	AQ_WRITE_REG_BIT(sc, RPF_L2BC_REG, RPF_L2BC_ACTION, RPF_ACTION_HOST);
1810 	AQ_WRITE_REG_BIT(sc, RPF_L2BC_REG, RPF_L2BC_THRESHOLD, 0xffff);
1811 
1812 	AQ_WRITE_REG_BIT(sc, RX_DMA_DCA_REG, RX_DMA_DCA_EN, 0);
1813 	AQ_WRITE_REG_BIT(sc, RX_DMA_DCA_REG, RX_DMA_DCA_MODE, 0);
1814 }
1815 
1816 /* program an L2 address filter entry; index 0 is our own MAC address */
1817 int
1818 aq_set_mac_addr(struct aq_softc *sc, int index, uint8_t *enaddr)
1819 {
1820 	uint32_t h, l;
1821 
1822 	if (index >= AQ_HW_MAC_NUM)
1823 		return EINVAL;
1824 
1825 	if (enaddr == NULL) {
1826 		/* disable */
1827 		AQ_WRITE_REG_BIT(sc,
1828 		    RPF_L2UC_MSW_REG(index), RPF_L2UC_MSW_EN, 0);
1829 		return 0;
1830 	}
1831 
1832 	h = (enaddr[0] <<  8) | (enaddr[1]);
1833 	l = ((uint32_t)enaddr[2] << 24) | (enaddr[3] << 16) |
1834 	    (enaddr[4] <<  8) | (enaddr[5]);
1835 
1836 	/* disable, set, and enable */
1837 	AQ_WRITE_REG_BIT(sc, RPF_L2UC_MSW_REG(index), RPF_L2UC_MSW_EN, 0);
1838 	AQ_WRITE_REG(sc, RPF_L2UC_LSW_REG(index), l);
1839 	AQ_WRITE_REG_BIT(sc, RPF_L2UC_MSW_REG(index),
1840 	    RPF_L2UC_MSW_MACADDR_HI, h);
1841 	AQ_WRITE_REG_BIT(sc, RPF_L2UC_MSW_REG(index), RPF_L2UC_MSW_ACTION, 1);
1842 	AQ_WRITE_REG_BIT(sc, RPF_L2UC_MSW_REG(index), RPF_L2UC_MSW_EN, 1);
1843 
1844 	return 0;
1845 }
1846 
1847 int
1848 aq_get_linkmode(struct aq_softc *sc, enum aq_link_speed *speed,
1849     enum aq_link_fc *fc, enum aq_link_eee *eee)
1850 {
1851 	enum aq_hw_fw_mpi_state mode;
1852 	int error;
1853 
1854 	error = sc->sc_fw_ops->get_mode(sc, &mode, speed, fc, eee);
1855 	if (error != 0)
1856 		return error;
1857 	if (mode != MPI_INIT)
1858 		return ENXIO;
1859 
1860 	return 0;
1861 }
1862 
1863 int
1864 aq_set_linkmode(struct aq_softc *sc, enum aq_link_speed speed,
1865     enum aq_link_fc fc, enum aq_link_eee eee)
1866 {
1867 	return sc->sc_fw_ops->set_mode(sc, MPI_INIT, speed, fc, eee);
1868 }
1869 
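/*
 * Translate the requested link speeds, flow control and EEE settings
 * into FW2X control register bits and hand them to the firmware.
 */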
1870 int
1871 aq_fw2x_set_mode(struct aq_softc *sc, enum aq_hw_fw_mpi_state mode,
1872     enum aq_link_speed speed, enum aq_link_fc fc, enum aq_link_eee eee)
1873 {
1874 	uint64_t mpi_ctrl;
1875 	int error = 0;
1876 
1877 	AQ_MPI_LOCK(sc);
1878 
1879 	mpi_ctrl = AQ_READ64_REG(sc, FW2X_MPI_CONTROL_REG);
1880 
1881 	switch (mode) {
1882 	case MPI_INIT:
1883 		mpi_ctrl &= ~FW2X_CTRL_RATE_MASK;
1884 		if (speed & AQ_LINK_10G)
1885 			mpi_ctrl |= FW2X_CTRL_RATE_10G;
1886 		if (speed & AQ_LINK_5G)
1887 			mpi_ctrl |= FW2X_CTRL_RATE_5G;
1888 		if (speed & AQ_LINK_2G5)
1889 			mpi_ctrl |= FW2X_CTRL_RATE_2G5;
1890 		if (speed & AQ_LINK_1G)
1891 			mpi_ctrl |= FW2X_CTRL_RATE_1G;
1892 		if (speed & AQ_LINK_100M)
1893 			mpi_ctrl |= FW2X_CTRL_RATE_100M;
1894 
1895 		mpi_ctrl &= ~FW2X_CTRL_LINK_DROP;
1896 
1897 		mpi_ctrl &= ~FW2X_CTRL_EEE_MASK;
1898 		if (eee == AQ_EEE_ENABLE)
1899 			mpi_ctrl |= FW2X_CTRL_EEE_MASK;
1900 
1901 		mpi_ctrl &= ~(FW2X_CTRL_PAUSE | FW2X_CTRL_ASYMMETRIC_PAUSE);
1902 		if (fc & AQ_FC_RX)
1903 			mpi_ctrl |= FW2X_CTRL_PAUSE;
1904 		if (fc & AQ_FC_TX)
1905 			mpi_ctrl |= FW2X_CTRL_ASYMMETRIC_PAUSE;
1906 		break;
1907 	case MPI_DEINIT:
1908 		mpi_ctrl &= ~(FW2X_CTRL_RATE_MASK | FW2X_CTRL_EEE_MASK);
1909 		mpi_ctrl &= ~(FW2X_CTRL_PAUSE | FW2X_CTRL_ASYMMETRIC_PAUSE);
1910 		break;
1911 	default:
1912 		printf("%s: fw2x> unknown MPI state %d\n", DEVNAME(sc), mode);
1913 		error = EINVAL;
1914 		goto failure;
1915 	}
1916 	AQ_WRITE64_REG(sc, FW2X_MPI_CONTROL_REG, mpi_ctrl);
1917 
1918  failure:
1919 	AQ_MPI_UNLOCK(sc);
1920 	return error;
1921 }
1922 
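/*
 * Single traffic class QoS setup: arbiter credits plus TX/RX packet
 * buffer sizes with high/low thresholds at 66% and 50%.
 */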
1923 void
1924 aq_hw_qos_set(struct aq_softc *sc)
1925 {
1926 	uint32_t tc = 0;
1927 	uint32_t buff_size;
	int i_priority;
1928 
1929 	/* TPS Descriptor rate init */
1930 	AQ_WRITE_REG_BIT(sc, TPS_DESC_RATE_REG, TPS_DESC_RATE_TA_RST, 0);
1931 	AQ_WRITE_REG_BIT(sc, TPS_DESC_RATE_REG, TPS_DESC_RATE_LIM, 0xa);
1932 
1933 	/* TPS VM init */
1934 	AQ_WRITE_REG_BIT(sc, TPS_DESC_VM_ARB_MODE_REG, TPS_DESC_VM_ARB_MODE, 0);
1935 
1936 	/* TPS TC credits init */
1937 	AQ_WRITE_REG_BIT(sc, TPS_DESC_TC_ARB_MODE_REG, TPS_DESC_TC_ARB_MODE, 0);
1938 	AQ_WRITE_REG_BIT(sc, TPS_DATA_TC_ARB_MODE_REG, TPS_DATA_TC_ARB_MODE, 0);
1939 
1940 	AQ_WRITE_REG_BIT(sc, TPS_DATA_TCT_REG(tc),
1941 	    TPS_DATA_TCT_CREDIT_MAX, 0xfff);
1942 	AQ_WRITE_REG_BIT(sc, TPS_DATA_TCT_REG(tc),
1943 	    TPS_DATA_TCT_WEIGHT, 0x64);
1944 	AQ_WRITE_REG_BIT(sc, TPS_DESC_TCT_REG(tc),
1945 	    TPS_DESC_TCT_CREDIT_MAX, 0x50);
1946 	AQ_WRITE_REG_BIT(sc, TPS_DESC_TCT_REG(tc),
1947 	    TPS_DESC_TCT_WEIGHT, 0x1e);
1948 
1949 	/* Tx buf size */
1950 	tc = 0;
1951 	buff_size = AQ_HW_TXBUF_MAX;
1952 	AQ_WRITE_REG_BIT(sc, TPB_TXB_BUFSIZE_REG(tc), TPB_TXB_BUFSIZE,
1953 	    buff_size);
1954 	AQ_WRITE_REG_BIT(sc, TPB_TXB_THRESH_REG(tc), TPB_TXB_THRESH_HI,
1955 	    (buff_size * (1024 / 32) * 66) / 100);
1956 	AQ_WRITE_REG_BIT(sc, TPB_TXB_THRESH_REG(tc), TPB_TXB_THRESH_LO,
1957 	    (buff_size * (1024 / 32) * 50) / 100);
1958 
1959 	/* QoS Rx buf size per TC */
1960 	tc = 0;
1961 	buff_size = AQ_HW_RXBUF_MAX;
1962 	AQ_WRITE_REG_BIT(sc, RPB_RXB_BUFSIZE_REG(tc), RPB_RXB_BUFSIZE,
1963 	    buff_size);
1964 	AQ_WRITE_REG_BIT(sc, RPB_RXB_XOFF_REG(tc), RPB_RXB_XOFF_EN, 0);
1965 	AQ_WRITE_REG_BIT(sc, RPB_RXB_XOFF_REG(tc), RPB_RXB_XOFF_THRESH_HI,
1966 	    (buff_size * (1024 / 32) * 66) / 100);
1967 	AQ_WRITE_REG_BIT(sc, RPB_RXB_XOFF_REG(tc), RPB_RXB_XOFF_THRESH_LO,
1968 	    (buff_size * (1024 / 32) * 50) / 100);
1969 
1970 	/* QoS 802.1p priority -> TC mapping */
1972 	for (i_priority = 0; i_priority < 8; i_priority++) {
1973 		AQ_WRITE_REG_BIT(sc, RPF_RPB_RX_TC_UPT_REG,
1974 		    RPF_RPB_RX_TC_UPT_MASK(i_priority), 0);
1975 	}
1976 }
1977 
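/*
 * Stop a TX ring.  When restarting, also program its descriptor base,
 * length and interrupt mapping before re-enabling DMA.
 */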
1978 void
1979 aq_txring_reset(struct aq_softc *sc, struct aq_txring *tx, int start)
1980 {
1981 	daddr_t paddr;
1982 
1983 	tx->tx_prod = 0;
1984 	tx->tx_cons = 0;
1985 
1986 	/* empty slots? */
1987 
1988 	AQ_WRITE_REG_BIT(sc, TX_DMA_DESC_REG(tx->tx_q), TX_DMA_DESC_EN, 0);
1989 
1990 	if (start == 0)
1991 		return;
1992 
1993 	paddr = AQ_DMA_DVA(&tx->tx_mem);
1994 	AQ_WRITE_REG(sc, TX_DMA_DESC_BASE_ADDRLSW_REG(tx->tx_q), paddr);
1995 	AQ_WRITE_REG(sc, TX_DMA_DESC_BASE_ADDRMSW_REG(tx->tx_q),
1996 	    paddr >> 32);
1997 
1998 	AQ_WRITE_REG_BIT(sc, TX_DMA_DESC_REG(tx->tx_q), TX_DMA_DESC_LEN,
1999 	    AQ_TXD_NUM / 8);
2000 
2001 	tx->tx_prod = AQ_READ_REG(sc, TX_DMA_DESC_TAIL_PTR_REG(tx->tx_q));
2002 	tx->tx_cons = tx->tx_prod;
2003 	AQ_WRITE_REG(sc, TX_DMA_DESC_WRWB_THRESH_REG(tx->tx_q), 0);
2004 
2005 	AQ_WRITE_REG_BIT(sc, AQ_INTR_IRQ_MAP_TX_REG(tx->tx_q),
2006 	    AQ_INTR_IRQ_MAP_TX_IRQMAP(tx->tx_q), tx->tx_irq);
2007 	AQ_WRITE_REG_BIT(sc, AQ_INTR_IRQ_MAP_TX_REG(tx->tx_q),
2008 	    AQ_INTR_IRQ_MAP_TX_EN(tx->tx_q), 1);
2009 
2010 	AQ_WRITE_REG_BIT(sc, TX_DMA_DESC_REG(tx->tx_q), TX_DMA_DESC_EN, 1);
2011 
2012 	AQ_WRITE_REG_BIT(sc, TDM_DCAD_REG(tx->tx_q), TDM_DCAD_CPUID, 0);
2013 	AQ_WRITE_REG_BIT(sc, TDM_DCAD_REG(tx->tx_q), TDM_DCAD_CPUID_EN, 0);
2014 }
2015 
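/*
 * Stop an RX ring.  When restarting, also program its descriptor base,
 * buffer sizes and interrupt mapping before re-enabling DMA.
 */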
2016 void
2017 aq_rxring_reset(struct aq_softc *sc, struct aq_rxring *rx, int start)
2018 {
2019 	daddr_t paddr;
2020 
2021 	AQ_WRITE_REG_BIT(sc, RX_DMA_DESC_REG(rx->rx_q), RX_DMA_DESC_EN, 0);
2022 	/* drain */
2023 
2024 	if (start == 0)
2025 		return;
2026 
2027 	paddr = AQ_DMA_DVA(&rx->rx_mem);
2028 	AQ_WRITE_REG(sc, RX_DMA_DESC_BASE_ADDRLSW_REG(rx->rx_q), paddr);
2029 	AQ_WRITE_REG(sc, RX_DMA_DESC_BASE_ADDRMSW_REG(rx->rx_q),
2030 	    paddr >> 32);
2031 
2032 	AQ_WRITE_REG_BIT(sc, RX_DMA_DESC_REG(rx->rx_q), RX_DMA_DESC_LEN,
2033 	    AQ_RXD_NUM / 8);
2034 
2035 	AQ_WRITE_REG_BIT(sc, RX_DMA_DESC_BUFSIZE_REG(rx->rx_q),
2036 	    RX_DMA_DESC_BUFSIZE_DATA, MCLBYTES / 1024);
2037 	AQ_WRITE_REG_BIT(sc, RX_DMA_DESC_BUFSIZE_REG(rx->rx_q),
2038 	    RX_DMA_DESC_BUFSIZE_HDR, 0);
2039 	AQ_WRITE_REG_BIT(sc, RX_DMA_DESC_REG(rx->rx_q),
2040 	    RX_DMA_DESC_HEADER_SPLIT, 0);
2041 	AQ_WRITE_REG_BIT(sc, RX_DMA_DESC_REG(rx->rx_q),
2042 	    RX_DMA_DESC_VLAN_STRIP, 0);
2043 
2044 	rx->rx_cons = AQ_READ_REG(sc, RX_DMA_DESC_HEAD_PTR_REG(rx->rx_q)) &
2045 	    RX_DMA_DESC_HEAD_PTR;
2046 	AQ_WRITE_REG(sc, RX_DMA_DESC_TAIL_PTR_REG(rx->rx_q), rx->rx_cons);
2047 	rx->rx_prod = rx->rx_cons;
2048 
2049 	AQ_WRITE_REG_BIT(sc, AQ_INTR_IRQ_MAP_RX_REG(rx->rx_q),
2050 	    AQ_INTR_IRQ_MAP_RX_IRQMAP(rx->rx_q), rx->rx_irq);
2051 	AQ_WRITE_REG_BIT(sc, AQ_INTR_IRQ_MAP_RX_REG(rx->rx_q),
2052 	    AQ_INTR_IRQ_MAP_RX_EN(rx->rx_q), 1);
2053 
2054 	AQ_WRITE_REG_BIT(sc, RX_DMA_DCAD_REG(rx->rx_q),
2055 	    RX_DMA_DCAD_CPUID, 0);
2056 	AQ_WRITE_REG_BIT(sc, RX_DMA_DCAD_REG(rx->rx_q),
2057 	    RX_DMA_DCAD_DESC_EN, 0);
2058 	AQ_WRITE_REG_BIT(sc, RX_DMA_DCAD_REG(rx->rx_q),
2059 	    RX_DMA_DCAD_HEADER_EN, 0);
2060 	AQ_WRITE_REG_BIT(sc, RX_DMA_DCAD_REG(rx->rx_q),
2061 	    RX_DMA_DCAD_PAYLOAD_EN, 0);
2062 
2063 	AQ_WRITE_REG_BIT(sc, RX_DMA_DESC_REG(rx->rx_q), RX_DMA_DESC_EN, 1);
2064 }
2065 
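/*
 * Fill up to nslots RX descriptors with fresh clusters and advance the
 * tail pointer; returns the number of slots left unfilled.
 */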
2066 static inline unsigned int
2067 aq_rx_fill_slots(struct aq_softc *sc, struct aq_rxring *rx, uint nslots)
2068 {
2069 	struct aq_rx_desc_read *ring, *rd;
2070 	struct aq_slot *as;
2071 	struct mbuf *m;
2072 	uint p, fills;
2073 
2074 	ring = AQ_DMA_KVA(&rx->rx_mem);
2075 	p = rx->rx_prod;
2076 
2077 	bus_dmamap_sync(sc->sc_dmat, AQ_DMA_MAP(&rx->rx_mem), 0,
2078 	    AQ_DMA_LEN(&rx->rx_mem), BUS_DMASYNC_POSTWRITE);
2079 
2080 	for (fills = 0; fills < nslots; fills++) {
2081 		as = &rx->rx_slots[p];
2082 		rd = &ring[p];
2083 
2084 		m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES + ETHER_ALIGN);
2085 		if (m == NULL)
2086 			break;
2087 
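		/*
		 * Push the payload toward the end of the cluster, offset by
		 * ETHER_ALIGN so the IP header ends up 4-byte aligned.
		 */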
2088 		m->m_data += (m->m_ext.ext_size - MCLBYTES);
2089 		m->m_data += ETHER_ALIGN;
2090 		m->m_len = m->m_pkthdr.len = MCLBYTES;
2091 
2092 		if (bus_dmamap_load_mbuf(sc->sc_dmat, as->as_map, m,
2093 		    BUS_DMA_NOWAIT) != 0) {
2094 			m_freem(m);
2095 			break;
2096 		}
2097 		as->as_m = m;
2098 
2099 		htolem64(&rd->buf_addr, as->as_map->dm_segs[0].ds_addr);
2100 		rd->hdr_addr = 0;
2101 		p++;
2102 		if (p == AQ_RXD_NUM)
2103 			p = 0;
2104 	}
2105 
2106 	bus_dmamap_sync(sc->sc_dmat, AQ_DMA_MAP(&rx->rx_mem), 0,
2107 	    AQ_DMA_LEN(&rx->rx_mem), BUS_DMASYNC_PREWRITE);
2108 
2109 	rx->rx_prod = p;
2110 	AQ_WRITE_REG(sc, RX_DMA_DESC_TAIL_PTR_REG(rx->rx_q), rx->rx_prod);
2111 	return (nslots - fills);
2112 }
2113 
2114 int
2115 aq_rx_fill(struct aq_softc *sc, struct aq_rxring *rx)
2116 {
2117 	u_int slots;
2118 
2119 	slots = if_rxr_get(&rx->rx_rxr, AQ_RXD_NUM);
2120 	if (slots == 0)
2121 		return 1;
2122 
2123 	slots = aq_rx_fill_slots(sc, rx, slots);
2124 	if_rxr_put(&rx->rx_rxr, slots);
2125 	return 0;
2126 }
2127 
2128 void
2129 aq_refill(void *xq)
2130 {
2131 	struct aq_queues *q = xq;
2132 	struct aq_softc *sc = q->q_sc;
2133 
2134 	aq_rx_fill(sc, &q->q_rx);
2135 
2136 	if (if_rxr_inuse(&q->q_rx.rx_rxr) == 0)
2137 		timeout_add(&q->q_rx.rx_refill, 1);
2138 }
2139 
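/*
 * Harvest completed RX descriptors between our consumer index and the
 * hardware head pointer and pass the packets up the stack.
 */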
2140 void
2141 aq_rxeof(struct aq_softc *sc, struct aq_rxring *rx)
2142 {
2143 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2144 	struct aq_rx_desc_wb *rxd;
2145 	struct aq_rx_desc_wb *ring;
2146 	struct aq_slot *as;
2147 	uint32_t end, idx;
2148 	uint16_t pktlen, status;
2149 	uint32_t rxd_type;
2150 	struct mbuf *m;
2151 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
2152 	int rxfree;
2153 
2154 	if (!ISSET(ifp->if_flags, IFF_RUNNING))
2155 		return;
2156 
2157 	end = AQ_READ_REG(sc, RX_DMA_DESC_HEAD_PTR_REG(rx->rx_q)) &
2158 	    RX_DMA_DESC_HEAD_PTR;
2159 
2160 	bus_dmamap_sync(sc->sc_dmat, AQ_DMA_MAP(&rx->rx_mem), 0,
2161 	    AQ_DMA_LEN(&rx->rx_mem), BUS_DMASYNC_POSTREAD);
2162 
2163 	rxfree = 0;
2164 	idx = rx->rx_cons;
2165 	ring = AQ_DMA_KVA(&rx->rx_mem);
2166 	while (idx != end) {
2167 		rxd = &ring[idx];
2168 		as = &rx->rx_slots[idx];
2169 
2170 		bus_dmamap_sync(sc->sc_dmat, as->as_map, 0,
2171 		    as->as_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
2172 		bus_dmamap_unload(sc->sc_dmat, as->as_map);
2173 
2174 		status = lemtoh16(&rxd->status);
2175 		if ((status & AQ_RXDESC_STATUS_DD) == 0)
2176 			break;
2177 
2178 		rxfree++;
2179 		m = as->as_m;
2180 		as->as_m = NULL;
2181 
2182 		pktlen = lemtoh16(&rxd->pkt_len);
2183 		rxd_type = lemtoh32(&rxd->type);
2184 		/* rss hash, vlan */
2185 
2186 		if ((status & AQ_RXDESC_STATUS_MACERR) ||
2187 		    (rxd_type & AQ_RXDESC_TYPE_DMA_ERR)) {
2188 			printf("%s: rx error (status %x type %x)\n",
2189 			    DEVNAME(sc), status, rxd_type);
2190 			m_freem(m);
2191 		} else {
2192 			m->m_pkthdr.len = m->m_len = pktlen;
2193 			ml_enqueue(&ml, m);
2194 		}
2195 
2196 		idx++;
2197 		if (idx == AQ_RXD_NUM)
2198 			idx = 0;
2199 	}
2200 	rx->rx_cons = idx;
2201 
2202 	if (rxfree > 0) {
2203 		if_rxr_put(&rx->rx_rxr, rxfree);
2204 		if (ifiq_input(rx->rx_ifiq, &ml))
2205 			if_rxr_livelocked(&rx->rx_rxr);
2206 
2207 		aq_rx_fill(sc, rx);
2208 		if (if_rxr_inuse(&rx->rx_rxr) == 0)
2209 			timeout_add(&rx->rx_refill, 1);
2210 	}
2211 }
2212 
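/*
 * Reclaim completed TX descriptors up to the hardware head pointer and
 * restart the ifq if it had been marked oactive.
 */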
2213 void
2214 aq_txeof(struct aq_softc *sc, struct aq_txring *tx)
2215 {
2216 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2217 	struct aq_slot *as;
2218 	uint32_t idx, end, free;
2219 
2220 	if (!ISSET(ifp->if_flags, IFF_RUNNING))
2221 		return;
2222 
2223 	idx = tx->tx_cons;
2224 	end = AQ_READ_REG(sc, TX_DMA_DESC_HEAD_PTR_REG(tx->tx_q)) &
2225 	    TX_DMA_DESC_HEAD_PTR;
2226 	free = 0;
2227 
2228 	bus_dmamap_sync(sc->sc_dmat, AQ_DMA_MAP(&tx->tx_mem), 0,
2229 	    AQ_DMA_LEN(&tx->tx_mem), BUS_DMASYNC_POSTREAD);
2230 
2231 	while (idx != end) {
2232 		as = &tx->tx_slots[idx];
2233 		bus_dmamap_unload(sc->sc_dmat, as->as_map);
2234 
2235 		m_freem(as->as_m);
2236 		as->as_m = NULL;
2237 
2238 		idx++;
2239 		if (idx == AQ_TXD_NUM)
2240 			idx = 0;
2241 		free++;
2242 	}
2243 
2244 	tx->tx_cons = idx;
2245 
2246 	if (free != 0) {
2247 		if (ifq_is_oactive(tx->tx_ifq))
2248 			ifq_restart(tx->tx_ifq);
2249 	}
2250 }
2251 
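/*
 * Transmit start routine.  Each packet is defragged into a single
 * mbuf and described by a single TX descriptor.
 */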
2252 void
2253 aq_start(struct ifqueue *ifq)
2254 {
2255 	struct aq_queues *aq = ifq->ifq_softc;
2256 	struct aq_softc *sc = aq->q_sc;
2257 	struct aq_txring *tx = &aq->q_tx;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2258 	struct aq_tx_desc *ring, *txd;
2259 	struct aq_slot *as;
2260 	struct mbuf *m;
2261 	uint32_t idx, free, used, ctl1, ctl2;
2262 
2263 	idx = tx->tx_prod;
2264 	free = tx->tx_cons + AQ_TXD_NUM - tx->tx_prod;
2265 	used = 0;
2266 
2267 	bus_dmamap_sync(sc->sc_dmat, AQ_DMA_MAP(&tx->tx_mem), 0,
2268 	    AQ_DMA_LEN(&tx->tx_mem), BUS_DMASYNC_POSTWRITE);
2269 	ring = (struct aq_tx_desc *)AQ_DMA_KVA(&tx->tx_mem);
2270 
2271 	for (;;) {
2272 		if (used + AQ_TX_MAX_SEGMENTS >= free) {
2273 			ifq_set_oactive(ifq);
2274 			break;
2275 		}
2276 
2277 		m = ifq_dequeue(ifq);
2278 		if (m == NULL)
2279 			break;
2280 
2281 		txd = ring + idx;
2282 		as = &tx->tx_slots[idx];
2283 
2284 		if (m_defrag(m, M_DONTWAIT) != 0) {
2285 			m_freem(m);
2286 			break;
2287 		}
2288 
2289 		if (bus_dmamap_load_mbuf(sc->sc_dmat, as->as_map, m,
2290 		    BUS_DMA_STREAMING | BUS_DMA_NOWAIT) != 0) {
2291 			m_freem(m);
2292 			break;
2293 		}
2294 
2295 		as->as_m = m;
2296 
2297 #if NBPFILTER > 0
2298 		if (ifp->if_bpf)
2299 			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
2300 #endif
2301 		bus_dmamap_sync(sc->sc_dmat, as->as_map, 0,
2302 		    as->as_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2303 
2304 		ctl1 = AQ_TXDESC_CTL1_TYPE_TXD | (as->as_map->dm_segs[0].ds_len <<
2305 		    AQ_TXDESC_CTL1_BLEN_SHIFT) | AQ_TXDESC_CTL1_CMD_FCS |
2306 		    AQ_TXDESC_CTL1_CMD_EOP | AQ_TXDESC_CTL1_CMD_WB;
2307 		ctl2 = m->m_pkthdr.len << AQ_TXDESC_CTL2_LEN_SHIFT;
2308 
2309 		txd->buf_addr = htole64(as->as_map->dm_segs[0].ds_addr);
2310 		txd->ctl1 = htole32(ctl1);
2311 		txd->ctl2 = htole32(ctl2);
2312 
2313 		idx++;
2314 		if (idx == AQ_TXD_NUM)
2315 			idx = 0;
2316 		used++;
2317 	}
2318 
2319 	bus_dmamap_sync(sc->sc_dmat, AQ_DMA_MAP(&tx->tx_mem), 0,
2320 	    AQ_DMA_LEN(&tx->tx_mem), BUS_DMASYNC_PREWRITE);
2321 
2322 	if (used != 0) {
2323 		tx->tx_prod = idx;
2324 		AQ_WRITE_REG(sc, TX_DMA_DESC_TAIL_PTR_REG(tx->tx_q),
2325 		    tx->tx_prod);
2326 	}
2327 }
2328 
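/* Interrupt handler for the single vector: link state, TX and RX. */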
2329 int
2330 aq_intr(void *arg)
2331 {
2332 	struct aq_softc *sc = arg;
2333 	struct aq_queues *aq = &sc->sc_queues[0];
2334 	uint32_t status;
2335 
2336 	status = AQ_READ_REG(sc, AQ_INTR_STATUS_REG);
2337 	AQ_WRITE_REG(sc, AQ_INTR_STATUS_CLR_REG, 0xffffffff);
2338 
2339 	if (status & (1 << sc->sc_linkstat_irq))
2340 		aq_update_link_status(sc);
2341 
2342 	if (status & (1 << aq->q_tx.tx_irq)) {
2343 		aq_txeof(sc, &aq->q_tx);
2344 		AQ_WRITE_REG(sc, AQ_INTR_STATUS_CLR_REG,
2345 		    (1 << aq->q_tx.tx_irq));
2346 	}
2347 	if (status & (1 << aq->q_rx.rx_irq)) {
2348 		aq_rxeof(sc, &aq->q_rx);
2349 		AQ_WRITE_REG(sc, AQ_INTR_STATUS_CLR_REG,
2350 		    (1 << aq->q_rx.rx_irq));
2351 	}
2352 
2353 	return 1;
2354 }
2355 
2356 void
2357 aq_watchdog(struct ifnet *ifp)
2358 {
2359 
2360 }
2361 
2362 void
2363 aq_free_slots(struct aq_softc *sc, struct aq_slot *slots, int allocated,
2364     int total)
2365 {
2366 	struct aq_slot *as;
2367 
2368 	int i = allocated;
2369 	while (i-- > 0) {
2370 		as = &slots[i];
2371 		bus_dmamap_destroy(sc->sc_dmat, as->as_map);
2372 		if (as->as_m != NULL)
2373 			m_freem(as->as_m);
2374 	}
2375 	free(slots, M_DEVBUF, total * sizeof(*as));
2376 }
2377 
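/*
 * Allocate a queue's RX/TX slot arrays, DMA maps and descriptor rings,
 * then reset both rings into their running state.
 */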
2378 int
2379 aq_queue_up(struct aq_softc *sc, struct aq_queues *aq)
2380 {
2381 	struct aq_rxring *rx;
2382 	struct aq_txring *tx;
2383 	struct aq_slot *as;
2384 	int i;
2385 
2386 	rx = &aq->q_rx;
2387 	rx->rx_slots = mallocarray(AQ_RXD_NUM, sizeof(*as), M_DEVBUF,
2388 	    M_WAITOK | M_ZERO);
2389 	if (rx->rx_slots == NULL) {
2390 		printf("%s: failed to allocate rx slots %d\n", DEVNAME(sc),
2391 		    aq->q_index);
2392 		return ENOMEM;
2393 	}
2394 
2395 	for (i = 0; i < AQ_RXD_NUM; i++) {
2396 		as = &rx->rx_slots[i];
2397 		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
2398 		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
2399 		    &as->as_map) != 0) {
2400 			printf("%s: failed to allocate rx dma maps %d\n",
2401 			    DEVNAME(sc), aq->q_index);
2402 			goto destroy_rx_slots;
2403 		}
2404 	}
2405 
2406 	if (aq_dmamem_alloc(sc, &rx->rx_mem, AQ_RXD_NUM *
2407 	    sizeof(struct aq_rx_desc_read), PAGE_SIZE) != 0) {
2408 		printf("%s: unable to allocate rx ring %d\n", DEVNAME(sc),
2409 		    aq->q_index);
2410 		goto destroy_rx_slots;
2411 	}
2412 
2413 	tx = &aq->q_tx;
2414 	tx->tx_slots = mallocarray(AQ_TXD_NUM, sizeof(*as), M_DEVBUF,
2415 	    M_WAITOK | M_ZERO);
2416 	if (tx->tx_slots == NULL) {
2417 		printf("%s: failed to allocate tx slots %d\n", DEVNAME(sc),
2418 		    aq->q_index);
2419 		goto destroy_rx_ring;
2420 	}
2421 
2422 	for (i = 0; i < AQ_TXD_NUM; i++) {
2423 		as = &tx->tx_slots[i];
2424 		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES,
2425 		    AQ_TX_MAX_SEGMENTS, MCLBYTES, 0,
2426 		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
2427 		    &as->as_map) != 0) {
2428 			printf("%s: failed to allocate tx dma maps %d\n",
2429 			    DEVNAME(sc), aq->q_index);
2430 			goto destroy_tx_slots;
2431 		}
2432 	}
2433 
2434 	if (aq_dmamem_alloc(sc, &tx->tx_mem, AQ_TXD_NUM *
2435 	    sizeof(struct aq_tx_desc), PAGE_SIZE) != 0) {
2436 		printf("%s: unable to allocate tx ring %d\n", DEVNAME(sc),
2437 		    aq->q_index);
2438 		goto destroy_tx_slots;
2439 	}
2440 
2441 	aq_txring_reset(sc, tx, 1);
2442 	aq_rxring_reset(sc, rx, 1);
2443 	return 0;
2444 
2445 destroy_tx_slots:
2446 	aq_free_slots(sc, tx->tx_slots, i, AQ_TXD_NUM);
2447 	tx->tx_slots = NULL;
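	/* every rx slot was created, so free them all below */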
2448 	i = AQ_RXD_NUM;
2449 
2450 destroy_rx_ring:
2451 	aq_dmamem_free(sc, &rx->rx_mem);
2452 destroy_rx_slots:
2453 	aq_free_slots(sc, rx->rx_slots, i, AQ_RXD_NUM);
2454 	rx->rx_slots = NULL;
2455 	return ENOMEM;
2456 }
2457 
2458 void
2459 aq_queue_down(struct aq_softc *sc, struct aq_queues *aq)
2460 {
2461 	struct aq_txring *tx;
2462 	struct aq_rxring *rx;
2463 
2464 	tx = &aq->q_tx;
2465 	aq_txring_reset(sc, &aq->q_tx, 0);
2466 	if (tx->tx_slots != NULL) {
2467 		aq_free_slots(sc, tx->tx_slots, AQ_TXD_NUM, AQ_TXD_NUM);
2468 		tx->tx_slots = NULL;
2469 	}
2470 
2471 	aq_dmamem_free(sc, &tx->tx_mem);
2472 
2473 	rx = &aq->q_rx;
2474 	aq_rxring_reset(sc, &aq->q_rx, 0);
2475 	if (rx->rx_slots != NULL) {
2476 		aq_free_slots(sc, rx->rx_slots, AQ_RXD_NUM, AQ_RXD_NUM);
2477 		rx->rx_slots = NULL;
2478 	}
2479 
2480 	aq_dmamem_free(sc, &rx->rx_mem);
2481 }
2482 
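/*
 * Bring the interface up: set up every queue, enable the TX/RX buffers
 * and interrupts, then fill the RX rings.
 */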
2483 int
2484 aq_up(struct aq_softc *sc)
2485 {
2486 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2487 	int i;
2488 
2489 	for (i = 0; i < sc->sc_nqueues; i++) {
2490 		if (aq_queue_up(sc, &sc->sc_queues[i]) != 0)
2491 			goto downqueues;
2492 	}
2493 
2494 	/* filters? */
2495 	/* enable checksum offload */
2496 
2497 	SET(ifp->if_flags, IFF_RUNNING);
2498 	aq_enable_intr(sc, 1, 1);
2499 	AQ_WRITE_REG_BIT(sc, TPB_TX_BUF_REG, TPB_TX_BUF_EN, 1);
2500 	AQ_WRITE_REG_BIT(sc, RPB_RPF_RX_REG, RPB_RPF_RX_BUF_EN, 1);
2501 
2502 	for (i = 0; i < sc->sc_nqueues; i++) {
2503 		struct aq_queues *aq = &sc->sc_queues[i];
2504 
2505 		if_rxr_init(&aq->q_rx.rx_rxr, 1, AQ_RXD_NUM - 1);
2506 		aq_rx_fill(sc, &aq->q_rx);
2507 
2508 		ifq_clr_oactive(aq->q_tx.tx_ifq);
2509 	}
2510 
2511 	return ENETRESET;
2512 
2513 downqueues:
2514 	for (i = 0; i < sc->sc_nqueues; i++)
2515 		aq_queue_down(sc, &sc->sc_queues[i]);
2516 	return ENOMEM;
2517 }
2518 
2519 void
2520 aq_down(struct aq_softc *sc)
2521 {
2522 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2523 	int i;
2524 
2525 	CLR(ifp->if_flags, IFF_RUNNING);
2526 
2527 	aq_enable_intr(sc, 1, 0);
2528 	intr_barrier(sc->sc_ih);
2529 
2530 	for (i = 0; i < sc->sc_nqueues; i++) {
2531 		/* queue intr barrier? */
2532 		aq_queue_down(sc, &sc->sc_queues[i]);
2533 	}
2534 }
2535 
2536 void
2537 aq_enable_intr(struct aq_softc *sc, int link, int txrx)
2538 {
2539 	uint32_t imask = 0;
2540 	int i;
2541 
2542 	if (txrx) {
2543 		for (i = 0; i < sc->sc_nqueues; i++) {
2544 			imask |= (1 << sc->sc_queues[i].q_tx.tx_irq);
2545 			imask |= (1 << sc->sc_queues[i].q_rx.rx_irq);
2546 		}
2547 	}
2548 
2549 	if (link)
2550 		imask |= (1 << sc->sc_linkstat_irq);
2551 
2552 	AQ_WRITE_REG(sc, AQ_INTR_MASK_REG, imask);
2553 	AQ_WRITE_REG(sc, AQ_INTR_STATUS_CLR_REG, 0xffffffff);
2554 }
2555 
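/* Report the current link speed and pause state via ifmedia. */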
2556 void
2557 aq_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2558 {
2559 	struct aq_softc *aq = ifp->if_softc;
2560 	enum aq_link_speed speed;
2561 	enum aq_link_fc fc;
2562 	int media;
2563 	int flow;
2564 
2565 	if (aq_get_linkmode(aq, &speed, &fc, NULL) != 0)
2566 		return;
2567 
2568 	switch (speed) {
2569 	case AQ_LINK_10G:
2570 		media = IFM_10G_T;
2571 		break;
2572 	case AQ_LINK_5G:
2573 		media = IFM_5000_T;
2574 		break;
2575 	case AQ_LINK_2G5:
2576 		media = IFM_2500_T;
2577 		break;
2578 	case AQ_LINK_1G:
2579 		media = IFM_1000_T;
2580 		break;
2581 	case AQ_LINK_100M:
2582 		media = IFM_100_TX;
2583 		break;
2584 	case AQ_LINK_NONE:
2585 		media = 0;
2586 		break;
2587 	}
2588 
2589 	flow = 0;
2590 	if (fc & AQ_FC_RX)
2591 		flow |= IFM_ETH_RXPAUSE;
2592 	if (fc & AQ_FC_TX)
2593 		flow |= IFM_ETH_TXPAUSE;
2594 
2595 	ifmr->ifm_status = IFM_AVALID;
2596 	if (speed != AQ_LINK_NONE) {
2597 		ifmr->ifm_status |= IFM_ACTIVE;
2598 		ifmr->ifm_active = IFM_ETHER | IFM_AUTO | media | flow;
2599 	}
2600 }
2601 
2602 int
2603 aq_ifmedia_change(struct ifnet *ifp)
2604 {
2605 	struct aq_softc *sc = ifp->if_softc;
2606 	enum aq_link_speed rate = AQ_LINK_NONE;
2607 	enum aq_link_fc fc = AQ_FC_NONE;
2608 
2609 	if (IFM_TYPE(sc->sc_media.ifm_media) != IFM_ETHER)
2610 		return EINVAL;
2611 
2612 	switch (IFM_SUBTYPE(sc->sc_media.ifm_media)) {
2613 	case IFM_AUTO:
2614 		rate = AQ_LINK_AUTO;
2615 		break;
2616 	case IFM_NONE:
2617 		rate = AQ_LINK_NONE;
2618 		break;
2619 	case IFM_100_TX:
2620 		rate = AQ_LINK_100M;
2621 		break;
2622 	case IFM_1000_T:
2623 		rate = AQ_LINK_1G;
2624 		break;
2625 	case IFM_2500_T:
2626 		rate = AQ_LINK_2G5;
2627 		break;
2628 	case IFM_5000_T:
2629 		rate = AQ_LINK_5G;
2630 		break;
2631 	case IFM_10G_T:
2632 		rate = AQ_LINK_10G;
2633 		break;
2634 	default:
2635 		return ENODEV;
2636 	}
2637 
2638 	if (sc->sc_media.ifm_media & IFM_FLOW)
2639 		fc = AQ_FC_ALL;
2640 
2641 	return aq_set_linkmode(sc, rate, fc, AQ_EEE_DISABLE);
2642 }
2643 
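/* Propagate the firmware's link state into the kernel link state. */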
2644 void
2645 aq_update_link_status(struct aq_softc *sc)
2646 {
2647 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2648 	enum aq_link_speed speed;
2649 	enum aq_link_fc fc;
2650 
2651 	if (aq_get_linkmode(sc, &speed, &fc, NULL) != 0)
2652 		return;
2653 
2654 	if (speed == AQ_LINK_NONE) {
2655 		if (ifp->if_link_state != LINK_STATE_DOWN) {
2656 			ifp->if_link_state = LINK_STATE_DOWN;
2657 			if_link_state_change(ifp);
2658 		}
2659 	} else {
2660 		if (ifp->if_link_state != LINK_STATE_FULL_DUPLEX) {
2661 			ifp->if_link_state = LINK_STATE_FULL_DUPLEX;
2662 			if_link_state_change(ifp);
2663 		}
2664 	}
2665 }
2666 
2667 
2668 int
2669 aq_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2670 {
2671 	struct aq_softc *sc = ifp->if_softc;
2672 	struct ifreq *ifr = (struct ifreq *)data;
2673 	int error = 0, s;
2674 
2675 	s = splnet();
2676 
2677 	switch (cmd) {
2678 	case SIOCSIFADDR:
2679 		ifp->if_flags |= IFF_UP;
2680 		if ((ifp->if_flags & IFF_RUNNING) == 0)
2681 			error = aq_up(sc);
2682 		break;
2683 	case SIOCSIFFLAGS:
2684 		if (ifp->if_flags & IFF_UP) {
2685 			if (ifp->if_flags & IFF_RUNNING)
2686 				error = ENETRESET;
2687 			else
2688 				error = aq_up(sc);
2689 		} else {
2690 			if (ifp->if_flags & IFF_RUNNING)
2691 				aq_down(sc);
2692 		}
2693 		break;
2694 	case SIOCSIFMEDIA:
2695 	case SIOCGIFMEDIA:
2696 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
2697 		break;
2698 	default:
2699 		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
2700 	}
2701 
2702 	if (error == ENETRESET) {
2703 		if (ifp->if_flags & IFF_RUNNING)
2704 			aq_iff(sc);
2705 		error = 0;
2706 	}
2707 
2708 	splx(s);
2709 	return error;
2710 }
2711 
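/*
 * Program the RX filters to match the interface state: promiscuous,
 * all-multicast, or individual entries in the L2 address filter table.
 */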
2712 void
2713 aq_iff(struct aq_softc *sc)
2714 {
2715 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2716 	struct arpcom *ac = &sc->sc_arpcom;
2717 	struct ether_multi *enm;
2718 	struct ether_multistep step;
2719 	int idx;
2720 
2721 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
2722 		ifp->if_flags |= IFF_ALLMULTI;
2723 		AQ_WRITE_REG_BIT(sc, RPF_L2BC_REG, RPF_L2BC_PROMISC, 1);
2724 	} else if (ac->ac_multicnt >= AQ_HW_MAC_NUM ||
2725 	    ISSET(ifp->if_flags, IFF_ALLMULTI)) {
2726 		AQ_WRITE_REG_BIT(sc, RPF_L2BC_REG, RPF_L2BC_PROMISC, 0);
2727 		AQ_WRITE_REG_BIT(sc, RPF_MCAST_FILTER_MASK_REG,
2728 		    RPF_MCAST_FILTER_MASK_ALLMULTI, 1);
2729 		AQ_WRITE_REG_BIT(sc, RPF_MCAST_FILTER_REG(0),
2730 		    RPF_MCAST_FILTER_EN, 1);
2731 	} else if (ac->ac_multicnt == 0) {
2732 		AQ_WRITE_REG_BIT(sc, RPF_L2BC_REG, RPF_L2BC_PROMISC, 0);
2733 		AQ_WRITE_REG_BIT(sc, RPF_MCAST_FILTER_REG(0),
2734 		    RPF_MCAST_FILTER_EN, 0);
2735 	} else {
2736 		idx = AQ_HW_MAC_OWN + 1;
2737 
2738 		/* turn on allmulti while we're rewriting? */
2739 		AQ_WRITE_REG_BIT(sc, RPF_L2BC_REG, RPF_L2BC_PROMISC, 0);
2740 
2741 		ETHER_FIRST_MULTI(step, ac, enm);
2742 		while (enm != NULL) {
2743 			aq_set_mac_addr(sc, idx++, enm->enm_addrlo);
2744 			ETHER_NEXT_MULTI(step, enm);
2745 		}
2746 
2747 		for (; idx < AQ_HW_MAC_NUM; idx++)
2748 			aq_set_mac_addr(sc, idx, NULL);
2749 
2750 		AQ_WRITE_REG_BIT(sc, RPF_MCAST_FILTER_MASK_REG,
2751 		    RPF_MCAST_FILTER_MASK_ALLMULTI, 0);
2752 		AQ_WRITE_REG_BIT(sc, RPF_MCAST_FILTER_REG(0),
2753 		    RPF_MCAST_FILTER_EN, 1);
2754 	}
2755 }
2756 
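/*
 * Allocate, map and load a single-segment, 64-bit addressable chunk of
 * DMA memory.
 */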
2757 int
2758 aq_dmamem_alloc(struct aq_softc *sc, struct aq_dmamem *aqm,
2759     bus_size_t size, u_int align)
2760 {
2761 	aqm->aqm_size = size;
2762 
2763 	if (bus_dmamap_create(sc->sc_dmat, aqm->aqm_size, 1,
2764 	    aqm->aqm_size, 0,
2765 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
2766 	    &aqm->aqm_map) != 0)
2767 		return (1);
2768 	if (bus_dmamem_alloc(sc->sc_dmat, aqm->aqm_size,
2769 	    align, 0, &aqm->aqm_seg, 1, &aqm->aqm_nsegs,
2770 	    BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
2771 		goto destroy;
2772 	if (bus_dmamem_map(sc->sc_dmat, &aqm->aqm_seg, aqm->aqm_nsegs,
2773 	    aqm->aqm_size, &aqm->aqm_kva, BUS_DMA_WAITOK) != 0)
2774 		goto free;
2775 	if (bus_dmamap_load(sc->sc_dmat, aqm->aqm_map, aqm->aqm_kva,
2776 	    aqm->aqm_size, NULL, BUS_DMA_WAITOK) != 0)
2777 		goto unmap;
2778 
2779 	return (0);
2780 unmap:
2781 	bus_dmamem_unmap(sc->sc_dmat, aqm->aqm_kva, aqm->aqm_size);
2782 free:
2783 	bus_dmamem_free(sc->sc_dmat, &aqm->aqm_seg, 1);
2784 destroy:
2785 	bus_dmamap_destroy(sc->sc_dmat, aqm->aqm_map);
2786 	return (1);
2787 }
2788 
2789 void
2790 aq_dmamem_free(struct aq_softc *sc, struct aq_dmamem *aqm)
2791 {
2792 	bus_dmamap_unload(sc->sc_dmat, aqm->aqm_map);
2793 	bus_dmamem_unmap(sc->sc_dmat, aqm->aqm_kva, aqm->aqm_size);
2794 	bus_dmamem_free(sc->sc_dmat, &aqm->aqm_seg, 1);
2795 	bus_dmamap_destroy(sc->sc_dmat, aqm->aqm_map);
2796 }
2797