1 /* $OpenBSD: if_aq_pci.c,v 1.4 2021/10/09 08:38:13 jmatthew Exp $ */
2 /*	$NetBSD: if_aq.c,v 1.27 2021/06/16 00:21:18 riastradh Exp $	*/
3 
4 /*
5  * Copyright (c) 2021 Jonathan Matthew <jonathan@d14n.org>
6  * Copyright (c) 2021 Mike Larkin <mlarkin@openbsd.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 /**
22  * aQuantia Corporation Network Driver
23  * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
24  *
25  * Redistribution and use in source and binary forms, with or without
26  * modification, are permitted provided that the following conditions
27  * are met:
28  *
29  *   (1) Redistributions of source code must retain the above
30  *   copyright notice, this list of conditions and the following
31  *   disclaimer.
32  *
33  *   (2) Redistributions in binary form must reproduce the above
34  *   copyright notice, this list of conditions and the following
35  *   disclaimer in the documentation and/or other materials provided
36  *   with the distribution.
37  *
38  *   (3) The name of the author may not be used to endorse or promote
39  *   products derived from this software without specific prior
40  *   written permission.
41  *
42  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
43  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
44  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
45  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
46  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
47  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
48  * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
49  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
50  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
51  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
52  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
53  *
54  */
55 
56 /*-
57  * Copyright (c) 2020 Ryo Shimizu <ryo@nerv.org>
58  * All rights reserved.
59  *
60  * Redistribution and use in source and binary forms, with or without
61  * modification, are permitted provided that the following conditions
62  * are met:
63  * 1. Redistributions of source code must retain the above copyright
64  *    notice, this list of conditions and the following disclaimer.
65  * 2. Redistributions in binary form must reproduce the above copyright
66  *    notice, this list of conditions and the following disclaimer in the
67  *    documentation and/or other materials provided with the distribution.
68  *
69  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
70  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
71  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
72  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
73  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
74  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
75  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
76  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
77  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
78  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
79  * POSSIBILITY OF SUCH DAMAGE.
80  */
81 #include "bpfilter.h"
82 
83 #include <sys/types.h>
84 #include <sys/device.h>
85 #include <sys/param.h>
86 #include <sys/kernel.h>
87 #include <sys/sockio.h>
88 #include <sys/systm.h>
89 
90 #include <net/if.h>
91 #include <net/if_media.h>
92 
93 #include <netinet/in.h>
94 #include <netinet/if_ether.h>
95 
96 #include <dev/pci/pcireg.h>
97 #include <dev/pci/pcivar.h>
98 #include <dev/pci/pcidevs.h>
99 
100 #if NBPFILTER > 0
101 #include <net/bpf.h>
102 #endif
103 
104 /* #define AQ_DEBUG 1 */
105 #ifdef AQ_DEBUG
106 #define DPRINTF(x) printf x
107 #else
108 #define DPRINTF(x)
109 #endif /* AQ_DEBUG */
110 
111 #define DEVNAME(_s)	((_s)->sc_dev.dv_xname)
112 
113 #define AQ_BAR0 				0x10
114 #define AQ_MAXQ 				8
115 
116 #define AQ_TXD_NUM 				2048
117 #define AQ_RXD_NUM 				2048
118 
119 #define AQ_TX_MAX_SEGMENTS			1	/* XXX */
120 
121 #define AQ_LINKSTAT_IRQ				31
122 
123 #define RPF_ACTION_HOST				1
124 
125 #define AQ_FW_SOFTRESET_REG			0x0000
126 #define  AQ_FW_SOFTRESET_DIS			(1 << 14)
127 #define  AQ_FW_SOFTRESET_RESET			(1 << 15)
128 #define AQ_FW_VERSION_REG			0x0018
129 #define AQ_HW_REVISION_REG			0x001c
130 #define AQ_GLB_NVR_INTERFACE1_REG		0x0100
131 #define AQ_FW_MBOX_CMD_REG			0x0200
132 #define  AQ_FW_MBOX_CMD_EXECUTE			0x00008000
133 #define  AQ_FW_MBOX_CMD_BUSY			0x00000100
134 #define AQ_FW_MBOX_ADDR_REG			0x0208
135 #define AQ_FW_MBOX_VAL_REG			0x020C
136 #define AQ_FW_GLB_CPU_SEM_REG(i)		(0x03a0 + (i) * 4)
137 #define AQ_FW_SEM_RAM_REG			AQ_FW_GLB_CPU_SEM_REG(2)
138 #define AQ_FW_GLB_CTL2_REG			0x0404
139 #define AQ_GLB_GENERAL_PROVISIONING9_REG	0x0520
140 #define AQ_GLB_NVR_PROVISIONING2_REG		0x0534
141 #define AQ_INTR_STATUS_REG			0x2000  /* intr status */
142 #define AQ_INTR_STATUS_CLR_REG			0x2050  /* intr status clear */
143 #define AQ_INTR_MASK_REG			0x2060	/* intr mask set */
144 #define AQ_INTR_MASK_CLR_REG			0x2070	/* intr mask clear */
145 #define AQ_INTR_AUTOMASK_REG			0x2090
146 
147 /* AQ_INTR_IRQ_MAP_TXRX_REG 0x2100-0x2140 */
148 #define AQ_INTR_IRQ_MAP_TXRX_REG(i)		(0x2100 + ((i) / 2) * 4)
149 #define AQ_INTR_IRQ_MAP_TX_REG(i)		AQ_INTR_IRQ_MAP_TXRX_REG(i)
150 #define  AQ_INTR_IRQ_MAP_TX_IRQMAP(i)		(0x1FU << (((i) & 1) ? 16 : 24))
151 #define  AQ_INTR_IRQ_MAP_TX_EN(i)		(1U << (((i) & 1) ? 23 : 31))
152 #define AQ_INTR_IRQ_MAP_RX_REG(i)		AQ_INTR_IRQ_MAP_TXRX_REG(i)
153 #define  AQ_INTR_IRQ_MAP_RX_IRQMAP(i)		(0x1FU << (((i) & 1) ? 0 : 8))
154 #define  AQ_INTR_IRQ_MAP_RX_EN(i)		(1U << (((i) & 1) ? 7 : 15))
155 
156 /* AQ_GEN_INTR_MAP_REG[AQ_RINGS_NUM] 0x2180-0x2200 */
157 #define AQ_GEN_INTR_MAP_REG(i)			(0x2180 + (i) * 4)
158 #define  AQ_B0_ERR_INT				8U
159 
160 #define AQ_INTR_CTRL_REG			0x2300
161 #define  AQ_INTR_CTRL_IRQMODE			((1 << 0) | (1 << 1))
162 #define AQ_INTR_CTRL_IRQMODE_LEGACY		0
163 #define AQ_INTR_CTRL_IRQMODE_MSI		1
164 #define AQ_INTR_CTRL_IRQMODE_MSIX		2
165 #define  AQ_INTR_CTRL_MULTIVEC			(1 << 2)
166 #define  AQ_INTR_CTRL_RESET_DIS			(1 << 29)
167 #define  AQ_INTR_CTRL_RESET_IRQ			(1 << 31)
168 #define AQ_MBOXIF_POWER_GATING_CONTROL_REG	0x32a8
169 
170 #define FW_MPI_MBOX_ADDR_REG			0x0360
171 #define FW1X_MPI_INIT1_REG			0x0364
172 #define FW1X_MPI_INIT2_REG			0x0370
173 #define FW1X_MPI_EFUSEADDR_REG			0x0374
174 
175 #define FW2X_MPI_EFUSEADDR_REG			0x0364
176 #define FW2X_MPI_CONTROL_REG			0x0368  /* 64bit */
177 #define FW2X_MPI_STATE_REG			0x0370  /* 64bit */
178 #define FW_BOOT_EXIT_CODE_REG			0x0388
181 #define  RBL_STATUS_DEAD			0x0000dead
182 #define  RBL_STATUS_SUCCESS			0x0000abba
183 #define  RBL_STATUS_FAILURE			0x00000bad
184 #define  RBL_STATUS_HOST_BOOT			0x0000f1a7
185 #define FW_MPI_DAISY_CHAIN_STATUS_REG		0x0704
186 #define AQ_PCI_REG_CONTROL_6_REG		0x1014
187 
188 #define FW_MPI_RESETCTRL_REG			0x4000
189 #define  FW_MPI_RESETCTRL_RESET_DIS		(1 << 29)
190 
191 #define RX_SYSCONTROL_REG			0x5000
192 #define  RX_SYSCONTROL_RESET_DIS		(1 << 29)
193 
194 #define RX_TCP_RSS_HASH_REG			0x5040
195 
196 #define RPF_L2BC_REG				0x5100
197 #define  RPF_L2BC_EN				(1 << 0)
198 #define  RPF_L2BC_PROMISC			(1 << 3)
199 #define  RPF_L2BC_ACTION			0x7000
200 #define  RPF_L2BC_THRESHOLD			0xFFFF0000
201 
202 #define AQ_HW_MAC_OWN				0
203 
204 /* RPF_L2UC_*_REG[34] (actual [38]?) */
205 #define RPF_L2UC_LSW_REG(i)                     (0x5110 + (i) * 8)
206 #define RPF_L2UC_MSW_REG(i)                     (0x5114 + (i) * 8)
207 #define  RPF_L2UC_MSW_MACADDR_HI		0xFFFF
208 #define  RPF_L2UC_MSW_ACTION			0x70000
209 #define  RPF_L2UC_MSW_EN			(1 << 31)
210 #define AQ_HW_MAC_NUM				34
211 
212 /* RPF_MCAST_FILTER_REG[8] 0x5250-0x5270 */
213 #define RPF_MCAST_FILTER_REG(i)			(0x5250 + (i) * 4)
214 #define  RPF_MCAST_FILTER_EN			(1 << 31)
215 #define RPF_MCAST_FILTER_MASK_REG		0x5270
216 #define  RPF_MCAST_FILTER_MASK_ALLMULTI		(1 << 14)
217 
218 #define RPF_VLAN_MODE_REG			0x5280
219 #define  RPF_VLAN_MODE_PROMISC			(1 << 1)
220 #define  RPF_VLAN_MODE_ACCEPT_UNTAGGED		(1 << 2)
221 #define  RPF_VLAN_MODE_UNTAGGED_ACTION		0x38
222 
223 #define RPF_VLAN_TPID_REG                       0x5284
224 #define  RPF_VLAN_TPID_OUTER			0xFFFF0000
225 #define  RPF_VLAN_TPID_INNER			0xFFFF
226 
227 /* RPF_ETHERTYPE_FILTER_REG[AQ_RINGS_NUM] 0x5300-0x5380 */
228 #define RPF_ETHERTYPE_FILTER_REG(i)		(0x5300 + (i) * 4)
229 #define  RPF_ETHERTYPE_FILTER_EN		(1 << 31)
230 
231 /* RPF_L3_FILTER_REG[8] 0x5380-0x53a0 */
232 #define RPF_L3_FILTER_REG(i)			(0x5380 + (i) * 4)
233 #define  RPF_L3_FILTER_L4_EN			(1 << 31)
234 
235 #define RX_FLR_RSS_CONTROL1_REG			0x54c0
236 #define  RX_FLR_RSS_CONTROL1_EN			(1 << 31)
237 
238 #define RPF_RPB_RX_TC_UPT_REG                   0x54c4
239 #define  RPF_RPB_RX_TC_UPT_MASK(i)              (0x00000007 << ((i) * 4))
240 
241 #define RPB_RPF_RX_REG				0x5700
242 #define  RPB_RPF_RX_TC_MODE			(1 << 8)
243 #define  RPB_RPF_RX_FC_MODE			0x30
244 #define  RPB_RPF_RX_BUF_EN			(1 << 0)
245 
246 /* RPB_RXB_BUFSIZE_REG[AQ_TRAFFICCLASS_NUM] 0x5710-0x5790 */
247 #define RPB_RXB_BUFSIZE_REG(i)			(0x5710 + (i) * 0x10)
248 #define  RPB_RXB_BUFSIZE			0x1FF
249 #define RPB_RXB_XOFF_REG(i)			(0x5714 + (i) * 0x10)
250 #define  RPB_RXB_XOFF_EN			(1 << 31)
251 #define  RPB_RXB_XOFF_THRESH_HI                 0x3FFF0000
252 #define  RPB_RXB_XOFF_THRESH_LO                 0x3FFF
253 
254 #define RX_DMA_INT_DESC_WRWB_EN_REG		0x5a30
255 #define  RX_DMA_INT_DESC_WRWB_EN		(1 << 2)
256 #define  RX_DMA_INT_DESC_MODERATE_EN		(1 << 3)
257 
258 #define RX_INTR_MODERATION_CTL_REG(i)		(0x5a40 + (i) * 4)
259 #define  RX_INTR_MODERATION_CTL_EN		(1 << 1)
260 
261 #define RX_DMA_DESC_BASE_ADDRLSW_REG(i)		(0x5b00 + (i) * 0x20)
262 #define RX_DMA_DESC_BASE_ADDRMSW_REG(i)		(0x5b04 + (i) * 0x20)
263 #define RX_DMA_DESC_REG(i)			(0x5b08 + (i) * 0x20)
264 #define  RX_DMA_DESC_LEN			(0x3FF << 3)
265 #define  RX_DMA_DESC_RESET			(1 << 25)
266 #define  RX_DMA_DESC_HEADER_SPLIT		(1 << 28)
267 #define  RX_DMA_DESC_VLAN_STRIP			(1 << 29)
268 #define  RX_DMA_DESC_EN				(1 << 31)
269 #define RX_DMA_DESC_HEAD_PTR_REG(i)		(0x5b0c + (i) * 0x20)
270 #define  RX_DMA_DESC_HEAD_PTR			0xFFF
271 #define RX_DMA_DESC_TAIL_PTR_REG(i)		(0x5b10 + (i) * 0x20)
272 #define RX_DMA_DESC_BUFSIZE_REG(i)		(0x5b18 + (i) * 0x20)
273 #define  RX_DMA_DESC_BUFSIZE_DATA		0x000F
274 #define  RX_DMA_DESC_BUFSIZE_HDR		0x0FF0
275 
276 #define RX_DMA_DCAD_REG(i)			(0x6100 + (i) * 4)
277 #define  RX_DMA_DCAD_CPUID			0xFF
278 #define  RX_DMA_DCAD_PAYLOAD_EN			(1 << 29)
279 #define  RX_DMA_DCAD_HEADER_EN			(1 << 30)
280 #define  RX_DMA_DCAD_DESC_EN			(1 << 31)
281 
282 #define RX_DMA_DCA_REG				0x6180
283 #define  RX_DMA_DCA_EN				(1 << 31)
284 #define  RX_DMA_DCA_MODE			0xF
285 
286 #define TX_SYSCONTROL_REG			0x7000
287 #define  TX_SYSCONTROL_RESET_DIS		(1 << 29)
288 
289 #define TX_TPO2_REG				0x7040
290 #define  TX_TPO2_EN				(1 << 16)
291 
292 #define TPS_DESC_VM_ARB_MODE_REG		0x7300
293 #define  TPS_DESC_VM_ARB_MODE			(1 << 0)
294 #define TPS_DESC_RATE_REG			0x7310
295 #define  TPS_DESC_RATE_TA_RST			(1 << 31)
296 #define  TPS_DESC_RATE_LIM			0x7FF
297 #define TPS_DESC_TC_ARB_MODE_REG		0x7200
298 #define  TPS_DESC_TC_ARB_MODE			0x3
299 #define TPS_DATA_TC_ARB_MODE_REG		0x7100
300 #define  TPS_DATA_TC_ARB_MODE			(1 << 0)
301 
302 /* TPS_DATA_TCT_REG[AQ_TRAFFICCLASS_NUM] 0x7110-0x7130 */
303 #define TPS_DATA_TCT_REG(i)			(0x7110 + (i) * 4)
304 #define  TPS_DATA_TCT_CREDIT_MAX		0xFFF0000
305 #define  TPS_DATA_TCT_WEIGHT			0x1FF
306 /* TPS_DATA_TCT_REG[AQ_TRAFFICCLASS_NUM] 0x7210-0x7230 */
307 #define TPS_DESC_TCT_REG(i)			(0x7210 + (i) * 4)
308 #define  TPS_DESC_TCT_CREDIT_MAX		0xFFF0000
309 #define  TPS_DESC_TCT_WEIGHT			0x1FF
310 
311 #define AQ_HW_TXBUF_MAX         160
312 #define AQ_HW_RXBUF_MAX         320
313 
314 #define THM_LSO_TCP_FLAG1_REG			0x7820
315 #define  THM_LSO_TCP_FLAG1_FIRST		0xFFF
316 #define  THM_LSO_TCP_FLAG1_MID			0xFFF0000
317 #define THM_LSO_TCP_FLAG2_REG			0x7824
318 #define  THM_LSO_TCP_FLAG2_LAST			0xFFF
319 
320 #define TPB_TX_BUF_REG				0x7900
321 #define  TPB_TX_BUF_EN				(1 << 0)
322 #define  TPB_TX_BUF_SCP_INS_EN			(1 << 2)
323 #define  TPB_TX_BUF_TC_MODE_EN			(1 << 8)
324 
325 /* TPB_TXB_BUFSIZE_REG[AQ_TRAFFICCLASS_NUM] 0x7910-7990 */
326 #define TPB_TXB_BUFSIZE_REG(i)			(0x7910 + (i) * 0x10)
327 #define  TPB_TXB_BUFSIZE                        (0xFF)
328 #define TPB_TXB_THRESH_REG(i)                   (0x7914 + (i) * 0x10)
329 #define  TPB_TXB_THRESH_HI                      0x1FFF0000
330 #define  TPB_TXB_THRESH_LO                      0x1FFF
331 
332 #define AQ_HW_TX_DMA_TOTAL_REQ_LIMIT_REG	0x7b20
333 
334 #define TX_DMA_INT_DESC_WRWB_EN_REG		0x7b40
335 #define  TX_DMA_INT_DESC_WRWB_EN		(1 << 1)
336 #define  TX_DMA_INT_DESC_MODERATE_EN		(1 << 4)
337 
338 #define TX_DMA_DESC_BASE_ADDRLSW_REG(i)		(0x7c00 + (i) * 0x40)
339 #define TX_DMA_DESC_BASE_ADDRMSW_REG(i)		(0x7c04 + (i) * 0x40)
340 #define TX_DMA_DESC_REG(i)			(0x7c08 + (i) * 0x40)
341 #define  TX_DMA_DESC_LEN			0x00000FF8
342 #define  TX_DMA_DESC_EN				0x80000000
343 #define TX_DMA_DESC_HEAD_PTR_REG(i)		(0x7c0c + (i) * 0x40)
344 #define  TX_DMA_DESC_HEAD_PTR			0x00000FFF
345 #define TX_DMA_DESC_TAIL_PTR_REG(i)		(0x7c10 + (i) * 0x40)
346 #define TX_DMA_DESC_WRWB_THRESH_REG(i)		(0x7c18 + (i) * 0x40)
347 #define  TX_DMA_DESC_WRWB_THRESH		0x00003F00
348 
349 #define TDM_DCAD_REG(i)				(0x8400 + (i) * 4)
350 #define  TDM_DCAD_CPUID				0x7F
351 #define  TDM_DCAD_CPUID_EN			0x80000000
352 
353 #define TDM_DCA_REG				0x8480
354 #define  TDM_DCA_EN				(1 << 31)
355 #define  TDM_DCA_MODE				0xF
356 
357 #define TX_INTR_MODERATION_CTL_REG(i)		(0x8980 + (i) * 4)
358 #define  TX_INTR_MODERATION_CTL_EN		(1 << 1)
359 
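/*
 * __LOWEST_SET_BIT() isolates the least significant set bit of a mask
 * ((((m) - 1) & (m)) ^ (m)); __SHIFTIN() multiplies a value by that bit,
 * i.e. it shifts the value into the field position described by the mask.
 */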
360 #define __LOWEST_SET_BIT(__mask) (((((uint32_t)__mask) - 1) & ((uint32_t)__mask)) ^ ((uint32_t)__mask))
361 #define __SHIFTIN(__x, __mask) ((__x) * __LOWEST_SET_BIT(__mask))
362 
363 #if 0
364 #define AQ_READ_REG(sc, reg) \
365 	bus_space_read_4((sc)->sc_iot, (sc)->sc_ioh, (reg))
366 
367 #endif
368 #define AQ_WRITE_REG(sc, reg, val) \
369 	bus_space_write_4((sc)->sc_iot, (sc)->sc_ioh, (reg), (val))
370 
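/*
 * Read-modify-write of a register field: the bits covered by "mask" are
 * cleared and, if "val" is non-zero, "val" is shifted into the field
 * with __SHIFTIN() before the register is written back.
 */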
371 #define AQ_WRITE_REG_BIT(sc, reg, mask, val)                    \
372 	do {                                                    \
373 		uint32_t _v;                                    \
374 		_v = AQ_READ_REG((sc), (reg));                  \
375 		_v &= ~(mask);                                  \
376 		if ((val) != 0)                                 \
377 			_v |= __SHIFTIN((val), (mask));         \
378 		AQ_WRITE_REG((sc), (reg), _v);                  \
379 	} while (/* CONSTCOND */ 0)
380 
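/* 64-bit registers are accessed as two 32-bit halves, low dword first. */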
381 #define AQ_READ64_REG(sc, reg)					\
382 	((uint64_t)AQ_READ_REG(sc, reg) |			\
383 	(((uint64_t)AQ_READ_REG(sc, (reg) + 4)) << 32))
384 
385 #define AQ_WRITE64_REG(sc, reg, val)				\
386 	do {							\
387 		AQ_WRITE_REG(sc, reg, (uint32_t)val);		\
388 		AQ_WRITE_REG(sc, reg + 4, (uint32_t)(val >> 32)); \
389 	} while (/* CONSTCOND */0)
390 
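/*
 * Poll until "expr" becomes true, delaying "us" microseconds between
 * attempts and giving up after "n" tries; *errp (if not NULL) is set to
 * ETIMEDOUT on timeout and to 0 otherwise.
 */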
391 #define WAIT_FOR(expr, us, n, errp)                             \
392 	do {                                                    \
393 		unsigned int _n;                                \
394 		for (_n = n; (!(expr)) && _n != 0; --_n) {      \
395 			delay((us));                            \
396 		}                                               \
397 		if ((errp != NULL)) {                           \
398 			if (_n == 0)                            \
399 				*(errp) = ETIMEDOUT;            \
400 			else                                    \
401 				*(errp) = 0;                    \
402 		}                                               \
403 	} while (/* CONSTCOND */ 0)
404 
405 #define FW_VERSION_MAJOR(sc)	(((sc)->sc_fw_version >> 24) & 0xff)
406 #define FW_VERSION_MINOR(sc)	(((sc)->sc_fw_version >> 16) & 0xff)
407 #define FW_VERSION_BUILD(sc)	((sc)->sc_fw_version & 0xffff)
408 
409 #define FEATURES_MIPS		0x00000001
410 #define FEATURES_TPO2		0x00000002
411 #define FEATURES_RPF2		0x00000004
412 #define FEATURES_MPI_AQ		0x00000008
413 #define FEATURES_REV_A0		0x10000000
414 #define FEATURES_REV_A		(FEATURES_REV_A0)
415 #define FEATURES_REV_B0		0x20000000
416 #define FEATURES_REV_B1		0x40000000
417 #define FEATURES_REV_B		(FEATURES_REV_B0|FEATURES_REV_B1)
418 
419 /* lock for FW2X_MPI_{CONTROL,STATE}_REG read-modify-write */
420 #define AQ_MPI_LOCK(sc)		mtx_enter(&(sc)->sc_mpi_mutex);
421 #define AQ_MPI_UNLOCK(sc)	mtx_leave(&(sc)->sc_mpi_mutex);
422 
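/*
 * FW 2.x MPI control/state bits: bits 0-11 select link rate/duplex
 * combinations, the upper 32 bits carry feature and command flags
 * (pause, EEE, WoL, statistics, ...).
 */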
423 #define FW2X_CTRL_10BASET_HD			(1 << 0)
424 #define FW2X_CTRL_10BASET_FD			(1 << 1)
425 #define FW2X_CTRL_100BASETX_HD			(1 << 2)
426 #define FW2X_CTRL_100BASET4_HD			(1 << 3)
427 #define FW2X_CTRL_100BASET2_HD			(1 << 4)
428 #define FW2X_CTRL_100BASETX_FD			(1 << 5)
429 #define FW2X_CTRL_100BASET2_FD			(1 << 6)
430 #define FW2X_CTRL_1000BASET_HD			(1 << 7)
431 #define FW2X_CTRL_1000BASET_FD			(1 << 8)
432 #define FW2X_CTRL_2P5GBASET_FD			(1 << 9)
433 #define FW2X_CTRL_5GBASET_FD			(1 << 10)
434 #define FW2X_CTRL_10GBASET_FD			(1 << 11)
435 #define FW2X_CTRL_RESERVED1			(1ULL << 32)
436 #define FW2X_CTRL_10BASET_EEE			(1ULL << 33)
437 #define FW2X_CTRL_RESERVED2			(1ULL << 34)
438 #define FW2X_CTRL_PAUSE				(1ULL << 35)
439 #define FW2X_CTRL_ASYMMETRIC_PAUSE		(1ULL << 36)
440 #define FW2X_CTRL_100BASETX_EEE			(1ULL << 37)
441 #define FW2X_CTRL_RESERVED3			(1ULL << 38)
442 #define FW2X_CTRL_RESERVED4			(1ULL << 39)
443 #define FW2X_CTRL_1000BASET_FD_EEE		(1ULL << 40)
444 #define FW2X_CTRL_2P5GBASET_FD_EEE		(1ULL << 41)
445 #define FW2X_CTRL_5GBASET_FD_EEE		(1ULL << 42)
446 #define FW2X_CTRL_10GBASET_FD_EEE		(1ULL << 43)
447 #define FW2X_CTRL_RESERVED5			(1ULL << 44)
448 #define FW2X_CTRL_RESERVED6			(1ULL << 45)
449 #define FW2X_CTRL_RESERVED7			(1ULL << 46)
450 #define FW2X_CTRL_RESERVED8			(1ULL << 47)
451 #define FW2X_CTRL_RESERVED9			(1ULL << 48)
452 #define FW2X_CTRL_CABLE_DIAG			(1ULL << 49)
453 #define FW2X_CTRL_TEMPERATURE			(1ULL << 50)
454 #define FW2X_CTRL_DOWNSHIFT			(1ULL << 51)
455 #define FW2X_CTRL_PTP_AVB_EN			(1ULL << 52)
456 #define FW2X_CTRL_MEDIA_DETECT			(1ULL << 53)
457 #define FW2X_CTRL_LINK_DROP			(1ULL << 54)
458 #define FW2X_CTRL_SLEEP_PROXY			(1ULL << 55)
459 #define FW2X_CTRL_WOL				(1ULL << 56)
460 #define FW2X_CTRL_MAC_STOP			(1ULL << 57)
461 #define FW2X_CTRL_EXT_LOOPBACK			(1ULL << 58)
462 #define FW2X_CTRL_INT_LOOPBACK			(1ULL << 59)
463 #define FW2X_CTRL_EFUSE_AGENT			(1ULL << 60)
464 #define FW2X_CTRL_WOL_TIMER			(1ULL << 61)
465 #define FW2X_CTRL_STATISTICS			(1ULL << 62)
466 #define FW2X_CTRL_TRANSACTION_ID		(1ULL << 63)
467 
468 #define FW2X_CTRL_RATE_100M			FW2X_CTRL_100BASETX_FD
469 #define FW2X_CTRL_RATE_1G			FW2X_CTRL_1000BASET_FD
470 #define FW2X_CTRL_RATE_2G5			FW2X_CTRL_2P5GBASET_FD
471 #define FW2X_CTRL_RATE_5G			FW2X_CTRL_5GBASET_FD
472 #define FW2X_CTRL_RATE_10G			FW2X_CTRL_10GBASET_FD
473 #define FW2X_CTRL_RATE_MASK		\
474 	(FW2X_CTRL_RATE_100M |		\
475 	 FW2X_CTRL_RATE_1G |		\
476 	 FW2X_CTRL_RATE_2G5 |		\
477 	 FW2X_CTRL_RATE_5G |		\
478 	 FW2X_CTRL_RATE_10G)
479 #define FW2X_CTRL_EEE_MASK		\
480 	(FW2X_CTRL_10BASET_EEE |	\
481 	 FW2X_CTRL_100BASETX_EEE |	\
482 	 FW2X_CTRL_1000BASET_FD_EEE |	\
483 	 FW2X_CTRL_2P5GBASET_FD_EEE |	\
484 	 FW2X_CTRL_5GBASET_FD_EEE |	\
485 	 FW2X_CTRL_10GBASET_FD_EEE)
486 
487 enum aq_fw_bootloader_mode {
488 	FW_BOOT_MODE_UNKNOWN = 0,
489 	FW_BOOT_MODE_FLB,
490 	FW_BOOT_MODE_RBL_FLASH,
491 	FW_BOOT_MODE_RBL_HOST_BOOTLOAD
492 };
493 
494 enum aq_media_type {
495 	AQ_MEDIA_TYPE_UNKNOWN = 0,
496 	AQ_MEDIA_TYPE_FIBRE,
497 	AQ_MEDIA_TYPE_TP
498 };
499 
500 enum aq_link_speed {
501 	AQ_LINK_NONE    = 0,
502 	AQ_LINK_100M    = (1 << 0),
503 	AQ_LINK_1G      = (1 << 1),
504 	AQ_LINK_2G5     = (1 << 2),
505 	AQ_LINK_5G      = (1 << 3),
506 	AQ_LINK_10G     = (1 << 4)
507 };
508 
509 #define AQ_LINK_ALL	(AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5 | \
510 			    AQ_LINK_5G | AQ_LINK_10G )
511 #define AQ_LINK_AUTO	AQ_LINK_ALL
512 
513 enum aq_link_eee {
514 	AQ_EEE_DISABLE = 0,
515 	AQ_EEE_ENABLE = 1
516 };
517 
518 enum aq_hw_fw_mpi_state {
519 	MPI_DEINIT      = 0,
520 	MPI_RESET       = 1,
521 	MPI_INIT        = 2,
522 	MPI_POWER       = 4
523 };
524 
525 enum aq_link_fc {
526         AQ_FC_NONE = 0,
527         AQ_FC_RX = (1 << 0),
528         AQ_FC_TX = (1 << 1),
529         AQ_FC_ALL = (AQ_FC_RX | AQ_FC_TX)
530 };
531 
532 struct aq_dmamem {
533 	bus_dmamap_t		aqm_map;
534 	bus_dma_segment_t	aqm_seg;
535 	int			aqm_nsegs;
536 	size_t			aqm_size;
537 	caddr_t			aqm_kva;
538 };
539 
540 #define AQ_DMA_MAP(_aqm)	((_aqm)->aqm_map)
541 #define AQ_DMA_DVA(_aqm)	((_aqm)->aqm_map->dm_segs[0].ds_addr)
542 #define AQ_DMA_KVA(_aqm)	((void *)(_aqm)->aqm_kva)
543 #define AQ_DMA_LEN(_aqm)	((_aqm)->aqm_size)
544 
545 
546 struct aq_mailbox_header {
547         uint32_t version;
548         uint32_t transaction_id;
549         int32_t error;
550 } __packed __aligned(4);
551 
552 struct aq_hw_stats_s {
553         uint32_t uprc;
554         uint32_t mprc;
555         uint32_t bprc;
556         uint32_t erpt;
557         uint32_t uptc;
558         uint32_t mptc;
559         uint32_t bptc;
560         uint32_t erpr;
561         uint32_t mbtc;
562         uint32_t bbtc;
563         uint32_t mbrc;
564         uint32_t bbrc;
565         uint32_t ubrc;
566         uint32_t ubtc;
567         uint32_t ptc;
568         uint32_t prc;
569         uint32_t dpc;   /* does not exist in fw2x_msm_statistics */
570         uint32_t cprc;  /* does not exist in fw2x_msm_statistics */
571 } __packed __aligned(4);
572 
573 struct aq_fw2x_capabilities {
574         uint32_t caps_lo;
575         uint32_t caps_hi;
576 } __packed __aligned(4);
577 
578 struct aq_fw2x_msm_statistics {
579 	uint32_t uprc;
580 	uint32_t mprc;
581 	uint32_t bprc;
582 	uint32_t erpt;
583 	uint32_t uptc;
584 	uint32_t mptc;
585 	uint32_t bptc;
586 	uint32_t erpr;
587 	uint32_t mbtc;
588 	uint32_t bbtc;
589 	uint32_t mbrc;
590 	uint32_t bbrc;
591 	uint32_t ubrc;
592 	uint32_t ubtc;
593 	uint32_t ptc;
594 	uint32_t prc;
595 } __packed __aligned(4);
596 
597 struct aq_fw2x_phy_cable_diag_data {
598 	uint32_t lane_data[4];
599 } __packed __aligned(4);
600 
601 struct aq_fw2x_mailbox {		/* struct fwHostInterface */
602 	struct aq_mailbox_header header;
603 	struct aq_fw2x_msm_statistics msm;	/* msmStatistics_t msm; */
604 
605 	uint32_t phy_info1;
606 #define PHYINFO1_FAULT_CODE	__BITS(31,16)
607 #define PHYINFO1_PHY_H_BIT	__BITS(0,15)
608 	uint32_t phy_info2;
609 #define PHYINFO2_TEMPERATURE	__BITS(15,0)
610 #define PHYINFO2_CABLE_LEN	__BITS(23,16)
611 
612 	struct aq_fw2x_phy_cable_diag_data diag_data;
613 	uint32_t reserved[8];
614 
615 	struct aq_fw2x_capabilities caps;
616 
617 	/* ... */
618 } __packed __aligned(4);
619 
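/*
 * Receive descriptor layouts: the "read" format supplies packet and
 * header buffer DMA addresses to the device; the write-back format
 * returns the completion status, packet length and RSS hash.
 */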
620 struct aq_rx_desc_read {
621 	uint64_t		buf_addr;
622 	uint64_t		hdr_addr;
623 } __packed;
624 
625 struct aq_rx_desc_wb {
626 	uint32_t		type;
627 #define AQ_RXDESC_TYPE_RSSTYPE	0x000f
628 #define AQ_RXDESC_TYPE_ETHER	0x0030
629 #define AQ_RXDESC_TYPE_PROTO	0x01c0
630 #define AQ_RXDESC_TYPE_VLAN	(1 << 9)
631 #define AQ_RXDESC_TYPE_VLAN2	(1 << 10)
632 #define AQ_RXDESC_TYPE_DMA_ERR	(1 << 12)
633 #define AQ_RXDESC_TYPE_V4_SUM	(1 << 19)
634 #define AQ_RXDESC_TYPE_TCP_SUM	(1 << 20)
635 	uint32_t		rss_hash;
636 	uint16_t		status;
637 #define AQ_RXDESC_STATUS_DD	(1 << 0)
638 #define AQ_RXDESC_STATUS_EOP	(1 << 1)
639 #define AQ_RXDESC_STATUS_MACERR (1 << 2)
640 #define AQ_RXDESC_STATUS_V4_SUM (1 << 3)
641 #define AQ_RXDESC_STATUS_L4_SUM_ERR (1 << 4)
642 #define AQ_RXDESC_STATUS_L4_SUM_OK (1 << 5)
643 	uint16_t		pkt_len;
644 	uint16_t		next_desc_ptr;
645 	uint16_t		vlan;
646 } __packed;
647 
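/*
 * Transmit descriptor: ctl1 selects the descriptor type (packet or
 * context) and carries the per-packet command bits; ctl2 holds the
 * total packet length field and the context-enable bit.
 */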
648 struct aq_tx_desc {
649 	uint64_t		buf_addr;
650 	uint32_t		ctl1;
651 #define AQ_TXDESC_CTL1_TYPE_TXD	0x00000001
652 #define AQ_TXDESC_CTL1_TYPE_TXC	0x00000002
653 #define AQ_TXDESC_CTL1_BLEN_SHIFT 4
654 #define AQ_TXDESC_CTL1_DD	(1 << 20)
655 #define AQ_TXDESC_CTL1_CMD_EOP	(1 << 21)
656 #define AQ_TXDESC_CTL1_CMD_VLAN	(1 << 22)
657 #define AQ_TXDESC_CTL1_CMD_FCS	(1 << 23)
658 #define AQ_TXDESC_CTL1_CMD_IP4CSUM (1 << 24)
659 #define AQ_TXDESC_CTL1_CMD_L4CSUM (1 << 25)
660 #define AQ_TXDESC_CTL1_CMD_WB	(1 << 27)
661 
662 #define AQ_TXDESC_CTL1_VID_SHIFT 4
663 	uint32_t		ctl2;
664 #define AQ_TXDESC_CTL2_LEN_SHIFT 14
665 #define AQ_TXDESC_CTL2_CTX_EN	(1 << 13)
666 } __packed;
667 
668 struct aq_slot {
669 	bus_dmamap_t		 as_map;
670 	struct mbuf		*as_m;
671 };
672 
673 struct aq_rxring {
674 	struct ifiqueue		*rx_ifiq;
675 	struct aq_dmamem	 rx_mem;
676 	struct aq_slot		*rx_slots;
677 	int			 rx_q;
678 	int			 rx_irq;
679 
680 	struct timeout		 rx_refill;
681 	struct if_rxring	 rx_rxr;
682 	uint32_t		 rx_prod;
683 	uint32_t		 rx_cons;
684 };
685 
686 struct aq_txring {
687 	struct ifqueue		*tx_ifq;
688 	struct aq_dmamem	 tx_mem;
689 	struct aq_slot		*tx_slots;
690 	int			 tx_q;
691 	int			 tx_irq;
692 	uint32_t		 tx_prod;
693 	uint32_t		 tx_cons;
694 };
695 
696 struct aq_queues {
697 	char			 q_name[16];
698 	void			*q_ihc;
699 	struct aq_softc		*q_sc;
700 	int			 q_index;
701 	struct aq_rxring 	 q_rx;
702 	struct aq_txring 	 q_tx;
703 };
704 
705 
706 struct aq_softc;
707 struct aq_firmware_ops {
708 	int (*reset)(struct aq_softc *);
709 	int (*set_mode)(struct aq_softc *, enum aq_hw_fw_mpi_state,
710 	    enum aq_link_speed, enum aq_link_fc, enum aq_link_eee);
711 	int (*get_mode)(struct aq_softc *, enum aq_hw_fw_mpi_state *,
712 	    enum aq_link_speed *, enum aq_link_fc *, enum aq_link_eee *);
713 	int (*get_stats)(struct aq_softc *, struct aq_hw_stats_s *);
714 };
715 
716 struct aq_softc {
717 	struct device		sc_dev;
718 	uint16_t		sc_product;
719 	uint16_t		sc_revision;
720 	bus_dma_tag_t		sc_dmat;
721 	pci_chipset_tag_t	sc_pc;
722 	pcitag_t		sc_pcitag;
723 	int			sc_nqueues;
724 	struct aq_queues	sc_queues[AQ_MAXQ];
725 	struct intrmap		*sc_intrmap;
726 	void			*sc_ih;
727 	bus_space_handle_t	sc_ioh;
728 	bus_space_tag_t		sc_iot;
729 
730 	uint32_t		sc_mbox_addr;
731 	int			sc_rbl_enabled;
732 	int			sc_fast_start_enabled;
733 	int			sc_flash_present;
734 	uint32_t		sc_fw_version;
735 	const struct		aq_firmware_ops *sc_fw_ops;
736 	uint64_t		sc_fw_caps;
737 	enum aq_media_type	sc_media_type;
738 	enum aq_link_speed	sc_available_rates;
739 	uint32_t		sc_features;
740 	int			sc_linkstat_irq;
741 	struct arpcom		sc_arpcom;
742 	struct ifmedia		sc_media;
743 
744 	struct ether_addr	sc_enaddr;
745 	struct mutex		sc_mpi_mutex;
746 };
747 
748 const struct pci_matchid aq_devices[] = {
749 	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC100 },
750 	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC107 },
751 	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC108 },
752 	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC109 },
753 	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC111 },
754 	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC112 },
755 	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC100S },
756 	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC107S },
757 	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC108S },
758 	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC109S },
759 	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC111S },
760 	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC112S },
761 	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_D100 },
762 	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_D107 },
763 	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_D108 },
764 	{ PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_D109 },
765 };
766 
767 const struct aq_product {
768 	pci_vendor_id_t aq_vendor;
769 	pci_product_id_t aq_product;
770 	enum aq_media_type aq_media_type;
771 	enum aq_link_speed aq_available_rates;
772 } aq_products[] = {
773 {	PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC100,
774 	AQ_MEDIA_TYPE_FIBRE, AQ_LINK_ALL
775 },
776 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC107,
777 	AQ_MEDIA_TYPE_TP, AQ_LINK_ALL
778 },
779 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC108,
780 	AQ_MEDIA_TYPE_TP, AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5 | AQ_LINK_5G
781 },
782 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC109,
783 	AQ_MEDIA_TYPE_TP, AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5
784 },
785 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC111,
786 	AQ_MEDIA_TYPE_TP, AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5 | AQ_LINK_5G
787 },
788 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC112,
789 	AQ_MEDIA_TYPE_TP, AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5
790 },
791 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC100S,
792 	AQ_MEDIA_TYPE_FIBRE, AQ_LINK_ALL
793 },
794 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC107S,
795 	AQ_MEDIA_TYPE_TP, AQ_LINK_ALL
796 },
797 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC108S,
798 	AQ_MEDIA_TYPE_TP, AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5 | AQ_LINK_5G
799 },
800 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC109S,
801 	AQ_MEDIA_TYPE_TP, AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5
802 },
803 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC111S,
804 	AQ_MEDIA_TYPE_TP, AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5 | AQ_LINK_5G
805 },
806 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_AQC112S,
807 	AQ_MEDIA_TYPE_TP, AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5
808 },
809 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_D100,
810 	AQ_MEDIA_TYPE_FIBRE, AQ_LINK_ALL
811 },
812 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_D107,
813 	AQ_MEDIA_TYPE_TP, AQ_LINK_ALL
814 },
815 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_D108,
816 	AQ_MEDIA_TYPE_TP, AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5 | AQ_LINK_5G
817 },
818 { PCI_VENDOR_AQUANTIA, PCI_PRODUCT_AQUANTIA_D109,
819 	AQ_MEDIA_TYPE_TP, AQ_LINK_100M | AQ_LINK_1G | AQ_LINK_2G5
820 }
821 };
822 
823 int	aq_match(struct device *, void *, void *);
824 void	aq_attach(struct device *, struct device *, void *);
825 int	aq_detach(struct device *, int);
826 int	aq_activate(struct device *, int);
827 int	aq_intr(void *);
828 void	aq_global_software_reset(struct aq_softc *);
829 int	aq_fw_reset(struct aq_softc *);
830 int	aq_mac_soft_reset(struct aq_softc *, enum aq_fw_bootloader_mode *);
831 int	aq_mac_soft_reset_rbl(struct aq_softc *, enum aq_fw_bootloader_mode *);
832 int	aq_mac_soft_reset_flb(struct aq_softc *);
833 int	aq_fw_read_version(struct aq_softc *);
834 int	aq_fw_version_init(struct aq_softc *);
835 int	aq_hw_init_ucp(struct aq_softc *);
836 int	aq_fw_downld_dwords(struct aq_softc *, uint32_t, uint32_t *, uint32_t);
837 int	aq_get_mac_addr(struct aq_softc *);
838 int	aq_hw_reset(struct aq_softc *);
839 int	aq_hw_init(struct aq_softc *, int);
840 void	aq_hw_qos_set(struct aq_softc *);
841 void	aq_hw_l3_filter_set(struct aq_softc *);
842 void	aq_hw_init_tx_path(struct aq_softc *);
843 void	aq_hw_init_rx_path(struct aq_softc *);
844 int	aq_set_mac_addr(struct aq_softc *, int, uint8_t *);
845 int	aq_set_linkmode(struct aq_softc *, enum aq_link_speed,
846     enum aq_link_fc, enum aq_link_eee);
847 void	aq_watchdog(struct ifnet *);
848 void	aq_enable_intr(struct aq_softc *, int, int);
849 int	aq_ioctl(struct ifnet *, u_long, caddr_t);
850 int	aq_up(struct aq_softc *);
851 void	aq_down(struct aq_softc *);
852 void	aq_iff(struct aq_softc *);
853 void	aq_start(struct ifqueue *);
854 void	aq_ifmedia_status(struct ifnet *, struct ifmediareq *);
855 int	aq_ifmedia_change(struct ifnet *);
856 void	aq_update_link_status(struct aq_softc *);
857 
858 void	aq_refill(void *);
859 int	aq_rx_fill(struct aq_softc *, struct aq_rxring *);
860 static inline unsigned int aq_rx_fill_slots(struct aq_softc *,
861 	    struct aq_rxring *, uint);
862 
863 int	aq_dmamem_alloc(struct aq_softc *, struct aq_dmamem *,
864 	    bus_size_t, u_int);
865 void	aq_dmamem_zero(struct aq_dmamem *);
866 void	aq_dmamem_free(struct aq_softc *, struct aq_dmamem *);
867 
868 int	aq_fw1x_reset(struct aq_softc *);
869 int	aq_fw1x_get_mode(struct aq_softc *, enum aq_hw_fw_mpi_state *,
870     enum aq_link_speed *, enum aq_link_fc *, enum aq_link_eee *);
871 int	aq_fw1x_set_mode(struct aq_softc *, enum aq_hw_fw_mpi_state,
872     enum aq_link_speed, enum aq_link_fc, enum aq_link_eee);
873 int	aq_fw1x_get_stats(struct aq_softc *, struct aq_hw_stats_s *);
874 
875 int	aq_fw2x_reset(struct aq_softc *);
876 int	aq_fw2x_get_mode(struct aq_softc *, enum aq_hw_fw_mpi_state *,
877     enum aq_link_speed *, enum aq_link_fc *, enum aq_link_eee *);
878 int	aq_fw2x_set_mode(struct aq_softc *, enum aq_hw_fw_mpi_state,
879     enum aq_link_speed, enum aq_link_fc, enum aq_link_eee);
880 int	aq_fw2x_get_stats(struct aq_softc *, struct aq_hw_stats_s *);
881 
882 const struct aq_firmware_ops aq_fw1x_ops = {
883 	.reset = aq_fw1x_reset,
884 	.set_mode = aq_fw1x_set_mode,
885 	.get_mode = aq_fw1x_get_mode,
886 	.get_stats = aq_fw1x_get_stats,
887 };
888 
889 const struct aq_firmware_ops aq_fw2x_ops = {
890 	.reset = aq_fw2x_reset,
891 	.set_mode = aq_fw2x_set_mode,
892 	.get_mode = aq_fw2x_get_mode,
893 	.get_stats = aq_fw2x_get_stats,
894 };
895 
896 struct cfattach aq_ca = {
897 	sizeof(struct aq_softc), aq_match, aq_attach, NULL,
898 	aq_activate
899 };
900 
901 struct cfdriver aq_cd = {
902 	NULL, "aq", DV_IFNET
903 };
904 
905 uint32_t
906 AQ_READ_REG(struct aq_softc *sc, uint32_t reg)
907 {
908 	uint32_t res;
909 
910 	res = bus_space_read_4(sc->sc_iot, sc->sc_ioh, reg);
911 
912 	return res;
913 }
914 
915 
916 int
917 aq_match(struct device *dev, void *match, void *aux)
918 {
919 	return pci_matchbyid((struct pci_attach_args *)aux, aq_devices,
920 	    sizeof(aq_devices) / sizeof(aq_devices[0]));
921 }
922 
923 const struct aq_product *
924 aq_lookup(const struct pci_attach_args *pa)
925 {
926 	unsigned int i;
927 
928 	for (i = 0; i < sizeof(aq_products) / sizeof(aq_products[0]); i++) {
929 		if (PCI_VENDOR(pa->pa_id) == aq_products[i].aq_vendor &&
930 		    PCI_PRODUCT(pa->pa_id) == aq_products[i].aq_product) {
931 			return &aq_products[i];
932 		}
933 	}
934 
935 	return NULL;
936 }
937 
938 void
939 aq_attach(struct device *parent, struct device *self, void *aux)
940 {
941 	struct aq_softc *sc = (struct aq_softc *)self;
942 	struct pci_attach_args *pa = aux;
943 	const struct aq_product *aqp;
944 	pcireg_t bar, memtype;
945 	pci_chipset_tag_t pc;
946 	pci_intr_handle_t ih;
947 	int (*isr)(void *);
948 	const char *intrstr;
949 	pcitag_t tag;
950 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
951 	int irqmode;
952 	int i;
953 
954 	mtx_init(&sc->sc_mpi_mutex, IPL_NET);
955 
956 	sc->sc_dmat = pa->pa_dmat;
957 	sc->sc_pc = pc = pa->pa_pc;
958 	sc->sc_pcitag = tag = pa->pa_tag;
959 
960 	sc->sc_product = PCI_PRODUCT(pa->pa_id);
961 	sc->sc_revision = PCI_REVISION(pa->pa_class);
962 
963 	aqp = aq_lookup(pa);
964 
965 	bar = pci_conf_read(pc, tag, AQ_BAR0);
966 	if (PCI_MAPREG_TYPE(bar) != PCI_MAPREG_TYPE_MEM) {
967 		printf(": wrong BAR type\n");
968 		return;
969 	}
970 
971 	memtype = pci_mapreg_type(pc, tag, AQ_BAR0);
972 	if (pci_mapreg_map(pa, AQ_BAR0, memtype, 0, &sc->sc_iot, &sc->sc_ioh,
973 	    NULL, NULL, 0)) {
974 		printf(": failed to map BAR0\n");
975 		return;
976 	}
977 
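	/*
	 * Only a single queue pair is set up for now; the MSI-X vector
	 * mapping for multiple queues below is still a placeholder.
	 */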
978 	sc->sc_nqueues = 1;
979 
980 	if (pci_intr_map_msix(pa, 0, &ih) == 0) {
981 		irqmode = AQ_INTR_CTRL_IRQMODE_MSIX;
982 	} else if (pci_intr_map_msi(pa, &ih) == 0) {
983 		irqmode = AQ_INTR_CTRL_IRQMODE_MSI;
984 	} else if (pci_intr_map(pa, &ih) == 0) {
985 		irqmode = AQ_INTR_CTRL_IRQMODE_LEGACY;
986 	} else {
987 		printf(": failed to map interrupt\n");
988 		return;
989 	}
990 
991 	isr = aq_intr;
992 	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih,
993 	    IPL_NET | IPL_MPSAFE, isr, sc, self->dv_xname);
994 	intrstr = pci_intr_string(pa->pa_pc, ih);
995 	if (intrstr)
996 		printf(": %s", intrstr);
997 
998 	if (aq_fw_reset(sc))
999 		return;
1000 
1001 	DPRINTF((", FW version 0x%x", sc->sc_fw_version));
1002 
1003 	if (aq_fw_version_init(sc))
1004 		return;
1005 
1006 	if (aq_hw_init_ucp(sc))
1007 		return;
1008 
1009 	if (aq_hw_reset(sc))
1010 		return;
1011 
1012 	if (aq_get_mac_addr(sc))
1013 		return;
1014 
1015 	if (aq_hw_init(sc, irqmode))
1016 		return;
1017 
1018 	sc->sc_media_type = aqp->aq_media_type;
1019 	sc->sc_available_rates = aqp->aq_available_rates;
1020 
1024 	bcopy(sc->sc_enaddr.ether_addr_octet, sc->sc_arpcom.ac_enaddr, 6);
1025 	strlcpy(ifp->if_xname, self->dv_xname, IFNAMSIZ);
1026 	ifp->if_softc = sc;
1027 	ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX;
1028 	ifp->if_xflags = IFXF_MPSAFE;
1029 	ifp->if_ioctl = aq_ioctl;
1030 	ifp->if_qstart = aq_start;
1031 	ifp->if_watchdog = aq_watchdog;
1032 	ifp->if_hardmtu = 9000;
1033 	ifp->if_capabilities = IFCAP_VLAN_MTU;
1034 	ifq_set_maxlen(&ifp->if_snd, AQ_TXD_NUM);
1035 
1036 	ifmedia_init(&sc->sc_media, IFM_IMASK, aq_ifmedia_change,
1037 	    aq_ifmedia_status);
1038 	if (sc->sc_available_rates & AQ_LINK_100M) {
1039 		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_100_TX, 0, NULL);
1040 		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_100_TX|IFM_FDX, 0,
1041 		    NULL);
1042 	}
1043 
1044 	if (sc->sc_available_rates & AQ_LINK_1G) {
1045 		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_1000_T, 0, NULL);
1046 		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_1000_T|IFM_FDX, 0,
1047 		    NULL);
1048 	}
1049 
1050 	if (sc->sc_available_rates & AQ_LINK_2G5) {
1051 		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T, 0, NULL);
1052 		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_2500_T | IFM_FDX,
1053 		    0, NULL);
1054 	}
1055 
1056 	if (sc->sc_available_rates & AQ_LINK_5G) {
1057 		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_5000_T, 0, NULL);
1058 		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_5000_T | IFM_FDX,
1059 		    0, NULL);
1060 	}
1061 
1062 	if (sc->sc_available_rates & AQ_LINK_10G) {
1063 		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10G_T, 0, NULL);
1064 		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10G_T | IFM_FDX,
1065 		    0, NULL);
1066 	}
1067 
1068 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1069 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO | IFM_FDX, 0, NULL);
1070 	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
1071 	aq_set_linkmode(sc, AQ_LINK_AUTO, AQ_FC_NONE, AQ_EEE_DISABLE);
1072 
1073 	if_attach(ifp);
1074 	ether_ifattach(ifp);
1075 
1076 	if_attach_iqueues(ifp, sc->sc_nqueues);
1077 	if_attach_queues(ifp, sc->sc_nqueues);
1078 
1079 	for (i = 0; i < sc->sc_nqueues; i++) {
1080 		struct aq_queues *aq = &sc->sc_queues[i];
1081 		struct aq_rxring *rx = &aq->q_rx;
1082 		struct aq_txring *tx = &aq->q_tx;
1083 
1084 		aq->q_sc = sc;
1085 		aq->q_index = i;
1086 		rx->rx_q = i;
1087 		rx->rx_irq = i * 2;
1088 		rx->rx_ifiq = ifp->if_iqs[i];
1089 		ifp->if_iqs[i]->ifiq_softc = aq;
1090 		timeout_set(&rx->rx_refill, aq_refill, rx);
1091 
1092 		tx->tx_q = i;
1093 		tx->tx_irq = rx->rx_irq + 1;
1094 		tx->tx_ifq = ifp->if_ifqs[i];
1095 		ifp->if_ifqs[i]->ifq_softc = aq;
1096 
1097 		if (sc->sc_nqueues > 1) {
1098 			/* map msix */
1099 		}
1100 
1101 		AQ_WRITE_REG(sc, TX_INTR_MODERATION_CTL_REG(i), 0);
1102 		AQ_WRITE_REG(sc, RX_INTR_MODERATION_CTL_REG(i), 0);
1103 	}
1104 
1105 	AQ_WRITE_REG_BIT(sc, TX_DMA_INT_DESC_WRWB_EN_REG,
1106 	    TX_DMA_INT_DESC_WRWB_EN, 1);
1107 	AQ_WRITE_REG_BIT(sc, TX_DMA_INT_DESC_WRWB_EN_REG,
1108 	    TX_DMA_INT_DESC_MODERATE_EN, 0);
1109 	AQ_WRITE_REG_BIT(sc, RX_DMA_INT_DESC_WRWB_EN_REG,
1110 	    RX_DMA_INT_DESC_WRWB_EN, 1);
1111 	AQ_WRITE_REG_BIT(sc, RX_DMA_INT_DESC_WRWB_EN_REG,
1112 	    RX_DMA_INT_DESC_MODERATE_EN, 0);
1113 
1114 	aq_enable_intr(sc, 1, 0);
1115 	printf("\n");
1116 }
1117 
1118 int
1119 aq_fw_reset(struct aq_softc *sc)
1120 {
1121 	uint32_t ver, v, boot_exit_code;
1122 	int i, error;
1123 	enum aq_fw_bootloader_mode mode;
1124 
1125 	mode = FW_BOOT_MODE_UNKNOWN;
1126 
1127 	ver = AQ_READ_REG(sc, AQ_FW_VERSION_REG);
1128 
1129 	for (i = 1000; i > 0; i--) {
1130 		v = AQ_READ_REG(sc, FW_MPI_DAISY_CHAIN_STATUS_REG);
1131 		boot_exit_code = AQ_READ_REG(sc, FW_BOOT_EXIT_CODE_REG);
1132 		if (v != 0x06000000 || boot_exit_code != 0)
1133 			break;
1134 	}
1135 
1136 	if (i <= 0) {
1137 		printf("%s: F/W reset failed. Neither RBL nor FLB started\n",
1138 		    DEVNAME(sc));
1139 		return ETIMEDOUT;
1140 	}
1141 
1142 	sc->sc_rbl_enabled = (boot_exit_code != 0);
1143 
1144 	/*
1145 	 * Having FW version 0 is an indicator that cold start
1146 	 * is in progress. This means two things:
1147 	 * 1) Driver has to wait for FW/HW to finish boot (500ms give-up)
1148 	 * 2) Driver may skip reset sequence and save time.
1149 	 */
1150 	if (sc->sc_fast_start_enabled && (ver != 0)) {
1151 		error = aq_fw_read_version(sc);
1152 		/* Skip reset as it just completed */
1153 		if (error == 0)
1154 			return 0;
1155 	}
1156 
1157 	error = aq_mac_soft_reset(sc, &mode);
1158 	if (error != 0) {
1159 		printf("%s: MAC reset failed: %d\n", DEVNAME(sc), error);
1160 		return error;
1161 	}
1162 
1163 	switch (mode) {
1164 	case FW_BOOT_MODE_FLB:
1165 		DPRINTF(("%s: FLB> F/W successfully loaded from flash.",
1166 		    DEVNAME(sc)));
1167 		sc->sc_flash_present = 1;
1168 		return aq_fw_read_version(sc);
1169 	case FW_BOOT_MODE_RBL_FLASH:
1170 		DPRINTF(("%s: RBL> F/W loaded from flash. Host Bootload "
1171 		    "disabled.", DEVNAME(sc)));
1172 		sc->sc_flash_present = 1;
1173 		return aq_fw_read_version(sc);
1174 	case FW_BOOT_MODE_UNKNOWN:
1175 		printf("%s: F/W bootload error: unknown bootloader type\n",
1176 		    DEVNAME(sc));
1177 		return ENOTSUP;
1178 	case FW_BOOT_MODE_RBL_HOST_BOOTLOAD:
1179 		printf("%s: RBL> F/W Host Bootload not implemented\n",
		    DEVNAME(sc));
1180 		return ENOTSUP;
1181 	}
1182 
1183 	return ENOTSUP;
1184 }
1185 
1186 int
1187 aq_mac_soft_reset_rbl(struct aq_softc *sc, enum aq_fw_bootloader_mode *mode)
1188 {
1189 	int timo;
1190 
1191 	DPRINTF(("%s: RBL> MAC reset STARTED!\n", DEVNAME(sc)));
1192 
1193 	AQ_WRITE_REG(sc, AQ_FW_GLB_CTL2_REG, 0x40e1);
1194 	AQ_WRITE_REG(sc, AQ_FW_GLB_CPU_SEM_REG(0), 1);
1195 	AQ_WRITE_REG(sc, AQ_MBOXIF_POWER_GATING_CONTROL_REG, 0);
1196 
1197 	/* MAC FW will reload PHY FW if 1E.1000.3 was cleaned - #undone */
1198 	AQ_WRITE_REG(sc, FW_BOOT_EXIT_CODE_REG, RBL_STATUS_DEAD);
1199 
1200 	aq_global_software_reset(sc);
1201 
1202 	AQ_WRITE_REG(sc, AQ_FW_GLB_CTL2_REG, 0x40e0);
1203 
1204 	/* Wait for RBL to finish boot process. */
1205 #define RBL_TIMEOUT_MS	10000
1206 	uint16_t rbl_status;
1207 	for (timo = RBL_TIMEOUT_MS; timo > 0; timo--) {
1208 		rbl_status = AQ_READ_REG(sc, FW_BOOT_EXIT_CODE_REG) & 0xffff;
1209 		if (rbl_status != 0 && rbl_status != RBL_STATUS_DEAD)
1210 			break;
1211 		delay(1000);
1212 	}
1213 
1214 	if (timo <= 0) {
1215 		printf("%s: RBL> RBL restart failed: timeout\n", DEVNAME(sc));
1216 		return EBUSY;
1217 	}
1218 
1219 	switch (rbl_status) {
1220 	case RBL_STATUS_SUCCESS:
1221 		if (mode != NULL)
1222 			*mode = FW_BOOT_MODE_RBL_FLASH;
1223 		DPRINTF(("%s: RBL> reset complete! [Flash]\n", DEVNAME(sc)));
1224 		break;
1225 	case RBL_STATUS_HOST_BOOT:
1226 		if (mode != NULL)
1227 			*mode = FW_BOOT_MODE_RBL_HOST_BOOTLOAD;
1228 		DPRINTF(("%s: RBL> reset complete! [Host Bootload]\n",
1229 		    DEVNAME(sc)));
1230 		break;
1231 	case RBL_STATUS_FAILURE:
1232 	default:
1233 		printf("%s: unknown RBL status 0x%x\n", DEVNAME(sc),
1234 		    rbl_status);
1235 		return EBUSY;
1236 	}
1237 
1238 	return 0;
1239 }
1240 
1241 int
1242 aq_mac_soft_reset_flb(struct aq_softc *sc)
1243 {
1244 	uint32_t v;
1245 	int timo;
1246 
1247 	AQ_WRITE_REG(sc, AQ_FW_GLB_CTL2_REG, 0x40e1);
1248 	/*
1249 	 * Let Felicity hardware complete SMBUS transaction before
1250 	 * Global software reset.
1251 	 */
1252 	delay(50000);
1253 
1254 	/*
1255 	 * If SPI burst transaction was interrupted (before running the script),
1256 	 * global software reset may not clear SPI interface.
1257 	 * Clean it up manually before global reset.
1258 	 */
1259 	AQ_WRITE_REG(sc, AQ_GLB_NVR_PROVISIONING2_REG, 0x00a0);
1260 	AQ_WRITE_REG(sc, AQ_GLB_NVR_INTERFACE1_REG, 0x009f);
1261 	AQ_WRITE_REG(sc, AQ_GLB_NVR_INTERFACE1_REG, 0x809f);
1262 	delay(50000);
1263 
1264 	v = AQ_READ_REG(sc, AQ_FW_SOFTRESET_REG);
1265 	v &= ~AQ_FW_SOFTRESET_DIS;
1266 	v |= AQ_FW_SOFTRESET_RESET;
1267 	AQ_WRITE_REG(sc, AQ_FW_SOFTRESET_REG, v);
1268 
1269 	/* Kickstart. */
1270 	AQ_WRITE_REG(sc, AQ_FW_GLB_CTL2_REG, 0x80e0);
1271 	AQ_WRITE_REG(sc, AQ_MBOXIF_POWER_GATING_CONTROL_REG, 0);
1272 	if (!sc->sc_fast_start_enabled)
1273 		AQ_WRITE_REG(sc, AQ_GLB_GENERAL_PROVISIONING9_REG, 1);
1274 
1275 	/*
1276 	 * For the case SPI burst transaction was interrupted (by MCP reset
1277 	 * above), wait until it is completed by hardware.
1278 	 */
1279 	delay(50000);
1280 
1281 	/* MAC Kickstart */
1282 	if (!sc->sc_fast_start_enabled) {
1283 		AQ_WRITE_REG(sc, AQ_FW_GLB_CTL2_REG, 0x180e0);
1284 
1285 		uint32_t flb_status;
1286 		for (timo = 0; timo < 1000; timo++) {
1287 			flb_status = AQ_READ_REG(sc,
1288 			    FW_MPI_DAISY_CHAIN_STATUS_REG) & 0x10;
1289 			if (flb_status != 0)
1290 				break;
1291 			delay(1000);
1292 		}
1293 		if (flb_status == 0) {
1294 			printf("%s: FLB> MAC kickstart failed: timed out\n",
1295 			    DEVNAME(sc));
1296 			return ETIMEDOUT;
1297 		}
1298 		DPRINTF(("%s: FLB> MAC kickstart done, %d ms\n", DEVNAME(sc),
1299 		    timo));
1300 		/* FW reset */
1301 		AQ_WRITE_REG(sc, AQ_FW_GLB_CTL2_REG, 0x80e0);
1302 		/*
1303 		 * Let Felicity hardware complete SMBUS transaction before
1304 		 * Global software reset.
1305 		 */
1306 		delay(50000);
1307 		sc->sc_fast_start_enabled = 1;
1308 	}
1309 	AQ_WRITE_REG(sc, AQ_FW_GLB_CPU_SEM_REG(0), 1);
1310 
1311 	/* PHY Kickstart: #undone */
1312 	aq_global_software_reset(sc);
1313 
1314 	for (timo = 0; timo < 1000; timo++) {
1315 		if (AQ_READ_REG(sc, AQ_FW_VERSION_REG) != 0)
1316 			break;
1317 		delay(10000);
1318 	}
1319 	if (timo >= 1000) {
1320 		printf("%s: FLB> Global Soft Reset failed\n", DEVNAME(sc));
1321 		return ETIMEDOUT;
1322 	}
1323 	DPRINTF(("%s: FLB> F/W restart: %d ms\n", DEVNAME(sc), timo * 10));
1324 
1325 	return 0;
1326 
1327 }
1328 
1329 int
1330 aq_mac_soft_reset(struct aq_softc *sc, enum aq_fw_bootloader_mode *mode)
1331 {
1332 	if (sc->sc_rbl_enabled)
1333 		return aq_mac_soft_reset_rbl(sc, mode);
1334 
1335 	if (mode != NULL)
1336 		*mode = FW_BOOT_MODE_FLB;
1337 	return aq_mac_soft_reset_flb(sc);
1338 }
1339 
1340 void
1341 aq_global_software_reset(struct aq_softc *sc)
1342 {
1343         uint32_t v;
1344 
1345         AQ_WRITE_REG_BIT(sc, RX_SYSCONTROL_REG, RX_SYSCONTROL_RESET_DIS, 0);
1346         AQ_WRITE_REG_BIT(sc, TX_SYSCONTROL_REG, TX_SYSCONTROL_RESET_DIS, 0);
1347         AQ_WRITE_REG_BIT(sc, FW_MPI_RESETCTRL_REG,
1348             FW_MPI_RESETCTRL_RESET_DIS, 0);
1349 
1350         v = AQ_READ_REG(sc, AQ_FW_SOFTRESET_REG);
1351         v &= ~AQ_FW_SOFTRESET_DIS;
1352         v |= AQ_FW_SOFTRESET_RESET;
1353         AQ_WRITE_REG(sc, AQ_FW_SOFTRESET_REG, v);
1354 }
1355 
1356 int
1357 aq_fw_read_version(struct aq_softc *sc)
1358 {
1359 	int i, error = EBUSY;
1360 #define MAC_FW_START_TIMEOUT_MS 10000
1361 	for (i = 0; i < MAC_FW_START_TIMEOUT_MS; i++) {
1362 		sc->sc_fw_version = AQ_READ_REG(sc, AQ_FW_VERSION_REG);
1363 		if (sc->sc_fw_version != 0) {
1364 			error = 0;
1365 			break;
1366 		}
1367 		delay(1000);
1368 	}
1369 	return error;
1370 }
1371 
1372 int
1373 aq_fw_version_init(struct aq_softc *sc)
1374 {
1375 	int error = 0;
1376 	char fw_vers[sizeof("F/W version xxxxx.xxxxx.xxxxx")];
1377 
1378 	if (FW_VERSION_MAJOR(sc) == 1) {
1379 		sc->sc_fw_ops = &aq_fw1x_ops;
1380 	} else if ((FW_VERSION_MAJOR(sc) == 2) || (FW_VERSION_MAJOR(sc) == 3)) {
1381 		sc->sc_fw_ops = &aq_fw2x_ops;
1382 	} else {
1383 		printf("%s: Unsupported F/W version %d.%d.%d\n",
1384 		    DEVNAME(sc),
1385 		    FW_VERSION_MAJOR(sc), FW_VERSION_MINOR(sc),
1386 		    FW_VERSION_BUILD(sc));
1387 		return ENOTSUP;
1388 	}
1389 	snprintf(fw_vers, sizeof(fw_vers), "F/W version %d.%d.%d",
1390 	    FW_VERSION_MAJOR(sc), FW_VERSION_MINOR(sc), FW_VERSION_BUILD(sc));
1391 
1392 	/* detect revision */
1393 	uint32_t hwrev = AQ_READ_REG(sc, AQ_HW_REVISION_REG);
1394 	switch (hwrev & 0x0000000f) {
1395 	case 0x01:
1396 		printf(", revision A0, %s", fw_vers);
1397 		sc->sc_features |= FEATURES_REV_A0 |
1398 		    FEATURES_MPI_AQ | FEATURES_MIPS;
1399 		break;
1400 	case 0x02:
1401 		printf(", revision B0, %s", fw_vers);
1402 		sc->sc_features |= FEATURES_REV_B0 |
1403 		    FEATURES_MPI_AQ | FEATURES_MIPS |
1404 		    FEATURES_TPO2 | FEATURES_RPF2;
1405 		break;
1406 	case 0x0A:
1407 		printf(", revision B1, %s", fw_vers);
1408 		sc->sc_features |= FEATURES_REV_B1 |
1409 		    FEATURES_MPI_AQ | FEATURES_MIPS |
1410 		    FEATURES_TPO2 | FEATURES_RPF2;
1411 		break;
1412 	default:
1413 		printf(", Unknown revision (0x%08x)", hwrev);
1414 		error = ENOTSUP;
1415 		break;
1416 	}
1417 	return error;
1418 }
1419 
1420 int
1421 aq_hw_init_ucp(struct aq_softc *sc)
1422 {
1423 	int timo;
1424 
1425 	if (FW_VERSION_MAJOR(sc) == 1) {
1426 		if (AQ_READ_REG(sc, FW1X_MPI_INIT2_REG) == 0) {
1427 			uint32_t data;
1428 			arc4random_buf(&data, sizeof(data));
1429 			data &= 0xfefefefe;
1430 			data |= 0x02020202;
1431 			AQ_WRITE_REG(sc, FW1X_MPI_INIT2_REG, data);
1432 		}
1433 		AQ_WRITE_REG(sc, FW1X_MPI_INIT1_REG, 0);
1434 	}
1435 
1436 	for (timo = 100; timo > 0; timo--) {
1437 		sc->sc_mbox_addr = AQ_READ_REG(sc, FW_MPI_MBOX_ADDR_REG);
1438 		if (sc->sc_mbox_addr != 0)
1439 			break;
1440 		delay(1000);
1441 	}
1442 
1443 #define AQ_FW_MIN_VERSION	0x01050006
1444 #define AQ_FW_MIN_VERSION_STR	"1.5.6"
1445 	if (sc->sc_fw_version < AQ_FW_MIN_VERSION) {
1446 		printf("%s: atlantic: wrong FW version: " AQ_FW_MIN_VERSION_STR
1447 		    " or later required, this is %d.%d.%d\n",
1448 		    DEVNAME(sc),
1449 		    FW_VERSION_MAJOR(sc),
1450 		    FW_VERSION_MINOR(sc),
1451 		    FW_VERSION_BUILD(sc));
1452 		return ENOTSUP;
1453 	}
1454 
1455 	if (sc->sc_mbox_addr == 0)
1456 		printf("%s: NULL MBOX!!\n", DEVNAME(sc));
1457 
1458 	return 0;
1459 }
1460 
1461 int
1462 aq_hw_reset(struct aq_softc *sc)
1463 {
1464 	int error;
1465 
1466 	/* disable irq */
1467 	AQ_WRITE_REG_BIT(sc, AQ_INTR_CTRL_REG, AQ_INTR_CTRL_RESET_DIS, 0);
1468 
1469 	/* apply */
1470 	AQ_WRITE_REG_BIT(sc, AQ_INTR_CTRL_REG, AQ_INTR_CTRL_RESET_IRQ, 1);
1471 
1472 	/* wait ack 10 times by 1ms */
1473 	WAIT_FOR(
1474 	    (AQ_READ_REG(sc, AQ_INTR_CTRL_REG) & AQ_INTR_CTRL_RESET_IRQ) == 0,
1475 	    1000, 10, &error);
1476 	if (error != 0) {
1477 		printf("%s: atlantic: IRQ reset failed: %d\n", DEVNAME(sc),
1478 		    error);
1479 		return error;
1480 	}
1481 
1482 	return sc->sc_fw_ops->reset(sc);
1483 }
1484 
1485 int
1486 aq_get_mac_addr(struct aq_softc *sc)
1487 {
1488 	uint32_t mac_addr[2];
1489 	uint32_t efuse_shadow_addr;
1490 	int err;
1491 
1492 	efuse_shadow_addr = 0;
1493 	if (FW_VERSION_MAJOR(sc) >= 2)
1494 		efuse_shadow_addr = AQ_READ_REG(sc, FW2X_MPI_EFUSEADDR_REG);
1495 	else
1496 		efuse_shadow_addr = AQ_READ_REG(sc, FW1X_MPI_EFUSEADDR_REG);
1497 
1498 	if (efuse_shadow_addr == 0) {
1499 		printf("%s: cannot get efuse addr\n", DEVNAME(sc));
1500 		return ENXIO;
1501 	}
1502 
1503 	DPRINTF(("%s: efuse_shadow_addr = %x\n", DEVNAME(sc), efuse_shadow_addr));
1504 
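	/* The MAC address lives at dword offset 40 of the eFUSE shadow area. */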
1505 	memset(mac_addr, 0, sizeof(mac_addr));
1506 	err = aq_fw_downld_dwords(sc, efuse_shadow_addr + (40 * 4),
1507 	    mac_addr, 2);
1508 	if (err != 0)
1509 		return err;
1510 
1511 	if (mac_addr[0] == 0 && mac_addr[1] == 0) {
1512 		printf("%s: mac address not found\n", DEVNAME(sc));
1513 		return ENXIO;
1514 	}
1515 
1516 	DPRINTF(("%s: mac0 %x mac1 %x\n", DEVNAME(sc), mac_addr[0],
1517 	    mac_addr[1]));
1518 
1519 	mac_addr[0] = htobe32(mac_addr[0]);
1520 	mac_addr[1] = htobe32(mac_addr[1]);
1521 
1522 	DPRINTF(("%s: mac0 %x mac1 %x\n", DEVNAME(sc), mac_addr[0],
1523 	    mac_addr[1]));
1524 
1525 	memcpy(sc->sc_enaddr.ether_addr_octet,
1526 	    (uint8_t *)mac_addr, ETHER_ADDR_LEN);
1527 	DPRINTF((": %s", ether_sprintf(sc->sc_enaddr.ether_addr_octet)));
1528 
1529 	return 0;
1530 }
1531 
1532 int
1533 aq_activate(struct device *self, int act)
1534 {
1535 	return 0;
1536 }
1537 
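/*
 * Copy "cnt" dwords from firmware/MCP shared memory at "addr" into "p":
 * take the RAM semaphore, load the mailbox address register, then issue
 * an EXECUTE per dword and read the result from the value register.
 */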
1538 int
1539 aq_fw_downld_dwords(struct aq_softc *sc, uint32_t addr, uint32_t *p,
1540     uint32_t cnt)
1541 {
1542 	uint32_t v;
1543 	int error = 0;
1544 
1545 	WAIT_FOR(AQ_READ_REG(sc, AQ_FW_SEM_RAM_REG) == 1, 1, 10000, &error);
1546 	if (error != 0) {
1547 		AQ_WRITE_REG(sc, AQ_FW_SEM_RAM_REG, 1);
1548 		v = AQ_READ_REG(sc, AQ_FW_SEM_RAM_REG);
1549 		if (v == 0) {
1550 			printf("%s: %s:%d: timeout\n",
1551 			    DEVNAME(sc), __func__, __LINE__);
1552 			return ETIMEDOUT;
1553 		}
1554 	}
1555 
1556 	AQ_WRITE_REG(sc, AQ_FW_MBOX_ADDR_REG, addr);
1557 
1558 	error = 0;
1559 	for (; cnt > 0 && error == 0; cnt--) {
1560 		/* execute mailbox interface */
1561 		AQ_WRITE_REG_BIT(sc, AQ_FW_MBOX_CMD_REG,
1562 		    AQ_FW_MBOX_CMD_EXECUTE, 1);
1563 		if (sc->sc_features & FEATURES_REV_B1) {
1564 			WAIT_FOR(AQ_READ_REG(sc, AQ_FW_MBOX_ADDR_REG) != addr,
1565 			    1, 1000, &error);
1566 		} else {
1567 			WAIT_FOR((AQ_READ_REG(sc, AQ_FW_MBOX_CMD_REG) &
1568 			    AQ_FW_MBOX_CMD_BUSY) == 0,
1569 			    1, 1000, &error);
1570 		}
1571 		*p++ = AQ_READ_REG(sc, AQ_FW_MBOX_VAL_REG);
1572 		addr += sizeof(uint32_t);
1573 	}
1574 	AQ_WRITE_REG(sc, AQ_FW_SEM_RAM_REG, 1);
1575 
1576 	if (error != 0)
1577 		printf("%s: %s:%d: timeout\n",
1578 		    DEVNAME(sc), __func__, __LINE__);
1579 
1580 	return error;
1581 }
1582 
1583 int
1584 aq_fw2x_reset(struct aq_softc *sc)
1585 {
1586 	struct aq_fw2x_capabilities caps = { 0 };
1587 	int error;
1588 
1589 	error = aq_fw_downld_dwords(sc,
1590 	    sc->sc_mbox_addr + offsetof(struct aq_fw2x_mailbox, caps),
1591 	    (uint32_t *)&caps, sizeof caps / sizeof(uint32_t));
1592 	if (error != 0) {
1593 		printf("%s: fw2x> can't get F/W capabilities mask, error %d\n",
1594 		    DEVNAME(sc), error);
1595 		return error;
1596 	}
1597 	sc->sc_fw_caps = caps.caps_lo | ((uint64_t)caps.caps_hi << 32);
1598 
1599 	DPRINTF(("%s: fw2x> F/W capabilities=0x%llx\n", DEVNAME(sc),
1600 	    sc->sc_fw_caps));
1601 
1602 	return 0;
1603 }
1604 
1605 int
1606 aq_fw1x_reset(struct aq_softc *sc)
1607 {
1608 	printf("%s: unimplemented %s\n", DEVNAME(sc), __func__);
1609 	return 0;
1610 }
1611 
1612 int
1613 aq_fw1x_set_mode(struct aq_softc *sc, enum aq_hw_fw_mpi_state w,
1614     enum aq_link_speed x, enum aq_link_fc y, enum aq_link_eee z)
1615 {
1616 	return 0;
1617 }
1618 
1619 int
1620 aq_fw1x_get_mode(struct aq_softc *sc, enum aq_hw_fw_mpi_state *w,
1621     enum aq_link_speed *x, enum aq_link_fc *y, enum aq_link_eee *z)
1622 {
1623 	return 0;
1624 }
1625 
1626 int
1627 aq_fw1x_get_stats(struct aq_softc *sc, struct aq_hw_stats_s *w)
1628 {
1629 	return 0;
1630 }
1631 
1632 
1633 int
1634 aq_fw2x_get_mode(struct aq_softc *sc, enum aq_hw_fw_mpi_state *modep,
1635     enum aq_link_speed *speedp, enum aq_link_fc *fcp, enum aq_link_eee *eeep)
1636 {
1637 	uint64_t mpi_state, mpi_ctrl;
1638 	enum aq_link_speed speed;
1639 	enum aq_link_fc fc;
1640 
1641 	AQ_MPI_LOCK(sc);
1642 
1643 	mpi_state = AQ_READ64_REG(sc, FW2X_MPI_STATE_REG);
1644 	if (modep != NULL) {
1645 		mpi_ctrl = AQ_READ64_REG(sc, FW2X_MPI_CONTROL_REG);
1646 		if (mpi_ctrl & FW2X_CTRL_RATE_MASK)
1647 			*modep = MPI_INIT;
1648 		else
1649 			*modep = MPI_DEINIT;
1650 	}
1651 
1652 	AQ_MPI_UNLOCK(sc);
1653 
1654 	if (mpi_state & FW2X_CTRL_RATE_10G)
1655 		speed = AQ_LINK_10G;
1656 	else if (mpi_state & FW2X_CTRL_RATE_5G)
1657 		speed = AQ_LINK_5G;
1658 	else if (mpi_state & FW2X_CTRL_RATE_2G5)
1659 		speed = AQ_LINK_2G5;
1660 	else if (mpi_state & FW2X_CTRL_RATE_1G)
1661 		speed = AQ_LINK_1G;
1662 	else if (mpi_state & FW2X_CTRL_RATE_100M)
1663 		speed = AQ_LINK_100M;
1664 	else
1665 		speed = AQ_LINK_NONE;
1666 	if (speedp != NULL)
1667 		*speedp = speed;
1668 
1669 	fc = AQ_FC_NONE;
1670 	if (mpi_state & FW2X_CTRL_PAUSE)
1671 		fc |= AQ_FC_RX;
1672 	if (mpi_state & FW2X_CTRL_ASYMMETRIC_PAUSE)
1673 		fc |= AQ_FC_TX;
1674 	if (fcp != NULL)
1675 		*fcp = fc;
1676 
1677 	if (eeep != NULL)
1678 		*eeep = AQ_EEE_DISABLE;
1679 
1680 	return 0;
1681 }
1682 
1683 int
1684 aq_fw2x_get_stats(struct aq_softc *sc, struct aq_hw_stats_s *w)
1685 {
1686 	return 0;
1687 }
1688 
1689 void
1690 aq_hw_l3_filter_set(struct aq_softc *sc)
1691 {
1692 	int i;
1693 
1694 	/* clear all L3/L4 filters */
1695 	for (i = 0; i < 8; i++) {
1696 		AQ_WRITE_REG_BIT(sc, RPF_L3_FILTER_REG(i),
1697 		    RPF_L3_FILTER_L4_EN, 0);
1698 	}
1699 }
1700 
1701 int
1702 aq_hw_init(struct aq_softc *sc, int irqmode)
1703 {
1704 	uint32_t v;
1705 
1706 	/* Force limit MRRS on RDM/TDM to 2K */
1707 	v = AQ_READ_REG(sc, AQ_PCI_REG_CONTROL_6_REG);
1708 	AQ_WRITE_REG(sc, AQ_PCI_REG_CONTROL_6_REG, (v & ~0x0707) | 0x0404);
1709 
1710 	/*
1711 	 * TX DMA total request limit.  B0 hardware cannot handle more
1712 	 * than (8K - MRRS) bytes of incoming DMA data.  The value is in
1713 	 * 256-byte units: 24 * 256 = 6KB, i.e. 8KB minus the 2KB MRRS.
1714 	 */
1715 	AQ_WRITE_REG(sc, AQ_HW_TX_DMA_TOTAL_REQ_LIMIT_REG, 24);
1716 
1717 	aq_hw_init_tx_path(sc);
1718 	aq_hw_init_rx_path(sc);
1719 
1720 	if (aq_set_mac_addr(sc, AQ_HW_MAC_OWN, sc->sc_enaddr.ether_addr_octet))
1721 		return EINVAL;
1722 
1723 	aq_set_linkmode(sc, AQ_LINK_NONE, AQ_FC_NONE, AQ_EEE_DISABLE);
1724 
1725 	aq_hw_qos_set(sc);
1726 
1727 	/* Enable interrupt */
1728 	AQ_WRITE_REG(sc, AQ_INTR_CTRL_REG, AQ_INTR_CTRL_RESET_DIS);
1729 	AQ_WRITE_REG_BIT(sc, AQ_INTR_CTRL_REG, AQ_INTR_CTRL_MULTIVEC, 0);
1730 
1731 	AQ_WRITE_REG_BIT(sc, AQ_INTR_CTRL_REG, AQ_INTR_CTRL_IRQMODE, irqmode);
1732 
1733 	AQ_WRITE_REG(sc, AQ_INTR_AUTOMASK_REG, 0xffffffff);
1734 
1735 	AQ_WRITE_REG(sc, AQ_GEN_INTR_MAP_REG(0),
1736 	    ((AQ_B0_ERR_INT << 24) | (1U << 31)) |
1737 	    ((AQ_B0_ERR_INT << 16) | (1 << 23))
1738 	);
1739 
1740 	/* link interrupt */
1741 	sc->sc_linkstat_irq = AQ_LINKSTAT_IRQ;
1742 	AQ_WRITE_REG(sc, AQ_GEN_INTR_MAP_REG(3),
1743 	    (1 << 7) | sc->sc_linkstat_irq);
1744 
1745 	return 0;
1746 }
1747 
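/*
 * Static TX datapath setup: traffic-class mode on the TX packet
 * buffer, LSO TCP flag masks, TPO2 only when FEATURES_TPO2 is set,
 * and DCA left disabled.
 */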
1748 void
1749 aq_hw_init_tx_path(struct aq_softc *sc)
1750 {
1751 	/* Tx TC/RSS number config */
1752 	AQ_WRITE_REG_BIT(sc, TPB_TX_BUF_REG, TPB_TX_BUF_TC_MODE_EN, 1);
1753 
1754 	AQ_WRITE_REG_BIT(sc, THM_LSO_TCP_FLAG1_REG,
1755 	    THM_LSO_TCP_FLAG1_FIRST, 0x0ff6);
1756 	AQ_WRITE_REG_BIT(sc, THM_LSO_TCP_FLAG1_REG,
1757 	    THM_LSO_TCP_FLAG1_MID,   0x0ff6);
1758 	AQ_WRITE_REG_BIT(sc, THM_LSO_TCP_FLAG2_REG,
1759 	    THM_LSO_TCP_FLAG2_LAST,  0x0f7f);
1760 
1761 	/* misc */
1762 	AQ_WRITE_REG(sc, TX_TPO2_REG,
1763 	    (sc->sc_features & FEATURES_TPO2) ? TX_TPO2_EN : 0);
1764 	AQ_WRITE_REG_BIT(sc, TDM_DCA_REG, TDM_DCA_EN, 0);
1765 	AQ_WRITE_REG_BIT(sc, TDM_DCA_REG, TDM_DCA_MODE, 0);
1766 
1767 	AQ_WRITE_REG_BIT(sc, TPB_TX_BUF_REG, TPB_TX_BUF_SCP_INS_EN, 1);
1768 }
1769 
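/*
 * Static RX datapath setup: clear the RSS/TC configuration and the
 * ethertype and L2 unicast filters, program the VLAN TPIDs, accept
 * broadcast to the host, and leave DCA and header split disabled.
 */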
1770 void
1771 aq_hw_init_rx_path(struct aq_softc *sc)
1772 {
1773 	int i;
1774 
1775 	/* clear settings */
1776 	AQ_WRITE_REG_BIT(sc, RPB_RPF_RX_REG, RPB_RPF_RX_TC_MODE, 0);
1777 	AQ_WRITE_REG_BIT(sc, RPB_RPF_RX_REG, RPB_RPF_RX_FC_MODE, 0);
1778 	AQ_WRITE_REG(sc, RX_FLR_RSS_CONTROL1_REG, 0);
1779 	for (i = 0; i < 32; i++) {
1780 		AQ_WRITE_REG_BIT(sc, RPF_ETHERTYPE_FILTER_REG(i),
1781 		    RPF_ETHERTYPE_FILTER_EN, 0);
1782 	}
1783 
1784 	/* L2 and Multicast filters */
1785 	for (i = 0; i < AQ_HW_MAC_NUM; i++) {
1786 		AQ_WRITE_REG_BIT(sc, RPF_L2UC_MSW_REG(i), RPF_L2UC_MSW_EN, 0);
1787 		AQ_WRITE_REG_BIT(sc, RPF_L2UC_MSW_REG(i), RPF_L2UC_MSW_ACTION,
1788 		    RPF_ACTION_HOST);
1789 	}
1790 	AQ_WRITE_REG(sc, RPF_MCAST_FILTER_MASK_REG, 0);
1791 	AQ_WRITE_REG(sc, RPF_MCAST_FILTER_REG(0), 0x00010fff);
1792 
1793 	/* VLAN filters */
1794 	AQ_WRITE_REG_BIT(sc, RPF_VLAN_TPID_REG, RPF_VLAN_TPID_OUTER,
1795 	    ETHERTYPE_QINQ);
1796 	AQ_WRITE_REG_BIT(sc, RPF_VLAN_TPID_REG, RPF_VLAN_TPID_INNER,
1797 	    ETHERTYPE_VLAN);
1798 	AQ_WRITE_REG_BIT(sc, RPF_VLAN_MODE_REG, RPF_VLAN_MODE_PROMISC, 0);
1799 
1800 	if (sc->sc_features & FEATURES_REV_B) {
1801 		AQ_WRITE_REG_BIT(sc, RPF_VLAN_MODE_REG,
1802 		    RPF_VLAN_MODE_ACCEPT_UNTAGGED, 1);
1803 		AQ_WRITE_REG_BIT(sc, RPF_VLAN_MODE_REG,
1804 		    RPF_VLAN_MODE_UNTAGGED_ACTION, RPF_ACTION_HOST);
1805 	}
1806 
1807 	AQ_WRITE_REG(sc, RX_TCP_RSS_HASH_REG, 0);
1808 
1809 	AQ_WRITE_REG_BIT(sc, RPF_L2BC_REG, RPF_L2BC_EN, 1);
1810 	AQ_WRITE_REG_BIT(sc, RPF_L2BC_REG, RPF_L2BC_ACTION, RPF_ACTION_HOST);
1811 	AQ_WRITE_REG_BIT(sc, RPF_L2BC_REG, RPF_L2BC_THRESHOLD, 0xffff);
1812 
1813 	AQ_WRITE_REG_BIT(sc, RX_DMA_DCA_REG, RX_DMA_DCA_EN, 0);
1814 	AQ_WRITE_REG_BIT(sc, RX_DMA_DCA_REG, RX_DMA_DCA_MODE, 0);
1815 }
1816 
1817 /* program an L2 address filter; index 0 is the interface's own address */
1818 int
1819 aq_set_mac_addr(struct aq_softc *sc, int index, uint8_t *enaddr)
1820 {
1821 	uint32_t h, l;
1822 
1823 	if (index >= AQ_HW_MAC_NUM)
1824 		return EINVAL;
1825 
1826 	if (enaddr == NULL) {
1827 		/* disable */
1828 		AQ_WRITE_REG_BIT(sc,
1829 		    RPF_L2UC_MSW_REG(index), RPF_L2UC_MSW_EN, 0);
1830 		return 0;
1831 	}
1832 
1833 	h = (enaddr[0] <<  8) | (enaddr[1]);
1834 	l = ((uint32_t)enaddr[2] << 24) | (enaddr[3] << 16) |
1835 	    (enaddr[4] <<  8) | (enaddr[5]);
1836 
1837 	/* disable, set, and enable */
1838 	AQ_WRITE_REG_BIT(sc, RPF_L2UC_MSW_REG(index), RPF_L2UC_MSW_EN, 0);
1839 	AQ_WRITE_REG(sc, RPF_L2UC_LSW_REG(index), l);
1840 	AQ_WRITE_REG_BIT(sc, RPF_L2UC_MSW_REG(index),
1841 	    RPF_L2UC_MSW_MACADDR_HI, h);
1842 	AQ_WRITE_REG_BIT(sc, RPF_L2UC_MSW_REG(index), RPF_L2UC_MSW_ACTION, 1);
1843 	AQ_WRITE_REG_BIT(sc, RPF_L2UC_MSW_REG(index), RPF_L2UC_MSW_EN, 1);
1844 
1845 	return 0;
1846 }
1847 
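/*
 * Ask the firmware for the current link parameters; only valid once
 * the MPI interface is up (MPI_INIT).
 */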
1848 int
1849 aq_get_linkmode(struct aq_softc *sc, enum aq_link_speed *speed,
1850     enum aq_link_fc *fc, enum aq_link_eee *eee)
1851 {
1852 	enum aq_hw_fw_mpi_state mode;
1853 	int error;
1854 
1855 	error = sc->sc_fw_ops->get_mode(sc, &mode, speed, fc, eee);
1856 	if (error != 0)
1857 		return error;
1858 	if (mode != MPI_INIT)
1859 		return ENXIO;
1860 
1861 	return 0;
1862 }
1863 
1864 int
1865 aq_set_linkmode(struct aq_softc *sc, enum aq_link_speed speed,
1866     enum aq_link_fc fc, enum aq_link_eee eee)
1867 {
1868 	return sc->sc_fw_ops->set_mode(sc, MPI_INIT, speed, fc, eee);
1869 }
1870 
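/*
 * Program the FW 2.x MPI control register with the requested rates,
 * flow control and EEE settings; MPI_DEINIT clears them instead.
 */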
1871 int
1872 aq_fw2x_set_mode(struct aq_softc *sc, enum aq_hw_fw_mpi_state mode,
1873     enum aq_link_speed speed, enum aq_link_fc fc, enum aq_link_eee eee)
1874 {
1875 	uint64_t mpi_ctrl;
1876 	int error = 0;
1877 
1878 	AQ_MPI_LOCK(sc);
1879 
1880 	mpi_ctrl = AQ_READ64_REG(sc, FW2X_MPI_CONTROL_REG);
1881 
1882 	switch (mode) {
1883 	case MPI_INIT:
1884 		mpi_ctrl &= ~FW2X_CTRL_RATE_MASK;
1885 		if (speed & AQ_LINK_10G)
1886 			mpi_ctrl |= FW2X_CTRL_RATE_10G;
1887 		if (speed & AQ_LINK_5G)
1888 			mpi_ctrl |= FW2X_CTRL_RATE_5G;
1889 		if (speed & AQ_LINK_2G5)
1890 			mpi_ctrl |= FW2X_CTRL_RATE_2G5;
1891 		if (speed & AQ_LINK_1G)
1892 			mpi_ctrl |= FW2X_CTRL_RATE_1G;
1893 		if (speed & AQ_LINK_100M)
1894 			mpi_ctrl |= FW2X_CTRL_RATE_100M;
1895 
1896 		mpi_ctrl &= ~FW2X_CTRL_LINK_DROP;
1897 
1898 		mpi_ctrl &= ~FW2X_CTRL_EEE_MASK;
1899 		if (eee == AQ_EEE_ENABLE)
1900 			mpi_ctrl |= FW2X_CTRL_EEE_MASK;
1901 
1902 		mpi_ctrl &= ~(FW2X_CTRL_PAUSE | FW2X_CTRL_ASYMMETRIC_PAUSE);
1903 		if (fc & AQ_FC_RX)
1904 			mpi_ctrl |= FW2X_CTRL_PAUSE;
1905 		if (fc & AQ_FC_TX)
1906 			mpi_ctrl |= FW2X_CTRL_ASYMMETRIC_PAUSE;
1907 		break;
1908 	case MPI_DEINIT:
1909 		mpi_ctrl &= ~(FW2X_CTRL_RATE_MASK | FW2X_CTRL_EEE_MASK);
1910 		mpi_ctrl &= ~(FW2X_CTRL_PAUSE | FW2X_CTRL_ASYMMETRIC_PAUSE);
1911 		break;
1912 	default:
1913 		printf("%s: fw2x> unknown MPI state %d\n", DEVNAME(sc), mode);
1914 		error = EINVAL;
1915 		goto failure;
1916 	}
1917 	AQ_WRITE64_REG(sc, FW2X_MPI_CONTROL_REG, mpi_ctrl);
1918 
1919  failure:
1920 	AQ_MPI_UNLOCK(sc);
1921 	return error;
1922 }
1923 
1924 void
1925 aq_hw_qos_set(struct aq_softc *sc)
1926 {
1927 	uint32_t tc = 0;
1928 	uint32_t buff_size;
1929 
1930 	/* TPS Descriptor rate init */
1931 	AQ_WRITE_REG_BIT(sc, TPS_DESC_RATE_REG, TPS_DESC_RATE_TA_RST, 0);
1932 	AQ_WRITE_REG_BIT(sc, TPS_DESC_RATE_REG, TPS_DESC_RATE_LIM, 0xa);
1933 
1934 	/* TPS VM init */
1935 	AQ_WRITE_REG_BIT(sc, TPS_DESC_VM_ARB_MODE_REG, TPS_DESC_VM_ARB_MODE, 0);
1936 
1937 	/* TPS TC credits init */
1938 	AQ_WRITE_REG_BIT(sc, TPS_DESC_TC_ARB_MODE_REG, TPS_DESC_TC_ARB_MODE, 0);
1939 	AQ_WRITE_REG_BIT(sc, TPS_DATA_TC_ARB_MODE_REG, TPS_DATA_TC_ARB_MODE, 0);
1940 
1941 	AQ_WRITE_REG_BIT(sc, TPS_DATA_TCT_REG(tc),
1942 	    TPS_DATA_TCT_CREDIT_MAX, 0xfff);
1943 	AQ_WRITE_REG_BIT(sc, TPS_DATA_TCT_REG(tc),
1944 	    TPS_DATA_TCT_WEIGHT, 0x64);
1945 	AQ_WRITE_REG_BIT(sc, TPS_DESC_TCT_REG(tc),
1946 	    TPS_DESC_TCT_CREDIT_MAX, 0x50);
1947 	AQ_WRITE_REG_BIT(sc, TPS_DESC_TCT_REG(tc),
1948 	    TPS_DESC_TCT_WEIGHT, 0x1e);
1949 
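	/*
	 * The buffer sizes and thresholds below are register-unit values:
	 * the "1024 / 32" scaling suggests the buffer size constants are
	 * in KB and the thresholds are programmed in 32-byte units, set
	 * here at 66% and 50% of the buffer.
	 */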
1950 	/* Tx buf size */
1951 	tc = 0;
1952 	buff_size = AQ_HW_TXBUF_MAX;
1953 	AQ_WRITE_REG_BIT(sc, TPB_TXB_BUFSIZE_REG(tc), TPB_TXB_BUFSIZE,
1954 	    buff_size);
1955 	AQ_WRITE_REG_BIT(sc, TPB_TXB_THRESH_REG(tc), TPB_TXB_THRESH_HI,
1956 	    (buff_size * (1024 / 32) * 66) / 100);
1957 	AQ_WRITE_REG_BIT(sc, TPB_TXB_THRESH_REG(tc), TPB_TXB_THRESH_LO,
1958 	    (buff_size * (1024 / 32) * 50) / 100);
1959 
1960 	/* QoS Rx buf size per TC */
1961 	tc = 0;
1962 	buff_size = AQ_HW_RXBUF_MAX;
1963 	AQ_WRITE_REG_BIT(sc, RPB_RXB_BUFSIZE_REG(tc), RPB_RXB_BUFSIZE,
1964 	    buff_size);
1965 	AQ_WRITE_REG_BIT(sc, RPB_RXB_XOFF_REG(tc), RPB_RXB_XOFF_EN, 0);
1966 	AQ_WRITE_REG_BIT(sc, RPB_RXB_XOFF_REG(tc), RPB_RXB_XOFF_THRESH_HI,
1967 	    (buff_size * (1024 / 32) * 66) / 100);
1968 	AQ_WRITE_REG_BIT(sc, RPB_RXB_XOFF_REG(tc), RPB_RXB_XOFF_THRESH_LO,
1969 	    (buff_size * (1024 / 32) * 50) / 100);
1970 
1971 	/* QoS 802.1p priority -> TC mapping */
1972 	int i_priority;
1973 	for (i_priority = 0; i_priority < 8; i_priority++) {
1974 		AQ_WRITE_REG_BIT(sc, RPF_RPB_RX_TC_UPT_REG,
1975 		    RPF_RPB_RX_TC_UPT_MASK(i_priority), 0);
1976 	}
1977 }
1978 
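/*
 * Quiesce a TX ring; when (re)starting it, also program the descriptor
 * base and length (written as AQ_TXD_NUM / 8, so presumably counted in
 * units of 8 descriptors), map its interrupt and re-enable the ring.
 */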
1979 void
1980 aq_txring_reset(struct aq_softc *sc, struct aq_txring *tx, int start)
1981 {
1982 	daddr_t paddr;
1983 
1984 	tx->tx_prod = 0;
1985 	tx->tx_cons = 0;
1986 
1987 	/* empty slots? */
1988 
1989 	AQ_WRITE_REG_BIT(sc, TX_DMA_DESC_REG(tx->tx_q), TX_DMA_DESC_EN, 0);
1990 
1991 	if (start == 0)
1992 		return;
1993 
1994 	paddr = AQ_DMA_DVA(&tx->tx_mem);
1995 	AQ_WRITE_REG(sc, TX_DMA_DESC_BASE_ADDRLSW_REG(tx->tx_q), paddr);
1996 	AQ_WRITE_REG(sc, TX_DMA_DESC_BASE_ADDRMSW_REG(tx->tx_q),
1997 	    paddr >> 32);
1998 
1999 	AQ_WRITE_REG_BIT(sc, TX_DMA_DESC_REG(tx->tx_q), TX_DMA_DESC_LEN,
2000 	    AQ_TXD_NUM / 8);
2001 
2002 	tx->tx_prod = AQ_READ_REG(sc, TX_DMA_DESC_TAIL_PTR_REG(tx->tx_q));
2003 	tx->tx_cons = tx->tx_prod;
2004 	AQ_WRITE_REG(sc, TX_DMA_DESC_WRWB_THRESH_REG(tx->tx_q), 0);
2005 
2006 	AQ_WRITE_REG_BIT(sc, AQ_INTR_IRQ_MAP_TX_REG(tx->tx_q),
2007 	    AQ_INTR_IRQ_MAP_TX_IRQMAP(tx->tx_q), tx->tx_irq);
2008 	AQ_WRITE_REG_BIT(sc, AQ_INTR_IRQ_MAP_TX_REG(tx->tx_q),
2009 	    AQ_INTR_IRQ_MAP_TX_EN(tx->tx_q), 1);
2010 
2011 	AQ_WRITE_REG_BIT(sc, TX_DMA_DESC_REG(tx->tx_q), TX_DMA_DESC_EN, 1);
2012 
2013 	AQ_WRITE_REG_BIT(sc, TDM_DCAD_REG(tx->tx_q), TDM_DCAD_CPUID, 0);
2014 	AQ_WRITE_REG_BIT(sc, TDM_DCAD_REG(tx->tx_q), TDM_DCAD_CPUID_EN, 0);
2015 }
2016 
2017 void
2018 aq_rxring_reset(struct aq_softc *sc, struct aq_rxring *rx, int start)
2019 {
2020 	daddr_t paddr;
2021 
2022 	AQ_WRITE_REG_BIT(sc, RX_DMA_DESC_REG(rx->rx_q), RX_DMA_DESC_EN, 0);
2023 	/* drain */
2024 
2025 	if (start == 0)
2026 		return;
2027 
2028 	paddr = AQ_DMA_DVA(&rx->rx_mem);
2029 	AQ_WRITE_REG(sc, RX_DMA_DESC_BASE_ADDRLSW_REG(rx->rx_q), paddr);
2030 	AQ_WRITE_REG(sc, RX_DMA_DESC_BASE_ADDRMSW_REG(rx->rx_q),
2031 	    paddr >> 32);
2032 
2033 	AQ_WRITE_REG_BIT(sc, RX_DMA_DESC_REG(rx->rx_q), RX_DMA_DESC_LEN,
2034 	    AQ_RXD_NUM / 8);
2035 
2036 	AQ_WRITE_REG_BIT(sc, RX_DMA_DESC_BUFSIZE_REG(rx->rx_q),
2037 	    RX_DMA_DESC_BUFSIZE_DATA, MCLBYTES / 1024);
2038 	AQ_WRITE_REG_BIT(sc, RX_DMA_DESC_BUFSIZE_REG(rx->rx_q),
2039 	    RX_DMA_DESC_BUFSIZE_HDR, 0);
2040 	AQ_WRITE_REG_BIT(sc, RX_DMA_DESC_REG(rx->rx_q),
2041 	    RX_DMA_DESC_HEADER_SPLIT, 0);
2042 	AQ_WRITE_REG_BIT(sc, RX_DMA_DESC_REG(rx->rx_q),
2043 	    RX_DMA_DESC_VLAN_STRIP, 0);
2044 
2045 	rx->rx_cons = AQ_READ_REG(sc, RX_DMA_DESC_HEAD_PTR_REG(rx->rx_q)) &
2046 	    RX_DMA_DESC_HEAD_PTR;
2047 	AQ_WRITE_REG(sc, RX_DMA_DESC_TAIL_PTR_REG(rx->rx_q), rx->rx_cons);
2048 	rx->rx_prod = rx->rx_cons;
2049 
2050 	AQ_WRITE_REG_BIT(sc, AQ_INTR_IRQ_MAP_RX_REG(rx->rx_q),
2051 	    AQ_INTR_IRQ_MAP_RX_IRQMAP(rx->rx_q), rx->rx_irq);
2052 	AQ_WRITE_REG_BIT(sc, AQ_INTR_IRQ_MAP_RX_REG(rx->rx_q),
2053 	    AQ_INTR_IRQ_MAP_RX_EN(rx->rx_q), 1);
2054 
2055 	AQ_WRITE_REG_BIT(sc, RX_DMA_DCAD_REG(rx->rx_q),
2056 	    RX_DMA_DCAD_CPUID, 0);
2057 	AQ_WRITE_REG_BIT(sc, RX_DMA_DCAD_REG(rx->rx_q),
2058 	    RX_DMA_DCAD_DESC_EN, 0);
2059 	AQ_WRITE_REG_BIT(sc, RX_DMA_DCAD_REG(rx->rx_q),
2060 	    RX_DMA_DCAD_HEADER_EN, 0);
2061 	AQ_WRITE_REG_BIT(sc, RX_DMA_DCAD_REG(rx->rx_q),
2062 	    RX_DMA_DCAD_PAYLOAD_EN, 0);
2063 
2064 	AQ_WRITE_REG_BIT(sc, RX_DMA_DESC_REG(rx->rx_q), RX_DMA_DESC_EN, 1);
2065 }
2066 
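/*
 * Refill up to nslots RX descriptors.  Clusters are allocated with
 * room for MCLBYTES plus ETHER_ALIGN so the payload can be offset for
 * IP header alignment, then loaded into the slot's DMA map and the
 * read-format descriptor is written.  Returns how many of the
 * requested slots could not be filled.
 */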
2067 static inline unsigned int
2068 aq_rx_fill_slots(struct aq_softc *sc, struct aq_rxring *rx, uint nslots)
2069 {
2070 	struct aq_rx_desc_read *ring, *rd;
2071 	struct aq_slot *as;
2072 	struct mbuf *m;
2073 	uint p, fills;
2074 
2075 	ring = AQ_DMA_KVA(&rx->rx_mem);
2076 	p = rx->rx_prod;
2077 
2078 	bus_dmamap_sync(sc->sc_dmat, AQ_DMA_MAP(&rx->rx_mem), 0,
2079 	    AQ_DMA_LEN(&rx->rx_mem), BUS_DMASYNC_POSTWRITE);
2080 
2081 	for (fills = 0; fills < nslots; fills++) {
2082 		as = &rx->rx_slots[p];
2083 		rd = &ring[p];
2084 
2085 		m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES + ETHER_ALIGN);
2086 		if (m == NULL)
2087 			break;
2088 
2089 		m->m_data += (m->m_ext.ext_size - (MCLBYTES + ETHER_ALIGN));
2090 		m->m_data += ETHER_ALIGN;
2091 		m->m_len = m->m_pkthdr.len = MCLBYTES;
2092 
2093 		if (bus_dmamap_load_mbuf(sc->sc_dmat, as->as_map, m,
2094 		    BUS_DMA_NOWAIT) != 0) {
2095 			m_freem(m);
2096 			break;
2097 		}
2098 		as->as_m = m;
2099 
2100 		htolem64(&rd->buf_addr, as->as_map->dm_segs[0].ds_addr);
2101 		rd->hdr_addr = 0;
2102 		p++;
2103 		if (p == AQ_RXD_NUM)
2104 			p = 0;
2105 	}
2106 
2107 	bus_dmamap_sync(sc->sc_dmat, AQ_DMA_MAP(&rx->rx_mem), 0,
2108 	    AQ_DMA_LEN(&rx->rx_mem), BUS_DMASYNC_PREWRITE);
2109 
2110 	rx->rx_prod = p;
2111 	AQ_WRITE_REG(sc, RX_DMA_DESC_TAIL_PTR_REG(rx->rx_q), rx->rx_prod);
2112 	return (nslots - fills);
2113 }
2114 
2115 int
2116 aq_rx_fill(struct aq_softc *sc, struct aq_rxring *rx)
2117 {
2118 	u_int slots;
2119 
2120 	slots = if_rxr_get(&rx->rx_rxr, AQ_RXD_NUM);
2121 	if (slots == 0)
2122 		return 1;
2123 
2124 	slots = aq_rx_fill_slots(sc, rx, slots);
2125 	if_rxr_put(&rx->rx_rxr, slots);
2126 	return 0;
2127 }
2128 
2129 void
2130 aq_refill(void *xq)
2131 {
2132 	struct aq_queues *q = xq;
2133 	struct aq_softc *sc = q->q_sc;
2134 
2135 	aq_rx_fill(sc, &q->q_rx);
2136 
2137 	if (if_rxr_inuse(&q->q_rx.rx_rxr) == 0)
2138 		timeout_add(&q->q_rx.rx_refill, 1);
2139 }
2140 
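/*
 * Harvest completed RX descriptors up to the hardware head pointer,
 * pass good packets to the stack and top the ring back up.
 */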
2141 void
2142 aq_rxeof(struct aq_softc *sc, struct aq_rxring *rx)
2143 {
2144 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2145 	struct aq_rx_desc_wb *rxd;
2146 	struct aq_rx_desc_wb *ring;
2147 	struct aq_slot *as;
2148 	uint32_t end, idx;
2149 	uint16_t pktlen, status;
2150 	uint32_t rxd_type;
2151 	struct mbuf *m;
2152 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
2153 	int rxfree;
2154 
2155 	if (!ISSET(ifp->if_flags, IFF_RUNNING))
2156 		return;
2157 
2158 	end = AQ_READ_REG(sc, RX_DMA_DESC_HEAD_PTR_REG(rx->rx_q)) &
2159 	    RX_DMA_DESC_HEAD_PTR;
2160 
2161 	bus_dmamap_sync(sc->sc_dmat, AQ_DMA_MAP(&rx->rx_mem), 0,
2162 	    AQ_DMA_LEN(&rx->rx_mem), BUS_DMASYNC_POSTREAD);
2163 
2164 	rxfree = 0;
2165 	idx = rx->rx_cons;
2166 	ring = AQ_DMA_KVA(&rx->rx_mem);
2167 	while (idx != end) {
2168 		rxd = &ring[idx];
2169 		as = &rx->rx_slots[idx];
2170 
2171 		status = lemtoh16(&rxd->status);
2172 		if ((status & AQ_RXDESC_STATUS_DD) == 0)
2173 			break;
2174 
2175 		bus_dmamap_sync(sc->sc_dmat, as->as_map, 0,
2176 		    as->as_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
2177 		bus_dmamap_unload(sc->sc_dmat, as->as_map);
2178 
2179 		rxfree++;
2180 		m = as->as_m;
2181 		as->as_m = NULL;
2182 
2183 		pktlen = lemtoh16(&rxd->pkt_len);
2184 		rxd_type = lemtoh32(&rxd->type);
2185 		/* rss hash, vlan */
2186 
2187 		if ((status & AQ_RXDESC_STATUS_MACERR) ||
2188 		    (rxd_type & AQ_RXDESC_TYPE_DMA_ERR)) {
2189 			printf("%s: rx error (status %x type %x)\n",
2190 			    DEVNAME(sc), status, rxd_type);
2191 			m_freem(m);
2192 		} else {
2193 			m->m_pkthdr.len = m->m_len = pktlen;
2194 			ml_enqueue(&ml, m);
2195 		}
2196 
2197 		idx++;
2198 		if (idx == AQ_RXD_NUM)
2199 			idx = 0;
2200 	}
2201 	rx->rx_cons = idx;
2202 
2203 	if (rxfree > 0) {
2204 		if_rxr_put(&rx->rx_rxr, rxfree);
2205 		if (ifiq_input(rx->rx_ifiq, &ml))
2206 			if_rxr_livelocked(&rx->rx_rxr);
2207 
2208 		aq_rx_fill(sc, rx);
2209 		if (if_rxr_inuse(&rx->rx_rxr) == 0)
2210 			timeout_add(&rx->rx_refill, 1);
2211 	}
2212 }
2213 
2214 void
2215 aq_txeof(struct aq_softc *sc, struct aq_txring *tx)
2216 {
2217 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2218 	struct aq_slot *as;
2219 	uint32_t idx, end, free;
2220 
2221 	if (!ISSET(ifp->if_flags, IFF_RUNNING))
2222 		return;
2223 
2224 	idx = tx->tx_cons;
2225 	end = AQ_READ_REG(sc, TX_DMA_DESC_HEAD_PTR_REG(tx->tx_q)) &
2226 	    TX_DMA_DESC_HEAD_PTR;
2227 	free = 0;
2228 
2229 	bus_dmamap_sync(sc->sc_dmat, AQ_DMA_MAP(&tx->tx_mem), 0,
2230 	    AQ_DMA_LEN(&tx->tx_mem), BUS_DMASYNC_POSTREAD);
2231 
2232 	while (idx != end) {
2233 		as = &tx->tx_slots[idx];
2234 		bus_dmamap_unload(sc->sc_dmat, as->as_map);
2235 
2236 		m_freem(as->as_m);
2237 		as->as_m = NULL;
2238 
2239 		idx++;
2240 		if (idx == AQ_TXD_NUM)
2241 			idx = 0;
2242 		free++;
2243 	}
2244 
2245 	tx->tx_cons = idx;
2246 
2247 	if (free != 0) {
2248 		if (ifq_is_oactive(tx->tx_ifq))
2249 			ifq_restart(tx->tx_ifq);
2250 	}
2251 }
2252 
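/*
 * Transmit path: one descriptor per packet.  Each mbuf chain is
 * defragmented into a single cluster before it is loaded, so only
 * dm_segs[0] is used when the descriptor is written, and the tail
 * pointer is pushed once per batch.
 */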
2253 void
2254 aq_start(struct ifqueue *ifq)
2255 {
2256 	struct aq_queues *aq = ifq->ifq_softc;
2257 	struct aq_softc *sc = aq->q_sc;
2258 	struct aq_txring *tx = &aq->q_tx;
2259 	struct aq_tx_desc *ring, *txd;
2260 	struct aq_slot *as;
2261 	struct mbuf *m;
2262 	uint32_t idx, free, used, ctl1, ctl2;
2263 
2264 	idx = tx->tx_prod;
2265 	free = tx->tx_cons;
	if (free <= idx)
		free += AQ_TXD_NUM;
	free -= idx;
2266 	used = 0;
2267 
2268 	bus_dmamap_sync(sc->sc_dmat, AQ_DMA_MAP(&tx->tx_mem), 0,
2269 	    AQ_DMA_LEN(&tx->tx_mem), BUS_DMASYNC_POSTWRITE);
2270 	ring = (struct aq_tx_desc *)AQ_DMA_KVA(&tx->tx_mem);
2271 
2272 	for (;;) {
2273 		if (used + AQ_TX_MAX_SEGMENTS >= free) {
2274 			ifq_set_oactive(ifq);
2275 			break;
2276 		}
2277 
2278 		m = ifq_dequeue(ifq);
2279 		if (m == NULL)
2280 			break;
2281 
2282 		txd = ring + idx;
2283 		as = &tx->tx_slots[idx];
2284 
2285 		if (m_defrag(m, M_DONTWAIT) != 0) {
2286 			m_freem(m);
2287 			break;
2288 		}
2289 
2290 		if (bus_dmamap_load_mbuf(sc->sc_dmat, as->as_map, m,
2291 		    BUS_DMA_STREAMING | BUS_DMA_NOWAIT) != 0) {
2292 			m_freem(m);
2293 			break;
2294 		}
2295 
2296 		as->as_m = m;
2297 
2298 #if NBPFILTER > 0
2299 		if (ifq->ifq_if->if_bpf)
2300 			bpf_mtap_ether(ifq->ifq_if->if_bpf, m, BPF_DIRECTION_OUT);
2301 #endif
2302 		bus_dmamap_sync(sc->sc_dmat, as->as_map, 0,
2303 		    as->as_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2304 
2305 		ctl1 = AQ_TXDESC_CTL1_TYPE_TXD | (as->as_map->dm_segs[0].ds_len <<
2306 		    AQ_TXDESC_CTL1_BLEN_SHIFT) | AQ_TXDESC_CTL1_CMD_FCS |
2307 		    AQ_TXDESC_CTL1_CMD_EOP | AQ_TXDESC_CTL1_CMD_WB;
2308 		ctl2 = m->m_pkthdr.len << AQ_TXDESC_CTL2_LEN_SHIFT;
2309 
2310 		txd->buf_addr = htole64(as->as_map->dm_segs[0].ds_addr);
2311 		txd->ctl1 = htole32(ctl1);
2312 		txd->ctl2 = htole32(ctl2);
2313 
2314 		idx++;
2315 		if (idx == AQ_TXD_NUM)
2316 			idx = 0;
2317 		used++;
2318 	}
2319 
2320 	bus_dmamap_sync(sc->sc_dmat, AQ_DMA_MAP(&tx->tx_mem), 0,
2321 	    AQ_DMA_LEN(&tx->tx_mem), BUS_DMASYNC_PREWRITE);
2322 
2323 	if (used != 0) {
2324 		tx->tx_prod = idx;
2325 		AQ_WRITE_REG(sc, TX_DMA_DESC_TAIL_PTR_REG(tx->tx_q),
2326 		    tx->tx_prod);
2327 	}
2328 }
2329 
2330 int
2331 aq_intr(void *arg)
2332 {
2333 	struct aq_softc *sc = arg;
2334 	struct aq_queues *aq = &sc->sc_queues[0];
2335 	uint32_t status;
2336 
2337 	status = AQ_READ_REG(sc, AQ_INTR_STATUS_REG);
2338 	AQ_WRITE_REG(sc, AQ_INTR_STATUS_CLR_REG, 0xffffffff);
2339 
2340 	if (status & (1 << sc->sc_linkstat_irq))
2341 		aq_update_link_status(sc);
2342 
2343 	if (status & (1 << aq->q_tx.tx_irq)) {
2344 		aq_txeof(sc, &aq->q_tx);
2345 		AQ_WRITE_REG(sc, AQ_INTR_STATUS_CLR_REG,
2346 		    (1 << aq->q_tx.tx_irq));
2347 	}
2348 	if (status & (1 << aq->q_rx.rx_irq)) {
2349 		aq_rxeof(sc, &aq->q_rx);
2350 		AQ_WRITE_REG(sc, AQ_INTR_STATUS_CLR_REG,
2351 		    (1 << aq->q_rx.rx_irq));
2352 	}
2353 
2354 	return 1;
2355 }
2356 
2357 void
2358 aq_watchdog(struct ifnet *ifp)
2359 {
2360 
2361 }
2362 
2363 void
2364 aq_free_slots(struct aq_softc *sc, struct aq_slot *slots, int allocated,
2365     int total)
2366 {
2367 	struct aq_slot *as;
2368 
2369 	int i = allocated;
2370 	while (i-- > 0) {
2371 		as = &slots[i];
2372 		bus_dmamap_destroy(sc->sc_dmat, as->as_map);
2373 		if (as->as_m != NULL)
2374 			m_freem(as->as_m);
2375 	}
2376 	free(slots, M_DEVBUF, total * sizeof(*as));
2377 }
2378 
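/*
 * Allocate per-queue resources: RX slots and their DMA maps, the RX
 * descriptor ring, then the same for TX, unwinding in reverse order
 * if anything fails.
 */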
2379 int
2380 aq_queue_up(struct aq_softc *sc, struct aq_queues *aq)
2381 {
2382 	struct aq_rxring *rx;
2383 	struct aq_txring *tx;
2384 	struct aq_slot *as;
2385 	int i;
2386 
2387 	rx = &aq->q_rx;
2388 	rx->rx_slots = mallocarray(AQ_RXD_NUM, sizeof(*as), M_DEVBUF,
2389 	    M_WAITOK | M_ZERO);
2390 	if (rx->rx_slots == NULL) {
2391 		printf("%s: failed to allocate rx slots %d\n", DEVNAME(sc),
2392 		    aq->q_index);
2393 		return ENOMEM;
2394 	}
2395 
2396 	for (i = 0; i < AQ_RXD_NUM; i++) {
2397 		as = &rx->rx_slots[i];
2398 		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0,
2399 		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
2400 		    &as->as_map) != 0) {
2401 			printf("%s: failed to allocate rx dma maps %d\n",
2402 			    DEVNAME(sc), aq->q_index);
2403 			goto destroy_rx_slots;
2404 		}
2405 	}
2406 
2407 	if (aq_dmamem_alloc(sc, &rx->rx_mem, AQ_RXD_NUM *
2408 	    sizeof(struct aq_rx_desc_read), PAGE_SIZE) != 0) {
2409 		printf("%s: unable to allocate rx ring %d\n", DEVNAME(sc),
2410 		    aq->q_index);
2411 		goto destroy_rx_slots;
2412 	}
2413 
2414 	tx = &aq->q_tx;
2415 	tx->tx_slots = mallocarray(AQ_TXD_NUM, sizeof(*as), M_DEVBUF,
2416 	    M_WAITOK | M_ZERO);
2417 	if (tx->tx_slots == NULL) {
2418 		printf("%s: failed to allocate tx slots %d\n", DEVNAME(sc),
2419 		    aq->q_index);
2420 		goto destroy_rx_ring;
2421 	}
2422 
2423 	for (i = 0; i < AQ_TXD_NUM; i++) {
2424 		as = &tx->tx_slots[i];
2425 		if (bus_dmamap_create(sc->sc_dmat, MCLBYTES,
2426 		    AQ_TX_MAX_SEGMENTS, MCLBYTES, 0,
2427 		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
2428 		    &as->as_map) != 0) {
2429 			printf("%s: failed to allocate tx dma maps %d\n",
2430 			    DEVNAME(sc), aq->q_index);
2431 			goto destroy_tx_slots;
2432 		}
2433 	}
2434 
2435 	if (aq_dmamem_alloc(sc, &tx->tx_mem, AQ_TXD_NUM *
2436 	    sizeof(struct aq_tx_desc), PAGE_SIZE) != 0) {
2437 		printf("%s: unable to allocate tx ring %d\n", DEVNAME(sc),
2438 		    aq->q_index);
2439 		goto destroy_tx_slots;
2440 	}
2441 
2442 	aq_txring_reset(sc, tx, 1);
2443 	aq_rxring_reset(sc, rx, 1);
2444 	return 0;
2445 
2446 destroy_tx_slots:
2447 	aq_free_slots(sc, tx->tx_slots, i, AQ_TXD_NUM);
2448 	tx->tx_slots = NULL;
2449 	i = AQ_RXD_NUM;
2450 
2451 destroy_rx_ring:
2452 	aq_dmamem_free(sc, &rx->rx_mem);
2453 destroy_rx_slots:
2454 	aq_free_slots(sc, rx->rx_slots, i, AQ_RXD_NUM);
2455 	rx->rx_slots = NULL;
2456 	return ENOMEM;
2457 }
2458 
2459 void
2460 aq_queue_down(struct aq_softc *sc, struct aq_queues *aq)
2461 {
2462 	struct aq_txring *tx;
2463 	struct aq_rxring *rx;
2464 
2465 	tx = &aq->q_tx;
2466 	aq_txring_reset(sc, &aq->q_tx, 0);
2467 	if (tx->tx_slots != NULL) {
2468 		aq_free_slots(sc, tx->tx_slots, AQ_TXD_NUM, AQ_TXD_NUM);
2469 		tx->tx_slots = NULL;
2470 	}
2471 
2472 	aq_dmamem_free(sc, &tx->tx_mem);
2473 
2474 	rx = &aq->q_rx;
2475 	aq_rxring_reset(sc, &aq->q_rx, 0);
2476 	if (rx->rx_slots != NULL) {
2477 		aq_free_slots(sc, rx->rx_slots, AQ_RXD_NUM, AQ_RXD_NUM);
2478 		rx->rx_slots = NULL;
2479 	}
2480 
2481 	aq_dmamem_free(sc, &rx->rx_mem);
2482 }
2483 
2484 int
2485 aq_up(struct aq_softc *sc)
2486 {
2487 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2488 	int i;
2489 
2490 	for (i = 0; i < sc->sc_nqueues; i++) {
2491 		if (aq_queue_up(sc, &sc->sc_queues[i]) != 0)
2492 			goto downqueues;
2493 	}
2494 
2495 	/* filters? */
2496 	/* enable checksum offload */
2497 
2498 	SET(ifp->if_flags, IFF_RUNNING);
2499 	aq_enable_intr(sc, 1, 1);
2500 	AQ_WRITE_REG_BIT(sc, TPB_TX_BUF_REG, TPB_TX_BUF_EN, 1);
2501 	AQ_WRITE_REG_BIT(sc, RPB_RPF_RX_REG, RPB_RPF_RX_BUF_EN, 1);
2502 
2503 	for (i = 0; i < sc->sc_nqueues; i++) {
2504 		struct aq_queues *aq = &sc->sc_queues[i];
2505 
2506 		if_rxr_init(&aq->q_rx.rx_rxr, 1, AQ_RXD_NUM - 1);
2507 		aq_rx_fill(sc, &aq->q_rx);
2508 
2509 		ifq_clr_oactive(aq->q_tx.tx_ifq);
2510 	}
2511 
2512 	return ENETRESET;
2513 
2514 downqueues:
2515 	for (i = 0; i < sc->sc_nqueues; i++)
2516 		aq_queue_down(sc, &sc->sc_queues[i]);
2517 	return ENOMEM;
2518 }
2519 
2520 void
2521 aq_down(struct aq_softc *sc)
2522 {
2523 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2524 	int i;
2525 
2526 	CLR(ifp->if_flags, IFF_RUNNING);
2527 
2528 	aq_enable_intr(sc, 1, 0);
2529 	intr_barrier(sc->sc_ih);
2530 
2531 	for (i = 0; i < sc->sc_nqueues; i++) {
2532 		/* queue intr barrier? */
2533 		aq_queue_down(sc, &sc->sc_queues[i]);
2534 	}
2535 }
2536 
2537 void
2538 aq_enable_intr(struct aq_softc *sc, int link, int txrx)
2539 {
2540 	uint32_t imask = 0;
2541 	int i;
2542 
2543 	if (txrx) {
2544 		for (i = 0; i < sc->sc_nqueues; i++) {
2545 			imask |= (1 << sc->sc_queues[i].q_tx.tx_irq);
2546 			imask |= (1 << sc->sc_queues[i].q_rx.rx_irq);
2547 		}
2548 	}
2549 
2550 	if (link)
2551 		imask |= (1 << sc->sc_linkstat_irq);
2552 
2553 	AQ_WRITE_REG(sc, AQ_INTR_MASK_REG, imask);
2554 	AQ_WRITE_REG(sc, AQ_INTR_STATUS_CLR_REG, 0xffffffff);
2555 }
2556 
2557 void
2558 aq_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2559 {
2560 	struct aq_softc *aq = ifp->if_softc;
2561 	enum aq_link_speed speed;
2562 	enum aq_link_fc fc;
2563 	int media;
2564 	int flow;
2565 
2566 	if (aq_get_linkmode(aq, &speed, &fc, NULL) != 0)
2567 		return;
2568 
2569 	switch (speed) {
2570 	case AQ_LINK_10G:
2571 		media = IFM_10G_T;
2572 		break;
2573 	case AQ_LINK_5G:
2574 		media = IFM_5000_T;
2575 		break;
2576 	case AQ_LINK_2G5:
2577 		media = IFM_2500_T;
2578 		break;
2579 	case AQ_LINK_1G:
2580 		media = IFM_1000_T;
2581 		break;
2582 	case AQ_LINK_100M:
2583 		media = IFM_100_TX;
2584 		break;
2585 	case AQ_LINK_NONE:
2586 		media = 0;
2587 		break;
2588 	}
2589 
2590 	flow = 0;
2591 	if (fc & AQ_FC_RX)
2592 		flow |= IFM_ETH_RXPAUSE;
2593 	if (fc & AQ_FC_TX)
2594 		flow |= IFM_ETH_TXPAUSE;
2595 
2596 	ifmr->ifm_status = IFM_AVALID;
2597 	if (speed != AQ_LINK_NONE) {
2598 		ifmr->ifm_status |= IFM_ACTIVE;
2599 		ifmr->ifm_active = IFM_ETHER | IFM_AUTO | media | flow;
2600 	}
2601 }
2602 
2603 int
2604 aq_ifmedia_change(struct ifnet *ifp)
2605 {
2606 	struct aq_softc *sc = ifp->if_softc;
2607 	enum aq_link_speed rate = AQ_LINK_NONE;
2608 	enum aq_link_fc fc = AQ_FC_NONE;
2609 
2610 	if (IFM_TYPE(sc->sc_media.ifm_media) != IFM_ETHER)
2611 		return EINVAL;
2612 
2613 	switch (IFM_SUBTYPE(sc->sc_media.ifm_media)) {
2614 	case IFM_AUTO:
2615 		rate = AQ_LINK_AUTO;
2616 		break;
2617 	case IFM_NONE:
2618 		rate = AQ_LINK_NONE;
2619 		break;
2620 	case IFM_100_TX:
2621 		rate = AQ_LINK_100M;
2622 		break;
2623 	case IFM_1000_T:
2624 		rate = AQ_LINK_1G;
2625 		break;
2626 	case IFM_2500_T:
2627 		rate = AQ_LINK_2G5;
2628 		break;
2629 	case IFM_5000_T:
2630 		rate = AQ_LINK_5G;
2631 		break;
2632 	case IFM_10G_T:
2633 		rate = AQ_LINK_10G;
2634 		break;
2635 	default:
2636 		return ENODEV;
2637 	}
2638 
2639 	if (sc->sc_media.ifm_media & IFM_FLOW)
2640 		fc = AQ_FC_ALL;
2641 
2642 	return aq_set_linkmode(sc, rate, fc, AQ_EEE_DISABLE);
2643 }
2644 
2645 void
2646 aq_update_link_status(struct aq_softc *sc)
2647 {
2648 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2649 	enum aq_link_speed speed;
2650 	enum aq_link_fc fc;
2651 
2652 	if (aq_get_linkmode(sc, &speed, &fc, NULL) != 0)
2653 		return;
2654 
2655 	if (speed == AQ_LINK_NONE) {
2656 		if (ifp->if_link_state != LINK_STATE_DOWN) {
2657 			ifp->if_link_state = LINK_STATE_DOWN;
2658 			if_link_state_change(ifp);
2659 		}
2660 	} else {
2661 		if (ifp->if_link_state != LINK_STATE_FULL_DUPLEX) {
2662 			ifp->if_link_state = LINK_STATE_FULL_DUPLEX;
2663 			if_link_state_change(ifp);
2664 		}
2665 	}
2666 }
2667 
2668 
2669 int
2670 aq_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2671 {
2672 	struct aq_softc *sc = ifp->if_softc;
2673 	struct ifreq *ifr = (struct ifreq *)data;
2674 	int error = 0, s;
2675 
2676 	s = splnet();
2677 
2678 	switch (cmd) {
2679 	case SIOCSIFADDR:
2680 		ifp->if_flags |= IFF_UP;
2681 		if ((ifp->if_flags & IFF_RUNNING) == 0)
2682 			error = aq_up(sc);
2683 		break;
2684 	case SIOCSIFFLAGS:
2685 		if (ifp->if_flags & IFF_UP) {
2686 			if (ifp->if_flags & IFF_RUNNING)
2687 				error = ENETRESET;
2688 			else
2689 				error = aq_up(sc);
2690 		} else {
2691 			if (ifp->if_flags & IFF_RUNNING)
2692 				aq_down(sc);
2693 		}
2694 		break;
2695 	case SIOCSIFMEDIA:
2696 	case SIOCGIFMEDIA:
2697 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
2698 		break;
2699 	default:
2700 		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
2701 	}
2702 
2703 	if (error == ENETRESET) {
2704 		if (ifp->if_flags & IFF_RUNNING)
2705 			aq_iff(sc);
2706 		error = 0;
2707 	}
2708 
2709 	splx(s);
2710 	return error;
2711 }
2712 
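/*
 * Rebuild the receive filters: promiscuous or allmulti when needed,
 * otherwise each multicast group gets its own L2 filter slot after
 * the interface's own address.
 */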
2713 void
2714 aq_iff(struct aq_softc *sc)
2715 {
2716 	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
2717 	struct arpcom *ac = &sc->sc_arpcom;
2718 	struct ether_multi *enm;
2719 	struct ether_multistep step;
2720 	int idx;
2721 
2722 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
2723 		ifp->if_flags |= IFF_ALLMULTI;
2724 		AQ_WRITE_REG_BIT(sc, RPF_L2BC_REG, RPF_L2BC_PROMISC, 1);
2725 	} else if (ac->ac_multicnt >= AQ_HW_MAC_NUM ||
2726 	    ISSET(ifp->if_flags, IFF_ALLMULTI)) {
2727 		AQ_WRITE_REG_BIT(sc, RPF_L2BC_REG, RPF_L2BC_PROMISC, 0);
2728 		AQ_WRITE_REG_BIT(sc, RPF_MCAST_FILTER_MASK_REG,
2729 		    RPF_MCAST_FILTER_MASK_ALLMULTI, 1);
2730 		AQ_WRITE_REG_BIT(sc, RPF_MCAST_FILTER_REG(0),
2731 		    RPF_MCAST_FILTER_EN, 1);
2732 	} else if (ac->ac_multicnt == 0) {
2733 		AQ_WRITE_REG_BIT(sc, RPF_L2BC_REG, RPF_L2BC_PROMISC, 0);
2734 		AQ_WRITE_REG_BIT(sc, RPF_MCAST_FILTER_REG(0),
2735 		    RPF_MCAST_FILTER_EN, 0);
2736 	} else {
2737 		idx = AQ_HW_MAC_OWN + 1;
2738 
2739 		/* turn on allmulti while we're rewriting? */
2740 		AQ_WRITE_REG_BIT(sc, RPF_L2BC_REG, RPF_L2BC_PROMISC, 0);
2741 
2742 		ETHER_FIRST_MULTI(step, ac, enm);
2743 		while (enm != NULL) {
2744 			aq_set_mac_addr(sc, idx++, enm->enm_addrlo);
2745 			ETHER_NEXT_MULTI(step, enm);
2746 		}
2747 
2748 		for (; idx < AQ_HW_MAC_NUM; idx++)
2749 			aq_set_mac_addr(sc, idx, NULL);
2750 
2751 		AQ_WRITE_REG_BIT(sc, RPF_MCAST_FILTER_MASK_REG,
2752 		    RPF_MCAST_FILTER_MASK_ALLMULTI, 0);
2753 		AQ_WRITE_REG_BIT(sc, RPF_MCAST_FILTER_REG(0),
2754 		    RPF_MCAST_FILTER_EN, 1);
2755 	}
2756 }
2757 
2758 int
2759 aq_dmamem_alloc(struct aq_softc *sc, struct aq_dmamem *aqm,
2760     bus_size_t size, u_int align)
2761 {
2762 	aqm->aqm_size = size;
2763 
2764 	if (bus_dmamap_create(sc->sc_dmat, aqm->aqm_size, 1,
2765 	    aqm->aqm_size, 0,
2766 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
2767 	    &aqm->aqm_map) != 0)
2768 		return (1);
2769 	if (bus_dmamem_alloc(sc->sc_dmat, aqm->aqm_size,
2770 	    align, 0, &aqm->aqm_seg, 1, &aqm->aqm_nsegs,
2771 	    BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
2772 		goto destroy;
2773 	if (bus_dmamem_map(sc->sc_dmat, &aqm->aqm_seg, aqm->aqm_nsegs,
2774 	    aqm->aqm_size, &aqm->aqm_kva, BUS_DMA_WAITOK) != 0)
2775 		goto free;
2776 	if (bus_dmamap_load(sc->sc_dmat, aqm->aqm_map, aqm->aqm_kva,
2777 	    aqm->aqm_size, NULL, BUS_DMA_WAITOK) != 0)
2778 		goto unmap;
2779 
2780 	return (0);
2781 unmap:
2782 	bus_dmamem_unmap(sc->sc_dmat, aqm->aqm_kva, aqm->aqm_size);
2783 free:
2784 	bus_dmamem_free(sc->sc_dmat, &aqm->aqm_seg, 1);
2785 destroy:
2786 	bus_dmamap_destroy(sc->sc_dmat, aqm->aqm_map);
2787 	return (1);
2788 }
2789 
2790 void
2791 aq_dmamem_free(struct aq_softc *sc, struct aq_dmamem *aqm)
2792 {
2793 	bus_dmamap_unload(sc->sc_dmat, aqm->aqm_map);
2794 	bus_dmamem_unmap(sc->sc_dmat, aqm->aqm_kva, aqm->aqm_size);
2795 	bus_dmamem_free(sc->sc_dmat, &aqm->aqm_seg, 1);
2796 	bus_dmamap_destroy(sc->sc_dmat, aqm->aqm_map);
2797 }
2798