1 /*	$OpenBSD: if_ixl.c,v 1.7 2017/12/21 03:58:27 dlg Exp $ */
2 
3 /*
4  * Copyright (c) 2013-2015, Intel Corporation
5  * All rights reserved.
6 
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions are met:
9  *
10  *  1. Redistributions of source code must retain the above copyright notice,
11  *     this list of conditions and the following disclaimer.
12  *
13  *  2. Redistributions in binary form must reproduce the above copyright
14  *     notice, this list of conditions and the following disclaimer in the
15  *     documentation and/or other materials provided with the distribution.
16  *
17  *  3. Neither the name of the Intel Corporation nor the names of its
18  *     contributors may be used to endorse or promote products derived from
19  *     this software without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 /*
35  * Copyright (c) 2016,2017 David Gwynne <dlg@openbsd.org>
36  *
37  * Permission to use, copy, modify, and distribute this software for any
38  * purpose with or without fee is hereby granted, provided that the above
39  * copyright notice and this permission notice appear in all copies.
40  *
41  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
42  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
43  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
44  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
45  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
46  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
47  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
48  */
49 
50 #include "bpfilter.h"
51 
52 #include <sys/param.h>
53 #include <sys/systm.h>
54 #include <sys/sockio.h>
55 #include <sys/mbuf.h>
56 #include <sys/kernel.h>
57 #include <sys/socket.h>
58 #include <sys/device.h>
59 #include <sys/pool.h>
60 #include <sys/queue.h>
61 #include <sys/timeout.h>
62 #include <sys/task.h>
63 
64 #include <machine/bus.h>
65 #include <machine/intr.h>
66 
67 #include <net/if.h>
68 #include <net/if_dl.h>
69 #include <net/if_media.h>
70 
71 #if NBPFILTER > 0
72 #include <net/bpf.h>
73 #endif
74 
75 #include <netinet/in.h>
76 #include <netinet/if_ether.h>
77 
78 #include <dev/pci/pcireg.h>
79 #include <dev/pci/pcivar.h>
80 #include <dev/pci/pcidevs.h>
81 
82 #define I40E_MASK(mask, shift)		((mask) << (shift))
83 #define I40E_PF_RESET_WAIT_COUNT	200
84 #define I40E_AQ_LARGE_BUF		512
85 
86 /* bitfields for Tx queue mapping in QTX_CTL */
87 #define I40E_QTX_CTL_VF_QUEUE		0x0
88 #define I40E_QTX_CTL_VM_QUEUE		0x1
89 #define I40E_QTX_CTL_PF_QUEUE		0x2
90 
91 #define I40E_QUEUE_TYPE_EOL		0x7ff
93 
94 #define I40E_QUEUE_TYPE_RX		0x0
95 #define I40E_QUEUE_TYPE_TX		0x1
96 #define I40E_QUEUE_TYPE_PE_CEQ		0x2
97 #define I40E_QUEUE_TYPE_UNKNOWN		0x3
98 
99 #define I40E_ITR_INDEX_RX		0x0
100 #define I40E_ITR_INDEX_TX		0x1
101 #define I40E_ITR_INDEX_OTHER		0x2
102 #define I40E_ITR_INDEX_NONE		0x3
103 
104 #include <dev/pci/if_ixlreg.h>
105 
106 #define I40E_INTR_NOTX_QUEUE		0
107 #define I40E_INTR_NOTX_INTR		0
108 #define I40E_INTR_NOTX_RX_QUEUE		0
109 #define I40E_INTR_NOTX_TX_QUEUE		1
110 #define I40E_INTR_NOTX_RX_MASK		I40E_PFINT_ICR0_QUEUE_0_MASK
111 #define I40E_INTR_NOTX_TX_MASK		I40E_PFINT_ICR0_QUEUE_1_MASK
112 
113 struct ixl_aq_desc {
114 	uint16_t	iaq_flags;
115 #define	IXL_AQ_DD		(1U << 0)
116 #define	IXL_AQ_CMP		(1U << 1)
117 #define IXL_AQ_ERR		(1U << 2)
118 #define IXL_AQ_VFE		(1U << 3)
119 #define IXL_AQ_LB		(1U << 9)
120 #define IXL_AQ_RD		(1U << 10)
121 #define IXL_AQ_VFC		(1U << 11)
122 #define IXL_AQ_BUF		(1U << 12)
123 #define IXL_AQ_SI		(1U << 13)
124 #define IXL_AQ_EI		(1U << 14)
125 #define IXL_AQ_FE		(1U << 15)
126 
127 #define IXL_AQ_FLAGS_FMT	"\020" "\020FE" "\017EI" "\016SI" "\015BUF" \
128 				    "\014VFC" "\013RD" "\012LB" "\004VFE" \
129 				    "\003ERR" "\002CMP" "\001DD"
130 
131 	uint16_t	iaq_opcode;
132 
133 	uint16_t	iaq_datalen;
134 	uint16_t	iaq_retval;
135 
136 	uint64_t	iaq_cookie;
137 
138 	uint32_t	iaq_param[4];
139 /*	iaq_data_hi	iaq_param[2] */
140 /*	iaq_data_lo	iaq_param[3] */
141 } __packed __aligned(8);
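
/*
 * An illustrative sketch, not part of the driver: once the hardware has
 * finished with a descriptor it sets IXL_AQ_DD in iaq_flags and leaves an
 * IXL_AQ_RC_* code in iaq_retval.  IXL_AQ_FLAGS_FMT is a printf(9) %b
 * format string, so the flags word can be decoded symbolically.  "iaq"
 * and "sc" here are hypothetical.
 */
#if 0
	if (ISSET(lemtoh16(&iaq->iaq_flags), IXL_AQ_DD)) {
		/* decode the le16 flags word and the return value */
		printf("%s: aq flags %b retval %u\n", DEVNAME(sc),
		    lemtoh16(&iaq->iaq_flags), IXL_AQ_FLAGS_FMT,
		    lemtoh16(&iaq->iaq_retval));
	}
#endif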
142 
143 /* aq commands */
144 #define IXL_AQ_OP_GET_VERSION		0x0001
145 #define IXL_AQ_OP_DRIVER_VERSION	0x0002
146 #define IXL_AQ_OP_QUEUE_SHUTDOWN	0x0003
147 #define IXL_AQ_OP_SET_PF_CONTEXT	0x0004
148 #define IXL_AQ_OP_GET_AQ_ERR_REASON	0x0005
149 #define IXL_AQ_OP_REQUEST_RESOURCE	0x0008
150 #define IXL_AQ_OP_RELEASE_RESOURCE	0x0009
151 #define IXL_AQ_OP_LIST_FUNC_CAP		0x000a
152 #define IXL_AQ_OP_LIST_DEV_CAP		0x000b
153 #define IXL_AQ_OP_MAC_ADDRESS_READ	0x0107
154 #define IXL_AQ_OP_CLEAR_PXE_MODE	0x0110
155 #define IXL_AQ_OP_SWITCH_GET_CONFIG	0x0200
156 #define IXL_AQ_OP_ADD_VSI		0x0210
157 #define IXL_AQ_OP_UPD_VSI_PARAMS	0x0211
158 #define IXL_AQ_OP_GET_VSI_PARAMS	0x0212
159 #define IXL_AQ_OP_ADD_VEB		0x0230
160 #define IXL_AQ_OP_UPD_VEB_PARAMS	0x0231
161 #define IXL_AQ_OP_GET_VEB_PARAMS	0x0232
162 #define IXL_AQ_OP_SET_VSI_PROMISC	0x0254
163 #define IXL_AQ_OP_PHY_GET_ABILITIES	0x0600
164 #define IXL_AQ_OP_PHY_SET_CONFIG	0x0601
165 #define IXL_AQ_OP_PHY_SET_MAC_CONFIG	0x0603
166 #define IXL_AQ_OP_PHY_RESTART_AN	0x0605
167 #define IXL_AQ_OP_PHY_LINK_STATUS	0x0607
168 #define IXL_AQ_OP_PHY_SET_EVENT_MASK	0x0613
169 #define IXL_AQ_OP_LLDP_GET_MIB		0x0a00
170 #define IXL_AQ_OP_LLDP_MIB_CHG_EV	0x0a01
171 #define IXL_AQ_OP_LLDP_ADD_TLV		0x0a02
172 #define IXL_AQ_OP_LLDP_UPD_TLV		0x0a03
173 #define IXL_AQ_OP_LLDP_DEL_TLV		0x0a04
174 #define IXL_AQ_OP_LLDP_STOP_AGENT	0x0a05
175 #define IXL_AQ_OP_LLDP_START_AGENT	0x0a06
176 #define IXL_AQ_OP_LLDP_GET_CEE_DCBX	0x0a07
177 #define IXL_AQ_OP_LLDP_SPECIFIC_AGENT	0x0a09
178 
179 struct ixl_aq_mac_addresses {
180 	uint8_t		pf_lan[ETHER_ADDR_LEN];
181 	uint8_t		pf_san[ETHER_ADDR_LEN];
182 	uint8_t		port[ETHER_ADDR_LEN];
183 	uint8_t		pf_wol[ETHER_ADDR_LEN];
184 } __packed;
185 
186 #define IXL_AQ_MAC_PF_LAN_VALID		(1U << 4)
187 #define IXL_AQ_MAC_PF_SAN_VALID		(1U << 5)
188 #define IXL_AQ_MAC_PORT_VALID		(1U << 6)
189 #define IXL_AQ_MAC_PF_WOL_VALID		(1U << 7)
190 
191 struct ixl_aq_capability {
192 	uint16_t	cap_id;
193 #define IXL_AQ_CAP_SWITCH_MODE		0x0001
194 #define IXL_AQ_CAP_MNG_MODE		0x0002
195 #define IXL_AQ_CAP_NPAR_ACTIVE		0x0003
196 #define IXL_AQ_CAP_OS2BMC_CAP		0x0004
197 #define IXL_AQ_CAP_FUNCTIONS_VALID	0x0005
198 #define IXL_AQ_CAP_ALTERNATE_RAM	0x0006
199 #define IXL_AQ_CAP_WOL_AND_PROXY	0x0008
200 #define IXL_AQ_CAP_SRIOV		0x0012
201 #define IXL_AQ_CAP_VF			0x0013
202 #define IXL_AQ_CAP_VMDQ			0x0014
203 #define IXL_AQ_CAP_8021QBG		0x0015
204 #define IXL_AQ_CAP_8021QBR		0x0016
205 #define IXL_AQ_CAP_VSI			0x0017
206 #define IXL_AQ_CAP_DCB			0x0018
207 #define IXL_AQ_CAP_FCOE			0x0021
208 #define IXL_AQ_CAP_ISCSI		0x0022
209 #define IXL_AQ_CAP_RSS			0x0040
210 #define IXL_AQ_CAP_RXQ			0x0041
211 #define IXL_AQ_CAP_TXQ			0x0042
212 #define IXL_AQ_CAP_MSIX			0x0043
213 #define IXL_AQ_CAP_VF_MSIX		0x0044
214 #define IXL_AQ_CAP_FLOW_DIRECTOR	0x0045
215 #define IXL_AQ_CAP_1588			0x0046
216 #define IXL_AQ_CAP_IWARP		0x0051
217 #define IXL_AQ_CAP_LED			0x0061
218 #define IXL_AQ_CAP_SDP			0x0062
219 #define IXL_AQ_CAP_MDIO			0x0063
220 #define IXL_AQ_CAP_WSR_PROT		0x0064
221 #define IXL_AQ_CAP_NVM_MGMT		0x0080
222 #define IXL_AQ_CAP_FLEX10		0x00F1
223 #define IXL_AQ_CAP_CEM			0x00F2
224 	uint8_t		major_rev;
225 	uint8_t		minor_rev;
226 	uint32_t	number;
227 	uint32_t	logical_id;
228 	uint32_t	phys_id;
229 	uint8_t		_reserved[16];
230 } __packed __aligned(4);
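
/*
 * An illustrative sketch, not part of the driver: the LIST_FUNC_CAP and
 * LIST_DEV_CAP commands return an array of these records in the command
 * buffer.  A consumer walks the array looking for a capability id; all
 * multi-byte fields are little-endian.  "caps", "ncaps" and "nmsix" are
 * hypothetical.
 */
#if 0
	for (i = 0; i < ncaps; i++) {
		const struct ixl_aq_capability *c = &caps[i];

		if (lemtoh16(&c->cap_id) == IXL_AQ_CAP_MSIX)
			nmsix = lemtoh32(&c->number);
	}
#endif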
231 
232 #define IXL_LLDP_SHUTDOWN		0x1
233 
234 struct ixl_aq_switch_config {
235 	uint16_t	num_reported;
236 	uint16_t	num_total;
237 	uint8_t		_reserved[12];
238 } __packed __aligned(4);
239 
240 struct ixl_aq_switch_config_element {
241 	uint8_t		type;
242 #define IXL_AQ_SW_ELEM_TYPE_MAC		1
243 #define IXL_AQ_SW_ELEM_TYPE_PF		2
244 #define IXL_AQ_SW_ELEM_TYPE_VF		3
245 #define IXL_AQ_SW_ELEM_TYPE_EMP		4
246 #define IXL_AQ_SW_ELEM_TYPE_BMC		5
247 #define IXL_AQ_SW_ELEM_TYPE_PV		16
248 #define IXL_AQ_SW_ELEM_TYPE_VEB		17
249 #define IXL_AQ_SW_ELEM_TYPE_PA		18
250 #define IXL_AQ_SW_ELEM_TYPE_VSI		19
251 	uint8_t		revision;
252 #define IXL_AQ_SW_ELEM_REV_1		1
253 	uint16_t	seid;
254 
255 	uint16_t	uplink_seid;
256 	uint16_t	downlink_seid;
257 
258 	uint8_t		_reserved[3];
259 	uint8_t		connection_type;
260 #define IXL_AQ_CONN_TYPE_REGULAR	0x1
261 #define IXL_AQ_CONN_TYPE_DEFAULT	0x2
262 #define IXL_AQ_CONN_TYPE_CASCADED	0x3
263 
264 	uint16_t	scheduler_id;
265 	uint16_t	element_info;
266 } __packed __aligned(4);
267 
268 #define IXL_PHY_TYPE_SGMII		0x00
269 #define IXL_PHY_TYPE_1000BASE_KX	0x01
270 #define IXL_PHY_TYPE_10GBASE_KX4	0x02
271 #define IXL_PHY_TYPE_10GBASE_KR		0x03
272 #define IXL_PHY_TYPE_40GBASE_KR4	0x04
273 #define IXL_PHY_TYPE_XAUI		0x05
274 #define IXL_PHY_TYPE_XFI		0x06
275 #define IXL_PHY_TYPE_SFI		0x07
276 #define IXL_PHY_TYPE_XLAUI		0x08
277 #define IXL_PHY_TYPE_XLPPI		0x09
278 #define IXL_PHY_TYPE_40GBASE_CR4_CU	0x0a
279 #define IXL_PHY_TYPE_10GBASE_CR1_CU	0x0b
280 #define IXL_PHY_TYPE_10GBASE_AOC	0x0c
281 #define IXL_PHY_TYPE_40GBASE_AOC	0x0d
282 #define IXL_PHY_TYPE_100BASE_TX		0x11
283 #define IXL_PHY_TYPE_1000BASE_T		0x12
284 #define IXL_PHY_TYPE_10GBASE_T		0x13
285 #define IXL_PHY_TYPE_10GBASE_SR		0x14
286 #define IXL_PHY_TYPE_10GBASE_LR		0x15
287 #define IXL_PHY_TYPE_10GBASE_SFPP_CU	0x16
288 #define IXL_PHY_TYPE_10GBASE_CR1	0x17
289 #define IXL_PHY_TYPE_40GBASE_CR4	0x18
290 #define IXL_PHY_TYPE_40GBASE_SR4	0x19
291 #define IXL_PHY_TYPE_40GBASE_LR4	0x1a
292 #define IXL_PHY_TYPE_1000BASE_SX	0x1b
293 #define IXL_PHY_TYPE_1000BASE_LX	0x1c
294 #define IXL_PHY_TYPE_1000BASE_T_OPTICAL	0x1d
295 #define IXL_PHY_TYPE_20GBASE_KR2	0x1e
296 
297 #define IXL_PHY_TYPE_25GBASE_KR		0x1f
298 #define IXL_PHY_TYPE_25GBASE_CR		0x20
299 #define IXL_PHY_TYPE_25GBASE_SR		0x21
300 #define IXL_PHY_TYPE_25GBASE_LR		0x22
301 #define IXL_PHY_TYPE_25GBASE_AOC	0x23
302 #define IXL_PHY_TYPE_25GBASE_ACC	0x24
303 
304 struct ixl_aq_module_desc {
305 	uint8_t		oui[3];
306 	uint8_t		_reserved1;
307 	uint8_t		part_number[16];
308 	uint8_t		revision[4];
309 	uint8_t		_reserved2[8];
310 } __packed __aligned(4);
311 
312 struct ixl_aq_phy_abilities {
313 	uint32_t	phy_type;
314 
315 	uint8_t		link_speed;
316 #define IXL_AQ_PHY_LINK_SPEED_100MB	0x1
317 #define IXL_AQ_PHY_LINK_SPEED_1000MB	0x2
318 #define IXL_AQ_PHY_LINK_SPEED_10GB	0x3
319 #define IXL_AQ_PHY_LINK_SPEED_40GB	0x4
320 #define IXL_AQ_PHY_LINK_SPEED_20GB	0x5
321 #define IXL_AQ_PHY_LINK_SPEED_25GB	0x6
322 	uint8_t		abilities;
323 	uint16_t	eee_capability;
324 
325 	uint32_t	eeer_val;
326 
327 	uint8_t		d3_lpan;
328 	uint8_t		phy_type_ext;
329 #define IXL_AQ_PHY_TYPE_EXT_25G_KR	0x01
330 #define IXL_AQ_PHY_TYPE_EXT_25G_CR	0x02
331 #define IXL_AQ_PHY_TYPE_EXT_25G_SR	0x04
332 #define IXL_AQ_PHY_TYPE_EXT_25G_LR	0x08
333 	uint8_t		fec_cfg_curr_mod_ext_info;
334 #define IXL_AQ_ENABLE_FEC_KR		0x01
335 #define IXL_AQ_ENABLE_FEC_RS		0x02
336 #define IXL_AQ_REQUEST_FEC_KR		0x04
337 #define IXL_AQ_REQUEST_FEC_RS		0x08
338 #define IXL_AQ_ENABLE_FEC_AUTO		0x10
339 #define IXL_AQ_MODULE_TYPE_EXT_MASK	0xe0
340 #define IXL_AQ_MODULE_TYPE_EXT_SHIFT	5
341 	uint8_t		ext_comp_code;
342 
343 	uint8_t		phy_id[4];
344 
345 	uint8_t		module_type[3];
346 	uint8_t		qualified_module_count;
347 #define IXL_AQ_PHY_MAX_QMS		16
348 	struct ixl_aq_module_desc
349 			qualified_module[IXL_AQ_PHY_MAX_QMS];
350 } __packed __aligned(4);
351 
352 struct ixl_aq_vsi_param {
353 	uint16_t	uplink_seid;
354 	uint8_t		connect_type;
355 #define IXL_AQ_VSI_CONN_TYPE_NORMAL	(0x1)
356 #define IXL_AQ_VSI_CONN_TYPE_DEFAULT	(0x2)
357 #define IXL_AQ_VSI_CONN_TYPE_CASCADED	(0x3)
358 	uint8_t		_reserved1;
359 
360 	uint8_t		vf_id;
361 	uint8_t		_reserved2;
362 	uint16_t	vsi_flags;
363 #define IXL_AQ_VSI_TYPE_SHIFT		0x0
364 #define IXL_AQ_VSI_TYPE_MASK		(0x3 << IXL_AQ_VSI_TYPE_SHIFT)
365 #define IXL_AQ_VSI_TYPE_VF		0x0
366 #define IXL_AQ_VSI_TYPE_VMDQ2		0x1
367 #define IXL_AQ_VSI_TYPE_PF		0x2
368 #define IXL_AQ_VSI_TYPE_EMP_MNG		0x3
369 #define IXL_AQ_VSI_FLAG_CASCADED_PV	0x4
370 
371 	uint32_t	addr_hi;
372 	uint32_t	addr_lo;
373 } __packed __aligned(16);
374 
375 struct ixl_aq_vsi_reply {
376 	uint16_t	seid;
377 	uint16_t	vsi_number;
378 
379 	uint16_t	vsis_used;
380 	uint16_t	vsis_free;
381 
382 	uint32_t	addr_hi;
383 	uint32_t	addr_lo;
384 } __packed __aligned(16);
385 
386 struct ixl_aq_vsi_data {
387 	/* first 96 bytes are written by SW */
388 	uint16_t	valid_sections;
389 #define IXL_AQ_VSI_VALID_SWITCH		(1 << 0)
390 #define IXL_AQ_VSI_VALID_SECURITY	(1 << 1)
391 #define IXL_AQ_VSI_VALID_VLAN		(1 << 2)
392 #define IXL_AQ_VSI_VALID_CAS_PV		(1 << 3)
393 #define IXL_AQ_VSI_VALID_INGRESS_UP	(1 << 4)
394 #define IXL_AQ_VSI_VALID_EGRESS_UP	(1 << 5)
395 #define IXL_AQ_VSI_VALID_QUEUE_MAP	(1 << 6)
396 #define IXL_AQ_VSI_VALID_QUEUE_OPT	(1 << 7)
397 #define IXL_AQ_VSI_VALID_OUTER_UP	(1 << 8)
398 #define IXL_AQ_VSI_VALID_SCHED		(1 << 9)
399 	/* switch section */
400 	uint16_t	switch_id;
401 #define IXL_AQ_VSI_SWITCH_ID_SHIFT	0
402 #define IXL_AQ_VSI_SWITCH_ID_MASK	(0xfff << IXL_AQ_VSI_SWITCH_ID_SHIFT)
403 #define IXL_AQ_VSI_SWITCH_NOT_STAG	(1 << 12)
404 #define IXL_AQ_VSI_SWITCH_LOCAL_LB	(1 << 14)
405 
406 	uint8_t		_reserved1[2];
407 	/* security section */
408 	uint8_t		sec_flags;
409 #define IXL_AQ_VSI_SEC_ALLOW_DEST_OVRD	(1 << 0)
410 #define IXL_AQ_VSI_SEC_ENABLE_VLAN_CHK	(1 << 1)
411 #define IXL_AQ_VSI_SEC_ENABLE_MAC_CHK	(1 << 2)
412 	uint8_t		_reserved2;
413 
414 	/* vlan section */
415 	uint16_t	pvid;
416 	uint16_t	fcoe_pvid;
417 
418 	uint8_t		port_vlan_flags;
419 #define IXL_AQ_VSI_PVLAN_MODE_SHIFT	0
420 #define IXL_AQ_VSI_PVLAN_MODE_MASK	(0x3 << IXL_AQ_VSI_PVLAN_MODE_SHIFT)
421 #define IXL_AQ_VSI_PVLAN_MODE_TAGGED	(0x1 << IXL_AQ_VSI_PVLAN_MODE_SHIFT)
422 #define IXL_AQ_VSI_PVLAN_MODE_UNTAGGED	(0x2 << IXL_AQ_VSI_PVLAN_MODE_SHIFT)
423 #define IXL_AQ_VSI_PVLAN_MODE_ALL	(0x3 << IXL_AQ_VSI_PVLAN_MODE_SHIFT)
424 #define IXL_AQ_VSI_PVLAN_INSERT_PVID	(0x4 << IXL_AQ_VSI_PVLAN_MODE_SHIFT)
425 #define IXL_AQ_VSI_PVLAN_EMOD_SHIFT	0x3
426 #define IXL_AQ_VSI_PVLAN_EMOD_MASK	(0x3 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT)
427 #define IXL_AQ_VSI_PVLAN_EMOD_STR_BOTH	(0x0 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT)
428 #define IXL_AQ_VSI_PVLAN_EMOD_STR_UP	(0x1 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT)
429 #define IXL_AQ_VSI_PVLAN_EMOD_STR	(0x2 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT)
430 #define IXL_AQ_VSI_PVLAN_EMOD_NOTHING	(0x3 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT)
431 	uint8_t		_reserved3[3];
432 
433 	/* ingress egress up section */
434 	uint32_t	ingress_table;
435 #define IXL_AQ_VSI_UP_SHIFT(_up)	((_up) * 3)
436 #define IXL_AQ_VSI_UP_MASK(_up)		(0x7 << IXL_AQ_VSI_UP_SHIFT(_up))
437 	uint32_t	egress_table;
438 
439 	/* cascaded pv section */
440 	uint16_t	cas_pv_tag;
441 	uint8_t		cas_pv_flags;
442 #define IXL_AQ_VSI_CAS_PV_TAGX_SHIFT	0
443 #define IXL_AQ_VSI_CAS_PV_TAGX_MASK	(0x3 << IXL_AQ_VSI_CAS_PV_TAGX_SHIFT)
444 #define IXL_AQ_VSI_CAS_PV_TAGX_LEAVE	(0x0 << IXL_AQ_VSI_CAS_PV_TAGX_SHIFT)
445 #define IXL_AQ_VSI_CAS_PV_TAGX_REMOVE	(0x1 << IXL_AQ_VSI_CAS_PV_TAGX_SHIFT)
446 #define IXL_AQ_VSI_CAS_PV_TAGX_COPY	(0x2 << IXL_AQ_VSI_CAS_PV_TAGX_SHIFT)
447 #define IXL_AQ_VSI_CAS_PV_INSERT_TAG	(1 << 4)
448 #define IXL_AQ_VSI_CAS_PV_ETAG_PRUNE	(1 << 5)
449 #define IXL_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG \
450 					(1 << 6)
451 	uint8_t		_reserved4;
452 
453 	/* queue mapping section */
454 	uint16_t	mapping_flags;
455 #define IXL_AQ_VSI_QUE_MAP_MASK		0x1
456 #define IXL_AQ_VSI_QUE_MAP_CONTIG	0x0
457 #define IXL_AQ_VSI_QUE_MAP_NONCONTIG	0x1
458 	uint16_t	queue_mapping[16];
459 #define IXL_AQ_VSI_QUEUE_SHIFT		0x0
460 #define IXL_AQ_VSI_QUEUE_MASK		(0x7ff << IXL_AQ_VSI_QUEUE_SHIFT)
461 	uint16_t	tc_mapping[8];
462 #define IXL_AQ_VSI_TC_Q_OFFSET_SHIFT	0
463 #define IXL_AQ_VSI_TC_Q_OFFSET_MASK	(0x1ff << IXL_AQ_VSI_TC_Q_OFFSET_SHIFT)
464 #define IXL_AQ_VSI_TC_Q_NUMBER_SHIFT	9
465 #define IXL_AQ_VSI_TC_Q_NUMBER_MASK	(0x7 << IXL_AQ_VSI_TC_Q_NUMBER_SHIFT)
466 
467 	/* queueing option section */
468 	uint8_t		queueing_opt_flags;
469 #define IXL_AQ_VSI_QUE_OPT_MCAST_UDP_EN	(1 << 2)
470 #define IXL_AQ_VSI_QUE_OPT_UCAST_UDP_EN	(1 << 3)
471 #define IXL_AQ_VSI_QUE_OPT_TCP_EN	(1 << 4)
472 #define IXL_AQ_VSI_QUE_OPT_FCOE_EN	(1 << 5)
473 #define IXL_AQ_VSI_QUE_OPT_RSS_LUT_PF	0
474 #define IXL_AQ_VSI_QUE_OPT_RSS_LUT_VSI	(1 << 6)
475 	uint8_t		_reserved5[3];
476 
477 	/* scheduler section */
478 	uint8_t		up_enable_bits;
479 	uint8_t		_reserved6;
480 
481 	/* outer up section */
482 	uint32_t	outer_up_table; /* same as ingress/egress tables */
483 	uint8_t		_reserved7[8];
484 
485 	/* last 32 bytes are written by FW */
486 	uint16_t	qs_handle[8];
487 #define IXL_AQ_VSI_QS_HANDLE_INVALID	0xffff
488 	uint16_t	stat_counter_idx;
489 	uint16_t	sched_id;
490 
491 	uint8_t		_reserved8[12];
492 } __packed __aligned(8);
493 
494 CTASSERT(sizeof(struct ixl_aq_vsi_data) == 128);
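
/*
 * An illustrative sketch, not part of the driver: a VSI that owns a
 * single queue maps it contiguously.  The fields are little-endian on
 * the wire, hence htolem16().  The TC queue count is encoded as a
 * power-of-two exponent, so 0 below means one queue.  "data" and "qnum"
 * are hypothetical.
 */
#if 0
	htolem16(&data->mapping_flags, IXL_AQ_VSI_QUE_MAP_CONTIG);
	htolem16(&data->queue_mapping[0],
	    (qnum << IXL_AQ_VSI_QUEUE_SHIFT) & IXL_AQ_VSI_QUEUE_MASK);
	htolem16(&data->tc_mapping[0],
	    (0 << IXL_AQ_VSI_TC_Q_OFFSET_SHIFT) |
	    (0 << IXL_AQ_VSI_TC_Q_NUMBER_SHIFT));
#endif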
495 
496 struct ixl_aq_vsi_promisc_param {
497 	uint16_t	flags;
498 	uint16_t	valid_flags;
499 #define IXL_AQ_VSI_PROMISC_FLAG_UCAST	(1 << 0)
500 #define IXL_AQ_VSI_PROMISC_FLAG_MCAST	(1 << 1)
501 #define IXL_AQ_VSI_PROMISC_FLAG_BCAST	(1 << 2)
502 #define IXL_AQ_VSI_PROMISC_FLAG_DFLT	(1 << 3)
503 #define IXL_AQ_VSI_PROMISC_FLAG_VLAN	(1 << 4)
504 #define IXL_AQ_VSI_PROMISC_FLAG_RXONLY	(1 << 15)
505 
506 	uint16_t	seid;
507 #define IXL_AQ_VSI_PROMISC_SEID_VALID	(1 << 15)
508 	uint16_t	vlan;
509 #define IXL_AQ_VSI_PROMISC_VLAN_VALID	(1 << 15)
510 	uint32_t	reserved[2];
511 } __packed __aligned(8);
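
/*
 * An illustrative sketch, not part of the driver: enabling promiscuous
 * reception on a VSI fills the command like this.  valid_flags selects
 * which bits of flags the firmware should actually apply.  "param" and
 * "seid" are hypothetical.
 */
#if 0
	param->flags = htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
	    IXL_AQ_VSI_PROMISC_FLAG_MCAST | IXL_AQ_VSI_PROMISC_FLAG_BCAST);
	param->valid_flags = param->flags;
	param->seid = htole16(IXL_AQ_VSI_PROMISC_SEID_VALID | seid);
#endif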
512 
513 struct ixl_aq_veb_param {
514 	uint16_t	uplink_seid;
515 	uint16_t	downlink_seid;
516 	uint16_t	veb_flags;
517 #define IXL_AQ_ADD_VEB_FLOATING		(1 << 0)
518 #define IXL_AQ_ADD_VEB_PORT_TYPE_SHIFT	1
519 #define IXL_AQ_ADD_VEB_PORT_TYPE_MASK	(0x3 << IXL_AQ_ADD_VEB_PORT_TYPE_SHIFT)
520 #define IXL_AQ_ADD_VEB_PORT_TYPE_DEFAULT \
521 					(0x2 << IXL_AQ_ADD_VEB_PORT_TYPE_SHIFT)
522 #define IXL_AQ_ADD_VEB_PORT_TYPE_DATA	(0x4 << IXL_AQ_ADD_VEB_PORT_TYPE_SHIFT)
523 #define IXL_AQ_ADD_VEB_ENABLE_L2_FILTER	(1 << 3) /* deprecated */
524 #define IXL_AQ_ADD_VEB_DISABLE_STATS	(1 << 4)
525 	uint8_t		enable_tcs;
526 	uint8_t		_reserved[9];
527 } __packed __aligned(16);
528 
529 struct ixl_aq_veb_reply {
530 	uint16_t	_reserved1;
531 	uint16_t	_reserved2;
532 	uint16_t	_reserved3;
533 	uint16_t	switch_seid;
534 	uint16_t	veb_seid;
535 #define IXL_AQ_VEB_ERR_FLAG_NO_VEB	(1 << 0)
536 #define IXL_AQ_VEB_ERR_FLAG_NO_SCHED	(1 << 1)
537 #define IXL_AQ_VEB_ERR_FLAG_NO_COUNTER	(1 << 2)
538 #define IXL_AQ_VEB_ERR_FLAG_NO_ENTRY	(1 << 3)
539 	uint16_t	statistic_index;
540 	uint16_t	vebs_used;
541 	uint16_t	vebs_free;
542 } __packed __aligned(16);
543 
544 /* GET PHY ABILITIES param[0] */
545 #define IXL_AQ_PHY_REPORT_QUAL		(1 << 0)
546 #define IXL_AQ_PHY_REPORT_INIT		(1 << 1)
547 
548 /* RESTART_AN param[0] */
549 #define IXL_AQ_PHY_RESTART_AN		(1 << 1)
550 #define IXL_AQ_PHY_LINK_ENABLE		(1 << 2)
551 
552 struct ixl_aq_link_status { /* this occupies the iaq_param space */
553 	uint16_t	command_flags; /* only field set on command */
554 #define IXL_AQ_LSE_MASK			0x3
555 #define IXL_AQ_LSE_NOP			0x0
556 #define IXL_AQ_LSE_DISABLE		0x2
557 #define IXL_AQ_LSE_ENABLE		0x3
558 #define IXL_AQ_LSE_IS_ENABLED		0x1 /* only set in response */
559 	uint8_t		phy_type;
560 	uint8_t		link_speed;
561 	uint8_t		link_info;
562 #define IXL_AQ_LINK_UP_FUNCTION		0x01
563 #define IXL_AQ_LINK_FAULT		0x02
564 #define IXL_AQ_LINK_FAULT_TX		0x04
565 #define IXL_AQ_LINK_FAULT_RX		0x08
566 #define IXL_AQ_LINK_FAULT_REMOTE	0x10
567 #define IXL_AQ_LINK_UP_PORT		0x20
568 #define IXL_AQ_MEDIA_AVAILABLE		0x40
569 #define IXL_AQ_SIGNAL_DETECT		0x80
570 	uint8_t		an_info;
571 #define IXL_AQ_AN_COMPLETED		0x01
572 #define IXL_AQ_LP_AN_ABILITY		0x02
573 #define IXL_AQ_PD_FAULT			0x04
574 #define IXL_AQ_FEC_EN			0x08
575 #define IXL_AQ_PHY_LOW_POWER		0x10
576 #define IXL_AQ_LINK_PAUSE_TX		0x20
577 #define IXL_AQ_LINK_PAUSE_RX		0x40
578 #define IXL_AQ_QUALIFIED_MODULE		0x80
579 
580 	uint8_t		ext_info;
581 #define IXL_AQ_LINK_PHY_TEMP_ALARM	0x01
582 #define IXL_AQ_LINK_XCESSIVE_ERRORS	0x02
583 #define IXL_AQ_LINK_TX_SHIFT		0x02
584 #define IXL_AQ_LINK_TX_MASK		(0x03 << IXL_AQ_LINK_TX_SHIFT)
585 #define IXL_AQ_LINK_TX_ACTIVE		0x00
586 #define IXL_AQ_LINK_TX_DRAINED		0x01
587 #define IXL_AQ_LINK_TX_FLUSHED		0x03
588 #define IXL_AQ_LINK_FORCED_40G		0x10
589 /* 25G Error Codes */
590 #define IXL_AQ_25G_NO_ERR		0x00
591 #define IXL_AQ_25G_NOT_PRESENT		0x01
592 #define IXL_AQ_25G_NVM_CRC_ERR		0x02
593 #define IXL_AQ_25G_SBUS_UCODE_ERR	0x03
594 #define IXL_AQ_25G_SERDES_UCODE_ERR	0x04
595 #define IXL_AQ_25G_NIMB_UCODE_ERR	0x05
596 	uint8_t		loopback;
597 	uint16_t	max_frame_size;
598 
599 	uint8_t		config;
600 #define IXL_AQ_CONFIG_FEC_KR_ENA	0x01
601 #define IXL_AQ_CONFIG_FEC_RS_ENA	0x02
602 #define IXL_AQ_CONFIG_CRC_ENA		0x04
603 #define IXL_AQ_CONFIG_PACING_MASK	0x78
604 	uint8_t		power_desc;
605 #define IXL_AQ_LINK_POWER_CLASS_1	0x00
606 #define IXL_AQ_LINK_POWER_CLASS_2	0x01
607 #define IXL_AQ_LINK_POWER_CLASS_3	0x02
608 #define IXL_AQ_LINK_POWER_CLASS_4	0x03
609 #define IXL_AQ_PWR_CLASS_MASK		0x03
610 
611 	uint8_t		reserved[4];
612 } __packed __aligned(4);
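
/*
 * An illustrative sketch, not part of the driver: a link status reply is
 * decoded by testing link_info and translating link_speed through
 * ixl_speed_type_map (defined further down).  "status" points into the
 * iaq_param space of a hypothetical reply descriptor.
 */
#if 0
	uint64_t baudrate = 0;
	unsigned int i;

	if (ISSET(status->link_info, IXL_AQ_LINK_UP_FUNCTION)) {
		for (i = 0; i < nitems(ixl_speed_type_map); i++) {
			if (status->link_speed ==
			    ixl_speed_type_map[i].dev_speed) {
				baudrate = ixl_speed_type_map[i].net_speed;
				break;
			}
		}
	}
#endif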
613 /* event mask command flags for param[2] */
614 #define IXL_AQ_PHY_EV_MASK		0x3ff
615 #define IXL_AQ_PHY_EV_LINK_UPDOWN	(1 << 1)
616 #define IXL_AQ_PHY_EV_MEDIA_NA		(1 << 2)
617 #define IXL_AQ_PHY_EV_LINK_FAULT	(1 << 3)
618 #define IXL_AQ_PHY_EV_PHY_TEMP_ALARM	(1 << 4)
619 #define IXL_AQ_PHY_EV_EXCESS_ERRORS	(1 << 5)
620 #define IXL_AQ_PHY_EV_SIGNAL_DETECT	(1 << 6)
621 #define IXL_AQ_PHY_EV_AN_COMPLETED	(1 << 7)
622 #define IXL_AQ_PHY_EV_MODULE_QUAL_FAIL	(1 << 8)
623 #define IXL_AQ_PHY_EV_PORT_TX_SUSPENDED	(1 << 9)
624 
625 /* aq response codes */
626 #define IXL_AQ_RC_OK			0  /* success */
627 #define IXL_AQ_RC_EPERM			1  /* Operation not permitted */
628 #define IXL_AQ_RC_ENOENT		2  /* No such element */
629 #define IXL_AQ_RC_ESRCH			3  /* Bad opcode */
630 #define IXL_AQ_RC_EINTR			4  /* operation interrupted */
631 #define IXL_AQ_RC_EIO			5  /* I/O error */
632 #define IXL_AQ_RC_ENXIO			6  /* No such resource */
633 #define IXL_AQ_RC_E2BIG			7  /* Arg too long */
634 #define IXL_AQ_RC_EAGAIN		8  /* Try again */
635 #define IXL_AQ_RC_ENOMEM		9  /* Out of memory */
636 #define IXL_AQ_RC_EACCES		10 /* Permission denied */
637 #define IXL_AQ_RC_EFAULT		11 /* Bad address */
638 #define IXL_AQ_RC_EBUSY			12 /* Device or resource busy */
639 #define IXL_AQ_RC_EEXIST		13 /* object already exists */
640 #define IXL_AQ_RC_EINVAL		14 /* invalid argument */
641 #define IXL_AQ_RC_ENOTTY		15 /* not a typewriter */
642 #define IXL_AQ_RC_ENOSPC		16 /* No space or alloc failure */
643 #define IXL_AQ_RC_ENOSYS		17 /* function not implemented */
644 #define IXL_AQ_RC_ERANGE		18 /* parameter out of range */
645 #define IXL_AQ_RC_EFLUSHED		19 /* cmd flushed due to prev error */
646 #define IXL_AQ_RC_BAD_ADDR		20 /* contains a bad pointer */
647 #define IXL_AQ_RC_EMODE			21 /* not allowed in current mode */
648 #define IXL_AQ_RC_EFBIG			22 /* file too large */
649 
650 struct ixl_tx_desc {
651 	uint64_t		addr;
652 	uint64_t		cmd;
653 #define IXL_TX_DESC_DTYPE_SHIFT		0
654 #define IXL_TX_DESC_DTYPE_MASK		(0xfULL << IXL_TX_DESC_DTYPE_SHIFT)
655 #define IXL_TX_DESC_DTYPE_DATA		(0x0ULL << IXL_TX_DESC_DTYPE_SHIFT)
656 #define IXL_TX_DESC_DTYPE_NOP		(0x1ULL << IXL_TX_DESC_DTYPE_SHIFT)
657 #define IXL_TX_DESC_DTYPE_CONTEXT	(0x1ULL << IXL_TX_DESC_DTYPE_SHIFT)
658 #define IXL_TX_DESC_DTYPE_FCOE_CTX	(0x2ULL << IXL_TX_DESC_DTYPE_SHIFT)
659 #define IXL_TX_DESC_DTYPE_FD		(0x8ULL << IXL_TX_DESC_DTYPE_SHIFT)
660 #define IXL_TX_DESC_DTYPE_DDP_CTX	(0x9ULL << IXL_TX_DESC_DTYPE_SHIFT)
661 #define IXL_TX_DESC_DTYPE_FLEX_DATA	(0xbULL << IXL_TX_DESC_DTYPE_SHIFT)
662 #define IXL_TX_DESC_DTYPE_FLEX_CTX_1	(0xcULL << IXL_TX_DESC_DTYPE_SHIFT)
663 #define IXL_TX_DESC_DTYPE_FLEX_CTX_2	(0xdULL << IXL_TX_DESC_DTYPE_SHIFT)
664 #define IXL_TX_DESC_DTYPE_DONE		(0xfULL << IXL_TX_DESC_DTYPE_SHIFT)
665 
666 #define IXL_TX_DESC_CMD_SHIFT		4
667 #define IXL_TX_DESC_CMD_MASK		(0x3ffULL << IXL_TX_DESC_CMD_SHIFT)
668 #define IXL_TX_DESC_CMD_EOP		(0x001 << IXL_TX_DESC_CMD_SHIFT)
669 #define IXL_TX_DESC_CMD_RS		(0x002 << IXL_TX_DESC_CMD_SHIFT)
670 #define IXL_TX_DESC_CMD_ICRC		(0x004 << IXL_TX_DESC_CMD_SHIFT)
671 #define IXL_TX_DESC_CMD_IL2TAG1		(0x008 << IXL_TX_DESC_CMD_SHIFT)
672 #define IXL_TX_DESC_CMD_DUMMY		(0x010 << IXL_TX_DESC_CMD_SHIFT)
673 #define IXL_TX_DESC_CMD_IIPT_MASK	(0x060 << IXL_TX_DESC_CMD_SHIFT)
674 #define IXL_TX_DESC_CMD_IIPT_NONIP	(0x000 << IXL_TX_DESC_CMD_SHIFT)
675 #define IXL_TX_DESC_CMD_IIPT_IPV6	(0x020 << IXL_TX_DESC_CMD_SHIFT)
676 #define IXL_TX_DESC_CMD_IIPT_IPV4	(0x040 << IXL_TX_DESC_CMD_SHIFT)
677 #define IXL_TX_DESC_CMD_IIPT_IPV4_CSUM	(0x060 << IXL_TX_DESC_CMD_SHIFT)
678 #define IXL_TX_DESC_CMD_FCOET		(0x080 << IXL_TX_DESC_CMD_SHIFT)
679 #define IXL_TX_DESC_CMD_L4T_EOFT_MASK	(0x300 << IXL_TX_DESC_CMD_SHIFT)
680 #define IXL_TX_DESC_CMD_L4T_EOFT_UNK	(0x000 << IXL_TX_DESC_CMD_SHIFT)
681 #define IXL_TX_DESC_CMD_L4T_EOFT_TCP	(0x100 << IXL_TX_DESC_CMD_SHIFT)
682 #define IXL_TX_DESC_CMD_L4T_EOFT_SCTP	(0x200 << IXL_TX_DESC_CMD_SHIFT)
683 #define IXL_TX_DESC_CMD_L4T_EOFT_UDP	(0x300 << IXL_TX_DESC_CMD_SHIFT)
684 
685 #define IXL_TX_DESC_MACLEN_SHIFT	16
686 #define IXL_TX_DESC_MACLEN_MASK		(0x7fULL << IXL_TX_DESC_MACLEN_SHIFT)
687 #define IXL_TX_DESC_IPLEN_SHIFT		23
688 #define IXL_TX_DESC_IPLEN_MASK		(0x7fULL << IXL_TX_DESC_IPLEN_SHIFT)
689 #define IXL_TX_DESC_L4LEN_SHIFT		30
690 #define IXL_TX_DESC_L4LEN_MASK		(0xfULL << IXL_TX_DESC_L4LEN_SHIFT)
691 #define IXL_TX_DESC_FCLEN_SHIFT		30
692 #define IXL_TX_DESC_FCLEN_MASK		(0xfULL << IXL_TX_DESC_FCLEN_SHIFT)
693 
694 #define IXL_TX_DESC_BSIZE_SHIFT		34
695 #define IXL_TX_DESC_BSIZE_MAX		0x3fffULL
696 #define IXL_TX_DESC_BSIZE_MASK		\
697 	(IXL_TX_DESC_BSIZE_MAX << IXL_TX_DESC_BSIZE_SHIFT)
698 } __packed __aligned(16);
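
/*
 * An illustrative sketch, not part of the driver: a single-fragment
 * packet needs one data descriptor.  EOP marks the last fragment of the
 * packet, RS asks for a completion writeback, and ICRC offloads the
 * ethernet FCS.  "txd" and "map" (a loaded bus_dmamap_t) are
 * hypothetical.
 */
#if 0
	htolem64(&txd->addr, map->dm_segs[0].ds_addr);
	htolem64(&txd->cmd, IXL_TX_DESC_DTYPE_DATA |
	    IXL_TX_DESC_CMD_EOP | IXL_TX_DESC_CMD_RS | IXL_TX_DESC_CMD_ICRC |
	    ((uint64_t)map->dm_segs[0].ds_len << IXL_TX_DESC_BSIZE_SHIFT));
#endif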
699 
700 struct ixl_rx_rd_desc_16 {
701 	uint64_t		paddr; /* packet addr */
702 	uint64_t		haddr; /* header addr */
703 } __packed __aligned(16);
704 
705 struct ixl_rx_rd_desc_32 {
706 	uint64_t		paddr; /* packet addr */
707 	uint64_t		haddr; /* header addr */
708 	uint64_t		_reserved1;
709 	uint64_t		_reserved2;
710 } __packed __aligned(16);
711 
712 struct ixl_rx_wb_desc_16 {
713 	uint64_t		qword0;
714 	uint64_t		qword1;
715 #define IXL_RX_DESC_DD			(1 << 0)
716 #define IXL_RX_DESC_EOP			(1 << 1)
717 #define IXL_RX_DESC_L2TAG1P		(1 << 2)
718 #define IXL_RX_DESC_L3L4P		(1 << 3)
719 #define IXL_RX_DESC_CRCP		(1 << 4)
720 #define IXL_RX_DESC_TSYNINDX_SHIFT	5	/* TSYNINDX */
721 #define IXL_RX_DESC_TSYNINDX_MASK	(7 << IXL_RX_DESC_TSYNINDX_SHIFT)
722 #define IXL_RX_DESC_UMB_SHIFT		9
723 #define IXL_RX_DESC_UMB_MASK		(0x3 << IXL_RX_DESC_UMB_SHIFT)
724 #define IXL_RX_DESC_UMB_UCAST		(0x0 << IXL_RX_DESC_UMB_SHIFT)
725 #define IXL_RX_DESC_UMB_MCAST		(0x1 << IXL_RX_DESC_UMB_SHIFT)
726 #define IXL_RX_DESC_UMB_BCAST		(0x2 << IXL_RX_DESC_UMB_SHIFT)
727 #define IXL_RX_DESC_UMB_MIRROR		(0x3 << IXL_RX_DESC_UMB_SHIFT)
728 #define IXL_RX_DESC_FLM			(1 << 11)
729 #define IXL_RX_DESC_FLTSTAT_SHIFT	12
730 #define IXL_RX_DESC_FLTSTAT_MASK	(0x3 << IXL_RX_DESC_FLTSTAT_SHIFT)
731 #define IXL_RX_DESC_FLTSTAT_NODATA	(0x0 << IXL_RX_DESC_FLTSTAT_SHIFT)
732 #define IXL_RX_DESC_FLTSTAT_FDFILTID	(0x1 << IXL_RX_DESC_FLTSTAT_SHIFT)
733 #define IXL_RX_DESC_FLTSTAT_RSS		(0x3 << IXL_RX_DESC_FLTSTAT_SHIFT)
734 #define IXL_RX_DESC_LPBK		(1 << 14)
735 #define IXL_RX_DESC_IPV6EXTADD		(1 << 15)
736 #define IXL_RX_DESC_INT_UDP_0		(1 << 18)
737 
738 #define IXL_RX_DESC_RXE			(1 << 19)
739 #define IXL_RX_DESC_HBO			(1 << 21)
740 #define IXL_RX_DESC_IPE			(1 << 22)
741 #define IXL_RX_DESC_L4E			(1 << 23)
742 #define IXL_RX_DESC_EIPE		(1 << 24)
743 #define IXL_RX_DESC_OVERSIZE		(1 << 25)
744 
745 #define IXL_RX_DESC_PTYPE_SHIFT		30
746 #define IXL_RX_DESC_PTYPE_MASK		(0xffULL << IXL_RX_DESC_PTYPE_SHIFT)
747 
748 #define IXL_RX_DESC_PLEN_SHIFT		38
749 #define IXL_RX_DESC_PLEN_MASK		(0x3fffULL << IXL_RX_DESC_PLEN_SHIFT)
750 #define IXL_RX_DESC_HLEN_SHIFT		42
751 #define IXL_RX_DESC_HLEN_MASK		(0x7ffULL << IXL_RX_DESC_HLEN_SHIFT)
752 } __packed __aligned(16);
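
/*
 * An illustrative sketch, not part of the driver: on receive completion
 * the DD bit is tested first, then the packet length is extracted from
 * the written-back qword1.  "rxd" is hypothetical.
 */
#if 0
	uint64_t word = lemtoh64(&rxd->qword1);
	unsigned int len;

	if (ISSET(word, IXL_RX_DESC_DD)) {
		len = (word & IXL_RX_DESC_PLEN_MASK) >> IXL_RX_DESC_PLEN_SHIFT;
		/* IXL_RX_DESC_EOP is set on the last fragment of a packet */
	}
#endif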
753 
754 struct ixl_rx_wb_desc_32 {
755 	uint64_t		qword0;
756 	uint64_t		qword1;
757 	uint64_t		qword2;
758 	uint64_t		qword3;
759 } __packed __aligned(16);
760 
761 #define IXL_TX_PKT_DESCS		8
762 #define IXL_TX_QUEUE_ALIGN		128
763 #define IXL_RX_QUEUE_ALIGN		128
764 
765 #define IXL_HARDMTU			9706 /* - ETHER_HDR_LEN? */
766 
767 #define IXL_PCIREG			PCI_MAPREG_START
768 
769 #define IXL_ITR0			0x0
770 #define IXL_ITR1			0x1
771 #define IXL_ITR2			0x2
772 #define IXL_NOITR			0x2
773 
774 #define IXL_AQ_NUM			256
775 #define IXL_AQ_MASK			(IXL_AQ_NUM - 1)
776 #define IXL_AQ_ALIGN			64 /* aq ring bases want 64 byte alignment */
777 #define IXL_AQ_BUFLEN			4096
778 
779 #define IXL_HMC_ROUNDUP			512
780 #define IXL_HMC_PGSIZE			4096
781 #define IXL_HMC_DVASZ			sizeof(uint64_t)
782 #define IXL_HMC_PGS			(IXL_HMC_PGSIZE / IXL_HMC_DVASZ)
783 #define IXL_HMC_L2SZ			(IXL_HMC_PGSIZE * IXL_HMC_PGS)
784 #define IXL_HMC_PDVALID			1ULL
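
/*
 * with IXL_HMC_DVASZ == 8, one 4k page of page descriptors holds
 * IXL_HMC_PGS == 512 entries, so a single level 2 segment maps
 * IXL_HMC_L2SZ == 2MB of backing memory.
 */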
785 
786 struct ixl_aq_regs {
787 	bus_size_t		atq_tail;
788 	bus_size_t		atq_head;
789 	bus_size_t		atq_len;
790 	bus_size_t		atq_bal;
791 	bus_size_t		atq_bah;
792 
793 	bus_size_t		arq_tail;
794 	bus_size_t		arq_head;
795 	bus_size_t		arq_len;
796 	bus_size_t		arq_bal;
797 	bus_size_t		arq_bah;
798 
799 	uint32_t		atq_len_enable;
800 	uint32_t		atq_tail_mask;
801 	uint32_t		atq_head_mask;
802 
803 	uint32_t		arq_len_enable;
804 	uint32_t		arq_tail_mask;
805 	uint32_t		arq_head_mask;
806 };
807 
808 struct ixl_phy_type {
809 	uint64_t	phy_type;
810 	uint64_t	ifm_type;
811 };
812 
813 struct ixl_speed_type {
814 	uint8_t		dev_speed;
815 	uint64_t	net_speed;
816 };
817 
818 struct ixl_aq_buf {
819 	SIMPLEQ_ENTRY(ixl_aq_buf)
820 				 aqb_entry;
821 	void			*aqb_data;
822 	bus_dmamap_t		 aqb_map;
823 };
824 SIMPLEQ_HEAD(ixl_aq_bufs, ixl_aq_buf);
825 
826 struct ixl_dmamem {
827 	bus_dmamap_t		ixm_map;
828 	bus_dma_segment_t	ixm_seg;
829 	int			ixm_nsegs;
830 	size_t			ixm_size;
831 	caddr_t			ixm_kva;
832 };
833 #define IXL_DMA_MAP(_ixm)	((_ixm)->ixm_map)
834 #define IXL_DMA_DVA(_ixm)	((_ixm)->ixm_map->dm_segs[0].ds_addr)
835 #define IXL_DMA_KVA(_ixm)	((void *)(_ixm)->ixm_kva)
836 #define IXL_DMA_LEN(_ixm)	((_ixm)->ixm_size)
837 
838 struct ixl_hmc_entry {
839 	uint64_t		 hmc_base;
840 	uint32_t		 hmc_count;
841 	uint32_t		 hmc_size;
842 };
843 
844 #define IXL_HMC_LAN_TX		 0
845 #define IXL_HMC_LAN_RX		 1
846 #define IXL_HMC_FCOE_CTX	 2
847 #define IXL_HMC_FCOE_FILTER	 3
848 #define IXL_HMC_COUNT		 4
849 
850 struct ixl_hmc_pack {
851 	uint16_t		offset;
852 	uint16_t		width;
853 	uint16_t		lsb;
854 };
855 
856 /*
857  * these hmc objects have weird sizes and alignments, so these are abstract
858  * representations of them that are nice for c to populate.
859  *
860  * the packing code relies on little-endian values being stored in the fields,
861  * no high bits in the fields being set, and the fields must be packed in the
862  * same order as they are in the ctx structure.
863  */
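
/*
 * a minimal sketch of the packing these tables drive (the real
 * ixl_hmc_pack() is further down in this file): each entry copies
 * "width" bits of the little-endian field at "offset" in the C struct
 * into the hardware context buffer starting at absolute bit "lsb".
 * "buf", "ctx", "packing" and "npack" are hypothetical.
 */
#if 0
	for (i = 0; i < npack; i++) {
		const struct ixl_hmc_pack *p = &packing[i];
		const uint8_t *src = (const uint8_t *)ctx + p->offset;
		unsigned int bit;

		for (bit = 0; bit < p->width; bit++) {
			if (src[bit / 8] & (1 << (bit % 8)))
				buf[(p->lsb + bit) / 8] |=
				    1 << ((p->lsb + bit) % 8);
		}
	}
#endif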
864 
865 struct ixl_hmc_rxq {
866 	uint16_t		 head;
867 	uint8_t			 cpuid;
868 	uint64_t		 base;
869 #define IXL_HMC_RXQ_BASE_UNIT		128
870 	uint16_t		 qlen;
871 	uint16_t		 dbuff;
872 #define IXL_HMC_RXQ_DBUFF_UNIT		128
873 	uint8_t			 hbuff;
874 #define IXL_HMC_RXQ_HBUFF_UNIT		64
875 	uint8_t			 dtype;
876 #define IXL_HMC_RXQ_DTYPE_NOSPLIT	0x0
877 #define IXL_HMC_RXQ_DTYPE_HSPLIT	0x1
878 #define IXL_HMC_RXQ_DTYPE_SPLIT_ALWAYS	0x2
879 	uint8_t			 dsize;
880 #define IXL_HMC_RXQ_DSIZE_16		0
881 #define IXL_HMC_RXQ_DSIZE_32		1
882 	uint8_t			 crcstrip;
883 	uint8_t			 fc_ena;
884 	uint8_t			 l2sel;
885 	uint8_t			 hsplit_0;
886 	uint8_t			 hsplit_1;
887 	uint8_t			 showiv;
888 	uint16_t		 rxmax;
889 	uint8_t			 tphrdesc_ena;
890 	uint8_t			 tphwdesc_ena;
891 	uint8_t			 tphdata_ena;
892 	uint8_t			 tphhead_ena;
893 	uint8_t			 lrxqthresh;
894 	uint8_t			 prefena;
895 };
896 
897 static const struct ixl_hmc_pack ixl_hmc_pack_rxq[] = {
898 	{ offsetof(struct ixl_hmc_rxq, head),		13,	0 },
899 	{ offsetof(struct ixl_hmc_rxq, cpuid),		8,	13 },
900 	{ offsetof(struct ixl_hmc_rxq, base),		57,	32 },
901 	{ offsetof(struct ixl_hmc_rxq, qlen),		13,	89 },
902 	{ offsetof(struct ixl_hmc_rxq, dbuff),		7,	102 },
903 	{ offsetof(struct ixl_hmc_rxq, hbuff),		5,	109 },
904 	{ offsetof(struct ixl_hmc_rxq, dtype),		2,	114 },
905 	{ offsetof(struct ixl_hmc_rxq, dsize),		1,	116 },
906 	{ offsetof(struct ixl_hmc_rxq, crcstrip),	1,	117 },
907 	{ offsetof(struct ixl_hmc_rxq, fc_ena),		1,	118 },
908 	{ offsetof(struct ixl_hmc_rxq, l2sel),		1,	119 },
909 	{ offsetof(struct ixl_hmc_rxq, hsplit_0),	4,	120 },
910 	{ offsetof(struct ixl_hmc_rxq, hsplit_1),	2,	124 },
911 	{ offsetof(struct ixl_hmc_rxq, showiv),		1,	127 },
912 	{ offsetof(struct ixl_hmc_rxq, rxmax),		14,	174 },
913 	{ offsetof(struct ixl_hmc_rxq, tphrdesc_ena),	1,	193 },
914 	{ offsetof(struct ixl_hmc_rxq, tphwdesc_ena),	1,	194 },
915 	{ offsetof(struct ixl_hmc_rxq, tphdata_ena),	1,	195 },
916 	{ offsetof(struct ixl_hmc_rxq, tphhead_ena),	1,	196 },
917 	{ offsetof(struct ixl_hmc_rxq, lrxqthresh),	3,	198 },
918 	{ offsetof(struct ixl_hmc_rxq, prefena),	1,	201 },
919 };
920 
921 #define IXL_HMC_RXQ_MINSIZE (201 + 1)
922 
923 struct ixl_hmc_txq {
924 	uint16_t		head;
925 	uint8_t			new_context;
926 	uint64_t		base;
927 #define IXL_HMC_TXQ_BASE_UNIT		128
928 	uint8_t			fc_ena;
929 	uint8_t			timesync_ena;
930 	uint8_t			fd_ena;
931 	uint8_t			alt_vlan_ena;
932 	uint16_t		thead_wb;
933 	uint8_t			cpuid;
934 	uint8_t			head_wb_ena;
935 #define IXL_HMC_TXQ_DESC_WB		0
936 #define IXL_HMC_TXQ_HEAD_WB		1
937 	uint16_t		qlen;
938 	uint8_t			tphrdesc_ena;
939 	uint8_t			tphrpacket_ena;
940 	uint8_t			tphwdesc_ena;
941 	uint64_t		head_wb_addr;
942 	uint32_t		crc;
943 	uint16_t		rdylist;
944 	uint8_t			rdylist_act;
945 };
946 
947 static const struct ixl_hmc_pack ixl_hmc_pack_txq[] = {
948 	{ offsetof(struct ixl_hmc_txq, head),		13,	0 },
949 	{ offsetof(struct ixl_hmc_txq, new_context),	1,	30 },
950 	{ offsetof(struct ixl_hmc_txq, base),		57,	32 },
951 	{ offsetof(struct ixl_hmc_txq, fc_ena),		1,	89 },
952 	{ offsetof(struct ixl_hmc_txq, timesync_ena),	1,	90 },
953 	{ offsetof(struct ixl_hmc_txq, fd_ena),		1,	91 },
954 	{ offsetof(struct ixl_hmc_txq, alt_vlan_ena),	1,	92 },
955 	{ offsetof(struct ixl_hmc_txq, cpuid),		8,	96 },
956 /* line 1 */
957 	{ offsetof(struct ixl_hmc_txq, thead_wb),	13,	0 + 128 },
958 	{ offsetof(struct ixl_hmc_txq, head_wb_ena),	1,	32 + 128 },
959 	{ offsetof(struct ixl_hmc_txq, qlen),		13,	33 + 128 },
960 	{ offsetof(struct ixl_hmc_txq, tphrdesc_ena),	1,	46 + 128 },
961 	{ offsetof(struct ixl_hmc_txq, tphrpacket_ena),	1,	47 + 128 },
962 	{ offsetof(struct ixl_hmc_txq, tphwdesc_ena),	1,	48 + 128 },
963 	{ offsetof(struct ixl_hmc_txq, head_wb_addr),	64,	64 + 128 },
964 /* line 7 */
965 	{ offsetof(struct ixl_hmc_txq, crc),		32,	0 + (7*128) },
966 	{ offsetof(struct ixl_hmc_txq, rdylist),	10,	84 + (7*128) },
967 	{ offsetof(struct ixl_hmc_txq, rdylist_act),	1,	94 + (7*128) },
968 };
969 
970 #define IXL_HMC_TXQ_MINSIZE (94 + (7*128) + 1)
971 
972 struct ixl_tx_map {
973 	struct mbuf		*txm_m;
974 	bus_dmamap_t		 txm_map;
975 	unsigned int		 txm_eop;
976 };
977 
978 struct ixl_tx_ring {
979 	unsigned int		 txr_prod;
980 	unsigned int		 txr_cons;
981 
982 	struct ixl_tx_map	*txr_maps;
983 	struct ixl_dmamem	 txr_mem;
984 
985 	bus_size_t		 txr_tail;
986 	unsigned int		 txr_qid;
987 };
988 
989 struct ixl_rx_map {
990 	struct mbuf		*rxm_m;
991 	bus_dmamap_t		 rxm_map;
992 };
993 
994 struct ixl_rx_ring {
995 	struct ixl_softc	*rxr_sc;
996 
997 	struct if_rxring	 rxr_acct;
998 	struct timeout		 rxr_refill;
999 
1000 	unsigned int		 rxr_prod;
1001 	unsigned int		 rxr_cons;
1002 
1003 	struct ixl_rx_map	*rxr_maps;
1004 	struct ixl_dmamem	 rxr_mem;
1005 
1006 	struct mbuf		*rxr_m_head;
1007 	struct mbuf		**rxr_m_tail;
1008 
1009 	bus_size_t		 rxr_tail;
1010 	unsigned int		 rxr_qid;
1011 };
1012 
1013 struct ixl_softc {
1014 	struct device		 sc_dev;
1015 	struct arpcom		 sc_ac;
1016 	struct ifmedia		 sc_media;
1017 	uint64_t		 sc_media_status;
1018 	uint64_t		 sc_media_active;
1019 
1020 	pci_chipset_tag_t	 sc_pc;
1021 	pci_intr_handle_t	 sc_ih;
1022 	void			*sc_ihc;
1023 	pcitag_t		 sc_tag;
1024 
1025 	bus_dma_tag_t		 sc_dmat;
1026 	bus_space_tag_t		 sc_memt;
1027 	bus_space_handle_t	 sc_memh;
1028 	bus_size_t		 sc_mems;
1029 
1030 	uint8_t			 sc_pf_id;
1031 	uint16_t		 sc_uplink_seid;	/* le */
1032 	uint16_t		 sc_downlink_seid;	/* le */
1033 	uint16_t		 sc_veb_seid;		/* le */
1034 	uint16_t		 sc_vsi_number;		/* le */
1035 	uint16_t		 sc_seid;
1036 	unsigned int		 sc_base_queue;
1037 
1038 	struct ixl_dmamem	 sc_vsi;
1039 
1040 	const struct ixl_aq_regs *
1041 				 sc_aq_regs;
1042 
1043 	struct mutex		 sc_atq_mtx;
1044 	struct ixl_dmamem	 sc_atq;
1045 	unsigned int		 sc_atq_prod;
1046 	unsigned int		 sc_atq_cons;
1047 
1048 	struct ixl_dmamem	 sc_arq;
1049 	struct task		 sc_arq_task;
1050 	struct ixl_aq_bufs	 sc_arq_idle;
1051 	struct ixl_aq_bufs	 sc_arq_live;
1052 	struct if_rxring	 sc_arq_ring;
1053 	unsigned int		 sc_arq_prod;
1054 	unsigned int		 sc_arq_cons;
1055 
1056 	struct ixl_dmamem	 sc_hmc_sd;
1057 	struct ixl_dmamem	 sc_hmc_pd;
1058 	struct ixl_hmc_entry	 sc_hmc_entries[IXL_HMC_COUNT];
1059 
1060 	unsigned int		 sc_nrings;
1061 
1062 	unsigned int		 sc_tx_ring_ndescs;
1063 	unsigned int		 sc_rx_ring_ndescs;
1064 	unsigned int		 sc_nqueues;	/* 1 << sc_nqueues */
1065 };
1066 #define DEVNAME(_sc) ((_sc)->sc_dev.dv_xname)
1067 
1068 struct ixl_atq {
1069 	SIMPLEQ_ENTRY(ixl_atq)	  iatq_entry;
1070 	struct ixl_aq_desc	  iatq_desc;
1071 	void			 *iatq_arg;
1072 	void			(*iatq_fn)(struct ixl_softc *, void *);
1073 };
1074 SIMPLEQ_HEAD(ixl_atq_list, ixl_atq);
1075 
1076 #define delaymsec(_ms)	delay(1000 * (_ms))
1077 
1078 static void	ixl_clear_hw(struct ixl_softc *);
1079 static int	ixl_pf_reset(struct ixl_softc *);
1080 
1081 static int	ixl_dmamem_alloc(struct ixl_softc *, struct ixl_dmamem *,
1082 		    bus_size_t, u_int);
1083 static void	ixl_dmamem_free(struct ixl_softc *, struct ixl_dmamem *);
1084 
1085 static int	ixl_arq_fill(struct ixl_softc *);
1086 static void	ixl_arq_unfill(struct ixl_softc *);
1087 
1088 static int	ixl_atq_poll(struct ixl_softc *, struct ixl_aq_desc *,
1089 		    unsigned int);
1090 static void	ixl_atq_set(struct ixl_atq *,
1091 		    void (*)(struct ixl_softc *, void *), void *);
1092 static void	ixl_atq_post(struct ixl_softc *, struct ixl_atq *);
1093 static void	ixl_atq_done(struct ixl_softc *);
1094 static void	ixl_atq_exec(struct ixl_softc *, struct ixl_atq *,
1095 		    const char *);
1096 static int	ixl_get_version(struct ixl_softc *);
1097 static int	ixl_pxe_clear(struct ixl_softc *);
1098 static int	ixl_lldp_shut(struct ixl_softc *);
1099 static int	ixl_get_mac(struct ixl_softc *);
1100 static int	ixl_get_switch_config(struct ixl_softc *);
1101 static int	ixl_phy_mask_ints(struct ixl_softc *);
1102 static int	ixl_get_phy_abilities(struct ixl_softc *, uint64_t *);
1103 static int	ixl_restart_an(struct ixl_softc *);
1104 static int	ixl_hmc(struct ixl_softc *);
1105 static void	ixl_hmc_free(struct ixl_softc *);
1106 static int	ixl_get_vsi(struct ixl_softc *);
1107 static int	ixl_set_vsi(struct ixl_softc *);
1108 static int	ixl_get_link_status(struct ixl_softc *);
1109 static int	ixl_set_link_status(struct ixl_softc *,
1110 		    const struct ixl_aq_desc *);
1111 static void	ixl_arq(void *);
1112 static void	ixl_hmc_pack(void *, const void *,
1113 		    const struct ixl_hmc_pack *, unsigned int);
1114 
1115 static int	ixl_match(struct device *, void *, void *);
1116 static void	ixl_attach(struct device *, struct device *, void *);
1117 
1118 static void	ixl_media_add(struct ixl_softc *, uint64_t);
1119 static int	ixl_media_change(struct ifnet *);
1120 static void	ixl_media_status(struct ifnet *, struct ifmediareq *);
1121 static void	ixl_watchdog(struct ifnet *);
1122 static int	ixl_ioctl(struct ifnet *, u_long, caddr_t);
1123 static void	ixl_start(struct ifqueue *);
1124 static int	ixl_intr(void *);
1125 static int	ixl_up(struct ixl_softc *);
1126 static int	ixl_down(struct ixl_softc *);
1127 static int	ixl_iff(struct ixl_softc *);
1128 
1129 static struct ixl_tx_ring *
1130 		ixl_txr_alloc(struct ixl_softc *, unsigned int);
1131 static void	ixl_txr_qdis(struct ixl_softc *, struct ixl_tx_ring *, int);
1132 static void	ixl_txr_config(struct ixl_softc *, struct ixl_tx_ring *);
1133 static int	ixl_txr_enabled(struct ixl_softc *, struct ixl_tx_ring *);
1134 static int	ixl_txr_disabled(struct ixl_softc *, struct ixl_tx_ring *);
1135 static void	ixl_txr_unconfig(struct ixl_softc *, struct ixl_tx_ring *);
1136 static void	ixl_txr_clean(struct ixl_softc *, struct ixl_tx_ring *);
1137 static void	ixl_txr_free(struct ixl_softc *, struct ixl_tx_ring *);
1138 static int	ixl_txeof(struct ixl_softc *, struct ifqueue *);
1139 
1140 static struct ixl_rx_ring *
1141 		ixl_rxr_alloc(struct ixl_softc *, unsigned int);
1142 static void	ixl_rxr_config(struct ixl_softc *, struct ixl_rx_ring *);
1143 static int	ixl_rxr_enabled(struct ixl_softc *, struct ixl_rx_ring *);
1144 static int	ixl_rxr_disabled(struct ixl_softc *, struct ixl_rx_ring *);
1145 static void	ixl_rxr_unconfig(struct ixl_softc *, struct ixl_rx_ring *);
1146 static void	ixl_rxr_clean(struct ixl_softc *, struct ixl_rx_ring *);
1147 static void	ixl_rxr_free(struct ixl_softc *, struct ixl_rx_ring *);
1148 static int	ixl_rxeof(struct ixl_softc *, struct ifiqueue *);
1149 static void	ixl_rxfill(struct ixl_softc *, struct ixl_rx_ring *);
1150 static void	ixl_rxrefill(void *);
1151 
1152 struct cfdriver ixl_cd = {
1153 	NULL,
1154 	"ixl",
1155 	DV_IFNET,
1156 };
1157 
1158 struct cfattach ixl_ca = {
1159 	sizeof(struct ixl_softc),
1160 	ixl_match,
1161 	ixl_attach,
1162 };
1163 
1164 static const struct ixl_phy_type ixl_phy_type_map[] = {
1165 	{ 1ULL << IXL_PHY_TYPE_SGMII,		IFM_1000_SGMII },
1166 	{ 1ULL << IXL_PHY_TYPE_1000BASE_KX,	IFM_1000_KX },
1167 	{ 1ULL << IXL_PHY_TYPE_10GBASE_KX4,	IFM_10G_KX4 },
1168 	{ 1ULL << IXL_PHY_TYPE_10GBASE_KR,	IFM_10G_KR },
1169 	{ 1ULL << IXL_PHY_TYPE_40GBASE_KR4,	IFM_40G_KR4 },
1170 	{ 1ULL << IXL_PHY_TYPE_XAUI |
1171 	  1ULL << IXL_PHY_TYPE_XFI,		IFM_10G_CX4 },
1172 	{ 1ULL << IXL_PHY_TYPE_SFI,		IFM_10G_SFI },
1173 	{ 1ULL << IXL_PHY_TYPE_XLAUI |
1174 	  1ULL << IXL_PHY_TYPE_XLPPI,		IFM_40G_XLPPI },
1175 	{ 1ULL << IXL_PHY_TYPE_40GBASE_CR4_CU |
1176 	  1ULL << IXL_PHY_TYPE_40GBASE_CR4,	IFM_40G_CR4 },
1177 	{ 1ULL << IXL_PHY_TYPE_10GBASE_CR1_CU |
1178 	  1ULL << IXL_PHY_TYPE_10GBASE_CR1,	IFM_10G_CR1 },
1179 	{ 1ULL << IXL_PHY_TYPE_10GBASE_AOC,	IFM_10G_AOC },
1180 	{ 1ULL << IXL_PHY_TYPE_40GBASE_AOC,	IFM_40G_AOC },
1181 	{ 1ULL << IXL_PHY_TYPE_100BASE_TX,	IFM_100_TX },
1182 	{ 1ULL << IXL_PHY_TYPE_1000BASE_T_OPTICAL |
1183 	  1ULL << IXL_PHY_TYPE_1000BASE_T,	IFM_1000_T },
1184 	{ 1ULL << IXL_PHY_TYPE_10GBASE_T,	IFM_10G_T },
1185 	{ 1ULL << IXL_PHY_TYPE_10GBASE_SR,	IFM_10G_SR },
1186 	{ 1ULL << IXL_PHY_TYPE_10GBASE_LR,	IFM_10G_LR },
1187 	{ 1ULL << IXL_PHY_TYPE_10GBASE_SFPP_CU,	IFM_10G_SFP_CU },
1188 	{ 1ULL << IXL_PHY_TYPE_40GBASE_SR4,	IFM_40G_SR4 },
1189 	{ 1ULL << IXL_PHY_TYPE_40GBASE_LR4,	IFM_40G_LR4 },
1190 	{ 1ULL << IXL_PHY_TYPE_1000BASE_SX,	IFM_1000_SX },
1191 	{ 1ULL << IXL_PHY_TYPE_1000BASE_LX,	IFM_1000_LX },
1192 	{ 1ULL << IXL_PHY_TYPE_20GBASE_KR2,	IFM_20G_KR2 },
1193 	{ 1ULL << IXL_PHY_TYPE_25GBASE_KR,	IFM_25G_KR },
1194 	{ 1ULL << IXL_PHY_TYPE_25GBASE_CR,	IFM_25G_CR },
1195 	{ 1ULL << IXL_PHY_TYPE_25GBASE_SR,	IFM_25G_SR },
1196 	{ 1ULL << IXL_PHY_TYPE_25GBASE_LR,	IFM_25G_LR },
1197 	{ 1ULL << IXL_PHY_TYPE_25GBASE_AOC,	IFM_25G_AOC },
1198 	{ 1ULL << IXL_PHY_TYPE_25GBASE_ACC,	IFM_25G_CR },
1199 };
1200 
1201 static const struct ixl_speed_type ixl_speed_type_map[] = {
1202 	{ IXL_AQ_PHY_LINK_SPEED_40GB,		IF_Gbps(40) },
1203 	{ IXL_AQ_PHY_LINK_SPEED_25GB,		IF_Gbps(25) },
1204 	{ IXL_AQ_PHY_LINK_SPEED_20GB,		IF_Gbps(20) },
1205 	{ IXL_AQ_PHY_LINK_SPEED_10GB,		IF_Gbps(10) },
1206 	{ IXL_AQ_PHY_LINK_SPEED_1000MB,		IF_Mbps(1000) },
1207 	{ IXL_AQ_PHY_LINK_SPEED_100MB,		IF_Mbps(100) },
1208 };
1209 
1210 static const struct ixl_aq_regs ixl_pf_aq_regs = {
1211 	.atq_tail	= I40E_PF_ATQT,
1212 	.atq_tail_mask	= I40E_PF_ATQT_ATQT_MASK,
1213 	.atq_head	= I40E_PF_ATQH,
1214 	.atq_head_mask	= I40E_PF_ATQH_ATQH_MASK,
1215 	.atq_len	= I40E_PF_ATQLEN,
1216 	.atq_bal	= I40E_PF_ATQBAL,
1217 	.atq_bah	= I40E_PF_ATQBAH,
1218 	.atq_len_enable	= I40E_PF_ATQLEN_ATQENABLE_MASK,
1219 
1220 	.arq_tail	= I40E_PF_ARQT,
1221 	.arq_tail_mask	= I40E_PF_ARQT_ARQT_MASK,
1222 	.arq_head	= I40E_PF_ARQH,
1223 	.arq_head_mask	= I40E_PF_ARQH_ARQH_MASK,
1224 	.arq_len	= I40E_PF_ARQLEN,
1225 	.arq_bal	= I40E_PF_ARQBAL,
1226 	.arq_bah	= I40E_PF_ARQBAH,
1227 	.arq_len_enable	= I40E_PF_ARQLEN_ARQENABLE_MASK,
1228 };
1229 
1230 #ifdef notyet
1231 static const struct ixl_aq_regs ixl_vf_aq_regs = {
1232 	.atq_tail	= I40E_VF_ATQT1,
1233 	.atq_tail_mask	= I40E_VF_ATQT1_ATQT_MASK,
1234 	.atq_head	= I40E_VF_ATQH1,
1235 	.atq_head_mask	= I40E_VF_ATQH1_ATQH_MASK,
1236 	.atq_len	= I40E_VF_ATQLEN1,
1237 	.atq_bal	= I40E_VF_ATQBAL1,
1238 	.atq_bah	= I40E_VF_ATQBAH1,
1239 	.atq_len_enable	= I40E_VF_ATQLEN1_ATQENABLE_MASK,
1240 
1241 	.arq_tail	= I40E_VF_ARQT1,
1242 	.arq_tail_mask	= I40E_VF_ARQT1_ARQT_MASK,
1243 	.arq_head	= I40E_VF_ARQH1,
1244 	.arq_head_mask	= I40E_VF_ARQH1_ARQH_MASK,
1245 	.arq_len	= I40E_VF_ARQLEN1,
1246 	.arq_bal	= I40E_VF_ARQBAL1,
1247 	.arq_bah	= I40E_VF_ARQBAH1,
1248 	.arq_len_enable	= I40E_VF_ARQLEN1_ARQENABLE_MASK,
1249 };
1250 #endif
1251 
1252 #define ixl_rd(_s, _r) \
1253 	bus_space_read_4((_s)->sc_memt, (_s)->sc_memh, (_r))
1254 #define ixl_wr(_s, _r, _v) \
1255 	bus_space_write_4((_s)->sc_memt, (_s)->sc_memh, (_r), (_v))
1256 #define ixl_barrier(_s, _r, _l, _o) \
1257 	bus_space_barrier((_s)->sc_memt, (_s)->sc_memh, (_r), (_l), (_o))
1258 #define ixl_intr_enable(_s) \
1259 	ixl_wr((_s), I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_INTENA_MASK | \
1260 	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | \
1261 	    (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT))
1262 
1263 #define ixl_nqueues(_sc)	(1 << (_sc)->sc_nqueues)
1264 
1265 #ifdef __LP64__
1266 #define ixl_dmamem_hi(_ixm)	(uint32_t)(IXL_DMA_DVA(_ixm) >> 32)
1267 #else
1268 #define ixl_dmamem_hi(_ixm)	0
1269 #endif
1270 
1271 #define ixl_dmamem_lo(_ixm) 	(uint32_t)IXL_DMA_DVA(_ixm)
1272 
1273 static inline void
1274 ixl_aq_dva(struct ixl_aq_desc *iaq, bus_addr_t addr)
1275 {
1276 #ifdef __LP64__
1277 	htolem32(&iaq->iaq_param[2], addr >> 32);
1278 #else
1279 	iaq->iaq_param[2] = htole32(0);
1280 #endif
1281 	htolem32(&iaq->iaq_param[3], addr);
1282 }
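
/*
 * usage sketch with a hypothetical dma buffer "idm":
 * ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm)) points a descriptor at the buffer
 * before the command is posted.
 */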
1283 
1284 #if _BYTE_ORDER == _BIG_ENDIAN
1285 #define HTOLE16(_x)	(uint16_t)(((_x) & 0xff) << 8 | ((_x) & 0xff00) >> 8)
1286 #else
1287 #define HTOLE16(_x)	(_x)
1288 #endif
1289 
1290 static const struct pci_matchid ixl_devices[] = {
1291 #ifdef notyet
1292 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_VF },
1293 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_VF_HV },
1294 #endif
1295 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X710_10G_SFP },
1296 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_40G_BP },
1297 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X710_10G_BP },
1298 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_QSFP_1 },
1299 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_QSFP_2 },
1300 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X710_10G_QSFP },
1301 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X710_10G_BASET },
1302 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_20G_BP_1 },
1303 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_20G_BP_2 },
1304 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X710_T4_10G },
1305 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XXV710_25G_BP },
1306 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XXV710_25G_SFP28 },
1307 };
1308 
1309 static int
1310 ixl_match(struct device *parent, void *match, void *aux)
1311 {
1312 	return (pci_matchbyid(aux, ixl_devices, nitems(ixl_devices)));
1313 }
1314 
1315 static void
1316 ixl_attach(struct device *parent, struct device *self, void *aux)
1317 {
1318 	struct ixl_softc *sc = (struct ixl_softc *)self;
1319 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1320 	struct pci_attach_args *pa = aux;
1321 	pcireg_t memtype;
1322 	uint32_t port, ari, func;
1323 	uint64_t phy_types = 0;
1324 	int tries;
1325 
1326 	sc->sc_pc = pa->pa_pc;
1327 	sc->sc_tag = pa->pa_tag;
1328 	sc->sc_dmat = pa->pa_dmat;
1329 	sc->sc_aq_regs = &ixl_pf_aq_regs; /* VF? */
1330 
1331 	sc->sc_nqueues = 0; /* 1 << 0 is 1 queue */
1332 	sc->sc_tx_ring_ndescs = 1024;
1333 	sc->sc_rx_ring_ndescs = 1024;
1334 
1335 	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, IXL_PCIREG);
1336 	if (pci_mapreg_map(pa, IXL_PCIREG, memtype, BUS_SPACE_MAP_PREFETCHABLE,
1337 	    &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems, 0)) {
1338 		printf(": unable to map registers\n");
1339 		return;
1340 	}
1341 
1342 	sc->sc_base_queue = (ixl_rd(sc, I40E_PFLAN_QALLOC) &
1343 	    I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
1344 	    I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
1345 	printf(" %u", sc->sc_base_queue);
1346 
1347 	ixl_clear_hw(sc);
1348 
1349 	if (ixl_pf_reset(sc) == -1) {
1350 		/* error printed by ixl_pf_reset */
1351 		goto unmap;
1352 	}
1353 
1354 	port = ixl_rd(sc, I40E_PFGEN_PORTNUM);
1355 	port &= I40E_PFGEN_PORTNUM_PORT_NUM_MASK;
1356 	port >>= I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
1357 	printf(": port %u", port);
1358 
1359 	ari = ixl_rd(sc, I40E_GLPCI_CAPSUP);
1360 	ari &= I40E_GLPCI_CAPSUP_ARI_EN_MASK;
1361 	ari >>= I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
1362 
1363 	func = ixl_rd(sc, I40E_PF_FUNC_RID);
1366 
1367 	sc->sc_pf_id = func & (ari ? 0xff : 0x7);
1368 
1369 	/* initialise the adminq */
1370 
1371 	mtx_init(&sc->sc_atq_mtx, IPL_NET);
1372 
1373 	if (ixl_dmamem_alloc(sc, &sc->sc_atq,
1374 	    sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
1375 		printf("\n" "%s: unable to allocate atq\n", DEVNAME(sc));
1376 		goto unmap;
1377 	}
1378 
1379 	SIMPLEQ_INIT(&sc->sc_arq_idle);
1380 	SIMPLEQ_INIT(&sc->sc_arq_live);
1381 	if_rxr_init(&sc->sc_arq_ring, 2, IXL_AQ_NUM - 1);
1382 	task_set(&sc->sc_arq_task, ixl_arq, sc);
1383 	sc->sc_arq_cons = 0;
1384 	sc->sc_arq_prod = 0;
1385 
1386 	if (ixl_dmamem_alloc(sc, &sc->sc_arq,
1387 	    sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
1388 		printf("\n" "%s: unable to allocate arq\n", DEVNAME(sc));
1389 		goto free_atq;
1390 	}
1391 
1392 	if (!ixl_arq_fill(sc)) {
1393 		printf("\n" "%s: unable to fill arq descriptors\n",
1394 		    DEVNAME(sc));
1395 		goto free_arq;
1396 	}
1397 
1398 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1399 	    0, IXL_DMA_LEN(&sc->sc_atq),
1400 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1401 
1402 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1403 	    0, IXL_DMA_LEN(&sc->sc_arq),
1404 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1405 
1406 	for (tries = 0; tries < 10; tries++) {
1407 		int rv;
1408 
1409 		sc->sc_atq_cons = 0;
1410 		sc->sc_atq_prod = 0;
1411 
1412 		ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1413 		ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1414 		ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1415 		ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1416 
1417 		ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
1418 
1419 		ixl_wr(sc, sc->sc_aq_regs->atq_bal,
1420 		    ixl_dmamem_lo(&sc->sc_atq));
1421 		ixl_wr(sc, sc->sc_aq_regs->atq_bah,
1422 		    ixl_dmamem_hi(&sc->sc_atq));
1423 		ixl_wr(sc, sc->sc_aq_regs->atq_len,
1424 		    sc->sc_aq_regs->atq_len_enable | IXL_AQ_NUM);
1425 
1426 		ixl_wr(sc, sc->sc_aq_regs->arq_bal,
1427 		    ixl_dmamem_lo(&sc->sc_arq));
1428 		ixl_wr(sc, sc->sc_aq_regs->arq_bah,
1429 		    ixl_dmamem_hi(&sc->sc_arq));
1430 		ixl_wr(sc, sc->sc_aq_regs->arq_len,
1431 		    sc->sc_aq_regs->arq_len_enable | IXL_AQ_NUM);
1432 
1433 		rv = ixl_get_version(sc);
1434 		if (rv == 0)
1435 			break;
1436 		if (rv != ETIMEDOUT) {
1437 			printf(", unable to get firmware version\n");
1438 			goto shutdown;
1439 		}
1440 
1441 		delaymsec(100);
1442 	}
1443 
1444 	ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
1445 
1446 	if (ixl_pxe_clear(sc) != 0) {
1447 		/* error printed by ixl_pxe_clear */
1448 		goto shutdown;
1449 	}
1450 
1451 	if (ixl_get_mac(sc) != 0) {
1452 		/* error printed by ixl_get_mac */
1453 		goto shutdown;
1454 	}
1455 
1456 	if (pci_intr_map_msi(pa, &sc->sc_ih) != 0 &&
1457 	    pci_intr_map(pa, &sc->sc_ih) != 0) {
1458 		printf(", unable to map interrupt\n");
1459 		goto shutdown;
1460 	}
1461 
1462 	printf(", %s, address %s\n", pci_intr_string(sc->sc_pc, sc->sc_ih),
1463 	    ether_sprintf(sc->sc_ac.ac_enaddr));
1464 
1465 	if (ixl_hmc(sc) != 0) {
1466 		/* error printed by ixl_hmc */
1467 		goto shutdown;
1468 	}
1469 
1470 	if (ixl_lldp_shut(sc) != 0) {
1471 		/* error printed by ixl_lldp_shut */
1472 		goto free_hmc;
1473 	}
1474 
1475 	if (ixl_phy_mask_ints(sc) != 0) {
1476 		/* error printed by ixl_phy_mask_ints */
1477 		goto free_hmc;
1478 	}
1479 
1480 	if (ixl_restart_an(sc) != 0) {
1481 		/* error printed by ixl_restart_an */
1482 		goto free_hmc;
1483 	}
1484 
1485 	if (ixl_get_switch_config(sc) != 0) {
1486 		/* error printed by ixl_get_switch_config */
1487 		goto free_hmc;
1488 	}
1489 
1490 	if (ixl_get_phy_abilities(sc, &phy_types) != 0) {
1491 		/* error printed by ixl_get_phy_abilities */
1492 		goto free_hmc;
1493 	}
1494 
1495 	if (ixl_get_link_status(sc) != 0) {
1496 		/* error printed by ixl_get_link_status */
1497 		goto free_hmc;
1498 	}
1499 
1500 	if (ixl_dmamem_alloc(sc, &sc->sc_vsi,
1501 	    sizeof(struct ixl_aq_vsi_data), 8) != 0) {
1502 		printf("%s: unable to allocate VSI data\n", DEVNAME(sc));
1503 		goto free_hmc;
1504 	}
1505 
1506 	if (ixl_get_vsi(sc) != 0) {
1507 		/* error printed by ixl_get_vsi */
1508 		goto free_vsi;
1509 	}
1510 
1511 	if (ixl_set_vsi(sc) != 0) {
1512 		/* error printed by ixl_set_vsi */
1513 		goto free_vsi;
1514 	}
1515 
1516 	sc->sc_ihc = pci_intr_establish(sc->sc_pc, sc->sc_ih,
1517 	    IPL_NET | IPL_MPSAFE, ixl_intr, sc, DEVNAME(sc));
1518 	if (sc->sc_ihc == NULL) {
1519 		printf("%s: unable to establish interrupt handler\n",
1520 		    DEVNAME(sc));
1521 		goto free_vsi;
1522 	}
1523 
1524 	ifp->if_softc = sc;
1525 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1526 	ifp->if_xflags = IFXF_MPSAFE;
1527 	ifp->if_ioctl = ixl_ioctl;
1528 	ifp->if_qstart = ixl_start;
1529 	ifp->if_watchdog = ixl_watchdog;
1530 	ifp->if_hardmtu = IXL_HARDMTU;
1531 	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
1532 	IFQ_SET_MAXLEN(&ifp->if_snd, 1);
1533 
1534 	ifp->if_capabilities = IFCAP_VLAN_MTU;
1535 #if 0
1536 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
1537 	ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
1538 	    IFCAP_CSUM_UDPv4;
1539 #endif
1540 
1541 	ifmedia_init(&sc->sc_media, 0, ixl_media_change, ixl_media_status);
1542 
1543 	ixl_media_add(sc, phy_types);
1544 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1545 	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
1546 
1547 	if_attach(ifp);
1548 	ether_ifattach(ifp);
1549 
1550 	if_attach_queues(ifp, ixl_nqueues(sc));
1551 	if_attach_iqueues(ifp, ixl_nqueues(sc));
1552 
1553 	ixl_wr(sc, I40E_PFINT_ICR0_ENA,
1554 	    I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK |
1555 	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK);
1556 	ixl_wr(sc, I40E_PFINT_STAT_CTL0,
1557 	    IXL_NOITR << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT);
1558 
1559 	ixl_intr_enable(sc);
1560 
1561 	return;
1562 
1563 free_vsi:
1564 	ixl_dmamem_free(sc, &sc->sc_vsi);
1565 free_hmc:
1566 	ixl_hmc_free(sc);
1567 shutdown:
1568 	ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1569 	ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1570 	ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1571 	ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1572 
1573 	ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0);
1574 	ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0);
1575 	ixl_wr(sc, sc->sc_aq_regs->atq_len, 0);
1576 
1577 	ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0);
1578 	ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0);
1579 	ixl_wr(sc, sc->sc_aq_regs->arq_len, 0);
1580 
1581 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1582 	    0, IXL_DMA_LEN(&sc->sc_arq),
1583 	    BUS_DMASYNC_POSTREAD);
1584 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1585 	    0, IXL_DMA_LEN(&sc->sc_atq),
1586 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1587 
1588 	ixl_arq_unfill(sc);
1589 free_arq:
1590 	ixl_dmamem_free(sc, &sc->sc_arq);
1591 free_atq:
1592 	ixl_dmamem_free(sc, &sc->sc_atq);
1593 unmap:
1594 	bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
1595 	sc->sc_mems = 0;
1596 }
1597 
1598 static void
1599 ixl_media_add(struct ixl_softc *sc, uint64_t phy_types)
1600 {
1601 	struct ifmedia *ifm = &sc->sc_media;
1602 	const struct ixl_phy_type *itype;
1603 	unsigned int i;
1604 
1605 	for (i = 0; i < nitems(ixl_phy_type_map); i++) {
1606 		itype = &ixl_phy_type_map[i];
1607 
1608 		if (ISSET(phy_types, itype->phy_type))
1609 			ifmedia_add(ifm, IFM_ETHER | itype->ifm_type, 0, NULL);
1610 	}
1611 }
1612 
1613 static int
1614 ixl_media_change(struct ifnet *ifp)
1615 {
1616 	/* ignore? */
1617 	return (EOPNOTSUPP);
1618 }
1619 
1620 static void
1621 ixl_media_status(struct ifnet *ifp, struct ifmediareq *ifm)
1622 {
1623 	struct ixl_softc *sc = ifp->if_softc;
1624 
1625 	NET_ASSERT_LOCKED();
1626 
1627 	ifm->ifm_status = sc->sc_media_status;
1628 	ifm->ifm_active = sc->sc_media_active;
1629 }
1630 
1631 static void
1632 ixl_watchdog(struct ifnet *ifp)
1633 {
1634 
1635 }
1636 
1637 int
1638 ixl_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1639 {
1640 	struct ixl_softc *sc = (struct ixl_softc *)ifp->if_softc;
1641 	struct ifreq *ifr = (struct ifreq *)data;
1642 	int error = 0;
1643 
1644 	switch (cmd) {
1645 	case SIOCSIFADDR:
1646 		ifp->if_flags |= IFF_UP;
1647 		/* FALLTHROUGH */
1648 
1649 	case SIOCSIFFLAGS:
1650 		if (ISSET(ifp->if_flags, IFF_UP)) {
1651 			if (ISSET(ifp->if_flags, IFF_RUNNING))
1652 				error = ENETRESET;
1653 			else
1654 				error = ixl_up(sc);
1655 		} else {
1656 			if (ISSET(ifp->if_flags, IFF_RUNNING))
1657 				error = ixl_down(sc);
1658 		}
1659 		break;
1660 
1661 	case SIOCGIFMEDIA:
1662 	case SIOCSIFMEDIA:
1663 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
1664 		break;
1665 
1666 #if 0
1667 	case SIOCGIFRXR:
1668 		error = ixl_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
1669 		break;
1670 #endif
1671 
1672 	default:
1673 		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
1674 		break;
1675 	}
1676 
1677 	if (error == ENETRESET)
1678 		error = ixl_iff(sc);
1679 
1680 	return (error);
1681 }
1682 
1683 static inline void *
1684 ixl_hmc_kva(struct ixl_softc *sc, unsigned int type, unsigned int i)
1685 {
1686 	uint8_t *kva = IXL_DMA_KVA(&sc->sc_hmc_pd);
1687 	struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];
1688 
1689 	if (i >= e->hmc_count)
1690 		return (NULL);
1691 
1692 	kva += e->hmc_base;
1693 	kva += i * e->hmc_size;
1694 
1695 	return (kva);
1696 }
1697 
1698 static inline size_t
1699 ixl_hmc_len(struct ixl_softc *sc, unsigned int type)
1700 {
1701 	struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];
1702 
1703 	return (e->hmc_size);
1704 }
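
/*
 * The HMC object space is one flat DMA region carved into per-type
 * slabs, so ixl_hmc_kva() resolves an (object type, index) pair to a
 * kernel pointer with plain offset arithmetic, i.e.:
 *
 *	kva = IXL_DMA_KVA(&sc->sc_hmc_pd) + e->hmc_base + i * e->hmc_size;
 *
 * where hmc_base and hmc_size were computed in ixl_hmc() below from
 * the object sizes and counts the hardware advertises.
 */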
1705 
1706 static int
1707 ixl_up(struct ixl_softc *sc)
1708 {
1709 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1710 	struct ixl_rx_ring *rxr;
1711 	struct ixl_tx_ring *txr;
1712 	unsigned int nqueues, i;
1713 	uint32_t reg;
1714 	int rv = ENOMEM;
1715 
1716 	nqueues = ixl_nqueues(sc);
1717 	KASSERT(nqueues == 1); /* XXX */
1718 
1719 	/* allocation is the only thing that can fail, so do it up front */
1720 	for (i = 0; i < nqueues; i++) {
1721 		rxr = ixl_rxr_alloc(sc, i);
1722 		if (rxr == NULL)
1723 			goto free;
1724 
1725 		txr = ixl_txr_alloc(sc, i);
1726 		if (txr == NULL) {
1727 			ixl_rxr_free(sc, rxr);
1728 			goto free;
1729 		}
1730 
1731 		ifp->if_iqs[i]->ifiq_softc = rxr;
1732 		ifp->if_ifqs[i]->ifq_softc = txr;
1733 	}
1734 
1735 	/* XXX wait 50ms from completion of last RX queue disable */
1736 
1737 	for (i = 0; i < nqueues; i++) {
1738 		rxr = ifp->if_iqs[i]->ifiq_softc;
1739 		txr = ifp->if_ifqs[i]->ifq_softc;
1740 
1741 		ixl_txr_qdis(sc, txr, 1);
1742 
1743 		ixl_rxr_config(sc, rxr);
1744 		ixl_txr_config(sc, txr);
1745 
1746 		ixl_wr(sc, I40E_QTX_CTL(i), I40E_QTX_CTL_PF_QUEUE |
1747 		    (sc->sc_pf_id << I40E_QTX_CTL_PF_INDX_SHIFT));
1748 
1749 		ixl_wr(sc, rxr->rxr_tail, 0);
1750 		ixl_rxfill(sc, rxr);
1751 
1752 		reg = ixl_rd(sc, I40E_QRX_ENA(i));
1753 		SET(reg, I40E_QRX_ENA_QENA_REQ_MASK);
1754 		ixl_wr(sc, I40E_QRX_ENA(i), reg);
1755 
1756 		reg = ixl_rd(sc, I40E_QTX_ENA(i));
1757 		SET(reg, I40E_QTX_ENA_QENA_REQ_MASK);
1758 		ixl_wr(sc, I40E_QTX_ENA(i), reg);
1759 	}
1760 
1761 	for (i = 0; i < nqueues; i++) {
1762 		rxr = ifp->if_iqs[i]->ifiq_softc;
1763 		txr = ifp->if_ifqs[i]->ifq_softc;
1764 
1765 		if (ixl_rxr_enabled(sc, rxr) != 0)
1766 			goto down;
1767 
1768 		if (ixl_txr_enabled(sc, txr) != 0)
1769 			goto down;
1770 	}
1771 
1772 	SET(ifp->if_flags, IFF_RUNNING);
1773 
1774 #if 0
1775 	reg = ixl_rd(sc, I40E_QINT_RQCTL(I40E_INTR_NOTX_QUEUE));
1776 	SET(reg, I40E_QINT_RQCTL_CAUSE_ENA_MASK);
1777 	ixl_wr(sc, I40E_QINT_RQCTL(I40E_INTR_NOTX_QUEUE), reg);
1778 
1779 	reg = ixl_rd(sc, I40E_QINT_TQCTL(I40E_INTR_NOTX_QUEUE));
1780 	SET(reg, I40E_QINT_TQCTL_CAUSE_ENA_MASK);
1781 	ixl_wr(sc, I40E_QINT_TQCTL(I40E_INTR_NOTX_QUEUE), reg);
1782 #endif
1783 
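	/*
	 * Interrupt causes are linked into a list: PFINT_LNKLST0 points
	 * vector 0 at the first queue, and each QINT_RQCTL/QINT_TQCTL
	 * entry below names the next (queue, type) pair in the chain,
	 * terminated with I40E_QUEUE_TYPE_EOL.
	 */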
1784 	ixl_wr(sc, I40E_PFINT_LNKLST0,
1785 	    (I40E_INTR_NOTX_QUEUE << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
1786 	    (I40E_QUEUE_TYPE_RX << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
1787 
1788 	ixl_wr(sc, I40E_QINT_RQCTL(I40E_INTR_NOTX_QUEUE),
1789 	    (I40E_INTR_NOTX_INTR << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
1790 	    (I40E_ITR_INDEX_RX << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
1791 	    (I40E_INTR_NOTX_RX_QUEUE << I40E_QINT_RQCTL_MSIX0_INDX_SHIFT) |
1792 	    (I40E_INTR_NOTX_QUEUE << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
1793 	    (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT));
1794 
1795 	ixl_wr(sc, I40E_QINT_TQCTL(I40E_INTR_NOTX_QUEUE),
1796 	    (I40E_INTR_NOTX_INTR << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
1797 	    (I40E_ITR_INDEX_TX << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
1798 	    (I40E_INTR_NOTX_TX_QUEUE << I40E_QINT_TQCTL_MSIX0_INDX_SHIFT) |
1799 	    (I40E_QUEUE_TYPE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
1800 	    (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT));
1801 
1802 	ixl_wr(sc, I40E_PFINT_ITR0(0), 0x7a);
1803 	ixl_wr(sc, I40E_PFINT_ITR0(1), 0x7a);
1804 	ixl_wr(sc, I40E_PFINT_ITR0(2), 0);
1805 
1806 	printf("%s: info %08x data %08x\n", DEVNAME(sc),
1807 	    ixl_rd(sc, I40E_PFHMC_ERRORINFO),
1808 	    ixl_rd(sc, I40E_PFHMC_ERRORDATA));
1809 
1810 	return (ENETRESET);
1811 
1812 free:
1813 	for (i = 0; i < nqueues; i++) {
1814 		rxr = ifp->if_iqs[i]->ifiq_softc;
1815 		txr = ifp->if_ifqs[i]->ifq_softc;
1816 
1817 		if (rxr == NULL) {
1818 			/*
1819 			 * tx and rx get set at the same time, so if one
1820 			 * is NULL, the other is too.
1821 			 */
1822 			continue;
1823 		}
1824 
1825 		ixl_txr_free(sc, txr);
1826 		ixl_rxr_free(sc, rxr);
1827 	}
1828 	return (rv);
1829 down:
1830 	ixl_down(sc);
1831 	return (ETIMEDOUT);
1832 }
1833 
1834 static int
1835 ixl_iff(struct ixl_softc *sc)
1836 {
1837 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1838 	struct ixl_atq iatq;
1839 	struct ixl_aq_desc *iaq;
1840 	struct ixl_aq_vsi_promisc_param *param;
1841 
1842 #if 0
1843 	if (!ISSET(ifp->if_flags, IFF_ALLMULTI))
1844 		return (0);
1845 #endif
1846 
1847 	if (!ISSET(ifp->if_flags, IFF_RUNNING))
1848 		return (0);
1849 
1850 	memset(&iatq, 0, sizeof(iatq));
1851 
1852 	iaq = &iatq.iatq_desc;
1853 	iaq->iaq_opcode = htole16(IXL_AQ_OP_SET_VSI_PROMISC);
1854 
1855 	param = (struct ixl_aq_vsi_promisc_param *)&iaq->iaq_param;
1856 	param->flags = htole16(IXL_AQ_VSI_PROMISC_FLAG_BCAST);
1857 //	if (ISSET(ifp->if_flags, IFF_PROMISC)) {
1858 		param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
1859 		    IXL_AQ_VSI_PROMISC_FLAG_MCAST);
1860 //	}
1861 	param->valid_flags = htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
1862 	    IXL_AQ_VSI_PROMISC_FLAG_MCAST | IXL_AQ_VSI_PROMISC_FLAG_BCAST);
1863 	param->seid = sc->sc_seid;
1864 
1865 	ixl_atq_exec(sc, &iatq, "ixliff");
1866 
1867 	if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK))
1868 		return (EIO);
1869 
1870 	return (0);
1871 }
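
/*
 * Note that the IFF_PROMISC test in ixl_iff() above is commented out,
 * so unicast and multicast promiscuous mode are currently requested
 * unconditionally; only the broadcast flag would remain always-on once
 * that test is restored.
 */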
1872 
1873 static int
1874 ixl_down(struct ixl_softc *sc)
1875 {
1876 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1877 	struct ixl_rx_ring *rxr;
1878 	struct ixl_tx_ring *txr;
1879 	unsigned int nqueues, i;
1880 	uint32_t reg;
1881 	int error = 0;
1882 
1883 	nqueues = ixl_nqueues(sc);
1884 
1885 	CLR(ifp->if_flags, IFF_RUNNING);
1886 
1887 	/* mask interrupts */
1888 	reg = ixl_rd(sc, I40E_QINT_RQCTL(I40E_INTR_NOTX_QUEUE));
1889 	CLR(reg, I40E_QINT_RQCTL_CAUSE_ENA_MASK);
1890 	ixl_wr(sc, I40E_QINT_RQCTL(I40E_INTR_NOTX_QUEUE), reg);
1891 
1892 	reg = ixl_rd(sc, I40E_QINT_TQCTL(I40E_INTR_NOTX_QUEUE));
1893 	CLR(reg, I40E_QINT_TQCTL_CAUSE_ENA_MASK);
1894 	ixl_wr(sc, I40E_QINT_TQCTL(I40E_INTR_NOTX_QUEUE), reg);
1895 
1896 	ixl_wr(sc, I40E_PFINT_LNKLST0, I40E_QUEUE_TYPE_EOL);
1897 
1898 	/* make sure no hw-generated work is still in flight */
1899 	intr_barrier(sc->sc_ihc);
1900 	for (i = 0; i < nqueues; i++) {
1901 		rxr = ifp->if_iqs[i]->ifiq_softc;
1902 		txr = ifp->if_ifqs[i]->ifq_softc;
1903 
1904 		ixl_txr_qdis(sc, txr, 0);
1905 
1906 		ifiq_barrier(ifp->if_iqs[i]);
1907 		ifq_barrier(ifp->if_ifqs[i]);
1908 
1909 		if (!timeout_del(&rxr->rxr_refill))
1910 			timeout_barrier(&rxr->rxr_refill);
1911 	}
1912 
1913 	/* XXX wait at least 400 usec for all tx queues in one go */
1914 	delay(500);
1915 
1916 	for (i = 0; i < nqueues; i++) {
1917 		rxr = ifp->if_iqs[i]->ifiq_softc;
1918 		txr = ifp->if_ifqs[i]->ifq_softc;
1919 
1920 		reg = ixl_rd(sc, I40E_QTX_ENA(i));
1921 		CLR(reg, I40E_QTX_ENA_QENA_REQ_MASK);
1922 		ixl_wr(sc, I40E_QTX_ENA(i), reg);
1923 
1924 		reg = ixl_rd(sc, I40E_QRX_ENA(i));
1925 		CLR(reg, I40E_QRX_ENA_QENA_REQ_MASK);
1926 		ixl_wr(sc, I40E_QRX_ENA(i), reg);
1927 	}
1928 
1929 	for (i = 0; i < nqueues; i++) {
1930 		rxr = ifp->if_iqs[i]->ifiq_softc;
1931 		txr = ifp->if_ifqs[i]->ifq_softc;
1932 
1933 		if (ixl_txr_disabled(sc, txr) != 0)
1934 			error = ETIMEDOUT;
1935 
1936 		if (ixl_rxr_disabled(sc, rxr) != 0)
1937 			error = ETIMEDOUT;
1938 	}
1939 
1940 	if (error) {
1941 		printf("%s: info %08x data %08x\n", DEVNAME(sc),
1942 		    ixl_rd(sc, I40E_PFHMC_ERRORINFO),
1943 		    ixl_rd(sc, I40E_PFHMC_ERRORDATA));
1944 
1945 		printf("%s: failed to shut down rings\n", DEVNAME(sc));
1946 		return (error);
1947 	}
1948 
1949 	for (i = 0; i < nqueues; i++) {
1950 		rxr = ifp->if_iqs[i]->ifiq_softc;
1951 		txr = ifp->if_ifqs[i]->ifq_softc;
1952 
1953 		ixl_txr_unconfig(sc, txr);
1954 		ixl_rxr_unconfig(sc, rxr);
1955 
1956 		ixl_txr_clean(sc, txr);
1957 		ixl_rxr_clean(sc, rxr);
1958 
1959 		ixl_txr_free(sc, txr);
1960 		ixl_rxr_free(sc, rxr);
1961 
1962 		ifp->if_iqs[i]->ifiq_softc = NULL;
1963 		ifp->if_ifqs[i]->ifq_softc = NULL;
1964 	}
1965 
1966 	return (0);
1967 }
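
/*
 * Teardown order in ixl_down() matters: the interrupt causes are
 * masked and the handler drained first, each queue is told to stop
 * scheduling new work (ixl_txr_qdis) and flushed with the ifq/ifiq
 * barriers, and only after a settling delay are the QENA_REQ bits
 * cleared and polled before the rings are cleaned and freed.
 */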
1968 
1969 static struct ixl_tx_ring *
1970 ixl_txr_alloc(struct ixl_softc *sc, unsigned int qid)
1971 {
1972 	struct ixl_tx_ring *txr;
1973 	struct ixl_tx_map *maps, *txm;
1974 	unsigned int i;
1975 
1976 	txr = malloc(sizeof(*txr), M_DEVBUF, M_WAITOK|M_CANFAIL);
1977 	if (txr == NULL)
1978 		return (NULL);
1979 
1980 	maps = mallocarray(sizeof(*maps),
1981 	    sc->sc_tx_ring_ndescs, M_DEVBUF, M_WAITOK|M_CANFAIL|M_ZERO);
1982 	if (maps == NULL)
1983 		goto free;
1984 
1985 	if (ixl_dmamem_alloc(sc, &txr->txr_mem,
1986 	    sizeof(struct ixl_tx_desc) * sc->sc_tx_ring_ndescs,
1987 	    IXL_TX_QUEUE_ALIGN) != 0)
1988 		goto freemap;
1989 
1990 	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
1991 		txm = &maps[i];
1992 
1993 		if (bus_dmamap_create(sc->sc_dmat,
1994 		    IXL_HARDMTU, IXL_TX_PKT_DESCS, IXL_HARDMTU, 0,
1995 		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
1996 		    &txm->txm_map) != 0)
1997 			goto uncreate;
1998 
1999 		txm->txm_eop = -1;
2000 		txm->txm_m = NULL;
2001 	}
2002 
2003 	txr->txr_cons = txr->txr_prod = 0;
2004 	txr->txr_maps = maps;
2005 
2006 	txr->txr_tail = I40E_QTX_TAIL(qid);
2007 	txr->txr_qid = qid;
2008 
2009 	return (txr);
2010 
2011 uncreate:
2012 	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2013 		txm = &maps[i];
2014 
2015 		if (txm->txm_map == NULL)
2016 			continue;
2017 
2018 		bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
2019 	}
2020 
2021 	ixl_dmamem_free(sc, &txr->txr_mem);
2022 freemap:
2023 	free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_tx_ring_ndescs);
2024 free:
2025 	free(txr, M_DEVBUF, sizeof(*txr));
2026 	return (NULL);
2027 }
2028 
2029 static void
2030 ixl_txr_qdis(struct ixl_softc *sc, struct ixl_tx_ring *txr, int enable)
2031 {
2032 	unsigned int qid;
2033 	bus_size_t reg;
2034 	uint32_t r;
2035 
2036 	qid = txr->txr_qid + sc->sc_base_queue;
2037 	reg = I40E_GLLAN_TXPRE_QDIS(qid / 128);
2038 	qid %= 128;
2039 
2040 	r = ixl_rd(sc, reg);
2041 	CLR(r, I40E_GLLAN_TXPRE_QDIS_QINDX_MASK);
2042 	SET(r, qid << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
2043 	SET(r, enable ? I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK :
2044 	    I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK);
2045 	ixl_wr(sc, reg, r);
2046 }
2047 
2048 static void
2049 ixl_txr_config(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2050 {
2051 	struct ixl_hmc_txq txq;
2052 	struct ixl_aq_vsi_data *data = IXL_DMA_KVA(&sc->sc_vsi);
2053 	void *hmc;
2054 
2055 	memset(&txq, 0, sizeof(txq));
2056 	txq.head = htole16(0);
2057 	txq.new_context = 1;
2058 	htolem64(&txq.base,
2059 	    IXL_DMA_DVA(&txr->txr_mem) / IXL_HMC_TXQ_BASE_UNIT);
2060 	txq.head_wb_ena = IXL_HMC_TXQ_DESC_WB;
2061 	htolem16(&txq.qlen, sc->sc_tx_ring_ndescs);
2062 	txq.tphrdesc_ena = 0;
2063 	txq.tphrpacket_ena = 0;
2064 	txq.tphwdesc_ena = 0;
2065 	txq.rdylist = data->qs_handle[0];
2066 
2067 	hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
2068 	memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX));
2069 	ixl_hmc_pack(hmc, &txq, ixl_hmc_pack_txq, nitems(ixl_hmc_pack_txq));
2070 }
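
/*
 * Queue context does not live in device registers: the ring base,
 * length and write-back options are packed into this queue's slot in
 * the HMC (see ixl_hmc_pack() below), and the hardware fetches them
 * from host memory when the queue is enabled.
 */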
2071 
2072 static void
2073 ixl_txr_unconfig(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2074 {
2075 	void *hmc;
2076 
2077 	hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
2078 	memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX));
2079 }
2080 
2081 static void
2082 ixl_txr_clean(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2083 {
2084 	struct ixl_tx_map *maps, *txm;
2085 	bus_dmamap_t map;
2086 	unsigned int i;
2087 
2088 	maps = txr->txr_maps;
2089 	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2090 		txm = &maps[i];
2091 
2092 		if (txm->txm_m == NULL)
2093 			continue;
2094 
2095 		map = txm->txm_map;
2096 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2097 		    BUS_DMASYNC_POSTWRITE);
2098 		bus_dmamap_unload(sc->sc_dmat, map);
2099 
2100 		m_freem(txm->txm_m);
2101 		txm->txm_m = NULL;
2102 	}
2103 }
2104 
2105 static int
2106 ixl_txr_enabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2107 {
2108 	bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
2109 	uint32_t reg;
2110 	int i;
2111 
2112 	for (i = 0; i < 10; i++) {
2113 		reg = ixl_rd(sc, ena);
2114 		if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK))
2115 			return (0);
2116 
2117 		delaymsec(10);
2118 	}
2119 
2120 	return (ETIMEDOUT);
2121 }
2122 
2123 static int
2124 ixl_txr_disabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2125 {
2126 	bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
2127 	uint32_t reg;
2128 	int i;
2129 
2130 	for (i = 0; i < 20; i++) {
2131 		reg = ixl_rd(sc, ena);
2132 		if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK) == 0)
2133 			return (0);
2134 
2135 		delaymsec(10);
2136 	}
2137 
2138 	return (ETIMEDOUT);
2139 }
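
/*
 * Queue enable and disable is a request/acknowledge handshake with
 * the hardware: ixl_up() and ixl_down() set or clear QENA_REQ in the
 * queue's ENA register, and the chip reflects the result in
 * QENA_STAT.  The helpers above (and their rx equivalents below)
 * simply poll QENA_STAT in bounded 10ms steps and report ETIMEDOUT
 * if the hardware never reacts.
 */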
2140 
2141 static void
2142 ixl_txr_free(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2143 {
2144 	struct ixl_tx_map *maps, *txm;
2145 	unsigned int i;
2146 
2147 	maps = txr->txr_maps;
2148 	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2149 		txm = &maps[i];
2150 
2151 		bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
2152 	}
2153 
2154 	ixl_dmamem_free(sc, &txr->txr_mem);
2155 	free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_tx_ring_ndescs);
2156 	free(txr, M_DEVBUF, sizeof(*txr));
2157 }
2158 
2159 static inline int
2160 ixl_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m)
2161 {
2162 	int error;
2163 
2164 	error = bus_dmamap_load_mbuf(dmat, map, m,
2165 	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT);
2166 	if (error != EFBIG || m_defrag(m, M_DONTWAIT) != 0)
2167 		return (error);
2168 
2169 	return (bus_dmamap_load_mbuf(dmat, map, m,
2170 	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT));
2171 }
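
/*
 * ixl_load_mbuf() is the usual two-attempt DMA load: a chain with too
 * many segments fails with EFBIG, gets compacted into fewer mbufs by
 * m_defrag(), and is loaded once more.  Any other failure (or a
 * failed defrag) is handed straight back to the caller, which drops
 * the packet.
 */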
2172 
2173 static void
2174 ixl_start(struct ifqueue *ifq)
2175 {
2176 	struct ifnet *ifp = ifq->ifq_if;
2177 	struct ixl_softc *sc = ifp->if_softc;
2178 	struct ixl_tx_ring *txr = ifq->ifq_softc;
2179 	struct ixl_tx_desc *ring, *txd;
2180 	struct ixl_tx_map *txm;
2181 	bus_dmamap_t map;
2182 	struct mbuf *m;
2183 	uint64_t cmd;
2184 	unsigned int prod, free, last, i;
2185 	unsigned int mask;
2186 	int post = 0;
2187 #if NBPFILTER > 0
2188 	caddr_t if_bpf;
2189 #endif
2190 
2191 	if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
2192 		ifq_purge(ifq);
2193 		return;
2194 	}
2195 
2196 	prod = txr->txr_prod;
2197 	free = txr->txr_cons;
2198 	if (free <= prod)
2199 		free += sc->sc_tx_ring_ndescs;
2200 	free -= prod;
2201 
2202 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2203 	    0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTWRITE);
2204 
2205 	ring = IXL_DMA_KVA(&txr->txr_mem);
2206 	mask = sc->sc_tx_ring_ndescs - 1;
2207 
2208 	for (;;) {
2209 		if (free <= IXL_TX_PKT_DESCS) {
2210 			ifq_set_oactive(ifq);
2211 			break;
2212 		}
2213 
2214 		m = ifq_dequeue(ifq);
2215 		if (m == NULL)
2216 			break;
2217 
2218 		txm = &txr->txr_maps[prod];
2219 		map = txm->txm_map;
2220 
2221 		if (ixl_load_mbuf(sc->sc_dmat, map, m) != 0) {
2222 			m_freem(m);
2223 			continue;
2224 		}
2225 
2226 		bus_dmamap_sync(sc->sc_dmat, map, 0,
2227 		    map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2228 
2229 		for (i = 0; i < map->dm_nsegs; i++) {
2230 			txd = &ring[prod];
2231 
2232 			cmd = (uint64_t)map->dm_segs[i].ds_len <<
2233 			    IXL_TX_DESC_BSIZE_SHIFT;
2234 			cmd |= IXL_TX_DESC_DTYPE_DATA | IXL_TX_DESC_CMD_ICRC;
2235 
2236 			htolem64(&txd->addr, map->dm_segs[i].ds_addr);
2237 			htolem64(&txd->cmd, cmd);
2238 
2239 			last = prod;
2240 
2241 			prod++;
2242 			prod &= mask;
2243 		}
2244 		cmd |= IXL_TX_DESC_CMD_EOP | IXL_TX_DESC_CMD_RS;
2245 		htolem64(&txd->cmd, cmd);
2246 
2247 		txm->txm_m = m;
2248 		txm->txm_eop = last;
2249 
2250 #if NBPFILTER > 0
2251 		if_bpf = ifp->if_bpf;
2252 		if (if_bpf)
2253 			bpf_mtap_ether(if_bpf, m, BPF_DIRECTION_OUT);
2254 #endif
2255 
2256 		free -= i;
2257 		post = 1;
2258 	}
2259 
2260 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2261 	    0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREWRITE);
2262 
2263 	if (post) {
2264 		txr->txr_prod = prod;
2265 		ixl_wr(sc, txr->txr_tail, prod);
2266 	}
2267 }
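
/*
 * Both descriptor rings are a power of two in size, so producer and
 * consumer indexes wrap with a mask instead of a modulo, e.g.:
 *
 *	mask = sc->sc_tx_ring_ndescs - 1;
 *	prod = (prod + 1) & mask;
 *
 * The free-space estimate at the top of ixl_start() is the same
 * arithmetic with an explicit wrap: cons - prod, plus the ring size
 * whenever cons has wrapped behind prod.
 */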
2268 
2269 static int
2270 ixl_txeof(struct ixl_softc *sc, struct ifqueue *ifq)
2271 {
2272 	struct ixl_tx_ring *txr = ifq->ifq_softc;
2273 	struct ixl_tx_desc *ring, *txd;
2274 	struct ixl_tx_map *txm;
2275 	bus_dmamap_t map;
2276 	unsigned int cons, prod, last;
2277 	unsigned int mask;
2278 	uint64_t dtype;
2279 	int done = 0;
2280 
2281 	prod = txr->txr_prod;
2282 	cons = txr->txr_cons;
2283 
2284 	if (cons == prod)
2285 		return (0);
2286 
2287 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2288 	    0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTREAD);
2289 
2290 	ring = IXL_DMA_KVA(&txr->txr_mem);
2291 	mask = sc->sc_tx_ring_ndescs - 1;
2292 
2293 	do {
2294 		txm = &txr->txr_maps[cons];
2295 		last = txm->txm_eop;
2296 		txd = &ring[last];
2297 
2298 		dtype = txd->cmd & htole64(IXL_TX_DESC_DTYPE_MASK);
2299 		if (dtype != htole64(IXL_TX_DESC_DTYPE_DONE))
2300 			break;
2301 
2302 		map = txm->txm_map;
2303 
2304 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2305 		    BUS_DMASYNC_POSTWRITE);
2306 		bus_dmamap_unload(sc->sc_dmat, map);
2307 		m_freem(txm->txm_m);
2308 
2309 		txm->txm_m = NULL;
2310 		txm->txm_eop = -1;
2311 
2312 		cons = last + 1;
2313 		cons &= mask;
2314 
2315 		done = 1;
2316 	} while (cons != prod);
2317 
2318 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2319 	    0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREREAD);
2320 
2321 	txr->txr_cons = cons;
2322 
2323 	//ixl_enable(sc, txr->txr_msix);
2324 
2325 	if (ifq_is_oactive(ifq))
2326 		ifq_restart(ifq);
2327 
2328 	return (done);
2329 }
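
/*
 * Transmit completion relies on descriptor write-back: when a
 * descriptor posted with the RS bit completes, the hardware rewrites
 * its DTYPE field to IXL_TX_DESC_DTYPE_DONE.  Since ixl_start() sets
 * RS only on the last descriptor of a packet, ixl_txeof() checks the
 * slot recorded in txm_eop and retires the whole packet at once.
 */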
2330 
2331 static struct ixl_rx_ring *
2332 ixl_rxr_alloc(struct ixl_softc *sc, unsigned int qid)
2333 {
2334 	struct ixl_rx_ring *rxr;
2335 	struct ixl_rx_map *maps, *rxm;
2336 	unsigned int i;
2337 
2338 	rxr = malloc(sizeof(*rxr), M_DEVBUF, M_WAITOK|M_CANFAIL);
2339 	if (rxr == NULL)
2340 		return (NULL);
2341 
2342 	maps = mallocarray(sizeof(*maps),
2343 	    sc->sc_rx_ring_ndescs, M_DEVBUF, M_WAITOK|M_CANFAIL|M_ZERO);
2344 	if (maps == NULL)
2345 		goto free;
2346 
2347 	if (ixl_dmamem_alloc(sc, &rxr->rxr_mem,
2348 	    sizeof(struct ixl_rx_rd_desc_16) * sc->sc_rx_ring_ndescs,
2349 	    IXL_RX_QUEUE_ALIGN) != 0)
2350 		goto freemap;
2351 
2352 	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2353 		rxm = &maps[i];
2354 
2355 		if (bus_dmamap_create(sc->sc_dmat,
2356 		    IXL_HARDMTU, 1, IXL_HARDMTU, 0,
2357 		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
2358 		    &rxm->rxm_map) != 0)
2359 			goto uncreate;
2360 
2361 		rxm->rxm_m = NULL;
2362 	}
2363 
2364 	rxr->rxr_sc = sc;
2365 	if_rxr_init(&rxr->rxr_acct, 17, sc->sc_rx_ring_ndescs - 1);
2366 	timeout_set(&rxr->rxr_refill, ixl_rxrefill, rxr);
2367 	rxr->rxr_cons = rxr->rxr_prod = 0;
2368 	rxr->rxr_m_head = NULL;
2369 	rxr->rxr_m_tail = &rxr->rxr_m_head;
2370 	rxr->rxr_maps = maps;
2371 
2372 	rxr->rxr_tail = I40E_QRX_TAIL(qid);
2373 	rxr->rxr_qid = qid;
2374 
2375 	return (rxr);
2376 
2377 uncreate:
2378 	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2379 		rxm = &maps[i];
2380 
2381 		if (rxm->rxm_map == NULL)
2382 			continue;
2383 
2384 		bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
2385 	}
2386 
2387 	ixl_dmamem_free(sc, &rxr->rxr_mem);
2388 freemap:
2389 	free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_rx_ring_ndescs);
2390 free:
2391 	free(rxr, M_DEVBUF, sizeof(*rxr));
2392 	return (NULL);
2393 }
2394 
2395 static void
2396 ixl_rxr_clean(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2397 {
2398 	struct ixl_rx_map *maps, *rxm;
2399 	bus_dmamap_t map;
2400 	unsigned int i;
2401 
2402 	if (!timeout_del(&rxr->rxr_refill))
2403 		timeout_barrier(&rxr->rxr_refill);
2404 
2405 	maps = rxr->rxr_maps;
2406 	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2407 		rxm = &maps[i];
2408 
2409 		if (rxm->rxm_m == NULL)
2410 			continue;
2411 
2412 		map = rxm->rxm_map;
2413 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2414 		    BUS_DMASYNC_POSTWRITE);
2415 		bus_dmamap_unload(sc->sc_dmat, map);
2416 
2417 		m_freem(rxm->rxm_m);
2418 		rxm->rxm_m = NULL;
2419 	}
2420 
2421 	m_freem(rxr->rxr_m_head);
2422 	rxr->rxr_m_head = NULL;
2423 	rxr->rxr_m_tail = &rxr->rxr_m_head;
2424 
2425 	rxr->rxr_prod = rxr->rxr_cons = 0;
2426 }
2427 
2428 static int
2429 ixl_rxr_enabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2430 {
2431 	bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid);
2432 	uint32_t reg;
2433 	int i;
2434 
2435 	for (i = 0; i < 10; i++) {
2436 		reg = ixl_rd(sc, ena);
2437 		if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK))
2438 			return (0);
2439 
2440 		delaymsec(10);
2441 	}
2442 
2443 	return (ETIMEDOUT);
2444 }
2445 
2446 static int
2447 ixl_rxr_disabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2448 {
2449 	bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid);
2450 	uint32_t reg;
2451 	int i;
2452 
2453 	for (i = 0; i < 20; i++) {
2454 		reg = ixl_rd(sc, ena);
2455 		if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK) == 0)
2456 			return (0);
2457 
2458 		delaymsec(10);
2459 	}
2460 
2461 	return (ETIMEDOUT);
2462 }
2463 
2464 static void
2465 ixl_rxr_config(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2466 {
2467 	struct ixl_hmc_rxq rxq;
2468 	void *hmc;
2469 
2470 	memset(&rxq, 0, sizeof(rxq));
2471 
2472 	rxq.head = htole16(0);
2473 	htolem64(&rxq.base,
2474 	    IXL_DMA_DVA(&rxr->rxr_mem) / IXL_HMC_RXQ_BASE_UNIT);
2475 	htolem16(&rxq.qlen, sc->sc_rx_ring_ndescs);
2476 	rxq.dbuff = htole16(MCLBYTES / IXL_HMC_RXQ_DBUFF_UNIT);
2477 	rxq.hbuff = 0;
2478 	rxq.dtype = IXL_HMC_RXQ_DTYPE_NOSPLIT;
2479 	rxq.dsize = IXL_HMC_RXQ_DSIZE_16;
2480 	rxq.crcstrip = 1;
2481 	rxq.l2sel = 0;
2482 	rxq.showiv = 0;
2483 	rxq.rxmax = htole16(MCLBYTES); /* XXX */
2484 	rxq.tphrdesc_ena = 0;
2485 	rxq.tphwdesc_ena = 0;
2486 	rxq.tphdata_ena = 0;
2487 	rxq.tphhead_ena = 0;
2488 	rxq.lrxqthresh = 0;
2489 	rxq.prefena = 1;
2490 
2491 	hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid);
2492 	memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX));
2493 	ixl_hmc_pack(hmc, &rxq, ixl_hmc_pack_rxq, nitems(ixl_hmc_pack_rxq));
2494 }
2495 
2496 static void
2497 ixl_rxr_unconfig(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2498 {
2499 	void *hmc;
2500 
2501 	hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid);
2502 	memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX));
2503 }
2504 
2505 static void
2506 ixl_rxr_free(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2507 {
2508 	struct ixl_rx_map *maps, *rxm;
2509 	unsigned int i;
2510 
2511 	maps = rxr->rxr_maps;
2512 	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2513 		rxm = &maps[i];
2514 
2515 		bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
2516 	}
2517 
2518 	ixl_dmamem_free(sc, &rxr->rxr_mem);
2519 	free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_rx_ring_ndescs);
2520 	free(rxr, M_DEVBUF, sizeof(*rxr));
2521 }
2522 
2523 static int
2524 ixl_rxeof(struct ixl_softc *sc, struct ifiqueue *ifiq)
2525 {
2526 	struct ixl_rx_ring *rxr = ifiq->ifiq_softc;
2527 	struct ifnet *ifp = &sc->sc_ac.ac_if;
2528 	struct ixl_rx_wb_desc_16 *ring, *rxd;
2529 	struct ixl_rx_map *rxm;
2530 	bus_dmamap_t map;
2531 	unsigned int cons, prod;
2532 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
2533 	struct mbuf *m;
2534 	uint64_t word;
2535 	unsigned int len;
2536 	unsigned int mask;
2537 	int done = 0;
2538 
2539 	prod = rxr->rxr_prod;
2540 	cons = rxr->rxr_cons;
2541 
2542 	if (cons == prod)
2543 		return (0);
2544 
2545 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
2546 	    0, IXL_DMA_LEN(&rxr->rxr_mem),
2547 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2548 
2549 	ring = IXL_DMA_KVA(&rxr->rxr_mem);
2550 	mask = sc->sc_rx_ring_ndescs - 1;
2551 
2552 	do {
2553 		rxd = &ring[cons];
2554 
2555 		word = lemtoh64(&rxd->qword1);
2556 		if (!ISSET(word, IXL_RX_DESC_DD))
2557 			break;
2558 
2559 		if_rxr_put(&rxr->rxr_acct, 1);
2560 
2561 		rxm = &rxr->rxr_maps[cons];
2562 
2563 		map = rxm->rxm_map;
2564 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2565 		    BUS_DMASYNC_POSTREAD);
2566 		bus_dmamap_unload(sc->sc_dmat, map);
2567 
2568 		m = rxm->rxm_m;
2569 		rxm->rxm_m = NULL;
2570 
2571 		len = (word & IXL_RX_DESC_PLEN_MASK) >> IXL_RX_DESC_PLEN_SHIFT;
2572 		m->m_len = len;
2573 		m->m_pkthdr.len = 0;
2574 
2575 		m->m_next = NULL;
2576 		*rxr->rxr_m_tail = m;
2577 		rxr->rxr_m_tail = &m->m_next;
2578 
2579 		m = rxr->rxr_m_head;
2580 		m->m_pkthdr.len += len;
2581 
2582 		if (ISSET(word, IXL_RX_DESC_EOP)) {
2583 			if (!ISSET(word,
2584 			    IXL_RX_DESC_RXE | IXL_RX_DESC_OVERSIZE)) {
2585 				ml_enqueue(&ml, m);
2586 			} else {
2587 				ifp->if_ierrors++; /* XXX */
2588 				m_freem(m);
2589 			}
2590 
2591 			rxr->rxr_m_head = NULL;
2592 			rxr->rxr_m_tail = &rxr->rxr_m_head;
2593 		}
2594 
2595 		cons++;
2596 		cons &= mask;
2597 
2598 		done = 1;
2599 	} while (cons != prod);
2600 
2601 	if (done) {
2602 		rxr->rxr_cons = cons;
2603 		ixl_rxfill(sc, rxr);
2604 		if_input(ifp, &ml);
2605 	}
2606 
2607 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
2608 	    0, IXL_DMA_LEN(&rxr->rxr_mem),
2609 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2610 
2611 	return (done);
2612 }
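
/*
 * A received frame may span several descriptors when it exceeds the
 * cluster size.  ixl_rxeof() therefore accumulates mbufs on
 * rxr_m_head/rxr_m_tail and only hands the chain to if_input() once a
 * descriptor with the EOP bit completes it; RXE and OVERSIZE errors
 * drop the whole chain instead.
 */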
2613 
2614 static void
2615 ixl_rxfill(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2616 {
2617 	struct ixl_rx_rd_desc_16 *ring, *rxd;
2618 	struct ixl_rx_map *rxm;
2619 	bus_dmamap_t map;
2620 	struct mbuf *m;
2621 	unsigned int prod;
2622 	unsigned int slots;
2623 	unsigned int mask;
2624 	int post = 0;
2625 
2626 	slots = if_rxr_get(&rxr->rxr_acct, sc->sc_rx_ring_ndescs);
2627 	if (slots == 0)
2628 		return;
2629 
2630 	prod = rxr->rxr_prod;
2631 
2632 	ring = IXL_DMA_KVA(&rxr->rxr_mem);
2633 	mask = sc->sc_rx_ring_ndescs - 1;
2634 
2635 	do {
2636 		rxm = &rxr->rxr_maps[prod];
2637 
2638 		m = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES + ETHER_ALIGN);
2639 		if (m == NULL)
2640 			break;
2641 		m->m_len = m->m_pkthdr.len = MCLBYTES + ETHER_ALIGN;
2642 		m_adj(m, ETHER_ALIGN);
2643 
2644 		map = rxm->rxm_map;
2645 
2646 		if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
2647 		    BUS_DMA_NOWAIT) != 0) {
2648 			m_freem(m);
2649 			break;
2650 		}
2651 
2652 		rxm->rxm_m = m;
2653 
2654 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2655 		    BUS_DMASYNC_PREREAD);
2656 
2657 		rxd = &ring[prod];
2658 
2659 		htolem64(&rxd->paddr, map->dm_segs[0].ds_addr);
2660 		rxd->haddr = htole64(0);
2661 
2662 		prod++;
2663 		prod &= mask;
2664 
2665 		post = 1;
2666 	} while (--slots);
2667 
2668 	if_rxr_put(&rxr->rxr_acct, slots);
2669 
2670 	if (if_rxr_inuse(&rxr->rxr_acct) == 0)
2671 		timeout_add(&rxr->rxr_refill, 1);
2672 	else if (post) {
2673 		rxr->rxr_prod = prod;
2674 		ixl_wr(sc, rxr->rxr_tail, prod);
2675 	}
2676 }
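
/*
 * if_rxr_get()/if_rxr_put() meter how many RX slots may hold mbufs.
 * If the ring drains completely and cluster allocation fails (inuse
 * reaches 0), no further RX completion would ever trigger a refill,
 * so ixl_rxfill() schedules the rxr_refill timeout as a backstop.
 */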
2677 
2678 void
2679 ixl_rxrefill(void *arg)
2680 {
2681 	struct ixl_rx_ring *rxr = arg;
2682 	struct ixl_softc *sc = rxr->rxr_sc;
2683 
2684 	ixl_rxfill(sc, rxr);
2685 }
2686 
2687 static int
2688 ixl_intr(void *xsc)
2689 {
2690 	struct ixl_softc *sc = xsc;
2691 	struct ifnet *ifp = &sc->sc_ac.ac_if;
2692 	uint32_t icr;
2693 	int rv = 0;
2694 
2695 	icr = ixl_rd(sc, I40E_PFINT_ICR0);
2696 
2697 	if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)) {
2698 		ixl_atq_done(sc);
2699 		task_add(systq, &sc->sc_arq_task);
2700 		rv = 1;
2701 	}
2702 
2703 	if (ISSET(icr, I40E_INTR_NOTX_RX_MASK))
2704 		rv |= ixl_rxeof(sc, ifp->if_iqs[0]);
2705 	if (ISSET(icr, I40E_INTR_NOTX_TX_MASK))
2706 		rv |= ixl_txeof(sc, ifp->if_ifqs[0]);
2707 
2708 	return (rv);
2709 }
2710 
2711 static void
2712 ixl_arq_link_status(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
2713 {
2714 	struct ifnet *ifp = &sc->sc_ac.ac_if;
2715 	int link_state;
2716 
2717 	NET_LOCK();
2718 	link_state = ixl_set_link_status(sc, iaq);
2719 	if (ifp->if_link_state != link_state) {
2720 		ifp->if_link_state = link_state;
2721 		if_link_state_change(ifp);
2722 	}
2723 	NET_UNLOCK();
2724 }
2725 
2726 #if 0
2727 static void
2728 ixl_aq_dump(const struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
2729 {
2730 	printf("%s: flags %b opcode %04x\n", DEVNAME(sc),
2731 	    lemtoh16(&iaq->iaq_flags), IXL_AQ_FLAGS_FMT,
2732 	    lemtoh16(&iaq->iaq_opcode));
2733 	printf("%s: datalen %u retval %u\n", DEVNAME(sc),
2734 	    lemtoh16(&iaq->iaq_datalen), lemtoh16(&iaq->iaq_retval));
2735 	printf("%s: cookie %016llx\n", DEVNAME(sc), iaq->iaq_cookie);
2736 	printf("%s: %08x %08x %08x %08x\n", DEVNAME(sc),
2737 	    lemtoh32(&iaq->iaq_param[0]), lemtoh32(&iaq->iaq_param[1]),
2738 	    lemtoh32(&iaq->iaq_param[2]), lemtoh32(&iaq->iaq_param[3]));
2739 }
2740 #endif
2741 
2742 static void
2743 ixl_arq(void *xsc)
2744 {
2745 	struct ixl_softc *sc = xsc;
2746 	struct ixl_aq_desc *arq, *iaq;
2747 	struct ixl_aq_buf *aqb;
2748 	unsigned int cons = sc->sc_arq_cons;
2749 	unsigned int prod;
2750 	int done = 0;
2751 
2752 	prod = ixl_rd(sc, sc->sc_aq_regs->arq_head) &
2753 	    sc->sc_aq_regs->arq_head_mask;
2754 
2755 	if (cons == prod)
2756 		goto done;
2757 
2758 	arq = IXL_DMA_KVA(&sc->sc_arq);
2759 
2760 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
2761 	    0, IXL_DMA_LEN(&sc->sc_arq),
2762 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2763 
2764 	do {
2765 		iaq = &arq[cons];
2766 
2767 		aqb = SIMPLEQ_FIRST(&sc->sc_arq_live);
2768 		bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN,
2769 		    BUS_DMASYNC_POSTREAD);
2770 
2771 		switch (iaq->iaq_opcode) {
2772 		case HTOLE16(IXL_AQ_OP_PHY_LINK_STATUS):
2773 			ixl_arq_link_status(sc, iaq);
2774 			break;
2775 		}
2776 
2777 		memset(iaq, 0, sizeof(*iaq));
2778 		SIMPLEQ_INSERT_TAIL(&sc->sc_arq_idle, aqb, aqb_entry);
2779 		if_rxr_put(&sc->sc_arq_ring, 1);
2780 
2781 		cons++;
2782 		cons &= IXL_AQ_MASK;
2783 
2784 		done = 1;
2785 	} while (cons != prod);
2786 
2787 	if (done && ixl_arq_fill(sc))
2788 		ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
2789 
2790 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
2791 	    0, IXL_DMA_LEN(&sc->sc_arq),
2792 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2793 
2794 	sc->sc_arq_cons = cons;
2795 
2796 done:
2797 	ixl_intr_enable(sc);
2798 }
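
/*
 * The admin receive queue is drained here from task context: each
 * completed slot has its buffer synced, the event is dispatched (only
 * link status changes are handled so far), and the buffer is recycled
 * onto sc_arq_idle for ixl_arq_fill() to repost.  The admin queue
 * interrupt is re-armed via ixl_intr_enable() on the way out.
 */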
2799 
2800 static void
2801 ixl_atq_set(struct ixl_atq *iatq,
2802     void (*fn)(struct ixl_softc *, void *), void *arg)
2803 {
2804 	iatq->iatq_fn = fn;
2805 	iatq->iatq_arg = arg;
2806 }
2807 
2808 static void
2809 ixl_atq_post(struct ixl_softc *sc, struct ixl_atq *iatq)
2810 {
2811 	struct ixl_aq_desc *atq, *slot;
2812 	unsigned int prod;
2813 
2814 	/* assert locked */
2815 
2816 	atq = IXL_DMA_KVA(&sc->sc_atq);
2817 	prod = sc->sc_atq_prod;
2818 	slot = atq + prod;
2819 
2820 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
2821 	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
2822 
2823 	*slot = iatq->iatq_desc;
2824 	slot->iaq_cookie = (uint64_t)iatq;
2825 
2826 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
2827 	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
2828 
2829 	prod++;
2830 	prod &= IXL_AQ_MASK;
2831 	sc->sc_atq_prod = prod;
2832 	ixl_wr(sc, sc->sc_aq_regs->atq_tail, prod);
2833 }
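
/*
 * The admin transmit queue borrows the descriptor cookie field to
 * carry the kernel pointer of the submitting ixl_atq, i.e.:
 *
 *	slot->iaq_cookie = (uint64_t)iatq;
 *
 * ixl_atq_done() casts it back to find whose command completed and
 * which callback to run, so no separate pending list is needed.
 */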
2834 
2835 static void
2836 ixl_atq_done(struct ixl_softc *sc)
2837 {
2838 	struct ixl_atq_list cmds = SIMPLEQ_HEAD_INITIALIZER(cmds);
2839 	struct ixl_aq_desc *atq, *slot;
2840 	struct ixl_atq *iatq;
2841 	unsigned int cons;
2842 	unsigned int prod;
2843 
2844 	prod = sc->sc_atq_prod;
2845 	cons = sc->sc_atq_cons;
2846 
2847 	if (prod == cons)
2848 		return;
2849 
2850 	atq = IXL_DMA_KVA(&sc->sc_atq);
2851 
2852 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
2853 	    0, IXL_DMA_LEN(&sc->sc_atq),
2854 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2855 
2856 	do {
2857 		slot = &atq[cons];
2858 
2859 		iatq = (struct ixl_atq *)slot->iaq_cookie;
2860 		iatq->iatq_desc = *slot;
2861 		SIMPLEQ_INSERT_TAIL(&cmds, iatq, iatq_entry);
2862 
2863 		memset(slot, 0, sizeof(*slot));
2864 
2865 		cons++;
2866 		cons &= IXL_AQ_MASK;
2867 	} while (cons != prod);
2868 
2869 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
2870 	    0, IXL_DMA_LEN(&sc->sc_atq),
2871 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2872 
2873 	sc->sc_atq_cons = cons;
2874 
2875 	while ((iatq = SIMPLEQ_FIRST(&cmds)) != NULL) {
2876 		SIMPLEQ_REMOVE_HEAD(&cmds, iatq_entry);
2877 
2878 		(*iatq->iatq_fn)(sc, iatq->iatq_arg);
2879 	}
2880 }
2881 
2882 struct ixl_wakeup {
2883 	struct mutex mtx;
2884 	int notdone;
2885 };
2886 
2887 static void
2888 ixl_wakeup(struct ixl_softc *sc, void *arg)
2889 {
2890 	struct ixl_wakeup *wake = arg;
2891 
2892 	mtx_enter(&wake->mtx);
2893 	wake->notdone = 0;
2894 	mtx_leave(&wake->mtx);
2895 
2896 	wakeup(wake);
2897 }
2898 
2899 static void
2900 ixl_atq_exec(struct ixl_softc *sc, struct ixl_atq *iatq, const char *wmesg)
2901 {
2902 	struct ixl_wakeup wake = { MUTEX_INITIALIZER(IPL_NET), 1 };
2903 
2904 	KASSERT(iatq->iatq_desc.iaq_cookie == 0);
2905 
2906 	ixl_atq_set(iatq, ixl_wakeup, &wake);
2907 	ixl_atq_post(sc, iatq);
2908 
2909 	mtx_enter(&wake.mtx);
2910 	while (wake.notdone) {
2911 		mtx_leave(&wake.mtx);
2912 		ixl_atq_done(sc);
2913 		mtx_enter(&wake.mtx);
2914 		msleep(&wake, &wake.mtx, 0, wmesg, 1);
2915 	}
2916 	mtx_leave(&wake.mtx);
2917 }
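
/*
 * ixl_atq_exec() is the synchronous wrapper around the asynchronous
 * queue: it posts the command with ixl_wakeup() as the completion
 * callback and sleeps on a local ixl_wakeup structure.  The loop also
 * calls ixl_atq_done() itself on every one-tick timeout, so a lost
 * interrupt cannot wedge the caller forever.
 */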
2918 
2919 static int
2920 ixl_atq_poll(struct ixl_softc *sc, struct ixl_aq_desc *iaq, unsigned int tm)
2921 {
2922 	struct ixl_aq_desc *atq, *slot;
2923 	unsigned int prod;
2924 	unsigned int t = 0;
2925 
2926 	atq = IXL_DMA_KVA(&sc->sc_atq);
2927 	prod = sc->sc_atq_prod;
2928 	slot = atq + prod;
2929 
2930 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
2931 	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
2932 
2933 	*slot = *iaq;
2934 	slot->iaq_flags |= htole16(IXL_AQ_SI);
2935 
2936 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
2937 	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
2938 
2939 	prod++;
2940 	prod &= IXL_AQ_MASK;
2941 	sc->sc_atq_prod = prod;
2942 	ixl_wr(sc, sc->sc_aq_regs->atq_tail, prod);
2943 
2944 	while (ixl_rd(sc, sc->sc_aq_regs->atq_head) != prod) {
2945 		delaymsec(1);
2946 
2947 		if (t++ > tm)
2948 			return (ETIMEDOUT);
2949 	}
2950 
2951 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
2952 	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTREAD);
2953 	*iaq = *slot;
2954 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
2955 	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREREAD);
2956 
2957 	sc->sc_atq_cons = prod;
2958 
2959 	return (0);
2960 }
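
/*
 * ixl_atq_poll() is the polled variant used while interrupts are not
 * available yet (i.e. during attach): write one descriptor, bump the
 * tail, and busy-wait for the head to catch up.  The AQ command
 * helpers below all follow the same shape, e.g.:
 *
 *	memset(&iaq, 0, sizeof(iaq));
 *	iaq.iaq_opcode = htole16(IXL_AQ_OP_...);
 *	if (ixl_atq_poll(sc, &iaq, 250) != 0)
 *		return (-1);	 (timed out)
 *	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK))
 *		return (-1);	 (rejected by the firmware)
 */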
2961 
2962 static int
2963 ixl_get_version(struct ixl_softc *sc)
2964 {
2965 	struct ixl_aq_desc iaq;
2966 	uint32_t fwbuild, fwver, apiver;
2967 
2968 	memset(&iaq, 0, sizeof(iaq));
2969 	iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VERSION);
2970 
2971 	if (ixl_atq_poll(sc, &iaq, 2000) != 0)
2972 		return (ETIMEDOUT);
2973 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK))
2974 		return (EIO);
2975 
2976 	fwbuild = lemtoh32(&iaq.iaq_param[1]);
2977 	fwver = lemtoh32(&iaq.iaq_param[2]);
2978 	apiver = lemtoh32(&iaq.iaq_param[3]);
2979 
2980 	printf(", FW %hu.%hu.%05u API %hu.%hu", (uint16_t)fwver,
2981 	    (uint16_t)(fwver >> 16), fwbuild, (uint16_t)apiver,
2982 	    (uint16_t)(apiver >> 16));
2983 
2984 	return (0);
2985 }
2986 
2987 static int
2988 ixl_pxe_clear(struct ixl_softc *sc)
2989 {
2990 	struct ixl_aq_desc iaq;
2991 
2992 	memset(&iaq, 0, sizeof(iaq));
2993 	iaq.iaq_opcode = htole16(IXL_AQ_OP_CLEAR_PXE_MODE);
2994 	iaq.iaq_param[0] = htole32(0x2);
2995 
2996 	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
2997 		printf(", CLEAR PXE MODE timeout\n");
2998 		return (-1);
2999 	}
3000 
3001 	switch (iaq.iaq_retval) {
3002 	case HTOLE16(IXL_AQ_RC_OK):
3003 	case HTOLE16(IXL_AQ_RC_EEXIST):
3004 		break;
3005 	default:
3006 		printf(", CLEAR PXE MODE error\n");
3007 		return (-1);
3008 	}
3009 
3010 	return (0);
3011 }
3012 
3013 static int
3014 ixl_lldp_shut(struct ixl_softc *sc)
3015 {
3016 	struct ixl_aq_desc iaq;
3017 
3018 	memset(&iaq, 0, sizeof(iaq));
3019 	iaq.iaq_opcode = htole16(IXL_AQ_OP_LLDP_STOP_AGENT);
3020 	iaq.iaq_param[0] = htole32(IXL_LLDP_SHUTDOWN);
3021 
3022 	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
3023 		printf(", STOP LLDP AGENT timeout\n");
3024 		return (-1);
3025 	}
3026 
3027 	switch (iaq.iaq_retval) {
3028 	case HTOLE16(IXL_AQ_RC_EMODE):
3029 	case HTOLE16(IXL_AQ_RC_EPERM):
3030 		/* ignore silently */
3031 	default:
3032 		break;
3033 	}
3034 
3035 	return (0);
3036 }
3037 
3038 static int
3039 ixl_get_mac(struct ixl_softc *sc)
3040 {
3041 	struct ixl_dmamem idm;
3042 	struct ixl_aq_desc iaq;
3043 	struct ixl_aq_mac_addresses *addrs;
3044 	int rv;
3045 
3046 	if (ixl_dmamem_alloc(sc, &idm, sizeof(*addrs), 0) != 0) {
3047 		printf(", unable to allocate mac addresses\n");
3048 		return (-1);
3049 	}
3050 
3051 	memset(&iaq, 0, sizeof(iaq));
3052 	iaq.iaq_flags = htole16(IXL_AQ_BUF);
3053 	iaq.iaq_opcode = htole16(IXL_AQ_OP_MAC_ADDRESS_READ);
3054 	iaq.iaq_datalen = htole16(sizeof(*addrs));
3055 	ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
3056 
3057 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3058 	    BUS_DMASYNC_PREREAD);
3059 
3060 	rv = ixl_atq_poll(sc, &iaq, 250);
3061 
3062 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3063 	    BUS_DMASYNC_POSTREAD);
3064 
3065 	if (rv != 0) {
3066 		printf(", MAC ADDRESS READ timeout\n");
3067 		rv = -1;
3068 		goto done;
3069 	}
3070 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
3071 		printf(", MAC ADDRESS READ error\n");
3072 		rv = -1;
3073 		goto done;
3074 	}
3075 
3076 	addrs = IXL_DMA_KVA(&idm);
3077 	if (!ISSET(iaq.iaq_param[0], htole32(IXL_AQ_MAC_PORT_VALID))) {
3078 		printf(", port address is not valid\n");
		rv = -1;
3079 		goto done;
3080 	}
3081 
3082 	memcpy(sc->sc_ac.ac_enaddr, addrs->port, ETHER_ADDR_LEN);
3083 	rv = 0;
3084 
3085 done:
3086 	ixl_dmamem_free(sc, &idm);
3087 	return (rv);
3088 }
3089 
3090 static int
3091 ixl_get_switch_config(struct ixl_softc *sc)
3092 {
3093 	struct ixl_dmamem idm;
3094 	struct ixl_aq_desc iaq;
3095 	struct ixl_aq_switch_config *hdr;
3096 	struct ixl_aq_switch_config_element *elms, *elm;
3097 	unsigned int nelm;
3098 	int rv;
3099 
3100 	if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) {
3101 		printf("%s: unable to allocate switch config buffer\n",
3102 		    DEVNAME(sc));
3103 		return (-1);
3104 	}
3105 
3106 	memset(&iaq, 0, sizeof(iaq));
3107 	iaq.iaq_flags = htole16(IXL_AQ_BUF |
3108 	    (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
3109 	iaq.iaq_opcode = htole16(IXL_AQ_OP_SWITCH_GET_CONFIG);
3110 	iaq.iaq_datalen = htole16(IXL_AQ_BUFLEN);
3111 	ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
3112 
3113 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3114 	    BUS_DMASYNC_PREREAD);
3115 
3116 	rv = ixl_atq_poll(sc, &iaq, 250);
3117 
3118 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3119 	    BUS_DMASYNC_POSTREAD);
3120 
3121 	if (rv != 0) {
3122 		printf("%s: GET SWITCH CONFIG timeout\n", DEVNAME(sc));
3123 		rv = -1;
3124 		goto done;
3125 	}
3126 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
3127 		printf("%s: GET SWITCH CONFIG error\n", DEVNAME(sc));
3128 		rv = -1;
3129 		goto done;
3130 	}
3131 
3132 	hdr = IXL_DMA_KVA(&idm);
3133 	elms = (struct ixl_aq_switch_config_element *)(hdr + 1);
3134 
3135 	nelm = lemtoh16(&hdr->num_reported);
3136 	if (nelm < 1) {
3137 		printf("%s: no switch config available\n", DEVNAME(sc));
3138 		rv = -1;
3139 		goto done;
3140 	}
3141 
3142 #if 0
3143 	for (i = 0; i < nelm; i++) {
3144 		elm = &elms[i];
3145 
3146 		printf("%s: type %x revision %u seid %04x\n", DEVNAME(sc),
3147 		    elm->type, elm->revision, lemtoh16(&elm->seid));
3148 		printf("%s: uplink %04x downlink %04x\n", DEVNAME(sc),
3149 		    lemtoh16(&elm->uplink_seid),
3150 		    lemtoh16(&elm->downlink_seid));
3151 		printf("%s: conntype %x scheduler %04x extra %04x\n",
3152 		    DEVNAME(sc), elm->connection_type,
3153 		    lemtoh16(&elm->scheduler_id),
3154 		    lemtoh16(&elm->element_info));
3155 	}
3156 #endif
3157 
3158 	elm = &elms[0];
3159 
3160 	sc->sc_uplink_seid = elm->uplink_seid;
3161 	sc->sc_downlink_seid = elm->downlink_seid;
3162 	sc->sc_seid = elm->seid;
3163 
3164 	if ((sc->sc_uplink_seid == htole16(0)) !=
3165 	    (sc->sc_downlink_seid == htole16(0))) {
3166 		printf("%s: SEIDs are misconfigured\n", DEVNAME(sc));
3167 		rv = -1;
3168 		goto done;
3169 	}
3170 
3171 done:
3172 	ixl_dmamem_free(sc, &idm);
3173 	return (rv);
3174 }
3175 
3176 static int
3177 ixl_phy_mask_ints(struct ixl_softc *sc)
3178 {
3179 	struct ixl_aq_desc iaq;
3180 
3181 	memset(&iaq, 0, sizeof(iaq));
3182 	iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_EVENT_MASK);
3183 	iaq.iaq_param[2] = htole32(IXL_AQ_PHY_EV_MASK &
3184 	    ~(IXL_AQ_PHY_EV_LINK_UPDOWN | IXL_AQ_PHY_EV_MODULE_QUAL_FAIL |
3185 	      IXL_AQ_PHY_EV_MEDIA_NA));
3186 
3187 	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
3188 		printf("%s: SET PHY EVENT MASK timeout\n", DEVNAME(sc));
3189 		return (-1);
3190 	}
3191 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
3192 		printf("%s: SET PHY EVENT MASK error\n", DEVNAME(sc));
3193 		return (-1);
3194 	}
3195 
3196 	return (0);
3197 }
3198 
3199 static int
3200 ixl_get_phy_abilities(struct ixl_softc *sc, uint64_t *phy_types_ptr)
3201 {
3202 	struct ixl_dmamem idm;
3203 	struct ixl_aq_desc iaq;
3204 	struct ixl_aq_phy_abilities *phy;
3205 	uint64_t phy_types;
3206 	int rv;
3207 
3208 	if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) {
3209 		printf("%s: unable to allocate phy abilities buffer\n",
3210 		    DEVNAME(sc));
3211 		return (-1);
3212 	}
3213 
3214 	memset(&iaq, 0, sizeof(iaq));
3215 	iaq.iaq_flags = htole16(IXL_AQ_BUF |
3216 	    (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
3217 	iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_GET_ABILITIES);
3218 	iaq.iaq_datalen = htole16(IXL_AQ_BUFLEN);
3219 	iaq.iaq_param[0] = htole32(IXL_AQ_PHY_REPORT_INIT);
3220 	ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
3221 
3222 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3223 	    BUS_DMASYNC_PREREAD);
3224 
3225 	rv = ixl_atq_poll(sc, &iaq, 250);
3226 
3227 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3228 	    BUS_DMASYNC_POSTREAD);
3229 
3230 	if (rv != 0) {
3231 		printf("%s: GET PHY ABILITIES timeout\n", DEVNAME(sc));
3232 		rv = -1;
3233 		goto done;
3234 	}
3235 	switch (iaq.iaq_retval) {
3236 	case HTOLE16(IXL_AQ_RC_OK):
3237 		break;
3238 	case HTOLE16(IXL_AQ_RC_EIO):
3239 		printf("%s: unable to query phy types\n", DEVNAME(sc));
		*phy_types_ptr = 0;
3240 		rv = 0;
3241 		goto done;
3242 	default:
3243 		printf("%s: GET PHY ABILITIES error\n", DEVNAME(sc));
3244 		rv = -1;
3245 		goto done;
3246 	}
3247 
3248 	phy = IXL_DMA_KVA(&idm);
3249 
3250 	phy_types = lemtoh32(&phy->phy_type);
3251 	phy_types |= (uint64_t)phy->phy_type_ext << 32;
3252 
3253 	*phy_types_ptr = phy_types;
3254 
3255 	rv = 0;
3256 
3257 done:
3258 	ixl_dmamem_free(sc, &idm);
3259 	return (rv);
3260 }
3261 
3262 static int
3263 ixl_get_link_status(struct ixl_softc *sc)
3264 {
3265 	struct ixl_aq_desc iaq;
3266 
3267 	memset(&iaq, 0, sizeof(iaq));
3268 	iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS);
3269 
3270 	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
3271 		printf("%s: GET LINK STATUS timeout\n", DEVNAME(sc));
3272 		return (-1);
3273 	}
3274 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
3275 		printf("%s: GET LINK STATUS error\n", DEVNAME(sc));
3276 		return (0);
3277 	}
3278 
3279 	sc->sc_ac.ac_if.if_link_state = ixl_set_link_status(sc, &iaq);
3280 
3281 	return (0);
3282 }
3283 
3284 static int
3285 ixl_get_vsi(struct ixl_softc *sc)
3286 {
3287 	struct ixl_dmamem *vsi = &sc->sc_vsi;
3288 	struct ixl_aq_desc iaq;
3289 	struct ixl_aq_vsi_param *param;
3290 	struct ixl_aq_vsi_reply *reply;
3291 	int rv;
3292 
3293 	/* grumble, vsi info isn't "known" at compile time */
3294 
3295 	memset(&iaq, 0, sizeof(iaq));
3296 	htolem16(&iaq.iaq_flags, IXL_AQ_BUF |
3297 	    (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
3298 	iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VSI_PARAMS);
3299 	htolem16(&iaq.iaq_datalen, IXL_DMA_LEN(vsi));
3300 	ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi));
3301 
3302 	param = (struct ixl_aq_vsi_param *)iaq.iaq_param;
3303 	param->uplink_seid = sc->sc_seid;
3304 
3305 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
3306 	    BUS_DMASYNC_PREREAD);
3307 
3308 	rv = ixl_atq_poll(sc, &iaq, 250);
3309 
3310 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
3311 	    BUS_DMASYNC_POSTREAD);
3312 
3313 	if (rv != 0) {
3314 		printf("%s: GET VSI timeout\n", DEVNAME(sc));
3315 		return (-1);
3316 	}
3317 
3318 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
3319 		printf("%s: GET VSI error %u\n", DEVNAME(sc),
3320 		    lemtoh16(&iaq.iaq_retval));
3321 		return (-1);
3322 	}
3323 
3324 	reply = (struct ixl_aq_vsi_reply *)iaq.iaq_param;
3325 	sc->sc_vsi_number = reply->vsi_number;
3326 
3327 	return (0);
3328 }
3329 
3330 static int
3331 ixl_set_vsi(struct ixl_softc *sc)
3332 {
3333 	struct ixl_dmamem *vsi = &sc->sc_vsi;
3334 	struct ixl_aq_desc iaq;
3335 	struct ixl_aq_vsi_param *param;
3336 	struct ixl_aq_vsi_data *data = IXL_DMA_KVA(vsi);
3337 	int rv;
3338 
3339 	data->valid_sections = htole16(IXL_AQ_VSI_VALID_QUEUE_MAP |
3340 	    IXL_AQ_VSI_VALID_VLAN);
3341 
3342 	CLR(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_MASK));
3343 	SET(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_CONTIG));
3344 	data->queue_mapping[0] = htole16(0);
3345 	data->tc_mapping[0] = htole16((0 << IXL_AQ_VSI_TC_Q_OFFSET_SHIFT) |
3346 	    (sc->sc_nqueues << IXL_AQ_VSI_TC_Q_NUMBER_SHIFT));
3347 
3348 	CLR(data->port_vlan_flags,
3349 	    htole16(IXL_AQ_VSI_PVLAN_MODE_MASK | IXL_AQ_VSI_PVLAN_EMOD_MASK));
3350 	SET(data->port_vlan_flags,
3351 	    htole16(IXL_AQ_VSI_PVLAN_MODE_ALL | IXL_AQ_VSI_PVLAN_EMOD_NOTHING));
3352 
3353 	/* grumble, vsi info isn't "known" at compile time */
3354 
3355 	memset(&iaq, 0, sizeof(iaq));
3356 	htolem16(&iaq.iaq_flags, IXL_AQ_BUF | IXL_AQ_RD |
3357 	    (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
3358 	iaq.iaq_opcode = htole16(IXL_AQ_OP_UPD_VSI_PARAMS);
3359 	htolem16(&iaq.iaq_datalen, IXL_DMA_LEN(vsi));
3360 	ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi));
3361 
3362 	param = (struct ixl_aq_vsi_param *)iaq.iaq_param;
3363 	param->uplink_seid = sc->sc_seid;
3364 
3365 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
3366 	    BUS_DMASYNC_PREWRITE);
3367 
3368 	rv = ixl_atq_poll(sc, &iaq, 250);
3369 
3370 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
3371 	    BUS_DMASYNC_POSTWRITE);
3372 
3373 	if (rv != 0) {
3374 		printf("%s: UPDATE VSI timeout\n", DEVNAME(sc));
3375 		return (-1);
3376 	}
3377 
3378 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
3379 		printf("%s: UPDATE VSI error %u\n", DEVNAME(sc),
3380 		    lemtoh16(&iaq.iaq_retval));
3381 		return (-1);
3382 	}
3383 
3384 	return (0);
3385 }
3386 
3387 static const struct ixl_phy_type *
3388 ixl_search_phy_type(uint8_t phy_type)
3389 {
3390 	const struct ixl_phy_type *itype;
3391 	uint64_t mask;
3392 	unsigned int i;
3393 
3394 	if (phy_type >= 64)
3395 		return (NULL);
3396 
3397 	mask = 1ULL << phy_type;
3398 
3399 	for (i = 0; i < nitems(ixl_phy_type_map); i++) {
3400 		itype = &ixl_phy_type_map[i];
3401 
3402 		if (ISSET(itype->phy_type, mask))
3403 			return (itype);
3404 	}
3405 
3406 	return (NULL);
3407 }
3408 
3409 static uint64_t
3410 ixl_search_link_speed(uint8_t link_speed)
3411 {
3412 	const struct ixl_speed_type *type;
3413 	unsigned int i;
3414 
3415 	for (i = 0; i < nitems(ixl_speed_type_map); i++) {
3416 		type = &ixl_speed_type_map[i];
3417 
3418 		if (ISSET(type->dev_speed, link_speed))
3419 			return (type->net_speed);
3420 	}
3421 
3422 	return (0);
3423 }
3424 
3425 static int
3426 ixl_set_link_status(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
3427 {
3428 	const struct ixl_aq_link_status *status;
3429 	const struct ixl_phy_type *itype;
3430 
3431 	uint64_t ifm_active = IFM_ETHER;
3432 	uint64_t ifm_status = IFM_AVALID;
3433 	int link_state = LINK_STATE_DOWN;
3434 	uint64_t baudrate = 0;
3435 
3436 	status = (const struct ixl_aq_link_status *)iaq->iaq_param;
3437 	if (!ISSET(status->link_info, IXL_AQ_LINK_UP_FUNCTION))
3438 		goto done;
3439 
3440 	ifm_active |= IFM_FDX;
3441 	ifm_status |= IFM_ACTIVE;
3442 	link_state = LINK_STATE_FULL_DUPLEX;
3443 
3444 	itype = ixl_search_phy_type(status->phy_type);
3445 	if (itype != NULL)
3446 		ifm_active |= itype->ifm_type;
3447 
3448 	if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_TX))
3449 		ifm_active |= IFM_ETH_TXPAUSE;
3450 	if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_RX))
3451 		ifm_active |= IFM_ETH_RXPAUSE;
3452 
3453 	baudrate = ixl_search_link_speed(status->link_speed);
3454 
3455 done:
3456 	/* NET_ASSERT_LOCKED() except during attach */
3457 	sc->sc_media_active = ifm_active;
3458 	sc->sc_media_status = ifm_status;
3459 	sc->sc_ac.ac_if.if_baudrate = baudrate;
3460 
3461 	return (link_state);
3462 }
3463 
3464 static int
3465 ixl_restart_an(struct ixl_softc *sc)
3466 {
3467 	struct ixl_aq_desc iaq;
3468 
3469 	memset(&iaq, 0, sizeof(iaq));
3470 	iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_RESTART_AN);
3471 	iaq.iaq_param[0] =
3472 	    htole32(IXL_AQ_PHY_RESTART_AN | IXL_AQ_PHY_LINK_ENABLE);
3473 
3474 	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
3475 		printf("%s: RESTART AN timeout\n", DEVNAME(sc));
3476 		return (-1);
3477 	}
3478 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
3479 		printf("%s: RESTART AN error\n", DEVNAME(sc));
3480 		return (-1);
3481 	}
3482 
3483 	return (0);
3484 }
3485 
3486 static int
3487 ixl_hmc(struct ixl_softc *sc)
3488 {
3489 	struct {
3490 		uint32_t   count;
3491 		uint32_t   minsize;
3492 		bus_size_t maxcnt;
3493 		bus_size_t setoff;
3494 		bus_size_t setcnt;
3495 	} regs[] = {
3496 		{
3497 			0,
3498 			IXL_HMC_TXQ_MINSIZE,
3499 			I40E_GLHMC_LANTXOBJSZ,
3500 			I40E_GLHMC_LANTXBASE(sc->sc_pf_id),
3501 			I40E_GLHMC_LANTXCNT(sc->sc_pf_id),
3502 		},
3503 		{
3504 			0,
3505 			IXL_HMC_RXQ_MINSIZE,
3506 			I40E_GLHMC_LANRXOBJSZ,
3507 			I40E_GLHMC_LANRXBASE(sc->sc_pf_id),
3508 			I40E_GLHMC_LANRXCNT(sc->sc_pf_id),
3509 		},
3510 		{
3511 			0,
3512 			0,
3513 			I40E_GLHMC_FCOEMAX,
3514 			I40E_GLHMC_FCOEDDPBASE(sc->sc_pf_id),
3515 			I40E_GLHMC_FCOEDDPCNT(sc->sc_pf_id),
3516 		},
3517 		{
3518 			0,
3519 			0,
3520 			I40E_GLHMC_FCOEFMAX,
3521 			I40E_GLHMC_FCOEFBASE(sc->sc_pf_id),
3522 			I40E_GLHMC_FCOEFCNT(sc->sc_pf_id),
3523 		},
3524 	};
3525 	struct ixl_hmc_entry *e;
3526 	uint64_t size, dva;
3527 	uint8_t *kva;
3528 	uint64_t *sdpage;
3529 	unsigned int i;
3530 	int npages, tables;
3531 
3532 	CTASSERT(nitems(regs) <= nitems(sc->sc_hmc_entries));
3533 
3534 	regs[IXL_HMC_LAN_TX].count = regs[IXL_HMC_LAN_RX].count =
3535 	    ixl_rd(sc, I40E_GLHMC_LANQMAX);
3536 
3537 	size = 0;
3538 	for (i = 0; i < nitems(regs); i++) {
3539 		e = &sc->sc_hmc_entries[i];
3540 
3541 		e->hmc_count = regs[i].count;
3542 		e->hmc_size = 1U << ixl_rd(sc, regs[i].maxcnt);
3543 		e->hmc_base = size;
3544 
3545 		if ((e->hmc_size * 8) < regs[i].minsize) {
3546 			printf("%s: kernel hmc entry is too big\n",
3547 			    DEVNAME(sc));
3548 			return (-1);
3549 		}
3550 
3551 		size += roundup(e->hmc_size * e->hmc_count, IXL_HMC_ROUNDUP);
3552 	}
3553 	size = roundup(size, IXL_HMC_PGSIZE);
3554 	npages = size / IXL_HMC_PGSIZE;
3555 
3556 	tables = roundup(size, IXL_HMC_L2SZ) / IXL_HMC_L2SZ;
3557 
3558 	if (ixl_dmamem_alloc(sc, &sc->sc_hmc_pd, size, IXL_HMC_PGSIZE) != 0) {
3559 		printf("%s: unable to allocate hmc pd memory\n", DEVNAME(sc));
3560 		return (-1);
3561 	}
3562 
3563 	if (ixl_dmamem_alloc(sc, &sc->sc_hmc_sd, tables * IXL_HMC_PGSIZE,
3564 	    IXL_HMC_PGSIZE) != 0) {
3565 		printf("%s: unable to allocate hmc sd memory\n", DEVNAME(sc));
3566 		ixl_dmamem_free(sc, &sc->sc_hmc_pd);
3567 		return (-1);
3568 	}
3569 
3570 	kva = IXL_DMA_KVA(&sc->sc_hmc_pd);
3571 	memset(kva, 0, IXL_DMA_LEN(&sc->sc_hmc_pd));
3572 
3573 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
3574 	    0, IXL_DMA_LEN(&sc->sc_hmc_pd),
3575 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3576 
3577 	dva = IXL_DMA_DVA(&sc->sc_hmc_pd);
3578 	sdpage = IXL_DMA_KVA(&sc->sc_hmc_sd);
3579 	for (i = 0; i < npages; i++) {
3580 		htolem64(sdpage++, dva | IXL_HMC_PDVALID);
3581 
3582 		dva += IXL_HMC_PGSIZE;
3583 	}
3584 
3585 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_sd),
3586 	    0, IXL_DMA_LEN(&sc->sc_hmc_sd),
3587 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3588 
3589 	dva = IXL_DMA_DVA(&sc->sc_hmc_sd);
3590 	for (i = 0; i < tables; i++) {
3591 		uint32_t count;
3592 
3593 		KASSERT(npages >= 0);
3594 
3595 		count = (npages > IXL_HMC_PGS) ? IXL_HMC_PGS : npages;
3596 
3597 		ixl_wr(sc, I40E_PFHMC_SDDATAHIGH, dva >> 32);
3598 		ixl_wr(sc, I40E_PFHMC_SDDATALOW, dva |
3599 		    (count << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |
3600 		    (1U << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT));
3601 		ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
3602 		ixl_wr(sc, I40E_PFHMC_SDCMD,
3603 		    (1U << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | i);
3604 
3605 		npages -= IXL_HMC_PGS;
3606 		dva += IXL_HMC_PGSIZE;
3607 	}
3608 
3609 	for (i = 0; i < nitems(regs); i++) {
3610 		e = &sc->sc_hmc_entries[i];
3611 
3612 		ixl_wr(sc, regs[i].setoff, e->hmc_base / IXL_HMC_ROUNDUP);
3613 		ixl_wr(sc, regs[i].setcnt, e->hmc_count);
3614 	}
3615 
3616 	return (0);
3617 }
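
/*
 * ixl_hmc() builds a two-level translation for the hardware: the flat
 * object store (sc_hmc_pd) is described page by page in a table of
 * page descriptors (sc_hmc_sd), and that table is in turn published
 * to the chip in chunks of IXL_HMC_PGS pages per segment descriptor
 * through the PFHMC_SDDATAHIGH, PFHMC_SDDATALOW and PFHMC_SDCMD
 * registers.  The per-type base/count registers written at the end
 * tell each function block where its objects start within the store.
 */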
3618 
3619 static void
3620 ixl_hmc_free(struct ixl_softc *sc)
3621 {
3622 	ixl_dmamem_free(sc, &sc->sc_hmc_sd);
3623 	ixl_dmamem_free(sc, &sc->sc_hmc_pd);
3624 }
3625 
3626 static void
3627 ixl_hmc_pack(void *d, const void *s, const struct ixl_hmc_pack *packing,
3628     unsigned int npacking)
3629 {
3630 	uint8_t *dst = d;
3631 	const uint8_t *src = s;
3632 	unsigned int i;
3633 
3634 	for (i = 0; i < npacking; i++) {
3635 		const struct ixl_hmc_pack *pack = &packing[i];
3636 		unsigned int offset = pack->lsb / 8;
3637 		unsigned int align = pack->lsb % 8;
3638 		const uint8_t *in = src + pack->offset;
3639 		uint8_t *out = dst + offset;
3640 		int width = pack->width;
3641 		unsigned int inbits = 0;
3642 
3643 		if (align) {
3644 			inbits = *in++;
3645 
3646 			*out++ |= inbits << align;
3647 
3648 			width -= 8 - align;
3649 		}
3650 
3651 		while (width >= 8) {
3652 			inbits <<= 8;
3653 			inbits |= *in++;
3654 
3655 			*out++ = inbits << align;
3656 
3657 			width -= 8;
3658 		}
3659 
3660 		if (width)
3661 			*out = inbits >> (8 - align);
3662 	}
3663 }
3664 
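/*
 * Allocate a buffer for the admin receive queue, along with the
 * single-segment dmamap the descriptor ring needs to hand its bus
 * address to the firmware.
 */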
3665 static struct ixl_aq_buf *
3666 ixl_aqb_alloc(struct ixl_softc *sc)
3667 {
3668 	struct ixl_aq_buf *aqb;
3669 
3670 	aqb = malloc(sizeof(*aqb), M_DEVBUF, M_WAITOK);
3671 	if (aqb == NULL)
3672 		return (NULL);
3673 
3674 	aqb->aqb_data = dma_alloc(IXL_AQ_BUFLEN, PR_WAITOK);
3675 	if (aqb->aqb_data == NULL)
3676 		goto free;
3677 
3678 	if (bus_dmamap_create(sc->sc_dmat, IXL_AQ_BUFLEN, 1,
3679 	    IXL_AQ_BUFLEN, 0,
3680 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
3681 	    &aqb->aqb_map) != 0)
3682 		goto dma_free;
3683 
3684 	if (bus_dmamap_load(sc->sc_dmat, aqb->aqb_map, aqb->aqb_data,
3685 	    IXL_AQ_BUFLEN, NULL, BUS_DMA_WAITOK) != 0)
3686 		goto destroy;
3687 
3688 	return (aqb);
3689 
3690 destroy:
3691 	bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
3692 dma_free:
3693 	dma_free(aqb->aqb_data, IXL_AQ_BUFLEN);
3694 free:
3695 	free(aqb, M_DEVBUF, sizeof(*aqb));
3696 
3697 	return (NULL);
3698 }
3699 
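/* Unload, destroy, and free an admin queue buffer. */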
3700 static void
3701 ixl_aqb_free(struct ixl_softc *sc, struct ixl_aq_buf *aqb)
3702 {
3703 	bus_dmamap_unload(sc->sc_dmat, aqb->aqb_map);
3704 	bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
3705 	dma_free(aqb->aqb_data, IXL_AQ_BUFLEN);
3706 	free(aqb, M_DEVBUF, sizeof(*aqb));
3707 }
3708 
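/*
 * Post buffers to the admin receive queue (ARQ) for the firmware to
 * write events and replies into.  Buffers are recycled off the idle
 * list when possible and allocated fresh otherwise; ring accounting
 * goes through if_rxr so refills stay bounded.  Returns nonzero if any
 * descriptors were produced and the producer index moved.
 */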
3709 static int
3710 ixl_arq_fill(struct ixl_softc *sc)
3711 {
3712 	struct ixl_aq_buf *aqb;
3713 	struct ixl_aq_desc *arq, *iaq;
3714 	unsigned int prod = sc->sc_arq_prod;
3715 	unsigned int n;
3716 	int post = 0;
3717 
3718 	n = if_rxr_get(&sc->sc_arq_ring, IXL_AQ_NUM);
3719 	arq = IXL_DMA_KVA(&sc->sc_arq);
3720 
3721 	while (n > 0) {
3722 		aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle);
3723 		if (aqb != NULL)
3724 			SIMPLEQ_REMOVE_HEAD(&sc->sc_arq_idle, aqb_entry);
3725 		else if ((aqb = ixl_aqb_alloc(sc)) == NULL)
3726 			break;
3727 
3728 		memset(aqb->aqb_data, 0, IXL_AQ_BUFLEN);
3729 
3730 		bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN,
3731 		    BUS_DMASYNC_PREREAD);
3732 
3733 		iaq = &arq[prod];
3734 		iaq->iaq_flags = htole16(IXL_AQ_BUF |
3735 		    (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
3736 		iaq->iaq_opcode = 0;
3737 		iaq->iaq_datalen = htole16(IXL_AQ_BUFLEN);
3738 		iaq->iaq_retval = 0;
3739 		iaq->iaq_cookie = 0;
3740 		iaq->iaq_param[0] = 0;
3741 		iaq->iaq_param[1] = 0;
3742 		ixl_aq_dva(iaq, aqb->aqb_map->dm_segs[0].ds_addr);
3743 
3744 		SIMPLEQ_INSERT_TAIL(&sc->sc_arq_live, aqb, aqb_entry);
3745 
3746 		prod++;
3747 		prod &= IXL_AQ_MASK;
3748 
3749 		post = 1;
3750 
3751 		n--;
3752 	}
3753 
3754 	if_rxr_put(&sc->sc_arq_ring, n);
3755 	sc->sc_arq_prod = prod;
3756 
3757 	return (post);
3758 }
3759 
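/* Pull every live buffer back off the ARQ and free it. */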
3760 static void
3761 ixl_arq_unfill(struct ixl_softc *sc)
3762 {
3763 	struct ixl_aq_buf *aqb;
3764 
3765 	while ((aqb = SIMPLEQ_FIRST(&sc->sc_arq_live)) != NULL) {
3766 		SIMPLEQ_REMOVE_HEAD(&sc->sc_arq_live, aqb_entry);
3767 
3768 		bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN,
3769 		    BUS_DMASYNC_POSTREAD);
3770 		ixl_aqb_free(sc, aqb);
3771 	}
3772 }
3773 
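/*
 * Quiesce the function before a PF reset: disable all PF and VF
 * interrupts, terminate the interrupt linked lists, warn the Tx
 * scheduler that the queues are about to be disabled, then turn off
 * every queue pair.  This appears to follow the usual i40e pre-reset
 * sequence.
 */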
3774 static void
3775 ixl_clear_hw(struct ixl_softc *sc)
3776 {
3777 	uint32_t num_queues, base_queue;
3778 	uint32_t num_pf_int;
3779 	uint32_t num_vf_int;
3780 	uint32_t num_vfs;
3781 	uint32_t i, j;
3782 	uint32_t val;
3783 	uint32_t eol = 0x7ff;
3784 
3785 	/* get number of interrupts, queues, and vfs */
3786 	val = ixl_rd(sc, I40E_GLPCI_CNF2);
3787 	num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
3788 	    I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
3789 	num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
3790 	    I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;
3791 
3792 	val = ixl_rd(sc, I40E_PFLAN_QALLOC);
3793 	base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
3794 	    I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
3795 	j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
3796 	    I40E_PFLAN_QALLOC_LASTQ_SHIFT;
3797 	if (val & I40E_PFLAN_QALLOC_VALID_MASK)
3798 		num_queues = (j - base_queue) + 1;
3799 	else
3800 		num_queues = 0;
3801 
3802 	val = ixl_rd(sc, I40E_PF_VT_PFALLOC);
3803 	i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
3804 	    I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
3805 	j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
3806 	    I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
3807 	if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
3808 		num_vfs = (j - i) + 1;
3809 	else
3810 		num_vfs = 0;
3811 
3812 	/* stop all the interrupts */
3813 	ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0);
3814 	val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
3815 	for (i = 0; i < num_pf_int - 2; i++)
3816 		ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), val);
3817 
3818 	/* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
3819 	val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
3820 	ixl_wr(sc, I40E_PFINT_LNKLST0, val);
3821 	for (i = 0; i < num_pf_int - 2; i++)
3822 		ixl_wr(sc, I40E_PFINT_LNKLSTN(i), val);
3823 	val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
3824 	for (i = 0; i < num_vfs; i++)
3825 		ixl_wr(sc, I40E_VPINT_LNKLST0(i), val);
3826 	for (i = 0; i < num_vf_int - 2; i++)
3827 		ixl_wr(sc, I40E_VPINT_LNKLSTN(i), val);
3828 
3829 	/* warn the HW of the coming Tx disables */
3830 	for (i = 0; i < num_queues; i++) {
3831 		uint32_t abs_queue_idx = base_queue + i;
3832 		uint32_t reg_block = 0;
3833 
3834 		if (abs_queue_idx >= 128) {
3835 			reg_block = abs_queue_idx / 128;
3836 			abs_queue_idx %= 128;
3837 		}
3838 
3839 		val = ixl_rd(sc, I40E_GLLAN_TXPRE_QDIS(reg_block));
3840 		val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
3841 		val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
3842 		val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
3843 
3844 		ixl_wr(sc, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
3845 	}
3846 	delaymsec(400);
3847 
3848 	/* stop all the queues */
3849 	for (i = 0; i < num_queues; i++) {
3850 		ixl_wr(sc, I40E_QINT_TQCTL(i), 0);
3851 		ixl_wr(sc, I40E_QTX_ENA(i), 0);
3852 		ixl_wr(sc, I40E_QINT_RQCTL(i), 0);
3853 		ixl_wr(sc, I40E_QRX_ENA(i), 0);
3854 	}
3855 
3856 	/* short wait for all queue disables to settle */
3857 	delaymsec(50);
3858 }
3859 
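/*
 * Reset the physical function.  First wait out any global reset that
 * may already be in progress (GRSTDEL is in 100ms units; a couple of
 * extra polls are added so the end isn't missed), then wait for the
 * firmware to report ready.  A PF software reset is only triggered if
 * no global reset was observed, since a global reset covers the PF as
 * well.
 */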
3860 static int
3861 ixl_pf_reset(struct ixl_softc *sc)
3862 {
3863 	uint32_t cnt = 0;
3864 	uint32_t cnt1 = 0;
3865 	uint32_t reg = 0;
3866 	uint32_t grst_del;
3867 
3868 	/*
3869 	 * Poll for Global Reset steady state in case of recent GRST.
3870 	 * The grst delay value is in 100ms units, and we'll wait a
3871 	 * couple counts longer to be sure we don't just miss the end.
3872 	 */
3873 	grst_del = ixl_rd(sc, I40E_GLGEN_RSTCTL);
3874 	grst_del &= I40E_GLGEN_RSTCTL_GRSTDEL_MASK;
3875 	grst_del >>= I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
3876 	grst_del += 10;
3877 
3878 	for (cnt = 0; cnt < grst_del; cnt++) {
3879 		reg = ixl_rd(sc, I40E_GLGEN_RSTAT);
3880 		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
3881 			break;
3882 		delaymsec(100);
3883 	}
3884 	if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
3885 		printf(", Global reset polling failed to complete\n");
3886 		return (-1);
3887 	}
3888 
3889 	/* Now wait for the FW to be ready */
3890 	for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
3891 		reg = ixl_rd(sc, I40E_GLNVM_ULD);
3892 		reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
3893 		    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
3894 		if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
3895 		    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))
3896 			break;
3897 
3898 		delaymsec(10);
3899 	}
3900 	if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
3901 	    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
3902 		printf(", timeout waiting for FW reset to complete "
3903 		    "(I40E_GLNVM_ULD = 0x%x)\n", reg);
3904 		return (-1);
3905 	}
3906 
3907 	/*
3908 	 * If there was a Global Reset in progress when we got here,
3909 	 * we don't need to do the PF Reset
3910 	 */
3911 	if (cnt == 0) {
3912 		reg = ixl_rd(sc, I40E_PFGEN_CTRL);
3913 		ixl_wr(sc, I40E_PFGEN_CTRL, reg | I40E_PFGEN_CTRL_PFSWR_MASK);
3914 		for (cnt = 0; cnt < I40E_PF_RESET_WAIT_COUNT; cnt++) {
3915 			reg = ixl_rd(sc, I40E_PFGEN_CTRL);
3916 			if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
3917 				break;
3918 			delaymsec(1);
3919 		}
3920 		if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
3921 			printf(", PF reset polling failed to complete "
3922 			    "(I40E_PFGEN_CTRL = 0x%x)\n", reg);
3923 			return (-1);
3924 		}
3925 	}
3926 
3927 	return (0);
3928 }
3929 
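/*
 * Allocate a physically contiguous, zeroed chunk of DMA memory and map
 * it into kernel virtual address space.  Returns 0 on success, 1 on
 * failure, unwinding any partial setup.  A sketch of the intended use
 * (error handling elided):
 *
 *	struct ixl_dmamem ixm;
 *
 *	if (ixl_dmamem_alloc(sc, &ixm, len, IXL_HMC_PGSIZE) != 0)
 *		return (-1);
 *	...use IXL_DMA_KVA(&ixm) and IXL_DMA_DVA(&ixm)...
 *	ixl_dmamem_free(sc, &ixm);
 */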
3930 static int
3931 ixl_dmamem_alloc(struct ixl_softc *sc, struct ixl_dmamem *ixm,
3932     bus_size_t size, u_int align)
3933 {
3934 	ixm->ixm_size = size;
3935 
3936 	if (bus_dmamap_create(sc->sc_dmat, ixm->ixm_size, 1,
3937 	    ixm->ixm_size, 0,
3938 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
3939 	    &ixm->ixm_map) != 0)
3940 		return (1);
3941 	if (bus_dmamem_alloc(sc->sc_dmat, ixm->ixm_size,
3942 	    align, 0, &ixm->ixm_seg, 1, &ixm->ixm_nsegs,
3943 	    BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
3944 		goto destroy;
3945 	if (bus_dmamem_map(sc->sc_dmat, &ixm->ixm_seg, ixm->ixm_nsegs,
3946 	    ixm->ixm_size, &ixm->ixm_kva, BUS_DMA_WAITOK) != 0)
3947 		goto free;
3948 	if (bus_dmamap_load(sc->sc_dmat, ixm->ixm_map, ixm->ixm_kva,
3949 	    ixm->ixm_size, NULL, BUS_DMA_WAITOK) != 0)
3950 		goto unmap;
3951 
3952 	return (0);
3953 unmap:
3954 	bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
3955 free:
3956 	bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
3957 destroy:
3958 	bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
3959 	return (1);
3960 }
3961 
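/* Undo ixl_dmamem_alloc() in the reverse order of setup. */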
3962 static void
3963 ixl_dmamem_free(struct ixl_softc *sc, struct ixl_dmamem *ixm)
3964 {
3965 	bus_dmamap_unload(sc->sc_dmat, ixm->ixm_map);
3966 	bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
3967 	bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
3968 	bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
3969 }
3970