/*	$OpenBSD: if_ixl.c,v 1.8 2018/11/18 08:42:15 jmatthew Exp $ */

/*
 * Copyright (c) 2013-2015, Intel Corporation
 * All rights reserved.

 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  1. Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  3. Neither the name of the Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived from
 *     this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2016,2017 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/timeout.h>
#include <sys/task.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#define I40E_MASK(mask, shift)		((mask) << (shift))
#define I40E_PF_RESET_WAIT_COUNT	200
#define I40E_AQ_LARGE_BUF		512

/* bitfields for Tx queue mapping in QTX_CTL */
#define I40E_QTX_CTL_VF_QUEUE		0x0
#define I40E_QTX_CTL_VM_QUEUE		0x1
#define I40E_QTX_CTL_PF_QUEUE		0x2

#define I40E_QUEUE_TYPE_EOL		0x7ff

#define I40E_QUEUE_TYPE_RX		0x0
#define I40E_QUEUE_TYPE_TX		0x1
#define I40E_QUEUE_TYPE_PE_CEQ		0x2
#define I40E_QUEUE_TYPE_UNKNOWN		0x3

#define I40E_ITR_INDEX_RX		0x0
#define I40E_ITR_INDEX_TX		0x1
#define I40E_ITR_INDEX_OTHER		0x2
#define I40E_ITR_INDEX_NONE		0x3

#include <dev/pci/if_ixlreg.h>

#define I40E_INTR_NOTX_QUEUE		0
#define I40E_INTR_NOTX_INTR		0
#define I40E_INTR_NOTX_RX_QUEUE		0
#define I40E_INTR_NOTX_TX_QUEUE		1
#define I40E_INTR_NOTX_RX_MASK		I40E_PFINT_ICR0_QUEUE_0_MASK
#define I40E_INTR_NOTX_TX_MASK		I40E_PFINT_ICR0_QUEUE_1_MASK

struct ixl_aq_desc {
	uint16_t	iaq_flags;
#define	IXL_AQ_DD		(1U << 0)
#define	IXL_AQ_CMP		(1U << 1)
#define IXL_AQ_ERR		(1U << 2)
#define IXL_AQ_VFE		(1U << 3)
#define IXL_AQ_LB		(1U << 9)
#define IXL_AQ_RD		(1U << 10)
#define IXL_AQ_VFC		(1U << 11)
#define IXL_AQ_BUF		(1U << 12)
#define IXL_AQ_SI		(1U << 13)
#define IXL_AQ_EI		(1U << 14)
#define IXL_AQ_FE		(1U << 15)

#define IXL_AQ_FLAGS_FMT	"\020" "\020FE" "\017EI" "\016SI" "\015BUF" \
				    "\014VFC" "\013RD" "\012LB" "\004VFE" \
				    "\003ERR" "\002CMP" "\001DD"

	uint16_t	iaq_opcode;

	uint16_t	iaq_datalen;
	uint16_t	iaq_retval;

	uint64_t	iaq_cookie;

	uint32_t	iaq_param[4];
/*	iaq_data_hi	iaq_param[2] */
/*	iaq_data_lo	iaq_param[3] */
} __packed __aligned(8);
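
/*
 * IXL_AQ_FLAGS_FMT is a printf(9) "%b" bit format string; e.g.
 * printf("%b\n", lemtoh16(&iaq->iaq_flags), IXL_AQ_FLAGS_FMT) would
 * print the set descriptor flags by name (illustrative usage only).
 */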

/* aq commands */
#define IXL_AQ_OP_GET_VERSION		0x0001
#define IXL_AQ_OP_DRIVER_VERSION	0x0002
#define IXL_AQ_OP_QUEUE_SHUTDOWN	0x0003
#define IXL_AQ_OP_SET_PF_CONTEXT	0x0004
#define IXL_AQ_OP_GET_AQ_ERR_REASON	0x0005
#define IXL_AQ_OP_REQUEST_RESOURCE	0x0008
#define IXL_AQ_OP_RELEASE_RESOURCE	0x0009
#define IXL_AQ_OP_LIST_FUNC_CAP		0x000a
#define IXL_AQ_OP_LIST_DEV_CAP		0x000b
#define IXL_AQ_OP_MAC_ADDRESS_READ	0x0107
#define IXL_AQ_OP_CLEAR_PXE_MODE	0x0110
#define IXL_AQ_OP_SWITCH_GET_CONFIG	0x0200
#define IXL_AQ_OP_ADD_VSI		0x0210
#define IXL_AQ_OP_UPD_VSI_PARAMS	0x0211
#define IXL_AQ_OP_GET_VSI_PARAMS	0x0212
#define IXL_AQ_OP_ADD_VEB		0x0230
#define IXL_AQ_OP_UPD_VEB_PARAMS	0x0231
#define IXL_AQ_OP_GET_VEB_PARAMS	0x0232
#define IXL_AQ_OP_SET_VSI_PROMISC	0x0254
#define IXL_AQ_OP_PHY_GET_ABILITIES	0x0600
#define IXL_AQ_OP_PHY_SET_CONFIG	0x0601
#define IXL_AQ_OP_PHY_SET_MAC_CONFIG	0x0603
#define IXL_AQ_OP_PHY_RESTART_AN	0x0605
#define IXL_AQ_OP_PHY_LINK_STATUS	0x0607
#define IXL_AQ_OP_PHY_SET_EVENT_MASK	0x0613
#define IXL_AQ_OP_LLDP_GET_MIB		0x0a00
#define IXL_AQ_OP_LLDP_MIB_CHG_EV	0x0a01
#define IXL_AQ_OP_LLDP_ADD_TLV		0x0a02
#define IXL_AQ_OP_LLDP_UPD_TLV		0x0a03
#define IXL_AQ_OP_LLDP_DEL_TLV		0x0a04
#define IXL_AQ_OP_LLDP_STOP_AGENT	0x0a05
#define IXL_AQ_OP_LLDP_START_AGENT	0x0a06
#define IXL_AQ_OP_LLDP_GET_CEE_DCBX	0x0a07
#define IXL_AQ_OP_LLDP_SPECIFIC_AGENT	0x0a09

struct ixl_aq_mac_addresses {
	uint8_t		pf_lan[ETHER_ADDR_LEN];
	uint8_t		pf_san[ETHER_ADDR_LEN];
	uint8_t		port[ETHER_ADDR_LEN];
	uint8_t		pf_wol[ETHER_ADDR_LEN];
} __packed;

#define IXL_AQ_MAC_PF_LAN_VALID		(1U << 4)
#define IXL_AQ_MAC_PF_SAN_VALID		(1U << 5)
#define IXL_AQ_MAC_PORT_VALID		(1U << 6)
#define IXL_AQ_MAC_PF_WOL_VALID		(1U << 7)

struct ixl_aq_capability {
	uint16_t	cap_id;
#define IXL_AQ_CAP_SWITCH_MODE		0x0001
#define IXL_AQ_CAP_MNG_MODE		0x0002
#define IXL_AQ_CAP_NPAR_ACTIVE		0x0003
#define IXL_AQ_CAP_OS2BMC_CAP		0x0004
#define IXL_AQ_CAP_FUNCTIONS_VALID	0x0005
#define IXL_AQ_CAP_ALTERNATE_RAM	0x0006
#define IXL_AQ_CAP_WOL_AND_PROXY	0x0008
#define IXL_AQ_CAP_SRIOV		0x0012
#define IXL_AQ_CAP_VF			0x0013
#define IXL_AQ_CAP_VMDQ			0x0014
#define IXL_AQ_CAP_8021QBG		0x0015
#define IXL_AQ_CAP_8021QBR		0x0016
#define IXL_AQ_CAP_VSI			0x0017
#define IXL_AQ_CAP_DCB			0x0018
#define IXL_AQ_CAP_FCOE			0x0021
#define IXL_AQ_CAP_ISCSI		0x0022
#define IXL_AQ_CAP_RSS			0x0040
#define IXL_AQ_CAP_RXQ			0x0041
#define IXL_AQ_CAP_TXQ			0x0042
#define IXL_AQ_CAP_MSIX			0x0043
#define IXL_AQ_CAP_VF_MSIX		0x0044
#define IXL_AQ_CAP_FLOW_DIRECTOR	0x0045
#define IXL_AQ_CAP_1588			0x0046
#define IXL_AQ_CAP_IWARP		0x0051
#define IXL_AQ_CAP_LED			0x0061
#define IXL_AQ_CAP_SDP			0x0062
#define IXL_AQ_CAP_MDIO			0x0063
#define IXL_AQ_CAP_WSR_PROT		0x0064
#define IXL_AQ_CAP_NVM_MGMT		0x0080
#define IXL_AQ_CAP_FLEX10		0x00F1
#define IXL_AQ_CAP_CEM			0x00F2
	uint8_t		major_rev;
	uint8_t		minor_rev;
	uint32_t	number;
	uint32_t	logical_id;
	uint32_t	phys_id;
	uint8_t		_reserved[16];
} __packed __aligned(4);

#define IXL_LLDP_SHUTDOWN		0x1

struct ixl_aq_switch_config {
	uint16_t	num_reported;
	uint16_t	num_total;
	uint8_t		_reserved[12];
} __packed __aligned(4);

struct ixl_aq_switch_config_element {
	uint8_t		type;
#define IXL_AQ_SW_ELEM_TYPE_MAC		1
#define IXL_AQ_SW_ELEM_TYPE_PF		2
#define IXL_AQ_SW_ELEM_TYPE_VF		3
#define IXL_AQ_SW_ELEM_TYPE_EMP		4
#define IXL_AQ_SW_ELEM_TYPE_BMC		5
#define IXL_AQ_SW_ELEM_TYPE_PV		16
#define IXL_AQ_SW_ELEM_TYPE_VEB		17
#define IXL_AQ_SW_ELEM_TYPE_PA		18
#define IXL_AQ_SW_ELEM_TYPE_VSI		19
	uint8_t		revision;
#define IXL_AQ_SW_ELEM_REV_1		1
	uint16_t	seid;

	uint16_t	uplink_seid;
	uint16_t	downlink_seid;

	uint8_t		_reserved[3];
	uint8_t		connection_type;
#define IXL_AQ_CONN_TYPE_REGULAR	0x1
#define IXL_AQ_CONN_TYPE_DEFAULT	0x2
#define IXL_AQ_CONN_TYPE_CASCADED	0x3

	uint16_t	scheduler_id;
	uint16_t	element_info;
} __packed __aligned(4);

#define IXL_PHY_TYPE_SGMII		0x00
#define IXL_PHY_TYPE_1000BASE_KX	0x01
#define IXL_PHY_TYPE_10GBASE_KX4	0x02
#define IXL_PHY_TYPE_10GBASE_KR		0x03
#define IXL_PHY_TYPE_40GBASE_KR4	0x04
#define IXL_PHY_TYPE_XAUI		0x05
#define IXL_PHY_TYPE_XFI		0x06
#define IXL_PHY_TYPE_SFI		0x07
#define IXL_PHY_TYPE_XLAUI		0x08
#define IXL_PHY_TYPE_XLPPI		0x09
#define IXL_PHY_TYPE_40GBASE_CR4_CU	0x0a
#define IXL_PHY_TYPE_10GBASE_CR1_CU	0x0b
#define IXL_PHY_TYPE_10GBASE_AOC	0x0c
#define IXL_PHY_TYPE_40GBASE_AOC	0x0d
#define IXL_PHY_TYPE_100BASE_TX		0x11
#define IXL_PHY_TYPE_1000BASE_T		0x12
#define IXL_PHY_TYPE_10GBASE_T		0x13
#define IXL_PHY_TYPE_10GBASE_SR		0x14
#define IXL_PHY_TYPE_10GBASE_LR		0x15
#define IXL_PHY_TYPE_10GBASE_SFPP_CU	0x16
#define IXL_PHY_TYPE_10GBASE_CR1	0x17
#define IXL_PHY_TYPE_40GBASE_CR4	0x18
#define IXL_PHY_TYPE_40GBASE_SR4	0x19
#define IXL_PHY_TYPE_40GBASE_LR4	0x1a
#define IXL_PHY_TYPE_1000BASE_SX	0x1b
#define IXL_PHY_TYPE_1000BASE_LX	0x1c
#define IXL_PHY_TYPE_1000BASE_T_OPTICAL	0x1d
#define IXL_PHY_TYPE_20GBASE_KR2	0x1e

#define IXL_PHY_TYPE_25GBASE_KR		0x1f
#define IXL_PHY_TYPE_25GBASE_CR		0x20
#define IXL_PHY_TYPE_25GBASE_SR		0x21
#define IXL_PHY_TYPE_25GBASE_LR		0x22
#define IXL_PHY_TYPE_25GBASE_AOC	0x23
#define IXL_PHY_TYPE_25GBASE_ACC	0x24

struct ixl_aq_module_desc {
	uint8_t		oui[3];
	uint8_t		_reserved1;
	uint8_t		part_number[16];
	uint8_t		revision[4];
	uint8_t		_reserved2[8];
} __packed __aligned(4);

struct ixl_aq_phy_abilities {
	uint32_t	phy_type;

	uint8_t		link_speed;
#define IXL_AQ_PHY_LINK_SPEED_100MB	0x1
#define IXL_AQ_PHY_LINK_SPEED_1000MB	0x2
#define IXL_AQ_PHY_LINK_SPEED_10GB	0x3
#define IXL_AQ_PHY_LINK_SPEED_40GB	0x4
#define IXL_AQ_PHY_LINK_SPEED_20GB	0x5
#define IXL_AQ_PHY_LINK_SPEED_25GB	0x6
	uint8_t		abilities;
	uint16_t	eee_capability;

	uint32_t	eeer_val;

	uint8_t		d3_lpan;
	uint8_t		phy_type_ext;
#define IXL_AQ_PHY_TYPE_EXT_25G_KR	0x01
#define IXL_AQ_PHY_TYPE_EXT_25G_CR	0x02
#define IXL_AQ_PHY_TYPE_EXT_25G_SR	0x04
#define IXL_AQ_PHY_TYPE_EXT_25G_LR	0x08
	uint8_t		fec_cfg_curr_mod_ext_info;
#define IXL_AQ_ENABLE_FEC_KR		0x01
#define IXL_AQ_ENABLE_FEC_RS		0x02
#define IXL_AQ_REQUEST_FEC_KR		0x04
#define IXL_AQ_REQUEST_FEC_RS		0x08
#define IXL_AQ_ENABLE_FEC_AUTO		0x10
#define IXL_AQ_MODULE_TYPE_EXT_MASK	0xe0
#define IXL_AQ_MODULE_TYPE_EXT_SHIFT	5
	uint8_t		ext_comp_code;

	uint8_t		phy_id[4];

	uint8_t		module_type[3];
	uint8_t		qualified_module_count;
#define IXL_AQ_PHY_MAX_QMS		16
	struct ixl_aq_module_desc
			qualified_module[IXL_AQ_PHY_MAX_QMS];
} __packed __aligned(4);

struct ixl_aq_link_param {
	uint8_t		notify;
#define IXL_AQ_LINK_NOTIFY	0x03
	uint8_t		_reserved1;
	uint8_t		phy;
	uint8_t		speed;
	uint8_t		status;
	uint8_t		_reserved2[11];
} __packed __aligned(4);

struct ixl_aq_vsi_param {
	uint16_t	uplink_seid;
	uint8_t		connect_type;
#define IXL_AQ_VSI_CONN_TYPE_NORMAL	(0x1)
#define IXL_AQ_VSI_CONN_TYPE_DEFAULT	(0x2)
#define IXL_AQ_VSI_CONN_TYPE_CASCADED	(0x3)
	uint8_t		_reserved1;

	uint8_t		vf_id;
	uint8_t		_reserved2;
	uint16_t	vsi_flags;
#define IXL_AQ_VSI_TYPE_SHIFT		0x0
#define IXL_AQ_VSI_TYPE_MASK		(0x3 << IXL_AQ_VSI_TYPE_SHIFT)
#define IXL_AQ_VSI_TYPE_VF		0x0
#define IXL_AQ_VSI_TYPE_VMDQ2		0x1
#define IXL_AQ_VSI_TYPE_PF		0x2
#define IXL_AQ_VSI_TYPE_EMP_MNG		0x3
#define IXL_AQ_VSI_FLAG_CASCADED_PV	0x4

	uint32_t	addr_hi;
	uint32_t	addr_lo;
} __packed __aligned(16);

struct ixl_aq_vsi_reply {
	uint16_t	seid;
	uint16_t	vsi_number;

	uint16_t	vsis_used;
	uint16_t	vsis_free;

	uint32_t	addr_hi;
	uint32_t	addr_lo;
} __packed __aligned(16);

struct ixl_aq_vsi_data {
	/* first 96 bytes are written by SW */
	uint16_t	valid_sections;
#define IXL_AQ_VSI_VALID_SWITCH		(1 << 0)
#define IXL_AQ_VSI_VALID_SECURITY	(1 << 1)
#define IXL_AQ_VSI_VALID_VLAN		(1 << 2)
#define IXL_AQ_VSI_VALID_CAS_PV		(1 << 3)
#define IXL_AQ_VSI_VALID_INGRESS_UP	(1 << 4)
#define IXL_AQ_VSI_VALID_EGRESS_UP	(1 << 5)
#define IXL_AQ_VSI_VALID_QUEUE_MAP	(1 << 6)
#define IXL_AQ_VSI_VALID_QUEUE_OPT	(1 << 7)
#define IXL_AQ_VSI_VALID_OUTER_UP	(1 << 8)
#define IXL_AQ_VSI_VALID_SCHED		(1 << 9)
	/* switch section */
	uint16_t	switch_id;
#define IXL_AQ_VSI_SWITCH_ID_SHIFT	0
#define IXL_AQ_VSI_SWITCH_ID_MASK	(0xfff << IXL_AQ_VSI_SWITCH_ID_SHIFT)
#define IXL_AQ_VSI_SWITCH_NOT_STAG	(1 << 12)
#define IXL_AQ_VSI_SWITCH_LOCAL_LB	(1 << 14)

	uint8_t		_reserved1[2];
	/* security section */
	uint8_t		sec_flags;
#define IXL_AQ_VSI_SEC_ALLOW_DEST_OVRD	(1 << 0)
#define IXL_AQ_VSI_SEC_ENABLE_VLAN_CHK	(1 << 1)
#define IXL_AQ_VSI_SEC_ENABLE_MAC_CHK	(1 << 2)
	uint8_t		_reserved2;

	/* vlan section */
	uint16_t	pvid;
	uint16_t	fcoe_pvid;

	uint8_t		port_vlan_flags;
#define IXL_AQ_VSI_PVLAN_MODE_SHIFT	0
#define IXL_AQ_VSI_PVLAN_MODE_MASK	(0x3 << IXL_AQ_VSI_PVLAN_MODE_SHIFT)
#define IXL_AQ_VSI_PVLAN_MODE_TAGGED	(0x1 << IXL_AQ_VSI_PVLAN_MODE_SHIFT)
#define IXL_AQ_VSI_PVLAN_MODE_UNTAGGED	(0x2 << IXL_AQ_VSI_PVLAN_MODE_SHIFT)
#define IXL_AQ_VSI_PVLAN_MODE_ALL	(0x3 << IXL_AQ_VSI_PVLAN_MODE_SHIFT)
#define IXL_AQ_VSI_PVLAN_INSERT_PVID	(0x4 << IXL_AQ_VSI_PVLAN_MODE_SHIFT)
#define IXL_AQ_VSI_PVLAN_EMOD_SHIFT	0x3
#define IXL_AQ_VSI_PVLAN_EMOD_MASK	(0x3 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT)
#define IXL_AQ_VSI_PVLAN_EMOD_STR_BOTH	(0x0 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT)
#define IXL_AQ_VSI_PVLAN_EMOD_STR_UP	(0x1 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT)
#define IXL_AQ_VSI_PVLAN_EMOD_STR	(0x2 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT)
#define IXL_AQ_VSI_PVLAN_EMOD_NOTHING	(0x3 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT)
	uint8_t		_reserved3[3];

	/* ingress egress up section */
	uint32_t	ingress_table;
#define IXL_AQ_VSI_UP_SHIFT(_up)	((_up) * 3)
#define IXL_AQ_VSI_UP_MASK(_up)		(0x7 << IXL_AQ_VSI_UP_SHIFT(_up))
	uint32_t	egress_table;

	/* cascaded pv section */
	uint16_t	cas_pv_tag;
	uint8_t		cas_pv_flags;
#define IXL_AQ_VSI_CAS_PV_TAGX_SHIFT	0
#define IXL_AQ_VSI_CAS_PV_TAGX_MASK	(0x3 << IXL_AQ_VSI_CAS_PV_TAGX_SHIFT)
#define IXL_AQ_VSI_CAS_PV_TAGX_LEAVE	(0x0 << IXL_AQ_VSI_CAS_PV_TAGX_SHIFT)
#define IXL_AQ_VSI_CAS_PV_TAGX_REMOVE	(0x1 << IXL_AQ_VSI_CAS_PV_TAGX_SHIFT)
#define IXL_AQ_VSI_CAS_PV_TAGX_COPY	(0x2 << IXL_AQ_VSI_CAS_PV_TAGX_SHIFT)
#define IXL_AQ_VSI_CAS_PV_INSERT_TAG	(1 << 4)
#define IXL_AQ_VSI_CAS_PV_ETAG_PRUNE	(1 << 5)
#define IXL_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG \
					(1 << 6)
	uint8_t		_reserved4;

	/* queue mapping section */
	uint16_t	mapping_flags;
#define IXL_AQ_VSI_QUE_MAP_MASK		0x1
#define IXL_AQ_VSI_QUE_MAP_CONTIG	0x0
#define IXL_AQ_VSI_QUE_MAP_NONCONTIG	0x1
	uint16_t	queue_mapping[16];
#define IXL_AQ_VSI_QUEUE_SHIFT		0x0
#define IXL_AQ_VSI_QUEUE_MASK		(0x7ff << IXL_AQ_VSI_QUEUE_SHIFT)
	uint16_t	tc_mapping[8];
#define IXL_AQ_VSI_TC_Q_OFFSET_SHIFT	0
#define IXL_AQ_VSI_TC_Q_OFFSET_MASK	(0x1ff << IXL_AQ_VSI_TC_Q_OFFSET_SHIFT)
#define IXL_AQ_VSI_TC_Q_NUMBER_SHIFT	9
#define IXL_AQ_VSI_TC_Q_NUMBER_MASK	(0x7 << IXL_AQ_VSI_TC_Q_NUMBER_SHIFT)

	/* queueing option section */
	uint8_t		queueing_opt_flags;
#define IXL_AQ_VSI_QUE_OPT_MCAST_UDP_EN	(1 << 2)
#define IXL_AQ_VSI_QUE_OPT_UCAST_UDP_EN	(1 << 3)
#define IXL_AQ_VSI_QUE_OPT_TCP_EN	(1 << 4)
#define IXL_AQ_VSI_QUE_OPT_FCOE_EN	(1 << 5)
#define IXL_AQ_VSI_QUE_OPT_RSS_LUT_PF	0
#define IXL_AQ_VSI_QUE_OPT_RSS_LUT_VSI	(1 << 6)
	uint8_t		_reserved5[3];

	/* scheduler section */
	uint8_t		up_enable_bits;
	uint8_t		_reserved6;

	/* outer up section */
	uint32_t	outer_up_table; /* same as ingress/egress tables */
	uint8_t		_reserved7[8];

	/* last 32 bytes are written by FW */
	uint16_t	qs_handle[8];
#define IXL_AQ_VSI_QS_HANDLE_INVALID	0xffff
	uint16_t	stat_counter_idx;
	uint16_t	sched_id;

	uint8_t		_reserved8[12];
} __packed __aligned(8);

CTASSERT(sizeof(struct ixl_aq_vsi_data) == 128);

struct ixl_aq_vsi_promisc_param {
	uint16_t	flags;
	uint16_t	valid_flags;
#define IXL_AQ_VSI_PROMISC_FLAG_UCAST	(1 << 0)
#define IXL_AQ_VSI_PROMISC_FLAG_MCAST	(1 << 1)
#define IXL_AQ_VSI_PROMISC_FLAG_BCAST	(1 << 2)
#define IXL_AQ_VSI_PROMISC_FLAG_DFLT	(1 << 3)
#define IXL_AQ_VSI_PROMISC_FLAG_VLAN	(1 << 4)
#define IXL_AQ_VSI_PROMISC_FLAG_RXONLY	(1 << 15)

	uint16_t	seid;
#define IXL_AQ_VSI_PROMISC_SEID_VALID	(1 << 15)
	uint16_t	vlan;
#define IXL_AQ_VSI_PROMISC_VLAN_VALID	(1 << 15)
	uint32_t	reserved[2];
} __packed __aligned(8);

struct ixl_aq_veb_param {
	uint16_t	uplink_seid;
	uint16_t	downlink_seid;
	uint16_t	veb_flags;
#define IXL_AQ_ADD_VEB_FLOATING		(1 << 0)
#define IXL_AQ_ADD_VEB_PORT_TYPE_SHIFT	1
#define IXL_AQ_ADD_VEB_PORT_TYPE_MASK	(0x3 << IXL_AQ_ADD_VEB_PORT_TYPE_SHIFT)
#define IXL_AQ_ADD_VEB_PORT_TYPE_DEFAULT \
					(0x2 << IXL_AQ_ADD_VEB_PORT_TYPE_SHIFT)
#define IXL_AQ_ADD_VEB_PORT_TYPE_DATA	(0x4 << IXL_AQ_ADD_VEB_PORT_TYPE_SHIFT)
#define IXL_AQ_ADD_VEB_ENABLE_L2_FILTER	(1 << 3) /* deprecated */
#define IXL_AQ_ADD_VEB_DISABLE_STATS	(1 << 4)
	uint8_t		enable_tcs;
	uint8_t		_reserved[9];
} __packed __aligned(16);

struct ixl_aq_veb_reply {
	uint16_t	_reserved1;
	uint16_t	_reserved2;
	uint16_t	_reserved3;
	uint16_t	switch_seid;
	uint16_t	veb_seid;
#define IXL_AQ_VEB_ERR_FLAG_NO_VEB	(1 << 0)
#define IXL_AQ_VEB_ERR_FLAG_NO_SCHED	(1 << 1)
#define IXL_AQ_VEB_ERR_FLAG_NO_COUNTER	(1 << 2)
#define IXL_AQ_VEB_ERR_FLAG_NO_ENTRY	(1 << 3)
	uint16_t	statistic_index;
	uint16_t	vebs_used;
	uint16_t	vebs_free;
} __packed __aligned(16);

/* GET PHY ABILITIES param[0] */
#define IXL_AQ_PHY_REPORT_QUAL		(1 << 0)
#define IXL_AQ_PHY_REPORT_INIT		(1 << 1)

/* RESTART_AN param[0] */
#define IXL_AQ_PHY_RESTART_AN		(1 << 1)
#define IXL_AQ_PHY_LINK_ENABLE		(1 << 2)

struct ixl_aq_link_status { /* this occupies the iaq_param space */
	uint16_t	command_flags; /* only field set on command */
#define IXL_AQ_LSE_MASK			0x3
#define IXL_AQ_LSE_NOP			0x0
#define IXL_AQ_LSE_DISABLE		0x2
#define IXL_AQ_LSE_ENABLE		0x3
#define IXL_AQ_LSE_IS_ENABLED		0x1 /* only set in response */
	uint8_t		phy_type;
	uint8_t		link_speed;
	uint8_t		link_info;
#define IXL_AQ_LINK_UP_FUNCTION		0x01
#define IXL_AQ_LINK_FAULT		0x02
#define IXL_AQ_LINK_FAULT_TX		0x04
#define IXL_AQ_LINK_FAULT_RX		0x08
#define IXL_AQ_LINK_FAULT_REMOTE	0x10
#define IXL_AQ_LINK_UP_PORT		0x20
#define IXL_AQ_MEDIA_AVAILABLE		0x40
#define IXL_AQ_SIGNAL_DETECT		0x80
	uint8_t		an_info;
#define IXL_AQ_AN_COMPLETED		0x01
#define IXL_AQ_LP_AN_ABILITY		0x02
#define IXL_AQ_PD_FAULT			0x04
#define IXL_AQ_FEC_EN			0x08
#define IXL_AQ_PHY_LOW_POWER		0x10
#define IXL_AQ_LINK_PAUSE_TX		0x20
#define IXL_AQ_LINK_PAUSE_RX		0x40
#define IXL_AQ_QUALIFIED_MODULE		0x80

	uint8_t		ext_info;
#define IXL_AQ_LINK_PHY_TEMP_ALARM	0x01
#define IXL_AQ_LINK_XCESSIVE_ERRORS	0x02
#define IXL_AQ_LINK_TX_SHIFT		0x02
#define IXL_AQ_LINK_TX_MASK		(0x03 << IXL_AQ_LINK_TX_SHIFT)
#define IXL_AQ_LINK_TX_ACTIVE		0x00
#define IXL_AQ_LINK_TX_DRAINED		0x01
#define IXL_AQ_LINK_TX_FLUSHED		0x03
#define IXL_AQ_LINK_FORCED_40G		0x10
/* 25G Error Codes */
#define IXL_AQ_25G_NO_ERR		0x00
#define IXL_AQ_25G_NOT_PRESENT		0x01
#define IXL_AQ_25G_NVM_CRC_ERR		0x02
#define IXL_AQ_25G_SBUS_UCODE_ERR	0x03
#define IXL_AQ_25G_SERDES_UCODE_ERR	0x04
#define IXL_AQ_25G_NIMB_UCODE_ERR	0x05
	uint8_t		loopback;
	uint16_t	max_frame_size;

	uint8_t		config;
#define IXL_AQ_CONFIG_FEC_KR_ENA	0x01
#define IXL_AQ_CONFIG_FEC_RS_ENA	0x02
#define IXL_AQ_CONFIG_CRC_ENA		0x04
#define IXL_AQ_CONFIG_PACING_MASK	0x78
	uint8_t		power_desc;
#define IXL_AQ_LINK_POWER_CLASS_1	0x00
#define IXL_AQ_LINK_POWER_CLASS_2	0x01
#define IXL_AQ_LINK_POWER_CLASS_3	0x02
#define IXL_AQ_LINK_POWER_CLASS_4	0x03
#define IXL_AQ_PWR_CLASS_MASK		0x03

	uint8_t		reserved[4];
} __packed __aligned(4);
/* event mask command flags for param[2] */
#define IXL_AQ_PHY_EV_MASK		0x3ff
#define IXL_AQ_PHY_EV_LINK_UPDOWN	(1 << 1)
#define IXL_AQ_PHY_EV_MEDIA_NA		(1 << 2)
#define IXL_AQ_PHY_EV_LINK_FAULT	(1 << 3)
#define IXL_AQ_PHY_EV_PHY_TEMP_ALARM	(1 << 4)
#define IXL_AQ_PHY_EV_EXCESS_ERRORS	(1 << 5)
#define IXL_AQ_PHY_EV_SIGNAL_DETECT	(1 << 6)
#define IXL_AQ_PHY_EV_AN_COMPLETED	(1 << 7)
#define IXL_AQ_PHY_EV_MODULE_QUAL_FAIL	(1 << 8)
#define IXL_AQ_PHY_EV_PORT_TX_SUSPENDED	(1 << 9)

/* aq response codes */
#define IXL_AQ_RC_OK			0  /* success */
#define IXL_AQ_RC_EPERM			1  /* Operation not permitted */
#define IXL_AQ_RC_ENOENT		2  /* No such element */
#define IXL_AQ_RC_ESRCH			3  /* Bad opcode */
#define IXL_AQ_RC_EINTR			4  /* operation interrupted */
#define IXL_AQ_RC_EIO			5  /* I/O error */
#define IXL_AQ_RC_ENXIO			6  /* No such resource */
#define IXL_AQ_RC_E2BIG			7  /* Arg too long */
#define IXL_AQ_RC_EAGAIN		8  /* Try again */
#define IXL_AQ_RC_ENOMEM		9  /* Out of memory */
#define IXL_AQ_RC_EACCES		10 /* Permission denied */
#define IXL_AQ_RC_EFAULT		11 /* Bad address */
#define IXL_AQ_RC_EBUSY			12 /* Device or resource busy */
#define IXL_AQ_RC_EEXIST		13 /* object already exists */
#define IXL_AQ_RC_EINVAL		14 /* invalid argument */
#define IXL_AQ_RC_ENOTTY		15 /* not a typewriter */
#define IXL_AQ_RC_ENOSPC		16 /* No space or alloc failure */
#define IXL_AQ_RC_ENOSYS		17 /* function not implemented */
#define IXL_AQ_RC_ERANGE		18 /* parameter out of range */
#define IXL_AQ_RC_EFLUSHED		19 /* cmd flushed due to prev error */
#define IXL_AQ_RC_BAD_ADDR		20 /* contains a bad pointer */
#define IXL_AQ_RC_EMODE			21 /* not allowed in current mode */
#define IXL_AQ_RC_EFBIG			22 /* file too large */

struct ixl_tx_desc {
	uint64_t		addr;
	uint64_t		cmd;
#define IXL_TX_DESC_DTYPE_SHIFT		0
#define IXL_TX_DESC_DTYPE_MASK		(0xfULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_DATA		(0x0ULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_NOP		(0x1ULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_CONTEXT	(0x1ULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_FCOE_CTX	(0x2ULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_FD		(0x8ULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_DDP_CTX	(0x9ULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_FLEX_DATA	(0xbULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_FLEX_CTX_1	(0xcULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_FLEX_CTX_2	(0xdULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_DONE		(0xfULL << IXL_TX_DESC_DTYPE_SHIFT)

#define IXL_TX_DESC_CMD_SHIFT		4
#define IXL_TX_DESC_CMD_MASK		(0x3ffULL << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_EOP		(0x001 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_RS		(0x002 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_ICRC		(0x004 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_IL2TAG1		(0x008 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_DUMMY		(0x010 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_IIPT_MASK	(0x060 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_IIPT_NONIP	(0x000 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_IIPT_IPV6	(0x020 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_IIPT_IPV4	(0x040 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_IIPT_IPV4_CSUM	(0x060 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_FCOET		(0x080 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_L4T_EOFT_MASK	(0x300 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_L4T_EOFT_UNK	(0x000 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_L4T_EOFT_TCP	(0x100 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_L4T_EOFT_SCTP	(0x200 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_L4T_EOFT_UDP	(0x300 << IXL_TX_DESC_CMD_SHIFT)

#define IXL_TX_DESC_MACLEN_SHIFT	16
#define IXL_TX_DESC_MACLEN_MASK		(0x7fULL << IXL_TX_DESC_MACLEN_SHIFT)
#define IXL_TX_DESC_IPLEN_SHIFT		23
#define IXL_TX_DESC_IPLEN_MASK		(0x7fULL << IXL_TX_DESC_IPLEN_SHIFT)
#define IXL_TX_DESC_L4LEN_SHIFT		30
#define IXL_TX_DESC_L4LEN_MASK		(0xfULL << IXL_TX_DESC_L4LEN_SHIFT)
#define IXL_TX_DESC_FCLEN_SHIFT		30
#define IXL_TX_DESC_FCLEN_MASK		(0xfULL << IXL_TX_DESC_FCLEN_SHIFT)

#define IXL_TX_DESC_BSIZE_SHIFT		34
#define IXL_TX_DESC_BSIZE_MAX		0x3fffULL
#define IXL_TX_DESC_BSIZE_MASK		\
	(IXL_TX_DESC_BSIZE_MAX << IXL_TX_DESC_BSIZE_SHIFT)
} __packed __aligned(16);
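
/*
 * Illustrative sketch (assumed; not the driver's transmit path): a
 * single contiguous packet can be described by one data descriptor with
 * EOP and RS set and the buffer length shifted into the BSIZE field.
 * len is assumed to be <= IXL_TX_DESC_BSIZE_MAX.
 */
static inline void
ixl_tx_desc_example(struct ixl_tx_desc *txd, bus_addr_t addr, bus_size_t len)
{
	uint64_t cmd;

	/* dtype, command bits and buffer size all live in the cmd word */
	cmd = IXL_TX_DESC_DTYPE_DATA |
	    IXL_TX_DESC_CMD_EOP | IXL_TX_DESC_CMD_RS | IXL_TX_DESC_CMD_ICRC |
	    ((uint64_t)len << IXL_TX_DESC_BSIZE_SHIFT);

	/* descriptors are read by the hardware in little-endian order */
	htolem64(&txd->addr, addr);
	htolem64(&txd->cmd, cmd);
}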

struct ixl_rx_rd_desc_16 {
	uint64_t		paddr; /* packet addr */
	uint64_t		haddr; /* header addr */
} __packed __aligned(16);

struct ixl_rx_rd_desc_32 {
	uint64_t		paddr; /* packet addr */
	uint64_t		haddr; /* header addr */
	uint64_t		_reserved1;
	uint64_t		_reserved2;
} __packed __aligned(16);

struct ixl_rx_wb_desc_16 {
	uint64_t		qword0;
	uint64_t		qword1;
#define IXL_RX_DESC_DD			(1 << 0)
#define IXL_RX_DESC_EOP			(1 << 1)
#define IXL_RX_DESC_L2TAG1P		(1 << 2)
#define IXL_RX_DESC_L3L4P		(1 << 3)
#define IXL_RX_DESC_CRCP		(1 << 4)
#define IXL_RX_DESC_TSYNINDX_SHIFT	5	/* TSYNINDX */
#define IXL_RX_DESC_TSYNINDX_MASK	(7 << IXL_RX_DESC_TSYNINDX_SHIFT)
#define IXL_RX_DESC_UMB_SHIFT		9
#define IXL_RX_DESC_UMB_MASK		(0x3 << IXL_RX_DESC_UMB_SHIFT)
#define IXL_RX_DESC_UMB_UCAST		(0x0 << IXL_RX_DESC_UMB_SHIFT)
#define IXL_RX_DESC_UMB_MCAST		(0x1 << IXL_RX_DESC_UMB_SHIFT)
#define IXL_RX_DESC_UMB_BCAST		(0x2 << IXL_RX_DESC_UMB_SHIFT)
#define IXL_RX_DESC_UMB_MIRROR		(0x3 << IXL_RX_DESC_UMB_SHIFT)
#define IXL_RX_DESC_FLM			(1 << 11)
#define IXL_RX_DESC_FLTSTAT_SHIFT	12
#define IXL_RX_DESC_FLTSTAT_MASK	(0x3 << IXL_RX_DESC_FLTSTAT_SHIFT)
#define IXL_RX_DESC_FLTSTAT_NODATA	(0x0 << IXL_RX_DESC_FLTSTAT_SHIFT)
#define IXL_RX_DESC_FLTSTAT_FDFILTID	(0x1 << IXL_RX_DESC_FLTSTAT_SHIFT)
#define IXL_RX_DESC_FLTSTAT_RSS		(0x3 << IXL_RX_DESC_FLTSTAT_SHIFT)
#define IXL_RX_DESC_LPBK		(1 << 14)
#define IXL_RX_DESC_IPV6EXTADD		(1 << 15)
#define IXL_RX_DESC_INT_UDP_0		(1 << 18)

#define IXL_RX_DESC_RXE			(1 << 19)
#define IXL_RX_DESC_HBO			(1 << 21)
#define IXL_RX_DESC_IPE			(1 << 22)
#define IXL_RX_DESC_L4E			(1 << 23)
#define IXL_RX_DESC_EIPE		(1 << 24)
#define IXL_RX_DESC_OVERSIZE		(1 << 25)

#define IXL_RX_DESC_PTYPE_SHIFT		30
#define IXL_RX_DESC_PTYPE_MASK		(0xffULL << IXL_RX_DESC_PTYPE_SHIFT)

#define IXL_RX_DESC_PLEN_SHIFT		38
#define IXL_RX_DESC_PLEN_MASK		(0x3fffULL << IXL_RX_DESC_PLEN_SHIFT)
#define IXL_RX_DESC_HLEN_SHIFT		42
#define IXL_RX_DESC_HLEN_MASK		(0x7ffULL << IXL_RX_DESC_HLEN_SHIFT)
} __packed __aligned(16);
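
/*
 * Illustrative sketch (assumed; not the driver's ixl_rxeof): once the
 * hardware sets IXL_RX_DESC_DD in qword1 of a write-back descriptor,
 * the packet length can be pulled out of the same word.
 */
static inline unsigned int
ixl_rx_desc_plen_example(const struct ixl_rx_wb_desc_16 *rxd)
{
	/* the descriptor is written back by the hardware in LE order */
	uint64_t word = lemtoh64(&rxd->qword1);

	return ((word & IXL_RX_DESC_PLEN_MASK) >> IXL_RX_DESC_PLEN_SHIFT);
}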

struct ixl_rx_wb_desc_32 {
	uint64_t		qword0;
	uint64_t		qword1;
	uint64_t		qword2;
	uint64_t		qword3;
} __packed __aligned(16);

#define IXL_TX_PKT_DESCS		8
#define IXL_TX_QUEUE_ALIGN		128
#define IXL_RX_QUEUE_ALIGN		128

#define IXL_HARDMTU			9706 /* - ETHER_HDR_LEN? */

#define IXL_PCIREG			PCI_MAPREG_START

#define IXL_ITR0			0x0
#define IXL_ITR1			0x1
#define IXL_ITR2			0x2
#define IXL_NOITR			0x2

#define IXL_AQ_NUM			256
#define IXL_AQ_MASK			(IXL_AQ_NUM - 1)
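/* IXL_AQ_NUM is a power of two, so ring indexes wrap with "& IXL_AQ_MASK" */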
#define IXL_AQ_ALIGN			64 /* lol */
#define IXL_AQ_BUFLEN			4096

#define IXL_HMC_ROUNDUP			512
#define IXL_HMC_PGSIZE			4096
#define IXL_HMC_DVASZ			sizeof(uint64_t)
#define IXL_HMC_PGS			(IXL_HMC_PGSIZE / IXL_HMC_DVASZ)
#define IXL_HMC_L2SZ			(IXL_HMC_PGSIZE * IXL_HMC_PGS)
#define IXL_HMC_PDVALID			1ULL

struct ixl_aq_regs {
	bus_size_t		atq_tail;
	bus_size_t		atq_head;
	bus_size_t		atq_len;
	bus_size_t		atq_bal;
	bus_size_t		atq_bah;

	bus_size_t		arq_tail;
	bus_size_t		arq_head;
	bus_size_t		arq_len;
	bus_size_t		arq_bal;
	bus_size_t		arq_bah;

	uint32_t		atq_len_enable;
	uint32_t		atq_tail_mask;
	uint32_t		atq_head_mask;

	uint32_t		arq_len_enable;
	uint32_t		arq_tail_mask;
	uint32_t		arq_head_mask;
};

struct ixl_phy_type {
	uint64_t	phy_type;
	uint64_t	ifm_type;
};

struct ixl_speed_type {
	uint8_t		dev_speed;
	uint64_t	net_speed;
};

struct ixl_aq_buf {
	SIMPLEQ_ENTRY(ixl_aq_buf)
				 aqb_entry;
	void			*aqb_data;
	bus_dmamap_t		 aqb_map;
};
SIMPLEQ_HEAD(ixl_aq_bufs, ixl_aq_buf);

struct ixl_dmamem {
	bus_dmamap_t		ixm_map;
	bus_dma_segment_t	ixm_seg;
	int			ixm_nsegs;
	size_t			ixm_size;
	caddr_t			ixm_kva;
};
#define IXL_DMA_MAP(_ixm)	((_ixm)->ixm_map)
#define IXL_DMA_DVA(_ixm)	((_ixm)->ixm_map->dm_segs[0].ds_addr)
#define IXL_DMA_KVA(_ixm)	((void *)(_ixm)->ixm_kva)
#define IXL_DMA_LEN(_ixm)	((_ixm)->ixm_size)

struct ixl_hmc_entry {
	uint64_t		 hmc_base;
	uint32_t		 hmc_count;
	uint32_t		 hmc_size;
};

#define IXL_HMC_LAN_TX		 0
#define IXL_HMC_LAN_RX		 1
#define IXL_HMC_FCOE_CTX	 2
#define IXL_HMC_FCOE_FILTER	 3
#define IXL_HMC_COUNT		 4

struct ixl_hmc_pack {
	uint16_t		offset;
	uint16_t		width;
	uint16_t		lsb;
};

/*
 * these hmc objects have weird sizes and alignments, so these are abstract
 * representations of them that are nice for C to populate.
 *
 * the packing code relies on little-endian values being stored in the fields,
 * no high bits in the fields being set, and the fields must be packed in the
 * same order as they are in the ctx structure.
 */

struct ixl_hmc_rxq {
	uint16_t		 head;
	uint8_t			 cpuid;
	uint64_t		 base;
#define IXL_HMC_RXQ_BASE_UNIT		128
	uint16_t		 qlen;
	uint16_t		 dbuff;
#define IXL_HMC_RXQ_DBUFF_UNIT		128
	uint8_t			 hbuff;
#define IXL_HMC_RXQ_HBUFF_UNIT		64
	uint8_t			 dtype;
#define IXL_HMC_RXQ_DTYPE_NOSPLIT	0x0
#define IXL_HMC_RXQ_DTYPE_HSPLIT	0x1
#define IXL_HMC_RXQ_DTYPE_SPLIT_ALWAYS	0x2
	uint8_t			 dsize;
#define IXL_HMC_RXQ_DSIZE_16		0
#define IXL_HMC_RXQ_DSIZE_32		1
	uint8_t			 crcstrip;
	uint8_t			 fc_ena;
	uint8_t			 l2sel;
	uint8_t			 hsplit_0;
	uint8_t			 hsplit_1;
	uint8_t			 showiv;
	uint16_t		 rxmax;
	uint8_t			 tphrdesc_ena;
	uint8_t			 tphwdesc_ena;
	uint8_t			 tphdata_ena;
	uint8_t			 tphhead_ena;
	uint8_t			 lrxqthresh;
	uint8_t			 prefena;
};

static const struct ixl_hmc_pack ixl_hmc_pack_rxq[] = {
	{ offsetof(struct ixl_hmc_rxq, head),		13,	0 },
	{ offsetof(struct ixl_hmc_rxq, cpuid),		8,	13 },
	{ offsetof(struct ixl_hmc_rxq, base),		57,	32 },
	{ offsetof(struct ixl_hmc_rxq, qlen),		13,	89 },
	{ offsetof(struct ixl_hmc_rxq, dbuff),		7,	102 },
	{ offsetof(struct ixl_hmc_rxq, hbuff),		5,	109 },
	{ offsetof(struct ixl_hmc_rxq, dtype),		2,	114 },
	{ offsetof(struct ixl_hmc_rxq, dsize),		1,	116 },
	{ offsetof(struct ixl_hmc_rxq, crcstrip),	1,	117 },
	{ offsetof(struct ixl_hmc_rxq, fc_ena),		1,	118 },
	{ offsetof(struct ixl_hmc_rxq, l2sel),		1,	119 },
	{ offsetof(struct ixl_hmc_rxq, hsplit_0),	4,	120 },
	{ offsetof(struct ixl_hmc_rxq, hsplit_1),	2,	124 },
	{ offsetof(struct ixl_hmc_rxq, showiv),		1,	127 },
	{ offsetof(struct ixl_hmc_rxq, rxmax),		14,	174 },
	{ offsetof(struct ixl_hmc_rxq, tphrdesc_ena),	1,	193 },
	{ offsetof(struct ixl_hmc_rxq, tphwdesc_ena),	1,	194 },
	{ offsetof(struct ixl_hmc_rxq, tphdata_ena),	1,	195 },
	{ offsetof(struct ixl_hmc_rxq, tphhead_ena),	1,	196 },
	{ offsetof(struct ixl_hmc_rxq, lrxqthresh),	3,	198 },
	{ offsetof(struct ixl_hmc_rxq, prefena),	1,	201 },
};

#define IXL_HMC_RXQ_MINSIZE (201 + 1)
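
/*
 * Illustrative sketch (assumed; the driver's real packer is the
 * ixl_hmc_pack() prototyped below): each table entry copies the low
 * "width" bits of the little-endian field at byte "offset" in the
 * source struct into the context image starting at bit "lsb".
 */
static inline void
ixl_hmc_pack_field_example(uint8_t *ctx, const uint8_t *src,
    const struct ixl_hmc_pack *p)
{
	unsigned int i, dbit;

	for (i = 0; i < p->width; i++) {
		/* test bit i of the source field, LSB first */
		if (!(src[p->offset + (i / 8)] & (1 << (i % 8))))
			continue;

		/* set the corresponding bit in the (zeroed) context */
		dbit = p->lsb + i;
		ctx[dbit / 8] |= 1 << (dbit % 8);
	}
}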

struct ixl_hmc_txq {
	uint16_t		head;
	uint8_t			new_context;
	uint64_t		base;
#define IXL_HMC_TXQ_BASE_UNIT		128
	uint8_t			fc_ena;
	uint8_t			timesync_ena;
	uint8_t			fd_ena;
	uint8_t			alt_vlan_ena;
	uint16_t		thead_wb;
	uint8_t			cpuid;
	uint8_t			head_wb_ena;
#define IXL_HMC_TXQ_DESC_WB		0
#define IXL_HMC_TXQ_HEAD_WB		1
	uint16_t		qlen;
	uint8_t			tphrdesc_ena;
	uint8_t			tphrpacket_ena;
	uint8_t			tphwdesc_ena;
	uint64_t		head_wb_addr;
	uint32_t		crc;
	uint16_t		rdylist;
	uint8_t			rdylist_act;
};

static const struct ixl_hmc_pack ixl_hmc_pack_txq[] = {
	{ offsetof(struct ixl_hmc_txq, head),		13,	0 },
	{ offsetof(struct ixl_hmc_txq, new_context),	1,	30 },
	{ offsetof(struct ixl_hmc_txq, base),		57,	32 },
	{ offsetof(struct ixl_hmc_txq, fc_ena),		1,	89 },
	{ offsetof(struct ixl_hmc_txq, timesync_ena),	1,	90 },
	{ offsetof(struct ixl_hmc_txq, fd_ena),		1,	91 },
	{ offsetof(struct ixl_hmc_txq, alt_vlan_ena),	1,	92 },
	{ offsetof(struct ixl_hmc_txq, cpuid),		8,	96 },
/* line 1 */
	{ offsetof(struct ixl_hmc_txq, thead_wb),	13,	0 + 128 },
	{ offsetof(struct ixl_hmc_txq, head_wb_ena),	1,	32 + 128 },
	{ offsetof(struct ixl_hmc_txq, qlen),		13,	33 + 128 },
	{ offsetof(struct ixl_hmc_txq, tphrdesc_ena),	1,	46 + 128 },
	{ offsetof(struct ixl_hmc_txq, tphrpacket_ena),	1,	47 + 128 },
	{ offsetof(struct ixl_hmc_txq, tphwdesc_ena),	1,	48 + 128 },
	{ offsetof(struct ixl_hmc_txq, head_wb_addr),	64,	64 + 128 },
/* line 7 */
	{ offsetof(struct ixl_hmc_txq, crc),		32,	0 + (7*128) },
	{ offsetof(struct ixl_hmc_txq, rdylist),	10,	84 + (7*128) },
	{ offsetof(struct ixl_hmc_txq, rdylist_act),	1,	94 + (7*128) },
};

#define IXL_HMC_TXQ_MINSIZE (94 + (7*128) + 1)

struct ixl_tx_map {
	struct mbuf		*txm_m;
	bus_dmamap_t		 txm_map;
	unsigned int		 txm_eop;
};

struct ixl_tx_ring {
	unsigned int		 txr_prod;
	unsigned int		 txr_cons;

	struct ixl_tx_map	*txr_maps;
	struct ixl_dmamem	 txr_mem;

	bus_size_t		 txr_tail;
	unsigned int		 txr_qid;
};

struct ixl_rx_map {
	struct mbuf		*rxm_m;
	bus_dmamap_t		 rxm_map;
};

struct ixl_rx_ring {
	struct ixl_softc	*rxr_sc;

	struct if_rxring	 rxr_acct;
	struct timeout		 rxr_refill;

	unsigned int		 rxr_prod;
	unsigned int		 rxr_cons;

	struct ixl_rx_map	*rxr_maps;
	struct ixl_dmamem	 rxr_mem;

	struct mbuf		*rxr_m_head;
	struct mbuf		**rxr_m_tail;

	bus_size_t		 rxr_tail;
	unsigned int		 rxr_qid;
};

struct ixl_softc {
	struct device		 sc_dev;
	struct arpcom		 sc_ac;
	struct ifmedia		 sc_media;
	uint64_t		 sc_media_status;
	uint64_t		 sc_media_active;

	pci_chipset_tag_t	 sc_pc;
	pci_intr_handle_t	 sc_ih;
	void			*sc_ihc;
	pcitag_t		 sc_tag;

	bus_dma_tag_t		 sc_dmat;
	bus_space_tag_t		 sc_memt;
	bus_space_handle_t	 sc_memh;
	bus_size_t		 sc_mems;

	uint8_t			 sc_pf_id;
	uint16_t		 sc_uplink_seid;	/* le */
	uint16_t		 sc_downlink_seid;	/* le */
	uint16_t		 sc_veb_seid;		/* le */
	uint16_t		 sc_vsi_number;		/* le */
	uint16_t		 sc_seid;
	unsigned int		 sc_base_queue;

	struct ixl_dmamem	 sc_vsi;

	const struct ixl_aq_regs *
				 sc_aq_regs;

	struct mutex		 sc_atq_mtx;
	struct ixl_dmamem	 sc_atq;
	unsigned int		 sc_atq_prod;
	unsigned int		 sc_atq_cons;

	struct ixl_dmamem	 sc_arq;
	struct task		 sc_arq_task;
	struct ixl_aq_bufs	 sc_arq_idle;
	struct ixl_aq_bufs	 sc_arq_live;
	struct if_rxring	 sc_arq_ring;
	unsigned int		 sc_arq_prod;
	unsigned int		 sc_arq_cons;

	struct ixl_dmamem	 sc_hmc_sd;
	struct ixl_dmamem	 sc_hmc_pd;
	struct ixl_hmc_entry	 sc_hmc_entries[IXL_HMC_COUNT];

	unsigned int		 sc_nrings;

	unsigned int		 sc_tx_ring_ndescs;
	unsigned int		 sc_rx_ring_ndescs;
	unsigned int		 sc_nqueues;	/* 1 << sc_nqueues */
};
#define DEVNAME(_sc) ((_sc)->sc_dev.dv_xname)

struct ixl_atq {
	SIMPLEQ_ENTRY(ixl_atq)	  iatq_entry;
	struct ixl_aq_desc	  iatq_desc;
	void			 *iatq_arg;
	void			(*iatq_fn)(struct ixl_softc *, void *);
};
SIMPLEQ_HEAD(ixl_atq_list, ixl_atq);

#define delaymsec(_ms)	delay(1000 * (_ms))

static void	ixl_clear_hw(struct ixl_softc *);
static int	ixl_pf_reset(struct ixl_softc *);

static int	ixl_dmamem_alloc(struct ixl_softc *, struct ixl_dmamem *,
		    bus_size_t, u_int);
static void	ixl_dmamem_free(struct ixl_softc *, struct ixl_dmamem *);

static int	ixl_arq_fill(struct ixl_softc *);
static void	ixl_arq_unfill(struct ixl_softc *);

static int	ixl_atq_poll(struct ixl_softc *, struct ixl_aq_desc *,
		    unsigned int);
static void	ixl_atq_set(struct ixl_atq *,
		    void (*)(struct ixl_softc *, void *), void *);
static void	ixl_atq_post(struct ixl_softc *, struct ixl_atq *);
static void	ixl_atq_done(struct ixl_softc *);
static void	ixl_atq_exec(struct ixl_softc *, struct ixl_atq *,
		    const char *);
static int	ixl_get_version(struct ixl_softc *);
static int	ixl_pxe_clear(struct ixl_softc *);
static int	ixl_lldp_shut(struct ixl_softc *);
static int	ixl_get_mac(struct ixl_softc *);
static int	ixl_get_switch_config(struct ixl_softc *);
static int	ixl_phy_mask_ints(struct ixl_softc *);
static int	ixl_get_phy_abilities(struct ixl_softc *, uint64_t *);
static int	ixl_restart_an(struct ixl_softc *);
static int	ixl_hmc(struct ixl_softc *);
static void	ixl_hmc_free(struct ixl_softc *);
static int	ixl_get_vsi(struct ixl_softc *);
static int	ixl_set_vsi(struct ixl_softc *);
static int	ixl_get_link_status(struct ixl_softc *);
static int	ixl_set_link_status(struct ixl_softc *,
		    const struct ixl_aq_desc *);
static void	ixl_arq(void *);
static void	ixl_hmc_pack(void *, const void *,
		    const struct ixl_hmc_pack *, unsigned int);

static int	ixl_match(struct device *, void *, void *);
static void	ixl_attach(struct device *, struct device *, void *);

static void	ixl_media_add(struct ixl_softc *, uint64_t);
static int	ixl_media_change(struct ifnet *);
static void	ixl_media_status(struct ifnet *, struct ifmediareq *);
static void	ixl_watchdog(struct ifnet *);
static int	ixl_ioctl(struct ifnet *, u_long, caddr_t);
static void	ixl_start(struct ifqueue *);
static int	ixl_intr(void *);
static int	ixl_up(struct ixl_softc *);
static int	ixl_down(struct ixl_softc *);
static int	ixl_iff(struct ixl_softc *);

static struct ixl_tx_ring *
		ixl_txr_alloc(struct ixl_softc *, unsigned int);
static void	ixl_txr_qdis(struct ixl_softc *, struct ixl_tx_ring *, int);
static void	ixl_txr_config(struct ixl_softc *, struct ixl_tx_ring *);
static int	ixl_txr_enabled(struct ixl_softc *, struct ixl_tx_ring *);
static int	ixl_txr_disabled(struct ixl_softc *, struct ixl_tx_ring *);
static void	ixl_txr_unconfig(struct ixl_softc *, struct ixl_tx_ring *);
static void	ixl_txr_clean(struct ixl_softc *, struct ixl_tx_ring *);
static void	ixl_txr_free(struct ixl_softc *, struct ixl_tx_ring *);
static int	ixl_txeof(struct ixl_softc *, struct ifqueue *);

static struct ixl_rx_ring *
		ixl_rxr_alloc(struct ixl_softc *, unsigned int);
static void	ixl_rxr_config(struct ixl_softc *, struct ixl_rx_ring *);
static int	ixl_rxr_enabled(struct ixl_softc *, struct ixl_rx_ring *);
static int	ixl_rxr_disabled(struct ixl_softc *, struct ixl_rx_ring *);
static void	ixl_rxr_unconfig(struct ixl_softc *, struct ixl_rx_ring *);
static void	ixl_rxr_clean(struct ixl_softc *, struct ixl_rx_ring *);
static void	ixl_rxr_free(struct ixl_softc *, struct ixl_rx_ring *);
static int	ixl_rxeof(struct ixl_softc *, struct ifiqueue *);
static void	ixl_rxfill(struct ixl_softc *, struct ixl_rx_ring *);
static void	ixl_rxrefill(void *);

struct cfdriver ixl_cd = {
	NULL,
	"ixl",
	DV_IFNET,
};

struct cfattach ixl_ca = {
	sizeof(struct ixl_softc),
	ixl_match,
	ixl_attach,
};

static const struct ixl_phy_type ixl_phy_type_map[] = {
	{ 1ULL << IXL_PHY_TYPE_SGMII,		IFM_1000_SGMII },
	{ 1ULL << IXL_PHY_TYPE_1000BASE_KX,	IFM_1000_KX },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_KX4,	IFM_10G_KX4 },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_KR,	IFM_10G_KR },
	{ 1ULL << IXL_PHY_TYPE_40GBASE_KR4,	IFM_40G_KR4 },
	{ 1ULL << IXL_PHY_TYPE_XAUI |
	  1ULL << IXL_PHY_TYPE_XFI,		IFM_10G_CX4 },
	{ 1ULL << IXL_PHY_TYPE_SFI,		IFM_10G_SFI },
	{ 1ULL << IXL_PHY_TYPE_XLAUI |
	  1ULL << IXL_PHY_TYPE_XLPPI,		IFM_40G_XLPPI },
	{ 1ULL << IXL_PHY_TYPE_40GBASE_CR4_CU |
	  1ULL << IXL_PHY_TYPE_40GBASE_CR4,	IFM_40G_CR4 },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_CR1_CU |
	  1ULL << IXL_PHY_TYPE_10GBASE_CR1,	IFM_10G_CR1 },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_AOC,	IFM_10G_AOC },
	{ 1ULL << IXL_PHY_TYPE_40GBASE_AOC,	IFM_40G_AOC },
	{ 1ULL << IXL_PHY_TYPE_100BASE_TX,	IFM_100_TX },
	{ 1ULL << IXL_PHY_TYPE_1000BASE_T_OPTICAL |
	  1ULL << IXL_PHY_TYPE_1000BASE_T,	IFM_1000_T },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_T,	IFM_10G_T },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_SR,	IFM_10G_SR },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_LR,	IFM_10G_LR },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_SFPP_CU,	IFM_10G_SFP_CU },
	{ 1ULL << IXL_PHY_TYPE_40GBASE_SR4,	IFM_40G_SR4 },
	{ 1ULL << IXL_PHY_TYPE_40GBASE_LR4,	IFM_40G_LR4 },
	{ 1ULL << IXL_PHY_TYPE_1000BASE_SX,	IFM_1000_SX },
	{ 1ULL << IXL_PHY_TYPE_1000BASE_LX,	IFM_1000_LX },
	{ 1ULL << IXL_PHY_TYPE_20GBASE_KR2,	IFM_20G_KR2 },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_KR,	IFM_25G_KR },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_CR,	IFM_25G_CR },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_SR,	IFM_25G_SR },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_LR,	IFM_25G_LR },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_AOC,	IFM_25G_AOC },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_ACC,	IFM_25G_CR },
};

static const struct ixl_speed_type ixl_speed_type_map[] = {
	{ IXL_AQ_PHY_LINK_SPEED_40GB,		IF_Gbps(40) },
	{ IXL_AQ_PHY_LINK_SPEED_25GB,		IF_Gbps(25) },
	{ IXL_AQ_PHY_LINK_SPEED_20GB,		IF_Gbps(20) },
	{ IXL_AQ_PHY_LINK_SPEED_10GB,		IF_Gbps(10) },
	{ IXL_AQ_PHY_LINK_SPEED_1000MB,		IF_Mbps(1000) },
	{ IXL_AQ_PHY_LINK_SPEED_100MB,		IF_Mbps(100) },
};
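
/*
 * Illustrative sketch (assumed, not necessarily how the driver reports
 * its baudrate): look up an AQ link speed code in ixl_speed_type_map.
 */
static inline uint64_t
ixl_search_baudrate_example(uint8_t dev_speed)
{
	unsigned int i;

	for (i = 0; i < nitems(ixl_speed_type_map); i++) {
		if (ixl_speed_type_map[i].dev_speed == dev_speed)
			return (ixl_speed_type_map[i].net_speed);
	}

	return (0);
}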

static const struct ixl_aq_regs ixl_pf_aq_regs = {
	.atq_tail	= I40E_PF_ATQT,
	.atq_tail_mask	= I40E_PF_ATQT_ATQT_MASK,
	.atq_head	= I40E_PF_ATQH,
	.atq_head_mask	= I40E_PF_ATQH_ATQH_MASK,
	.atq_len	= I40E_PF_ATQLEN,
	.atq_bal	= I40E_PF_ATQBAL,
	.atq_bah	= I40E_PF_ATQBAH,
	.atq_len_enable	= I40E_PF_ATQLEN_ATQENABLE_MASK,

	.arq_tail	= I40E_PF_ARQT,
	.arq_tail_mask	= I40E_PF_ARQT_ARQT_MASK,
	.arq_head	= I40E_PF_ARQH,
	.arq_head_mask	= I40E_PF_ARQH_ARQH_MASK,
	.arq_len	= I40E_PF_ARQLEN,
	.arq_bal	= I40E_PF_ARQBAL,
	.arq_bah	= I40E_PF_ARQBAH,
	.arq_len_enable	= I40E_PF_ARQLEN_ARQENABLE_MASK,
};

#ifdef notyet
static const struct ixl_aq_regs ixl_vf_aq_regs = {
	.atq_tail	= I40E_VF_ATQT1,
	.atq_tail_mask	= I40E_VF_ATQT1_ATQT_MASK,
	.atq_head	= I40E_VF_ATQH1,
	.atq_head_mask	= I40E_VF_ATQH1_ATQH_MASK,
	.atq_len	= I40E_VF_ATQLEN1,
	.atq_bal	= I40E_VF_ATQBAL1,
	.atq_bah	= I40E_VF_ATQBAH1,
	.atq_len_enable	= I40E_VF_ATQLEN1_ATQENABLE_MASK,

	.arq_tail	= I40E_VF_ARQT1,
	.arq_tail_mask	= I40E_VF_ARQT1_ARQT_MASK,
	.arq_head	= I40E_VF_ARQH1,
	.arq_head_mask	= I40E_VF_ARQH1_ARQH_MASK,
	.arq_len	= I40E_VF_ARQLEN1,
	.arq_bal	= I40E_VF_ARQBAL1,
	.arq_bah	= I40E_VF_ARQBAH1,
	.arq_len_enable	= I40E_VF_ARQLEN1_ARQENABLE_MASK,
};
#endif

#define ixl_rd(_s, _r) \
	bus_space_read_4((_s)->sc_memt, (_s)->sc_memh, (_r))
#define ixl_wr(_s, _r, _v) \
	bus_space_write_4((_s)->sc_memt, (_s)->sc_memh, (_r), (_v))
#define ixl_barrier(_s, _r, _l, _o) \
	bus_space_barrier((_s)->sc_memt, (_s)->sc_memh, (_r), (_l), (_o))
#define ixl_intr_enable(_s) \
	ixl_wr((_s), I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_INTENA_MASK | \
	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | \
	    (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT))

#define ixl_nqueues(_sc)	(1 << (_sc)->sc_nqueues)

#ifdef __LP64__
#define ixl_dmamem_hi(_ixm)	(uint32_t)(IXL_DMA_DVA(_ixm) >> 32)
#else
#define ixl_dmamem_hi(_ixm)	0
#endif

#define ixl_dmamem_lo(_ixm)	(uint32_t)IXL_DMA_DVA(_ixm)

static inline void
ixl_aq_dva(struct ixl_aq_desc *iaq, bus_addr_t addr)
{
#ifdef __LP64__
	htolem32(&iaq->iaq_param[2], addr >> 32);
#else
	iaq->iaq_param[2] = htole32(0);
#endif
	htolem32(&iaq->iaq_param[3], addr);
}

#if _BYTE_ORDER == _BIG_ENDIAN
#define HTOLE16(_x)	(uint16_t)(((_x) & 0xff) << 8 | ((_x) & 0xff00) >> 8)
#else
#define HTOLE16(_x)	(_x)
#endif
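
/*
 * Unlike the htole16() function, HTOLE16() can be used in constant
 * initialisers, e.g. (illustrative only):
 *	static const uint16_t op = HTOLE16(IXL_AQ_OP_GET_VERSION);
 */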

static const struct pci_matchid ixl_devices[] = {
#ifdef notyet
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_VF },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_VF_HV },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X710_10G_SFP },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_40G_BP },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X710_10G_BP },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_QSFP_1 },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_QSFP_2 },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X710_10G_QSFP },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X710_10G_BASET },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_20G_BP_1 },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_20G_BP_2 },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X710_T4_10G },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XXV710_25G_BP },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XXV710_25G_SFP28 },
};

static int
ixl_match(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid(aux, ixl_devices, nitems(ixl_devices)));
}

static void
ixl_attach(struct device *parent, struct device *self, void *aux)
{
	struct ixl_softc *sc = (struct ixl_softc *)self;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct pci_attach_args *pa = aux;
	pcireg_t memtype;
	uint32_t port, ari, func;
	uint64_t phy_types = 0;
	int tries;

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;
	sc->sc_aq_regs = &ixl_pf_aq_regs; /* VF? */

	sc->sc_nqueues = 0; /* 1 << 0 is 1 queue */
	sc->sc_tx_ring_ndescs = 1024;
	sc->sc_rx_ring_ndescs = 1024;

	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, IXL_PCIREG);
	if (pci_mapreg_map(pa, IXL_PCIREG, memtype, 0,
	    &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems, 0)) {
		printf(": unable to map registers\n");
		return;
	}

	sc->sc_base_queue = (ixl_rd(sc, I40E_PFLAN_QALLOC) &
	    I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
	    I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
	printf(" %u", sc->sc_base_queue);

	ixl_clear_hw(sc);

	if (ixl_pf_reset(sc) == -1) {
		/* error printed by ixl_pf_reset */
		goto unmap;
	}

	port = ixl_rd(sc, I40E_PFGEN_PORTNUM);
	port &= I40E_PFGEN_PORTNUM_PORT_NUM_MASK;
	port >>= I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
	printf(": port %u", port);

	ari = ixl_rd(sc, I40E_GLPCI_CAPSUP);
	ari &= I40E_GLPCI_CAPSUP_ARI_EN_MASK;
	ari >>= I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;

	func = ixl_rd(sc, I40E_PF_FUNC_RID);

	sc->sc_pf_id = func & (ari ? 0xff : 0x7);
1378 
1379 	/* initialise the adminq */
1380 
1381 	mtx_init(&sc->sc_atq_mtx, IPL_NET);
1382 
1383 	if (ixl_dmamem_alloc(sc, &sc->sc_atq,
1384 	    sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
1385 		printf("\n" "%s: unable to allocate atq\n", DEVNAME(sc));
1386 		goto unmap;
1387 	}
1388 
1389 	SIMPLEQ_INIT(&sc->sc_arq_idle);
1390 	SIMPLEQ_INIT(&sc->sc_arq_live);
1391 	if_rxr_init(&sc->sc_arq_ring, 2, IXL_AQ_NUM - 1);
1392 	task_set(&sc->sc_arq_task, ixl_arq, sc);
1393 	sc->sc_arq_cons = 0;
1394 	sc->sc_arq_prod = 0;
1395 
1396 	if (ixl_dmamem_alloc(sc, &sc->sc_arq,
1397 	    sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
1398 		printf("\n" "%s: unable to allocate arq\n", DEVNAME(sc));
1399 		goto free_atq;
1400 	}
1401 
1402 	if (!ixl_arq_fill(sc)) {
1403 		printf("\n" "%s: unable to fill arq descriptors\n",
1404 		    DEVNAME(sc));
1405 		goto free_arq;
1406 	}
1407 
1408 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1409 	    0, IXL_DMA_LEN(&sc->sc_atq),
1410 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1411 
1412 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1413 	    0, IXL_DMA_LEN(&sc->sc_arq),
1414 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1415 
1416  	for (tries = 0; tries < 10; tries++) {
1417 		int rv;
1418 
1419 		sc->sc_atq_cons = 0;
1420 		sc->sc_atq_prod = 0;
1421 
1422 		ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1423 		ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1424 		ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1425 		ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1426 
1427 		ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
1428 
1429 		ixl_wr(sc, sc->sc_aq_regs->atq_bal,
1430 		    ixl_dmamem_lo(&sc->sc_atq));
1431 		ixl_wr(sc, sc->sc_aq_regs->atq_bah,
1432 		    ixl_dmamem_hi(&sc->sc_atq));
1433 		ixl_wr(sc, sc->sc_aq_regs->atq_len,
1434 		    sc->sc_aq_regs->atq_len_enable | IXL_AQ_NUM);
1435 
1436 		ixl_wr(sc, sc->sc_aq_regs->arq_bal,
1437 		    ixl_dmamem_lo(&sc->sc_arq));
1438 		ixl_wr(sc, sc->sc_aq_regs->arq_bah,
1439 		    ixl_dmamem_hi(&sc->sc_arq));
1440 		ixl_wr(sc, sc->sc_aq_regs->arq_len,
1441 		    sc->sc_aq_regs->arq_len_enable | IXL_AQ_NUM);
1442 
1443 		rv = ixl_get_version(sc);
1444 		if (rv == 0)
1445 			break;
1446 		if (rv != ETIMEDOUT) {
1447 			printf(", unable to get firmware version\n");
1448 			goto shutdown;
1449 		}
1450 
1451 		delaymsec(100);
1452 	}
1453 
1454 	ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
1455 
1456 	if (ixl_pxe_clear(sc) != 0) {
1457 		/* error printed by ixl_pxe_clear */
1458 		goto shutdown;
1459 	}
1460 
1461 	if (ixl_get_mac(sc) != 0) {
1462 		/* error printed by ixl_get_mac */
1463 		goto shutdown;
1464 	}
1465 
1466 	if (pci_intr_map_msi(pa, &sc->sc_ih) != 0 &&
1467 	    pci_intr_map(pa, &sc->sc_ih) != 0) {
1468 		printf(", unable to map interrupt\n");
1469 		goto shutdown;
1470 	}
1471 
1472 	printf(", %s, address %s\n", pci_intr_string(sc->sc_pc, sc->sc_ih),
1473 	    ether_sprintf(sc->sc_ac.ac_enaddr));
1474 
1475 	if (ixl_hmc(sc) != 0) {
1476 		/* error printed by ixl_hmc */
1477 		goto shutdown;
1478 	}
1479 
1480 	if (ixl_lldp_shut(sc) != 0) {
1481 		/* error printed by ixl_lldp_shut */
1482 		goto free_hmc;
1483 	}
1484 
1485 	if (ixl_phy_mask_ints(sc) != 0) {
1486 		/* error printed by ixl_phy_mask_ints */
1487 		goto free_hmc;
1488 	}
1489 
1490 	if (ixl_restart_an(sc) != 0) {
1491 		/* error printed by ixl_restart_an */
1492 		goto free_hmc;
1493 	}
1494 
1495 	if (ixl_get_switch_config(sc) != 0) {
1496 		/* error printed by ixl_get_switch_config */
1497 		goto free_hmc;
1498 	}
1499 
1500 	if (ixl_get_phy_abilities(sc, &phy_types) != 0) {
1501 		/* error printed by ixl_get_phy_abilities */
1502 		goto free_hmc;
1503 	}
1504 
1505 	if (ixl_get_link_status(sc) != 0) {
1506 		/* error printed by ixl_get_link_status */
1507 		goto free_hmc;
1508 	}
1509 
1510 	if (ixl_dmamem_alloc(sc, &sc->sc_vsi,
1511 	    sizeof(struct ixl_aq_vsi_data), 8) != 0) {
1512 		printf("%s: unable to allocate VSI data\n", DEVNAME(sc));
1513 		goto free_hmc;
1514 	}
1515 
1516 	if (ixl_get_vsi(sc) != 0) {
1517 		/* error printed by ixl_get_vsi */
1518 		goto free_vsi;
1519 	}
1520 
1521 	if (ixl_set_vsi(sc) != 0) {
1522 		/* error printed by ixl_set_vsi */
1523 		goto free_vsi;
1524 	}
1525 
1526 	sc->sc_ihc = pci_intr_establish(sc->sc_pc, sc->sc_ih,
1527 	    IPL_NET | IPL_MPSAFE, ixl_intr, sc, DEVNAME(sc));
1528 	if (sc->sc_ihc == NULL) {
1529 		printf("%s: unable to establish interrupt handler\n",
1530 		    DEVNAME(sc));
1531 		goto free_vsi;
1532 	}
1533 
1534 	ifp->if_softc = sc;
1535 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1536 	ifp->if_xflags = IFXF_MPSAFE;
1537 	ifp->if_ioctl = ixl_ioctl;
1538 	ifp->if_qstart = ixl_start;
1539 	ifp->if_watchdog = ixl_watchdog;
1540 	ifp->if_hardmtu = IXL_HARDMTU;
1541 	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
1542 	IFQ_SET_MAXLEN(&ifp->if_snd, 1);
1543 
1544 	ifp->if_capabilities = IFCAP_VLAN_MTU;
1545 #if 0
1546 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
1547 	ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
1548 	    IFCAP_CSUM_UDPv4;
1549 #endif
1550 
1551 	ifmedia_init(&sc->sc_media, 0, ixl_media_change, ixl_media_status);
1552 
1553 	ixl_media_add(sc, phy_types);
1554 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1555 	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
1556 
1557 	if_attach(ifp);
1558 	ether_ifattach(ifp);
1559 
1560 	if_attach_queues(ifp, ixl_nqueues(sc));
1561 	if_attach_iqueues(ifp, ixl_nqueues(sc));
1562 
1563 	ixl_wr(sc, I40E_PFINT_ICR0_ENA,
1564 	    I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK |
1565 	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK);
1566 	ixl_wr(sc, I40E_PFINT_STAT_CTL0,
1567 	    IXL_NOITR << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT);
1568 
1569 	ixl_intr_enable(sc);
1570 
1571 	return;
1572 
1573 free_vsi:
1574 	ixl_dmamem_free(sc, &sc->sc_vsi);
1575 free_hmc:
1576 	ixl_hmc_free(sc);
1577 shutdown:
1578 	ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1579 	ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1580 	ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1581 	ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1582 
1583 	ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0);
1584 	ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0);
1585 	ixl_wr(sc, sc->sc_aq_regs->atq_len, 0);
1586 
1587 	ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0);
1588 	ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0);
1589 	ixl_wr(sc, sc->sc_aq_regs->arq_len, 0);
1590 
1591 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1592 	    0, IXL_DMA_LEN(&sc->sc_arq),
1593 	    BUS_DMASYNC_POSTREAD);
1594 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1595 	    0, IXL_DMA_LEN(&sc->sc_atq),
1596 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1597 
1598 	ixl_arq_unfill(sc);
1599 free_arq:
1600 	ixl_dmamem_free(sc, &sc->sc_arq);
1601 free_atq:
1602 	ixl_dmamem_free(sc, &sc->sc_atq);
1603 unmap:
1604 	bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
1605 	sc->sc_mems = 0;
1606 }
1607 
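/*
 * Translate the PHY type bitmap the firmware reported into ifmedia
 * entries; ixl_phy_type_map pairs each PHY type bit with the
 * corresponding IFM_* subtype.
 */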
1608 static void
1609 ixl_media_add(struct ixl_softc *sc, uint64_t phy_types)
1610 {
1611 	struct ifmedia *ifm = &sc->sc_media;
1612 	const struct ixl_phy_type *itype;
1613 	unsigned int i;
1614 
1615 	for (i = 0; i < nitems(ixl_phy_type_map); i++) {
1616 		itype = &ixl_phy_type_map[i];
1617 
1618 		if (ISSET(phy_types, itype->phy_type))
1619 			ifmedia_add(ifm, IFM_ETHER | itype->ifm_type, 0, NULL);
1620 	}
1621 }
1622 
1623 static int
1624 ixl_media_change(struct ifnet *ifp)
1625 {
1626 	/* ignore? */
1627 	return (EOPNOTSUPP);
1628 }
1629 
1630 static void
1631 ixl_media_status(struct ifnet *ifp, struct ifmediareq *ifm)
1632 {
1633 	struct ixl_softc *sc = ifp->if_softc;
1634 
1635 	NET_ASSERT_LOCKED();
1636 
1637 	ifm->ifm_status = sc->sc_media_status;
1638 	ifm->ifm_active = sc->sc_media_active;
1639 }
1640 
1641 static void
1642 ixl_watchdog(struct ifnet *ifp)
1643 {
1644 
1645 }
1646 
1647 int
1648 ixl_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1649 {
1650 	struct ixl_softc *sc = (struct ixl_softc *)ifp->if_softc;
1651 	struct ifreq *ifr = (struct ifreq *)data;
1652 	int error = 0;
1653 
1654 	switch (cmd) {
1655 	case SIOCSIFADDR:
1656 		ifp->if_flags |= IFF_UP;
1657 		/* FALLTHROUGH */
1658 
1659 	case SIOCSIFFLAGS:
1660 		if (ISSET(ifp->if_flags, IFF_UP)) {
1661 			if (ISSET(ifp->if_flags, IFF_RUNNING))
1662 				error = ENETRESET;
1663 			else
1664 				error = ixl_up(sc);
1665 		} else {
1666 			if (ISSET(ifp->if_flags, IFF_RUNNING))
1667 				error = ixl_down(sc);
1668 		}
1669 		break;
1670 
1671 	case SIOCGIFMEDIA:
1672 	case SIOCSIFMEDIA:
1673 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
1674 		break;
1675 
1676 #if 0
1677 	case SIOCGIFRXR:
1678 		error = ixl_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
1679 		break;
1680 #endif
1681 
1682 	default:
1683 		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
1684 		break;
1685 	}
1686 
1687 	if (error == ENETRESET)
1688 		error = ixl_iff(sc);
1689 
1690 	return (error);
1691 }
1692 
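/*
 * HMC objects live in one large DMA allocation (sc_hmc_pd). Each
 * object type has a base offset and a fixed per-object size, so the
 * kva of object i is base + i * size. Returns NULL if i is beyond
 * the number of objects the hardware advertised for that type.
 */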
1693 static inline void *
1694 ixl_hmc_kva(struct ixl_softc *sc, unsigned int type, unsigned int i)
1695 {
1696 	uint8_t *kva = IXL_DMA_KVA(&sc->sc_hmc_pd);
1697 	struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];
1698 
1699 	if (i >= e->hmc_count)
1700 		return (NULL);
1701 
1702 	kva += e->hmc_base;
1703 	kva += i * e->hmc_size;
1704 
1705 	return (kva);
1706 }
1707 
1708 static inline size_t
1709 ixl_hmc_len(struct ixl_softc *sc, unsigned int type)
1710 {
1711 	struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];
1712 
1713 	return (e->hmc_size);
1714 }
1715 
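/*
 * Bring the interface up: ring allocation is the only step that can
 * fail, so it is done first. Each queue pair is then configured in
 * the HMC, the rx ring is filled, and the ENA_REQ bits are set; the
 * hardware acknowledges by setting the matching ENA_STAT bits, which
 * ixl_rxr_enabled()/ixl_txr_enabled() poll for. On success this
 * returns ENETRESET so the ioctl path goes on to call ixl_iff().
 */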
1716 static int
1717 ixl_up(struct ixl_softc *sc)
1718 {
1719 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1720 	struct ixl_rx_ring *rxr;
1721 	struct ixl_tx_ring *txr;
1722 	unsigned int nqueues, i;
1723 	uint32_t reg;
1724 	int rv = ENOMEM;
1725 
1726 	nqueues = ixl_nqueues(sc);
1727 	KASSERT(nqueues == 1); /* XXX */
1728 
1729 	/* allocation is the only thing that can fail, so do it up front */
1730 	for (i = 0; i < nqueues; i++) {
1731 		rxr = ixl_rxr_alloc(sc, i);
1732 		if (rxr == NULL)
1733 			goto free;
1734 
1735 		txr = ixl_txr_alloc(sc, i);
1736 		if (txr == NULL) {
1737 			ixl_rxr_free(sc, rxr);
1738 			goto free;
1739 		}
1740 
1741 		ifp->if_iqs[i]->ifiq_softc = rxr;
1742 		ifp->if_ifqs[i]->ifq_softc = txr;
1743 	}
1744 
1745 	/* XXX wait 50ms from completion of last RX queue disable */
1746 
1747 	for (i = 0; i < nqueues; i++) {
1748 		rxr = ifp->if_iqs[i]->ifiq_softc;
1749 		txr = ifp->if_ifqs[i]->ifq_softc;
1750 
1751 		ixl_txr_qdis(sc, txr, 1);
1752 
1753 		ixl_rxr_config(sc, rxr);
1754 		ixl_txr_config(sc, txr);
1755 
1756 		ixl_wr(sc, I40E_QTX_CTL(i), I40E_QTX_CTL_PF_QUEUE |
1757 		    (sc->sc_pf_id << I40E_QTX_CTL_PF_INDX_SHIFT));
1758 
1759 		ixl_wr(sc, rxr->rxr_tail, 0);
1760 		ixl_rxfill(sc, rxr);
1761 
1762 		reg = ixl_rd(sc, I40E_QRX_ENA(i));
1763 		SET(reg, I40E_QRX_ENA_QENA_REQ_MASK);
1764 		ixl_wr(sc, I40E_QRX_ENA(i), reg);
1765 
1766 		reg = ixl_rd(sc, I40E_QTX_ENA(i));
1767 		SET(reg, I40E_QTX_ENA_QENA_REQ_MASK);
1768 		ixl_wr(sc, I40E_QTX_ENA(i), reg);
1769 	}
1770 
1771 	for (i = 0; i < nqueues; i++) {
1772 		rxr = ifp->if_iqs[i]->ifiq_softc;
1773 		txr = ifp->if_ifqs[i]->ifq_softc;
1774 
1775 		if (ixl_rxr_enabled(sc, rxr) != 0)
1776 			goto down;
1777 
1778 		if (ixl_txr_enabled(sc, txr) != 0)
1779 			goto down;
1780 	}
1781 
1782 	SET(ifp->if_flags, IFF_RUNNING);
1783 
1784 #if 0
1785 	reg = ixl_rd(sc, I40E_QINT_RQCTL(I40E_INTR_NOTX_QUEUE));
1786 	SET(reg, I40E_QINT_RQCTL_CAUSE_ENA_MASK);
1787 	ixl_wr(sc, I40E_QINT_RQCTL(I40E_INTR_NOTX_QUEUE), reg);
1788 
1789 	reg = ixl_rd(sc, I40E_QINT_TQCTL(I40E_INTR_NOTX_QUEUE));
1790 	SET(reg, I40E_QINT_TQCTL_CAUSE_ENA_MASK);
1791 	ixl_wr(sc, I40E_QINT_TQCTL(I40E_INTR_NOTX_QUEUE), reg);
1792 #endif
1793 
1794 	ixl_wr(sc, I40E_PFINT_LNKLST0,
1795 	    (I40E_INTR_NOTX_QUEUE << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
1796 	    (I40E_QUEUE_TYPE_RX << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
1797 
1798 	ixl_wr(sc, I40E_QINT_RQCTL(I40E_INTR_NOTX_QUEUE),
1799 	    (I40E_INTR_NOTX_INTR << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
1800 	    (I40E_ITR_INDEX_RX << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
1801 	    (I40E_INTR_NOTX_RX_QUEUE << I40E_QINT_RQCTL_MSIX0_INDX_SHIFT) |
1802 	    (I40E_INTR_NOTX_QUEUE << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
1803 	    (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT));
1804 
1805 	ixl_wr(sc, I40E_QINT_TQCTL(I40E_INTR_NOTX_QUEUE),
1806 	    (I40E_INTR_NOTX_INTR << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
1807 	    (I40E_ITR_INDEX_TX << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
1808 	    (I40E_INTR_NOTX_TX_QUEUE << I40E_QINT_TQCTL_MSIX0_INDX_SHIFT) |
1809 	    (I40E_QUEUE_TYPE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
1810 	    (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT));
1811 
1812 	ixl_wr(sc, I40E_PFINT_ITR0(0), 0x7a);
1813 	ixl_wr(sc, I40E_PFINT_ITR0(1), 0x7a);
1814 	ixl_wr(sc, I40E_PFINT_ITR0(2), 0);
1815 
1816 	printf("%s: info %08x data %08x\n", DEVNAME(sc),
1817 	    ixl_rd(sc, I40E_PFHMC_ERRORINFO),
1818 	    ixl_rd(sc, I40E_PFHMC_ERRORDATA));
1819 
1820 	return (ENETRESET);
1821 
1822 free:
1823 	for (i = 0; i < nqueues; i++) {
1824 		rxr = ifp->if_iqs[i]->ifiq_softc;
1825 		txr = ifp->if_ifqs[i]->ifq_softc;
1826 
1827 		if (rxr == NULL) {
1828 			/*
1829 			 * tx and rx get set at the same time, so if one
1830 			 * is NULL, the other is too.
1831 			 */
1832 			continue;
1833 		}
1834 
1835 		ixl_txr_free(sc, txr);
1836 		ixl_rxr_free(sc, rxr);
1837 	}
1838 	return (rv);
1839 down:
1840 	ixl_down(sc);
1841 	return (ETIMEDOUT);
1842 }
1843 
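/*
 * Program the VSI rx filters with a SET VSI PROMISC admin queue
 * command. Note the IFF_PROMISC test is currently commented out, so
 * unicast and multicast promiscuous modes are always enabled.
 */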
1844 static int
1845 ixl_iff(struct ixl_softc *sc)
1846 {
1847 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1848 	struct ixl_atq iatq;
1849 	struct ixl_aq_desc *iaq;
1850 	struct ixl_aq_vsi_promisc_param *param;
1851 
1852 #if 0
1853 	if (!ISSET(ifp->if_flags, IFF_ALLMULTI))
1854 		return (0);
1855 #endif
1856 
1857 	if (!ISSET(ifp->if_flags, IFF_RUNNING))
1858 		return (0);
1859 
1860 	memset(&iatq, 0, sizeof(iatq));
1861 
1862 	iaq = &iatq.iatq_desc;
1863 	iaq->iaq_opcode = htole16(IXL_AQ_OP_SET_VSI_PROMISC);
1864 
1865 	param = (struct ixl_aq_vsi_promisc_param *)&iaq->iaq_param;
1866 	param->flags = htole16(IXL_AQ_VSI_PROMISC_FLAG_BCAST);
1867 //	if (ISSET(ifp->if_flags, IFF_PROMISC)) {
1868 		param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
1869 		    IXL_AQ_VSI_PROMISC_FLAG_MCAST);
1870 //	}
1871 	param->valid_flags = htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
1872 	    IXL_AQ_VSI_PROMISC_FLAG_MCAST | IXL_AQ_VSI_PROMISC_FLAG_BCAST);
1873 	param->seid = sc->sc_seid;
1874 
1875 	ixl_atq_exec(sc, &iatq, "ixliff");
1876 
1877 	if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK))
1878 		return (EIO);
1879 
1880 	return (0);
1881 }
1882 
1883 static int
1884 ixl_down(struct ixl_softc *sc)
1885 {
1886 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1887 	struct ixl_rx_ring *rxr;
1888 	struct ixl_tx_ring *txr;
1889 	unsigned int nqueues, i;
1890 	uint32_t reg;
1891 	int error = 0;
1892 
1893 	nqueues = ixl_nqueues(sc);
1894 
1895 	CLR(ifp->if_flags, IFF_RUNNING);
1896 
1897 	/* mask interrupts */
1898 	reg = ixl_rd(sc, I40E_QINT_RQCTL(I40E_INTR_NOTX_QUEUE));
1899 	CLR(reg, I40E_QINT_RQCTL_CAUSE_ENA_MASK);
1900 	ixl_wr(sc, I40E_QINT_RQCTL(I40E_INTR_NOTX_QUEUE), reg);
1901 
1902 	reg = ixl_rd(sc, I40E_QINT_TQCTL(I40E_INTR_NOTX_QUEUE));
1903 	CLR(reg, I40E_QINT_TQCTL_CAUSE_ENA_MASK);
1904 	ixl_wr(sc, I40E_QINT_TQCTL(I40E_INTR_NOTX_QUEUE), reg);
1905 
1906 	ixl_wr(sc, I40E_PFINT_LNKLST0, I40E_QUEUE_TYPE_EOL);
1907 
1908 	/* make sure no hw generated work is still in flight */
1909 	intr_barrier(sc->sc_ihc);
1910 	for (i = 0; i < nqueues; i++) {
1911 		rxr = ifp->if_iqs[i]->ifiq_softc;
1912 		txr = ifp->if_ifqs[i]->ifq_softc;
1913 
1914 		ixl_txr_qdis(sc, txr, 0);
1915 
1916 		ifiq_barrier(ifp->if_iqs[i]);
1917 		ifq_barrier(ifp->if_ifqs[i]);
1918 
1919 		if (!timeout_del(&rxr->rxr_refill))
1920 			timeout_barrier(&rxr->rxr_refill);
1921 	}
1922 
1923 	/* XXX wait at least 400 usec for all tx queues in one go */
1924 	delay(500);
1925 
1926 	for (i = 0; i < nqueues; i++) {
1927 		rxr = ifp->if_iqs[i]->ifiq_softc;
1928 		txr = ifp->if_ifqs[i]->ifq_softc;
1929 
1930 		reg = ixl_rd(sc, I40E_QTX_ENA(i));
1931 		CLR(reg, I40E_QTX_ENA_QENA_REQ_MASK);
1932 		ixl_wr(sc, I40E_QTX_ENA(i), reg);
1933 
1934 		reg = ixl_rd(sc, I40E_QRX_ENA(i));
1935 		CLR(reg, I40E_QRX_ENA_QENA_REQ_MASK);
1936 		ixl_wr(sc, I40E_QRX_ENA(i), reg);
1937 	}
1938 
1939 	for (i = 0; i < nqueues; i++) {
1940 		rxr = ifp->if_iqs[i]->ifiq_softc;
1941 		txr = ifp->if_ifqs[i]->ifq_softc;
1942 
1943 		if (ixl_txr_disabled(sc, txr) != 0)
1944 			error = ETIMEDOUT;
1945 
1946 		if (ixl_rxr_disabled(sc, rxr) != 0)
1947 			error = ETIMEDOUT;
1948 	}
1949 
1950 	if (error) {
1951 		printf("%s: info %08x data %08x\n", DEVNAME(sc),
1952 		    ixl_rd(sc, I40E_PFHMC_ERRORINFO),
1953 		    ixl_rd(sc, I40E_PFHMC_ERRORDATA));
1954 
1955 		printf("%s: failed to shut down rings\n", DEVNAME(sc));
1956 		return (error);
1957 	}
1958 
1959 	for (i = 0; i < nqueues; i++) {
1960 		rxr = ifp->if_iqs[i]->ifiq_softc;
1961 		txr = ifp->if_ifqs[i]->ifq_softc;
1962 
1963 		ixl_txr_unconfig(sc, txr);
1964 		ixl_rxr_unconfig(sc, rxr);
1965 
1966 		ixl_txr_clean(sc, txr);
1967 		ixl_rxr_clean(sc, rxr);
1968 
1969 		ixl_txr_free(sc, txr);
1970 		ixl_rxr_free(sc, rxr);
1971 
1972 		ifp->if_iqs[i]->ifiq_softc = NULL;
1973 		ifp->if_ifqs[i]->ifq_softc =  NULL;
1974 	}
1975 
1976 	return (0);
1977 }
1978 
1979 static struct ixl_tx_ring *
1980 ixl_txr_alloc(struct ixl_softc *sc, unsigned int qid)
1981 {
1982 	struct ixl_tx_ring *txr;
1983 	struct ixl_tx_map *maps, *txm;
1984 	unsigned int i;
1985 
1986 	txr = malloc(sizeof(*txr), M_DEVBUF, M_WAITOK|M_CANFAIL);
1987 	if (txr == NULL)
1988 		return (NULL);
1989 
1990 	maps = mallocarray(sc->sc_tx_ring_ndescs, sizeof(*maps),
1991 	    M_DEVBUF, M_WAITOK|M_CANFAIL|M_ZERO);
1992 	if (maps == NULL)
1993 		goto free;
1994 
1995 	if (ixl_dmamem_alloc(sc, &txr->txr_mem,
1996 	    sizeof(struct ixl_tx_desc) * sc->sc_tx_ring_ndescs,
1997 	    IXL_TX_QUEUE_ALIGN) != 0)
1998 		goto freemap;
1999 
2000 	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2001 		txm = &maps[i];
2002 
2003 		if (bus_dmamap_create(sc->sc_dmat,
2004 		    IXL_HARDMTU, IXL_TX_PKT_DESCS, IXL_HARDMTU, 0,
2005 		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
2006 		    &txm->txm_map) != 0)
2007 			goto uncreate;
2008 
2009 		txm->txm_eop = -1;
2010 		txm->txm_m = NULL;
2011 	}
2012 
2013 	txr->txr_cons = txr->txr_prod = 0;
2014 	txr->txr_maps = maps;
2015 
2016 	txr->txr_tail = I40E_QTX_TAIL(qid);
2017 	txr->txr_qid = qid;
2018 
2019 	return (txr);
2020 
2021 uncreate:
2022 	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2023 		txm = &maps[i];
2024 
2025 		if (txm->txm_map == NULL)
2026 			continue;
2027 
2028 		bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
2029 	}
2030 
2031 	ixl_dmamem_free(sc, &txr->txr_mem);
2032 freemap:
2033 	free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_tx_ring_ndescs);
2034 free:
2035 	free(txr, M_DEVBUF, sizeof(*txr));
2036 	return (NULL);
2037 }
2038 
2039 static void
2040 ixl_txr_qdis(struct ixl_softc *sc, struct ixl_tx_ring *txr, int enable)
2041 {
2042 	unsigned int qid;
2043 	bus_size_t reg;
2044 	uint32_t r;
2045 
2046 	qid = txr->txr_qid + sc->sc_base_queue;
2047 	reg = I40E_GLLAN_TXPRE_QDIS(qid / 128);
2048 	qid %= 128;
2049 
2050 	r = ixl_rd(sc, reg);
2051 	CLR(r, I40E_GLLAN_TXPRE_QDIS_QINDX_MASK);
2052 	SET(r, qid << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
2053 	SET(r, enable ? I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK :
2054 	    I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK);
2055 	ixl_wr(sc, reg, r);
2056 }
2057 
2058 static void
2059 ixl_txr_config(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2060 {
2061 	struct ixl_hmc_txq txq;
2062 	struct ixl_aq_vsi_data *data = IXL_DMA_KVA(&sc->sc_vsi);
2063 	void *hmc;
2064 
2065 	memset(&txq, 0, sizeof(txq));
2066 	txq.head = htole16(0);
2067 	txq.new_context = 1;
2068 	htolem64(&txq.base,
2069 	    IXL_DMA_DVA(&txr->txr_mem) / IXL_HMC_TXQ_BASE_UNIT);
2070 	txq.head_wb_ena = IXL_HMC_TXQ_DESC_WB;
2071 	htolem16(&txq.qlen, sc->sc_tx_ring_ndescs);
2072 	txq.tphrdesc_ena = 0;
2073 	txq.tphrpacket_ena = 0;
2074 	txq.tphwdesc_ena = 0;
2075 	txq.rdylist = data->qs_handle[0];
2076 
2077 	hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
2078 	memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX));
2079 	ixl_hmc_pack(hmc, &txq, ixl_hmc_pack_txq, nitems(ixl_hmc_pack_txq));
2080 }
2081 
2082 static void
2083 ixl_txr_unconfig(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2084 {
2085 	void *hmc;
2086 
2087 	hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
2088 	memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX));
2089 }
2090 
2091 static void
2092 ixl_txr_clean(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2093 {
2094 	struct ixl_tx_map *maps, *txm;
2095 	bus_dmamap_t map;
2096 	unsigned int i;
2097 
2098 	maps = txr->txr_maps;
2099 	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2100 		txm = &maps[i];
2101 
2102 		if (txm->txm_m == NULL)
2103 			continue;
2104 
2105 		map = txm->txm_map;
2106 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2107 		    BUS_DMASYNC_POSTWRITE);
2108 		bus_dmamap_unload(sc->sc_dmat, map);
2109 
2110 		m_freem(txm->txm_m);
2111 		txm->txm_m = NULL;
2112 	}
2113 }
2114 
2115 static int
2116 ixl_txr_enabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2117 {
2118 	bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
2119 	uint32_t reg;
2120 	int i;
2121 
2122 	for (i = 0; i < 10; i++) {
2123 		reg = ixl_rd(sc, ena);
2124 		if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK))
2125 			return (0);
2126 
2127 		delaymsec(10);
2128 	}
2129 
2130 	return (ETIMEDOUT);
2131 }
2132 
2133 static int
2134 ixl_txr_disabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2135 {
2136 	bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
2137 	uint32_t reg;
2138 	int i;
2139 
2140 	for (i = 0; i < 20; i++) {
2141 		reg = ixl_rd(sc, ena);
2142 		if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK) == 0)
2143 			return (0);
2144 
2145 		delaymsec(10);
2146 	}
2147 
2148 	return (ETIMEDOUT);
2149 }
2150 
2151 static void
2152 ixl_txr_free(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2153 {
2154 	struct ixl_tx_map *maps, *txm;
2155 	unsigned int i;
2156 
2157 	maps = txr->txr_maps;
2158 	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2159 		txm = &maps[i];
2160 
2161 		bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
2162 	}
2163 
2164 	ixl_dmamem_free(sc, &txr->txr_mem);
2165 	free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_tx_ring_ndescs);
2166 	free(txr, M_DEVBUF, sizeof(*txr));
2167 }
2168 
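/*
 * Load an mbuf chain into a DMA map. If the chain has more segments
 * than the map allows (EFBIG), compact it with m_defrag() and retry
 * the load once.
 */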
2169 static inline int
2170 ixl_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m)
2171 {
2172 	int error;
2173 
2174 	error = bus_dmamap_load_mbuf(dmat, map, m,
2175 	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT);
2176 	if (error != EFBIG || m_defrag(m, M_DONTWAIT) != 0)
2177 		return (error);
2178 
2179 	return (bus_dmamap_load_mbuf(dmat, map, m,
2180 	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT));
2181 }
2182 
2183 static void
2184 ixl_start(struct ifqueue *ifq)
2185 {
2186 	struct ifnet *ifp = ifq->ifq_if;
2187 	struct ixl_softc *sc = ifp->if_softc;
2188 	struct ixl_tx_ring *txr = ifq->ifq_softc;
2189 	struct ixl_tx_desc *ring, *txd;
2190 	struct ixl_tx_map *txm;
2191 	bus_dmamap_t map;
2192 	struct mbuf *m;
2193 	uint64_t cmd;
2194 	unsigned int prod, free, last, i;
2195 	unsigned int mask;
2196 	int post = 0;
2197 #if NBPFILTER > 0
2198 	caddr_t if_bpf;
2199 #endif
2200 
2201 	if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
2202 		ifq_purge(ifq);
2203 		return;
2204 	}
2205 
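	/*
	 * Count the free descriptors between prod and cons, e.g. with a
	 * 512 entry ring, prod = 510 and cons = 2 leaves
	 * free = 2 + 512 - 510 = 4 slots available for new packets.
	 */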
2206 	prod = txr->txr_prod;
2207 	free = txr->txr_cons;
2208 	if (free <= prod)
2209 		free += sc->sc_tx_ring_ndescs;
2210 	free -= prod;
2211 
2212 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2213 	    0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTWRITE);
2214 
2215 	ring = IXL_DMA_KVA(&txr->txr_mem);
2216 	mask = sc->sc_tx_ring_ndescs - 1;
2217 
2218 	for (;;) {
2219 		if (free <= IXL_TX_PKT_DESCS) {
2220 			ifq_set_oactive(ifq);
2221 			break;
2222 		}
2223 
2224 		m = ifq_dequeue(ifq);
2225 		if (m == NULL)
2226 			break;
2227 
2228 		txm = &txr->txr_maps[prod];
2229 		map = txm->txm_map;
2230 
2231 		if (ixl_load_mbuf(sc->sc_dmat, map, m) != 0) {
2232 			m_freem(m);
2233 			continue;
2234 		}
2235 
2236 		bus_dmamap_sync(sc->sc_dmat, map, 0,
2237 		    map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2238 
2239 		for (i = 0; i < map->dm_nsegs; i++) {
2240 			txd = &ring[prod];
2241 
2242 			cmd = (uint64_t)map->dm_segs[i].ds_len <<
2243 			    IXL_TX_DESC_BSIZE_SHIFT;
2244 			cmd |= IXL_TX_DESC_DTYPE_DATA | IXL_TX_DESC_CMD_ICRC;
2245 
2246 			htolem64(&txd->addr, map->dm_segs[i].ds_addr);
2247 			htolem64(&txd->cmd, cmd);
2248 
2249 			last = prod;
2250 
2251 			prod++;
2252 			prod &= mask;
2253 		}
2254 		cmd |= IXL_TX_DESC_CMD_EOP | IXL_TX_DESC_CMD_RS;
2255 		htolem64(&txd->cmd, cmd);
2256 
2257 		txm->txm_m = m;
2258 		txm->txm_eop = last;
2259 
2260 #if NBPFILTER > 0
2261 		if_bpf = ifp->if_bpf;
2262 		if (if_bpf)
2263 			bpf_mtap_ether(if_bpf, m, BPF_DIRECTION_OUT);
2264 #endif
2265 
2266 		free -= i;
2267 		post = 1;
2268 	}
2269 
2270 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2271 	    0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREWRITE);
2272 
2273 	if (post) {
2274 		txr->txr_prod = prod;
2275 		ixl_wr(sc, txr->txr_tail, prod);
2276 	}
2277 }
2278 
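/*
 * Transmit completion. The hardware rewrites the DTYPE field of a
 * descriptor to DONE once it has been sent, so walk from cons towards
 * prod checking each packet's EOP descriptor, and unload and free the
 * mbufs of completed packets.
 */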
2279 static int
2280 ixl_txeof(struct ixl_softc *sc, struct ifqueue *ifq)
2281 {
2282 	struct ixl_tx_ring *txr = ifq->ifq_softc;
2283 	struct ixl_tx_desc *ring, *txd;
2284 	struct ixl_tx_map *txm;
2285 	bus_dmamap_t map;
2286 	unsigned int cons, prod, last;
2287 	unsigned int mask;
2288 	uint64_t dtype;
2289 	int done = 0;
2290 
2291 	prod = txr->txr_prod;
2292 	cons = txr->txr_cons;
2293 
2294 	if (cons == prod)
2295 		return (0);
2296 
2297 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2298 	    0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTREAD);
2299 
2300 	ring = IXL_DMA_KVA(&txr->txr_mem);
2301 	mask = sc->sc_tx_ring_ndescs - 1;
2302 
2303 	do {
2304 		txm = &txr->txr_maps[cons];
2305 		last = txm->txm_eop;
2306 		txd = &ring[last];
2307 
2308 		dtype = txd->cmd & htole64(IXL_TX_DESC_DTYPE_MASK);
2309 		if (dtype != htole64(IXL_TX_DESC_DTYPE_DONE))
2310 			break;
2311 
2312 		map = txm->txm_map;
2313 
2314 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2315 		    BUS_DMASYNC_POSTWRITE);
2316 		bus_dmamap_unload(sc->sc_dmat, map);
2317 		m_freem(txm->txm_m);
2318 
2319 		txm->txm_m = NULL;
2320 		txm->txm_eop = -1;
2321 
2322 		cons = last + 1;
2323 		cons &= mask;
2324 
2325 		done = 1;
2326 	} while (cons != prod);
2327 
2328 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2329 	    0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREREAD);
2330 
2331 	txr->txr_cons = cons;
2332 
2333 	//ixl_enable(sc, txr->txr_msix);
2334 
2335 	if (ifq_is_oactive(ifq))
2336 		ifq_restart(ifq);
2337 
2338 	return (done);
2339 }
2340 
2341 static struct ixl_rx_ring *
2342 ixl_rxr_alloc(struct ixl_softc *sc, unsigned int qid)
2343 {
2344 	struct ixl_rx_ring *rxr;
2345 	struct ixl_rx_map *maps, *rxm;
2346 	unsigned int i;
2347 
2348 	rxr = malloc(sizeof(*rxr), M_DEVBUF, M_WAITOK|M_CANFAIL);
2349 	if (rxr == NULL)
2350 		return (NULL);
2351 
2352 	maps = mallocarray(sc->sc_rx_ring_ndescs, sizeof(*maps),
2353 	    M_DEVBUF, M_WAITOK|M_CANFAIL|M_ZERO);
2354 	if (maps == NULL)
2355 		goto free;
2356 
2357 	if (ixl_dmamem_alloc(sc, &rxr->rxr_mem,
2358 	    sizeof(struct ixl_rx_rd_desc_16) * sc->sc_rx_ring_ndescs,
2359 	    IXL_RX_QUEUE_ALIGN) != 0)
2360 		goto freemap;
2361 
2362 	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2363 		rxm = &maps[i];
2364 
2365 		if (bus_dmamap_create(sc->sc_dmat,
2366 		    IXL_HARDMTU, 1, IXL_HARDMTU, 0,
2367 		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
2368 		    &rxm->rxm_map) != 0)
2369 			goto uncreate;
2370 
2371 		rxm->rxm_m = NULL;
2372 	}
2373 
2374 	rxr->rxr_sc = sc;
2375 	if_rxr_init(&rxr->rxr_acct, 17, sc->sc_rx_ring_ndescs - 1);
2376 	timeout_set(&rxr->rxr_refill, ixl_rxrefill, rxr);
2377 	rxr->rxr_cons = rxr->rxr_prod = 0;
2378 	rxr->rxr_m_head = NULL;
2379 	rxr->rxr_m_tail = &rxr->rxr_m_head;
2380 	rxr->rxr_maps = maps;
2381 
2382 	rxr->rxr_tail = I40E_QRX_TAIL(qid);
2383 	rxr->rxr_qid = qid;
2384 
2385 	return (rxr);
2386 
2387 uncreate:
2388 	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2389 		rxm = &maps[i];
2390 
2391 		if (rxm->rxm_map == NULL)
2392 			continue;
2393 
2394 		bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
2395 	}
2396 
2397 	ixl_dmamem_free(sc, &rxr->rxr_mem);
2398 freemap:
2399 	free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_rx_ring_ndescs);
2400 free:
2401 	free(rxr, M_DEVBUF, sizeof(*rxr));
2402 	return (NULL);
2403 }
2404 
2405 static void
2406 ixl_rxr_clean(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2407 {
2408 	struct ixl_rx_map *maps, *rxm;
2409 	bus_dmamap_t map;
2410 	unsigned int i;
2411 
2412 	if (!timeout_del(&rxr->rxr_refill))
2413 		timeout_barrier(&rxr->rxr_refill);
2414 
2415 	maps = rxr->rxr_maps;
2416 	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2417 		rxm = &maps[i];
2418 
2419 		if (rxm->rxm_m == NULL)
2420 			continue;
2421 
2422 		map = rxm->rxm_map;
2423 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2424 		    BUS_DMASYNC_POSTWRITE);
2425 		bus_dmamap_unload(sc->sc_dmat, map);
2426 
2427 		m_freem(rxm->rxm_m);
2428 		rxm->rxm_m = NULL;
2429 	}
2430 
2431 	m_freem(rxr->rxr_m_head);
2432 	rxr->rxr_m_head = NULL;
2433 	rxr->rxr_m_tail = &rxr->rxr_m_head;
2434 
2435 	rxr->rxr_prod = rxr->rxr_cons = 0;
2436 }
2437 
2438 static int
2439 ixl_rxr_enabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2440 {
2441 	bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid);
2442 	uint32_t reg;
2443 	int i;
2444 
2445 	for (i = 0; i < 10; i++) {
2446 		reg = ixl_rd(sc, ena);
2447 		if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK))
2448 			return (0);
2449 
2450 		delaymsec(10);
2451 	}
2452 
2453 	return (ETIMEDOUT);
2454 }
2455 
2456 static int
2457 ixl_rxr_disabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2458 {
2459 	bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid);
2460 	uint32_t reg;
2461 	int i;
2462 
2463 	for (i = 0; i < 20; i++) {
2464 		reg = ixl_rd(sc, ena);
2465 		if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK) == 0)
2466 			return (0);
2467 
2468 		delaymsec(10);
2469 	}
2470 
2471 	return (ETIMEDOUT);
2472 }
2473 
2474 static void
2475 ixl_rxr_config(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2476 {
2477 	struct ixl_hmc_rxq rxq;
2478 	void *hmc;
2479 
2480 	memset(&rxq, 0, sizeof(rxq));
2481 
2482 	rxq.head = htole16(0);
2483 	htolem64(&rxq.base,
2484 	    IXL_DMA_DVA(&rxr->rxr_mem) / IXL_HMC_RXQ_BASE_UNIT);
2485 	htolem16(&rxq.qlen, sc->sc_rx_ring_ndescs);
2486 	rxq.dbuff = htole16(MCLBYTES / IXL_HMC_RXQ_DBUFF_UNIT);
2487 	rxq.hbuff = 0;
2488 	rxq.dtype = IXL_HMC_RXQ_DTYPE_NOSPLIT;
2489 	rxq.dsize = IXL_HMC_RXQ_DSIZE_16;
2490 	rxq.crcstrip = 1;
2491 	rxq.l2sel = 0;
2492 	rxq.showiv = 0;
2493 	rxq.rxmax = htole16(MCLBYTES); /* XXX */
2494 	rxq.tphrdesc_ena = 0;
2495 	rxq.tphwdesc_ena = 0;
2496 	rxq.tphdata_ena = 0;
2497 	rxq.tphhead_ena = 0;
2498 	rxq.lrxqthresh = 0;
2499 	rxq.prefena = 1;
2500 
2501 	hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid);
2502 	memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX));
2503 	ixl_hmc_pack(hmc, &rxq, ixl_hmc_pack_rxq, nitems(ixl_hmc_pack_rxq));
2504 }
2505 
2506 static void
2507 ixl_rxr_unconfig(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2508 {
2509 	void *hmc;
2510 
2511 	hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid);
2512 	memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX));
2513 }
2514 
2515 static void
2516 ixl_rxr_free(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2517 {
2518 	struct ixl_rx_map *maps, *rxm;
2519 	unsigned int i;
2520 
2521 	maps = rxr->rxr_maps;
2522 	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2523 		rxm = &maps[i];
2524 
2525 		bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
2526 	}
2527 
2528 	ixl_dmamem_free(sc, &rxr->rxr_mem);
2529 	free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_rx_ring_ndescs);
2530 	free(rxr, M_DEVBUF, sizeof(*rxr));
2531 }
2532 
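/*
 * Receive completion. Descriptors are reported back in the 16 byte
 * write-back format; a set DD bit means the descriptor is done.
 * Fragments of a packet are chained through rxr_m_head/rxr_m_tail
 * until a descriptor with EOP set completes the packet.
 */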
2533 static int
2534 ixl_rxeof(struct ixl_softc *sc, struct ifiqueue *ifiq)
2535 {
2536 	struct ixl_rx_ring *rxr = ifiq->ifiq_softc;
2537 	struct ifnet *ifp = &sc->sc_ac.ac_if;
2538 	struct ixl_rx_wb_desc_16 *ring, *rxd;
2539 	struct ixl_rx_map *rxm;
2540 	bus_dmamap_t map;
2541 	unsigned int cons, prod;
2542 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
2543 	struct mbuf *m;
2544 	uint64_t word;
2545 	unsigned int len;
2546 	unsigned int mask;
2547 	int done = 0;
2548 
2549 	prod = rxr->rxr_prod;
2550 	cons = rxr->rxr_cons;
2551 
2552 	if (cons == prod)
2553 		return (0);
2554 
2555 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
2556 	    0, IXL_DMA_LEN(&rxr->rxr_mem),
2557 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2558 
2559 	ring = IXL_DMA_KVA(&rxr->rxr_mem);
2560 	mask = sc->sc_rx_ring_ndescs - 1;
2561 
2562 	do {
2563 		rxd = &ring[cons];
2564 
2565 		word = lemtoh64(&rxd->qword1);
2566 		if (!ISSET(word, IXL_RX_DESC_DD))
2567 			break;
2568 
2569 		if_rxr_put(&rxr->rxr_acct, 1);
2570 
2571 		rxm = &rxr->rxr_maps[cons];
2572 
2573 		map = rxm->rxm_map;
2574 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2575 		    BUS_DMASYNC_POSTREAD);
2576 		bus_dmamap_unload(sc->sc_dmat, map);
2577 
2578 		m = rxm->rxm_m;
2579 		rxm->rxm_m = NULL;
2580 
2581 		len = (word & IXL_RX_DESC_PLEN_MASK) >> IXL_RX_DESC_PLEN_SHIFT;
2582 		m->m_len = len;
2583 		m->m_pkthdr.len = 0;
2584 
2585 		m->m_next = NULL;
2586 		*rxr->rxr_m_tail = m;
2587 		rxr->rxr_m_tail = &m->m_next;
2588 
2589 		m = rxr->rxr_m_head;
2590 		m->m_pkthdr.len += len;
2591 
2592 		if (ISSET(word, IXL_RX_DESC_EOP)) {
2593 			if (!ISSET(word,
2594 			    IXL_RX_DESC_RXE | IXL_RX_DESC_OVERSIZE)) {
2595 				ml_enqueue(&ml, m);
2596 			} else {
2597 				ifp->if_ierrors++; /* XXX */
2598 				m_freem(m);
2599 			}
2600 
2601 			rxr->rxr_m_head = NULL;
2602 			rxr->rxr_m_tail = &rxr->rxr_m_head;
2603 		}
2604 
2605 		cons++;
2606 		cons &= mask;
2607 
2608 		done = 1;
2609 	} while (cons != prod);
2610 
2611 	if (done) {
2612 		rxr->rxr_cons = cons;
2613 		ixl_rxfill(sc, rxr);
2614 		if_input(ifp, &ml);
2615 	}
2616 
2617 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
2618 	    0, IXL_DMA_LEN(&rxr->rxr_mem),
2619 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2620 
2621 	return (done);
2622 }
2623 
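/*
 * Post receive buffers. if_rxr_get() limits how many clusters may be
 * in flight; if nothing at all could be posted, schedule
 * ixl_rxrefill() via a timeout to try again, otherwise advance the
 * ring tail to hand the new buffers to the hardware.
 */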
2624 static void
2625 ixl_rxfill(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2626 {
2627 	struct ixl_rx_rd_desc_16 *ring, *rxd;
2628 	struct ixl_rx_map *rxm;
2629 	bus_dmamap_t map;
2630 	struct mbuf *m;
2631 	unsigned int prod;
2632 	unsigned int slots;
2633 	unsigned int mask;
2634 	int post = 0;
2635 
2636 	slots = if_rxr_get(&rxr->rxr_acct, sc->sc_rx_ring_ndescs);
2637 	if (slots == 0)
2638 		return;
2639 
2640 	prod = rxr->rxr_prod;
2641 
2642 	ring = IXL_DMA_KVA(&rxr->rxr_mem);
2643 	mask = sc->sc_rx_ring_ndescs - 1;
2644 
2645 	do {
2646 		rxm = &rxr->rxr_maps[prod];
2647 
2648 		m = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES + ETHER_ALIGN);
2649 		if (m == NULL)
2650 			break;
2651 		m->m_len = m->m_pkthdr.len = MCLBYTES + ETHER_ALIGN;
2652 		m_adj(m, ETHER_ALIGN);
2653 
2654 		map = rxm->rxm_map;
2655 
2656 		if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
2657 		    BUS_DMA_NOWAIT) != 0) {
2658 			m_freem(m);
2659 			break;
2660 		}
2661 
2662 		rxm->rxm_m = m;
2663 
2664 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2665 		    BUS_DMASYNC_PREREAD);
2666 
2667 		rxd = &ring[prod];
2668 
2669 		htolem64(&rxd->paddr, map->dm_segs[0].ds_addr);
2670 		rxd->haddr = htole64(0);
2671 
2672 		prod++;
2673 		prod &= mask;
2674 
2675 		post = 1;
2676 	} while (--slots);
2677 
2678 	if_rxr_put(&rxr->rxr_acct, slots);
2679 
2680 	if (if_rxr_inuse(&rxr->rxr_acct) == 0)
2681 		timeout_add(&rxr->rxr_refill, 1);
2682 	else if (post) {
2683 		rxr->rxr_prod = prod;
2684 		ixl_wr(sc, rxr->rxr_tail, prod);
2685 	}
2686 }
2687 
2688 void
2689 ixl_rxrefill(void *arg)
2690 {
2691 	struct ixl_rx_ring *rxr = arg;
2692 	struct ixl_softc *sc = rxr->rxr_sc;
2693 
2694 	ixl_rxfill(sc, rxr);
2695 }
2696 
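/*
 * Interrupt handler for the single (MSI or legacy) vector: ICR0
 * reports admin queue events and the notx rx/tx queue causes. Admin
 * transmit completions are handled directly; admin receive queue
 * processing is deferred to a task on systq.
 */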
2697 static int
2698 ixl_intr(void *xsc)
2699 {
2700 	struct ixl_softc *sc = xsc;
2701 	struct ifnet *ifp = &sc->sc_ac.ac_if;
2702 	uint32_t icr;
2703 	int rv = 0;
2704 
2705 	icr = ixl_rd(sc, I40E_PFINT_ICR0);
2706 
2707 	if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)) {
2708 		ixl_atq_done(sc);
2709 		task_add(systq, &sc->sc_arq_task);
2710 		rv = 1;
2711 	}
2712 
2713 	if (ISSET(icr, I40E_INTR_NOTX_RX_MASK))
2714 		rv |= ixl_rxeof(sc, ifp->if_iqs[0]);
2715 	if (ISSET(icr, I40E_INTR_NOTX_TX_MASK))
2716 		rv |= ixl_txeof(sc, ifp->if_ifqs[0]);
2717 
2718 	return (rv);
2719 }
2720 
2721 static void
2722 ixl_arq_link_status(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
2723 {
2724 	struct ifnet *ifp = &sc->sc_ac.ac_if;
2725 	int link_state;
2726 
2727 	NET_LOCK();
2728 	link_state = ixl_set_link_status(sc, iaq);
2729 	if (ifp->if_link_state != link_state) {
2730 		ifp->if_link_state = link_state;
2731 		if_link_state_change(ifp);
2732 	}
2733 	NET_UNLOCK();
2734 }
2735 
2736 #if 0
2737 static void
2738 ixl_aq_dump(const struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
2739 {
2740 	printf("%s: flags %b opcode %04x\n", DEVNAME(sc),
2741 	    lemtoh16(&iaq->iaq_flags), IXL_AQ_FLAGS_FMT,
2742 	    lemtoh16(&iaq->iaq_opcode));
2743 	printf("%s: datalen %u retval %u\n", DEVNAME(sc),
2744 	    lemtoh16(&iaq->iaq_datalen), lemtoh16(&iaq->iaq_retval));
2745 	printf("%s: cookie %016llx\n", DEVNAME(sc), iaq->iaq_cookie);
2746 	printf("%s: %08x %08x %08x %08x\n", DEVNAME(sc),
2747 	    lemtoh32(&iaq->iaq_param[0]), lemtoh32(&iaq->iaq_param[1]),
2748 	    lemtoh32(&iaq->iaq_param[2]), lemtoh32(&iaq->iaq_param[3]));
2749 }
2750 #endif
2751 
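/*
 * Process the admin receive queue: firmware posts events (currently
 * only link status changes are acted on) into buffers we supplied.
 * Consumed buffers are recycled onto the idle list and the queue is
 * refilled before the interrupt is re-enabled.
 */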
2752 static void
2753 ixl_arq(void *xsc)
2754 {
2755 	struct ixl_softc *sc = xsc;
2756 	struct ixl_aq_desc *arq, *iaq;
2757 	struct ixl_aq_buf *aqb;
2758 	unsigned int cons = sc->sc_arq_cons;
2759 	unsigned int prod;
2760 	int done = 0;
2761 
2762 	prod = ixl_rd(sc, sc->sc_aq_regs->arq_head) &
2763 	    sc->sc_aq_regs->arq_head_mask;
2764 
2765 	if (cons == prod)
2766 		goto done;
2767 
2768 	arq = IXL_DMA_KVA(&sc->sc_arq);
2769 
2770 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
2771 	    0, IXL_DMA_LEN(&sc->sc_arq),
2772 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2773 
2774 	do {
2775 		iaq = &arq[cons];
2776 
2777 		aqb = SIMPLEQ_FIRST(&sc->sc_arq_live);
2778 		bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN,
2779 		    BUS_DMASYNC_POSTREAD);
2780 
2781 		switch (iaq->iaq_opcode) {
2782 		case HTOLE16(IXL_AQ_OP_PHY_LINK_STATUS):
2783 			ixl_arq_link_status(sc, iaq);
2784 			break;
2785 		}
2786 
2787 		memset(iaq, 0, sizeof(*iaq));
2788 		SIMPLEQ_INSERT_TAIL(&sc->sc_arq_idle, aqb, aqb_entry);
2789 		if_rxr_put(&sc->sc_arq_ring, 1);
2790 
2791 		cons++;
2792 		cons &= IXL_AQ_MASK;
2793 
2794 		done = 1;
2795 	} while (cons != prod);
2796 
2797 	if (done && ixl_arq_fill(sc))
2798 		ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
2799 
2800 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
2801 	    0, IXL_DMA_LEN(&sc->sc_arq),
2802 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2803 
2804 	sc->sc_arq_cons = cons;
2805 
2806 done:
2807 	ixl_intr_enable(sc);
2808 }
2809 
2810 static void
2811 ixl_atq_set(struct ixl_atq *iatq,
2812     void (*fn)(struct ixl_softc *, void *), void *arg)
2813 {
2814 	iatq->iatq_fn = fn;
2815 	iatq->iatq_arg = arg;
2816 }
2817 
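/*
 * Post a command to the admin transmit queue. The descriptor's
 * cookie carries the struct ixl_atq pointer so that ixl_atq_done()
 * can find the command again and invoke its completion function.
 */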
2818 static void
2819 ixl_atq_post(struct ixl_softc *sc, struct ixl_atq *iatq)
2820 {
2821 	struct ixl_aq_desc *atq, *slot;
2822 	unsigned int prod;
2823 
2824 	/* assert locked */
2825 
2826 	atq = IXL_DMA_KVA(&sc->sc_atq);
2827 	prod = sc->sc_atq_prod;
2828 	slot = atq + prod;
2829 
2830 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
2831 	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
2832 
2833 	*slot = iatq->iatq_desc;
2834 	slot->iaq_cookie = (uint64_t)iatq;
2835 
2836 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
2837 	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
2838 
2839 	prod++;
2840 	prod &= IXL_AQ_MASK;
2841 	sc->sc_atq_prod = prod;
2842 	ixl_wr(sc, sc->sc_aq_regs->atq_tail, prod);
2843 }
2844 
2845 static void
2846 ixl_atq_done(struct ixl_softc *sc)
2847 {
2848 	struct ixl_atq_list cmds = SIMPLEQ_HEAD_INITIALIZER(cmds);
2849 	struct ixl_aq_desc *atq, *slot;
2850 	struct ixl_atq *iatq;
2851 	unsigned int cons;
2852 	unsigned int prod;
2853 
2854 	prod = sc->sc_atq_prod;
2855 	cons = sc->sc_atq_cons;
2856 
2857 	if (prod == cons)
2858 		return;
2859 
2860 	atq = IXL_DMA_KVA(&sc->sc_atq);
2861 
2862 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
2863 	    0, IXL_DMA_LEN(&sc->sc_atq),
2864 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2865 
2866 	do {
2867 		slot = &atq[cons];
2868 
2869 		iatq = (struct ixl_atq *)slot->iaq_cookie;
2870 		iatq->iatq_desc = *slot;
2871 		SIMPLEQ_INSERT_TAIL(&cmds, iatq, iatq_entry);
2872 
2873 		memset(slot, 0, sizeof(*slot));
2874 
2875 		cons++;
2876 		cons &= IXL_AQ_MASK;
2877 	} while (cons != prod);
2878 
2879 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
2880 	    0, IXL_DMA_LEN(&sc->sc_atq),
2881 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2882 
2883 	sc->sc_atq_cons = cons;
2884 
2885 	while ((iatq = SIMPLEQ_FIRST(&cmds)) != NULL) {
2886 		SIMPLEQ_REMOVE_HEAD(&cmds, iatq_entry);
2887 
2888 		(*iatq->iatq_fn)(sc, iatq->iatq_arg);
2889 	}
2890 }
2891 
2892 struct ixl_wakeup {
2893 	struct mutex mtx;
2894 	int notdone;
2895 };
2896 
2897 static void
2898 ixl_wakeup(struct ixl_softc *sc, void *arg)
2899 {
2900 	struct ixl_wakeup *wake = arg;
2901 
2902 	mtx_enter(&wake->mtx);
2903 	wake->notdone = 0;
2904 	mtx_leave(&wake->mtx);
2905 
2906 	wakeup(wake);
2907 }
2908 
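/*
 * Post an admin command and sleep until its completion function
 * (ixl_wakeup) runs. The loop re-checks ixl_atq_done() itself on a
 * one tick timeout rather than relying solely on the interrupt.
 */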
2909 static void
2910 ixl_atq_exec(struct ixl_softc *sc, struct ixl_atq *iatq, const char *wmesg)
2911 {
2912 	struct ixl_wakeup wake = { MUTEX_INITIALIZER(IPL_NET), 1 };
2913 
2914 	KASSERT(iatq->iatq_desc.iaq_cookie == 0);
2915 
2916 	ixl_atq_set(iatq, ixl_wakeup, &wake);
2917 	ixl_atq_post(sc, iatq);
2918 
2919 	mtx_enter(&wake.mtx);
2920 	while (wake.notdone) {
2921 		mtx_leave(&wake.mtx);
2922 		ixl_atq_done(sc);
2923 		mtx_enter(&wake.mtx);
2924 		msleep(&wake, &wake.mtx, 0, wmesg, 1);
2925 	}
2926 	mtx_leave(&wake.mtx);
2927 }
2928 
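/*
 * Synchronous polled command submission, used during attach before
 * the interrupt handler is set up: post a command and busy-wait in
 * 1ms steps, up to tm milliseconds, for the firmware to advance
 * atq_head past it.
 */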
2929 static int
2930 ixl_atq_poll(struct ixl_softc *sc, struct ixl_aq_desc *iaq, unsigned int tm)
2931 {
2932 	struct ixl_aq_desc *atq, *slot;
2933 	unsigned int prod;
2934 	unsigned int t = 0;
2935 
2936 	atq = IXL_DMA_KVA(&sc->sc_atq);
2937 	prod = sc->sc_atq_prod;
2938 	slot = atq + prod;
2939 
2940 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
2941 	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
2942 
2943 	*slot = *iaq;
2944 	slot->iaq_flags |= htole16(IXL_AQ_SI);
2945 
2946 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
2947 	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
2948 
2949 	prod++;
2950 	prod &= IXL_AQ_MASK;
2951 	sc->sc_atq_prod = prod;
2952 	ixl_wr(sc, sc->sc_aq_regs->atq_tail, prod);
2953 
2954 	while (ixl_rd(sc, sc->sc_aq_regs->atq_head) != prod) {
2955 		delaymsec(1);
2956 
2957 		if (t++ > tm)
2958 			return (ETIMEDOUT);
2959 	}
2960 
2961 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
2962 	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTREAD);
2963 	*iaq = *slot;
2964 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
2965 	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREREAD);
2966 
2967 	sc->sc_atq_cons = prod;
2968 
2969 	return (0);
2970 }
2971 
2972 static int
2973 ixl_get_version(struct ixl_softc *sc)
2974 {
2975 	struct ixl_aq_desc iaq;
2976 	uint32_t fwbuild, fwver, apiver;
2977 
2978 	memset(&iaq, 0, sizeof(iaq));
2979 	iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VERSION);
2980 
2981 	if (ixl_atq_poll(sc, &iaq, 2000) != 0)
2982 		return (ETIMEDOUT);
2983 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK))
2984 		return (EIO);
2985 
2986 	fwbuild = lemtoh32(&iaq.iaq_param[1]);
2987 	fwver = lemtoh32(&iaq.iaq_param[2]);
2988 	apiver = lemtoh32(&iaq.iaq_param[3]);
2989 
2990 	printf(", FW %hu.%hu.%05u API %hu.%hu", (uint16_t)fwver,
2991 	    (uint16_t)(fwver >> 16), fwbuild, (uint16_t)apiver,
2992 	    (uint16_t)(apiver >> 16));
2993 
2994 	return (0);
2995 }
2996 
2997 static int
2998 ixl_pxe_clear(struct ixl_softc *sc)
2999 {
3000 	struct ixl_aq_desc iaq;
3001 
3002 	memset(&iaq, 0, sizeof(iaq));
3003 	iaq.iaq_opcode = htole16(IXL_AQ_OP_CLEAR_PXE_MODE);
3004 	iaq.iaq_param[0] = htole32(0x2);
3005 
3006 	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
3007 		printf(", CLEAR PXE MODE timeout\n");
3008 		return (-1);
3009 	}
3010 
3011 	switch (iaq.iaq_retval) {
3012 	case HTOLE16(IXL_AQ_RC_OK):
3013 	case HTOLE16(IXL_AQ_RC_EEXIST):
3014 		break;
3015 	default:
3016 		printf(", CLEAR PXE MODE error\n");
3017 		return (-1);
3018 	}
3019 
3020 	return (0);
3021 }
3022 
3023 static int
3024 ixl_lldp_shut(struct ixl_softc *sc)
3025 {
3026 	struct ixl_aq_desc iaq;
3027 
3028 	memset(&iaq, 0, sizeof(iaq));
3029 	iaq.iaq_opcode = htole16(IXL_AQ_OP_LLDP_STOP_AGENT);
3030 	iaq.iaq_param[0] = htole32(IXL_LLDP_SHUTDOWN);
3031 
3032 	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
3033 		printf(", STOP LLDP AGENT timeout\n");
3034 		return (-1);
3035 	}
3036 
3037 	switch (iaq.iaq_retval) {
3038 	case HTOLE16(IXL_AQ_RC_EMODE):
3039 	case HTOLE16(IXL_AQ_RC_EPERM):
3040 		/* ignore silently */
3041 	default:
3042 		break;
3043 	}
3044 
3045 	return (0);
3046 }
3047 
3048 static int
3049 ixl_get_mac(struct ixl_softc *sc)
3050 {
3051 	struct ixl_dmamem idm;
3052 	struct ixl_aq_desc iaq;
3053 	struct ixl_aq_mac_addresses *addrs;
3054 	int rv;
3055 
3056 	if (ixl_dmamem_alloc(sc, &idm, sizeof(*addrs), 0) != 0) {
3057 		printf(", unable to allocate mac addresses\n");
3058 		return (-1);
3059 	}
3060 
3061 	memset(&iaq, 0, sizeof(iaq));
3062 	iaq.iaq_flags = htole16(IXL_AQ_BUF);
3063 	iaq.iaq_opcode = htole16(IXL_AQ_OP_MAC_ADDRESS_READ);
3064 	iaq.iaq_datalen = htole16(sizeof(*addrs));
3065 	ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
3066 
3067 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3068 	    BUS_DMASYNC_PREREAD);
3069 
3070 	rv = ixl_atq_poll(sc, &iaq, 250);
3071 
3072 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3073 	    BUS_DMASYNC_POSTREAD);
3074 
3075 	if (rv != 0) {
3076 		printf(", MAC ADDRESS READ timeout\n");
3077 		rv = -1;
3078 		goto done;
3079 	}
3080 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
3081 		printf(", MAC ADDRESS READ error\n");
3082 		rv = -1;
3083 		goto done;
3084 	}
3085 
3086 	addrs = IXL_DMA_KVA(&idm);
3087 	if (!ISSET(iaq.iaq_param[0], htole32(IXL_AQ_MAC_PORT_VALID))) {
3088 		printf(", port address is not valid\n");
3089 		goto done;
3090 	}
3091 
3092 	memcpy(sc->sc_ac.ac_enaddr, addrs->port, ETHER_ADDR_LEN);
3093 	rv = 0;
3094 
3095 done:
3096 	ixl_dmamem_free(sc, &idm);
3097 	return (rv);
3098 }
3099 
3100 static int
3101 ixl_get_switch_config(struct ixl_softc *sc)
3102 {
3103 	struct ixl_dmamem idm;
3104 	struct ixl_aq_desc iaq;
3105 	struct ixl_aq_switch_config *hdr;
3106 	struct ixl_aq_switch_config_element *elms, *elm;
3107 	unsigned int nelm;
3108 	int rv;
3109 
3110 	if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) {
3111 		printf("%s: unable to allocate switch config buffer\n",
3112 		    DEVNAME(sc));
3113 		return (-1);
3114 	}
3115 
3116 	memset(&iaq, 0, sizeof(iaq));
3117 	iaq.iaq_flags = htole16(IXL_AQ_BUF |
3118 	    (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
3119 	iaq.iaq_opcode = htole16(IXL_AQ_OP_SWITCH_GET_CONFIG);
3120 	iaq.iaq_datalen = htole16(IXL_AQ_BUFLEN);
3121 	ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
3122 
3123 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3124 	    BUS_DMASYNC_PREREAD);
3125 
3126 	rv = ixl_atq_poll(sc, &iaq, 250);
3127 
3128 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3129 	    BUS_DMASYNC_POSTREAD);
3130 
3131 	if (rv != 0) {
3132 		printf("%s: GET SWITCH CONFIG timeout\n", DEVNAME(sc));
3133 		rv = -1;
3134 		goto done;
3135 	}
3136 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
3137 		printf("%s: GET SWITCH CONFIG error\n", DEVNAME(sc));
3138 		rv = -1;
3139 		goto done;
3140 	}
3141 
3142 	hdr = IXL_DMA_KVA(&idm);
3143 	elms = (struct ixl_aq_switch_config_element *)(hdr + 1);
3144 
3145 	nelm = lemtoh16(&hdr->num_reported);
3146 	if (nelm < 1) {
3147 		printf("%s: no switch config available\n", DEVNAME(sc));
3148 		rv = -1;
3149 		goto done;
3150 	}
3151 
3152 #if 0
3153 	for (i = 0; i < nelm; i++) {
3154 		elm = &elms[i];
3155 
3156 		printf("%s: type %x revision %u seid %04x\n", DEVNAME(sc),
3157 		    elm->type, elm->revision, lemtoh16(&elm->seid));
3158 		printf("%s: uplink %04x downlink %04x\n", DEVNAME(sc),
3159 		    lemtoh16(&elm->uplink_seid),
3160 		    lemtoh16(&elm->downlink_seid));
3161 		printf("%s: conntype %x scheduler %04x extra %04x\n",
3162 		    DEVNAME(sc), elm->connection_type,
3163 		    lemtoh16(&elm->scheduler_id),
3164 		    lemtoh16(&elm->element_info));
3165 	}
3166 #endif
3167 
3168 	elm = &elms[0];
3169 
3170 	sc->sc_uplink_seid = elm->uplink_seid;
3171 	sc->sc_downlink_seid = elm->downlink_seid;
3172 	sc->sc_seid = elm->seid;
3173 
3174 	if ((sc->sc_uplink_seid == htole16(0)) !=
3175 	    (sc->sc_downlink_seid == htole16(0))) {
3176 		printf("%s: SEIDs are misconfigured\n", DEVNAME(sc));
3177 		rv = -1;
3178 		goto done;
3179 	}
3180 
3181 done:
3182 	ixl_dmamem_free(sc, &idm);
3183 	return (rv);
3184 }
3185 
3186 static int
3187 ixl_phy_mask_ints(struct ixl_softc *sc)
3188 {
3189 	struct ixl_aq_desc iaq;
3190 
3191 	memset(&iaq, 0, sizeof(iaq));
3192 	iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_EVENT_MASK);
3193 	iaq.iaq_param[2] = htole32(IXL_AQ_PHY_EV_MASK &
3194 	    ~(IXL_AQ_PHY_EV_LINK_UPDOWN | IXL_AQ_PHY_EV_MODULE_QUAL_FAIL |
3195 	      IXL_AQ_PHY_EV_MEDIA_NA));
3196 
3197 	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
3198 		printf("%s: SET PHY EVENT MASK timeout\n", DEVNAME(sc));
3199 		return (-1);
3200 	}
3201 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
3202 		printf("%s: SET PHY EVENT MASK error\n", DEVNAME(sc));
3203 		return (-1);
3204 	}
3205 
3206 	return (0);
3207 }
3208 
3209 static int
3210 ixl_get_phy_abilities(struct ixl_softc *sc, uint64_t *phy_types_ptr)
3211 {
3212 	struct ixl_dmamem idm;
3213 	struct ixl_aq_desc iaq;
3214 	struct ixl_aq_phy_abilities *phy;
3215 	uint64_t phy_types;
3216 	int rv;
3217 
3218 	if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) {
3219 		printf("%s: unable to allocate phy abilities buffer\n",
3220 		    DEVNAME(sc));
3221 		return (-1);
3222 	}
3223 
3224 	memset(&iaq, 0, sizeof(iaq));
3225 	iaq.iaq_flags = htole16(IXL_AQ_BUF |
3226 	    (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
3227 	iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_GET_ABILITIES);
3228 	iaq.iaq_datalen = htole16(IXL_AQ_BUFLEN);
3229 	iaq.iaq_param[0] = htole32(IXL_AQ_PHY_REPORT_INIT);
3230 	ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
3231 
3232 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3233 	    BUS_DMASYNC_PREREAD);
3234 
3235 	rv = ixl_atq_poll(sc, &iaq, 250);
3236 
3237 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3238 	    BUS_DMASYNC_POSTREAD);
3239 
3240 	if (rv != 0) {
3241 		printf("%s: GET PHY ABILITIES timeout\n", DEVNAME(sc));
3242 		rv = -1;
3243 		goto done;
3244 	}
3245 	switch (iaq.iaq_retval) {
3246 	case HTOLE16(IXL_AQ_RC_OK):
3247 		break;
3248 	case HTOLE16(IXL_AQ_RC_EIO):
3249 		printf("%s: unable to query phy types\n", DEVNAME(sc));
3250 		rv = 0;
3251 		goto done;
3252 	default:
3253 		printf("%s: GET PHY ABILITIES error\n", DEVNAME(sc));
3254 		rv = -1;
3255 		goto done;
3256 	}
3257 
3258 	phy = IXL_DMA_KVA(&idm);
3259 
3260 	phy_types = lemtoh32(&phy->phy_type);
3261 	phy_types |= (uint64_t)phy->phy_type_ext << 32;
3262 
3263 	*phy_types_ptr = phy_types;
3264 
3265 	rv = 0;
3266 
3267 done:
3268 	ixl_dmamem_free(sc, &idm);
3269 	return (rv);
3270 }
3271 
3272 static int
3273 ixl_get_link_status(struct ixl_softc *sc)
3274 {
3275 	struct ixl_aq_desc iaq;
3276 	struct ixl_aq_link_param *param;
3277 
3278 	memset(&iaq, 0, sizeof(iaq));
3279 	iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS);
3280 	param = (struct ixl_aq_link_param *)iaq.iaq_param;
3281 	param->notify = IXL_AQ_LINK_NOTIFY;
3282 
3283 	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
3284 		printf("%s: GET LINK STATUS timeout\n", DEVNAME(sc));
3285 		return (-1);
3286 	}
3287 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
3288 		printf("%s: GET LINK STATUS error\n", DEVNAME(sc));
3289 		return (0);
3290 	}
3291 
3292 	sc->sc_ac.ac_if.if_link_state = ixl_set_link_status(sc, &iaq);
3293 
3294 	return (0);
3295 }
3296 
3297 static int
3298 ixl_get_vsi(struct ixl_softc *sc)
3299 {
3300 	struct ixl_dmamem *vsi = &sc->sc_vsi;
3301 	struct ixl_aq_desc iaq;
3302 	struct ixl_aq_vsi_param *param;
3303 	struct ixl_aq_vsi_reply *reply;
3304 	int rv;
3305 
3306 	/* grumble, vsi info isn't "known" at compile time */
3307 
3308 	memset(&iaq, 0, sizeof(iaq));
3309 	htolem16(&iaq.iaq_flags, IXL_AQ_BUF |
3310 	    (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
3311 	iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VSI_PARAMS);
3312 	htolem16(&iaq.iaq_datalen, IXL_DMA_LEN(vsi));
3313 	ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi));
3314 
3315 	param = (struct ixl_aq_vsi_param *)iaq.iaq_param;
3316 	param->uplink_seid = sc->sc_seid;
3317 
3318 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
3319 	    BUS_DMASYNC_PREREAD);
3320 
3321 	rv = ixl_atq_poll(sc, &iaq, 250);
3322 
3323 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
3324 	    BUS_DMASYNC_POSTREAD);
3325 
3326 	if (rv != 0) {
3327 		printf("%s: GET VSI timeout\n", DEVNAME(sc));
3328 		return (-1);
3329 	}
3330 
3331 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
3332 		printf("%s: GET VSI error %u\n", DEVNAME(sc),
3333 		    lemtoh16(&iaq.iaq_retval));
3334 		return (-1);
3335 	}
3336 
3337 	reply = (struct ixl_aq_vsi_reply *)iaq.iaq_param;
3338 	sc->sc_vsi_number = reply->vsi_number;
3339 
3340 	return (0);
3341 }
3342 
3343 static int
3344 ixl_set_vsi(struct ixl_softc *sc)
3345 {
3346 	struct ixl_dmamem *vsi = &sc->sc_vsi;
3347 	struct ixl_aq_desc iaq;
3348 	struct ixl_aq_vsi_param *param;
3349 	struct ixl_aq_vsi_data *data = IXL_DMA_KVA(vsi);
3350 	int rv;
3351 
3352 	data->valid_sections = htole16(IXL_AQ_VSI_VALID_QUEUE_MAP |
3353 	    IXL_AQ_VSI_VALID_VLAN);
3354 
3355 	CLR(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_MASK));
3356 	SET(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_CONTIG));
3357 	data->queue_mapping[0] = htole16(0);
3358 	data->tc_mapping[0] = htole16((0 << IXL_AQ_VSI_TC_Q_OFFSET_SHIFT) |
3359 	    (sc->sc_nqueues << IXL_AQ_VSI_TC_Q_NUMBER_SHIFT));
3360 
3361 	CLR(data->port_vlan_flags,
3362 	    htole16(IXL_AQ_VSI_PVLAN_MODE_MASK | IXL_AQ_VSI_PVLAN_EMOD_MASK));
3363 	SET(data->port_vlan_flags,
3364 	    htole16(IXL_AQ_VSI_PVLAN_MODE_ALL | IXL_AQ_VSI_PVLAN_EMOD_NOTHING));
3365 
3366 	/* grumble, vsi info isn't "known" at compile time */
3367 
3368 	memset(&iaq, 0, sizeof(iaq));
3369 	htolem16(&iaq.iaq_flags, IXL_AQ_BUF | IXL_AQ_RD |
3370 	    (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
3371 	iaq.iaq_opcode = htole16(IXL_AQ_OP_UPD_VSI_PARAMS);
3372 	htolem16(&iaq.iaq_datalen, IXL_DMA_LEN(vsi));
3373 	ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi));
3374 
3375 	param = (struct ixl_aq_vsi_param *)iaq.iaq_param;
3376 	param->uplink_seid = sc->sc_seid;
3377 
3378 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
3379 	    BUS_DMASYNC_PREWRITE);
3380 
3381 	rv = ixl_atq_poll(sc, &iaq, 250);
3382 
3383 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
3384 	    BUS_DMASYNC_POSTWRITE);
3385 
3386 	if (rv != 0) {
3387 		printf("%s: UPDATE VSI timeout\n", DEVNAME(sc));
3388 		return (-1);
3389 	}
3390 
3391 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
3392 		printf("%s: UPDATE VSI error %u\n", DEVNAME(sc),
3393 		    lemtoh16(&iaq.iaq_retval));
3394 		return (-1);
3395 	}
3396 
3397 	return (0);
3398 }
3399 
3400 static const struct ixl_phy_type *
3401 ixl_search_phy_type(uint8_t phy_type)
3402 {
3403 	const struct ixl_phy_type *itype;
3404 	uint64_t mask;
3405 	unsigned int i;
3406 
3407 	if (phy_type >= 64)
3408 		return (NULL);
3409 
3410 	mask = 1ULL << phy_type;
3411 
3412 	for (i = 0; i < nitems(ixl_phy_type_map); i++) {
3413 		itype = &ixl_phy_type_map[i];
3414 
3415 		if (ISSET(itype->phy_type, mask))
3416 			return (itype);
3417 	}
3418 
3419 	return (NULL);
3420 }
3421 
3422 static uint64_t
3423 ixl_search_link_speed(uint8_t link_speed)
3424 {
3425 	const struct ixl_speed_type *type;
3426 	unsigned int i;
3427 
3428 	for (i = 0; i < nitems(ixl_speed_type_map); i++) {
3429 		type = &ixl_speed_type_map[i];
3430 
3431 		if (ISSET(type->dev_speed, link_speed))
3432 			return (type->net_speed);
3433 	}
3434 
3435 	return (0);
3436 }
3437 
3438 static int
3439 ixl_set_link_status(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
3440 {
3441 	const struct ixl_aq_link_status *status;
3442 	const struct ixl_phy_type *itype;
3443 
3444 	uint64_t ifm_active = IFM_ETHER;
3445 	uint64_t ifm_status = IFM_AVALID;
3446 	int link_state = LINK_STATE_DOWN;
3447 	uint64_t baudrate = 0;
3448 
3449 	status = (const struct ixl_aq_link_status *)iaq->iaq_param;
3450 	if (!ISSET(status->link_info, IXL_AQ_LINK_UP_FUNCTION))
3451 		goto done;
3452 
3453 	ifm_active |= IFM_FDX;
3454 	ifm_status |= IFM_ACTIVE;
3455 	link_state = LINK_STATE_FULL_DUPLEX;
3456 
3457 	itype = ixl_search_phy_type(status->phy_type);
3458 	if (itype != NULL)
3459 		ifm_active |= itype->ifm_type;
3460 
3461 	if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_TX))
3462 		ifm_active |= IFM_ETH_TXPAUSE;
3463 	if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_RX))
3464 		ifm_active |= IFM_ETH_RXPAUSE;
3465 
3466 	baudrate = ixl_search_link_speed(status->link_speed);
3467 
3468 done:
3469 	/* NET_ASSERT_LOCKED() except during attach */
3470 	sc->sc_media_active = ifm_active;
3471 	sc->sc_media_status = ifm_status;
3472 	sc->sc_ac.ac_if.if_baudrate = baudrate;
3473 
3474 	return (link_state);
3475 }
3476 
3477 static int
3478 ixl_restart_an(struct ixl_softc *sc)
3479 {
3480 	struct ixl_aq_desc iaq;
3481 
3482 	memset(&iaq, 0, sizeof(iaq));
3483 	iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_RESTART_AN);
3484 	iaq.iaq_param[0] =
3485 	    htole32(IXL_AQ_PHY_RESTART_AN | IXL_AQ_PHY_LINK_ENABLE);
3486 
3487 	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
3488 		printf("%s: RESTART AN timeout\n", DEVNAME(sc));
3489 		return (-1);
3490 	}
3491 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
3492 		printf("%s: RESTART AN error\n", DEVNAME(sc));
3493 		return (-1);
3494 	}
3495 
3496 	return (0);
3497 }
3498 
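/*
 * Set up the Host Memory Cache: the hardware keeps its queue
 * contexts in memory the PF must provide. Object counts and sizes
 * are read from the GLHMC registers; one page-aligned allocation
 * (sc_hmc_pd) provides the object pages themselves, a second
 * (sc_hmc_sd) holds the page descriptor entries pointing at them,
 * and each page of that table is programmed as a segment descriptor
 * through PFHMC_SDCMD.
 */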
3499 static int
3500 ixl_hmc(struct ixl_softc *sc)
3501 {
3502 	struct {
3503 		uint32_t   count;
3504 		uint32_t   minsize;
3505 		bus_size_t maxcnt;
3506 		bus_size_t setoff;
3507 		bus_size_t setcnt;
3508 	} regs[] = {
3509 		{
3510 			0,
3511 			IXL_HMC_TXQ_MINSIZE,
3512 			I40E_GLHMC_LANTXOBJSZ,
3513 			I40E_GLHMC_LANTXBASE(sc->sc_pf_id),
3514 			I40E_GLHMC_LANTXCNT(sc->sc_pf_id),
3515 		},
3516 		{
3517 			0,
3518 			IXL_HMC_RXQ_MINSIZE,
3519 			I40E_GLHMC_LANRXOBJSZ,
3520 			I40E_GLHMC_LANRXBASE(sc->sc_pf_id),
3521 			I40E_GLHMC_LANRXCNT(sc->sc_pf_id),
3522 		},
3523 		{
3524 			0,
3525 			0,
3526 			I40E_GLHMC_FCOEMAX,
3527 			I40E_GLHMC_FCOEDDPBASE(sc->sc_pf_id),
3528 			I40E_GLHMC_FCOEDDPCNT(sc->sc_pf_id),
3529 		},
3530 		{
3531 			0,
3532 			0,
3533 			I40E_GLHMC_FCOEFMAX,
3534 			I40E_GLHMC_FCOEFBASE(sc->sc_pf_id),
3535 			I40E_GLHMC_FCOEFCNT(sc->sc_pf_id),
3536 		},
3537 	};
3538 	struct ixl_hmc_entry *e;
3539 	uint64_t size, dva;
3540 	uint8_t *kva;
3541 	uint64_t *sdpage;
3542 	unsigned int i;
3543 	int npages, tables;
3544 
3545 	CTASSERT(nitems(regs) <= nitems(sc->sc_hmc_entries));
3546 
3547 	regs[IXL_HMC_LAN_TX].count = regs[IXL_HMC_LAN_RX].count =
3548 	    ixl_rd(sc, I40E_GLHMC_LANQMAX);
3549 
3550 	size = 0;
3551 	for (i = 0; i < nitems(regs); i++) {
3552 		e = &sc->sc_hmc_entries[i];
3553 
3554 		e->hmc_count = regs[i].count;
3555 		e->hmc_size = 1U << ixl_rd(sc, regs[i].maxcnt);
3556 		e->hmc_base = size;
3557 
3558 		if ((e->hmc_size * 8) < regs[i].minsize) {
3559 			printf("%s: kernel hmc entry is too big\n",
3560 			    DEVNAME(sc));
3561 			return (-1);
3562 		}
3563 
3564 		size += roundup(e->hmc_size * e->hmc_count, IXL_HMC_ROUNDUP);
3565 	}
3566 	size = roundup(size, IXL_HMC_PGSIZE);
3567 	npages = size / IXL_HMC_PGSIZE;
3568 
3569 	tables = roundup(size, IXL_HMC_L2SZ) / IXL_HMC_L2SZ;
3570 
3571 	if (ixl_dmamem_alloc(sc, &sc->sc_hmc_pd, size, IXL_HMC_PGSIZE) != 0) {
3572 		printf("%s: unable to allocate hmc pd memory\n", DEVNAME(sc));
3573 		return (-1);
3574 	}
3575 
3576 	if (ixl_dmamem_alloc(sc, &sc->sc_hmc_sd, tables * IXL_HMC_PGSIZE,
3577 	    IXL_HMC_PGSIZE) != 0) {
3578 		printf("%s: unable to allocate hmc sd memory\n", DEVNAME(sc));
3579 		ixl_dmamem_free(sc, &sc->sc_hmc_pd);
3580 		return (-1);
3581 	}
3582 
3583 	kva = IXL_DMA_KVA(&sc->sc_hmc_pd);
3584 	memset(kva, 0, IXL_DMA_LEN(&sc->sc_hmc_pd));
3585 
3586 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
3587 	    0, IXL_DMA_LEN(&sc->sc_hmc_pd),
3588 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3589 
3590 	dva = IXL_DMA_DVA(&sc->sc_hmc_pd);
3591 	sdpage = IXL_DMA_KVA(&sc->sc_hmc_sd);
3592 	for (i = 0; i < npages; i++) {
3593 		htolem64(sdpage++, dva | IXL_HMC_PDVALID);
3594 
3595 		dva += IXL_HMC_PGSIZE;
3596 	}
3597 
3598 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_sd),
3599 	    0, IXL_DMA_LEN(&sc->sc_hmc_sd),
3600 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3601 
3602 	dva = IXL_DMA_DVA(&sc->sc_hmc_sd);
3603 	for (i = 0; i < tables; i++) {
3604 		uint32_t count;
3605 
3606 		KASSERT(npages >= 0);
3607 
3608 		count = (npages > IXL_HMC_PGS) ? IXL_HMC_PGS : npages;
3609 
3610 		ixl_wr(sc, I40E_PFHMC_SDDATAHIGH, dva >> 32);
3611 		ixl_wr(sc, I40E_PFHMC_SDDATALOW, dva |
3612 		    (count << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |
3613 		    (1U << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT));
3614 		ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
3615 		ixl_wr(sc, I40E_PFHMC_SDCMD,
3616 		    (1U << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | i);
3617 
3618 		npages -= IXL_HMC_PGS;
3619 		dva += IXL_HMC_PGSIZE;
3620 	}
3621 
	for (i = 0; i < nitems(regs); i++) {
		e = &sc->sc_hmc_entries[i];

		ixl_wr(sc, regs[i].setoff, e->hmc_base / IXL_HMC_ROUNDUP);
		ixl_wr(sc, regs[i].setcnt, e->hmc_count);
	}

	return (0);
}

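/* release the hmc backing pages and descriptor table memory */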
static void
ixl_hmc_free(struct ixl_softc *sc)
{
	ixl_dmamem_free(sc, &sc->sc_hmc_sd);
	ixl_dmamem_free(sc, &sc->sc_hmc_pd);
}

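/*
 * Pack a host structure into the bit-packed little-endian layout the
 * hardware expects. Each packing entry gives the byte offset of a
 * field in the source structure, plus the bit position (lsb) and
 * width that field occupies in the destination context.
 */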
static void
ixl_hmc_pack(void *d, const void *s, const struct ixl_hmc_pack *packing,
    unsigned int npacking)
{
	uint8_t *dst = d;
	const uint8_t *src = s;
	unsigned int i;

	for (i = 0; i < npacking; i++) {
		const struct ixl_hmc_pack *pack = &packing[i];
		unsigned int offset = pack->lsb / 8;
		unsigned int align = pack->lsb % 8;
		const uint8_t *in = src + pack->offset;
		uint8_t *out = dst + offset;
		int width = pack->width;
		unsigned int inbits = 0;

		if (align) {
			/*
			 * the first output byte is shared with earlier
			 * fields, so OR the low input bits into place
			 * and keep the leftover high bits in inbits.
			 */
			inbits = (*in++) << align;
			*out++ |= inbits;
			inbits >>= 8;

			width -= 8 - align;
		}

		while (width >= 8) {
			/* carry bits from the previous byte, then refill */
			inbits |= (*in++) << align;
			*out++ = inbits;
			inbits >>= 8;

			width -= 8;
		}

		if (width > 0) {
			/*
			 * only consume another input byte if the carried
			 * bits cannot cover the remaining width, and mask
			 * so we never touch the next field's bits.
			 */
			if (width > (int)align)
				inbits |= (*in) << align;
			*out |= inbits & ((1 << width) - 1);
		}
	}
}

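/* allocate a dma-able buffer for admin queue commands and events */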
static struct ixl_aq_buf *
ixl_aqb_alloc(struct ixl_softc *sc)
{
	struct ixl_aq_buf *aqb;

	aqb = malloc(sizeof(*aqb), M_DEVBUF, M_WAITOK);
	if (aqb == NULL)
		return (NULL);

	aqb->aqb_data = dma_alloc(IXL_AQ_BUFLEN, PR_WAITOK);
	if (aqb->aqb_data == NULL)
		goto free;

	if (bus_dmamap_create(sc->sc_dmat, IXL_AQ_BUFLEN, 1,
	    IXL_AQ_BUFLEN, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
	    &aqb->aqb_map) != 0)
		goto dma_free;

	if (bus_dmamap_load(sc->sc_dmat, aqb->aqb_map, aqb->aqb_data,
	    IXL_AQ_BUFLEN, NULL, BUS_DMA_WAITOK) != 0)
		goto destroy;

	return (aqb);

destroy:
	bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
dma_free:
	dma_free(aqb->aqb_data, IXL_AQ_BUFLEN);
free:
	free(aqb, M_DEVBUF, sizeof(*aqb));

	return (NULL);
}

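/* tear down a buffer created by ixl_aqb_alloc() */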
static void
ixl_aqb_free(struct ixl_softc *sc, struct ixl_aq_buf *aqb)
{
	bus_dmamap_unload(sc->sc_dmat, aqb->aqb_map);
	bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
	dma_free(aqb->aqb_data, IXL_AQ_BUFLEN);
	free(aqb, M_DEVBUF, sizeof(*aqb));
}

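/*
 * Post fresh buffers to the admin receive queue for the firmware to
 * write events into. Returns nonzero if the producer index moved and
 * the hardware needs to be notified.
 */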
static int
ixl_arq_fill(struct ixl_softc *sc)
{
	struct ixl_aq_buf *aqb;
	struct ixl_aq_desc *arq, *iaq;
	unsigned int prod = sc->sc_arq_prod;
	unsigned int n;
	int post = 0;

	n = if_rxr_get(&sc->sc_arq_ring, IXL_AQ_NUM);
	arq = IXL_DMA_KVA(&sc->sc_arq);

	while (n > 0) {
		aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle);
		if (aqb != NULL)
			SIMPLEQ_REMOVE_HEAD(&sc->sc_arq_idle, aqb_entry);
		else if ((aqb = ixl_aqb_alloc(sc)) == NULL)
			break;

		memset(aqb->aqb_data, 0, IXL_AQ_BUFLEN);

		bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN,
		    BUS_DMASYNC_PREREAD);

		iaq = &arq[prod];
		iaq->iaq_flags = htole16(IXL_AQ_BUF |
		    (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
		iaq->iaq_opcode = 0;
		iaq->iaq_datalen = htole16(IXL_AQ_BUFLEN);
		iaq->iaq_retval = 0;
		iaq->iaq_cookie = 0;
		iaq->iaq_param[0] = 0;
		iaq->iaq_param[1] = 0;
		ixl_aq_dva(iaq, aqb->aqb_map->dm_segs[0].ds_addr);

		SIMPLEQ_INSERT_TAIL(&sc->sc_arq_live, aqb, aqb_entry);

		prod++;
		prod &= IXL_AQ_MASK;

		post = 1;

		n--;
	}

	if_rxr_put(&sc->sc_arq_ring, n);
	sc->sc_arq_prod = prod;

	return (post);
}

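/* reclaim and free all buffers currently posted to the admin receive queue */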
static void
ixl_arq_unfill(struct ixl_softc *sc)
{
	struct ixl_aq_buf *aqb;

	while ((aqb = SIMPLEQ_FIRST(&sc->sc_arq_live)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&sc->sc_arq_live, aqb_entry);

		bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN,
		    BUS_DMASYNC_POSTREAD);
		ixl_aqb_free(sc, aqb);
	}
}

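/*
 * Quiesce the hardware before a PF reset: mask and unlink the PF and
 * VF interrupts, pre-announce the coming Tx queue disables, then
 * disable every queue pair assigned to this PF.
 */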
static void
ixl_clear_hw(struct ixl_softc *sc)
{
	uint32_t num_queues, base_queue;
	uint32_t num_pf_int;
	uint32_t num_vf_int;
	uint32_t num_vfs;
	uint32_t i, j;
	uint32_t val;
	uint32_t eol = 0x7ff;

	/* get number of interrupts, queues, and vfs */
	val = ixl_rd(sc, I40E_GLPCI_CNF2);
	num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
	    I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
	num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
	    I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;

	val = ixl_rd(sc, I40E_PFLAN_QALLOC);
	base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
	    I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
	j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
	    I40E_PFLAN_QALLOC_LASTQ_SHIFT;
	if (val & I40E_PFLAN_QALLOC_VALID_MASK)
		num_queues = (j - base_queue) + 1;
	else
		num_queues = 0;

	val = ixl_rd(sc, I40E_PF_VT_PFALLOC);
	i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
	    I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
	j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
	    I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
	if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
		num_vfs = (j - i) + 1;
	else
		num_vfs = 0;

	/* stop all the interrupts */
	ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0);
	val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
	for (i = 0; i < num_pf_int - 2; i++)
		ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), val);

	/* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
	val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
	ixl_wr(sc, I40E_PFINT_LNKLST0, val);
	for (i = 0; i < num_pf_int - 2; i++)
		ixl_wr(sc, I40E_PFINT_LNKLSTN(i), val);
	val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
	for (i = 0; i < num_vfs; i++)
		ixl_wr(sc, I40E_VPINT_LNKLST0(i), val);
	for (i = 0; i < num_vf_int - 2; i++)
		ixl_wr(sc, I40E_VPINT_LNKLSTN(i), val);

	/* warn the HW of the coming Tx disables */
	for (i = 0; i < num_queues; i++) {
		uint32_t abs_queue_idx = base_queue + i;
		uint32_t reg_block = 0;

		if (abs_queue_idx >= 128) {
			reg_block = abs_queue_idx / 128;
			abs_queue_idx %= 128;
		}

		val = ixl_rd(sc, I40E_GLLAN_TXPRE_QDIS(reg_block));
		val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
		val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
		val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;

		ixl_wr(sc, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
	}
	delaymsec(400);

	/* stop all the queues */
	for (i = 0; i < num_queues; i++) {
		ixl_wr(sc, I40E_QINT_TQCTL(i), 0);
		ixl_wr(sc, I40E_QTX_ENA(i), 0);
		ixl_wr(sc, I40E_QINT_RQCTL(i), 0);
		ixl_wr(sc, I40E_QRX_ENA(i), 0);
	}

	/* short wait for all queue disables to settle */
	delaymsec(50);
}

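/*
 * Reset the physical function. Wait out any global reset already in
 * progress and wait for the firmware to become ready again; an
 * explicit PF software reset is only triggered if no global reset
 * was observed while polling.
 */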
static int
ixl_pf_reset(struct ixl_softc *sc)
{
	uint32_t cnt = 0;
	uint32_t cnt1 = 0;
	uint32_t reg = 0;
	uint32_t grst_del;

	/*
	 * Poll for Global Reset steady state in case of recent GRST.
	 * The grst delay value is in 100ms units, and we'll wait a
	 * couple of counts longer to be sure we don't just miss the end.
	 */
	grst_del = ixl_rd(sc, I40E_GLGEN_RSTCTL);
	grst_del &= I40E_GLGEN_RSTCTL_GRSTDEL_MASK;
	grst_del >>= I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
	grst_del += 10;

	for (cnt = 0; cnt < grst_del; cnt++) {
		reg = ixl_rd(sc, I40E_GLGEN_RSTAT);
		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
			break;
		delaymsec(100);
	}
	if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
		printf(", Global reset polling failed to complete\n");
		return (-1);
	}

	/* now wait for the firmware to be ready */
	for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
		reg = ixl_rd(sc, I40E_GLNVM_ULD);
		reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
		    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
		if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
		    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))
			break;

		delaymsec(10);
	}
	if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
	    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
		printf(", timed out waiting for FW reset to complete "
		    "(I40E_GLNVM_ULD = 0x%x)\n", reg);
		return (-1);
	}

	/*
	 * If there was a Global Reset in progress when we got here,
	 * we don't need to do the PF Reset.
	 */
	if (cnt == 0) {
		reg = ixl_rd(sc, I40E_PFGEN_CTRL);
		ixl_wr(sc, I40E_PFGEN_CTRL, reg | I40E_PFGEN_CTRL_PFSWR_MASK);
		for (cnt = 0; cnt < I40E_PF_RESET_WAIT_COUNT; cnt++) {
			reg = ixl_rd(sc, I40E_PFGEN_CTRL);
			if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
				break;
			delaymsec(1);
		}
		if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
			printf(", PF reset polling failed to complete "
			    "(I40E_PFGEN_CTRL = 0x%x)\n", reg);
			return (-1);
		}
	}

	return (0);
}

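/*
 * Allocate a single-segment chunk of dma memory and map it into the
 * kernel, ready to be handed to the hardware.
 */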
static int
ixl_dmamem_alloc(struct ixl_softc *sc, struct ixl_dmamem *ixm,
    bus_size_t size, u_int align)
{
	ixm->ixm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, ixm->ixm_size, 1,
	    ixm->ixm_size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
	    &ixm->ixm_map) != 0)
		return (1);
	if (bus_dmamem_alloc(sc->sc_dmat, ixm->ixm_size,
	    align, 0, &ixm->ixm_seg, 1, &ixm->ixm_nsegs,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
		goto destroy;
	if (bus_dmamem_map(sc->sc_dmat, &ixm->ixm_seg, ixm->ixm_nsegs,
	    ixm->ixm_size, &ixm->ixm_kva, BUS_DMA_WAITOK) != 0)
		goto free;
	if (bus_dmamap_load(sc->sc_dmat, ixm->ixm_map, ixm->ixm_kva,
	    ixm->ixm_size, NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	return (0);
unmap:
	bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
free:
	bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
	return (1);
}

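/* undo ixl_dmamem_alloc(): unload, unmap, and free the dma memory */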
static void
ixl_dmamem_free(struct ixl_softc *sc, struct ixl_dmamem *ixm)
{
	bus_dmamap_unload(sc->sc_dmat, ixm->ixm_map);
	bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
	bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
}