xref: /openbsd-src/sys/dev/pci/if_ixl.c (revision 3374c67d44f9b75b98444cbf63020f777792342e)
1 /*	$OpenBSD: if_ixl.c,v 1.84 2022/08/05 13:57:16 bluhm Exp $ */
2 
3 /*
4  * Copyright (c) 2013-2015, Intel Corporation
5  * All rights reserved.
6 
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions are met:
9  *
10  *  1. Redistributions of source code must retain the above copyright notice,
11  *     this list of conditions and the following disclaimer.
12  *
13  *  2. Redistributions in binary form must reproduce the above copyright
14  *     notice, this list of conditions and the following disclaimer in the
15  *     documentation and/or other materials provided with the distribution.
16  *
17  *  3. Neither the name of the Intel Corporation nor the names of its
18  *     contributors may be used to endorse or promote products derived from
19  *     this software without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 /*
35  * Copyright (c) 2016,2017 David Gwynne <dlg@openbsd.org>
36  *
37  * Permission to use, copy, modify, and distribute this software for any
38  * purpose with or without fee is hereby granted, provided that the above
39  * copyright notice and this permission notice appear in all copies.
40  *
41  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
42  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
43  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
44  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
45  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
46  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
47  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
48  */
49 
50 #include "bpfilter.h"
51 #include "kstat.h"
52 
53 #include <sys/param.h>
54 #include <sys/systm.h>
55 #include <sys/proc.h>
56 #include <sys/sockio.h>
57 #include <sys/mbuf.h>
58 #include <sys/kernel.h>
59 #include <sys/socket.h>
60 #include <sys/device.h>
61 #include <sys/pool.h>
62 #include <sys/queue.h>
63 #include <sys/timeout.h>
64 #include <sys/task.h>
65 #include <sys/syslog.h>
66 #include <sys/intrmap.h>
67 
68 #include <machine/bus.h>
69 #include <machine/intr.h>
70 
71 #include <net/if.h>
72 #include <net/if_dl.h>
73 #include <net/if_media.h>
74 #include <net/toeplitz.h>
75 
76 #if NBPFILTER > 0
77 #include <net/bpf.h>
78 #endif
79 
80 #if NKSTAT > 0
81 #include <sys/kstat.h>
82 #endif
83 
84 #include <netinet/in.h>
85 #include <netinet/ip.h>
86 #include <netinet/ip6.h>
87 #include <netinet/tcp.h>
88 #include <netinet/udp.h>
89 #include <netinet/if_ether.h>
90 
91 #include <dev/pci/pcireg.h>
92 #include <dev/pci/pcivar.h>
93 #include <dev/pci/pcidevs.h>
94 
95 #ifdef __sparc64__
96 #include <dev/ofw/openfirm.h>
97 #endif
98 
99 #ifndef CACHE_LINE_SIZE
100 #define CACHE_LINE_SIZE 64
101 #endif
102 
103 #define IXL_MAX_VECTORS			8 /* XXX this is pretty arbitrary */
104 
105 #define I40E_MASK(mask, shift)		((mask) << (shift))
106 #define I40E_PF_RESET_WAIT_COUNT	200
107 #define I40E_AQ_LARGE_BUF		512
108 
109 /* bitfields for Tx queue mapping in QTX_CTL */
110 #define I40E_QTX_CTL_VF_QUEUE		0x0
111 #define I40E_QTX_CTL_VM_QUEUE		0x1
112 #define I40E_QTX_CTL_PF_QUEUE		0x2
113 
114 #define I40E_QUEUE_TYPE_EOL		0x7ff
115 #define I40E_INTR_NOTX_QUEUE		0
116 
117 #define I40E_QUEUE_TYPE_RX		0x0
118 #define I40E_QUEUE_TYPE_TX		0x1
119 #define I40E_QUEUE_TYPE_PE_CEQ		0x2
120 #define I40E_QUEUE_TYPE_UNKNOWN		0x3
121 
122 #define I40E_ITR_INDEX_RX		0x0
123 #define I40E_ITR_INDEX_TX		0x1
124 #define I40E_ITR_INDEX_OTHER		0x2
125 #define I40E_ITR_INDEX_NONE		0x3
126 
127 #include <dev/pci/if_ixlreg.h>
128 
129 #define I40E_INTR_NOTX_QUEUE		0
130 #define I40E_INTR_NOTX_INTR		0
131 #define I40E_INTR_NOTX_RX_QUEUE		0
132 #define I40E_INTR_NOTX_TX_QUEUE		1
133 #define I40E_INTR_NOTX_RX_MASK		I40E_PFINT_ICR0_QUEUE_0_MASK
134 #define I40E_INTR_NOTX_TX_MASK		I40E_PFINT_ICR0_QUEUE_1_MASK
135 
/*
 * Admin Queue (AQ) descriptor, shared with firmware.  Fixed 32-byte
 * layout: four 16-bit header words (flags, opcode, data length, return
 * value), a 64-bit driver cookie echoed back by firmware, and four
 * 32-bit parameter words.  For indirect (buffered) commands, param[2]
 * and param[3] carry the high/low halves of the external buffer
 * address, per the iaq_data_* aliases noted below.
 */
struct ixl_aq_desc {
	uint16_t	iaq_flags;
#define	IXL_AQ_DD		(1U << 0)
#define	IXL_AQ_CMP		(1U << 1)
#define IXL_AQ_ERR		(1U << 2)
#define IXL_AQ_VFE		(1U << 3)
#define IXL_AQ_LB		(1U << 9)
#define IXL_AQ_RD		(1U << 10)
#define IXL_AQ_VFC		(1U << 11)
#define IXL_AQ_BUF		(1U << 12)
#define IXL_AQ_SI		(1U << 13)
#define IXL_AQ_EI		(1U << 14)
#define IXL_AQ_FE		(1U << 15)

/*
 * printf("%b")-style format for iaq_flags.  Bit 11 (\013) is
 * IXL_AQ_RD (1 << 10); it was previously mislabelled "DB" here.
 */
#define IXL_AQ_FLAGS_FMT	"\020" "\020FE" "\017EI" "\016SI" "\015BUF" \
				    "\014VFC" "\013RD" "\012LB" "\004VFE" \
				    "\003ERR" "\002CMP" "\001DD"

	uint16_t	iaq_opcode;

	uint16_t	iaq_datalen;
	uint16_t	iaq_retval;

	uint64_t	iaq_cookie;

	uint32_t	iaq_param[4];
/*	iaq_data_hi	iaq_param[2] */
/*	iaq_data_lo	iaq_param[3] */
} __packed __aligned(8);
165 
166 /* aq commands */
167 #define IXL_AQ_OP_GET_VERSION		0x0001
168 #define IXL_AQ_OP_DRIVER_VERSION	0x0002
169 #define IXL_AQ_OP_QUEUE_SHUTDOWN	0x0003
170 #define IXL_AQ_OP_SET_PF_CONTEXT	0x0004
171 #define IXL_AQ_OP_GET_AQ_ERR_REASON	0x0005
172 #define IXL_AQ_OP_REQUEST_RESOURCE	0x0008
173 #define IXL_AQ_OP_RELEASE_RESOURCE	0x0009
174 #define IXL_AQ_OP_LIST_FUNC_CAP		0x000a
175 #define IXL_AQ_OP_LIST_DEV_CAP		0x000b
176 #define IXL_AQ_OP_MAC_ADDRESS_READ	0x0107
177 #define IXL_AQ_OP_CLEAR_PXE_MODE	0x0110
178 #define IXL_AQ_OP_SWITCH_GET_CONFIG	0x0200
179 #define IXL_AQ_OP_RX_CTL_READ		0x0206
180 #define IXL_AQ_OP_RX_CTL_WRITE		0x0207
181 #define IXL_AQ_OP_ADD_VSI		0x0210
182 #define IXL_AQ_OP_UPD_VSI_PARAMS	0x0211
183 #define IXL_AQ_OP_GET_VSI_PARAMS	0x0212
184 #define IXL_AQ_OP_ADD_VEB		0x0230
185 #define IXL_AQ_OP_UPD_VEB_PARAMS	0x0231
186 #define IXL_AQ_OP_GET_VEB_PARAMS	0x0232
187 #define IXL_AQ_OP_ADD_MACVLAN		0x0250
188 #define IXL_AQ_OP_REMOVE_MACVLAN	0x0251
189 #define IXL_AQ_OP_SET_VSI_PROMISC	0x0254
190 #define IXL_AQ_OP_PHY_GET_ABILITIES	0x0600
191 #define IXL_AQ_OP_PHY_SET_CONFIG	0x0601
192 #define IXL_AQ_OP_PHY_SET_MAC_CONFIG	0x0603
193 #define IXL_AQ_OP_PHY_RESTART_AN	0x0605
194 #define IXL_AQ_OP_PHY_LINK_STATUS	0x0607
195 #define IXL_AQ_OP_PHY_SET_EVENT_MASK	0x0613
196 #define IXL_AQ_OP_PHY_SET_REGISTER	0x0628
197 #define IXL_AQ_OP_PHY_GET_REGISTER	0x0629
198 #define IXL_AQ_OP_LLDP_GET_MIB		0x0a00
199 #define IXL_AQ_OP_LLDP_MIB_CHG_EV	0x0a01
200 #define IXL_AQ_OP_LLDP_ADD_TLV		0x0a02
201 #define IXL_AQ_OP_LLDP_UPD_TLV		0x0a03
202 #define IXL_AQ_OP_LLDP_DEL_TLV		0x0a04
203 #define IXL_AQ_OP_LLDP_STOP_AGENT	0x0a05
204 #define IXL_AQ_OP_LLDP_START_AGENT	0x0a06
205 #define IXL_AQ_OP_LLDP_GET_CEE_DCBX	0x0a07
206 #define IXL_AQ_OP_LLDP_SPECIFIC_AGENT	0x0a09
207 #define IXL_AQ_OP_SET_RSS_KEY		0x0b02 /* 722 only */
208 #define IXL_AQ_OP_SET_RSS_LUT		0x0b03 /* 722 only */
209 #define IXL_AQ_OP_GET_RSS_KEY		0x0b04 /* 722 only */
210 #define IXL_AQ_OP_GET_RSS_LUT		0x0b05 /* 722 only */
211 
/*
 * MAC address block; presumably the reply buffer of
 * IXL_AQ_OP_MAC_ADDRESS_READ, with validity of each entry indicated by
 * the IXL_AQ_MAC_*_VALID bits defined below — confirm against caller.
 */
struct ixl_aq_mac_addresses {
	uint8_t		pf_lan[ETHER_ADDR_LEN];	/* PF LAN address */
	uint8_t		pf_san[ETHER_ADDR_LEN];	/* PF SAN address */
	uint8_t		port[ETHER_ADDR_LEN];	/* physical port address */
	uint8_t		pf_wol[ETHER_ADDR_LEN];	/* PF wake-on-LAN address */
} __packed;
218 
219 #define IXL_AQ_MAC_PF_LAN_VALID		(1U << 4)
220 #define IXL_AQ_MAC_PF_SAN_VALID		(1U << 5)
221 #define IXL_AQ_MAC_PORT_VALID		(1U << 6)
222 #define IXL_AQ_MAC_PF_WOL_VALID		(1U << 7)
223 
/*
 * One capability element; presumably as returned in the buffer of the
 * IXL_AQ_OP_LIST_FUNC_CAP / IXL_AQ_OP_LIST_DEV_CAP commands — confirm
 * against the parsing code.  Fixed 32-byte layout.
 */
struct ixl_aq_capability {
	uint16_t	cap_id;		/* IXL_AQ_CAP_* identifier */
#define IXL_AQ_CAP_SWITCH_MODE		0x0001
#define IXL_AQ_CAP_MNG_MODE		0x0002
#define IXL_AQ_CAP_NPAR_ACTIVE		0x0003
#define IXL_AQ_CAP_OS2BMC_CAP		0x0004
#define IXL_AQ_CAP_FUNCTIONS_VALID	0x0005
#define IXL_AQ_CAP_ALTERNATE_RAM	0x0006
#define IXL_AQ_CAP_WOL_AND_PROXY	0x0008
#define IXL_AQ_CAP_SRIOV		0x0012
#define IXL_AQ_CAP_VF			0x0013
#define IXL_AQ_CAP_VMDQ			0x0014
#define IXL_AQ_CAP_8021QBG		0x0015
#define IXL_AQ_CAP_8021QBR		0x0016
#define IXL_AQ_CAP_VSI			0x0017
#define IXL_AQ_CAP_DCB			0x0018
#define IXL_AQ_CAP_FCOE			0x0021
#define IXL_AQ_CAP_ISCSI		0x0022
#define IXL_AQ_CAP_RSS			0x0040
#define IXL_AQ_CAP_RXQ			0x0041
#define IXL_AQ_CAP_TXQ			0x0042
#define IXL_AQ_CAP_MSIX			0x0043
#define IXL_AQ_CAP_VF_MSIX		0x0044
#define IXL_AQ_CAP_FLOW_DIRECTOR	0x0045
#define IXL_AQ_CAP_1588			0x0046
#define IXL_AQ_CAP_IWARP		0x0051
#define IXL_AQ_CAP_LED			0x0061
#define IXL_AQ_CAP_SDP			0x0062
#define IXL_AQ_CAP_MDIO			0x0063
#define IXL_AQ_CAP_WSR_PROT		0x0064
#define IXL_AQ_CAP_NVM_MGMT		0x0080
#define IXL_AQ_CAP_FLEX10		0x00F1
#define IXL_AQ_CAP_CEM			0x00F2
	uint8_t		major_rev;	/* capability major revision */
	uint8_t		minor_rev;	/* capability minor revision */
	uint32_t	number;		/* count/value for this capability */
	uint32_t	logical_id;
	uint32_t	phys_id;
	uint8_t		_reserved[16];
} __packed __aligned(4);
264 
265 #define IXL_LLDP_SHUTDOWN		0x1
266 
/*
 * Header of the IXL_AQ_OP_SWITCH_GET_CONFIG reply buffer; presumably
 * followed by num_reported ixl_aq_switch_config_element entries —
 * confirm against the reply parser.
 */
struct ixl_aq_switch_config {
	uint16_t	num_reported;	/* elements in this reply */
	uint16_t	num_total;	/* total elements available */
	uint8_t		_reserved[12];
} __packed __aligned(4);
272 
/*
 * One switch topology element from the SWITCH_GET_CONFIG reply.
 * Elements are linked by SEID: each reports its own seid plus the
 * seids of its uplink and downlink neighbours.
 */
struct ixl_aq_switch_config_element {
	uint8_t		type;		/* IXL_AQ_SW_ELEM_TYPE_* */
#define IXL_AQ_SW_ELEM_TYPE_MAC		1
#define IXL_AQ_SW_ELEM_TYPE_PF		2
#define IXL_AQ_SW_ELEM_TYPE_VF		3
#define IXL_AQ_SW_ELEM_TYPE_EMP		4
#define IXL_AQ_SW_ELEM_TYPE_BMC		5
#define IXL_AQ_SW_ELEM_TYPE_PV		16
#define IXL_AQ_SW_ELEM_TYPE_VEB		17
#define IXL_AQ_SW_ELEM_TYPE_PA		18
#define IXL_AQ_SW_ELEM_TYPE_VSI		19
	uint8_t		revision;	/* element format revision */
#define IXL_AQ_SW_ELEM_REV_1		1
	uint16_t	seid;		/* this element's switch element id */

	uint16_t	uplink_seid;
	uint16_t	downlink_seid;

	uint8_t		_reserved[3];
	uint8_t		connection_type;	/* IXL_AQ_CONN_TYPE_* */
#define IXL_AQ_CONN_TYPE_REGULAR	0x1
#define IXL_AQ_CONN_TYPE_DEFAULT	0x2
#define IXL_AQ_CONN_TYPE_CASCADED	0x3

	uint16_t	scheduler_id;
	uint16_t	element_info;
} __packed __aligned(4);
300 
301 #define IXL_PHY_TYPE_SGMII		0x00
302 #define IXL_PHY_TYPE_1000BASE_KX	0x01
303 #define IXL_PHY_TYPE_10GBASE_KX4	0x02
304 #define IXL_PHY_TYPE_10GBASE_KR		0x03
305 #define IXL_PHY_TYPE_40GBASE_KR4	0x04
306 #define IXL_PHY_TYPE_XAUI		0x05
307 #define IXL_PHY_TYPE_XFI		0x06
308 #define IXL_PHY_TYPE_SFI		0x07
309 #define IXL_PHY_TYPE_XLAUI		0x08
310 #define IXL_PHY_TYPE_XLPPI		0x09
311 #define IXL_PHY_TYPE_40GBASE_CR4_CU	0x0a
312 #define IXL_PHY_TYPE_10GBASE_CR1_CU	0x0b
313 #define IXL_PHY_TYPE_10GBASE_AOC	0x0c
314 #define IXL_PHY_TYPE_40GBASE_AOC	0x0d
315 #define IXL_PHY_TYPE_100BASE_TX		0x11
316 #define IXL_PHY_TYPE_1000BASE_T		0x12
317 #define IXL_PHY_TYPE_10GBASE_T		0x13
318 #define IXL_PHY_TYPE_10GBASE_SR		0x14
319 #define IXL_PHY_TYPE_10GBASE_LR		0x15
320 #define IXL_PHY_TYPE_10GBASE_SFPP_CU	0x16
321 #define IXL_PHY_TYPE_10GBASE_CR1	0x17
322 #define IXL_PHY_TYPE_40GBASE_CR4	0x18
323 #define IXL_PHY_TYPE_40GBASE_SR4	0x19
324 #define IXL_PHY_TYPE_40GBASE_LR4	0x1a
325 #define IXL_PHY_TYPE_1000BASE_SX	0x1b
326 #define IXL_PHY_TYPE_1000BASE_LX	0x1c
327 #define IXL_PHY_TYPE_1000BASE_T_OPTICAL	0x1d
328 #define IXL_PHY_TYPE_20GBASE_KR2	0x1e
329 
330 #define IXL_PHY_TYPE_25GBASE_KR		0x1f
331 #define IXL_PHY_TYPE_25GBASE_CR		0x20
332 #define IXL_PHY_TYPE_25GBASE_SR		0x21
333 #define IXL_PHY_TYPE_25GBASE_LR		0x22
334 #define IXL_PHY_TYPE_25GBASE_AOC	0x23
335 #define IXL_PHY_TYPE_25GBASE_ACC	0x24
336 
/*
 * Description of a qualified pluggable module, embedded in
 * struct ixl_aq_phy_abilities (qualified_module[]).
 */
struct ixl_aq_module_desc {
	uint8_t		oui[3];			/* vendor OUI */
	uint8_t		_reserved1;
	uint8_t		part_number[16];
	uint8_t		revision[4];
	uint8_t		_reserved2[8];
} __packed __aligned(4);
344 
/*
 * Reply buffer for IXL_AQ_OP_PHY_GET_ABILITIES: supported PHY types,
 * link speeds, EEE/FEC capabilities, and the list of qualified
 * pluggable modules.  phy_type is a bitmap presumably indexed by the
 * IXL_PHY_TYPE_* values above — confirm against the media mapping code.
 */
struct ixl_aq_phy_abilities {
	uint32_t	phy_type;	/* supported PHY type bitmap */

	uint8_t		link_speed;	/* IXL_AQ_PHY_LINK_SPEED_* bitmap */
#define IXL_AQ_PHY_LINK_SPEED_100MB	(1 << 1)
#define IXL_AQ_PHY_LINK_SPEED_1000MB	(1 << 2)
#define IXL_AQ_PHY_LINK_SPEED_10GB	(1 << 3)
#define IXL_AQ_PHY_LINK_SPEED_40GB	(1 << 4)
#define IXL_AQ_PHY_LINK_SPEED_20GB	(1 << 5)
#define IXL_AQ_PHY_LINK_SPEED_25GB	(1 << 6)
	uint8_t		abilities;
	uint16_t	eee_capability;

	uint32_t	eeer_val;

	uint8_t		d3_lpan;
	uint8_t		phy_type_ext;	/* 25G extension of phy_type */
#define IXL_AQ_PHY_TYPE_EXT_25G_KR	0x01
#define IXL_AQ_PHY_TYPE_EXT_25G_CR	0x02
#define IXL_AQ_PHY_TYPE_EXT_25G_SR	0x04
#define IXL_AQ_PHY_TYPE_EXT_25G_LR	0x08
	uint8_t		fec_cfg_curr_mod_ext_info;
#define IXL_AQ_ENABLE_FEC_KR		0x01
#define IXL_AQ_ENABLE_FEC_RS		0x02
#define IXL_AQ_REQUEST_FEC_KR		0x04
#define IXL_AQ_REQUEST_FEC_RS		0x08
#define IXL_AQ_ENABLE_FEC_AUTO		0x10
#define IXL_AQ_MODULE_TYPE_EXT_MASK	0xe0
#define IXL_AQ_MODULE_TYPE_EXT_SHIFT	5
	uint8_t		ext_comp_code;

	uint8_t		phy_id[4];

	uint8_t		module_type[3];	/* SFF-8024 identifier in [0] */
#define IXL_SFF8024_ID_SFP		0x03
#define IXL_SFF8024_ID_QSFP		0x0c
#define IXL_SFF8024_ID_QSFP_PLUS	0x0d
#define IXL_SFF8024_ID_QSFP28		0x11
	uint8_t		qualified_module_count;
#define IXL_AQ_PHY_MAX_QMS		16
	struct ixl_aq_module_desc
			qualified_module[IXL_AQ_PHY_MAX_QMS];
} __packed __aligned(4);
388 
/*
 * Parameter layout for the PHY_LINK_STATUS command/event; "notify"
 * selects link-status-event notification (IXL_AQ_LINK_NOTIFY).
 */
struct ixl_aq_link_param {
	uint8_t		notify;		/* event notify control */
#define IXL_AQ_LINK_NOTIFY	0x03
	uint8_t		_reserved1;
	uint8_t		phy;
	uint8_t		speed;
	uint8_t		status;
	uint8_t		_reserved2[11];
} __packed __aligned(4);
398 
/*
 * iaq_param layout for the ADD_VSI/UPD_VSI_PARAMS/GET_VSI_PARAMS
 * commands.  addr_hi/addr_lo point at an external buffer (presumably a
 * struct ixl_aq_vsi_data — confirm against the command issuers).
 */
struct ixl_aq_vsi_param {
	uint16_t	uplink_seid;	/* switch element to attach to */
	uint8_t		connect_type;	/* IXL_AQ_VSI_CONN_TYPE_* */
#define IXL_AQ_VSI_CONN_TYPE_NORMAL	(0x1)
#define IXL_AQ_VSI_CONN_TYPE_DEFAULT	(0x2)
#define IXL_AQ_VSI_CONN_TYPE_CASCADED	(0x3)
	uint8_t		_reserved1;

	uint8_t		vf_id;
	uint8_t		_reserved2;
	uint16_t	vsi_flags;	/* VSI type + IXL_AQ_VSI_FLAG_* */
#define IXL_AQ_VSI_TYPE_SHIFT		0x0
#define IXL_AQ_VSI_TYPE_MASK		(0x3 << IXL_AQ_VSI_TYPE_SHIFT)
#define IXL_AQ_VSI_TYPE_VF		0x0
#define IXL_AQ_VSI_TYPE_VMDQ2		0x1
#define IXL_AQ_VSI_TYPE_PF		0x2
#define IXL_AQ_VSI_TYPE_EMP_MNG		0x3
#define IXL_AQ_VSI_FLAG_CASCADED_PV	0x4

	uint32_t	addr_hi;	/* external buffer address, high */
	uint32_t	addr_lo;	/* external buffer address, low */
} __packed __aligned(16);
421 
/*
 * iaq_param layout for IXL_AQ_OP_ADD_MACVLAN; addr_hi/addr_lo point at
 * a buffer of num_addrs struct ixl_aq_add_macvlan_elem entries.
 */
struct ixl_aq_add_macvlan {
	uint16_t	num_addrs;	/* elements in external buffer */
	uint16_t	seid0;		/* target VSI seid */
	uint16_t	seid1;
	uint16_t	seid2;
	uint32_t	addr_hi;	/* external buffer address, high */
	uint32_t	addr_lo;	/* external buffer address, low */
} __packed __aligned(16);
430 
/*
 * One filter entry in the ADD_MACVLAN external buffer: a MAC address,
 * an optional VLAN, and match-control flags.
 */
struct ixl_aq_add_macvlan_elem {
	uint8_t		macaddr[6];
	uint16_t	vlan;
	uint16_t	flags;		/* match controls below */
#define IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH	0x0001
#define IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN	0x0004
	uint16_t	queue;
	uint32_t	_reserved;
} __packed __aligned(16);
440 
/*
 * iaq_param layout for IXL_AQ_OP_REMOVE_MACVLAN; addr_hi/addr_lo point
 * at a buffer of num_addrs struct ixl_aq_remove_macvlan_elem entries.
 */
struct ixl_aq_remove_macvlan {
	uint16_t	num_addrs;	/* elements in external buffer */
	uint16_t	seid0;		/* target VSI seid */
	uint16_t	seid1;
	uint16_t	seid2;
	uint32_t	addr_hi;	/* external buffer address, high */
	uint32_t	addr_lo;	/* external buffer address, low */
} __packed __aligned(16);
449 
/*
 * One filter entry in the REMOVE_MACVLAN external buffer.  Note flags
 * here is 8 bits wide, unlike the 16-bit flags of the add element.
 */
struct ixl_aq_remove_macvlan_elem {
	uint8_t		macaddr[6];
	uint16_t	vlan;
	uint8_t		flags;		/* match controls below */
#define IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH	0x0001
#define IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN	0x0008
	uint8_t		_reserved[7];
} __packed __aligned(16);
458 
/*
 * iaq_param layout of the reply to the VSI commands: the seid and VSI
 * number firmware assigned, plus VSI resource accounting.
 */
struct ixl_aq_vsi_reply {
	uint16_t	seid;		/* switch element id of the VSI */
	uint16_t	vsi_number;

	uint16_t	vsis_used;
	uint16_t	vsis_free;

	uint32_t	addr_hi;	/* echoed buffer address, high */
	uint32_t	addr_lo;	/* echoed buffer address, low */
} __packed __aligned(16);
469 
/*
 * VSI context buffer exchanged with firmware for the ADD/UPD/GET VSI
 * commands.  Exactly 128 bytes (see CTASSERT below): the first 96 bytes
 * are written by software, the last 32 by firmware.  valid_sections
 * selects which of the following sections firmware should apply.
 */
struct ixl_aq_vsi_data {
	/* first 96 byte are written by SW */
	uint16_t	valid_sections;
#define IXL_AQ_VSI_VALID_SWITCH		(1 << 0)
#define IXL_AQ_VSI_VALID_SECURITY	(1 << 1)
#define IXL_AQ_VSI_VALID_VLAN		(1 << 2)
#define IXL_AQ_VSI_VALID_CAS_PV		(1 << 3)
#define IXL_AQ_VSI_VALID_INGRESS_UP	(1 << 4)
#define IXL_AQ_VSI_VALID_EGRESS_UP	(1 << 5)
#define IXL_AQ_VSI_VALID_QUEUE_MAP	(1 << 6)
#define IXL_AQ_VSI_VALID_QUEUE_OPT	(1 << 7)
#define IXL_AQ_VSI_VALID_OUTER_UP	(1 << 8)
#define IXL_AQ_VSI_VALID_SCHED		(1 << 9)
	/* switch section */
	uint16_t	switch_id;
#define IXL_AQ_VSI_SWITCH_ID_SHIFT	0
#define IXL_AQ_VSI_SWITCH_ID_MASK	(0xfff << IXL_AQ_VSI_SWITCH_ID_SHIFT)
#define IXL_AQ_VSI_SWITCH_NOT_STAG	(1 << 12)
#define IXL_AQ_VSI_SWITCH_LOCAL_LB	(1 << 14)

	uint8_t		_reserved1[2];
	/* security section */
	uint8_t		sec_flags;
#define IXL_AQ_VSI_SEC_ALLOW_DEST_OVRD	(1 << 0)
#define IXL_AQ_VSI_SEC_ENABLE_VLAN_CHK	(1 << 1)
#define IXL_AQ_VSI_SEC_ENABLE_MAC_CHK	(1 << 2)
	uint8_t		_reserved2;

	/* vlan section */
	uint16_t	pvid;
	uint16_t	fcoe_pvid;

	uint8_t		port_vlan_flags;
#define IXL_AQ_VSI_PVLAN_MODE_SHIFT	0
#define IXL_AQ_VSI_PVLAN_MODE_MASK	(0x3 << IXL_AQ_VSI_PVLAN_MODE_SHIFT)
#define IXL_AQ_VSI_PVLAN_MODE_TAGGED	(0x1 << IXL_AQ_VSI_PVLAN_MODE_SHIFT)
#define IXL_AQ_VSI_PVLAN_MODE_UNTAGGED	(0x2 << IXL_AQ_VSI_PVLAN_MODE_SHIFT)
#define IXL_AQ_VSI_PVLAN_MODE_ALL	(0x3 << IXL_AQ_VSI_PVLAN_MODE_SHIFT)
#define IXL_AQ_VSI_PVLAN_INSERT_PVID	(0x4 << IXL_AQ_VSI_PVLAN_MODE_SHIFT)
#define IXL_AQ_VSI_PVLAN_EMOD_SHIFT	0x3
#define IXL_AQ_VSI_PVLAN_EMOD_MASK	(0x3 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT)
#define IXL_AQ_VSI_PVLAN_EMOD_STR_BOTH	(0x0 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT)
#define IXL_AQ_VSI_PVLAN_EMOD_STR_UP	(0x1 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT)
#define IXL_AQ_VSI_PVLAN_EMOD_STR	(0x2 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT)
#define IXL_AQ_VSI_PVLAN_EMOD_NOTHING	(0x3 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT)
	uint8_t		_reserved3[3];

	/* ingress egress up section */
	uint32_t	ingress_table;	/* 3 bits per user priority */
#define IXL_AQ_VSI_UP_SHIFT(_up)	((_up) * 3)
/* was missing a closing parenthesis, breaking any expansion */
#define IXL_AQ_VSI_UP_MASK(_up)		(0x7 << IXL_AQ_VSI_UP_SHIFT(_up))
	uint32_t	egress_table;

	/* cascaded pv section */
	uint16_t	cas_pv_tag;
	uint8_t		cas_pv_flags;
#define IXL_AQ_VSI_CAS_PV_TAGX_SHIFT	0
#define IXL_AQ_VSI_CAS_PV_TAGX_MASK	(0x3 << IXL_AQ_VSI_CAS_PV_TAGX_SHIFT)
#define IXL_AQ_VSI_CAS_PV_TAGX_LEAVE	(0x0 << IXL_AQ_VSI_CAS_PV_TAGX_SHIFT)
#define IXL_AQ_VSI_CAS_PV_TAGX_REMOVE	(0x1 << IXL_AQ_VSI_CAS_PV_TAGX_SHIFT)
#define IXL_AQ_VSI_CAS_PV_TAGX_COPY	(0x2 << IXL_AQ_VSI_CAS_PV_TAGX_SHIFT)
#define IXL_AQ_VSI_CAS_PV_INSERT_TAG	(1 << 4)
#define IXL_AQ_VSI_CAS_PV_ETAG_PRUNE	(1 << 5)
#define IXL_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG \
					(1 << 6)
	uint8_t		_reserved4;

	/* queue mapping section */
	uint16_t	mapping_flags;
#define IXL_AQ_VSI_QUE_MAP_MASK		0x1
#define IXL_AQ_VSI_QUE_MAP_CONTIG	0x0
#define IXL_AQ_VSI_QUE_MAP_NONCONTIG	0x1
	uint16_t	queue_mapping[16];
#define IXL_AQ_VSI_QUEUE_SHIFT		0x0
#define IXL_AQ_VSI_QUEUE_MASK		(0x7ff << IXL_AQ_VSI_QUEUE_SHIFT)
	uint16_t	tc_mapping[8];
#define IXL_AQ_VSI_TC_Q_OFFSET_SHIFT	0
#define IXL_AQ_VSI_TC_Q_OFFSET_MASK	(0x1ff << IXL_AQ_VSI_TC_Q_OFFSET_SHIFT)
#define IXL_AQ_VSI_TC_Q_NUMBER_SHIFT	9
#define IXL_AQ_VSI_TC_Q_NUMBER_MASK	(0x7 << IXL_AQ_VSI_TC_Q_NUMBER_SHIFT)

	/* queueing option section */
	uint8_t		queueing_opt_flags;
#define IXL_AQ_VSI_QUE_OPT_MCAST_UDP_EN	(1 << 2)
#define IXL_AQ_VSI_QUE_OPT_UCAST_UDP_EN	(1 << 3)
#define IXL_AQ_VSI_QUE_OPT_TCP_EN	(1 << 4)
#define IXL_AQ_VSI_QUE_OPT_FCOE_EN	(1 << 5)
#define IXL_AQ_VSI_QUE_OPT_RSS_LUT_PF	0
#define IXL_AQ_VSI_QUE_OPT_RSS_LUT_VSI	(1 << 6)
	uint8_t		_reserved5[3];

	/* scheduler section */
	uint8_t		up_enable_bits;
	uint8_t		_reserved6;

	/* outer up section */
	uint32_t	outer_up_table; /* same as ingress/egress tables */
	uint8_t		_reserved7[8];

	/* last 32 bytes are written by FW */
	uint16_t	qs_handle[8];
#define IXL_AQ_VSI_QS_HANDLE_INVALID	0xffff
	uint16_t	stat_counter_idx;
	uint16_t	sched_id;

	uint8_t		_reserved8[12];
} __packed __aligned(8);
577 
578 CTASSERT(sizeof(struct ixl_aq_vsi_data) == 128);
579 
/*
 * iaq_param layout for IXL_AQ_OP_SET_VSI_PROMISC: "flags" sets the
 * promiscuous modes, "valid_flags" masks which of those bits firmware
 * should act on; both use the IXL_AQ_VSI_PROMISC_FLAG_* bits.
 */
struct ixl_aq_vsi_promisc_param {
	uint16_t	flags;		/* requested promisc settings */
	uint16_t	valid_flags;	/* which flag bits are meaningful */
#define IXL_AQ_VSI_PROMISC_FLAG_UCAST	(1 << 0)
#define IXL_AQ_VSI_PROMISC_FLAG_MCAST	(1 << 1)
#define IXL_AQ_VSI_PROMISC_FLAG_BCAST	(1 << 2)
#define IXL_AQ_VSI_PROMISC_FLAG_DFLT	(1 << 3)
#define IXL_AQ_VSI_PROMISC_FLAG_VLAN	(1 << 4)
#define IXL_AQ_VSI_PROMISC_FLAG_RXONLY	(1 << 15)

	uint16_t	seid;		/* target VSI seid */
#define IXL_AQ_VSI_PROMISC_SEID_VALID	(1 << 15)
	uint16_t	vlan;		/* optional VLAN restriction */
#define IXL_AQ_VSI_PROMISC_VLAN_VALID	(1 << 15)
	uint32_t	reserved[2];
} __packed __aligned(8);
596 
/*
 * iaq_param layout for IXL_AQ_OP_ADD_VEB: attach a VEB between the
 * given uplink and downlink switch elements.
 */
struct ixl_aq_veb_param {
	uint16_t	uplink_seid;
	uint16_t	downlink_seid;
	uint16_t	veb_flags;	/* IXL_AQ_ADD_VEB_* */
#define IXL_AQ_ADD_VEB_FLOATING		(1 << 0)
#define IXL_AQ_ADD_VEB_PORT_TYPE_SHIFT	1
#define IXL_AQ_ADD_VEB_PORT_TYPE_MASK	(0x3 << IXL_AQ_ADD_VEB_PORT_TYPE_SHIFT)
#define IXL_AQ_ADD_VEB_PORT_TYPE_DEFAULT \
					(0x2 << IXL_AQ_ADD_VEB_PORT_TYPE_SHIFT)
#define IXL_AQ_ADD_VEB_PORT_TYPE_DATA	(0x4 << IXL_AQ_ADD_VEB_PORT_TYPE_SHIFT)
#define IXL_AQ_ADD_VEB_ENABLE_L2_FILTER	(1 << 3) /* deprecated */
#define IXL_AQ_ADD_VEB_DISABLE_STATS	(1 << 4)
	uint8_t		enable_tcs;	/* traffic class enable bitmap */
	uint8_t		_reserved[9];
} __packed __aligned(16);
612 
/*
 * iaq_param layout of the ADD_VEB reply: the seid firmware assigned to
 * the new VEB plus VEB resource accounting.  On error, veb_seid
 * carries the IXL_AQ_VEB_ERR_FLAG_* bits instead of a seid.
 */
struct ixl_aq_veb_reply {
	uint16_t	_reserved1;
	uint16_t	_reserved2;
	uint16_t	_reserved3;
	uint16_t	switch_seid;
	uint16_t	veb_seid;
#define IXL_AQ_VEB_ERR_FLAG_NO_VEB	(1 << 0)
#define IXL_AQ_VEB_ERR_FLAG_NO_SCHED	(1 << 1)
#define IXL_AQ_VEB_ERR_FLAG_NO_COUNTER	(1 << 2)
/* stray trailing ';' removed: it made this macro unusable in expressions */
#define IXL_AQ_VEB_ERR_FLAG_NO_ENTRY	(1 << 3)
	uint16_t	statistic_index;
	uint16_t	vebs_used;
	uint16_t	vebs_free;
} __packed __aligned(16);
627 
628 /* GET PHY ABILITIES param[0] */
629 #define IXL_AQ_PHY_REPORT_QUAL		(1 << 0)
630 #define IXL_AQ_PHY_REPORT_INIT		(1 << 1)
631 
/*
 * iaq_param layout for the PHY_SET_REGISTER/PHY_GET_REGISTER commands:
 * read or write one register on an internal/external PHY or module.
 */
struct ixl_aq_phy_reg_access {
	uint8_t		phy_iface;	/* IXL_AQ_PHY_IF_* */
#define IXL_AQ_PHY_IF_INTERNAL		0
#define IXL_AQ_PHY_IF_EXTERNAL		1
#define IXL_AQ_PHY_IF_MODULE		2
	uint8_t		dev_addr;	/* device address on the interface */
	uint16_t	recall;
#define IXL_AQ_PHY_QSFP_DEV_ADDR	0
#define IXL_AQ_PHY_QSFP_LAST		1
	uint32_t	reg;		/* register number */
	uint32_t	val;		/* value written / value read back */
	uint32_t	_reserved2;
} __packed __aligned(16);
645 
646 /* RESTART_AN param[0] */
647 #define IXL_AQ_PHY_RESTART_AN		(1 << 1)
648 #define IXL_AQ_PHY_LINK_ENABLE		(1 << 2)
649 
/*
 * iaq_param layout for the PHY_LINK_STATUS command and its link event:
 * on command only command_flags is set (link-status-event enable); the
 * response/event fills in the PHY type, speed, link/AN/ext status,
 * loopback, max frame size, and FEC/pacing/power fields.
 */
struct ixl_aq_link_status { /* this occupies the iaq_param space */
	uint16_t	command_flags; /* only field set on command */
#define IXL_AQ_LSE_MASK			0x3
#define IXL_AQ_LSE_NOP			0x0
#define IXL_AQ_LSE_DISABLE		0x2
#define IXL_AQ_LSE_ENABLE		0x3
#define IXL_AQ_LSE_IS_ENABLED		0x1 /* only set in response */
	uint8_t		phy_type;	/* IXL_PHY_TYPE_* value */
	uint8_t		link_speed;	/* IXL_AQ_LINK_SPEED_* bitmap */
#define IXL_AQ_LINK_SPEED_1GB		(1 << 2)
#define IXL_AQ_LINK_SPEED_10GB		(1 << 3)
#define IXL_AQ_LINK_SPEED_40GB		(1 << 4)
#define IXL_AQ_LINK_SPEED_25GB		(1 << 6)
	uint8_t		link_info;	/* link up/fault bits */
#define IXL_AQ_LINK_UP_FUNCTION		0x01
#define IXL_AQ_LINK_FAULT		0x02
#define IXL_AQ_LINK_FAULT_TX		0x04
#define IXL_AQ_LINK_FAULT_RX		0x08
#define IXL_AQ_LINK_FAULT_REMOTE	0x10
#define IXL_AQ_LINK_UP_PORT		0x20
#define IXL_AQ_MEDIA_AVAILABLE		0x40
#define IXL_AQ_SIGNAL_DETECT		0x80
	uint8_t		an_info;	/* autonegotiation/pause bits */
#define IXL_AQ_AN_COMPLETED		0x01
#define IXL_AQ_LP_AN_ABILITY		0x02
#define IXL_AQ_PD_FAULT			0x04
#define IXL_AQ_FEC_EN			0x08
#define IXL_AQ_PHY_LOW_POWER		0x10
#define IXL_AQ_LINK_PAUSE_TX		0x20
#define IXL_AQ_LINK_PAUSE_RX		0x40
#define IXL_AQ_QUALIFIED_MODULE		0x80

	uint8_t		ext_info;
#define IXL_AQ_LINK_PHY_TEMP_ALARM	0x01
#define IXL_AQ_LINK_XCESSIVE_ERRORS	0x02
#define IXL_AQ_LINK_TX_SHIFT		0x02
#define IXL_AQ_LINK_TX_MASK		(0x03 << IXL_AQ_LINK_TX_SHIFT)
#define IXL_AQ_LINK_TX_ACTIVE		0x00
#define IXL_AQ_LINK_TX_DRAINED		0x01
#define IXL_AQ_LINK_TX_FLUSHED		0x03
#define IXL_AQ_LINK_FORCED_40G		0x10
/* 25G Error Codes */
#define IXL_AQ_25G_NO_ERR		0X00
#define IXL_AQ_25G_NOT_PRESENT		0X01
#define IXL_AQ_25G_NVM_CRC_ERR		0X02
#define IXL_AQ_25G_SBUS_UCODE_ERR	0X03
#define IXL_AQ_25G_SERDES_UCODE_ERR	0X04
#define IXL_AQ_25G_NIMB_UCODE_ERR	0X05
	uint8_t		loopback;
	uint16_t	max_frame_size;

	uint8_t		config;		/* FEC/CRC/pacing config bits */
#define IXL_AQ_CONFIG_FEC_KR_ENA	0x01
#define IXL_AQ_CONFIG_FEC_RS_ENA	0x02
#define IXL_AQ_CONFIG_CRC_ENA	0x04
#define IXL_AQ_CONFIG_PACING_MASK	0x78
	uint8_t		power_desc;	/* module power class */
#define IXL_AQ_LINK_POWER_CLASS_1	0x00
#define IXL_AQ_LINK_POWER_CLASS_2	0x01
#define IXL_AQ_LINK_POWER_CLASS_3	0x02
#define IXL_AQ_LINK_POWER_CLASS_4	0x03
#define IXL_AQ_PWR_CLASS_MASK		0x03

	uint8_t		reserved[4];
} __packed __aligned(4);
715 /* event mask command flags for param[2] */
716 #define IXL_AQ_PHY_EV_MASK		0x3ff
717 #define IXL_AQ_PHY_EV_LINK_UPDOWN	(1 << 1)
718 #define IXL_AQ_PHY_EV_MEDIA_NA		(1 << 2)
719 #define IXL_AQ_PHY_EV_LINK_FAULT	(1 << 3)
720 #define IXL_AQ_PHY_EV_PHY_TEMP_ALARM	(1 << 4)
721 #define IXL_AQ_PHY_EV_EXCESS_ERRORS	(1 << 5)
722 #define IXL_AQ_PHY_EV_SIGNAL_DETECT	(1 << 6)
723 #define IXL_AQ_PHY_EV_AN_COMPLETED	(1 << 7)
724 #define IXL_AQ_PHY_EV_MODULE_QUAL_FAIL	(1 << 8)
725 #define IXL_AQ_PHY_EV_PORT_TX_SUSPENDED	(1 << 9)
726 
/*
 * iaq_param layout for the SET/GET RSS LUT commands (X722 only, per
 * the opcode annotations above); addr_hi/addr_lo point at the lookup
 * table buffer.
 */
struct ixl_aq_rss_lut { /* 722 */
#define IXL_AQ_SET_RSS_LUT_VSI_VALID	(1 << 15)
#define IXL_AQ_SET_RSS_LUT_VSI_ID_SHIFT	0
#define IXL_AQ_SET_RSS_LUT_VSI_ID_MASK	\
	(0x3FF << IXL_AQ_SET_RSS_LUT_VSI_ID_SHIFT)

	uint16_t	vsi_number;	/* VSI id + VSI_VALID bit above */
#define IXL_AQ_SET_RSS_LUT_TABLE_TYPE_SHIFT 0
#define IXL_AQ_SET_RSS_LUT_TABLE_TYPE_MASK \
	(0x1 << IXL_AQ_SET_RSS_LUT_TABLE_TYPE_SHIFT)
#define IXL_AQ_SET_RSS_LUT_TABLE_TYPE_VSI	0
#define IXL_AQ_SET_RSS_LUT_TABLE_TYPE_PF	1
	uint16_t	flags;		/* table type bits above */
	uint8_t		_reserved[4];
	uint32_t	addr_hi;	/* LUT buffer address, high */
	uint32_t	addr_lo;	/* LUT buffer address, low */
} __packed __aligned(16);
744 
/*
 * iaq_param layout for the SET/GET RSS key commands (X722 only);
 * addr_hi/addr_lo point at the key buffer.
 */
struct ixl_aq_get_set_rss_key { /* 722 */
#define IXL_AQ_SET_RSS_KEY_VSI_VALID	(1 << 15)
#define IXL_AQ_SET_RSS_KEY_VSI_ID_SHIFT	0
#define IXL_AQ_SET_RSS_KEY_VSI_ID_MASK	\
	(0x3FF << IXL_AQ_SET_RSS_KEY_VSI_ID_SHIFT)
	uint16_t	vsi_number;	/* VSI id + VSI_VALID bit above */
	uint8_t		_reserved[6];
	uint32_t	addr_hi;	/* key buffer address, high */
	uint32_t	addr_lo;	/* key buffer address, low */
} __packed __aligned(16);
755 
756 /* aq response codes */
757 #define IXL_AQ_RC_OK			0  /* success */
758 #define IXL_AQ_RC_EPERM			1  /* Operation not permitted */
759 #define IXL_AQ_RC_ENOENT		2  /* No such element */
760 #define IXL_AQ_RC_ESRCH			3  /* Bad opcode */
761 #define IXL_AQ_RC_EINTR			4  /* operation interrupted */
762 #define IXL_AQ_RC_EIO			5  /* I/O error */
763 #define IXL_AQ_RC_ENXIO			6  /* No such resource */
764 #define IXL_AQ_RC_E2BIG			7  /* Arg too long */
765 #define IXL_AQ_RC_EAGAIN		8  /* Try again */
766 #define IXL_AQ_RC_ENOMEM		9  /* Out of memory */
767 #define IXL_AQ_RC_EACCES		10 /* Permission denied */
768 #define IXL_AQ_RC_EFAULT		11 /* Bad address */
769 #define IXL_AQ_RC_EBUSY			12 /* Device or resource busy */
770 #define IXL_AQ_RC_EEXIST		13 /* object already exists */
771 #define IXL_AQ_RC_EINVAL		14 /* invalid argument */
772 #define IXL_AQ_RC_ENOTTY		15 /* not a typewriter */
773 #define IXL_AQ_RC_ENOSPC		16 /* No space or alloc failure */
774 #define IXL_AQ_RC_ENOSYS		17 /* function not implemented */
775 #define IXL_AQ_RC_ERANGE		18 /* parameter out of range */
776 #define IXL_AQ_RC_EFLUSHED		19 /* cmd flushed due to prev error */
777 #define IXL_AQ_RC_BAD_ADDR		20 /* contains a bad pointer */
778 #define IXL_AQ_RC_EMODE			21 /* not allowed in current mode */
779 #define IXL_AQ_RC_EFBIG			22 /* file too large */
780 
/*
 * Transmit descriptor: a 64-bit buffer address plus a 64-bit command
 * word packing descriptor type (DTYPE), command bits (CMD), the
 * MAC/IP/L4 header lengths used for checksum offload, the buffer size,
 * and the L2 tag.  Note the BSIZE and length fields sit above bit 31,
 * hence the ULL-suffixed masks.
 */
struct ixl_tx_desc {
	uint64_t		addr;	/* DMA address of the buffer */
	uint64_t		cmd;	/* dtype/cmd/offsets/bsize/l2tag */
#define IXL_TX_DESC_DTYPE_SHIFT		0
#define IXL_TX_DESC_DTYPE_MASK		(0xfULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_DATA		(0x0ULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_NOP		(0x1ULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_CONTEXT	(0x1ULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_FCOE_CTX	(0x2ULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_FD		(0x8ULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_DDP_CTX	(0x9ULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_FLEX_DATA	(0xbULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_FLEX_CTX_1	(0xcULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_FLEX_CTX_2	(0xdULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_DONE		(0xfULL << IXL_TX_DESC_DTYPE_SHIFT)

#define IXL_TX_DESC_CMD_SHIFT		4
#define IXL_TX_DESC_CMD_MASK		(0x3ffULL << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_EOP		(0x001 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_RS		(0x002 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_ICRC		(0x004 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_IL2TAG1		(0x008 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_DUMMY		(0x010 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_IIPT_MASK	(0x060 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_IIPT_NONIP	(0x000 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_IIPT_IPV6	(0x020 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_IIPT_IPV4	(0x040 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_IIPT_IPV4_CSUM	(0x060 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_FCOET		(0x080 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_L4T_EOFT_MASK	(0x300 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_L4T_EOFT_UNK	(0x000 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_L4T_EOFT_TCP	(0x100 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_L4T_EOFT_SCTP	(0x200 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_L4T_EOFT_UDP	(0x300 << IXL_TX_DESC_CMD_SHIFT)

#define IXL_TX_DESC_MACLEN_SHIFT	16
#define IXL_TX_DESC_MACLEN_MASK		(0x7fULL << IXL_TX_DESC_MACLEN_SHIFT)
#define IXL_TX_DESC_IPLEN_SHIFT		23
#define IXL_TX_DESC_IPLEN_MASK		(0x7fULL << IXL_TX_DESC_IPLEN_SHIFT)
#define IXL_TX_DESC_L4LEN_SHIFT		30
#define IXL_TX_DESC_L4LEN_MASK		(0xfULL << IXL_TX_DESC_L4LEN_SHIFT)
#define IXL_TX_DESC_FCLEN_SHIFT		30
#define IXL_TX_DESC_FCLEN_MASK		(0xfULL << IXL_TX_DESC_FCLEN_SHIFT)

#define IXL_TX_DESC_BSIZE_SHIFT		34
#define IXL_TX_DESC_BSIZE_MAX		0x3fffULL
#define IXL_TX_DESC_BSIZE_MASK		\
	(IXL_TX_DESC_BSIZE_MAX << IXL_TX_DESC_BSIZE_SHIFT)

#define IXL_TX_DESC_L2TAG1_SHIFT	48
} __packed __aligned(16);
832 
/* 16-byte receive "read" descriptor: buffer addresses given to hw. */
struct ixl_rx_rd_desc_16 {
	uint64_t		paddr; /* packet addr */
	uint64_t		haddr; /* header addr */
} __packed __aligned(16);
837 
/* 32-byte variant of the receive "read" descriptor. */
struct ixl_rx_rd_desc_32 {
	uint64_t		paddr; /* packet addr */
	uint64_t		haddr; /* header addr */
	uint64_t		_reserved1;
	uint64_t		_reserved2;
} __packed __aligned(16);
844 
/*
 * 16-byte receive "write-back" descriptor, overlaying the read
 * descriptor once hardware completes a packet.  qword1 packs the
 * status/error bits, packet type, and packet/header lengths; the
 * PTYPE/PLEN/HLEN fields sit above bit 31, hence the ULL masks.
 */
struct ixl_rx_wb_desc_16 {
	uint16_t		_reserved1;
	uint16_t		l2tag1;		/* stripped VLAN tag */
	uint32_t		filter_status;	/* e.g. RSS hash, per FLTSTAT */
	uint64_t		qword1;		/* status/error/ptype/lengths */
#define IXL_RX_DESC_DD			(1 << 0)
#define IXL_RX_DESC_EOP			(1 << 1)
#define IXL_RX_DESC_L2TAG1P		(1 << 2)
#define IXL_RX_DESC_L3L4P		(1 << 3)
#define IXL_RX_DESC_CRCP		(1 << 4)
#define IXL_RX_DESC_TSYNINDX_SHIFT	5	/* TSYNINDX */
#define IXL_RX_DESC_TSYNINDX_MASK	(7 << IXL_RX_DESC_TSYNINDX_SHIFT)
#define IXL_RX_DESC_UMB_SHIFT		9
#define IXL_RX_DESC_UMB_MASK		(0x3 << IXL_RX_DESC_UMB_SHIFT)
#define IXL_RX_DESC_UMB_UCAST		(0x0 << IXL_RX_DESC_UMB_SHIFT)
#define IXL_RX_DESC_UMB_MCAST		(0x1 << IXL_RX_DESC_UMB_SHIFT)
#define IXL_RX_DESC_UMB_BCAST		(0x2 << IXL_RX_DESC_UMB_SHIFT)
#define IXL_RX_DESC_UMB_MIRROR		(0x3 << IXL_RX_DESC_UMB_SHIFT)
#define IXL_RX_DESC_FLM			(1 << 11)
#define IXL_RX_DESC_FLTSTAT_SHIFT	12
#define IXL_RX_DESC_FLTSTAT_MASK	(0x3 << IXL_RX_DESC_FLTSTAT_SHIFT)
#define IXL_RX_DESC_FLTSTAT_NODATA	(0x0 << IXL_RX_DESC_FLTSTAT_SHIFT)
#define IXL_RX_DESC_FLTSTAT_FDFILTID	(0x1 << IXL_RX_DESC_FLTSTAT_SHIFT)
#define IXL_RX_DESC_FLTSTAT_RSS		(0x3 << IXL_RX_DESC_FLTSTAT_SHIFT)
#define IXL_RX_DESC_LPBK		(1 << 14)
#define IXL_RX_DESC_IPV6EXTADD		(1 << 15)
#define IXL_RX_DESC_INT_UDP_0		(1 << 18)

#define IXL_RX_DESC_RXE			(1 << 19)
#define IXL_RX_DESC_HBO			(1 << 21)
#define IXL_RX_DESC_IPE			(1 << 22)
#define IXL_RX_DESC_L4E			(1 << 23)
#define IXL_RX_DESC_EIPE		(1 << 24)
#define IXL_RX_DESC_OVERSIZE		(1 << 25)

#define IXL_RX_DESC_PTYPE_SHIFT		30
#define IXL_RX_DESC_PTYPE_MASK		(0xffULL << IXL_RX_DESC_PTYPE_SHIFT)

#define IXL_RX_DESC_PLEN_SHIFT		38
#define IXL_RX_DESC_PLEN_MASK		(0x3fffULL << IXL_RX_DESC_PLEN_SHIFT)
#define IXL_RX_DESC_HLEN_SHIFT		42
#define IXL_RX_DESC_HLEN_MASK		(0x7ffULL << IXL_RX_DESC_HLEN_SHIFT)
} __packed __aligned(16);
888 
/* 32-byte rx write-back descriptor, kept as raw qwords (fields not
 * broken out here). */
struct ixl_rx_wb_desc_32 {
	uint64_t		qword0;
	uint64_t		qword1;
	uint64_t		qword2;
	uint64_t		qword3;
} __packed __aligned(16);
895 
/* NOTE(review): presumably the max DMA segments per tx packet — confirm
 * against the tx dmamap setup */
#define IXL_TX_PKT_DESCS		8
#define IXL_TX_QUEUE_ALIGN		128
#define IXL_RX_QUEUE_ALIGN		128

#define IXL_HARDMTU			9712 /* 9726 - ETHER_HDR_LEN */

#define IXL_PCIREG			PCI_MAPREG_START

/* interrupt throttling rate (ITR) index selectors */
#define IXL_ITR0			0x0
#define IXL_ITR1			0x1
#define IXL_ITR2			0x2
#define IXL_NOITR			0x2

/* admin queue: ring entries, ring alignment, rx buffer size */
#define IXL_AQ_NUM			256
#define IXL_AQ_MASK			(IXL_AQ_NUM - 1)
#define IXL_AQ_ALIGN			64 /* ring base alignment */
#define IXL_AQ_BUFLEN			4096
913 
/* Packet Classifier Types for filters */
/* bits 0-28 are reserved for future use */
#define IXL_PCT_NONF_IPV4_UDP_UCAST	(1ULL << 29)	/* 722 */
#define IXL_PCT_NONF_IPV4_UDP_MCAST	(1ULL << 30)	/* 722 */
#define IXL_PCT_NONF_IPV4_UDP		(1ULL << 31)
#define IXL_PCT_NONF_IPV4_TCP_SYN_NOACK	(1ULL << 32)	/* 722 */
#define IXL_PCT_NONF_IPV4_TCP		(1ULL << 33)
#define IXL_PCT_NONF_IPV4_SCTP		(1ULL << 34)
#define IXL_PCT_NONF_IPV4_OTHER		(1ULL << 35)
#define IXL_PCT_FRAG_IPV4		(1ULL << 36)
/* bits 37-38 are reserved for future use */
#define IXL_PCT_NONF_IPV6_UDP_UCAST	(1ULL << 39)	/* 722 */
#define IXL_PCT_NONF_IPV6_UDP_MCAST	(1ULL << 40)	/* 722 */
#define IXL_PCT_NONF_IPV6_UDP		(1ULL << 41)
#define IXL_PCT_NONF_IPV6_TCP_SYN_NOACK	(1ULL << 42)	/* 722 */
#define IXL_PCT_NONF_IPV6_TCP		(1ULL << 43)
#define IXL_PCT_NONF_IPV6_SCTP		(1ULL << 44)
#define IXL_PCT_NONF_IPV6_OTHER		(1ULL << 45)
#define IXL_PCT_FRAG_IPV6		(1ULL << 46)
/* bit 47 is reserved for future use */
#define IXL_PCT_FCOE_OX			(1ULL << 48)
#define IXL_PCT_FCOE_RX			(1ULL << 49)
#define IXL_PCT_FCOE_OTHER		(1ULL << 50)
/* bits 51-62 are reserved for future use */
#define IXL_PCT_L2_PAYLOAD		(1ULL << 63)

/*
 * Default RSS hash-enable (HENA) bit sets.  Fully parenthesized so the
 * macros are safe to use inside larger expressions: the previous
 * definitions expanded to a bare bitwise-OR chain, which binds more
 * loosely than &, ==, or ?: at a use site.
 */
#define IXL_RSS_HENA_BASE_DEFAULT (		\
	IXL_PCT_NONF_IPV4_UDP |			\
	IXL_PCT_NONF_IPV4_TCP |			\
	IXL_PCT_NONF_IPV4_SCTP |		\
	IXL_PCT_NONF_IPV4_OTHER |		\
	IXL_PCT_FRAG_IPV4 |			\
	IXL_PCT_NONF_IPV6_UDP |			\
	IXL_PCT_NONF_IPV6_TCP |			\
	IXL_PCT_NONF_IPV6_SCTP |		\
	IXL_PCT_NONF_IPV6_OTHER |		\
	IXL_PCT_FRAG_IPV6 |			\
	IXL_PCT_L2_PAYLOAD)

#define IXL_RSS_HENA_BASE_710		IXL_RSS_HENA_BASE_DEFAULT
/* the 722 additionally hashes on the finer-grained 722-only types */
#define IXL_RSS_HENA_BASE_722 (			\
	IXL_RSS_HENA_BASE_DEFAULT |		\
	IXL_PCT_NONF_IPV4_UDP_UCAST |		\
	IXL_PCT_NONF_IPV4_UDP_MCAST |		\
	IXL_PCT_NONF_IPV6_UDP_UCAST |		\
	IXL_PCT_NONF_IPV6_UDP_MCAST |		\
	IXL_PCT_NONF_IPV4_TCP_SYN_NOACK |	\
	IXL_PCT_NONF_IPV6_TCP_SYN_NOACK)
961 
/* host memory cache (HMC) backing-page geometry */
#define IXL_HMC_ROUNDUP			512
#define IXL_HMC_PGSIZE			4096
#define IXL_HMC_DVASZ			sizeof(uint64_t)
#define IXL_HMC_PGS			(IXL_HMC_PGSIZE / IXL_HMC_DVASZ)
#define IXL_HMC_L2SZ			(IXL_HMC_PGSIZE * IXL_HMC_PGS)
#define IXL_HMC_PDVALID			1ULL
968 
/*
 * register offsets and masks for one admin queue pair (send "atq" and
 * receive "arq"), kept in a table so functions with different register
 * layouts can share the same code paths (see ixl_pf_aq_regs).
 */
struct ixl_aq_regs {
	bus_size_t		atq_tail;
	bus_size_t		atq_head;
	bus_size_t		atq_len;
	bus_size_t		atq_bal;
	bus_size_t		atq_bah;

	bus_size_t		arq_tail;
	bus_size_t		arq_head;
	bus_size_t		arq_len;
	bus_size_t		arq_bal;
	bus_size_t		arq_bah;

	uint32_t		atq_len_enable;
	uint32_t		atq_tail_mask;
	uint32_t		atq_head_mask;

	uint32_t		arq_len_enable;
	uint32_t		arq_tail_mask;
	uint32_t		arq_head_mask;
};

/* maps an AQ PHY type bit to an ifmedia word */
struct ixl_phy_type {
	uint64_t	phy_type;
	uint64_t	ifm_type;
};

/* maps an AQ link speed code to an interface speed in bits/sec */
struct ixl_speed_type {
	uint8_t		dev_speed;
	uint64_t	net_speed;
};
1000 
/* one admin receive queue buffer and its DMA map */
struct ixl_aq_buf {
	SIMPLEQ_ENTRY(ixl_aq_buf)
				 aqb_entry;
	void			*aqb_data;
	bus_dmamap_t		 aqb_map;
};
SIMPLEQ_HEAD(ixl_aq_bufs, ixl_aq_buf);

/*
 * wrapper for a single-segment DMA allocation; the accessors below
 * give the map, device address, kernel va, and length.
 */
struct ixl_dmamem {
	bus_dmamap_t		ixm_map;
	bus_dma_segment_t	ixm_seg;
	int			ixm_nsegs;
	size_t			ixm_size;
	caddr_t			ixm_kva;
};
#define IXL_DMA_MAP(_ixm)	((_ixm)->ixm_map)
#define IXL_DMA_DVA(_ixm)	((_ixm)->ixm_map->dm_segs[0].ds_addr)
#define IXL_DMA_KVA(_ixm)	((void *)(_ixm)->ixm_kva)
#define IXL_DMA_LEN(_ixm)	((_ixm)->ixm_size)
1020 
/* one HMC object type: base offset, object count, per-object size */
struct ixl_hmc_entry {
	uint64_t		 hmc_base;
	uint32_t		 hmc_count;
	uint32_t		 hmc_size;
};

/* indices into sc_hmc_entries */
#define IXL_HMC_LAN_TX		 0
#define IXL_HMC_LAN_RX		 1
#define IXL_HMC_FCOE_CTX	 2
#define IXL_HMC_FCOE_FILTER	 3
#define IXL_HMC_COUNT		 4

/*
 * one packing rule for ixl_hmc_pack(): take "width" bits of the struct
 * field at byte "offset" and place them at bit "lsb" of the HMC image.
 */
struct ixl_hmc_pack {
	uint16_t		offset;
	uint16_t		width;
	uint16_t		lsb;
};
1038 
/*
 * these hmc objects have weird sizes and alignments, so these are abstract
 * representations of them that are nice for C code to populate.
 *
 * the packing code relies on little-endian values being stored in the fields,
 * no high bits in the fields being set, and the fields being packed in the
 * same order as they appear in the ctx structure.
 */
1047 
/*
 * abstract rx queue context; flattened into the hardware HMC image by
 * ixl_hmc_pack() using ixl_hmc_pack_rxq below.  Fields must hold
 * little-endian values with no bits set above their packed widths.
 */
struct ixl_hmc_rxq {
	uint16_t		 head;
	uint8_t			 cpuid;
	uint64_t		 base;
#define IXL_HMC_RXQ_BASE_UNIT		128
	uint16_t		 qlen;
	uint16_t		 dbuff;
#define IXL_HMC_RXQ_DBUFF_UNIT		128
	uint8_t			 hbuff;
#define IXL_HMC_RXQ_HBUFF_UNIT		64
	uint8_t			 dtype;
#define IXL_HMC_RXQ_DTYPE_NOSPLIT	0x0
#define IXL_HMC_RXQ_DTYPE_HSPLIT	0x1
#define IXL_HMC_RXQ_DTYPE_SPLIT_ALWAYS	0x2
	uint8_t			 dsize;
#define IXL_HMC_RXQ_DSIZE_16		0
#define IXL_HMC_RXQ_DSIZE_32		1
	uint8_t			 crcstrip;
	uint8_t			 fc_ena;
	uint8_t			 l2tsel;
#define IXL_HMC_RXQ_L2TSEL_2ND_TAG_TO_L2TAG1 \
					0
#define IXL_HMC_RXQ_L2TSEL_1ST_TAG_TO_L2TAG1 \
					1
	uint8_t			 hsplit_0;
	uint8_t			 hsplit_1;
	uint8_t			 showiv;
	uint16_t		 rxmax;
	uint8_t			 tphrdesc_ena;
	uint8_t			 tphwdesc_ena;
	uint8_t			 tphdata_ena;
	uint8_t			 tphhead_ena;
	uint8_t			 lrxqthresh;
	uint8_t			 prefena;
};
1083 
/*
 * bit-packing rules for struct ixl_hmc_rxq: { field offset, width in
 * bits, lsb position in the rx queue context image }.  The bit
 * positions are fixed by the hardware's context layout.
 */
static const struct ixl_hmc_pack ixl_hmc_pack_rxq[] = {
	{ offsetof(struct ixl_hmc_rxq, head),		13,	0 },
	{ offsetof(struct ixl_hmc_rxq, cpuid),		8,	13 },
	{ offsetof(struct ixl_hmc_rxq, base),		57,	32 },
	{ offsetof(struct ixl_hmc_rxq, qlen),		13,	89 },
	{ offsetof(struct ixl_hmc_rxq, dbuff),		7,	102 },
	{ offsetof(struct ixl_hmc_rxq, hbuff),		5,	109 },
	{ offsetof(struct ixl_hmc_rxq, dtype),		2,	114 },
	{ offsetof(struct ixl_hmc_rxq, dsize),		1,	116 },
	{ offsetof(struct ixl_hmc_rxq, crcstrip),	1,	117 },
	{ offsetof(struct ixl_hmc_rxq, fc_ena),		1,	118 },
	{ offsetof(struct ixl_hmc_rxq, l2tsel),		1,	119 },
	{ offsetof(struct ixl_hmc_rxq, hsplit_0),	4,	120 },
	{ offsetof(struct ixl_hmc_rxq, hsplit_1),	2,	124 },
	{ offsetof(struct ixl_hmc_rxq, showiv),		1,	127 },
	{ offsetof(struct ixl_hmc_rxq, rxmax),		14,	174 },
	{ offsetof(struct ixl_hmc_rxq, tphrdesc_ena),	1,	193 },
	{ offsetof(struct ixl_hmc_rxq, tphwdesc_ena),	1,	194 },
	{ offsetof(struct ixl_hmc_rxq, tphdata_ena),	1,	195 },
	{ offsetof(struct ixl_hmc_rxq, tphhead_ena),	1,	196 },
	{ offsetof(struct ixl_hmc_rxq, lrxqthresh),	3,	198 },
	{ offsetof(struct ixl_hmc_rxq, prefena),	1,	201 },
};

/* highest lsb used above, plus one bit: minimum context size in bits */
#define IXL_HMC_RXQ_MINSIZE (201 + 1)
1109 
/*
 * abstract tx queue context; flattened into the hardware HMC image by
 * ixl_hmc_pack() using ixl_hmc_pack_txq below.
 */
struct ixl_hmc_txq {
	uint16_t		head;
	uint8_t			new_context;
	uint64_t		base;
#define IXL_HMC_TXQ_BASE_UNIT		128
	uint8_t			fc_ena;
	uint8_t			timesync_ena;
	uint8_t			fd_ena;
	uint8_t			alt_vlan_ena;
	uint16_t		thead_wb;
	uint8_t			cpuid;
	uint8_t			head_wb_ena;
#define IXL_HMC_TXQ_DESC_WB		0
#define IXL_HMC_TXQ_HEAD_WB		1
	uint16_t		qlen;
	uint8_t			tphrdesc_ena;
	uint8_t			tphrpacket_ena;
	uint8_t			tphwdesc_ena;
	uint64_t		head_wb_addr;
	uint32_t		crc;
	uint16_t		rdylist;
	uint8_t			rdylist_act;
};
1133 
/*
 * bit-packing rules for struct ixl_hmc_txq: { field offset, width in
 * bits, lsb position in the context image }.  The "+ 128" and
 * "(7*128)" terms place fields in later 128-bit lines of the context.
 */
static const struct ixl_hmc_pack ixl_hmc_pack_txq[] = {
	{ offsetof(struct ixl_hmc_txq, head),		13,	0 },
	{ offsetof(struct ixl_hmc_txq, new_context),	1,	30 },
	{ offsetof(struct ixl_hmc_txq, base),		57,	32 },
	{ offsetof(struct ixl_hmc_txq, fc_ena),		1,	89 },
	{ offsetof(struct ixl_hmc_txq, timesync_ena),	1,	90 },
	{ offsetof(struct ixl_hmc_txq, fd_ena),		1,	91 },
	{ offsetof(struct ixl_hmc_txq, alt_vlan_ena),	1,	92 },
	{ offsetof(struct ixl_hmc_txq, cpuid),		8,	96 },
/* line 1 */
	{ offsetof(struct ixl_hmc_txq, thead_wb),	13,	0 + 128 },
	{ offsetof(struct ixl_hmc_txq, head_wb_ena),	1,	32 + 128 },
	{ offsetof(struct ixl_hmc_txq, qlen),		13,	33 + 128 },
	{ offsetof(struct ixl_hmc_txq, tphrdesc_ena),	1,	46 + 128 },
	{ offsetof(struct ixl_hmc_txq, tphrpacket_ena),	1,	47 + 128 },
	{ offsetof(struct ixl_hmc_txq, tphwdesc_ena),	1,	48 + 128 },
	{ offsetof(struct ixl_hmc_txq, head_wb_addr),	64,	64 + 128 },
/* line 7 */
	{ offsetof(struct ixl_hmc_txq, crc),		32,	0 + (7*128) },
	{ offsetof(struct ixl_hmc_txq, rdylist),	10,	84 + (7*128) },
	{ offsetof(struct ixl_hmc_txq, rdylist_act),	1,	94 + (7*128) },
};

/* highest lsb used above, plus one bit: minimum context size in bits */
#define IXL_HMC_TXQ_MINSIZE (94 + (7*128) + 1)
1158 
/* 52-byte RSS hash key */
struct ixl_rss_key {
	uint32_t		 key[13];
};

/* 128-byte RSS lookup (redirection) table */
struct ixl_rss_lut_128 {
	uint32_t		 entries[128 / sizeof(uint32_t)];
};

/* 512-byte RSS lookup (redirection) table */
struct ixl_rss_lut_512 {
	uint32_t		 entries[512 / sizeof(uint32_t)];
};
1170 
1171 /* driver structures */
1172 
1173 struct ixl_vector;
1174 struct ixl_chip;
1175 
/* per-slot tx bookkeeping: the mbuf, its DMA map, and its EOP slot */
struct ixl_tx_map {
	struct mbuf		*txm_m;
	bus_dmamap_t		 txm_map;
	unsigned int		 txm_eop;
};

/*
 * one transmit ring: descriptor memory in txr_mem with a parallel
 * txr_maps array, producer/consumer indices, and the tail (doorbell)
 * register offset.  Cache-line aligned to keep rings from false
 * sharing.
 */
struct ixl_tx_ring {
	struct ixl_softc	*txr_sc;
	struct ixl_vector	*txr_vector;
	struct ifqueue		*txr_ifq;

	unsigned int		 txr_prod;
	unsigned int		 txr_cons;

	struct ixl_tx_map	*txr_maps;
	struct ixl_dmamem	 txr_mem;

	bus_size_t		 txr_tail;
	unsigned int		 txr_qid;
} __aligned(CACHE_LINE_SIZE);
1196 
/* per-slot rx bookkeeping */
struct ixl_rx_map {
	struct mbuf		*rxm_m;
	bus_dmamap_t		 rxm_map;
};

/*
 * one receive ring.  rxr_m_head/rxr_m_tail presumably hold a packet
 * spanning multiple descriptors while it is being reassembled (see
 * ixl_rxeof).  Cache-line aligned to keep rings from false sharing.
 */
struct ixl_rx_ring {
	struct ixl_softc	*rxr_sc;
	struct ixl_vector	*rxr_vector;
	struct ifiqueue		*rxr_ifiq;

	struct if_rxring	 rxr_acct;
	struct timeout		 rxr_refill;	/* runs ixl_rxrefill */

	unsigned int		 rxr_prod;
	unsigned int		 rxr_cons;

	struct ixl_rx_map	*rxr_maps;
	struct ixl_dmamem	 rxr_mem;

	struct mbuf		*rxr_m_head;
	struct mbuf		**rxr_m_tail;

	bus_size_t		 rxr_tail;
	unsigned int		 rxr_qid;
} __aligned(CACHE_LINE_SIZE);
1222 
/*
 * a queued admin command: the descriptor plus a completion callback
 * and its argument (see ixl_atq_set/ixl_atq_done).
 */
struct ixl_atq {
	struct ixl_aq_desc	  iatq_desc;
	void			 *iatq_arg;
	void			(*iatq_fn)(struct ixl_softc *, void *);
};
SIMPLEQ_HEAD(ixl_atq_list, ixl_atq);

/* per-interrupt-vector state: the rx/tx ring pair it services */
struct ixl_vector {
	struct ixl_softc	*iv_sc;
	struct ixl_rx_ring	*iv_rxr;
	struct ixl_tx_ring	*iv_txr;
	int			 iv_qid;
	void			*iv_ihc;	/* interrupt handler cookie */
	char			 iv_name[16];
} __aligned(CACHE_LINE_SIZE);
1238 
/*
 * per-device private state.  sc_dev must stay first: attach casts the
 * struct device pointer straight to struct ixl_softc.
 */
struct ixl_softc {
	struct device		 sc_dev;
	const struct ixl_chip	*sc_chip;	/* 710/722 chip ops */
	struct arpcom		 sc_ac;
	struct ifmedia		 sc_media;
	uint64_t		 sc_media_status;
	uint64_t		 sc_media_active;

	/* pci attachment */
	pci_chipset_tag_t	 sc_pc;
	pci_intr_handle_t	 sc_ih;
	void			*sc_ihc;
	pcitag_t		 sc_tag;

	/* register/DMA access */
	bus_dma_tag_t		 sc_dmat;
	bus_space_tag_t		 sc_memt;
	bus_space_handle_t	 sc_memh;
	bus_size_t		 sc_mems;

	/* firmware and switch identities */
	uint16_t		 sc_api_major;
	uint16_t		 sc_api_minor;
	uint8_t			 sc_pf_id;
	uint16_t		 sc_uplink_seid;	/* le */
	uint16_t		 sc_downlink_seid;	/* le */
	uint16_t		 sc_veb_seid;		/* le */
	uint16_t		 sc_vsi_number;		/* le */
	uint16_t		 sc_seid;
	unsigned int		 sc_base_queue;
	unsigned int		 sc_port;

	struct ixl_dmamem	 sc_scratch;

	const struct ixl_aq_regs *
				 sc_aq_regs;

	/* admin transmit (command) queue */
	struct ixl_dmamem	 sc_atq;
	unsigned int		 sc_atq_prod;
	unsigned int		 sc_atq_cons;

	/* admin receive (event) queue */
	struct ixl_dmamem	 sc_arq;
	struct task		 sc_arq_task;
	struct ixl_aq_bufs	 sc_arq_idle;
	struct ixl_aq_bufs	 sc_arq_live;
	struct if_rxring	 sc_arq_ring;
	unsigned int		 sc_arq_prod;
	unsigned int		 sc_arq_cons;

	/* link state handling */
	struct mutex		 sc_link_state_mtx;
	struct task		 sc_link_state_task;
	struct ixl_atq		 sc_link_state_atq;

	/* host memory cache backing pages */
	struct ixl_dmamem	 sc_hmc_sd;
	struct ixl_dmamem	 sc_hmc_pd;
	struct ixl_hmc_entry	 sc_hmc_entries[IXL_HMC_COUNT];

	unsigned int		 sc_tx_ring_ndescs;
	unsigned int		 sc_rx_ring_ndescs;
	unsigned int		 sc_nqueues;	/* 1 << sc_nqueues */

	struct intrmap		*sc_intrmap;
	struct ixl_vector	*sc_vectors;

	struct rwlock		 sc_cfg_lock;	/* serializes configuration */
	unsigned int		 sc_dead;	/* NOTE(review): presumably
						 * flags an unusable device;
						 * confirm */

	uint8_t			 sc_enaddr[ETHER_ADDR_LEN];

#if NKSTAT > 0
	struct mutex		 sc_kstat_mtx;
	struct timeout		 sc_kstat_tmo;
	struct kstat		*sc_port_kstat;
	struct kstat		*sc_vsi_kstat;
#endif
};
#define DEVNAME(_sc) ((_sc)->sc_dev.dv_xname)

#define delaymsec(_ms)	delay(1000 * (_ms))
1315 
1316 static void	ixl_clear_hw(struct ixl_softc *);
1317 static int	ixl_pf_reset(struct ixl_softc *);
1318 
1319 static int	ixl_dmamem_alloc(struct ixl_softc *, struct ixl_dmamem *,
1320 		    bus_size_t, u_int);
1321 static void	ixl_dmamem_free(struct ixl_softc *, struct ixl_dmamem *);
1322 
1323 static int	ixl_arq_fill(struct ixl_softc *);
1324 static void	ixl_arq_unfill(struct ixl_softc *);
1325 
1326 static int	ixl_atq_poll(struct ixl_softc *, struct ixl_aq_desc *,
1327 		    unsigned int);
1328 static void	ixl_atq_set(struct ixl_atq *,
1329 		    void (*)(struct ixl_softc *, void *), void *);
1330 static void	ixl_atq_post(struct ixl_softc *, struct ixl_atq *);
1331 static void	ixl_atq_done(struct ixl_softc *);
1332 static void	ixl_atq_exec(struct ixl_softc *, struct ixl_atq *,
1333 		    const char *);
1334 static int	ixl_get_version(struct ixl_softc *);
1335 static int	ixl_pxe_clear(struct ixl_softc *);
1336 static int	ixl_lldp_shut(struct ixl_softc *);
1337 static int	ixl_get_mac(struct ixl_softc *);
1338 static int	ixl_get_switch_config(struct ixl_softc *);
1339 static int	ixl_phy_mask_ints(struct ixl_softc *);
1340 static int	ixl_get_phy_types(struct ixl_softc *, uint64_t *);
1341 static int	ixl_restart_an(struct ixl_softc *);
1342 static int	ixl_hmc(struct ixl_softc *);
1343 static void	ixl_hmc_free(struct ixl_softc *);
1344 static int	ixl_get_vsi(struct ixl_softc *);
1345 static int	ixl_set_vsi(struct ixl_softc *);
1346 static int	ixl_get_link_status(struct ixl_softc *);
1347 static int	ixl_set_link_status(struct ixl_softc *,
1348 		    const struct ixl_aq_desc *);
1349 static int	ixl_add_macvlan(struct ixl_softc *, uint8_t *, uint16_t,
1350 		    uint16_t);
1351 static int	ixl_remove_macvlan(struct ixl_softc *, uint8_t *, uint16_t,
1352 		    uint16_t);
1353 static void	ixl_link_state_update(void *);
1354 static void	ixl_arq(void *);
1355 static void	ixl_hmc_pack(void *, const void *,
1356 		    const struct ixl_hmc_pack *, unsigned int);
1357 
1358 static int	ixl_get_sffpage(struct ixl_softc *, struct if_sffpage *);
1359 static int	ixl_sff_get_byte(struct ixl_softc *, uint8_t, uint32_t,
1360 		    uint8_t *);
1361 static int	ixl_sff_set_byte(struct ixl_softc *, uint8_t, uint32_t,
1362 		    uint8_t);
1363 
1364 static int	ixl_match(struct device *, void *, void *);
1365 static void	ixl_attach(struct device *, struct device *, void *);
1366 
1367 static void	ixl_media_add(struct ixl_softc *, uint64_t);
1368 static int	ixl_media_change(struct ifnet *);
1369 static void	ixl_media_status(struct ifnet *, struct ifmediareq *);
1370 static void	ixl_watchdog(struct ifnet *);
1371 static int	ixl_ioctl(struct ifnet *, u_long, caddr_t);
1372 static void	ixl_start(struct ifqueue *);
1373 static int	ixl_intr0(void *);
1374 static int	ixl_intr_vector(void *);
1375 static int	ixl_up(struct ixl_softc *);
1376 static int	ixl_down(struct ixl_softc *);
1377 static int	ixl_iff(struct ixl_softc *);
1378 
1379 static struct ixl_tx_ring *
1380 		ixl_txr_alloc(struct ixl_softc *, unsigned int);
1381 static void	ixl_txr_qdis(struct ixl_softc *, struct ixl_tx_ring *, int);
1382 static void	ixl_txr_config(struct ixl_softc *, struct ixl_tx_ring *);
1383 static int	ixl_txr_enabled(struct ixl_softc *, struct ixl_tx_ring *);
1384 static int	ixl_txr_disabled(struct ixl_softc *, struct ixl_tx_ring *);
1385 static void	ixl_txr_unconfig(struct ixl_softc *, struct ixl_tx_ring *);
1386 static void	ixl_txr_clean(struct ixl_softc *, struct ixl_tx_ring *);
1387 static void	ixl_txr_free(struct ixl_softc *, struct ixl_tx_ring *);
1388 static int	ixl_txeof(struct ixl_softc *, struct ixl_tx_ring *);
1389 
1390 static struct ixl_rx_ring *
1391 		ixl_rxr_alloc(struct ixl_softc *, unsigned int);
1392 static void	ixl_rxr_config(struct ixl_softc *, struct ixl_rx_ring *);
1393 static int	ixl_rxr_enabled(struct ixl_softc *, struct ixl_rx_ring *);
1394 static int	ixl_rxr_disabled(struct ixl_softc *, struct ixl_rx_ring *);
1395 static void	ixl_rxr_unconfig(struct ixl_softc *, struct ixl_rx_ring *);
1396 static void	ixl_rxr_clean(struct ixl_softc *, struct ixl_rx_ring *);
1397 static void	ixl_rxr_free(struct ixl_softc *, struct ixl_rx_ring *);
1398 static int	ixl_rxeof(struct ixl_softc *, struct ixl_rx_ring *);
1399 static void	ixl_rxfill(struct ixl_softc *, struct ixl_rx_ring *);
1400 static void	ixl_rxrefill(void *);
1401 static int	ixl_rxrinfo(struct ixl_softc *, struct if_rxrinfo *);
1402 static void	ixl_rx_checksum(struct mbuf *, uint64_t);
1403 
1404 #if NKSTAT > 0
1405 static void	ixl_kstat_attach(struct ixl_softc *);
1406 #endif
1407 
/* autoconf glue */
struct cfdriver ixl_cd = {
	NULL,
	"ixl",
	DV_IFNET,
};

const struct cfattach ixl_ca = {
	sizeof(struct ixl_softc),
	ixl_match,
	ixl_attach,
};
1419 
/*
 * map of AQ PHY type bits to ifmedia subtypes.  Entries with several
 * bits OR'd together report the same media type for each of those
 * PHY types.
 */
static const struct ixl_phy_type ixl_phy_type_map[] = {
	{ 1ULL << IXL_PHY_TYPE_SGMII,		IFM_1000_SGMII },
	{ 1ULL << IXL_PHY_TYPE_1000BASE_KX,	IFM_1000_KX },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_KX4,	IFM_10G_KX4 },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_KR,	IFM_10G_KR },
	{ 1ULL << IXL_PHY_TYPE_40GBASE_KR4,	IFM_40G_KR4 },
	{ 1ULL << IXL_PHY_TYPE_XAUI |
	  1ULL << IXL_PHY_TYPE_XFI,		IFM_10G_CX4 },
	{ 1ULL << IXL_PHY_TYPE_SFI,		IFM_10G_SFI },
	{ 1ULL << IXL_PHY_TYPE_XLAUI |
	  1ULL << IXL_PHY_TYPE_XLPPI,		IFM_40G_XLPPI },
	{ 1ULL << IXL_PHY_TYPE_40GBASE_CR4_CU |
	  1ULL << IXL_PHY_TYPE_40GBASE_CR4,	IFM_40G_CR4 },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_CR1_CU |
	  1ULL << IXL_PHY_TYPE_10GBASE_CR1,	IFM_10G_CR1 },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_AOC,	IFM_10G_AOC },
	{ 1ULL << IXL_PHY_TYPE_40GBASE_AOC,	IFM_40G_AOC },
	{ 1ULL << IXL_PHY_TYPE_100BASE_TX,	IFM_100_TX },
	{ 1ULL << IXL_PHY_TYPE_1000BASE_T_OPTICAL |
	  1ULL << IXL_PHY_TYPE_1000BASE_T,	IFM_1000_T },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_T,	IFM_10G_T },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_SR,	IFM_10G_SR },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_LR,	IFM_10G_LR },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_SFPP_CU,	IFM_10G_SFP_CU },
	{ 1ULL << IXL_PHY_TYPE_40GBASE_SR4,	IFM_40G_SR4 },
	{ 1ULL << IXL_PHY_TYPE_40GBASE_LR4,	IFM_40G_LR4 },
	{ 1ULL << IXL_PHY_TYPE_1000BASE_SX,	IFM_1000_SX },
	{ 1ULL << IXL_PHY_TYPE_1000BASE_LX,	IFM_1000_LX },
	{ 1ULL << IXL_PHY_TYPE_20GBASE_KR2,	IFM_20G_KR2 },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_KR,	IFM_25G_KR },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_CR,	IFM_25G_CR },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_SR,	IFM_25G_SR },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_LR,	IFM_25G_LR },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_AOC,	IFM_25G_AOC },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_ACC,	IFM_25G_CR },
};
1456 
/* map of AQ link speed codes to interface speeds */
static const struct ixl_speed_type ixl_speed_type_map[] = {
	{ IXL_AQ_LINK_SPEED_40GB,		IF_Gbps(40) },
	{ IXL_AQ_LINK_SPEED_25GB,		IF_Gbps(25) },
	{ IXL_AQ_LINK_SPEED_10GB,		IF_Gbps(10) },
	{ IXL_AQ_LINK_SPEED_1GB,		IF_Gbps(1) },
};
1463 
/* admin queue register locations for the physical function (PF) */
static const struct ixl_aq_regs ixl_pf_aq_regs = {
	.atq_tail	= I40E_PF_ATQT,
	.atq_tail_mask	= I40E_PF_ATQT_ATQT_MASK,
	.atq_head	= I40E_PF_ATQH,
	.atq_head_mask	= I40E_PF_ATQH_ATQH_MASK,
	.atq_len	= I40E_PF_ATQLEN,
	.atq_bal	= I40E_PF_ATQBAL,
	.atq_bah	= I40E_PF_ATQBAH,
	.atq_len_enable	= I40E_PF_ATQLEN_ATQENABLE_MASK,

	.arq_tail	= I40E_PF_ARQT,
	.arq_tail_mask	= I40E_PF_ARQT_ARQT_MASK,
	.arq_head	= I40E_PF_ARQH,
	.arq_head_mask	= I40E_PF_ARQH_ARQH_MASK,
	.arq_len	= I40E_PF_ARQLEN,
	.arq_bal	= I40E_PF_ARQBAL,
	.arq_bah	= I40E_PF_ARQBAH,
	.arq_len_enable	= I40E_PF_ARQLEN_ARQENABLE_MASK,
};
1483 
/* 32-bit MMIO register accessors */
#define ixl_rd(_s, _r) \
	bus_space_read_4((_s)->sc_memt, (_s)->sc_memh, (_r))
#define ixl_wr(_s, _r, _v) \
	bus_space_write_4((_s)->sc_memt, (_s)->sc_memh, (_r), (_v))
#define ixl_barrier(_s, _r, _l, _o) \
	bus_space_barrier((_s)->sc_memt, (_s)->sc_memh, (_r), (_l), (_o))
/* unmask the adminq/"other" interrupt via dynamic control register 0 */
#define ixl_intr_enable(_s) \
	ixl_wr((_s), I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_INTENA_MASK | \
	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | \
	    (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT))

/* number of rx/tx queue pairs in use (sc_nqueues stores the log2) */
#define ixl_nqueues(_sc)	(1 << (_sc)->sc_nqueues)

/* split a DMA address into the hi/lo halves the registers want */
#ifdef __LP64__
#define ixl_dmamem_hi(_ixm)	(uint32_t)(IXL_DMA_DVA(_ixm) >> 32)
#else
#define ixl_dmamem_hi(_ixm)	0
#endif

#define ixl_dmamem_lo(_ixm)	(uint32_t)IXL_DMA_DVA(_ixm)
1504 
1505 static inline void
1506 ixl_aq_dva(struct ixl_aq_desc *iaq, bus_addr_t addr)
1507 {
1508 #ifdef __LP64__
1509 	htolem32(&iaq->iaq_param[2], addr >> 32);
1510 #else
1511 	iaq->iaq_param[2] = htole32(0);
1512 #endif
1513 	htolem32(&iaq->iaq_param[3], addr);
1514 }
1515 
/* constant-expression htole16, usable in static initializers */
#if _BYTE_ORDER == _BIG_ENDIAN
#define HTOLE16(_x)	(uint16_t)(((_x) & 0xff) << 8 | ((_x) & 0xff00) >> 8)
#else
#define HTOLE16(_x)	(_x)
#endif

/* NOTE(review): presumably serializes SFP module (SFF) access across
 * devices; confirm against ixl_get_sffpage */
static struct rwlock ixl_sff_lock = RWLOCK_INITIALIZER("ixlsff");
1523 
/* deal with differences between chips */

/*
 * per-chip operations: the default RSS hash-enable bits plus indirect
 * register access and RSS key/LUT programming, which differ between
 * the 710 and 722 families.
 */
struct ixl_chip {
	uint64_t		  ic_rss_hena;
	uint32_t		(*ic_rd_ctl)(struct ixl_softc *, uint32_t);
	void			(*ic_wr_ctl)(struct ixl_softc *, uint32_t,
				      uint32_t);

	int			(*ic_set_rss_key)(struct ixl_softc *,
				      const struct ixl_rss_key *);
	int			(*ic_set_rss_lut)(struct ixl_softc *,
				      const struct ixl_rss_lut_128 *);
};
1537 
1538 static inline uint64_t
1539 ixl_rss_hena(struct ixl_softc *sc)
1540 {
1541 	return (sc->sc_chip->ic_rss_hena);
1542 }
1543 
1544 static inline uint32_t
1545 ixl_rd_ctl(struct ixl_softc *sc, uint32_t r)
1546 {
1547 	return ((*sc->sc_chip->ic_rd_ctl)(sc, r));
1548 }
1549 
1550 static inline void
1551 ixl_wr_ctl(struct ixl_softc *sc, uint32_t r, uint32_t v)
1552 {
1553 	(*sc->sc_chip->ic_wr_ctl)(sc, r, v);
1554 }
1555 
1556 static inline int
1557 ixl_set_rss_key(struct ixl_softc *sc, const struct ixl_rss_key *rsskey)
1558 {
1559 	return ((*sc->sc_chip->ic_set_rss_key)(sc, rsskey));
1560 }
1561 
1562 static inline int
1563 ixl_set_rss_lut(struct ixl_softc *sc, const struct ixl_rss_lut_128 *lut)
1564 {
1565 	return ((*sc->sc_chip->ic_set_rss_lut)(sc, lut));
1566 }
1567 
/* 710 chip specifics */

static uint32_t		ixl_710_rd_ctl(struct ixl_softc *, uint32_t);
static void		ixl_710_wr_ctl(struct ixl_softc *, uint32_t, uint32_t);
static int		ixl_710_set_rss_key(struct ixl_softc *,
			    const struct ixl_rss_key *);
static int		ixl_710_set_rss_lut(struct ixl_softc *,
			    const struct ixl_rss_lut_128 *);

/* op table for X(L)710 family chips */
static const struct ixl_chip ixl_710 = {
	.ic_rss_hena =		IXL_RSS_HENA_BASE_710,
	.ic_rd_ctl =		ixl_710_rd_ctl,
	.ic_wr_ctl =		ixl_710_wr_ctl,
	.ic_set_rss_key =	ixl_710_set_rss_key,
	.ic_set_rss_lut =	ixl_710_set_rss_lut,
};

/* 722 chip specifics */

static uint32_t		ixl_722_rd_ctl(struct ixl_softc *, uint32_t);
static void		ixl_722_wr_ctl(struct ixl_softc *, uint32_t, uint32_t);
static int		ixl_722_set_rss_key(struct ixl_softc *,
			    const struct ixl_rss_key *);
static int		ixl_722_set_rss_lut(struct ixl_softc *,
			    const struct ixl_rss_lut_128 *);

/* op table for X722 family chips */
static const struct ixl_chip ixl_722 = {
	.ic_rss_hena =		IXL_RSS_HENA_BASE_722,
	.ic_rd_ctl =		ixl_722_rd_ctl,
	.ic_wr_ctl =		ixl_722_wr_ctl,
	.ic_set_rss_key =	ixl_722_set_rss_key,
	.ic_set_rss_lut =	ixl_722_set_rss_lut,
};
1601 
/*
 * 710 chips using an older firmware/API use the same ctl ops as
 * 722 chips (or 722 chips use the same ctl ops as 710 chips in early
 * firmware/API versions?).
 */
static const struct ixl_chip ixl_710_decrepit = {
	.ic_rss_hena =		IXL_RSS_HENA_BASE_710,
	.ic_rd_ctl =		ixl_722_rd_ctl,
	.ic_wr_ctl =		ixl_722_wr_ctl,
	.ic_set_rss_key =	ixl_710_set_rss_key,
	.ic_set_rss_lut =	ixl_710_set_rss_lut,
};
1615 
/* driver code */

/* one supported PCI id and the chip ops to use for it */
struct ixl_device {
	const struct ixl_chip	*id_chip;
	pci_vendor_id_t		 id_vid;
	pci_product_id_t	 id_pid;
};

/* all supported devices; searched linearly by ixl_device_lookup() */
static const struct ixl_device ixl_devices[] = {
	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_SFP },
	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_SFP_2 },
	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_40G_BP },
	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_BP, },
	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_1 },
	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_2 },
	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_QSFP },
	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_BASET },
	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_20G_BP_1 },
	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_20G_BP_2 },
	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_T4_10G },
	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XXV710_25G_BP },
	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XXV710_25G_SFP28, },
	{ &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_T, },
	{ &ixl_722, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_10G_KX },
	{ &ixl_722, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_10G_QSFP },
	{ &ixl_722, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_10G_SFP_1 },
	{ &ixl_722, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_1G },
	{ &ixl_722, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_10G_T },
	{ &ixl_722, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_10G_SFP_2 },
};
1646 
1647 static const struct ixl_device *
1648 ixl_device_lookup(struct pci_attach_args *pa)
1649 {
1650 	pci_vendor_id_t vid = PCI_VENDOR(pa->pa_id);
1651 	pci_product_id_t pid = PCI_PRODUCT(pa->pa_id);
1652 	const struct ixl_device *id;
1653 	unsigned int i;
1654 
1655 	for (i = 0; i < nitems(ixl_devices); i++) {
1656 		id = &ixl_devices[i];
1657 		if (id->id_vid == vid && id->id_pid == pid)
1658 			return (id);
1659 	}
1660 
1661 	return (NULL);
1662 }
1663 
1664 static int
1665 ixl_match(struct device *parent, void *match, void *aux)
1666 {
1667 	return (ixl_device_lookup(aux) != NULL);
1668 }
1669 
1670 void
1671 ixl_attach(struct device *parent, struct device *self, void *aux)
1672 {
1673 	struct ixl_softc *sc = (struct ixl_softc *)self;
1674 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1675 	struct pci_attach_args *pa = aux;
1676 	pcireg_t memtype;
1677 	uint32_t port, ari, func;
1678 	uint64_t phy_types = 0;
1679 	unsigned int nqueues, i;
1680 	int tries;
1681 
1682 	rw_init(&sc->sc_cfg_lock, "ixlcfg");
1683 
1684 	sc->sc_chip = ixl_device_lookup(pa)->id_chip;
1685 	sc->sc_pc = pa->pa_pc;
1686 	sc->sc_tag = pa->pa_tag;
1687 	sc->sc_dmat = pa->pa_dmat;
1688 	sc->sc_aq_regs = &ixl_pf_aq_regs;
1689 
1690 	sc->sc_nqueues = 0; /* 1 << 0 is 1 queue */
1691 	sc->sc_tx_ring_ndescs = 1024;
1692 	sc->sc_rx_ring_ndescs = 1024;
1693 
1694 	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, IXL_PCIREG);
1695 	if (pci_mapreg_map(pa, IXL_PCIREG, memtype, 0,
1696 	    &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems, 0)) {
1697 		printf(": unable to map registers\n");
1698 		return;
1699 	}
1700 
1701 	sc->sc_base_queue = (ixl_rd(sc, I40E_PFLAN_QALLOC) &
1702 	    I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
1703 	    I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
1704 
1705 	ixl_clear_hw(sc);
1706 	if (ixl_pf_reset(sc) == -1) {
1707 		/* error printed by ixl_pf_reset */
1708 		goto unmap;
1709 	}
1710 
1711 	port = ixl_rd(sc, I40E_PFGEN_PORTNUM);
1712 	port &= I40E_PFGEN_PORTNUM_PORT_NUM_MASK;
1713 	port >>= I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
1714 	sc->sc_port = port;
1715 	printf(": port %u", port);
1716 
1717 	ari = ixl_rd(sc, I40E_GLPCI_CAPSUP);
1718 	ari &= I40E_GLPCI_CAPSUP_ARI_EN_MASK;
1719 	ari >>= I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
1720 
1721 	func = ixl_rd(sc, I40E_PF_FUNC_RID);
1722 	sc->sc_pf_id = func & (ari ? 0xff : 0x7);
1723 
1724 	/* initialise the adminq */
1725 
1726 	if (ixl_dmamem_alloc(sc, &sc->sc_atq,
1727 	    sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
1728 		printf("\n" "%s: unable to allocate atq\n", DEVNAME(sc));
1729 		goto unmap;
1730 	}
1731 
1732 	SIMPLEQ_INIT(&sc->sc_arq_idle);
1733 	SIMPLEQ_INIT(&sc->sc_arq_live);
1734 	if_rxr_init(&sc->sc_arq_ring, 2, IXL_AQ_NUM - 1);
1735 	task_set(&sc->sc_arq_task, ixl_arq, sc);
1736 	sc->sc_arq_cons = 0;
1737 	sc->sc_arq_prod = 0;
1738 
1739 	if (ixl_dmamem_alloc(sc, &sc->sc_arq,
1740 	    sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
1741 		printf("\n" "%s: unable to allocate arq\n", DEVNAME(sc));
1742 		goto free_atq;
1743 	}
1744 
1745 	if (!ixl_arq_fill(sc)) {
1746 		printf("\n" "%s: unable to fill arq descriptors\n",
1747 		    DEVNAME(sc));
1748 		goto free_arq;
1749 	}
1750 
1751 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1752 	    0, IXL_DMA_LEN(&sc->sc_atq),
1753 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1754 
1755 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1756 	    0, IXL_DMA_LEN(&sc->sc_arq),
1757 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1758 
1759 	for (tries = 0; tries < 10; tries++) {
1760 		int rv;
1761 
1762 		sc->sc_atq_cons = 0;
1763 		sc->sc_atq_prod = 0;
1764 
1765 		ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1766 		ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1767 		ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1768 		ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1769 
1770 		ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
1771 
1772 		ixl_wr(sc, sc->sc_aq_regs->atq_bal,
1773 		    ixl_dmamem_lo(&sc->sc_atq));
1774 		ixl_wr(sc, sc->sc_aq_regs->atq_bah,
1775 		    ixl_dmamem_hi(&sc->sc_atq));
1776 		ixl_wr(sc, sc->sc_aq_regs->atq_len,
1777 		    sc->sc_aq_regs->atq_len_enable | IXL_AQ_NUM);
1778 
1779 		ixl_wr(sc, sc->sc_aq_regs->arq_bal,
1780 		    ixl_dmamem_lo(&sc->sc_arq));
1781 		ixl_wr(sc, sc->sc_aq_regs->arq_bah,
1782 		    ixl_dmamem_hi(&sc->sc_arq));
1783 		ixl_wr(sc, sc->sc_aq_regs->arq_len,
1784 		    sc->sc_aq_regs->arq_len_enable | IXL_AQ_NUM);
1785 
1786 		rv = ixl_get_version(sc);
1787 		if (rv == 0)
1788 			break;
1789 		if (rv != ETIMEDOUT) {
1790 			printf(", unable to get firmware version\n");
1791 			goto shutdown;
1792 		}
1793 
1794 		delaymsec(100);
1795 	}
1796 
1797 	ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
1798 
1799 	if (ixl_pxe_clear(sc) != 0) {
1800 		/* error printed by ixl_pxe_clear */
1801 		goto shutdown;
1802 	}
1803 
1804 	if (ixl_get_mac(sc) != 0) {
1805 		/* error printed by ixl_get_mac */
1806 		goto shutdown;
1807 	}
1808 
1809 	if (pci_intr_map_msix(pa, 0, &sc->sc_ih) == 0) {
1810 		int nmsix = pci_intr_msix_count(pa);
1811 		if (nmsix > 1) { /* we used 1 (the 0th) for the adminq */
1812 			nmsix--;
1813 
1814 			sc->sc_intrmap = intrmap_create(&sc->sc_dev,
1815 			    nmsix, IXL_MAX_VECTORS, INTRMAP_POWEROF2);
1816 			nqueues = intrmap_count(sc->sc_intrmap);
1817 			KASSERT(nqueues > 0);
1818 			KASSERT(powerof2(nqueues));
1819 			sc->sc_nqueues = fls(nqueues) - 1;
1820 		}
1821 	} else {
1822 		if (pci_intr_map_msi(pa, &sc->sc_ih) != 0 &&
1823 		    pci_intr_map(pa, &sc->sc_ih) != 0) {
1824 			printf(", unable to map interrupt\n");
1825 			goto shutdown;
1826 		}
1827 	}
1828 
1829 	nqueues = ixl_nqueues(sc);
1830 
1831 	printf(", %s, %d queue%s, address %s\n",
1832 	    pci_intr_string(sc->sc_pc, sc->sc_ih), ixl_nqueues(sc),
1833 	    (nqueues > 1 ? "s" : ""),
1834 	    ether_sprintf(sc->sc_ac.ac_enaddr));
1835 
1836 	if (ixl_hmc(sc) != 0) {
1837 		/* error printed by ixl_hmc */
1838 		goto shutdown;
1839 	}
1840 
1841 	if (ixl_lldp_shut(sc) != 0) {
1842 		/* error printed by ixl_lldp_shut */
1843 		goto free_hmc;
1844 	}
1845 
1846 	if (ixl_phy_mask_ints(sc) != 0) {
1847 		/* error printed by ixl_phy_mask_ints */
1848 		goto free_hmc;
1849 	}
1850 
1851 	if (ixl_restart_an(sc) != 0) {
1852 		/* error printed by ixl_restart_an */
1853 		goto free_hmc;
1854 	}
1855 
1856 	if (ixl_get_switch_config(sc) != 0) {
1857 		/* error printed by ixl_get_switch_config */
1858 		goto free_hmc;
1859 	}
1860 
1861 	if (ixl_get_phy_types(sc, &phy_types) != 0) {
1862 		/* error printed by ixl_get_phy_abilities */
1863 		goto free_hmc;
1864 	}
1865 
1866 	if (ixl_get_link_status(sc) != 0) {
1867 		/* error printed by ixl_get_link_status */
1868 		goto free_hmc;
1869 	}
1870 
1871 	if (ixl_dmamem_alloc(sc, &sc->sc_scratch,
1872 	    sizeof(struct ixl_aq_vsi_data), 8) != 0) {
1873 		printf("%s: unable to allocate scratch buffer\n", DEVNAME(sc));
1874 		goto free_hmc;
1875 	}
1876 
1877 	if (ixl_get_vsi(sc) != 0) {
1878 		/* error printed by ixl_get_vsi */
1879 		goto free_hmc;
1880 	}
1881 
1882 	if (ixl_set_vsi(sc) != 0) {
1883 		/* error printed by ixl_set_vsi */
1884 		goto free_scratch;
1885 	}
1886 
1887 	sc->sc_ihc = pci_intr_establish(sc->sc_pc, sc->sc_ih,
1888 	    IPL_NET | IPL_MPSAFE, ixl_intr0, sc, DEVNAME(sc));
1889 	if (sc->sc_ihc == NULL) {
1890 		printf("%s: unable to establish interrupt handler\n",
1891 		    DEVNAME(sc));
1892 		goto free_scratch;
1893 	}
1894 
1895 	sc->sc_vectors = mallocarray(sizeof(*sc->sc_vectors), nqueues,
1896 	    M_DEVBUF, M_WAITOK|M_CANFAIL|M_ZERO);
1897 	if (sc->sc_vectors == NULL) {
1898 		printf("%s: unable to allocate vectors\n", DEVNAME(sc));
1899 		goto free_scratch;
1900 	}
1901 
1902 	for (i = 0; i < nqueues; i++) {
1903 		struct ixl_vector *iv = &sc->sc_vectors[i];
1904 		iv->iv_sc = sc;
1905 		iv->iv_qid = i;
1906 		snprintf(iv->iv_name, sizeof(iv->iv_name),
1907 		    "%s:%u", DEVNAME(sc), i); /* truncated? */
1908 	}
1909 
1910 	if (sc->sc_intrmap) {
1911 		for (i = 0; i < nqueues; i++) {
1912 			struct ixl_vector *iv = &sc->sc_vectors[i];
1913 			pci_intr_handle_t ih;
1914 			int v = i + 1; /* 0 is used for adminq */
1915 
1916 			if (pci_intr_map_msix(pa, v, &ih)) {
1917 				printf("%s: unable to map msi-x vector %d\n",
1918 				    DEVNAME(sc), v);
1919 				goto free_vectors;
1920 			}
1921 
1922 			iv->iv_ihc = pci_intr_establish_cpu(sc->sc_pc, ih,
1923 			    IPL_NET | IPL_MPSAFE,
1924 			    intrmap_cpu(sc->sc_intrmap, i),
1925 			    ixl_intr_vector, iv, iv->iv_name);
1926 			if (iv->iv_ihc == NULL) {
1927 				printf("%s: unable to establish interrupt %d\n",
1928 				    DEVNAME(sc), v);
1929 				goto free_vectors;
1930 			}
1931 
1932 			ixl_wr(sc, I40E_PFINT_DYN_CTLN(i),
1933 			    I40E_PFINT_DYN_CTLN_INTENA_MASK |
1934 			    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1935 			    (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
1936 		}
1937 	}
1938 
1939 	/* fixup the chip ops for older fw releases */
1940 	if (sc->sc_chip == &ixl_710 &&
1941 	    sc->sc_api_major == 1 && sc->sc_api_minor < 5)
1942 		sc->sc_chip = &ixl_710_decrepit;
1943 
1944 	ifp->if_softc = sc;
1945 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1946 	ifp->if_xflags = IFXF_MPSAFE;
1947 	ifp->if_ioctl = ixl_ioctl;
1948 	ifp->if_qstart = ixl_start;
1949 	ifp->if_watchdog = ixl_watchdog;
1950 	ifp->if_hardmtu = IXL_HARDMTU;
1951 	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
1952 	ifq_set_maxlen(&ifp->if_snd, sc->sc_tx_ring_ndescs);
1953 
1954 	ifp->if_capabilities = IFCAP_VLAN_HWTAGGING;
1955 	ifp->if_capabilities |= IFCAP_CSUM_IPv4 |
1956 	    IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4 |
1957 	    IFCAP_CSUM_TCPv6 | IFCAP_CSUM_UDPv6;
1958 
1959 	ifmedia_init(&sc->sc_media, 0, ixl_media_change, ixl_media_status);
1960 
1961 	ixl_media_add(sc, phy_types);
1962 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1963 	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
1964 
1965 	if_attach(ifp);
1966 	ether_ifattach(ifp);
1967 
1968 	if_attach_queues(ifp, nqueues);
1969 	if_attach_iqueues(ifp, nqueues);
1970 
1971 	mtx_init(&sc->sc_link_state_mtx, IPL_NET);
1972 	task_set(&sc->sc_link_state_task, ixl_link_state_update, sc);
1973 	ixl_wr(sc, I40E_PFINT_ICR0_ENA,
1974 	    I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK |
1975 	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK);
1976 	ixl_wr(sc, I40E_PFINT_STAT_CTL0,
1977 	    IXL_NOITR << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT);
1978 
1979 	/* remove default mac filter and replace it so we can see vlans */
1980 	ixl_remove_macvlan(sc, sc->sc_ac.ac_enaddr, 0, 0);
1981 	ixl_remove_macvlan(sc, sc->sc_ac.ac_enaddr, 0,
1982 	    IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1983 	ixl_add_macvlan(sc, sc->sc_ac.ac_enaddr, 0,
1984 	    IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
1985 	ixl_add_macvlan(sc, etherbroadcastaddr, 0,
1986 	    IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
1987 	memcpy(sc->sc_enaddr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN);
1988 
1989 	ixl_intr_enable(sc);
1990 
1991 #if NKSTAT > 0
1992 	ixl_kstat_attach(sc);
1993 #endif
1994 
1995 	return;
1996 free_vectors:
1997 	if (sc->sc_intrmap != NULL) {
1998 		for (i = 0; i < nqueues; i++) {
1999 			struct ixl_vector *iv = &sc->sc_vectors[i];
2000 			if (iv->iv_ihc == NULL)
2001 				continue;
2002 			pci_intr_disestablish(sc->sc_pc, iv->iv_ihc);
2003 		}
2004 	}
2005 	free(sc->sc_vectors, M_DEVBUF, nqueues * sizeof(*sc->sc_vectors));
2006 free_scratch:
2007 	ixl_dmamem_free(sc, &sc->sc_scratch);
2008 free_hmc:
2009 	ixl_hmc_free(sc);
2010 shutdown:
2011 	ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
2012 	ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
2013 	ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
2014 	ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
2015 
2016 	ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0);
2017 	ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0);
2018 	ixl_wr(sc, sc->sc_aq_regs->atq_len, 0);
2019 
2020 	ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0);
2021 	ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0);
2022 	ixl_wr(sc, sc->sc_aq_regs->arq_len, 0);
2023 
2024 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
2025 	    0, IXL_DMA_LEN(&sc->sc_arq),
2026 	    BUS_DMASYNC_POSTREAD);
2027 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
2028 	    0, IXL_DMA_LEN(&sc->sc_atq),
2029 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2030 
2031 	ixl_arq_unfill(sc);
2032 
2033 free_arq:
2034 	ixl_dmamem_free(sc, &sc->sc_arq);
2035 free_atq:
2036 	ixl_dmamem_free(sc, &sc->sc_atq);
2037 unmap:
2038 	bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
2039 	sc->sc_mems = 0;
2040 
2041 	if (sc->sc_intrmap != NULL)
2042 		intrmap_destroy(sc->sc_intrmap);
2043 }
2044 
2045 static void
2046 ixl_media_add(struct ixl_softc *sc, uint64_t phy_types)
2047 {
2048 	struct ifmedia *ifm = &sc->sc_media;
2049 	const struct ixl_phy_type *itype;
2050 	unsigned int i;
2051 
2052 	for (i = 0; i < nitems(ixl_phy_type_map); i++) {
2053 		itype = &ixl_phy_type_map[i];
2054 
2055 		if (ISSET(phy_types, itype->phy_type))
2056 			ifmedia_add(ifm, IFM_ETHER | itype->ifm_type, 0, NULL);
2057 	}
2058 }
2059 
static int
ixl_media_change(struct ifnet *ifp)
{
	/* media changes are not acted on here; report unsupported */
	return (EOPNOTSUPP);
}
2066 
2067 static void
2068 ixl_media_status(struct ifnet *ifp, struct ifmediareq *ifm)
2069 {
2070 	struct ixl_softc *sc = ifp->if_softc;
2071 
2072 	KERNEL_ASSERT_LOCKED();
2073 
2074 	ifm->ifm_status = sc->sc_media_status;
2075 	ifm->ifm_active = sc->sc_media_active;
2076 }
2077 
/*
 * if_watchdog hook; intentionally empty, tx timeouts are not
 * handled by this driver.
 */
static void
ixl_watchdog(struct ifnet *ifp)
{

}
2083 
/*
 * Interface ioctl handler.
 *
 * Handles up/down transitions, media, multicast filter updates and
 * SFF module page reads; everything else goes to ether_ioctl().  Any
 * case that leaves error as ENETRESET falls through to ixl_iff() to
 * resync the hardware promiscuous/multicast state.
 */
int
ixl_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ixl_softc *sc = (struct ixl_softc *)ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN];
	int aqerror, error = 0;

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */

	case SIOCSIFFLAGS:
		if (ISSET(ifp->if_flags, IFF_UP)) {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				error = ENETRESET;
			else
				error = ixl_up(sc);
		} else {
			if (ISSET(ifp->if_flags, IFF_RUNNING))
				error = ixl_down(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	case SIOCGIFRXR:
		error = ixl_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
		break;

	case SIOCADDMULTI:
		if (ether_addmulti(ifr, &sc->sc_ac) == ENETRESET) {
			error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
			if (error != 0)
				return (error);

			aqerror = ixl_add_macvlan(sc, addrlo, 0,
			    IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
			/* out of hardware filter slots: undo and fail */
			if (aqerror == IXL_AQ_RC_ENOSPC) {
				ether_delmulti(ifr, &sc->sc_ac);
				error = ENOSPC;
			}

			/* address ranges can't be filtered individually */
			if (sc->sc_ac.ac_multirangecnt > 0) {
				SET(ifp->if_flags, IFF_ALLMULTI);
				error = ENETRESET;
			}
		}
		break;

	case SIOCDELMULTI:
		if (ether_delmulti(ifr, &sc->sc_ac) == ENETRESET) {
			error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
			if (error != 0)
				return (error);

			ixl_remove_macvlan(sc, addrlo, 0,
			    IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);

			/* drop allmulti once the last range is gone */
			if (ISSET(ifp->if_flags, IFF_ALLMULTI) &&
			    sc->sc_ac.ac_multirangecnt == 0) {
				CLR(ifp->if_flags, IFF_ALLMULTI);
				error = ENETRESET;
			}
		}
		break;

	case SIOCGIFSFFPAGE:
		/* serialize SFF page access across all ixl devices */
		error = rw_enter(&ixl_sff_lock, RW_WRITE|RW_INTR);
		if (error != 0)
			break;

		error = ixl_get_sffpage(sc, (struct if_sffpage *)data);
		rw_exit(&ixl_sff_lock);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
		break;
	}

	if (error == ENETRESET)
		error = ixl_iff(sc);

	return (error);
}
2174 
2175 static inline void *
2176 ixl_hmc_kva(struct ixl_softc *sc, unsigned int type, unsigned int i)
2177 {
2178 	uint8_t *kva = IXL_DMA_KVA(&sc->sc_hmc_pd);
2179 	struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];
2180 
2181 	if (i >= e->hmc_count)
2182 		return (NULL);
2183 
2184 	kva += e->hmc_base;
2185 	kva += i * e->hmc_size;
2186 
2187 	return (kva);
2188 }
2189 
2190 static inline size_t
2191 ixl_hmc_len(struct ixl_softc *sc, unsigned int type)
2192 {
2193 	struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];
2194 
2195 	return (e->hmc_size);
2196 }
2197 
/*
 * Program receive side scaling: load a stoeplitz hash key, enable
 * the hash types for this chip, and fill the 128-entry LUT so hash
 * results are spread evenly across the active rx queues.  Returns
 * 0 on success or an error from the key/LUT admin commands.
 */
static int
ixl_configure_rss(struct ixl_softc *sc)
{
	struct ixl_rss_key rsskey;
	struct ixl_rss_lut_128 lut;
	uint8_t *lute = (uint8_t *)&lut;
	uint64_t rss_hena;
	unsigned int i, nqueues;
	int error;

#if 0
	/* if we want to do a 512 entry LUT, do this. */
	uint32_t v = ixl_rd_ctl(sc, I40E_PFQF_CTL_0);
	SET(v, I40E_PFQF_CTL_0_HASHLUTSIZE_MASK);
	ixl_wr_ctl(sc, I40E_PFQF_CTL_0, v);
#endif

	/* derive the hardware hash key from the stack's stoeplitz key */
	stoeplitz_to_key(&rsskey, sizeof(rsskey));

	nqueues = ixl_nqueues(sc);
	for (i = 0; i < sizeof(lut); i++) {
		/*
		 * ixl must have a power of 2 rings, so using mod
		 * to populate the table is fine.
		 */
		lute[i] = i % nqueues;
	}

	error = ixl_set_rss_key(sc, &rsskey);
	if (error != 0)
		return (error);

	/* merge our hash-enable bits with whatever is already set */
	rss_hena = (uint64_t)ixl_rd_ctl(sc, I40E_PFQF_HENA(0));
	rss_hena |= (uint64_t)ixl_rd_ctl(sc, I40E_PFQF_HENA(1)) << 32;
	rss_hena |= ixl_rss_hena(sc);
	ixl_wr_ctl(sc, I40E_PFQF_HENA(0), rss_hena);
	ixl_wr_ctl(sc, I40E_PFQF_HENA(1), rss_hena >> 32);

	error = ixl_set_rss_lut(sc, &lut);
	if (error != 0)
		return (error);

	/* nothing to clean up :( */

	return (0);
}
2244 
/*
 * Bring the interface up.
 *
 * Allocates tx/rx rings for every queue pair, wires them to the
 * stack's ifqs/ifiqs, programs and enables the hardware queues, then
 * sets up RSS and the per-vector interrupt cause linked lists.
 * Returns ENETRESET on success (so the ioctl path runs ixl_iff()),
 * ENXIO if the device is dead, ENOMEM/ETIMEDOUT on failure after
 * unwinding.
 */
static int
ixl_up(struct ixl_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ifqueue *ifq;
	struct ifiqueue *ifiq;
	struct ixl_vector *iv;
	struct ixl_rx_ring *rxr;
	struct ixl_tx_ring *txr;
	unsigned int nqueues, i;
	uint32_t reg;
	int rv = ENOMEM;

	nqueues = ixl_nqueues(sc);

	rw_enter_write(&sc->sc_cfg_lock);
	if (sc->sc_dead) {
		rw_exit_write(&sc->sc_cfg_lock);
		return (ENXIO);
	}

	/* allocation is the only thing that can fail, so do it up front */
	for (i = 0; i < nqueues; i++) {
		rxr = ixl_rxr_alloc(sc, i);
		if (rxr == NULL)
			goto free;

		txr = ixl_txr_alloc(sc, i);
		if (txr == NULL) {
			ixl_rxr_free(sc, rxr);
			goto free;
		}

		/* wire everything together */
		iv = &sc->sc_vectors[i];
		iv->iv_rxr = rxr;
		iv->iv_txr = txr;

		ifq = ifp->if_ifqs[i];
		ifq->ifq_softc = txr;
		txr->txr_ifq = ifq;

		ifiq = ifp->if_iqs[i];
		ifiq->ifiq_softc = rxr;
		rxr->rxr_ifiq = ifiq;
	}

	/* XXX wait 50ms from completion of last RX queue disable */

	/* program the queue contexts and request queue enable */
	for (i = 0; i < nqueues; i++) {
		iv = &sc->sc_vectors[i];
		rxr = iv->iv_rxr;
		txr = iv->iv_txr;

		ixl_txr_qdis(sc, txr, 1);

		ixl_rxr_config(sc, rxr);
		ixl_txr_config(sc, txr);

		ixl_wr(sc, I40E_QTX_CTL(i), I40E_QTX_CTL_PF_QUEUE |
		    (sc->sc_pf_id << I40E_QTX_CTL_PF_INDX_SHIFT));

		ixl_wr(sc, rxr->rxr_tail, 0);
		ixl_rxfill(sc, rxr);

		reg = ixl_rd(sc, I40E_QRX_ENA(i));
		SET(reg, I40E_QRX_ENA_QENA_REQ_MASK);
		ixl_wr(sc, I40E_QRX_ENA(i), reg);

		reg = ixl_rd(sc, I40E_QTX_ENA(i));
		SET(reg, I40E_QTX_ENA_QENA_REQ_MASK);
		ixl_wr(sc, I40E_QTX_ENA(i), reg);
	}

	/* wait for the hardware to acknowledge the queue enables */
	for (i = 0; i < nqueues; i++) {
		iv = &sc->sc_vectors[i];
		rxr = iv->iv_rxr;
		txr = iv->iv_txr;

		if (ixl_rxr_enabled(sc, rxr) != 0)
			goto down;

		if (ixl_txr_enabled(sc, txr) != 0)
			goto down;
	}

	ixl_configure_rss(sc);

	SET(ifp->if_flags, IFF_RUNNING);

	if (sc->sc_intrmap == NULL) {
		/* single vector: chain the rx and tx causes onto intr 0 */
		ixl_wr(sc, I40E_PFINT_LNKLST0,
		    (I40E_INTR_NOTX_QUEUE <<
		     I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
		    (I40E_QUEUE_TYPE_RX <<
		     I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));

		ixl_wr(sc, I40E_QINT_RQCTL(I40E_INTR_NOTX_QUEUE),
		    (I40E_INTR_NOTX_INTR << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
		    (I40E_ITR_INDEX_RX << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
		    (I40E_INTR_NOTX_RX_QUEUE <<
		     I40E_QINT_RQCTL_MSIX0_INDX_SHIFT) |
		    (I40E_INTR_NOTX_QUEUE << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		    (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
		    I40E_QINT_RQCTL_CAUSE_ENA_MASK);

		ixl_wr(sc, I40E_QINT_TQCTL(I40E_INTR_NOTX_QUEUE),
		    (I40E_INTR_NOTX_INTR << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
		    (I40E_ITR_INDEX_TX << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
		    (I40E_INTR_NOTX_TX_QUEUE <<
		     I40E_QINT_TQCTL_MSIX0_INDX_SHIFT) |
		    (I40E_QUEUE_TYPE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
		    (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT) |
		    I40E_QINT_TQCTL_CAUSE_ENA_MASK);
	} else {
		/* vector 0 has no queues */
		ixl_wr(sc, I40E_PFINT_LNKLST0,
		    I40E_QUEUE_TYPE_EOL <<
		    I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT);

		/* queue n is mapped to vector n+1 */
		for (i = 0; i < nqueues; i++) {
			/* LNKLSTN(i) configures vector i+1 */
			ixl_wr(sc, I40E_PFINT_LNKLSTN(i),
			    (i << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
			    (I40E_QUEUE_TYPE_RX <<
			     I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
			ixl_wr(sc, I40E_QINT_RQCTL(i),
			    ((i+1) << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
			    (I40E_ITR_INDEX_RX <<
			     I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
			    (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
			    (I40E_QUEUE_TYPE_TX <<
			     I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
			    I40E_QINT_RQCTL_CAUSE_ENA_MASK);
			ixl_wr(sc, I40E_QINT_TQCTL(i),
			    ((i+1) << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
			    (I40E_ITR_INDEX_TX <<
			     I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
			    (I40E_QUEUE_TYPE_EOL <<
			     I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
			    (I40E_QUEUE_TYPE_RX <<
			     I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT) |
			    I40E_QINT_TQCTL_CAUSE_ENA_MASK);

			/* 0x7a is the ITR interval; index 2 left at 0 */
			ixl_wr(sc, I40E_PFINT_ITRN(0, i), 0x7a);
			ixl_wr(sc, I40E_PFINT_ITRN(1, i), 0x7a);
			ixl_wr(sc, I40E_PFINT_ITRN(2, i), 0);
		}
	}

	ixl_wr(sc, I40E_PFINT_ITR0(0), 0x7a);
	ixl_wr(sc, I40E_PFINT_ITR0(1), 0x7a);
	ixl_wr(sc, I40E_PFINT_ITR0(2), 0);

	rw_exit_write(&sc->sc_cfg_lock);

	return (ENETRESET);

free:
	for (i = 0; i < nqueues; i++) {
		iv = &sc->sc_vectors[i];
		rxr = iv->iv_rxr;
		txr = iv->iv_txr;

		if (rxr == NULL) {
			/*
			 * tx and rx get set at the same time, so if one
			 * is NULL, the other is too.
			 */
			continue;
		}

		ixl_txr_free(sc, txr);
		ixl_rxr_free(sc, rxr);
	}
	rw_exit_write(&sc->sc_cfg_lock);
	return (rv);
down:
	rw_exit_write(&sc->sc_cfg_lock);
	ixl_down(sc);
	return (ETIMEDOUT);
}
2428 
/*
 * Sync the hardware promiscuous/multicast state with the interface
 * flags via a SET_VSI_PROMISC admin command, and if the lladdr has
 * changed, swap the old mac filter for the new address.  Returns 0
 * on success, EIO if the admin command failed, or does nothing when
 * the interface is not running.
 */
static int
ixl_iff(struct ixl_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ixl_atq iatq;
	struct ixl_aq_desc *iaq;
	struct ixl_aq_vsi_promisc_param *param;

	if (!ISSET(ifp->if_flags, IFF_RUNNING))
		return (0);

	memset(&iatq, 0, sizeof(iatq));

	iaq = &iatq.iatq_desc;
	iaq->iaq_opcode = htole16(IXL_AQ_OP_SET_VSI_PROMISC);

	param = (struct ixl_aq_vsi_promisc_param *)&iaq->iaq_param;
	/* always receive broadcast and all vlans */
	param->flags = htole16(IXL_AQ_VSI_PROMISC_FLAG_BCAST |
	    IXL_AQ_VSI_PROMISC_FLAG_VLAN);
	if (ISSET(ifp->if_flags, IFF_PROMISC)) {
		param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
		    IXL_AQ_VSI_PROMISC_FLAG_MCAST);
	} else if (ISSET(ifp->if_flags, IFF_ALLMULTI)) {
		param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_MCAST);
	}
	param->valid_flags = htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
	    IXL_AQ_VSI_PROMISC_FLAG_MCAST | IXL_AQ_VSI_PROMISC_FLAG_BCAST |
	    IXL_AQ_VSI_PROMISC_FLAG_VLAN);
	param->seid = sc->sc_seid;

	ixl_atq_exec(sc, &iatq, "ixliff");

	if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK))
		return (EIO);

	/* lladdr changed: replace the old mac filter with the new one */
	if (memcmp(sc->sc_enaddr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN) != 0) {
		ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
		    IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
		ixl_add_macvlan(sc, sc->sc_ac.ac_enaddr, 0,
		    IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
		memcpy(sc->sc_enaddr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN);
	}
	return (0);
}
2473 
/*
 * Take the interface down: mask queue interrupts, drain in-flight
 * work, disable all tx/rx hardware queues and tear the rings down.
 * Called with the NET_LOCK held; it is released around the teardown
 * and retaken before returning.  On timeout the device is marked
 * dead and ETIMEDOUT is returned.
 */
static int
ixl_down(struct ixl_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ixl_vector *iv;
	struct ixl_rx_ring *rxr;
	struct ixl_tx_ring *txr;
	unsigned int nqueues, i;
	uint32_t reg;
	int error = 0;

	nqueues = ixl_nqueues(sc);

	rw_enter_write(&sc->sc_cfg_lock);

	CLR(ifp->if_flags, IFF_RUNNING);

	NET_UNLOCK();

	/* mask interrupts */
	reg = ixl_rd(sc, I40E_QINT_RQCTL(I40E_INTR_NOTX_QUEUE));
	CLR(reg, I40E_QINT_RQCTL_CAUSE_ENA_MASK);
	ixl_wr(sc, I40E_QINT_RQCTL(I40E_INTR_NOTX_QUEUE), reg);

	reg = ixl_rd(sc, I40E_QINT_TQCTL(I40E_INTR_NOTX_QUEUE));
	CLR(reg, I40E_QINT_TQCTL_CAUSE_ENA_MASK);
	ixl_wr(sc, I40E_QINT_TQCTL(I40E_INTR_NOTX_QUEUE), reg);

	ixl_wr(sc, I40E_PFINT_LNKLST0, I40E_QUEUE_TYPE_EOL);

	/* make sure no hw-generated work is still in flight */
	intr_barrier(sc->sc_ihc);
	if (sc->sc_intrmap != NULL) {
		for (i = 0; i < nqueues; i++) {
			iv = &sc->sc_vectors[i];
			rxr = iv->iv_rxr;
			txr = iv->iv_txr;

			ixl_txr_qdis(sc, txr, 0);

			ifq_barrier(txr->txr_ifq);

			timeout_del_barrier(&rxr->rxr_refill);

			intr_barrier(iv->iv_ihc);
		}
	}

	/* XXX wait at least 400 usec for all tx queues in one go */
	delay(500);

	/* request queue disable */
	for (i = 0; i < nqueues; i++) {
		reg = ixl_rd(sc, I40E_QTX_ENA(i));
		CLR(reg, I40E_QTX_ENA_QENA_REQ_MASK);
		ixl_wr(sc, I40E_QTX_ENA(i), reg);

		reg = ixl_rd(sc, I40E_QRX_ENA(i));
		CLR(reg, I40E_QRX_ENA_QENA_REQ_MASK);
		ixl_wr(sc, I40E_QRX_ENA(i), reg);
	}

	/* wait for the hardware to report the queues disabled */
	for (i = 0; i < nqueues; i++) {
		iv = &sc->sc_vectors[i];
		rxr = iv->iv_rxr;
		txr = iv->iv_txr;

		if (ixl_txr_disabled(sc, txr) != 0)
			goto die;

		if (ixl_rxr_disabled(sc, rxr) != 0)
			goto die;
	}

	/* detach the rings from the stack and free them */
	for (i = 0; i < nqueues; i++) {
		iv = &sc->sc_vectors[i];
		rxr = iv->iv_rxr;
		txr = iv->iv_txr;

		ixl_txr_unconfig(sc, txr);
		ixl_rxr_unconfig(sc, rxr);

		ixl_txr_clean(sc, txr);
		ixl_rxr_clean(sc, rxr);

		ixl_txr_free(sc, txr);
		ixl_rxr_free(sc, rxr);

		ifp->if_iqs[i]->ifiq_softc = NULL;
		ifp->if_ifqs[i]->ifq_softc =  NULL;
	}

out:
	rw_exit_write(&sc->sc_cfg_lock);
	NET_LOCK();
	return (error);
die:
	sc->sc_dead = 1;
	log(LOG_CRIT, "%s: failed to shut down rings", DEVNAME(sc));
	error = ETIMEDOUT;
	goto out;
}
2575 
/*
 * Allocate a tx ring: the software ring state, one DMA map per
 * descriptor slot, and the descriptor memory itself.  Returns NULL
 * on failure with everything unwound.
 */
static struct ixl_tx_ring *
ixl_txr_alloc(struct ixl_softc *sc, unsigned int qid)
{
	struct ixl_tx_ring *txr;
	struct ixl_tx_map *maps, *txm;
	unsigned int i;

	txr = malloc(sizeof(*txr), M_DEVBUF, M_WAITOK|M_CANFAIL);
	if (txr == NULL)
		return (NULL);

	maps = mallocarray(sizeof(*maps),
	    sc->sc_tx_ring_ndescs, M_DEVBUF, M_WAITOK|M_CANFAIL|M_ZERO);
	if (maps == NULL)
		goto free;

	if (ixl_dmamem_alloc(sc, &txr->txr_mem,
	    sizeof(struct ixl_tx_desc) * sc->sc_tx_ring_ndescs,
	    IXL_TX_QUEUE_ALIGN) != 0)
		goto freemap;

	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
		txm = &maps[i];

		if (bus_dmamap_create(sc->sc_dmat,
		    IXL_HARDMTU, IXL_TX_PKT_DESCS, IXL_HARDMTU, 0,
		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
		    &txm->txm_map) != 0)
			goto uncreate;

		txm->txm_eop = -1;
		txm->txm_m = NULL;
	}

	txr->txr_cons = txr->txr_prod = 0;
	txr->txr_maps = maps;

	txr->txr_tail = I40E_QTX_TAIL(qid);
	txr->txr_qid = qid;

	return (txr);

uncreate:
	/* maps was zeroed, so slots never reached still have a NULL map */
	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
		txm = &maps[i];

		if (txm->txm_map == NULL)
			continue;

		bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
	}

	ixl_dmamem_free(sc, &txr->txr_mem);
freemap:
	free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_tx_ring_ndescs);
free:
	free(txr, M_DEVBUF, sizeof(*txr));
	return (NULL);
}
2635 
2636 static void
2637 ixl_txr_qdis(struct ixl_softc *sc, struct ixl_tx_ring *txr, int enable)
2638 {
2639 	unsigned int qid;
2640 	bus_size_t reg;
2641 	uint32_t r;
2642 
2643 	qid = txr->txr_qid + sc->sc_base_queue;
2644 	reg = I40E_GLLAN_TXPRE_QDIS(qid / 128);
2645 	qid %= 128;
2646 
2647 	r = ixl_rd(sc, reg);
2648 	CLR(r, I40E_GLLAN_TXPRE_QDIS_QINDX_MASK);
2649 	SET(r, qid << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
2650 	SET(r, enable ? I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK :
2651 	    I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK);
2652 	ixl_wr(sc, reg, r);
2653 }
2654 
/*
 * Fill in the HMC tx queue context for this ring: descriptor base,
 * ring length, head writeback mode and the VSI's queue set handle.
 */
static void
ixl_txr_config(struct ixl_softc *sc, struct ixl_tx_ring *txr)
{
	struct ixl_hmc_txq txq;
	/* the scratch buffer holds the VSI data fetched by ixl_get_vsi */
	struct ixl_aq_vsi_data *data = IXL_DMA_KVA(&sc->sc_scratch);
	void *hmc;

	memset(&txq, 0, sizeof(txq));
	txq.head = htole16(0);
	txq.new_context = 1;
	/* the base address is expressed in IXL_HMC_TXQ_BASE_UNIT units */
	htolem64(&txq.base,
	    IXL_DMA_DVA(&txr->txr_mem) / IXL_HMC_TXQ_BASE_UNIT);
	txq.head_wb_ena = IXL_HMC_TXQ_DESC_WB;
	htolem16(&txq.qlen, sc->sc_tx_ring_ndescs);
	txq.tphrdesc_ena = 0;
	txq.tphrpacket_ena = 0;
	txq.tphwdesc_ena = 0;
	txq.rdylist = data->qs_handle[0];

	hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
	memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX));
	ixl_hmc_pack(hmc, &txq, ixl_hmc_pack_txq, nitems(ixl_hmc_pack_txq));
}
2678 
2679 static void
2680 ixl_txr_unconfig(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2681 {
2682 	void *hmc;
2683 
2684 	hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
2685 	memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX));
2686 }
2687 
2688 static void
2689 ixl_txr_clean(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2690 {
2691 	struct ixl_tx_map *maps, *txm;
2692 	bus_dmamap_t map;
2693 	unsigned int i;
2694 
2695 	maps = txr->txr_maps;
2696 	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2697 		txm = &maps[i];
2698 
2699 		if (txm->txm_m == NULL)
2700 			continue;
2701 
2702 		map = txm->txm_map;
2703 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2704 		    BUS_DMASYNC_POSTWRITE);
2705 		bus_dmamap_unload(sc->sc_dmat, map);
2706 
2707 		m_freem(txm->txm_m);
2708 		txm->txm_m = NULL;
2709 	}
2710 }
2711 
2712 static int
2713 ixl_txr_enabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2714 {
2715 	bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
2716 	uint32_t reg;
2717 	int i;
2718 
2719 	for (i = 0; i < 10; i++) {
2720 		reg = ixl_rd(sc, ena);
2721 		if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK))
2722 			return (0);
2723 
2724 		delaymsec(10);
2725 	}
2726 
2727 	return (ETIMEDOUT);
2728 }
2729 
2730 static int
2731 ixl_txr_disabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2732 {
2733 	bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
2734 	uint32_t reg;
2735 	int i;
2736 
2737 	for (i = 0; i < 20; i++) {
2738 		reg = ixl_rd(sc, ena);
2739 		if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK) == 0)
2740 			return (0);
2741 
2742 		delaymsec(10);
2743 	}
2744 
2745 	return (ETIMEDOUT);
2746 }
2747 
2748 static void
2749 ixl_txr_free(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2750 {
2751 	struct ixl_tx_map *maps, *txm;
2752 	unsigned int i;
2753 
2754 	maps = txr->txr_maps;
2755 	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2756 		txm = &maps[i];
2757 
2758 		bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
2759 	}
2760 
2761 	ixl_dmamem_free(sc, &txr->txr_mem);
2762 	free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_tx_ring_ndescs);
2763 	free(txr, M_DEVBUF, sizeof(*txr));
2764 }
2765 
2766 static inline int
2767 ixl_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m)
2768 {
2769 	int error;
2770 
2771 	error = bus_dmamap_load_mbuf(dmat, map, m,
2772 	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT);
2773 	if (error != EFBIG)
2774 		return (error);
2775 
2776 	error = m_defrag(m, M_DONTWAIT);
2777 	if (error != 0)
2778 		return (error);
2779 
2780 	return (bus_dmamap_load_mbuf(dmat, map, m,
2781 	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT));
2782 }
2783 
/*
 * Compute the tx descriptor command/offset bits for mbuf m0:
 * VLAN tag insertion plus IPv4/TCP/UDP checksum offload, including
 * the MAC/IP/L4 header length fields the hardware needs.
 */
static uint64_t
ixl_tx_setup_offload(struct mbuf *m0)
{
	struct mbuf *m;
	int hoff;
	uint64_t hlen;
	uint8_t ipproto;
	uint64_t offload = 0;

	if (ISSET(m0->m_flags, M_VLANTAG)) {
		uint64_t vtag = m0->m_pkthdr.ether_vtag;
		offload |= IXL_TX_DESC_CMD_IL2TAG1;
		offload |= vtag << IXL_TX_DESC_L2TAG1_SHIFT;
	}

	/* no checksum work requested? */
	if (!ISSET(m0->m_pkthdr.csum_flags,
	    M_IPV4_CSUM_OUT|M_TCP_CSUM_OUT|M_UDP_CSUM_OUT))
		return (offload);

	switch (ntohs(mtod(m0, struct ether_header *)->ether_type)) {
	case ETHERTYPE_IP: {
		struct ip *ip;

		m = m_getptr(m0, ETHER_HDR_LEN, &hoff);
		KASSERT(m != NULL && m->m_len - hoff >= sizeof(*ip));
		ip = (struct ip *)(mtod(m, caddr_t) + hoff);

		offload |= ISSET(m0->m_pkthdr.csum_flags, M_IPV4_CSUM_OUT) ?
		    IXL_TX_DESC_CMD_IIPT_IPV4_CSUM :
		    IXL_TX_DESC_CMD_IIPT_IPV4;

		hlen = ip->ip_hl << 2;
		ipproto = ip->ip_p;
		break;
	}

#ifdef INET6
	case ETHERTYPE_IPV6: {
		struct ip6_hdr *ip6;

		m = m_getptr(m0, ETHER_HDR_LEN, &hoff);
		KASSERT(m != NULL && m->m_len - hoff >= sizeof(*ip6));
		ip6 = (struct ip6_hdr *)(mtod(m, caddr_t) + hoff);

		offload |= IXL_TX_DESC_CMD_IIPT_IPV6;

		hlen = sizeof(*ip6);
		ipproto = ip6->ip6_nxt;
		break;
	}
#endif
	default:
		panic("CSUM_OUT set for non-IP packet");
		/* NOTREACHED */
	}

	/* MACLEN is encoded in 2-byte words, IPLEN in 4-byte words */
	offload |= (ETHER_HDR_LEN >> 1) << IXL_TX_DESC_MACLEN_SHIFT;
	offload |= (hlen >> 2) << IXL_TX_DESC_IPLEN_SHIFT;

	switch (ipproto) {
	case IPPROTO_TCP: {
		struct tcphdr *th;

		if (!ISSET(m0->m_pkthdr.csum_flags, M_TCP_CSUM_OUT))
			break;

		m = m_getptr(m, hoff + hlen, &hoff);
		KASSERT(m != NULL && m->m_len - hoff >= sizeof(*th));
		th = (struct tcphdr *)(mtod(m, caddr_t) + hoff);

		offload |= IXL_TX_DESC_CMD_L4T_EOFT_TCP;
		/* th_off is already in 4-byte words */
		offload |= (uint64_t)th->th_off << IXL_TX_DESC_L4LEN_SHIFT;
		break;
	}

	case IPPROTO_UDP:
		if (!ISSET(m0->m_pkthdr.csum_flags, M_UDP_CSUM_OUT))
			break;

		offload |= IXL_TX_DESC_CMD_L4T_EOFT_UDP;
		offload |= (sizeof(struct udphdr) >> 2) <<
		    IXL_TX_DESC_L4LEN_SHIFT;
		break;
	}

	return (offload);
}
2871 
/*
 * ixl_start: ifq start routine.
 *
 * Dequeues mbufs from the ifq, dma-loads each one and fills one tx
 * descriptor per dma segment, then hands the new descriptors to the
 * hardware by writing the producer index to the queue tail register.
 */
static void
ixl_start(struct ifqueue *ifq)
{
	struct ifnet *ifp = ifq->ifq_if;
	struct ixl_softc *sc = ifp->if_softc;
	struct ixl_tx_ring *txr = ifq->ifq_softc;
	struct ixl_tx_desc *ring, *txd;
	struct ixl_tx_map *txm;
	bus_dmamap_t map;
	struct mbuf *m;
	uint64_t cmd;
	unsigned int prod, free, last, i;
	unsigned int mask;
	int post = 0;
	uint64_t offload;
#if NBPFILTER > 0
	caddr_t if_bpf;
#endif

	/* don't bother queueing packets while there is no link */
	if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
		ifq_purge(ifq);
		return;
	}

	/* count the free descriptors between the producer and consumer */
	prod = txr->txr_prod;
	free = txr->txr_cons;
	if (free <= prod)
		free += sc->sc_tx_ring_ndescs;
	free -= prod;

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
	    0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTWRITE);

	ring = IXL_DMA_KVA(&txr->txr_mem);
	mask = sc->sc_tx_ring_ndescs - 1;	/* ring size is a power of 2 */

	for (;;) {
		/* always keep room for a maximally fragmented packet */
		if (free <= IXL_TX_PKT_DESCS) {
			ifq_set_oactive(ifq);
			break;
		}

		m = ifq_dequeue(ifq);
		if (m == NULL)
			break;

		offload = ixl_tx_setup_offload(m);

		txm = &txr->txr_maps[prod];
		map = txm->txm_map;

		if (ixl_load_mbuf(sc->sc_dmat, map, m) != 0) {
			ifq->ifq_errors++;
			m_freem(m);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, map, 0,
		    map->dm_mapsize, BUS_DMASYNC_PREWRITE);

		/* one data descriptor per dma segment */
		for (i = 0; i < map->dm_nsegs; i++) {
			txd = &ring[prod];

			cmd = (uint64_t)map->dm_segs[i].ds_len <<
			    IXL_TX_DESC_BSIZE_SHIFT;
			cmd |= IXL_TX_DESC_DTYPE_DATA | IXL_TX_DESC_CMD_ICRC;
			cmd |= offload;

			htolem64(&txd->addr, map->dm_segs[i].ds_addr);
			htolem64(&txd->cmd, cmd);

			last = prod;

			prod++;
			prod &= mask;
		}
		/* rewrite the final descriptor with EOP and RS set */
		cmd |= IXL_TX_DESC_CMD_EOP | IXL_TX_DESC_CMD_RS;
		htolem64(&txd->cmd, cmd);

		/* remember the mbuf and its last descriptor for txeof */
		txm->txm_m = m;
		txm->txm_eop = last;

#if NBPFILTER > 0
		if_bpf = ifp->if_bpf;
		if (if_bpf)
			bpf_mtap_ether(if_bpf, m, BPF_DIRECTION_OUT);
#endif

		free -= i;
		post = 1;
	}

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
	    0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREWRITE);

	if (post) {
		txr->txr_prod = prod;
		ixl_wr(sc, txr->txr_tail, prod);
	}
}
2972 
/*
 * ixl_txeof: reclaim completed tx descriptors.
 *
 * Walks the ring from the consumer index, and for every packet whose
 * final descriptor has been marked done by the hardware, unloads the
 * dma map and frees the mbuf.  Restarts the ifq if it was flow
 * controlled.  Returns nonzero if any work was done.
 */
static int
ixl_txeof(struct ixl_softc *sc, struct ixl_tx_ring *txr)
{
	struct ifqueue *ifq = txr->txr_ifq;
	struct ixl_tx_desc *ring, *txd;
	struct ixl_tx_map *txm;
	bus_dmamap_t map;
	unsigned int cons, prod, last;
	unsigned int mask;
	uint64_t dtype;
	int done = 0;

	prod = txr->txr_prod;
	cons = txr->txr_cons;

	if (cons == prod)
		return (0);

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
	    0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTREAD);

	ring = IXL_DMA_KVA(&txr->txr_mem);
	mask = sc->sc_tx_ring_ndescs - 1;

	do {
		txm = &txr->txr_maps[cons];
		/* only the packet's last descriptor reports completion */
		last = txm->txm_eop;
		txd = &ring[last];

		dtype = txd->cmd & htole64(IXL_TX_DESC_DTYPE_MASK);
		if (dtype != htole64(IXL_TX_DESC_DTYPE_DONE))
			break;

		map = txm->txm_map;

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, map);
		m_freem(txm->txm_m);

		txm->txm_m = NULL;
		txm->txm_eop = -1;

		/* skip over all the descriptors the packet consumed */
		cons = last + 1;
		cons &= mask;

		done = 1;
	} while (cons != prod);

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
	    0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREREAD);

	txr->txr_cons = cons;

	//ixl_enable(sc, txr->txr_msix);

	if (ifq_is_oactive(ifq))
		ifq_restart(ifq);

	return (done);
}
3034 
3035 static struct ixl_rx_ring *
3036 ixl_rxr_alloc(struct ixl_softc *sc, unsigned int qid)
3037 {
3038 	struct ixl_rx_ring *rxr;
3039 	struct ixl_rx_map *maps, *rxm;
3040 	unsigned int i;
3041 
3042 	rxr = malloc(sizeof(*rxr), M_DEVBUF, M_WAITOK|M_CANFAIL);
3043 	if (rxr == NULL)
3044 		return (NULL);
3045 
3046 	maps = mallocarray(sizeof(*maps),
3047 	    sc->sc_rx_ring_ndescs, M_DEVBUF, M_WAITOK|M_CANFAIL|M_ZERO);
3048 	if (maps == NULL)
3049 		goto free;
3050 
3051 	if (ixl_dmamem_alloc(sc, &rxr->rxr_mem,
3052 	    sizeof(struct ixl_rx_rd_desc_16) * sc->sc_rx_ring_ndescs,
3053 	    IXL_RX_QUEUE_ALIGN) != 0)
3054 		goto freemap;
3055 
3056 	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
3057 		rxm = &maps[i];
3058 
3059 		if (bus_dmamap_create(sc->sc_dmat,
3060 		    IXL_HARDMTU, 1, IXL_HARDMTU, 0,
3061 		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
3062 		    &rxm->rxm_map) != 0)
3063 			goto uncreate;
3064 
3065 		rxm->rxm_m = NULL;
3066 	}
3067 
3068 	rxr->rxr_sc = sc;
3069 	if_rxr_init(&rxr->rxr_acct, 17, sc->sc_rx_ring_ndescs - 1);
3070 	timeout_set(&rxr->rxr_refill, ixl_rxrefill, rxr);
3071 	rxr->rxr_cons = rxr->rxr_prod = 0;
3072 	rxr->rxr_m_head = NULL;
3073 	rxr->rxr_m_tail = &rxr->rxr_m_head;
3074 	rxr->rxr_maps = maps;
3075 
3076 	rxr->rxr_tail = I40E_QRX_TAIL(qid);
3077 	rxr->rxr_qid = qid;
3078 
3079 	return (rxr);
3080 
3081 uncreate:
3082 	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
3083 		rxm = &maps[i];
3084 
3085 		if (rxm->rxm_map == NULL)
3086 			continue;
3087 
3088 		bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
3089 	}
3090 
3091 	ixl_dmamem_free(sc, &rxr->rxr_mem);
3092 freemap:
3093 	free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_rx_ring_ndescs);
3094 free:
3095 	free(rxr, M_DEVBUF, sizeof(*rxr));
3096 	return (NULL);
3097 }
3098 
/*
 * ixl_rxr_clean: release all mbufs held by an rx ring.
 *
 * Stops the refill timeout, unloads and frees every loaded rx slot,
 * frees any partially assembled packet chain, and resets the ring
 * indices.  The dma maps and ring memory are kept (see ixl_rxr_free).
 */
static void
ixl_rxr_clean(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
{
	struct ixl_rx_map *maps, *rxm;
	bus_dmamap_t map;
	unsigned int i;

	/* make sure the refill timeout isn't running before tearing down */
	timeout_del_barrier(&rxr->rxr_refill);

	maps = rxr->rxr_maps;
	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
		rxm = &maps[i];

		if (rxm->rxm_m == NULL)
			continue;

		map = rxm->rxm_map;
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, map);

		m_freem(rxm->rxm_m);
		rxm->rxm_m = NULL;
	}

	/* drop any partially reassembled packet */
	m_freem(rxr->rxr_m_head);
	rxr->rxr_m_head = NULL;
	rxr->rxr_m_tail = &rxr->rxr_m_head;

	rxr->rxr_prod = rxr->rxr_cons = 0;
}
3130 
3131 static int
3132 ixl_rxr_enabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3133 {
3134 	bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid);
3135 	uint32_t reg;
3136 	int i;
3137 
3138 	for (i = 0; i < 10; i++) {
3139 		reg = ixl_rd(sc, ena);
3140 		if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK))
3141 			return (0);
3142 
3143 		delaymsec(10);
3144 	}
3145 
3146 	return (ETIMEDOUT);
3147 }
3148 
3149 static int
3150 ixl_rxr_disabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3151 {
3152 	bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid);
3153 	uint32_t reg;
3154 	int i;
3155 
3156 	for (i = 0; i < 20; i++) {
3157 		reg = ixl_rd(sc, ena);
3158 		if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK) == 0)
3159 			return (0);
3160 
3161 		delaymsec(10);
3162 	}
3163 
3164 	return (ETIMEDOUT);
3165 }
3166 
/*
 * ixl_rxr_config: program the HMC rx queue context for a ring.
 *
 * Fills in an ixl_hmc_rxq structure describing the ring (base address,
 * length, buffer sizes, 16-byte descriptors, vlan tag stripping) and
 * packs it into the queue's HMC context memory.
 */
static void
ixl_rxr_config(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
{
	struct ixl_hmc_rxq rxq;
	void *hmc;

	memset(&rxq, 0, sizeof(rxq));

	rxq.head = htole16(0);
	/* the hardware takes the ring base in IXL_HMC_RXQ_BASE_UNIT units */
	htolem64(&rxq.base,
	    IXL_DMA_DVA(&rxr->rxr_mem) / IXL_HMC_RXQ_BASE_UNIT);
	htolem16(&rxq.qlen, sc->sc_rx_ring_ndescs);
	rxq.dbuff = htole16(MCLBYTES / IXL_HMC_RXQ_DBUFF_UNIT);
	rxq.hbuff = 0;	/* no header split buffers */
	rxq.dtype = IXL_HMC_RXQ_DTYPE_NOSPLIT;
	rxq.dsize = IXL_HMC_RXQ_DSIZE_16;
	rxq.crcstrip = 1;
	/* have the hardware strip the outermost vlan tag into l2tag1 */
	rxq.l2tsel = IXL_HMC_RXQ_L2TSEL_1ST_TAG_TO_L2TAG1;
	rxq.showiv = 0;
	rxq.rxmax = htole16(IXL_HARDMTU);
	rxq.tphrdesc_ena = 0;
	rxq.tphwdesc_ena = 0;
	rxq.tphdata_ena = 0;
	rxq.tphhead_ena = 0;
	rxq.lrxqthresh = 0;
	rxq.prefena = 1;

	hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid);
	memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX));
	ixl_hmc_pack(hmc, &rxq, ixl_hmc_pack_rxq, nitems(ixl_hmc_pack_rxq));
}
3198 
3199 static void
3200 ixl_rxr_unconfig(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3201 {
3202 	void *hmc;
3203 
3204 	hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid);
3205 	memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX));
3206 }
3207 
3208 static void
3209 ixl_rxr_free(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3210 {
3211 	struct ixl_rx_map *maps, *rxm;
3212 	unsigned int i;
3213 
3214 	maps = rxr->rxr_maps;
3215 	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
3216 		rxm = &maps[i];
3217 
3218 		bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
3219 	}
3220 
3221 	ixl_dmamem_free(sc, &rxr->rxr_mem);
3222 	free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_rx_ring_ndescs);
3223 	free(rxr, M_DEVBUF, sizeof(*rxr));
3224 }
3225 
/*
 * ixl_rxeof: harvest received packets from an rx ring.
 *
 * Walks the ring from the consumer index while descriptors have the DD
 * (descriptor done) bit set.  Buffers are chained onto rxr_m_head /
 * rxr_m_tail until a descriptor with EOP completes the packet, at
 * which point rss flowid, vlan tag and checksum status are recovered
 * from the write-back descriptor and the packet is queued for input.
 * Returns nonzero if any descriptors were consumed.
 */
static int
ixl_rxeof(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
{
	struct ifiqueue *ifiq = rxr->rxr_ifiq;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ixl_rx_wb_desc_16 *ring, *rxd;
	struct ixl_rx_map *rxm;
	bus_dmamap_t map;
	unsigned int cons, prod;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	uint64_t word;
	unsigned int len;
	unsigned int mask;
	int done = 0;

	prod = rxr->rxr_prod;
	cons = rxr->rxr_cons;

	if (cons == prod)
		return (0);

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
	    0, IXL_DMA_LEN(&rxr->rxr_mem),
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	ring = IXL_DMA_KVA(&rxr->rxr_mem);
	mask = sc->sc_rx_ring_ndescs - 1;

	do {
		rxd = &ring[cons];

		word = lemtoh64(&rxd->qword1);
		if (!ISSET(word, IXL_RX_DESC_DD))
			break;

		if_rxr_put(&rxr->rxr_acct, 1);

		rxm = &rxr->rxr_maps[cons];

		map = rxm->rxm_map;
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, map);

		m = rxm->rxm_m;
		rxm->rxm_m = NULL;

		len = (word & IXL_RX_DESC_PLEN_MASK) >> IXL_RX_DESC_PLEN_SHIFT;
		m->m_len = len;
		m->m_pkthdr.len = 0;

		/* append this buffer to the packet being reassembled */
		m->m_next = NULL;
		*rxr->rxr_m_tail = m;
		rxr->rxr_m_tail = &m->m_next;

		/* the head mbuf carries the accumulated packet length */
		m = rxr->rxr_m_head;
		m->m_pkthdr.len += len;

		if (ISSET(word, IXL_RX_DESC_EOP)) {
			if (!ISSET(word,
			    IXL_RX_DESC_RXE | IXL_RX_DESC_OVERSIZE)) {
				if ((word & IXL_RX_DESC_FLTSTAT_MASK) ==
				    IXL_RX_DESC_FLTSTAT_RSS) {
					m->m_pkthdr.ph_flowid =
					    lemtoh32(&rxd->filter_status);
					m->m_pkthdr.csum_flags |= M_FLOWID;
				}

				if (ISSET(word, IXL_RX_DESC_L2TAG1P)) {
					m->m_pkthdr.ether_vtag =
					    lemtoh16(&rxd->l2tag1);
					SET(m->m_flags, M_VLANTAG);
				}

				ixl_rx_checksum(m, word);
				ml_enqueue(&ml, m);
			} else {
				/* receive or oversize error: drop */
				ifp->if_ierrors++; /* XXX */
				m_freem(m);
			}

			rxr->rxr_m_head = NULL;
			rxr->rxr_m_tail = &rxr->rxr_m_head;
		}

		cons++;
		cons &= mask;

		done = 1;
	} while (cons != prod);

	if (done) {
		rxr->rxr_cons = cons;
		if (ifiq_input(ifiq, &ml))
			if_rxr_livelocked(&rxr->rxr_acct);
		ixl_rxfill(sc, rxr);
	}

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
	    0, IXL_DMA_LEN(&rxr->rxr_mem),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	return (done);
}
3331 
/*
 * ixl_rxfill: replenish an rx ring with fresh mbuf clusters.
 *
 * Fills as many slots as the rxr accounting allows, loading each new
 * cluster into its slot's dma map and writing its address into the
 * read descriptor.  If the ring ended up completely empty (no mbufs
 * could be allocated) the refill timeout is armed to retry; otherwise
 * the new producer index is pushed to the hardware tail register.
 */
static void
ixl_rxfill(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
{
	struct ixl_rx_rd_desc_16 *ring, *rxd;
	struct ixl_rx_map *rxm;
	bus_dmamap_t map;
	struct mbuf *m;
	unsigned int prod;
	unsigned int slots;
	unsigned int mask;
	int post = 0;

	slots = if_rxr_get(&rxr->rxr_acct, sc->sc_rx_ring_ndescs);
	if (slots == 0)
		return;

	prod = rxr->rxr_prod;

	ring = IXL_DMA_KVA(&rxr->rxr_mem);
	mask = sc->sc_rx_ring_ndescs - 1;

	do {
		rxm = &rxr->rxr_maps[prod];

		m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES + ETHER_ALIGN);
		if (m == NULL)
			break;
		/* position the buffer at the end of the cluster */
		m->m_data += (m->m_ext.ext_size - (MCLBYTES + ETHER_ALIGN));
		m->m_len = m->m_pkthdr.len = MCLBYTES + ETHER_ALIGN;

		map = rxm->rxm_map;

		if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
		    BUS_DMA_NOWAIT) != 0) {
			m_freem(m);
			break;
		}

		rxm->rxm_m = m;

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);

		rxd = &ring[prod];

		htolem64(&rxd->paddr, map->dm_segs[0].ds_addr);
		rxd->haddr = htole64(0);	/* no header split buffer */

		prod++;
		prod &= mask;

		post = 1;
	} while (--slots);

	/* return the slots we could not fill */
	if_rxr_put(&rxr->rxr_acct, slots);

	if (if_rxr_inuse(&rxr->rxr_acct) == 0)
		timeout_add(&rxr->rxr_refill, 1);
	else if (post) {
		rxr->rxr_prod = prod;
		ixl_wr(sc, rxr->rxr_tail, prod);
	}
}
3395 
3396 void
3397 ixl_rxrefill(void *arg)
3398 {
3399 	struct ixl_rx_ring *rxr = arg;
3400 	struct ixl_softc *sc = rxr->rxr_sc;
3401 
3402 	ixl_rxfill(sc, rxr);
3403 }
3404 
3405 static int
3406 ixl_rxrinfo(struct ixl_softc *sc, struct if_rxrinfo *ifri)
3407 {
3408 	struct ifnet *ifp = &sc->sc_ac.ac_if;
3409 	struct if_rxring_info *ifr;
3410 	struct ixl_rx_ring *ring;
3411 	int i, rv;
3412 
3413 	if (!ISSET(ifp->if_flags, IFF_RUNNING))
3414 		return (ENOTTY);
3415 
3416 	ifr = mallocarray(sizeof(*ifr), ixl_nqueues(sc), M_TEMP,
3417 	    M_WAITOK|M_CANFAIL|M_ZERO);
3418 	if (ifr == NULL)
3419 		return (ENOMEM);
3420 
3421 	for (i = 0; i < ixl_nqueues(sc); i++) {
3422 		ring = ifp->if_iqs[i]->ifiq_softc;
3423 		ifr[i].ifr_size = MCLBYTES;
3424 		snprintf(ifr[i].ifr_name, sizeof(ifr[i].ifr_name), "%d", i);
3425 		ifr[i].ifr_info = ring->rxr_acct;
3426 	}
3427 
3428 	rv = if_rxr_info_ioctl(ifri, ixl_nqueues(sc), ifr);
3429 	free(ifr, M_TEMP, ixl_nqueues(sc) * sizeof(*ifr));
3430 
3431 	return (rv);
3432 }
3433 
3434 static void
3435 ixl_rx_checksum(struct mbuf *m, uint64_t word)
3436 {
3437 	if (!ISSET(word, IXL_RX_DESC_L3L4P))
3438 		return;
3439 
3440 	if (ISSET(word, IXL_RX_DESC_IPE))
3441 		return;
3442 
3443 	m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
3444 
3445 	if (ISSET(word, IXL_RX_DESC_L4E))
3446 		return;
3447 
3448 	m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
3449 }
3450 
/*
 * ixl_intr0: interrupt handler for the "other causes" / legacy vector.
 *
 * Handles admin queue completions and link state change events, and,
 * when running without per-queue vectors, the first queue pair's rx
 * and tx work as well.  Returns nonzero if the interrupt was ours.
 */
static int
ixl_intr0(void *xsc)
{
	struct ixl_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	uint32_t icr;
	int rv = 0;

	ixl_intr_enable(sc);
	/* reading ICR0 acknowledges the cause bits */
	icr = ixl_rd(sc, I40E_PFINT_ICR0);

	if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)) {
		ixl_atq_done(sc);
		/* arq processing may sleep, defer it to the system taskq */
		task_add(systq, &sc->sc_arq_task);
		rv = 1;
	}

	if (ISSET(icr, I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)) {
		task_add(systq, &sc->sc_link_state_task);
		rv = 1;
	}

	if (ISSET(ifp->if_flags, IFF_RUNNING)) {
		struct ixl_vector *iv = sc->sc_vectors;
		if (ISSET(icr, I40E_INTR_NOTX_RX_MASK))
			rv |= ixl_rxeof(sc, iv->iv_rxr);
		if (ISSET(icr, I40E_INTR_NOTX_TX_MASK))
			rv |= ixl_txeof(sc, iv->iv_txr);
	}

	return (rv);
}
3483 
/*
 * ixl_intr_vector: interrupt handler for a per-queue-pair msix vector.
 *
 * Processes the vector's rx and tx rings and then re-enables the
 * vector's interrupt via its dynamic control register.  Returns
 * nonzero if any work was done.
 */
static int
ixl_intr_vector(void *v)
{
	struct ixl_vector *iv = v;
	struct ixl_softc *sc = iv->iv_sc;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int rv = 0;

	if (ISSET(ifp->if_flags, IFF_RUNNING)) {
		rv |= ixl_rxeof(sc, iv->iv_rxr);
		rv |= ixl_txeof(sc, iv->iv_txr);
	}

	/* re-arm the interrupt for this vector */
	ixl_wr(sc, I40E_PFINT_DYN_CTLN(iv->iv_qid),
	    I40E_PFINT_DYN_CTLN_INTENA_MASK |
	    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
	    (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));

	return (rv);
}
3504 
/*
 * ixl_link_state_update_iaq: completion handler for the LINK STATUS
 * admin queue command.
 *
 * Decodes the link status out of the descriptor and, if it differs
 * from the interface's current state, updates it and notifies the
 * stack.  The link state mutex serializes against other updaters.
 */
static void
ixl_link_state_update_iaq(struct ixl_softc *sc, void *arg)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct ixl_aq_desc *iaq = arg;
	uint16_t retval;
	int link_state;
	int change = 0;

	retval = lemtoh16(&iaq->iaq_retval);
	if (retval != IXL_AQ_RC_OK) {
		printf("%s: LINK STATUS error %u\n", DEVNAME(sc), retval);
		return;
	}

	/* ixl_set_link_status() touches media state, needs the kernel lock */
	KERNEL_LOCK();
	link_state = ixl_set_link_status(sc, iaq);
	KERNEL_UNLOCK();
	mtx_enter(&sc->sc_link_state_mtx);
	if (ifp->if_link_state != link_state) {
		ifp->if_link_state = link_state;
		change = 1;
	}
	mtx_leave(&sc->sc_link_state_mtx);

	if (change)
		if_link_state_change(ifp);
}
3533 
/*
 * ixl_link_state_update: task to refresh the link state.
 *
 * Posts a PHY LINK STATUS admin queue command (with notification
 * requested); its completion is handled by ixl_link_state_update_iaq.
 */
static void
ixl_link_state_update(void *xsc)
{
	struct ixl_softc *sc = xsc;
	struct ixl_aq_desc *iaq;
	struct ixl_aq_link_param *param;

	memset(&sc->sc_link_state_atq, 0, sizeof(sc->sc_link_state_atq));
	iaq = &sc->sc_link_state_atq.iatq_desc;
	iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS);
	param = (struct ixl_aq_link_param *)iaq->iaq_param;
	param->notify = IXL_AQ_LINK_NOTIFY;

	ixl_atq_set(&sc->sc_link_state_atq, ixl_link_state_update_iaq, iaq);
	ixl_atq_post(sc, &sc->sc_link_state_atq);
}
3550 
#if 0
/*
 * ixl_aq_dump: debug helper that pretty-prints an admin queue
 * descriptor.  Compiled out; enable this block when debugging admin
 * queue traffic.
 */
static void
ixl_aq_dump(const struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
{
	printf("%s: flags %b opcode %04x\n", DEVNAME(sc),
	    lemtoh16(&iaq->iaq_flags), IXL_AQ_FLAGS_FMT,
	    lemtoh16(&iaq->iaq_opcode));
	printf("%s: datalen %u retval %u\n", DEVNAME(sc),
	    lemtoh16(&iaq->iaq_datalen), lemtoh16(&iaq->iaq_retval));
	printf("%s: cookie %016llx\n", DEVNAME(sc), iaq->iaq_cookie);
	printf("%s: %08x %08x %08x %08x\n", DEVNAME(sc),
	    lemtoh32(&iaq->iaq_param[0]), lemtoh32(&iaq->iaq_param[1]),
	    lemtoh32(&iaq->iaq_param[2]), lemtoh32(&iaq->iaq_param[3]));
}
#endif
3566 
/*
 * ixl_arq: process the admin receive queue (events from the firmware).
 *
 * Runs as a task.  Consumes descriptors between the driver's consumer
 * index and the hardware head pointer, dispatching the ones the driver
 * cares about (link status events), recycling their buffers, and then
 * refilling the queue and re-enabling the admin queue interrupt.
 */
static void
ixl_arq(void *xsc)
{
	struct ixl_softc *sc = xsc;
	struct ixl_aq_desc *arq, *iaq;
	struct ixl_aq_buf *aqb;
	unsigned int cons = sc->sc_arq_cons;
	unsigned int prod;
	int done = 0;

	prod = ixl_rd(sc, sc->sc_aq_regs->arq_head) &
	    sc->sc_aq_regs->arq_head_mask;

	if (cons == prod)
		goto done;

	arq = IXL_DMA_KVA(&sc->sc_arq);

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
	    0, IXL_DMA_LEN(&sc->sc_arq),
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	do {
		iaq = &arq[cons];

		/* buffers are queued in the same order as the descriptors */
		aqb = SIMPLEQ_FIRST(&sc->sc_arq_live);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_arq_live, aqb_entry);
		bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN,
		    BUS_DMASYNC_POSTREAD);

		switch (iaq->iaq_opcode) {
		case HTOLE16(IXL_AQ_OP_PHY_LINK_STATUS):
			ixl_link_state_update_iaq(sc, iaq);
			break;
		}

		memset(iaq, 0, sizeof(*iaq));
		SIMPLEQ_INSERT_TAIL(&sc->sc_arq_idle, aqb, aqb_entry);
		if_rxr_put(&sc->sc_arq_ring, 1);

		cons++;
		cons &= IXL_AQ_MASK;

		done = 1;
	} while (cons != prod);

	if (done && ixl_arq_fill(sc))
		ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
	    0, IXL_DMA_LEN(&sc->sc_arq),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	sc->sc_arq_cons = cons;

done:
	ixl_intr_enable(sc);
}
3625 
3626 static void
3627 ixl_atq_set(struct ixl_atq *iatq,
3628     void (*fn)(struct ixl_softc *, void *), void *arg)
3629 {
3630 	iatq->iatq_fn = fn;
3631 	iatq->iatq_arg = arg;
3632 }
3633 
/*
 * ixl_atq_post: submit an admin transmit queue command.
 *
 * Copies the command descriptor into the next ring slot, stores the
 * iatq pointer in the cookie so ixl_atq_done() can find it, and bumps
 * the tail register.  Completion is reported asynchronously via the
 * callback set with ixl_atq_set().
 */
static void
ixl_atq_post(struct ixl_softc *sc, struct ixl_atq *iatq)
{
	struct ixl_aq_desc *atq, *slot;
	unsigned int prod;

	/* assert locked */

	atq = IXL_DMA_KVA(&sc->sc_atq);
	prod = sc->sc_atq_prod;
	slot = atq + prod;

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);

	*slot = iatq->iatq_desc;
	/* the cookie round-trips the iatq pointer through the hardware */
	slot->iaq_cookie = (uint64_t)iatq;

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);

	prod++;
	prod &= IXL_AQ_MASK;
	sc->sc_atq_prod = prod;
	ixl_wr(sc, sc->sc_aq_regs->atq_tail, prod);
}
3660 
/*
 * ixl_atq_done: reap completed admin transmit queue commands.
 *
 * Walks the ring from the consumer index while descriptors have the
 * DD bit set, copies each completed descriptor back into its iatq
 * (located via the cookie), and invokes the completion callback.
 */
static void
ixl_atq_done(struct ixl_softc *sc)
{
	struct ixl_aq_desc *atq, *slot;
	struct ixl_atq *iatq;
	unsigned int cons;
	unsigned int prod;

	prod = sc->sc_atq_prod;
	cons = sc->sc_atq_cons;

	if (prod == cons)
		return;

	atq = IXL_DMA_KVA(&sc->sc_atq);

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
	    0, IXL_DMA_LEN(&sc->sc_atq),
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	do {
		slot = &atq[cons];
		if (!ISSET(slot->iaq_flags, htole16(IXL_AQ_DD)))
			break;

		/* give the completed descriptor back to its owner */
		iatq = (struct ixl_atq *)slot->iaq_cookie;
		iatq->iatq_desc = *slot;

		memset(slot, 0, sizeof(*slot));

		(*iatq->iatq_fn)(sc, iatq->iatq_arg);

		cons++;
		cons &= IXL_AQ_MASK;
	} while (cons != prod);

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
	    0, IXL_DMA_LEN(&sc->sc_atq),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	sc->sc_atq_cons = cons;
}
3703 
/*
 * ixl_wakeup: admin queue completion callback that signals the
 * condition variable a caller is sleeping on in ixl_atq_exec().
 */
static void
ixl_wakeup(struct ixl_softc *sc, void *arg)
{
	cond_signal((struct cond *)arg);
}
3711 
/*
 * ixl_atq_exec: post an admin queue command and sleep until it
 * completes.  The command must not already be in flight (cookie must
 * be clear).  On return iatq->iatq_desc holds the completed
 * descriptor.
 */
static void
ixl_atq_exec(struct ixl_softc *sc, struct ixl_atq *iatq, const char *wmesg)
{
	struct cond c = COND_INITIALIZER();

	KASSERT(iatq->iatq_desc.iaq_cookie == 0);

	ixl_atq_set(iatq, ixl_wakeup, &c);
	ixl_atq_post(sc, iatq);

	cond_wait(&c, wmesg);
}
3724 
/*
 * ixl_atq_poll: post an admin queue command and busy-wait for it.
 *
 * Used during attach, before interrupts are available.  Copies the
 * command into the ring, bumps the tail, and polls the head register
 * (1ms per try, up to tm tries) until the hardware consumes it.  The
 * completed descriptor is copied back into *iaq.  Returns 0 on
 * success or ETIMEDOUT.
 */
static int
ixl_atq_poll(struct ixl_softc *sc, struct ixl_aq_desc *iaq, unsigned int tm)
{
	struct ixl_aq_desc *atq, *slot;
	unsigned int prod;
	unsigned int t = 0;

	atq = IXL_DMA_KVA(&sc->sc_atq);
	prod = sc->sc_atq_prod;
	slot = atq + prod;

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);

	*slot = *iaq;
	slot->iaq_flags |= htole16(IXL_AQ_SI);

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);

	prod++;
	prod &= IXL_AQ_MASK;
	sc->sc_atq_prod = prod;
	ixl_wr(sc, sc->sc_aq_regs->atq_tail, prod);

	/* wait for the firmware to advance the head past our slot */
	while (ixl_rd(sc, sc->sc_aq_regs->atq_head) != prod) {
		delaymsec(1);

		if (t++ > tm)
			return (ETIMEDOUT);
	}

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTREAD);
	*iaq = *slot;
	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREREAD);

	sc->sc_atq_cons = prod;

	return (0);
}
3767 
/*
 * ixl_get_version: query firmware and admin queue API versions.
 *
 * Issues a GET VERSION admin queue command, records the API version
 * in the softc, and prints firmware/API versions as part of the
 * attach line.  Returns 0 on success, ETIMEDOUT or EIO on failure.
 */
static int
ixl_get_version(struct ixl_softc *sc)
{
	struct ixl_aq_desc iaq;
	uint32_t fwbuild, fwver, apiver;

	memset(&iaq, 0, sizeof(iaq));
	iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VERSION);

	if (ixl_atq_poll(sc, &iaq, 2000) != 0)
		return (ETIMEDOUT);
	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK))
		return (EIO);

	fwbuild = lemtoh32(&iaq.iaq_param[1]);
	fwver = lemtoh32(&iaq.iaq_param[2]);
	apiver = lemtoh32(&iaq.iaq_param[3]);

	/* major in the low half, minor in the high half */
	sc->sc_api_major = apiver & 0xffff;
	sc->sc_api_minor = (apiver >> 16) & 0xffff;

	printf(", FW %hu.%hu.%05u API %hu.%hu", (uint16_t)fwver,
	    (uint16_t)(fwver >> 16), fwbuild,
	    sc->sc_api_major, sc->sc_api_minor);

	return (0);
}
3795 
/*
 * ixl_pxe_clear: take the device out of PXE boot mode.
 *
 * EEXIST from the firmware means PXE mode was already cleared and is
 * treated as success.  Returns 0 on success, -1 on failure (with a
 * diagnostic printed as part of the attach line).
 */
static int
ixl_pxe_clear(struct ixl_softc *sc)
{
	struct ixl_aq_desc iaq;

	memset(&iaq, 0, sizeof(iaq));
	iaq.iaq_opcode = htole16(IXL_AQ_OP_CLEAR_PXE_MODE);
	iaq.iaq_param[0] = htole32(0x2);

	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
		printf(", CLEAR PXE MODE timeout\n");
		return (-1);
	}

	switch (iaq.iaq_retval) {
	case HTOLE16(IXL_AQ_RC_OK):
	case HTOLE16(IXL_AQ_RC_EEXIST):	/* already cleared */
		break;
	default:
		printf(", CLEAR PXE MODE error\n");
		return (-1);
	}

	return (0);
}
3821 
/*
 * ixl_lldp_shut: stop the firmware's internal LLDP agent.
 *
 * Best-effort: all firmware return codes are accepted and 0 is always
 * returned (EMODE/EPERM simply mean the agent cannot be stopped).
 */
static int
ixl_lldp_shut(struct ixl_softc *sc)
{
	struct ixl_aq_desc iaq;

	memset(&iaq, 0, sizeof(iaq));
	iaq.iaq_opcode = htole16(IXL_AQ_OP_LLDP_STOP_AGENT);
	iaq.iaq_param[0] = htole32(IXL_LLDP_SHUTDOWN);

	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
		printf(", STOP LLDP AGENT timeout\n");
		return (-1);
	}

	switch (iaq.iaq_retval) {
	case HTOLE16(IXL_AQ_RC_EMODE):
	case HTOLE16(IXL_AQ_RC_EPERM):
		/* ignore silently */
		/* FALLTHROUGH */
	default:
		break;
	}

	return (0);
}
3846 
3847 static int
3848 ixl_get_mac(struct ixl_softc *sc)
3849 {
3850 	struct ixl_dmamem idm;
3851 	struct ixl_aq_desc iaq;
3852 	struct ixl_aq_mac_addresses *addrs;
3853 	int rv;
3854 
3855 #ifdef __sparc64__
3856 	if (OF_getprop(PCITAG_NODE(sc->sc_tag), "local-mac-address",
3857 	    sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN) == ETHER_ADDR_LEN)
3858 		return (0);
3859 #endif
3860 
3861 	if (ixl_dmamem_alloc(sc, &idm, sizeof(*addrs), 0) != 0) {
3862 		printf(", unable to allocate mac addresses\n");
3863 		return (-1);
3864 	}
3865 
3866 	memset(&iaq, 0, sizeof(iaq));
3867 	iaq.iaq_flags = htole16(IXL_AQ_BUF);
3868 	iaq.iaq_opcode = htole16(IXL_AQ_OP_MAC_ADDRESS_READ);
3869 	iaq.iaq_datalen = htole16(sizeof(*addrs));
3870 	ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
3871 
3872 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3873 	    BUS_DMASYNC_PREREAD);
3874 
3875 	rv = ixl_atq_poll(sc, &iaq, 250);
3876 
3877 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3878 	    BUS_DMASYNC_POSTREAD);
3879 
3880 	if (rv != 0) {
3881 		printf(", MAC ADDRESS READ timeout\n");
3882 		rv = -1;
3883 		goto done;
3884 	}
3885 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
3886 		printf(", MAC ADDRESS READ error\n");
3887 		rv = -1;
3888 		goto done;
3889 	}
3890 
3891 	addrs = IXL_DMA_KVA(&idm);
3892 	if (!ISSET(iaq.iaq_param[0], htole32(IXL_AQ_MAC_PORT_VALID))) {
3893 		printf(", port address is not valid\n");
3894 		goto done;
3895 	}
3896 
3897 	memcpy(sc->sc_ac.ac_enaddr, addrs->port, ETHER_ADDR_LEN);
3898 	rv = 0;
3899 
3900 done:
3901 	ixl_dmamem_free(sc, &idm);
3902 	return (rv);
3903 }
3904 
/*
 * ixl_get_switch_config: fetch the firmware's switch configuration.
 *
 * Issues a GET SWITCH CONFIG admin queue command and records the
 * uplink, downlink and element SEIDs of the first reported element in
 * the softc.  Returns 0 on success, -1 on failure.
 */
static int
ixl_get_switch_config(struct ixl_softc *sc)
{
	struct ixl_dmamem idm;
	struct ixl_aq_desc iaq;
	struct ixl_aq_switch_config *hdr;
	struct ixl_aq_switch_config_element *elms, *elm;
	unsigned int nelm;
	int rv;

	if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) {
		printf("%s: unable to allocate switch config buffer\n",
		    DEVNAME(sc));
		return (-1);
	}

	memset(&iaq, 0, sizeof(iaq));
	iaq.iaq_flags = htole16(IXL_AQ_BUF |
	    (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
	iaq.iaq_opcode = htole16(IXL_AQ_OP_SWITCH_GET_CONFIG);
	iaq.iaq_datalen = htole16(IXL_AQ_BUFLEN);
	ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
	    BUS_DMASYNC_PREREAD);

	rv = ixl_atq_poll(sc, &iaq, 250);

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
	    BUS_DMASYNC_POSTREAD);

	if (rv != 0) {
		printf("%s: GET SWITCH CONFIG timeout\n", DEVNAME(sc));
		rv = -1;
		goto done;
	}
	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
		printf("%s: GET SWITCH CONFIG error\n", DEVNAME(sc));
		rv = -1;
		goto done;
	}

	/* the elements follow the header in the buffer */
	hdr = IXL_DMA_KVA(&idm);
	elms = (struct ixl_aq_switch_config_element *)(hdr + 1);

	nelm = lemtoh16(&hdr->num_reported);
	if (nelm < 1) {
		printf("%s: no switch config available\n", DEVNAME(sc));
		rv = -1;
		goto done;
	}

#if 0
	for (i = 0; i < nelm; i++) {
		elm = &elms[i];

		printf("%s: type %x revision %u seid %04x\n", DEVNAME(sc),
		    elm->type, elm->revision, lemtoh16(&elm->seid));
		printf("%s: uplink %04x downlink %04x\n", DEVNAME(sc),
		    lemtoh16(&elm->uplink_seid),
		    lemtoh16(&elm->downlink_seid));
		printf("%s: conntype %x scheduler %04x extra %04x\n",
		    DEVNAME(sc), elm->connection_type,
		    lemtoh16(&elm->scheduler_id),
		    lemtoh16(&elm->element_info));
	}
#endif

	elm = &elms[0];

	/* keep these in little-endian form, as the hardware provides them */
	sc->sc_uplink_seid = elm->uplink_seid;
	sc->sc_downlink_seid = elm->downlink_seid;
	sc->sc_seid = elm->seid;

	/* uplink and downlink SEIDs must either both be set or both clear */
	if ((sc->sc_uplink_seid == htole16(0)) !=
	    (sc->sc_downlink_seid == htole16(0))) {
		printf("%s: SEIDs are misconfigured\n", DEVNAME(sc));
		rv = -1;
		goto done;
	}

done:
	ixl_dmamem_free(sc, &idm);
	return (rv);
}
3990 
/*
 * ixl_phy_mask_ints: configure which PHY events generate interrupts.
 *
 * Masks all PHY events except link up/down, module qualification
 * failure and media-not-available.  Returns 0 on success, -1 on
 * failure.
 */
static int
ixl_phy_mask_ints(struct ixl_softc *sc)
{
	struct ixl_aq_desc iaq;

	memset(&iaq, 0, sizeof(iaq));
	iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_EVENT_MASK);
	iaq.iaq_param[2] = htole32(IXL_AQ_PHY_EV_MASK &
	    ~(IXL_AQ_PHY_EV_LINK_UPDOWN | IXL_AQ_PHY_EV_MODULE_QUAL_FAIL |
	      IXL_AQ_PHY_EV_MEDIA_NA));

	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
		printf("%s: SET PHY EVENT MASK timeout\n", DEVNAME(sc));
		return (-1);
	}
	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
		printf("%s: SET PHY EVENT MASK error\n", DEVNAME(sc));
		return (-1);
	}

	return (0);
}
4013 
/*
 * ixl_get_phy_abilities: issue a GET PHY ABILITIES admin queue
 * command into the caller-supplied dma buffer.
 *
 * Returns -1 on command timeout, otherwise the firmware's return
 * code (IXL_AQ_RC_*); on IXL_AQ_RC_OK the buffer holds a struct
 * ixl_aq_phy_abilities.
 */
static int
ixl_get_phy_abilities(struct ixl_softc *sc,struct ixl_dmamem *idm)
{
	struct ixl_aq_desc iaq;
	int rv;

	memset(&iaq, 0, sizeof(iaq));
	iaq.iaq_flags = htole16(IXL_AQ_BUF |
	    (IXL_DMA_LEN(idm) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
	iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_GET_ABILITIES);
	htolem16(&iaq.iaq_datalen, IXL_DMA_LEN(idm));
	iaq.iaq_param[0] = htole32(IXL_AQ_PHY_REPORT_INIT);
	ixl_aq_dva(&iaq, IXL_DMA_DVA(idm));

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
	    BUS_DMASYNC_PREREAD);

	rv = ixl_atq_poll(sc, &iaq, 250);

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
	    BUS_DMASYNC_POSTREAD);

	if (rv != 0)
		return (-1);

	return (lemtoh16(&iaq.iaq_retval));
}
4041 
/*
 * ixl_get_phy_types: collect the 64-bit phy type bitmap.
 *
 * Fetches the phy abilities and combines phy_type (low 32 bits) with
 * phy_type_ext (high 32 bits) into *phy_types_ptr.  A firmware EIO is
 * treated as "API too old" and yields an empty bitmap.  Returns 0 on
 * success, -1 (or the firmware return code) on failure.
 */
static int
ixl_get_phy_types(struct ixl_softc *sc, uint64_t *phy_types_ptr)
{
	struct ixl_dmamem idm;
	struct ixl_aq_phy_abilities *phy;
	uint64_t phy_types;
	int rv;

	if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) {
		printf("%s: unable to allocate phy abilities buffer\n",
		    DEVNAME(sc));
		return (-1);
	}

	rv = ixl_get_phy_abilities(sc, &idm);
	switch (rv) {
	case -1:
		printf("%s: GET PHY ABILITIES timeout\n", DEVNAME(sc));
		goto err;
	case IXL_AQ_RC_OK:
		break;
	case IXL_AQ_RC_EIO:
		/* API is too old to handle this command */
		phy_types = 0;
		goto done;
	default:
		printf("%s: GET PHY ABILITIES error %u\n", DEVNAME(sc), rv);
		goto err;
	}

	phy = IXL_DMA_KVA(&idm);

	phy_types = lemtoh32(&phy->phy_type);
	phy_types |= (uint64_t)phy->phy_type_ext << 32;

done:
	*phy_types_ptr = phy_types;

	rv = 0;

err:
	ixl_dmamem_free(sc, &idm);
	return (rv);
}
4086 
4087 /*
4088  * this returns -2 on software/driver failure, -1 for problems
4089  * talking to the hardware, or the sff module type.
4090  */
4091 
4092 static int
4093 ixl_get_module_type(struct ixl_softc *sc)
4094 {
4095 	struct ixl_dmamem idm;
4096 	struct ixl_aq_phy_abilities *phy;
4097 	int rv;
4098 
4099 	if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0)
4100 		return (-2);
4101 
4102 	rv = ixl_get_phy_abilities(sc, &idm);
4103 	if (rv != IXL_AQ_RC_OK) {
4104 		rv = -1;
4105 		goto done;
4106 	}
4107 
4108 	phy = IXL_DMA_KVA(&idm);
4109 
4110 	rv = phy->module_type[0];
4111 
4112 done:
4113 	ixl_dmamem_free(sc, &idm);
4114 	return (rv);
4115 }
4116 
/*
 * Query the firmware for the current link state, requesting future
 * link change notifications (IXL_AQ_LINK_NOTIFY), and record the
 * result on the interface.  Returns -1 only on admin queue timeout;
 * an AQ-level error is logged but still returns 0 with the link
 * state left unchanged.
 */
static int
ixl_get_link_status(struct ixl_softc *sc)
{
	struct ixl_aq_desc iaq;
	struct ixl_aq_link_param *param;

	memset(&iaq, 0, sizeof(iaq));
	iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS);
	param = (struct ixl_aq_link_param *)iaq.iaq_param;
	param->notify = IXL_AQ_LINK_NOTIFY;

	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
		printf("%s: GET LINK STATUS timeout\n", DEVNAME(sc));
		return (-1);
	}
	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
		printf("%s: GET LINK STATUS error\n", DEVNAME(sc));
		return (0);
	}

	/* parse the reply and stash media/baudrate state on the softc */
	sc->sc_ac.ac_if.if_link_state = ixl_set_link_status(sc, &iaq);

	return (0);
}
4141 
/*
 * Per-module-type operations for reading SFF transceiver pages over
 * the admin queue i2c interface: open prepares (and for SFP saves)
 * the page select state, get reads one byte, close restores it.
 */
struct ixl_sff_ops {
	int (*open)(struct ixl_softc *sc, struct if_sffpage *, uint8_t *);
	int (*get)(struct ixl_softc *sc, struct if_sffpage *, size_t);
	int (*close)(struct ixl_softc *sc, struct if_sffpage *, uint8_t);
};
4147 
4148 static int
4149 ixl_sfp_open(struct ixl_softc *sc, struct if_sffpage *sff, uint8_t *page)
4150 {
4151 	int error;
4152 
4153 	if (sff->sff_addr != IFSFF_ADDR_EEPROM)
4154 		return (0);
4155 
4156 	error = ixl_sff_get_byte(sc, IFSFF_ADDR_EEPROM, 127, page);
4157 	if (error != 0)
4158 		return (error);
4159 	if (*page == sff->sff_page)
4160 		return (0);
4161 	error = ixl_sff_set_byte(sc, IFSFF_ADDR_EEPROM, 127, sff->sff_page);
4162 	if (error != 0)
4163 		return (error);
4164 
4165 	return (0);
4166 }
4167 
/* SFP get: read byte i of the requested device into the page buffer. */
static int
ixl_sfp_get(struct ixl_softc *sc, struct if_sffpage *sff, size_t i)
{
	return (ixl_sff_get_byte(sc, sff->sff_addr, i, &sff->sff_data[i]));
}
4173 
4174 static int
4175 ixl_sfp_close(struct ixl_softc *sc, struct if_sffpage *sff, uint8_t page)
4176 {
4177 	int error;
4178 
4179 	if (sff->sff_addr != IFSFF_ADDR_EEPROM)
4180 		return (0);
4181 
4182 	if (page == sff->sff_page)
4183 		return (0);
4184 
4185 	error = ixl_sff_set_byte(sc, IFSFF_ADDR_EEPROM, 127, page);
4186 	if (error != 0)
4187 		return (error);
4188 
4189 	return (0);
4190 }
4191 
4192 static const struct ixl_sff_ops ixl_sfp_ops = {
4193 	ixl_sfp_open,
4194 	ixl_sfp_get,
4195 	ixl_sfp_close,
4196 };
4197 
4198 static int
4199 ixl_qsfp_open(struct ixl_softc *sc, struct if_sffpage *sff, uint8_t *page)
4200 {
4201 	if (sff->sff_addr != IFSFF_ADDR_EEPROM)
4202 		return (EIO);
4203 
4204 	return (0);
4205 }
4206 
/*
 * QSFP get: read byte i into the page buffer.  Note the requested
 * page number is passed as the i2c device address here, unlike the
 * SFP path which uses sff_addr.
 */
static int
ixl_qsfp_get(struct ixl_softc *sc, struct if_sffpage *sff, size_t i)
{
	return (ixl_sff_get_byte(sc, sff->sff_page, i, &sff->sff_data[i]));
}
4212 
/* QSFP close: no page select state was changed, so nothing to undo. */
static int
ixl_qsfp_close(struct ixl_softc *sc, struct if_sffpage *sff, uint8_t page)
{
	return (0);
}
4218 
4219 static const struct ixl_sff_ops ixl_qsfp_ops = {
4220 	ixl_qsfp_open,
4221 	ixl_qsfp_get,
4222 	ixl_qsfp_close,
4223 };
4224 
4225 static int
4226 ixl_get_sffpage(struct ixl_softc *sc, struct if_sffpage *sff)
4227 {
4228 	const struct ixl_sff_ops *ops;
4229 	uint8_t page;
4230 	size_t i;
4231 	int error;
4232 
4233 	switch (ixl_get_module_type(sc)) {
4234 	case -2:
4235 		return (ENOMEM);
4236 	case -1:
4237 		return (ENXIO);
4238 	case IXL_SFF8024_ID_SFP:
4239 		ops = &ixl_sfp_ops;
4240 		break;
4241 	case IXL_SFF8024_ID_QSFP:
4242 	case IXL_SFF8024_ID_QSFP_PLUS:
4243 	case IXL_SFF8024_ID_QSFP28:
4244 		ops = &ixl_qsfp_ops;
4245 		break;
4246 	default:
4247 		return (EOPNOTSUPP);
4248 	}
4249 
4250 	error = (*ops->open)(sc, sff, &page);
4251 	if (error != 0)
4252 		return (error);
4253 
4254 	for (i = 0; i < sizeof(sff->sff_data); i++) {
4255 		error = (*ops->get)(sc, sff, i);
4256 		if (error != 0)
4257 			return (error);
4258 	}
4259 
4260 	error = (*ops->close)(sc, sff, page);
4261 
4262 	return (0);
4263 }
4264 
/*
 * Read one byte from an i2c device behind the module interface via
 * the GET REGISTER admin queue command.  'dev' is the i2c device
 * address, 'reg' the offset within it.  Sleeps in ixl_atq_exec().
 * Returns 0 on success or an errno mapped from the AQ return code.
 */
static int
ixl_sff_get_byte(struct ixl_softc *sc, uint8_t dev, uint32_t reg, uint8_t *p)
{
	struct ixl_atq iatq;
	struct ixl_aq_desc *iaq;
	struct ixl_aq_phy_reg_access *param;

	memset(&iatq, 0, sizeof(iatq));
	iaq = &iatq.iatq_desc;
	iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_GET_REGISTER);
	param = (struct ixl_aq_phy_reg_access *)iaq->iaq_param;
	param->phy_iface = IXL_AQ_PHY_IF_MODULE;
	param->dev_addr = dev;
	htolem32(&param->reg, reg);

	ixl_atq_exec(sc, &iatq, "ixlsffget");

	if (ISSET(sc->sc_ac.ac_if.if_flags, IFF_DEBUG)) {
		printf("%s: %s(dev 0x%02x, reg 0x%02x) -> %04x\n",
		    DEVNAME(sc), __func__,
		    dev, reg, lemtoh16(&iaq->iaq_retval));
	}

	/* retval is compared in little-endian form to avoid a swap */
	switch (iaq->iaq_retval) {
	case htole16(IXL_AQ_RC_OK):
		break;
	case htole16(IXL_AQ_RC_EBUSY):
		return (EBUSY);
	case htole16(IXL_AQ_RC_ESRCH):
		return (ENODEV);
	case htole16(IXL_AQ_RC_EIO):
	case htole16(IXL_AQ_RC_EINVAL):
	default:
		return (EIO);
	}

	/* only the low 8 bits of the 32-bit register value are kept */
	*p = lemtoh32(&param->val);

	return (0);
}
4305 
/*
 * Write one byte to an i2c device behind the module interface via
 * the SET REGISTER admin queue command.  Sleeps in ixl_atq_exec().
 * Returns 0 on success or an errno mapped from the AQ return code.
 */
static int
ixl_sff_set_byte(struct ixl_softc *sc, uint8_t dev, uint32_t reg, uint8_t v)
{
	struct ixl_atq iatq;
	struct ixl_aq_desc *iaq;
	struct ixl_aq_phy_reg_access *param;

	memset(&iatq, 0, sizeof(iatq));
	iaq = &iatq.iatq_desc;
	iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_REGISTER);
	param = (struct ixl_aq_phy_reg_access *)iaq->iaq_param;
	param->phy_iface = IXL_AQ_PHY_IF_MODULE;
	param->dev_addr = dev;
	htolem32(&param->reg, reg);
	htolem32(&param->val, v);

	ixl_atq_exec(sc, &iatq, "ixlsffset");

	if (ISSET(sc->sc_ac.ac_if.if_flags, IFF_DEBUG)) {
		printf("%s: %s(dev 0x%02x, reg 0x%02x, val 0x%02x) -> %04x\n",
		    DEVNAME(sc), __func__,
		    dev, reg, v, lemtoh16(&iaq->iaq_retval));
	}

	/* retval is compared in little-endian form to avoid a swap */
	switch (iaq->iaq_retval) {
	case htole16(IXL_AQ_RC_OK):
		break;
	case htole16(IXL_AQ_RC_EBUSY):
		return (EBUSY);
	case htole16(IXL_AQ_RC_ESRCH):
		return (ENODEV);
	case htole16(IXL_AQ_RC_EIO):
	case htole16(IXL_AQ_RC_EINVAL):
	default:
		return (EIO);
	}

	return (0);
}
4345 
/*
 * Fetch the VSI parameters for our uplink SEID into the scratch DMA
 * buffer and record the VSI number the firmware assigned.  Returns
 * 0 on success, -1 on timeout or AQ error.
 */
static int
ixl_get_vsi(struct ixl_softc *sc)
{
	struct ixl_dmamem *vsi = &sc->sc_scratch;
	struct ixl_aq_desc iaq;
	struct ixl_aq_vsi_param *param;
	struct ixl_aq_vsi_reply *reply;
	int rv;

	/* grumble, vsi info isn't "known" at compile time */

	memset(&iaq, 0, sizeof(iaq));
	htolem16(&iaq.iaq_flags, IXL_AQ_BUF |
	    (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
	iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VSI_PARAMS);
	htolem16(&iaq.iaq_datalen, IXL_DMA_LEN(vsi));
	ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi));

	param = (struct ixl_aq_vsi_param *)iaq.iaq_param;
	param->uplink_seid = sc->sc_seid;

	/* firmware writes the VSI data into the buffer during the poll */
	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
	    BUS_DMASYNC_PREREAD);

	rv = ixl_atq_poll(sc, &iaq, 250);

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
	    BUS_DMASYNC_POSTREAD);

	if (rv != 0) {
		printf("%s: GET VSI timeout\n", DEVNAME(sc));
		return (-1);
	}

	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
		printf("%s: GET VSI error %u\n", DEVNAME(sc),
		    lemtoh16(&iaq.iaq_retval));
		return (-1);
	}

	reply = (struct ixl_aq_vsi_reply *)iaq.iaq_param;
	sc->sc_vsi_number = reply->vsi_number;

	return (0);
}
4391 
/*
 * Push updated VSI parameters back to the firmware.  This edits the
 * VSI data previously fetched by ixl_get_vsi() in the scratch buffer:
 * a single contiguous queue mapping covering sc_nqueues queues, and
 * "accept all" VLAN mode with both tags stripped.  Returns 0 on
 * success, -1 on timeout or AQ error.
 */
static int
ixl_set_vsi(struct ixl_softc *sc)
{
	struct ixl_dmamem *vsi = &sc->sc_scratch;
	struct ixl_aq_desc iaq;
	struct ixl_aq_vsi_param *param;
	struct ixl_aq_vsi_data *data = IXL_DMA_KVA(vsi);
	int rv;

	/* only the queue map and VLAN sections are being changed */
	data->valid_sections = htole16(IXL_AQ_VSI_VALID_QUEUE_MAP |
	    IXL_AQ_VSI_VALID_VLAN);

	CLR(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_MASK));
	SET(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_CONTIG));
	data->queue_mapping[0] = htole16(0);
	data->tc_mapping[0] = htole16((0 << IXL_AQ_VSI_TC_Q_OFFSET_SHIFT) |
	    (sc->sc_nqueues << IXL_AQ_VSI_TC_Q_NUMBER_SHIFT));

	CLR(data->port_vlan_flags,
	    htole16(IXL_AQ_VSI_PVLAN_MODE_MASK | IXL_AQ_VSI_PVLAN_EMOD_MASK));
	SET(data->port_vlan_flags, htole16(IXL_AQ_VSI_PVLAN_MODE_ALL |
	    IXL_AQ_VSI_PVLAN_EMOD_STR_BOTH));

	/* grumble, vsi info isn't "known" at compile time */

	memset(&iaq, 0, sizeof(iaq));
	htolem16(&iaq.iaq_flags, IXL_AQ_BUF | IXL_AQ_RD |
	    (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
	iaq.iaq_opcode = htole16(IXL_AQ_OP_UPD_VSI_PARAMS);
	htolem16(&iaq.iaq_datalen, IXL_DMA_LEN(vsi));
	ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi));

	param = (struct ixl_aq_vsi_param *)iaq.iaq_param;
	param->uplink_seid = sc->sc_seid;

	/* firmware reads the buffer this time, so sync for write */
	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
	    BUS_DMASYNC_PREWRITE);

	rv = ixl_atq_poll(sc, &iaq, 250);

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
	    BUS_DMASYNC_POSTWRITE);

	if (rv != 0) {
		printf("%s: UPDATE VSI timeout\n", DEVNAME(sc));
		return (-1);
	}

	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
		printf("%s: UPDATE VSI error %u\n", DEVNAME(sc),
		    lemtoh16(&iaq.iaq_retval));
		return (-1);
	}

	return (0);
}
4448 
4449 static const struct ixl_phy_type *
4450 ixl_search_phy_type(uint8_t phy_type)
4451 {
4452 	const struct ixl_phy_type *itype;
4453 	uint64_t mask;
4454 	unsigned int i;
4455 
4456 	if (phy_type >= 64)
4457 		return (NULL);
4458 
4459 	mask = 1ULL << phy_type;
4460 
4461 	for (i = 0; i < nitems(ixl_phy_type_map); i++) {
4462 		itype = &ixl_phy_type_map[i];
4463 
4464 		if (ISSET(itype->phy_type, mask))
4465 			return (itype);
4466 	}
4467 
4468 	return (NULL);
4469 }
4470 
4471 static uint64_t
4472 ixl_search_link_speed(uint8_t link_speed)
4473 {
4474 	const struct ixl_speed_type *type;
4475 	unsigned int i;
4476 
4477 	for (i = 0; i < nitems(ixl_speed_type_map); i++) {
4478 		type = &ixl_speed_type_map[i];
4479 
4480 		if (ISSET(type->dev_speed, link_speed))
4481 			return (type->net_speed);
4482 	}
4483 
4484 	return (0);
4485 }
4486 
/*
 * Decode a link status reply (from GET LINK STATUS or a link event)
 * into ifmedia active/status words and a baudrate stored on the
 * softc, and return the corresponding LINK_STATE_* value for the
 * caller to assign to the interface.
 */
static int
ixl_set_link_status(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
{
	const struct ixl_aq_link_status *status;
	const struct ixl_phy_type *itype;

	KERNEL_ASSERT_LOCKED();

	uint64_t ifm_active = IFM_ETHER;
	uint64_t ifm_status = IFM_AVALID;
	int link_state = LINK_STATE_DOWN;
	uint64_t baudrate = 0;

	status = (const struct ixl_aq_link_status *)iaq->iaq_param;
	if (!ISSET(status->link_info, IXL_AQ_LINK_UP_FUNCTION))
		goto done;

	/* link is up; this hardware only does full duplex */
	ifm_active |= IFM_FDX;
	ifm_status |= IFM_ACTIVE;
	link_state = LINK_STATE_FULL_DUPLEX;

	itype = ixl_search_phy_type(status->phy_type);
	if (itype != NULL)
		ifm_active |= itype->ifm_type;

	if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_TX))
		ifm_active |= IFM_ETH_TXPAUSE;
	if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_RX))
		ifm_active |= IFM_ETH_RXPAUSE;

	baudrate = ixl_search_link_speed(status->link_speed);

done:
	sc->sc_media_active = ifm_active;
	sc->sc_media_status = ifm_status;
	sc->sc_ac.ac_if.if_baudrate = baudrate;

	return (link_state);
}
4526 
4527 static int
4528 ixl_restart_an(struct ixl_softc *sc)
4529 {
4530 	struct ixl_aq_desc iaq;
4531 
4532 	memset(&iaq, 0, sizeof(iaq));
4533 	iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_RESTART_AN);
4534 	iaq.iaq_param[0] =
4535 	    htole32(IXL_AQ_PHY_RESTART_AN | IXL_AQ_PHY_LINK_ENABLE);
4536 
4537 	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4538 		printf("%s: RESTART AN timeout\n", DEVNAME(sc));
4539 		return (-1);
4540 	}
4541 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4542 		printf("%s: RESTART AN error\n", DEVNAME(sc));
4543 		return (-1);
4544 	}
4545 
4546 	return (0);
4547 }
4548 
/*
 * Install a single MAC+VLAN filter on our VSI via the ADD_MACVLAN
 * admin queue command, using the scratch buffer for the element.
 * Returns the AQ return code (IXL_AQ_RC_OK on success), or
 * IXL_AQ_RC_EINVAL on timeout.
 */
static int
ixl_add_macvlan(struct ixl_softc *sc, uint8_t *macaddr, uint16_t vlan, uint16_t flags)
{
	struct ixl_aq_desc iaq;
	struct ixl_aq_add_macvlan *param;
	struct ixl_aq_add_macvlan_elem *elem;

	memset(&iaq, 0, sizeof(iaq));
	iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
	iaq.iaq_opcode = htole16(IXL_AQ_OP_ADD_MACVLAN);
	iaq.iaq_datalen = htole16(sizeof(*elem));
	ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch));

	param = (struct ixl_aq_add_macvlan *)&iaq.iaq_param;
	param->num_addrs = htole16(1);
	/* 0x8000 flags the seid field as valid; sc_seid is already LE */
	param->seid0 = htole16(0x8000) | sc->sc_seid;
	param->seid1 = 0;
	param->seid2 = 0;

	elem = IXL_DMA_KVA(&sc->sc_scratch);
	memset(elem, 0, sizeof(*elem));
	memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN);
	elem->flags = htole16(IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH | flags);
	elem->vlan = htole16(vlan);

	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
		printf("%s: ADD_MACVLAN timeout\n", DEVNAME(sc));
		return (IXL_AQ_RC_EINVAL);
	}

	return letoh16(iaq.iaq_retval);
}
4581 
/*
 * Remove a single MAC+VLAN filter from our VSI via REMOVE_MACVLAN;
 * the mirror image of ixl_add_macvlan().  Returns the AQ return
 * code, or IXL_AQ_RC_EINVAL on timeout.
 */
static int
ixl_remove_macvlan(struct ixl_softc *sc, uint8_t *macaddr, uint16_t vlan, uint16_t flags)
{
	struct ixl_aq_desc iaq;
	struct ixl_aq_remove_macvlan *param;
	struct ixl_aq_remove_macvlan_elem *elem;

	memset(&iaq, 0, sizeof(iaq));
	iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
	iaq.iaq_opcode = htole16(IXL_AQ_OP_REMOVE_MACVLAN);
	iaq.iaq_datalen = htole16(sizeof(*elem));
	ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch));

	param = (struct ixl_aq_remove_macvlan *)&iaq.iaq_param;
	param->num_addrs = htole16(1);
	/* 0x8000 flags the seid field as valid; sc_seid is already LE */
	param->seid0 = htole16(0x8000) | sc->sc_seid;
	param->seid1 = 0;
	param->seid2 = 0;

	elem = IXL_DMA_KVA(&sc->sc_scratch);
	memset(elem, 0, sizeof(*elem));
	memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN);
	elem->flags = htole16(IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH | flags);
	elem->vlan = htole16(vlan);

	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
		printf("%s: REMOVE_MACVLAN timeout\n", DEVNAME(sc));
		return (IXL_AQ_RC_EINVAL);
	}

	return letoh16(iaq.iaq_retval);
}
4614 
/*
 * Set up the Host Memory Cache (HMC): backing store in host memory
 * that the hardware uses for its LAN TX/RX queue contexts (FCoE
 * objects are present in the layout but allocated zero entries).
 * Sizes each object type from hardware registers, allocates the
 * page-descriptor and segment-descriptor DMA areas, programs the SD
 * table into the chip, then publishes each object's base/count.
 * Returns 0 on success, -1 on failure.
 */
static int
ixl_hmc(struct ixl_softc *sc)
{
	struct {
		uint32_t   count;
		uint32_t   minsize;
		bus_size_t maxcnt;
		bus_size_t setoff;
		bus_size_t setcnt;
	} regs[] = {
		{
			0,
			IXL_HMC_TXQ_MINSIZE,
			I40E_GLHMC_LANTXOBJSZ,
			I40E_GLHMC_LANTXBASE(sc->sc_pf_id),
			I40E_GLHMC_LANTXCNT(sc->sc_pf_id),
		},
		{
			0,
			IXL_HMC_RXQ_MINSIZE,
			I40E_GLHMC_LANRXOBJSZ,
			I40E_GLHMC_LANRXBASE(sc->sc_pf_id),
			I40E_GLHMC_LANRXCNT(sc->sc_pf_id),
		},
		{
			0,
			0,
			I40E_GLHMC_FCOEMAX,
			I40E_GLHMC_FCOEDDPBASE(sc->sc_pf_id),
			I40E_GLHMC_FCOEDDPCNT(sc->sc_pf_id),
		},
		{
			0,
			0,
			I40E_GLHMC_FCOEFMAX,
			I40E_GLHMC_FCOEFBASE(sc->sc_pf_id),
			I40E_GLHMC_FCOEFCNT(sc->sc_pf_id),
		},
	};
	struct ixl_hmc_entry *e;
	uint64_t size, dva;
	uint8_t *kva;
	uint64_t *sdpage;
	unsigned int i;
	int npages, tables;

	CTASSERT(nitems(regs) <= nitems(sc->sc_hmc_entries));

	/* only the LAN TX and RX objects get real entry counts */
	regs[IXL_HMC_LAN_TX].count = regs[IXL_HMC_LAN_RX].count =
	    ixl_rd(sc, I40E_GLHMC_LANQMAX);

	/* compute the base offset and total size of all object regions */
	size = 0;
	for (i = 0; i < nitems(regs); i++) {
		e = &sc->sc_hmc_entries[i];

		e->hmc_count = regs[i].count;
		/* hardware reports object size as a power-of-two exponent */
		e->hmc_size = 1U << ixl_rd(sc, regs[i].maxcnt);
		e->hmc_base = size;

		if ((e->hmc_size * 8) < regs[i].minsize) {
			printf("%s: kernel hmc entry is too big\n",
			    DEVNAME(sc));
			return (-1);
		}

		size += roundup(e->hmc_size * e->hmc_count, IXL_HMC_ROUNDUP);
	}
	size = roundup(size, IXL_HMC_PGSIZE);
	npages = size / IXL_HMC_PGSIZE;

	tables = roundup(size, IXL_HMC_L2SZ) / IXL_HMC_L2SZ;

	if (ixl_dmamem_alloc(sc, &sc->sc_hmc_pd, size, IXL_HMC_PGSIZE) != 0) {
		printf("%s: unable to allocate hmc pd memory\n", DEVNAME(sc));
		return (-1);
	}

	if (ixl_dmamem_alloc(sc, &sc->sc_hmc_sd, tables * IXL_HMC_PGSIZE,
	    IXL_HMC_PGSIZE) != 0) {
		printf("%s: unable to allocate hmc sd memory\n", DEVNAME(sc));
		ixl_dmamem_free(sc, &sc->sc_hmc_pd);
		return (-1);
	}

	kva = IXL_DMA_KVA(&sc->sc_hmc_pd);
	memset(kva, 0, IXL_DMA_LEN(&sc->sc_hmc_pd));

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
	    0, IXL_DMA_LEN(&sc->sc_hmc_pd),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* fill the SD area with page descriptors pointing at the PD pages */
	dva = IXL_DMA_DVA(&sc->sc_hmc_pd);
	sdpage = IXL_DMA_KVA(&sc->sc_hmc_sd);
	for (i = 0; i < npages; i++) {
		htolem64(sdpage++, dva | IXL_HMC_PDVALID);

		dva += IXL_HMC_PGSIZE;
	}

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_sd),
	    0, IXL_DMA_LEN(&sc->sc_hmc_sd),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* program each segment descriptor into the chip, one write cycle each */
	dva = IXL_DMA_DVA(&sc->sc_hmc_sd);
	for (i = 0; i < tables; i++) {
		uint32_t count;

		KASSERT(npages >= 0);

		count = (npages > IXL_HMC_PGS) ? IXL_HMC_PGS : npages;

		ixl_wr(sc, I40E_PFHMC_SDDATAHIGH, dva >> 32);
		ixl_wr(sc, I40E_PFHMC_SDDATALOW, dva |
		    (count << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |
		    (1U << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT));
		/* data registers must land before the command register */
		ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
		ixl_wr(sc, I40E_PFHMC_SDCMD,
		    (1U << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | i);

		npages -= IXL_HMC_PGS;
		dva += IXL_HMC_PGSIZE;
	}

	/* tell the hardware where each object type lives and how many */
	for (i = 0; i < nitems(regs); i++) {
		e = &sc->sc_hmc_entries[i];

		ixl_wr(sc, regs[i].setoff, e->hmc_base / IXL_HMC_ROUNDUP);
		ixl_wr(sc, regs[i].setcnt, e->hmc_count);
	}

	return (0);
}
4747 
/* Release the HMC backing store allocated by ixl_hmc(). */
static void
ixl_hmc_free(struct ixl_softc *sc)
{
	ixl_dmamem_free(sc, &sc->sc_hmc_sd);
	ixl_dmamem_free(sc, &sc->sc_hmc_pd);
}
4754 
/*
 * Pack a host structure into the bit-exact layout the hardware
 * expects for HMC contexts.  Each packing entry copies 'width' bits
 * starting at byte 'offset' in the source to absolute bit position
 * 'lsb' in the destination.  Partial destination bytes are OR-ed
 * in, so the destination must start zeroed (or hold only bits
 * outside the packed fields).
 */
static void
ixl_hmc_pack(void *d, const void *s, const struct ixl_hmc_pack *packing,
    unsigned int npacking)
{
	uint8_t *dst = d;
	const uint8_t *src = s;
	unsigned int i;

	for (i = 0; i < npacking; i++) {
		const struct ixl_hmc_pack *pack = &packing[i];
		unsigned int offset = pack->lsb / 8;
		unsigned int align = pack->lsb % 8;
		const uint8_t *in = src + pack->offset;
		uint8_t *out = dst + offset;
		int width = pack->width;
		unsigned int inbits = 0;

		/* head: merge into the partially-occupied first byte */
		if (align) {
			inbits = (*in++) << align;
			*out++ |= (inbits & 0xff);
			inbits >>= 8;

			width -= 8 - align;
		}

		/* body: whole destination bytes, carrying bits in inbits */
		while (width >= 8) {
			inbits |= (*in++) << align;
			*out++ = (inbits & 0xff);
			inbits >>= 8;

			width -= 8;
		}

		/* tail: remaining bits, masked so later fields can OR in */
		if (width > 0) {
			inbits |= (*in) << align;
			*out |= (inbits & ((1 << width) - 1));
		}
	}
}
4794 
/*
 * Allocate an admin receive queue buffer: the tracking structure,
 * IXL_AQ_BUFLEN of DMA-able memory, and a loaded dmamap for it.
 * Returns NULL if any step fails, with earlier steps unwound.
 */
static struct ixl_aq_buf *
ixl_aqb_alloc(struct ixl_softc *sc)
{
	struct ixl_aq_buf *aqb;

	aqb = malloc(sizeof(*aqb), M_DEVBUF, M_WAITOK);
	if (aqb == NULL)
		return (NULL);

	aqb->aqb_data = dma_alloc(IXL_AQ_BUFLEN, PR_WAITOK);
	if (aqb->aqb_data == NULL)
		goto free;

	if (bus_dmamap_create(sc->sc_dmat, IXL_AQ_BUFLEN, 1,
	    IXL_AQ_BUFLEN, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
	    &aqb->aqb_map) != 0)
		goto dma_free;

	if (bus_dmamap_load(sc->sc_dmat, aqb->aqb_map, aqb->aqb_data,
	    IXL_AQ_BUFLEN, NULL, BUS_DMA_WAITOK) != 0)
		goto destroy;

	return (aqb);

destroy:
	bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
dma_free:
	dma_free(aqb->aqb_data, IXL_AQ_BUFLEN);
free:
	free(aqb, M_DEVBUF, sizeof(*aqb));

	return (NULL);
}
4829 
/* Tear down an admin receive queue buffer from ixl_aqb_alloc(). */
static void
ixl_aqb_free(struct ixl_softc *sc, struct ixl_aq_buf *aqb)
{
	bus_dmamap_unload(sc->sc_dmat, aqb->aqb_map);
	bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
	dma_free(aqb->aqb_data, IXL_AQ_BUFLEN);
	free(aqb, M_DEVBUF, sizeof(*aqb));
}
4838 
/*
 * Refill the admin receive queue with buffers, reusing idle aqbs
 * where possible and allocating new ones otherwise.  Updates the
 * producer index; returns nonzero if any descriptor was posted (so
 * the caller knows to notify the hardware).
 */
static int
ixl_arq_fill(struct ixl_softc *sc)
{
	struct ixl_aq_buf *aqb;
	struct ixl_aq_desc *arq, *iaq;
	unsigned int prod = sc->sc_arq_prod;
	unsigned int n;
	int post = 0;

	/* reserve ring slots; unused ones are returned below */
	n = if_rxr_get(&sc->sc_arq_ring, IXL_AQ_NUM);
	arq = IXL_DMA_KVA(&sc->sc_arq);

	while (n > 0) {
		/* prefer a recycled buffer over a fresh allocation */
		aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle);
		if (aqb != NULL)
			SIMPLEQ_REMOVE_HEAD(&sc->sc_arq_idle, aqb_entry);
		else if ((aqb = ixl_aqb_alloc(sc)) == NULL)
			break;

		memset(aqb->aqb_data, 0, IXL_AQ_BUFLEN);

		bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN,
		    BUS_DMASYNC_PREREAD);

		/* hand the descriptor at prod to the firmware */
		iaq = &arq[prod];
		iaq->iaq_flags = htole16(IXL_AQ_BUF |
		    (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
		iaq->iaq_opcode = 0;
		iaq->iaq_datalen = htole16(IXL_AQ_BUFLEN);
		iaq->iaq_retval = 0;
		iaq->iaq_cookie = 0;
		iaq->iaq_param[0] = 0;
		iaq->iaq_param[1] = 0;
		ixl_aq_dva(iaq, aqb->aqb_map->dm_segs[0].ds_addr);

		SIMPLEQ_INSERT_TAIL(&sc->sc_arq_live, aqb, aqb_entry);

		prod++;
		prod &= IXL_AQ_MASK;

		post = 1;

		n--;
	}

	if_rxr_put(&sc->sc_arq_ring, n);
	sc->sc_arq_prod = prod;

	return (post);
}
4889 
/* Reclaim and free all buffers still posted on the admin receive queue. */
static void
ixl_arq_unfill(struct ixl_softc *sc)
{
	struct ixl_aq_buf *aqb;

	while ((aqb = SIMPLEQ_FIRST(&sc->sc_arq_live)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&sc->sc_arq_live, aqb_entry);

		bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN,
		    BUS_DMASYNC_POSTREAD);
		ixl_aqb_free(sc, aqb);
	}
}
4903 
/*
 * Quiesce the hardware before (re)initialization: discover how many
 * interrupts, queues and VFs this function owns, then mask all
 * interrupts, terminate the interrupt linked lists, pre-announce TX
 * queue disables, and finally disable every TX/RX queue.  The
 * delays give in-flight operations time to drain.
 */
static void
ixl_clear_hw(struct ixl_softc *sc)
{
	uint32_t num_queues, base_queue;
	uint32_t num_pf_int;
	uint32_t num_vf_int;
	uint32_t num_vfs;
	uint32_t i, j;
	uint32_t val;

	/* get number of interrupts, queues, and vfs */
	val = ixl_rd(sc, I40E_GLPCI_CNF2);
	num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
	    I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
	num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
	    I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;

	val = ixl_rd(sc, I40E_PFLAN_QALLOC);
	base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
	    I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
	j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
	    I40E_PFLAN_QALLOC_LASTQ_SHIFT;
	if (val & I40E_PFLAN_QALLOC_VALID_MASK)
		num_queues = (j - base_queue) + 1;
	else
		num_queues = 0;

	val = ixl_rd(sc, I40E_PF_VT_PFALLOC);
	i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
	    I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
	j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
	    I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
	if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
		num_vfs = (j - i) + 1;
	else
		num_vfs = 0;

	/* stop all the interrupts */
	ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0);
	val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
	for (i = 0; i < num_pf_int - 2; i++)
		ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), val);

	/* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
	val = I40E_QUEUE_TYPE_EOL << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
	ixl_wr(sc, I40E_PFINT_LNKLST0, val);
	for (i = 0; i < num_pf_int - 2; i++)
		ixl_wr(sc, I40E_PFINT_LNKLSTN(i), val);
	val = I40E_QUEUE_TYPE_EOL << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
	for (i = 0; i < num_vfs; i++)
		ixl_wr(sc, I40E_VPINT_LNKLST0(i), val);
	for (i = 0; i < num_vf_int - 2; i++)
		ixl_wr(sc, I40E_VPINT_LNKLSTN(i), val);

	/* warn the HW of the coming Tx disables */
	for (i = 0; i < num_queues; i++) {
		uint32_t abs_queue_idx = base_queue + i;
		uint32_t reg_block = 0;

		/* the QDIS registers cover 128 queues per block */
		if (abs_queue_idx >= 128) {
			reg_block = abs_queue_idx / 128;
			abs_queue_idx %= 128;
		}

		val = ixl_rd(sc, I40E_GLLAN_TXPRE_QDIS(reg_block));
		val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
		val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
		val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;

		ixl_wr(sc, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
	}
	delaymsec(400);

	/* stop all the queues */
	for (i = 0; i < num_queues; i++) {
		ixl_wr(sc, I40E_QINT_TQCTL(i), 0);
		ixl_wr(sc, I40E_QTX_ENA(i), 0);
		ixl_wr(sc, I40E_QINT_RQCTL(i), 0);
		ixl_wr(sc, I40E_QRX_ENA(i), 0);
	}

	/* short wait for all queue disables to settle */
	delaymsec(50);
}
4988 
/*
 * Reset this physical function.  Waits out any in-progress global
 * reset, then waits for firmware readiness, and only issues a PF
 * software reset if no global reset was in flight (a global reset
 * already covers the PF).  Returns 0 on success, -1 on timeout.
 * Runs during attach, hence the printf(", ...") dmesg continuations.
 */
static int
ixl_pf_reset(struct ixl_softc *sc)
{
	uint32_t cnt = 0;
	uint32_t cnt1 = 0;
	uint32_t reg = 0;
	uint32_t grst_del;

	/*
	 * Poll for Global Reset steady state in case of recent GRST.
	 * The grst delay value is in 100ms units, and we'll wait a
	 * couple counts longer to be sure we don't just miss the end.
	 */
	grst_del = ixl_rd(sc, I40E_GLGEN_RSTCTL);
	grst_del &= I40E_GLGEN_RSTCTL_GRSTDEL_MASK;
	grst_del >>= I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
	grst_del += 10;

	for (cnt = 0; cnt < grst_del; cnt++) {
		reg = ixl_rd(sc, I40E_GLGEN_RSTAT);
		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
			break;
		delaymsec(100);
	}
	if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
		printf(", Global reset polling failed to complete\n");
		return (-1);
	}

	/* Now Wait for the FW to be ready */
	for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
		reg = ixl_rd(sc, I40E_GLNVM_ULD);
		reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
		    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
		if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
		    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))
			break;

		delaymsec(10);
	}
	if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
	    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
		printf(", wait for FW Reset complete timed out "
		    "(I40E_GLNVM_ULD = 0x%x)\n", reg);
		return (-1);
	}

	/*
	 * If there was a Global Reset in progress when we got here,
	 * we don't need to do the PF Reset
	 */
	if (cnt == 0) {
		reg = ixl_rd(sc, I40E_PFGEN_CTRL);
		ixl_wr(sc, I40E_PFGEN_CTRL, reg | I40E_PFGEN_CTRL_PFSWR_MASK);
		/* hardware clears PFSWR when the reset has completed */
		for (cnt = 0; cnt < I40E_PF_RESET_WAIT_COUNT; cnt++) {
			reg = ixl_rd(sc, I40E_PFGEN_CTRL);
			if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
				break;
			delaymsec(1);
		}
		if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
			printf(", PF reset polling failed to complete"
			    "(I40E_PFGEN_CTRL= 0x%x)\n", reg);
			return (-1);
		}
	}

	return (0);
}
5058 
/*
 * X710-family indirect register read: the RX control registers must
 * be read through the RX_CTL_READ admin queue command rather than
 * directly.  Sleeps in ixl_atq_exec(); returns ~0U on AQ error.
 */
static uint32_t
ixl_710_rd_ctl(struct ixl_softc *sc, uint32_t r)
{
	struct ixl_atq iatq;
	struct ixl_aq_desc *iaq;
	uint16_t retval;

	memset(&iatq, 0, sizeof(iatq));
	iaq = &iatq.iatq_desc;
	iaq->iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_READ);
	htolem32(&iaq->iaq_param[1], r);

	ixl_atq_exec(sc, &iatq, "ixl710rd");

	retval = lemtoh16(&iaq->iaq_retval);
	if (retval != IXL_AQ_RC_OK) {
		printf("%s: %s failed (%u)\n", DEVNAME(sc), __func__, retval);
		return (~0U);
	}

	/* the register value comes back in param[3] */
	return (lemtoh32(&iaq->iaq_param[3]));
}
5081 
/*
 * X710-family indirect register write via the RX_CTL_WRITE admin
 * queue command; counterpart of ixl_710_rd_ctl().  Failures are
 * logged but not reported to the caller.
 */
static void
ixl_710_wr_ctl(struct ixl_softc *sc, uint32_t r, uint32_t v)
{
	struct ixl_atq iatq;
	struct ixl_aq_desc *iaq;
	uint16_t retval;

	memset(&iatq, 0, sizeof(iatq));
	iaq = &iatq.iatq_desc;
	iaq->iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_WRITE);
	htolem32(&iaq->iaq_param[1], r);
	htolem32(&iaq->iaq_param[3], v);

	ixl_atq_exec(sc, &iatq, "ixl710wr");

	retval = lemtoh16(&iaq->iaq_retval);
	if (retval != IXL_AQ_RC_OK) {
		printf("%s: %s %08x=%08x failed (%u)\n",
		    DEVNAME(sc), __func__, r, v, retval);
	}
}
5103 
/*
 * Program the RSS hash key on X710-family hardware by writing the
 * PFQF_HKEY registers (indirect access via ixl_wr_ctl).  Always
 * returns 0.
 */
static int
ixl_710_set_rss_key(struct ixl_softc *sc, const struct ixl_rss_key *rsskey)
{
	unsigned int i;

	for (i = 0; i < nitems(rsskey->key); i++)
		ixl_wr_ctl(sc, I40E_PFQF_HKEY(i), rsskey->key[i]);

	return (0);
}
5114 
/*
 * Program the 128-entry RSS lookup table on X710-family hardware.
 * Note the LUT registers are written directly (ixl_wr), unlike the
 * key which goes through the indirect control-register path.
 * Always returns 0.
 */
static int
ixl_710_set_rss_lut(struct ixl_softc *sc, const struct ixl_rss_lut_128 *lut)
{
	unsigned int i;

	for (i = 0; i < nitems(lut->entries); i++)
		ixl_wr(sc, I40E_PFQF_HLUT(i), lut->entries[i]);

	return (0);
}
5125 
/* X722: control registers are directly readable, no AQ detour needed. */
static uint32_t
ixl_722_rd_ctl(struct ixl_softc *sc, uint32_t r)
{
	return (ixl_rd(sc, r));
}
5131 
/* X722: control registers are directly writable, no AQ detour needed. */
static void
ixl_722_wr_ctl(struct ixl_softc *sc, uint32_t r, uint32_t v)
{
	ixl_wr(sc, r, v);
}
5137 
/* X722 RSS key programming is not implemented yet; pretend success. */
static int
ixl_722_set_rss_key(struct ixl_softc *sc, const struct ixl_rss_key *rsskey)
{
	/* XXX */

	return (0);
}
5145 
/* X722 RSS LUT programming is not implemented yet; pretend success. */
static int
ixl_722_set_rss_lut(struct ixl_softc *sc, const struct ixl_rss_lut_128 *lut)
{
	/* XXX */

	return (0);
}
5153 
/*
 * Allocate a single contiguous chunk of DMA-able memory with the
 * given size and alignment: create a map, allocate zeroed segments,
 * map them into kernel virtual space, and load the map.  Returns 0
 * on success, 1 on failure with earlier steps unwound.
 */
static int
ixl_dmamem_alloc(struct ixl_softc *sc, struct ixl_dmamem *ixm,
    bus_size_t size, u_int align)
{
	ixm->ixm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, ixm->ixm_size, 1,
	    ixm->ixm_size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
	    &ixm->ixm_map) != 0)
		return (1);
	if (bus_dmamem_alloc(sc->sc_dmat, ixm->ixm_size,
	    align, 0, &ixm->ixm_seg, 1, &ixm->ixm_nsegs,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
		goto destroy;
	if (bus_dmamem_map(sc->sc_dmat, &ixm->ixm_seg, ixm->ixm_nsegs,
	    ixm->ixm_size, &ixm->ixm_kva, BUS_DMA_WAITOK) != 0)
		goto free;
	if (bus_dmamap_load(sc->sc_dmat, ixm->ixm_map, ixm->ixm_kva,
	    ixm->ixm_size, NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	return (0);
unmap:
	bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
free:
	bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
	return (1);
}
5185 
/*
 * Release a DMA buffer set up by ixl_dmamem_alloc(): unload, unmap,
 * free and destroy, in exactly the reverse order of allocation.
 */
static void
ixl_dmamem_free(struct ixl_softc *sc, struct ixl_dmamem *ixm)
{
	bus_dmamap_unload(sc->sc_dmat, ixm->ixm_map);
	bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
	bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
}
5194 
5195 #if NKSTAT > 0
5196 
/* c_type below is a uint8_t; make sure the kstat unit values fit */
CTASSERT(KSTAT_KV_U_NONE <= 0xffU);
CTASSERT(KSTAT_KV_U_PACKETS <= 0xffU);
CTASSERT(KSTAT_KV_U_BYTES <= 0xffU);

/*
 * Description of one hardware statistics counter: its name as
 * exported via kstat, its register base address, how many bits of
 * the register are valid (32 or 48), and the kstat unit to report.
 */
struct ixl_counter {
	const char		*c_name;
	uint32_t		 c_base;
	uint8_t			 c_width;
	uint8_t			 c_type;
};
5207 
/*
 * Per-port statistics counters.  Each entry's c_base is indexed by
 * the port number (base + port * 8, see ixl_rd_counters()).  The
 * comment above each entry is the register's datasheet mnemonic.
 */
const struct ixl_counter ixl_port_counters[] = {
	/* GORC */
	{ "rx bytes",		0x00300000, 48, KSTAT_KV_U_BYTES },
	/* MLFC */
	{ "mac local errs",	0x00300020, 32, KSTAT_KV_U_NONE },
	/* MRFC */
	{ "mac remote errs",	0x00300040, 32, KSTAT_KV_U_NONE },
	/* MSPDC */
	{ "mac short",		0x00300060, 32, KSTAT_KV_U_PACKETS },
	/* CRCERRS */
	{ "crc errs",		0x00300080, 32, KSTAT_KV_U_PACKETS },
	/* RLEC */
	{ "rx len errs",	0x003000a0, 32, KSTAT_KV_U_PACKETS },
	/* ERRBC */
	{ "byte errs",		0x003000c0, 32, KSTAT_KV_U_PACKETS },
	/* ILLERRC */
	{ "illegal byte",	0x003000d0, 32, KSTAT_KV_U_PACKETS },
	/* RUC */
	{ "rx undersize",	0x00300100, 32, KSTAT_KV_U_PACKETS },
	/* ROC */
	{ "rx oversize",	0x00300120, 32, KSTAT_KV_U_PACKETS },
	/* LXONRXCNT */
	{ "rx link xon",	0x00300140, 32, KSTAT_KV_U_PACKETS },
	/* LXOFFRXCNT */
	{ "rx link xoff",	0x00300160, 32, KSTAT_KV_U_PACKETS },

	/* Priority XON Received Count */
	/* Priority XOFF Received Count */
	/* Priority XON to XOFF Count */

	/* PRC64 */
	{ "rx 64B",		0x00300480, 48, KSTAT_KV_U_PACKETS },
	/* PRC127 */
	{ "rx 65-127B",		0x003004A0, 48, KSTAT_KV_U_PACKETS },
	/* PRC255 */
	{ "rx 128-255B",	0x003004C0, 48, KSTAT_KV_U_PACKETS },
	/* PRC511 */
	{ "rx 256-511B",	0x003004E0, 48, KSTAT_KV_U_PACKETS },
	/* PRC1023 */
	{ "rx 512-1023B",	0x00300500, 48, KSTAT_KV_U_PACKETS },
	/* PRC1522 */
	{ "rx 1024-1522B",	0x00300520, 48, KSTAT_KV_U_PACKETS },
	/* PRC9522 */
	{ "rx 1523-9522B",	0x00300540, 48, KSTAT_KV_U_PACKETS },
	/* ROC */
	{ "rx fragment",	0x00300560, 32, KSTAT_KV_U_PACKETS },
	/* RJC */
	{ "rx jabber",		0x00300580, 32, KSTAT_KV_U_PACKETS },
	/* UPRC */
	{ "rx ucasts",		0x003005a0, 48, KSTAT_KV_U_PACKETS },
	/* MPRC */
	{ "rx mcasts",		0x003005c0, 48, KSTAT_KV_U_PACKETS },
	/* BPRC */
	{ "rx bcasts",		0x003005e0, 48, KSTAT_KV_U_PACKETS },
	/* RDPC */
	{ "rx discards",	0x00300600, 32, KSTAT_KV_U_PACKETS },
	/* LDPC */
	{ "rx lo discards",	0x00300620, 32, KSTAT_KV_U_PACKETS },
	/* RUPP */
	{ "rx no dest",		0x00300660, 32, KSTAT_KV_U_PACKETS },

	/* GOTC */
	{ "tx bytes",		0x00300680, 48, KSTAT_KV_U_BYTES },
	/* PTC64 */
	{ "tx 64B",		0x003006A0, 48, KSTAT_KV_U_PACKETS },
	/* PTC127 */
	{ "tx 65-127B",		0x003006C0, 48, KSTAT_KV_U_PACKETS },
	/* PTC255 */
	{ "tx 128-255B",	0x003006E0, 48, KSTAT_KV_U_PACKETS },
	/* PTC511 */
	{ "tx 256-511B",	0x00300700, 48, KSTAT_KV_U_PACKETS },
	/* PTC1023 */
	{ "tx 512-1023B",	0x00300720, 48, KSTAT_KV_U_PACKETS },
	/* PTC1522 */
	{ "tx 1024-1522B",	0x00300740, 48, KSTAT_KV_U_PACKETS },
	/* PTC9522 */
	{ "tx 1523-9522B",	0x00300760, 48, KSTAT_KV_U_PACKETS },

	/* Priority XON Transmitted Count */
	/* Priority XOFF Transmitted Count */

	/* LXONTXC */
	{ "tx link xon",	0x00300980, 48, KSTAT_KV_U_PACKETS },
	/* LXOFFTXC */
	{ "tx link xoff",	0x003009a0, 48, KSTAT_KV_U_PACKETS },
	/* UPTC */
	{ "tx ucasts",		0x003009c0, 48, KSTAT_KV_U_PACKETS },
	/* MPTC */
	{ "tx mcasts",		0x003009e0, 48, KSTAT_KV_U_PACKETS },
	/* BPTC */
	{ "tx bcasts",		0x00300a00, 48, KSTAT_KV_U_PACKETS },
	/* TDOLD */
	{ "tx link down",	0x00300a20, 48, KSTAT_KV_U_PACKETS },
};
5302 
/*
 * Per-VSI statistics counters.  c_base is indexed by the VSI number
 * (base + vsi * 8, see ixl_rd_counters()); mnemonics as above.
 */
const struct ixl_counter ixl_vsi_counters[] = {
	/* VSI RDPC */
	{ "rx discards",	0x00310000, 32, KSTAT_KV_U_PACKETS },
	/* VSI GOTC */
	{ "tx bytes",		0x00328000, 48, KSTAT_KV_U_BYTES },
	/* VSI UPTC */
	{ "tx ucasts",		0x0033c000, 48, KSTAT_KV_U_PACKETS },
	/* VSI MPTC */
	{ "tx mcasts",		0x0033cc00, 48, KSTAT_KV_U_PACKETS },
	/* VSI BPTC */
	{ "tx bcasts",		0x0033d800, 48, KSTAT_KV_U_PACKETS },
	/* VSI TEPC */
	{ "tx errs",		0x00344000, 48, KSTAT_KV_U_PACKETS },
	/* VSI TDPC */
	{ "tx discards",	0x00348000, 48, KSTAT_KV_U_PACKETS },
	/* VSI GORC */
	{ "rx bytes",		0x00358000, 48, KSTAT_KV_U_BYTES },
	/* VSI UPRC */
	{ "rx ucasts",		0x0036c000, 48, KSTAT_KV_U_PACKETS },
	/* VSI MPRC */
	{ "rx mcasts",		0x0036cc00, 48, KSTAT_KV_U_PACKETS },
	/* VSI BPRC */
	{ "rx bcasts",		0x0036d800, 48, KSTAT_KV_U_PACKETS },
	/* VSI RUPP */
	{ "rx noproto",		0x0036e400, 32, KSTAT_KV_U_PACKETS },
};
5329 
/*
 * Private state hung off a kstat (ks_ptr): which counter table it
 * exports, a 2*n array of raw values used as two alternating
 * generations (old/new snapshots, selected by gen & 1 — see
 * ixl_kstat_read()), and the port/VSI index into the registers.
 */
struct ixl_counter_state {
	const struct ixl_counter
				*counters;
	uint64_t		*values;
	size_t			 n;
	uint32_t		 index;
	unsigned int		 gen;
};
5338 
5339 static void
5340 ixl_rd_counters(struct ixl_softc *sc, const struct ixl_counter_state *state,
5341     uint64_t *vs)
5342 {
5343 	const struct ixl_counter *c;
5344 	bus_addr_t r;
5345 	uint64_t v;
5346 	size_t i;
5347 
5348 	for (i = 0; i < state->n; i++) {
5349 		c = &state->counters[i];
5350 
5351 		r = c->c_base + (state->index * 8);
5352 
5353 		if (c->c_width == 32)
5354 			v = bus_space_read_4(sc->sc_memt, sc->sc_memh, r);
5355 		else
5356 			v = bus_space_read_8(sc->sc_memt, sc->sc_memh, r);
5357 
5358 		vs[i] = v;
5359 	}
5360 }
5361 
5362 static int
5363 ixl_kstat_read(struct kstat *ks)
5364 {
5365 	struct ixl_softc *sc = ks->ks_softc;
5366 	struct kstat_kv *kvs = ks->ks_data;
5367 	struct ixl_counter_state *state = ks->ks_ptr;
5368 	unsigned int gen = (state->gen++) & 1;
5369 	uint64_t *ovs = state->values + (gen * state->n);
5370 	uint64_t *nvs = state->values + (!gen * state->n);
5371 	size_t i;
5372 
5373 	ixl_rd_counters(sc, state, nvs);
5374 	getnanouptime(&ks->ks_updated);
5375 
5376 	for (i = 0; i < state->n; i++) {
5377 		const struct ixl_counter *c = &state->counters[i];
5378 		uint64_t n = nvs[i], o = ovs[i];
5379 
5380 		if (c->c_width < 64) {
5381 			if (n < o)
5382 				n += (1ULL << c->c_width);
5383 		}
5384 
5385 		kstat_kv_u64(&kvs[i]) += (n - o);
5386 	}
5387 
5388 	return (0);
5389 }
5390 
5391 static void
5392 ixl_kstat_tick(void *arg)
5393 {
5394 	struct ixl_softc *sc = arg;
5395 
5396 	timeout_add_sec(&sc->sc_kstat_tmo, 4);
5397 
5398 	mtx_enter(&sc->sc_kstat_mtx);
5399 
5400 	ixl_kstat_read(sc->sc_port_kstat);
5401 	ixl_kstat_read(sc->sc_vsi_kstat);
5402 
5403 	mtx_leave(&sc->sc_kstat_mtx);
5404 }
5405 
5406 static struct kstat *
5407 ixl_kstat_create(struct ixl_softc *sc, const char *name,
5408     const struct ixl_counter *counters, size_t n, uint32_t index)
5409 {
5410 	struct kstat *ks;
5411 	struct kstat_kv *kvs;
5412 	struct ixl_counter_state *state;
5413 	const struct ixl_counter *c;
5414 	unsigned int i;
5415 
5416 	ks = kstat_create(DEVNAME(sc), 0, name, 0, KSTAT_T_KV, 0);
5417 	if (ks == NULL) {
5418 		/* unable to create kstats */
5419 		return (NULL);
5420 	}
5421 
5422 	kvs = mallocarray(n, sizeof(*kvs), M_DEVBUF, M_WAITOK|M_ZERO);
5423 	for (i = 0; i < n; i++) {
5424 		c = &counters[i];
5425 
5426 		kstat_kv_unit_init(&kvs[i], c->c_name,
5427 		    KSTAT_KV_T_COUNTER64, c->c_type);
5428 	}
5429 
5430 	ks->ks_data = kvs;
5431 	ks->ks_datalen = n * sizeof(*kvs);
5432 	ks->ks_read = ixl_kstat_read;
5433 
5434 	state = malloc(sizeof(*state), M_DEVBUF, M_WAITOK|M_ZERO);
5435 	state->counters = counters;
5436 	state->n = n;
5437 	state->values = mallocarray(n * 2, sizeof(*state->values),
5438 	    M_DEVBUF, M_WAITOK|M_ZERO);
5439 	state->index = index;
5440 	ks->ks_ptr = state;
5441 
5442 	kstat_set_mutex(ks, &sc->sc_kstat_mtx);
5443 	ks->ks_softc = sc;
5444 	kstat_install(ks);
5445 
5446 	/* fetch a baseline */
5447 	ixl_rd_counters(sc, state, state->values);
5448 
5449 	return (ks);
5450 }
5451 
/*
 * Set up the statistics kstats at attach time: one kstat for the
 * physical port and one for the VSI, both serialized by
 * sc_kstat_mtx, plus the periodic refresh timeout.
 */
static void
ixl_kstat_attach(struct ixl_softc *sc)
{
	mtx_init(&sc->sc_kstat_mtx, IPL_SOFTCLOCK);
	timeout_set(&sc->sc_kstat_tmo, ixl_kstat_tick, sc);

	sc->sc_port_kstat = ixl_kstat_create(sc, "ixl-port",
	    ixl_port_counters, nitems(ixl_port_counters), sc->sc_port);
	sc->sc_vsi_kstat = ixl_kstat_create(sc, "ixl-vsi",
	    ixl_vsi_counters, nitems(ixl_vsi_counters),
	    lemtoh16(&sc->sc_vsi_number));

	/* ixl counters go up even when the interface is down */
	timeout_add_sec(&sc->sc_kstat_tmo, 4);
}
5467 
5468 #endif /* NKSTAT > 0 */
5469