/*	$OpenBSD: if_ixl.c,v 1.38 2019/05/04 13:42:12 jsg Exp $ */

/*
 * Copyright (c) 2013-2015, Intel Corporation
 * All rights reserved.

 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  1. Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  3. Neither the name of the Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived from
 *     this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2016,2017 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/timeout.h>
#include <sys/task.h>
#include <sys/syslog.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#define I40E_MASK(mask, shift)		((mask) << (shift))
#define I40E_PF_RESET_WAIT_COUNT	200
#define I40E_AQ_LARGE_BUF		512
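
/*
 * I40E_MASK() builds a register field mask from a field's maximum
 * value and its shift; e.g. I40E_MASK(0x3, 5) evaluates to 0x60.
 * An illustrative (hypothetical) field definition in the style of
 * if_ixlreg.h:
 *
 *	#define EXAMPLE_FIELD_SHIFT	5
 *	#define EXAMPLE_FIELD_MASK	I40E_MASK(0x3, EXAMPLE_FIELD_SHIFT)
 */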

/* bitfields for Tx queue mapping in QTX_CTL */
#define I40E_QTX_CTL_VF_QUEUE		0x0
#define I40E_QTX_CTL_VM_QUEUE		0x1
#define I40E_QTX_CTL_PF_QUEUE		0x2

#define I40E_QUEUE_TYPE_EOL		0x7ff

#define I40E_QUEUE_TYPE_RX		0x0
#define I40E_QUEUE_TYPE_TX		0x1
#define I40E_QUEUE_TYPE_PE_CEQ		0x2
#define I40E_QUEUE_TYPE_UNKNOWN		0x3

#define I40E_ITR_INDEX_RX		0x0
#define I40E_ITR_INDEX_TX		0x1
#define I40E_ITR_INDEX_OTHER		0x2
#define I40E_ITR_INDEX_NONE		0x3

#include <dev/pci/if_ixlreg.h>

#define I40E_INTR_NOTX_QUEUE		0
#define I40E_INTR_NOTX_INTR		0
#define I40E_INTR_NOTX_RX_QUEUE		0
#define I40E_INTR_NOTX_TX_QUEUE		1
#define I40E_INTR_NOTX_RX_MASK		I40E_PFINT_ICR0_QUEUE_0_MASK
#define I40E_INTR_NOTX_TX_MASK		I40E_PFINT_ICR0_QUEUE_1_MASK

struct ixl_aq_desc {
	uint16_t	iaq_flags;
#define	IXL_AQ_DD		(1U << 0)
#define	IXL_AQ_CMP		(1U << 1)
#define IXL_AQ_ERR		(1U << 2)
#define IXL_AQ_VFE		(1U << 3)
#define IXL_AQ_LB		(1U << 9)
#define IXL_AQ_RD		(1U << 10)
#define IXL_AQ_VFC		(1U << 11)
#define IXL_AQ_BUF		(1U << 12)
#define IXL_AQ_SI		(1U << 13)
#define IXL_AQ_EI		(1U << 14)
#define IXL_AQ_FE		(1U << 15)

#define IXL_AQ_FLAGS_FMT	"\020" "\020FE" "\017EI" "\016SI" "\015BUF" \
				    "\014VFC" "\013RD" "\012LB" "\004VFE" \
				    "\003ERR" "\002CMP" "\001DD"

	uint16_t	iaq_opcode;

	uint16_t	iaq_datalen;
	uint16_t	iaq_retval;

	uint64_t	iaq_cookie;

	uint32_t	iaq_param[4];
/*	iaq_data_hi	iaq_param[2] */
/*	iaq_data_lo	iaq_param[3] */
} __packed __aligned(8);
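
/*
 * IXL_AQ_FLAGS_FMT is a kernel printf "%b" format string for decoding
 * iaq_flags in debug output. A sketch of its use (DPRINTF is a
 * hypothetical stand-in for whatever debug macro is in scope):
 *
 *	DPRINTF("%s: aq flags %b\n", DEVNAME(sc),
 *	    lemtoh16(&iaq->iaq_flags), IXL_AQ_FLAGS_FMT);
 */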

/* aq commands */
#define IXL_AQ_OP_GET_VERSION		0x0001
#define IXL_AQ_OP_DRIVER_VERSION	0x0002
#define IXL_AQ_OP_QUEUE_SHUTDOWN	0x0003
#define IXL_AQ_OP_SET_PF_CONTEXT	0x0004
#define IXL_AQ_OP_GET_AQ_ERR_REASON	0x0005
#define IXL_AQ_OP_REQUEST_RESOURCE	0x0008
#define IXL_AQ_OP_RELEASE_RESOURCE	0x0009
#define IXL_AQ_OP_LIST_FUNC_CAP		0x000a
#define IXL_AQ_OP_LIST_DEV_CAP		0x000b
#define IXL_AQ_OP_MAC_ADDRESS_READ	0x0107
#define IXL_AQ_OP_CLEAR_PXE_MODE	0x0110
#define IXL_AQ_OP_SWITCH_GET_CONFIG	0x0200
#define IXL_AQ_OP_ADD_VSI		0x0210
#define IXL_AQ_OP_UPD_VSI_PARAMS	0x0211
#define IXL_AQ_OP_GET_VSI_PARAMS	0x0212
#define IXL_AQ_OP_ADD_VEB		0x0230
#define IXL_AQ_OP_UPD_VEB_PARAMS	0x0231
#define IXL_AQ_OP_GET_VEB_PARAMS	0x0232
#define IXL_AQ_OP_ADD_MACVLAN		0x0250
#define IXL_AQ_OP_REMOVE_MACVLAN	0x0251
#define IXL_AQ_OP_SET_VSI_PROMISC	0x0254
#define IXL_AQ_OP_PHY_GET_ABILITIES	0x0600
#define IXL_AQ_OP_PHY_SET_CONFIG	0x0601
#define IXL_AQ_OP_PHY_SET_MAC_CONFIG	0x0603
#define IXL_AQ_OP_PHY_RESTART_AN	0x0605
#define IXL_AQ_OP_PHY_LINK_STATUS	0x0607
#define IXL_AQ_OP_PHY_SET_EVENT_MASK	0x0613
#define IXL_AQ_OP_PHY_SET_REGISTER	0x0628
#define IXL_AQ_OP_PHY_GET_REGISTER	0x0629
#define IXL_AQ_OP_LLDP_GET_MIB		0x0a00
#define IXL_AQ_OP_LLDP_MIB_CHG_EV	0x0a01
#define IXL_AQ_OP_LLDP_ADD_TLV		0x0a02
#define IXL_AQ_OP_LLDP_UPD_TLV		0x0a03
#define IXL_AQ_OP_LLDP_DEL_TLV		0x0a04
#define IXL_AQ_OP_LLDP_STOP_AGENT	0x0a05
#define IXL_AQ_OP_LLDP_START_AGENT	0x0a06
#define IXL_AQ_OP_LLDP_GET_CEE_DCBX	0x0a07
#define IXL_AQ_OP_LLDP_SPECIFIC_AGENT	0x0a09

struct ixl_aq_mac_addresses {
	uint8_t		pf_lan[ETHER_ADDR_LEN];
	uint8_t		pf_san[ETHER_ADDR_LEN];
	uint8_t		port[ETHER_ADDR_LEN];
	uint8_t		pf_wol[ETHER_ADDR_LEN];
} __packed;

#define IXL_AQ_MAC_PF_LAN_VALID		(1U << 4)
#define IXL_AQ_MAC_PF_SAN_VALID		(1U << 5)
#define IXL_AQ_MAC_PORT_VALID		(1U << 6)
#define IXL_AQ_MAC_PF_WOL_VALID		(1U << 7)

struct ixl_aq_capability {
	uint16_t	cap_id;
#define IXL_AQ_CAP_SWITCH_MODE		0x0001
#define IXL_AQ_CAP_MNG_MODE		0x0002
#define IXL_AQ_CAP_NPAR_ACTIVE		0x0003
#define IXL_AQ_CAP_OS2BMC_CAP		0x0004
#define IXL_AQ_CAP_FUNCTIONS_VALID	0x0005
#define IXL_AQ_CAP_ALTERNATE_RAM	0x0006
#define IXL_AQ_CAP_WOL_AND_PROXY	0x0008
#define IXL_AQ_CAP_SRIOV		0x0012
#define IXL_AQ_CAP_VF			0x0013
#define IXL_AQ_CAP_VMDQ			0x0014
#define IXL_AQ_CAP_8021QBG		0x0015
#define IXL_AQ_CAP_8021QBR		0x0016
#define IXL_AQ_CAP_VSI			0x0017
#define IXL_AQ_CAP_DCB			0x0018
#define IXL_AQ_CAP_FCOE			0x0021
#define IXL_AQ_CAP_ISCSI		0x0022
#define IXL_AQ_CAP_RSS			0x0040
#define IXL_AQ_CAP_RXQ			0x0041
#define IXL_AQ_CAP_TXQ			0x0042
#define IXL_AQ_CAP_MSIX			0x0043
#define IXL_AQ_CAP_VF_MSIX		0x0044
#define IXL_AQ_CAP_FLOW_DIRECTOR	0x0045
#define IXL_AQ_CAP_1588			0x0046
#define IXL_AQ_CAP_IWARP		0x0051
#define IXL_AQ_CAP_LED			0x0061
#define IXL_AQ_CAP_SDP			0x0062
#define IXL_AQ_CAP_MDIO			0x0063
#define IXL_AQ_CAP_WSR_PROT		0x0064
#define IXL_AQ_CAP_NVM_MGMT		0x0080
#define IXL_AQ_CAP_FLEX10		0x00F1
#define IXL_AQ_CAP_CEM			0x00F2
	uint8_t		major_rev;
	uint8_t		minor_rev;
	uint32_t	number;
	uint32_t	logical_id;
	uint32_t	phys_id;
	uint8_t		_reserved[16];
} __packed __aligned(4);

#define IXL_LLDP_SHUTDOWN		0x1

struct ixl_aq_switch_config {
	uint16_t	num_reported;
	uint16_t	num_total;
	uint8_t		_reserved[12];
} __packed __aligned(4);

struct ixl_aq_switch_config_element {
	uint8_t		type;
#define IXL_AQ_SW_ELEM_TYPE_MAC		1
#define IXL_AQ_SW_ELEM_TYPE_PF		2
#define IXL_AQ_SW_ELEM_TYPE_VF		3
#define IXL_AQ_SW_ELEM_TYPE_EMP		4
#define IXL_AQ_SW_ELEM_TYPE_BMC		5
#define IXL_AQ_SW_ELEM_TYPE_PV		16
#define IXL_AQ_SW_ELEM_TYPE_VEB		17
#define IXL_AQ_SW_ELEM_TYPE_PA		18
#define IXL_AQ_SW_ELEM_TYPE_VSI		19
	uint8_t		revision;
#define IXL_AQ_SW_ELEM_REV_1		1
	uint16_t	seid;

	uint16_t	uplink_seid;
	uint16_t	downlink_seid;

	uint8_t		_reserved[3];
	uint8_t		connection_type;
#define IXL_AQ_CONN_TYPE_REGULAR	0x1
#define IXL_AQ_CONN_TYPE_DEFAULT	0x2
#define IXL_AQ_CONN_TYPE_CASCADED	0x3

	uint16_t	scheduler_id;
	uint16_t	element_info;
} __packed __aligned(4);

#define IXL_PHY_TYPE_SGMII		0x00
#define IXL_PHY_TYPE_1000BASE_KX	0x01
#define IXL_PHY_TYPE_10GBASE_KX4	0x02
#define IXL_PHY_TYPE_10GBASE_KR		0x03
#define IXL_PHY_TYPE_40GBASE_KR4	0x04
#define IXL_PHY_TYPE_XAUI		0x05
#define IXL_PHY_TYPE_XFI		0x06
#define IXL_PHY_TYPE_SFI		0x07
#define IXL_PHY_TYPE_XLAUI		0x08
#define IXL_PHY_TYPE_XLPPI		0x09
#define IXL_PHY_TYPE_40GBASE_CR4_CU	0x0a
#define IXL_PHY_TYPE_10GBASE_CR1_CU	0x0b
#define IXL_PHY_TYPE_10GBASE_AOC	0x0c
#define IXL_PHY_TYPE_40GBASE_AOC	0x0d
#define IXL_PHY_TYPE_100BASE_TX		0x11
#define IXL_PHY_TYPE_1000BASE_T		0x12
#define IXL_PHY_TYPE_10GBASE_T		0x13
#define IXL_PHY_TYPE_10GBASE_SR		0x14
#define IXL_PHY_TYPE_10GBASE_LR		0x15
#define IXL_PHY_TYPE_10GBASE_SFPP_CU	0x16
#define IXL_PHY_TYPE_10GBASE_CR1	0x17
#define IXL_PHY_TYPE_40GBASE_CR4	0x18
#define IXL_PHY_TYPE_40GBASE_SR4	0x19
#define IXL_PHY_TYPE_40GBASE_LR4	0x1a
#define IXL_PHY_TYPE_1000BASE_SX	0x1b
#define IXL_PHY_TYPE_1000BASE_LX	0x1c
#define IXL_PHY_TYPE_1000BASE_T_OPTICAL	0x1d
#define IXL_PHY_TYPE_20GBASE_KR2	0x1e

#define IXL_PHY_TYPE_25GBASE_KR		0x1f
#define IXL_PHY_TYPE_25GBASE_CR		0x20
#define IXL_PHY_TYPE_25GBASE_SR		0x21
#define IXL_PHY_TYPE_25GBASE_LR		0x22
#define IXL_PHY_TYPE_25GBASE_AOC	0x23
#define IXL_PHY_TYPE_25GBASE_ACC	0x24

struct ixl_aq_module_desc {
	uint8_t		oui[3];
	uint8_t		_reserved1;
	uint8_t		part_number[16];
	uint8_t		revision[4];
	uint8_t		_reserved2[8];
} __packed __aligned(4);

struct ixl_aq_phy_abilities {
	uint32_t	phy_type;

	uint8_t		link_speed;
#define IXL_AQ_PHY_LINK_SPEED_100MB	0x1
#define IXL_AQ_PHY_LINK_SPEED_1000MB	0x2
#define IXL_AQ_PHY_LINK_SPEED_10GB	0x3
#define IXL_AQ_PHY_LINK_SPEED_40GB	0x4
#define IXL_AQ_PHY_LINK_SPEED_20GB	0x5
#define IXL_AQ_PHY_LINK_SPEED_25GB	0x6
	uint8_t		abilities;
	uint16_t	eee_capability;

	uint32_t	eeer_val;

	uint8_t		d3_lpan;
	uint8_t		phy_type_ext;
#define IXL_AQ_PHY_TYPE_EXT_25G_KR	0x01
#define IXL_AQ_PHY_TYPE_EXT_25G_CR	0x02
#define IXL_AQ_PHY_TYPE_EXT_25G_SR	0x04
#define IXL_AQ_PHY_TYPE_EXT_25G_LR	0x08
	uint8_t		fec_cfg_curr_mod_ext_info;
#define IXL_AQ_ENABLE_FEC_KR		0x01
#define IXL_AQ_ENABLE_FEC_RS		0x02
#define IXL_AQ_REQUEST_FEC_KR		0x04
#define IXL_AQ_REQUEST_FEC_RS		0x08
#define IXL_AQ_ENABLE_FEC_AUTO		0x10
#define IXL_AQ_MODULE_TYPE_EXT_MASK	0xe0
#define IXL_AQ_MODULE_TYPE_EXT_SHIFT	5
	uint8_t		ext_comp_code;

	uint8_t		phy_id[4];

	uint8_t		module_type[3];
	uint8_t		qualified_module_count;
#define IXL_AQ_PHY_MAX_QMS		16
	struct ixl_aq_module_desc
			qualified_module[IXL_AQ_PHY_MAX_QMS];
} __packed __aligned(4);

struct ixl_aq_link_param {
	uint8_t		notify;
#define IXL_AQ_LINK_NOTIFY	0x03
	uint8_t		_reserved1;
	uint8_t		phy;
	uint8_t		speed;
	uint8_t		status;
	uint8_t		_reserved2[11];
} __packed __aligned(4);

struct ixl_aq_vsi_param {
	uint16_t	uplink_seid;
	uint8_t		connect_type;
#define IXL_AQ_VSI_CONN_TYPE_NORMAL	(0x1)
#define IXL_AQ_VSI_CONN_TYPE_DEFAULT	(0x2)
#define IXL_AQ_VSI_CONN_TYPE_CASCADED	(0x3)
	uint8_t		_reserved1;

	uint8_t		vf_id;
	uint8_t		_reserved2;
	uint16_t	vsi_flags;
#define IXL_AQ_VSI_TYPE_SHIFT		0x0
#define IXL_AQ_VSI_TYPE_MASK		(0x3 << IXL_AQ_VSI_TYPE_SHIFT)
#define IXL_AQ_VSI_TYPE_VF		0x0
#define IXL_AQ_VSI_TYPE_VMDQ2		0x1
#define IXL_AQ_VSI_TYPE_PF		0x2
#define IXL_AQ_VSI_TYPE_EMP_MNG		0x3
#define IXL_AQ_VSI_FLAG_CASCADED_PV	0x4

	uint32_t	addr_hi;
	uint32_t	addr_lo;
} __packed __aligned(16);

struct ixl_aq_add_macvlan {
	uint16_t	num_addrs;
	uint16_t	seid0;
	uint16_t	seid1;
	uint16_t	seid2;
	uint32_t	addr_hi;
	uint32_t	addr_lo;
} __packed __aligned(16);

struct ixl_aq_add_macvlan_elem {
	uint8_t		macaddr[6];
	uint16_t	vlan;
	uint16_t	flags;
#define IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH	0x0001
#define IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN	0x0004
	uint16_t	queue;
	uint32_t	_reserved;
} __packed __aligned(16);

struct ixl_aq_remove_macvlan {
	uint16_t	num_addrs;
	uint16_t	seid0;
	uint16_t	seid1;
	uint16_t	seid2;
	uint32_t	addr_hi;
	uint32_t	addr_lo;
} __packed __aligned(16);

struct ixl_aq_remove_macvlan_elem {
	uint8_t		macaddr[6];
	uint16_t	vlan;
	uint8_t		flags;
#define IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH	0x0001
#define IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN	0x0008
	uint8_t		_reserved[7];
} __packed __aligned(16);

struct ixl_aq_vsi_reply {
	uint16_t	seid;
	uint16_t	vsi_number;

	uint16_t	vsis_used;
	uint16_t	vsis_free;

	uint32_t	addr_hi;
	uint32_t	addr_lo;
} __packed __aligned(16);

struct ixl_aq_vsi_data {
	/* first 96 bytes are written by SW */
441 	uint16_t	valid_sections;
442 #define IXL_AQ_VSI_VALID_SWITCH		(1 << 0)
443 #define IXL_AQ_VSI_VALID_SECURITY	(1 << 1)
444 #define IXL_AQ_VSI_VALID_VLAN		(1 << 2)
445 #define IXL_AQ_VSI_VALID_CAS_PV		(1 << 3)
446 #define IXL_AQ_VSI_VALID_INGRESS_UP	(1 << 4)
447 #define IXL_AQ_VSI_VALID_EGRESS_UP	(1 << 5)
448 #define IXL_AQ_VSI_VALID_QUEUE_MAP	(1 << 6)
449 #define IXL_AQ_VSI_VALID_QUEUE_OPT	(1 << 7)
450 #define IXL_AQ_VSI_VALID_OUTER_UP	(1 << 8)
451 #define IXL_AQ_VSI_VALID_SCHED		(1 << 9)
452 	/* switch section */
453 	uint16_t	switch_id;
454 #define IXL_AQ_VSI_SWITCH_ID_SHIFT	0
455 #define IXL_AQ_VSI_SWITCH_ID_MASK	(0xfff << IXL_AQ_VSI_SWITCH_ID_SHIFT)
456 #define IXL_AQ_VSI_SWITCH_NOT_STAG	(1 << 12)
457 #define IXL_AQ_VSI_SWITCH_LOCAL_LB	(1 << 14)
458 
459 	uint8_t		_reserved1[2];
460 	/* security section */
461 	uint8_t		sec_flags;
462 #define IXL_AQ_VSI_SEC_ALLOW_DEST_OVRD	(1 << 0)
463 #define IXL_AQ_VSI_SEC_ENABLE_VLAN_CHK	(1 << 1)
464 #define IXL_AQ_VSI_SEC_ENABLE_MAC_CHK	(1 << 2)
465 	uint8_t		_reserved2;
466 
467 	/* vlan section */
468 	uint16_t	pvid;
469 	uint16_t	fcoe_pvid;
470 
471 	uint8_t		port_vlan_flags;
472 #define IXL_AQ_VSI_PVLAN_MODE_SHIFT	0
473 #define IXL_AQ_VSI_PVLAN_MODE_MASK	(0x3 << IXL_AQ_VSI_PVLAN_MODE_SHIFT)
474 #define IXL_AQ_VSI_PVLAN_MODE_TAGGED	(0x1 << IXL_AQ_VSI_PVLAN_MODE_SHIFT)
475 #define IXL_AQ_VSI_PVLAN_MODE_UNTAGGED 	(0x2 << IXL_AQ_VSI_PVLAN_MODE_SHIFT)
476 #define IXL_AQ_VSI_PVLAN_MODE_ALL	(0x3 << IXL_AQ_VSI_PVLAN_MODE_SHIFT)
477 #define IXL_AQ_VSI_PVLAN_INSERT_PVID	(0x4 << IXL_AQ_VSI_PVLAN_MODE_SHIFT)
478 #define IXL_AQ_VSI_PVLAN_EMOD_SHIFT	0x3
479 #define IXL_AQ_VSI_PVLAN_EMOD_MASK	(0x3 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT)
480 #define IXL_AQ_VSI_PVLAN_EMOD_STR_BOTH	(0x0 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT)
481 #define IXL_AQ_VSI_PVLAN_EMOD_STR_UP	(0x1 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT)
482 #define IXL_AQ_VSI_PVLAN_EMOD_STR	(0x2 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT)
483 #define IXL_AQ_VSI_PVLAN_EMOD_NOTHING	(0x3 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT)
484 	uint8_t		_reserved3[3];
485 
486 	/* ingress egress up section */
487 	uint32_t	ingress_table;
488 #define IXL_AQ_VSI_UP_SHIFT(_up)	((_up) * 3)
#define IXL_AQ_VSI_UP_MASK(_up)		(0x7 << IXL_AQ_VSI_UP_SHIFT(_up))
	uint32_t	egress_table;

	/* cascaded pv section */
	uint16_t	cas_pv_tag;
	uint8_t		cas_pv_flags;
#define IXL_AQ_VSI_CAS_PV_TAGX_SHIFT	0
#define IXL_AQ_VSI_CAS_PV_TAGX_MASK	(0x3 << IXL_AQ_VSI_CAS_PV_TAGX_SHIFT)
#define IXL_AQ_VSI_CAS_PV_TAGX_LEAVE	(0x0 << IXL_AQ_VSI_CAS_PV_TAGX_SHIFT)
#define IXL_AQ_VSI_CAS_PV_TAGX_REMOVE	(0x1 << IXL_AQ_VSI_CAS_PV_TAGX_SHIFT)
#define IXL_AQ_VSI_CAS_PV_TAGX_COPY	(0x2 << IXL_AQ_VSI_CAS_PV_TAGX_SHIFT)
#define IXL_AQ_VSI_CAS_PV_INSERT_TAG	(1 << 4)
#define IXL_AQ_VSI_CAS_PV_ETAG_PRUNE	(1 << 5)
#define IXL_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG \
					(1 << 6)
	uint8_t		_reserved4;

	/* queue mapping section */
	uint16_t	mapping_flags;
#define IXL_AQ_VSI_QUE_MAP_MASK		0x1
#define IXL_AQ_VSI_QUE_MAP_CONTIG	0x0
#define IXL_AQ_VSI_QUE_MAP_NONCONTIG	0x1
	uint16_t	queue_mapping[16];
#define IXL_AQ_VSI_QUEUE_SHIFT		0x0
#define IXL_AQ_VSI_QUEUE_MASK		(0x7ff << IXL_AQ_VSI_QUEUE_SHIFT)
	uint16_t	tc_mapping[8];
#define IXL_AQ_VSI_TC_Q_OFFSET_SHIFT	0
#define IXL_AQ_VSI_TC_Q_OFFSET_MASK	(0x1ff << IXL_AQ_VSI_TC_Q_OFFSET_SHIFT)
#define IXL_AQ_VSI_TC_Q_NUMBER_SHIFT	9
#define IXL_AQ_VSI_TC_Q_NUMBER_MASK	(0x7 << IXL_AQ_VSI_TC_Q_NUMBER_SHIFT)

	/* queueing option section */
	uint8_t		queueing_opt_flags;
#define IXL_AQ_VSI_QUE_OPT_MCAST_UDP_EN	(1 << 2)
#define IXL_AQ_VSI_QUE_OPT_UCAST_UDP_EN	(1 << 3)
#define IXL_AQ_VSI_QUE_OPT_TCP_EN	(1 << 4)
#define IXL_AQ_VSI_QUE_OPT_FCOE_EN	(1 << 5)
#define IXL_AQ_VSI_QUE_OPT_RSS_LUT_PF	0
#define IXL_AQ_VSI_QUE_OPT_RSS_LUT_VSI	(1 << 6)
	uint8_t		_reserved5[3];

	/* scheduler section */
	uint8_t		up_enable_bits;
	uint8_t		_reserved6;

	/* outer up section */
	uint32_t	outer_up_table; /* same as ingress/egress tables */
	uint8_t		_reserved7[8];

	/* last 32 bytes are written by FW */
	uint16_t	qs_handle[8];
#define IXL_AQ_VSI_QS_HANDLE_INVALID	0xffff
	uint16_t	stat_counter_idx;
	uint16_t	sched_id;

	uint8_t		_reserved8[12];
} __packed __aligned(8);

CTASSERT(sizeof(struct ixl_aq_vsi_data) == 128);
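
/*
 * A sketch of how the context above is typically filled in before
 * being handed to the firmware via IXL_AQ_OP_ADD_VSI or
 * IXL_AQ_OP_UPD_VSI_PARAMS: flag the sections being written in
 * valid_sections, then populate those sections in little-endian
 * order. Illustrative only; this assumes a single contiguously
 * mapped queue:
 *
 *	struct ixl_aq_vsi_data *data = IXL_DMA_KVA(&sc->sc_scratch);
 *
 *	htolem16(&data->valid_sections, IXL_AQ_VSI_VALID_QUEUE_MAP);
 *	htolem16(&data->mapping_flags, IXL_AQ_VSI_QUE_MAP_CONTIG);
 *	htolem16(&data->queue_mapping[0], sc->sc_base_queue);
 */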

struct ixl_aq_vsi_promisc_param {
	uint16_t	flags;
	uint16_t	valid_flags;
#define IXL_AQ_VSI_PROMISC_FLAG_UCAST	(1 << 0)
#define IXL_AQ_VSI_PROMISC_FLAG_MCAST	(1 << 1)
#define IXL_AQ_VSI_PROMISC_FLAG_BCAST	(1 << 2)
#define IXL_AQ_VSI_PROMISC_FLAG_DFLT	(1 << 3)
#define IXL_AQ_VSI_PROMISC_FLAG_VLAN	(1 << 4)
#define IXL_AQ_VSI_PROMISC_FLAG_RXONLY	(1 << 15)

	uint16_t	seid;
#define IXL_AQ_VSI_PROMISC_SEID_VALID	(1 << 15)
	uint16_t	vlan;
#define IXL_AQ_VSI_PROMISC_VLAN_VALID	(1 << 15)
	uint32_t	reserved[2];
} __packed __aligned(8);

struct ixl_aq_veb_param {
	uint16_t	uplink_seid;
	uint16_t	downlink_seid;
	uint16_t	veb_flags;
#define IXL_AQ_ADD_VEB_FLOATING		(1 << 0)
#define IXL_AQ_ADD_VEB_PORT_TYPE_SHIFT	1
#define IXL_AQ_ADD_VEB_PORT_TYPE_MASK	(0x3 << IXL_AQ_ADD_VEB_PORT_TYPE_SHIFT)
#define IXL_AQ_ADD_VEB_PORT_TYPE_DEFAULT \
					(0x2 << IXL_AQ_ADD_VEB_PORT_TYPE_SHIFT)
#define IXL_AQ_ADD_VEB_PORT_TYPE_DATA	(0x4 << IXL_AQ_ADD_VEB_PORT_TYPE_SHIFT)
#define IXL_AQ_ADD_VEB_ENABLE_L2_FILTER	(1 << 3) /* deprecated */
#define IXL_AQ_ADD_VEB_DISABLE_STATS	(1 << 4)
	uint8_t		enable_tcs;
	uint8_t		_reserved[9];
} __packed __aligned(16);

struct ixl_aq_veb_reply {
	uint16_t	_reserved1;
	uint16_t	_reserved2;
	uint16_t	_reserved3;
	uint16_t	switch_seid;
	uint16_t	veb_seid;
#define IXL_AQ_VEB_ERR_FLAG_NO_VEB	(1 << 0)
#define IXL_AQ_VEB_ERR_FLAG_NO_SCHED	(1 << 1)
#define IXL_AQ_VEB_ERR_FLAG_NO_COUNTER	(1 << 2)
#define IXL_AQ_VEB_ERR_FLAG_NO_ENTRY	(1 << 3)
	uint16_t	statistic_index;
	uint16_t	vebs_used;
	uint16_t	vebs_free;
} __packed __aligned(16);

/* GET PHY ABILITIES param[0] */
#define IXL_AQ_PHY_REPORT_QUAL		(1 << 0)
#define IXL_AQ_PHY_REPORT_INIT		(1 << 1)

struct ixl_aq_phy_reg_access {
	uint8_t		phy_iface;
#define IXL_AQ_PHY_IF_INTERNAL		0
#define IXL_AQ_PHY_IF_EXTERNAL		1
#define IXL_AQ_PHY_IF_MODULE		2
	uint8_t		dev_addr;
	uint16_t	_reserved1;
	uint32_t	reg;
	uint32_t	val;
	uint32_t	_reserved2;
} __packed __aligned(16);

/* RESTART_AN param[0] */
#define IXL_AQ_PHY_RESTART_AN		(1 << 1)
#define IXL_AQ_PHY_LINK_ENABLE		(1 << 2)

struct ixl_aq_link_status { /* this occupies the iaq_param space */
	uint16_t	command_flags; /* only field set on command */
#define IXL_AQ_LSE_MASK			0x3
#define IXL_AQ_LSE_NOP			0x0
#define IXL_AQ_LSE_DISABLE		0x2
#define IXL_AQ_LSE_ENABLE		0x3
#define IXL_AQ_LSE_IS_ENABLED		0x1 /* only set in response */
	uint8_t		phy_type;
	uint8_t		link_speed;
	uint8_t		link_info;
#define IXL_AQ_LINK_UP_FUNCTION		0x01
#define IXL_AQ_LINK_FAULT		0x02
#define IXL_AQ_LINK_FAULT_TX		0x04
#define IXL_AQ_LINK_FAULT_RX		0x08
#define IXL_AQ_LINK_FAULT_REMOTE	0x10
#define IXL_AQ_LINK_UP_PORT		0x20
#define IXL_AQ_MEDIA_AVAILABLE		0x40
#define IXL_AQ_SIGNAL_DETECT		0x80
	uint8_t		an_info;
#define IXL_AQ_AN_COMPLETED		0x01
#define IXL_AQ_LP_AN_ABILITY		0x02
#define IXL_AQ_PD_FAULT			0x04
#define IXL_AQ_FEC_EN			0x08
#define IXL_AQ_PHY_LOW_POWER		0x10
#define IXL_AQ_LINK_PAUSE_TX		0x20
#define IXL_AQ_LINK_PAUSE_RX		0x40
#define IXL_AQ_QUALIFIED_MODULE		0x80

	uint8_t		ext_info;
#define IXL_AQ_LINK_PHY_TEMP_ALARM	0x01
#define IXL_AQ_LINK_XCESSIVE_ERRORS	0x02
#define IXL_AQ_LINK_TX_SHIFT		0x02
#define IXL_AQ_LINK_TX_MASK		(0x03 << IXL_AQ_LINK_TX_SHIFT)
#define IXL_AQ_LINK_TX_ACTIVE		0x00
#define IXL_AQ_LINK_TX_DRAINED		0x01
#define IXL_AQ_LINK_TX_FLUSHED		0x03
#define IXL_AQ_LINK_FORCED_40G		0x10
/* 25G Error Codes */
#define IXL_AQ_25G_NO_ERR		0x00
#define IXL_AQ_25G_NOT_PRESENT		0x01
#define IXL_AQ_25G_NVM_CRC_ERR		0x02
#define IXL_AQ_25G_SBUS_UCODE_ERR	0x03
#define IXL_AQ_25G_SERDES_UCODE_ERR	0x04
#define IXL_AQ_25G_NIMB_UCODE_ERR	0x05
	uint8_t		loopback;
	uint16_t	max_frame_size;

	uint8_t		config;
#define IXL_AQ_CONFIG_FEC_KR_ENA	0x01
#define IXL_AQ_CONFIG_FEC_RS_ENA	0x02
#define IXL_AQ_CONFIG_CRC_ENA		0x04
#define IXL_AQ_CONFIG_PACING_MASK	0x78
	uint8_t		power_desc;
#define IXL_AQ_LINK_POWER_CLASS_1	0x00
#define IXL_AQ_LINK_POWER_CLASS_2	0x01
#define IXL_AQ_LINK_POWER_CLASS_3	0x02
#define IXL_AQ_LINK_POWER_CLASS_4	0x03
#define IXL_AQ_PWR_CLASS_MASK		0x03

	uint8_t		reserved[4];
} __packed __aligned(4);
/* event mask command flags for param[2] */
#define IXL_AQ_PHY_EV_MASK		0x3ff
#define IXL_AQ_PHY_EV_LINK_UPDOWN	(1 << 1)
#define IXL_AQ_PHY_EV_MEDIA_NA		(1 << 2)
#define IXL_AQ_PHY_EV_LINK_FAULT	(1 << 3)
#define IXL_AQ_PHY_EV_PHY_TEMP_ALARM	(1 << 4)
#define IXL_AQ_PHY_EV_EXCESS_ERRORS	(1 << 5)
#define IXL_AQ_PHY_EV_SIGNAL_DETECT	(1 << 6)
#define IXL_AQ_PHY_EV_AN_COMPLETED	(1 << 7)
#define IXL_AQ_PHY_EV_MODULE_QUAL_FAIL	(1 << 8)
#define IXL_AQ_PHY_EV_PORT_TX_SUSPENDED	(1 << 9)

/* aq response codes */
#define IXL_AQ_RC_OK			0  /* success */
#define IXL_AQ_RC_EPERM			1  /* Operation not permitted */
#define IXL_AQ_RC_ENOENT		2  /* No such element */
#define IXL_AQ_RC_ESRCH			3  /* Bad opcode */
#define IXL_AQ_RC_EINTR			4  /* operation interrupted */
#define IXL_AQ_RC_EIO			5  /* I/O error */
#define IXL_AQ_RC_ENXIO			6  /* No such resource */
#define IXL_AQ_RC_E2BIG			7  /* Arg too long */
#define IXL_AQ_RC_EAGAIN		8  /* Try again */
#define IXL_AQ_RC_ENOMEM		9  /* Out of memory */
#define IXL_AQ_RC_EACCES		10 /* Permission denied */
#define IXL_AQ_RC_EFAULT		11 /* Bad address */
#define IXL_AQ_RC_EBUSY			12 /* Device or resource busy */
#define IXL_AQ_RC_EEXIST		13 /* object already exists */
#define IXL_AQ_RC_EINVAL		14 /* invalid argument */
#define IXL_AQ_RC_ENOTTY		15 /* not a typewriter */
#define IXL_AQ_RC_ENOSPC		16 /* No space or alloc failure */
#define IXL_AQ_RC_ENOSYS		17 /* function not implemented */
#define IXL_AQ_RC_ERANGE		18 /* parameter out of range */
#define IXL_AQ_RC_EFLUSHED		19 /* cmd flushed due to prev error */
#define IXL_AQ_RC_BAD_ADDR		20 /* contains a bad pointer */
#define IXL_AQ_RC_EMODE			21 /* not allowed in current mode */
#define IXL_AQ_RC_EFBIG			22 /* file too large */

struct ixl_tx_desc {
	uint64_t		addr;
	uint64_t		cmd;
#define IXL_TX_DESC_DTYPE_SHIFT		0
#define IXL_TX_DESC_DTYPE_MASK		(0xfULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_DATA		(0x0ULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_NOP		(0x1ULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_CONTEXT	(0x1ULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_FCOE_CTX	(0x2ULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_FD		(0x8ULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_DDP_CTX	(0x9ULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_FLEX_DATA	(0xbULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_FLEX_CTX_1	(0xcULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_FLEX_CTX_2	(0xdULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_DONE		(0xfULL << IXL_TX_DESC_DTYPE_SHIFT)

#define IXL_TX_DESC_CMD_SHIFT		4
#define IXL_TX_DESC_CMD_MASK		(0x3ffULL << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_EOP		(0x001 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_RS		(0x002 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_ICRC		(0x004 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_IL2TAG1		(0x008 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_DUMMY		(0x010 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_IIPT_MASK	(0x060 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_IIPT_NONIP	(0x000 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_IIPT_IPV6	(0x020 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_IIPT_IPV4	(0x040 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_IIPT_IPV4_CSUM	(0x060 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_FCOET		(0x080 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_L4T_EOFT_MASK	(0x300 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_L4T_EOFT_UNK	(0x000 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_L4T_EOFT_TCP	(0x100 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_L4T_EOFT_SCTP	(0x200 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_L4T_EOFT_UDP	(0x300 << IXL_TX_DESC_CMD_SHIFT)

#define IXL_TX_DESC_MACLEN_SHIFT	16
#define IXL_TX_DESC_MACLEN_MASK		(0x7fULL << IXL_TX_DESC_MACLEN_SHIFT)
#define IXL_TX_DESC_IPLEN_SHIFT		23
#define IXL_TX_DESC_IPLEN_MASK		(0x7fULL << IXL_TX_DESC_IPLEN_SHIFT)
#define IXL_TX_DESC_L4LEN_SHIFT		30
#define IXL_TX_DESC_L4LEN_MASK		(0xfULL << IXL_TX_DESC_L4LEN_SHIFT)
#define IXL_TX_DESC_FCLEN_SHIFT		30
#define IXL_TX_DESC_FCLEN_MASK		(0xfULL << IXL_TX_DESC_FCLEN_SHIFT)

#define IXL_TX_DESC_BSIZE_SHIFT		34
#define IXL_TX_DESC_BSIZE_MAX		0x3fffULL
#define IXL_TX_DESC_BSIZE_MASK		\
	(IXL_TX_DESC_BSIZE_MAX << IXL_TX_DESC_BSIZE_SHIFT)
} __packed __aligned(16);
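
/*
 * A worked example of composing the cmd qword for a simple single
 * segment data descriptor (an illustrative sketch, not a code path
 * in this driver): dtype 0, EOP|RS|ICRC, and the buffer length
 * shifted into the BSIZE field.
 *
 *	uint64_t cmd;
 *
 *	cmd = IXL_TX_DESC_DTYPE_DATA | IXL_TX_DESC_CMD_EOP |
 *	    IXL_TX_DESC_CMD_RS | IXL_TX_DESC_CMD_ICRC |
 *	    ((uint64_t)m->m_pkthdr.len << IXL_TX_DESC_BSIZE_SHIFT);
 *	htolem64(&txd->addr, map->dm_segs[0].ds_addr);
 *	htolem64(&txd->cmd, cmd);
 */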

struct ixl_rx_rd_desc_16 {
	uint64_t		paddr; /* packet addr */
	uint64_t		haddr; /* header addr */
} __packed __aligned(16);

struct ixl_rx_rd_desc_32 {
	uint64_t		paddr; /* packet addr */
	uint64_t		haddr; /* header addr */
	uint64_t		_reserved1;
	uint64_t		_reserved2;
} __packed __aligned(16);

struct ixl_rx_wb_desc_16 {
	uint64_t		qword0;
	uint64_t		qword1;
#define IXL_RX_DESC_DD			(1 << 0)
#define IXL_RX_DESC_EOP			(1 << 1)
#define IXL_RX_DESC_L2TAG1P		(1 << 2)
#define IXL_RX_DESC_L3L4P		(1 << 3)
#define IXL_RX_DESC_CRCP		(1 << 4)
#define IXL_RX_DESC_TSYNINDX_SHIFT	5	/* TSYNINDX */
#define IXL_RX_DESC_TSYNINDX_MASK	(7 << IXL_RX_DESC_TSYNINDX_SHIFT)
#define IXL_RX_DESC_UMB_SHIFT		9
#define IXL_RX_DESC_UMB_MASK		(0x3 << IXL_RX_DESC_UMB_SHIFT)
#define IXL_RX_DESC_UMB_UCAST		(0x0 << IXL_RX_DESC_UMB_SHIFT)
#define IXL_RX_DESC_UMB_MCAST		(0x1 << IXL_RX_DESC_UMB_SHIFT)
#define IXL_RX_DESC_UMB_BCAST		(0x2 << IXL_RX_DESC_UMB_SHIFT)
#define IXL_RX_DESC_UMB_MIRROR		(0x3 << IXL_RX_DESC_UMB_SHIFT)
#define IXL_RX_DESC_FLM			(1 << 11)
#define IXL_RX_DESC_FLTSTAT_SHIFT	12
#define IXL_RX_DESC_FLTSTAT_MASK	(0x3 << IXL_RX_DESC_FLTSTAT_SHIFT)
#define IXL_RX_DESC_FLTSTAT_NODATA	(0x0 << IXL_RX_DESC_FLTSTAT_SHIFT)
#define IXL_RX_DESC_FLTSTAT_FDFILTID	(0x1 << IXL_RX_DESC_FLTSTAT_SHIFT)
#define IXL_RX_DESC_FLTSTAT_RSS		(0x3 << IXL_RX_DESC_FLTSTAT_SHIFT)
#define IXL_RX_DESC_LPBK		(1 << 14)
#define IXL_RX_DESC_IPV6EXTADD		(1 << 15)
#define IXL_RX_DESC_INT_UDP_0		(1 << 18)

#define IXL_RX_DESC_RXE			(1 << 19)
#define IXL_RX_DESC_HBO			(1 << 21)
#define IXL_RX_DESC_IPE			(1 << 22)
#define IXL_RX_DESC_L4E			(1 << 23)
#define IXL_RX_DESC_EIPE		(1 << 24)
#define IXL_RX_DESC_OVERSIZE		(1 << 25)

#define IXL_RX_DESC_PTYPE_SHIFT		30
#define IXL_RX_DESC_PTYPE_MASK		(0xffULL << IXL_RX_DESC_PTYPE_SHIFT)

#define IXL_RX_DESC_PLEN_SHIFT		38
#define IXL_RX_DESC_PLEN_MASK		(0x3fffULL << IXL_RX_DESC_PLEN_SHIFT)
#define IXL_RX_DESC_HLEN_SHIFT		42
#define IXL_RX_DESC_HLEN_MASK		(0x7ffULL << IXL_RX_DESC_HLEN_SHIFT)
} __packed __aligned(16);
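
/*
 * Decoding a 16 byte write-back descriptor amounts to masking fields
 * out of qword1 once the hardware has set the DD bit; a minimal
 * sketch of the completion check and packet length extraction:
 *
 *	uint64_t word = lemtoh64(&rxd->qword1);
 *	unsigned int len;
 *
 *	if (!ISSET(word, IXL_RX_DESC_DD))
 *		return (0);
 *	len = (word & IXL_RX_DESC_PLEN_MASK) >> IXL_RX_DESC_PLEN_SHIFT;
 */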

struct ixl_rx_wb_desc_32 {
	uint64_t		qword0;
	uint64_t		qword1;
	uint64_t		qword2;
	uint64_t		qword3;
} __packed __aligned(16);

#define IXL_TX_PKT_DESCS		8
#define IXL_TX_QUEUE_ALIGN		128
#define IXL_RX_QUEUE_ALIGN		128

#define IXL_HARDMTU			9712 /* 9726 - ETHER_HDR_LEN */

#define IXL_PCIREG			PCI_MAPREG_START

#define IXL_ITR0			0x0
#define IXL_ITR1			0x1
#define IXL_ITR2			0x2
#define IXL_NOITR			0x2

#define IXL_AQ_NUM			256
#define IXL_AQ_MASK			(IXL_AQ_NUM - 1)
#define IXL_AQ_ALIGN			64 /* aq descriptor ring alignment */
#define IXL_AQ_BUFLEN			4096

#define IXL_HMC_ROUNDUP			512
#define IXL_HMC_PGSIZE			4096
#define IXL_HMC_DVASZ			sizeof(uint64_t)
#define IXL_HMC_PGS			(IXL_HMC_PGSIZE / IXL_HMC_DVASZ)
#define IXL_HMC_L2SZ			(IXL_HMC_PGSIZE * IXL_HMC_PGS)
#define IXL_HMC_PDVALID			1ULL

struct ixl_aq_regs {
	bus_size_t		atq_tail;
	bus_size_t		atq_head;
	bus_size_t		atq_len;
	bus_size_t		atq_bal;
	bus_size_t		atq_bah;

	bus_size_t		arq_tail;
	bus_size_t		arq_head;
	bus_size_t		arq_len;
	bus_size_t		arq_bal;
	bus_size_t		arq_bah;

	uint32_t		atq_len_enable;
	uint32_t		atq_tail_mask;
	uint32_t		atq_head_mask;

	uint32_t		arq_len_enable;
	uint32_t		arq_tail_mask;
	uint32_t		arq_head_mask;
};

struct ixl_phy_type {
	uint64_t	phy_type;
	uint64_t	ifm_type;
};

struct ixl_speed_type {
	uint8_t		dev_speed;
	uint64_t	net_speed;
};

struct ixl_aq_buf {
	SIMPLEQ_ENTRY(ixl_aq_buf)
				 aqb_entry;
	void			*aqb_data;
	bus_dmamap_t		 aqb_map;
};
SIMPLEQ_HEAD(ixl_aq_bufs, ixl_aq_buf);

struct ixl_dmamem {
	bus_dmamap_t		ixm_map;
	bus_dma_segment_t	ixm_seg;
	int			ixm_nsegs;
	size_t			ixm_size;
	caddr_t			ixm_kva;
};
#define IXL_DMA_MAP(_ixm)	((_ixm)->ixm_map)
#define IXL_DMA_DVA(_ixm)	((_ixm)->ixm_map->dm_segs[0].ds_addr)
#define IXL_DMA_KVA(_ixm)	((void *)(_ixm)->ixm_kva)
#define IXL_DMA_LEN(_ixm)	((_ixm)->ixm_size)
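
/*
 * A typical use of the wrappers above when programming a ring base
 * address into a split bal/bah register pair (a sketch that mirrors
 * what the attach code below does for the admin queues):
 *
 *	ixl_wr(sc, sc->sc_aq_regs->atq_bal, ixl_dmamem_lo(&sc->sc_atq));
 *	ixl_wr(sc, sc->sc_aq_regs->atq_bah, ixl_dmamem_hi(&sc->sc_atq));
 */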

struct ixl_hmc_entry {
	uint64_t		 hmc_base;
	uint32_t		 hmc_count;
	uint32_t		 hmc_size;
};

#define IXL_HMC_LAN_TX		 0
#define IXL_HMC_LAN_RX		 1
#define IXL_HMC_FCOE_CTX	 2
#define IXL_HMC_FCOE_FILTER	 3
#define IXL_HMC_COUNT		 4

struct ixl_hmc_pack {
	uint16_t		offset;
	uint16_t		width;
	uint16_t		lsb;
};

/*
 * these hmc objects have weird sizes and alignments, so these are abstract
 * representations of them that are nice for C to populate.
 *
 * the packing code relies on little-endian values being stored in the fields,
 * no high bits in the fields being set, and the fields must be packed in the
 * same order as they are in the ctx structure.
 */
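
/*
 * A worked example of the packing arithmetic (see ixl_hmc_pack()
 * further down): the rxq "qlen" field is 13 bits wide at lsb 89, so
 * its low bits land at bit 1 of byte 11 of the hardware context
 * (89 / 8 == 11, 89 % 8 == 1) and the value spills over into byte
 * 12. The tables below drive that arithmetic for every field.
 */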

struct ixl_hmc_rxq {
	uint16_t		 head;
	uint8_t			 cpuid;
	uint64_t		 base;
#define IXL_HMC_RXQ_BASE_UNIT		128
	uint16_t		 qlen;
	uint16_t		 dbuff;
#define IXL_HMC_RXQ_DBUFF_UNIT		128
	uint8_t			 hbuff;
#define IXL_HMC_RXQ_HBUFF_UNIT		64
	uint8_t			 dtype;
#define IXL_HMC_RXQ_DTYPE_NOSPLIT	0x0
#define IXL_HMC_RXQ_DTYPE_HSPLIT	0x1
#define IXL_HMC_RXQ_DTYPE_SPLIT_ALWAYS	0x2
	uint8_t			 dsize;
#define IXL_HMC_RXQ_DSIZE_16		0
#define IXL_HMC_RXQ_DSIZE_32		1
	uint8_t			 crcstrip;
	uint8_t			 fc_ena;
	uint8_t			 l2sel;
	uint8_t			 hsplit_0;
	uint8_t			 hsplit_1;
	uint8_t			 showiv;
	uint16_t		 rxmax;
	uint8_t			 tphrdesc_ena;
	uint8_t			 tphwdesc_ena;
	uint8_t			 tphdata_ena;
	uint8_t			 tphhead_ena;
	uint8_t			 lrxqthresh;
	uint8_t			 prefena;
};

static const struct ixl_hmc_pack ixl_hmc_pack_rxq[] = {
	{ offsetof(struct ixl_hmc_rxq, head),		13,	0 },
	{ offsetof(struct ixl_hmc_rxq, cpuid),		8,	13 },
	{ offsetof(struct ixl_hmc_rxq, base),		57,	32 },
	{ offsetof(struct ixl_hmc_rxq, qlen),		13,	89 },
	{ offsetof(struct ixl_hmc_rxq, dbuff),		7,	102 },
	{ offsetof(struct ixl_hmc_rxq, hbuff),		5,	109 },
	{ offsetof(struct ixl_hmc_rxq, dtype),		2,	114 },
	{ offsetof(struct ixl_hmc_rxq, dsize),		1,	116 },
	{ offsetof(struct ixl_hmc_rxq, crcstrip),	1,	117 },
	{ offsetof(struct ixl_hmc_rxq, fc_ena),		1,	118 },
	{ offsetof(struct ixl_hmc_rxq, l2sel),		1,	119 },
	{ offsetof(struct ixl_hmc_rxq, hsplit_0),	4,	120 },
	{ offsetof(struct ixl_hmc_rxq, hsplit_1),	2,	124 },
	{ offsetof(struct ixl_hmc_rxq, showiv),		1,	127 },
	{ offsetof(struct ixl_hmc_rxq, rxmax),		14,	174 },
	{ offsetof(struct ixl_hmc_rxq, tphrdesc_ena),	1,	193 },
	{ offsetof(struct ixl_hmc_rxq, tphwdesc_ena),	1,	194 },
	{ offsetof(struct ixl_hmc_rxq, tphdata_ena),	1,	195 },
	{ offsetof(struct ixl_hmc_rxq, tphhead_ena),	1,	196 },
	{ offsetof(struct ixl_hmc_rxq, lrxqthresh),	3,	198 },
	{ offsetof(struct ixl_hmc_rxq, prefena),	1,	201 },
};

#define IXL_HMC_RXQ_MINSIZE (201 + 1)

struct ixl_hmc_txq {
	uint16_t		head;
	uint8_t			new_context;
	uint64_t		base;
#define IXL_HMC_TXQ_BASE_UNIT		128
	uint8_t			fc_ena;
	uint8_t			timesync_ena;
	uint8_t			fd_ena;
	uint8_t			alt_vlan_ena;
	uint16_t		thead_wb;
	uint8_t			cpuid;
	uint8_t			head_wb_ena;
#define IXL_HMC_TXQ_DESC_WB		0
#define IXL_HMC_TXQ_HEAD_WB		1
	uint16_t		qlen;
	uint8_t			tphrdesc_ena;
	uint8_t			tphrpacket_ena;
	uint8_t			tphwdesc_ena;
	uint64_t		head_wb_addr;
	uint32_t		crc;
	uint16_t		rdylist;
	uint8_t			rdylist_act;
};

static const struct ixl_hmc_pack ixl_hmc_pack_txq[] = {
	{ offsetof(struct ixl_hmc_txq, head),		13,	0 },
	{ offsetof(struct ixl_hmc_txq, new_context),	1,	30 },
	{ offsetof(struct ixl_hmc_txq, base),		57,	32 },
	{ offsetof(struct ixl_hmc_txq, fc_ena),		1,	89 },
	{ offsetof(struct ixl_hmc_txq, timesync_ena),	1,	90 },
	{ offsetof(struct ixl_hmc_txq, fd_ena),		1,	91 },
	{ offsetof(struct ixl_hmc_txq, alt_vlan_ena),	1,	92 },
	{ offsetof(struct ixl_hmc_txq, cpuid),		8,	96 },
/* line 1 */
	{ offsetof(struct ixl_hmc_txq, thead_wb),	13,	0 + 128 },
	{ offsetof(struct ixl_hmc_txq, head_wb_ena),	1,	32 + 128 },
	{ offsetof(struct ixl_hmc_txq, qlen),		13,	33 + 128 },
	{ offsetof(struct ixl_hmc_txq, tphrdesc_ena),	1,	46 + 128 },
	{ offsetof(struct ixl_hmc_txq, tphrpacket_ena),	1,	47 + 128 },
	{ offsetof(struct ixl_hmc_txq, tphwdesc_ena),	1,	48 + 128 },
	{ offsetof(struct ixl_hmc_txq, head_wb_addr),	64,	64 + 128 },
/* line 7 */
	{ offsetof(struct ixl_hmc_txq, crc),		32,	0 + (7*128) },
	{ offsetof(struct ixl_hmc_txq, rdylist),	10,	84 + (7*128) },
	{ offsetof(struct ixl_hmc_txq, rdylist_act),	1,	94 + (7*128) },
};

#define IXL_HMC_TXQ_MINSIZE (94 + (7*128) + 1)

struct ixl_tx_map {
	struct mbuf		*txm_m;
	bus_dmamap_t		 txm_map;
	unsigned int		 txm_eop;
};

struct ixl_tx_ring {
	unsigned int		 txr_prod;
	unsigned int		 txr_cons;

	struct ixl_tx_map	*txr_maps;
	struct ixl_dmamem	 txr_mem;

	bus_size_t		 txr_tail;
	unsigned int		 txr_qid;
};

struct ixl_rx_map {
	struct mbuf		*rxm_m;
	bus_dmamap_t		 rxm_map;
};

struct ixl_rx_ring {
	struct ixl_softc	*rxr_sc;

	struct if_rxring	 rxr_acct;
	struct timeout		 rxr_refill;

	unsigned int		 rxr_prod;
	unsigned int		 rxr_cons;

	struct ixl_rx_map	*rxr_maps;
	struct ixl_dmamem	 rxr_mem;

	struct mbuf		*rxr_m_head;
	struct mbuf		**rxr_m_tail;

	bus_size_t		 rxr_tail;
	unsigned int		 rxr_qid;
};

struct ixl_atq {
	struct ixl_aq_desc	  iatq_desc;
	void			 *iatq_arg;
	void			(*iatq_fn)(struct ixl_softc *, void *);
};
SIMPLEQ_HEAD(ixl_atq_list, ixl_atq);

struct ixl_softc {
	struct device		 sc_dev;
	struct arpcom		 sc_ac;
	struct ifmedia		 sc_media;
	uint64_t		 sc_media_status;
	uint64_t		 sc_media_active;

	pci_chipset_tag_t	 sc_pc;
	pci_intr_handle_t	 sc_ih;
	void			*sc_ihc;
	pcitag_t		 sc_tag;

	bus_dma_tag_t		 sc_dmat;
	bus_space_tag_t		 sc_memt;
	bus_space_handle_t	 sc_memh;
	bus_size_t		 sc_mems;

	uint8_t			 sc_pf_id;
	uint16_t		 sc_uplink_seid;	/* le */
	uint16_t		 sc_downlink_seid;	/* le */
	uint16_t		 sc_veb_seid;		/* le */
	uint16_t		 sc_vsi_number;		/* le */
	uint16_t		 sc_seid;
	unsigned int		 sc_base_queue;

	struct ixl_dmamem	 sc_scratch;

	const struct ixl_aq_regs *
				 sc_aq_regs;

	struct mutex		 sc_atq_mtx;
	struct ixl_dmamem	 sc_atq;
	unsigned int		 sc_atq_prod;
	unsigned int		 sc_atq_cons;

	struct ixl_dmamem	 sc_arq;
	struct task		 sc_arq_task;
	struct ixl_aq_bufs	 sc_arq_idle;
	struct ixl_aq_bufs	 sc_arq_live;
	struct if_rxring	 sc_arq_ring;
	unsigned int		 sc_arq_prod;
	unsigned int		 sc_arq_cons;

	struct task		 sc_link_state_task;
	struct ixl_atq		 sc_link_state_atq;

	struct ixl_dmamem	 sc_hmc_sd;
	struct ixl_dmamem	 sc_hmc_pd;
	struct ixl_hmc_entry	 sc_hmc_entries[IXL_HMC_COUNT];

	unsigned int		 sc_tx_ring_ndescs;
	unsigned int		 sc_rx_ring_ndescs;
	unsigned int		 sc_nqueues;	/* 1 << sc_nqueues */

	struct rwlock		 sc_cfg_lock;
	unsigned int		 sc_dead;

	struct rwlock		 sc_sff_lock;
};
#define DEVNAME(_sc) ((_sc)->sc_dev.dv_xname)

#define delaymsec(_ms)	delay(1000 * (_ms))

static void	ixl_clear_hw(struct ixl_softc *);
static int	ixl_pf_reset(struct ixl_softc *);

static int	ixl_dmamem_alloc(struct ixl_softc *, struct ixl_dmamem *,
		    bus_size_t, u_int);
static void	ixl_dmamem_free(struct ixl_softc *, struct ixl_dmamem *);

static int	ixl_arq_fill(struct ixl_softc *);
static void	ixl_arq_unfill(struct ixl_softc *);

static int	ixl_atq_poll(struct ixl_softc *, struct ixl_aq_desc *,
		    unsigned int);
static void	ixl_atq_set(struct ixl_atq *,
		    void (*)(struct ixl_softc *, void *), void *);
static void	ixl_atq_post(struct ixl_softc *, struct ixl_atq *);
static void	ixl_atq_done(struct ixl_softc *);
static void	ixl_atq_exec(struct ixl_softc *, struct ixl_atq *,
		    const char *);
static int	ixl_get_version(struct ixl_softc *);
static int	ixl_pxe_clear(struct ixl_softc *);
static int	ixl_lldp_shut(struct ixl_softc *);
static int	ixl_get_mac(struct ixl_softc *);
static int	ixl_get_switch_config(struct ixl_softc *);
static int	ixl_phy_mask_ints(struct ixl_softc *);
static int	ixl_get_phy_abilities(struct ixl_softc *, uint64_t *);
static int	ixl_restart_an(struct ixl_softc *);
static int	ixl_hmc(struct ixl_softc *);
static void	ixl_hmc_free(struct ixl_softc *);
static int	ixl_get_vsi(struct ixl_softc *);
static int	ixl_set_vsi(struct ixl_softc *);
static int	ixl_get_link_status(struct ixl_softc *);
static int	ixl_set_link_status(struct ixl_softc *,
		    const struct ixl_aq_desc *);
static int	ixl_add_macvlan(struct ixl_softc *, uint8_t *, uint16_t,
		    uint16_t);
static int	ixl_remove_macvlan(struct ixl_softc *, uint8_t *, uint16_t,
		    uint16_t);
static void	ixl_link_state_update(void *);
static void	ixl_arq(void *);
static void	ixl_hmc_pack(void *, const void *,
		    const struct ixl_hmc_pack *, unsigned int);

static int	ixl_get_sffpage(struct ixl_softc *, struct if_sffpage *);
static int	ixl_sff_get_byte(struct ixl_softc *, uint8_t, uint32_t,
		    uint8_t *);
static int	ixl_sff_set_byte(struct ixl_softc *, uint8_t, uint32_t,
		    uint8_t);

static int	ixl_match(struct device *, void *, void *);
static void	ixl_attach(struct device *, struct device *, void *);

static void	ixl_media_add(struct ixl_softc *, uint64_t);
static int	ixl_media_change(struct ifnet *);
static void	ixl_media_status(struct ifnet *, struct ifmediareq *);
static void	ixl_watchdog(struct ifnet *);
static int	ixl_ioctl(struct ifnet *, u_long, caddr_t);
static void	ixl_start(struct ifqueue *);
static int	ixl_intr(void *);
static int	ixl_up(struct ixl_softc *);
static int	ixl_down(struct ixl_softc *);
static int	ixl_iff(struct ixl_softc *);

static struct ixl_tx_ring *
		ixl_txr_alloc(struct ixl_softc *, unsigned int);
static void	ixl_txr_qdis(struct ixl_softc *, struct ixl_tx_ring *, int);
static void	ixl_txr_config(struct ixl_softc *, struct ixl_tx_ring *);
static int	ixl_txr_enabled(struct ixl_softc *, struct ixl_tx_ring *);
static int	ixl_txr_disabled(struct ixl_softc *, struct ixl_tx_ring *);
static void	ixl_txr_unconfig(struct ixl_softc *, struct ixl_tx_ring *);
static void	ixl_txr_clean(struct ixl_softc *, struct ixl_tx_ring *);
static void	ixl_txr_free(struct ixl_softc *, struct ixl_tx_ring *);
static int	ixl_txeof(struct ixl_softc *, struct ifqueue *);

static struct ixl_rx_ring *
		ixl_rxr_alloc(struct ixl_softc *, unsigned int);
static void	ixl_rxr_config(struct ixl_softc *, struct ixl_rx_ring *);
static int	ixl_rxr_enabled(struct ixl_softc *, struct ixl_rx_ring *);
static int	ixl_rxr_disabled(struct ixl_softc *, struct ixl_rx_ring *);
static void	ixl_rxr_unconfig(struct ixl_softc *, struct ixl_rx_ring *);
static void	ixl_rxr_clean(struct ixl_softc *, struct ixl_rx_ring *);
static void	ixl_rxr_free(struct ixl_softc *, struct ixl_rx_ring *);
static int	ixl_rxeof(struct ixl_softc *, struct ifiqueue *);
static void	ixl_rxfill(struct ixl_softc *, struct ixl_rx_ring *);
static void	ixl_rxrefill(void *);
static int	ixl_rxrinfo(struct ixl_softc *, struct if_rxrinfo *);

struct cfdriver ixl_cd = {
	NULL,
	"ixl",
	DV_IFNET,
};

struct cfattach ixl_ca = {
	sizeof(struct ixl_softc),
	ixl_match,
	ixl_attach,
};

static const struct ixl_phy_type ixl_phy_type_map[] = {
	{ 1ULL << IXL_PHY_TYPE_SGMII,		IFM_1000_SGMII },
	{ 1ULL << IXL_PHY_TYPE_1000BASE_KX,	IFM_1000_KX },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_KX4,	IFM_10G_KX4 },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_KR,	IFM_10G_KR },
	{ 1ULL << IXL_PHY_TYPE_40GBASE_KR4,	IFM_40G_KR4 },
	{ 1ULL << IXL_PHY_TYPE_XAUI |
	  1ULL << IXL_PHY_TYPE_XFI,		IFM_10G_CX4 },
	{ 1ULL << IXL_PHY_TYPE_SFI,		IFM_10G_SFI },
	{ 1ULL << IXL_PHY_TYPE_XLAUI |
	  1ULL << IXL_PHY_TYPE_XLPPI,		IFM_40G_XLPPI },
	{ 1ULL << IXL_PHY_TYPE_40GBASE_CR4_CU |
	  1ULL << IXL_PHY_TYPE_40GBASE_CR4,	IFM_40G_CR4 },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_CR1_CU |
	  1ULL << IXL_PHY_TYPE_10GBASE_CR1,	IFM_10G_CR1 },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_AOC,	IFM_10G_AOC },
	{ 1ULL << IXL_PHY_TYPE_40GBASE_AOC,	IFM_40G_AOC },
	{ 1ULL << IXL_PHY_TYPE_100BASE_TX,	IFM_100_TX },
	{ 1ULL << IXL_PHY_TYPE_1000BASE_T_OPTICAL |
	  1ULL << IXL_PHY_TYPE_1000BASE_T,	IFM_1000_T },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_T,	IFM_10G_T },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_SR,	IFM_10G_SR },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_LR,	IFM_10G_LR },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_SFPP_CU,	IFM_10G_SFP_CU },
	{ 1ULL << IXL_PHY_TYPE_40GBASE_SR4,	IFM_40G_SR4 },
	{ 1ULL << IXL_PHY_TYPE_40GBASE_LR4,	IFM_40G_LR4 },
	{ 1ULL << IXL_PHY_TYPE_1000BASE_SX,	IFM_1000_SX },
	{ 1ULL << IXL_PHY_TYPE_1000BASE_LX,	IFM_1000_LX },
	{ 1ULL << IXL_PHY_TYPE_20GBASE_KR2,	IFM_20G_KR2 },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_KR,	IFM_25G_KR },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_CR,	IFM_25G_CR },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_SR,	IFM_25G_SR },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_LR,	IFM_25G_LR },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_AOC,	IFM_25G_AOC },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_ACC,	IFM_25G_CR },
};

static const struct ixl_speed_type ixl_speed_type_map[] = {
	{ IXL_AQ_PHY_LINK_SPEED_40GB,		IF_Gbps(40) },
	{ IXL_AQ_PHY_LINK_SPEED_25GB,		IF_Gbps(25) },
	{ IXL_AQ_PHY_LINK_SPEED_20GB,		IF_Gbps(20) },
	{ IXL_AQ_PHY_LINK_SPEED_10GB,		IF_Gbps(10) },
	{ IXL_AQ_PHY_LINK_SPEED_1000MB,		IF_Mbps(1000) },
	{ IXL_AQ_PHY_LINK_SPEED_100MB,		IF_Mbps(100) },
};
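
/*
 * A minimal sketch of how the table above is searched when
 * translating the link_speed byte from an ixl_aq_link_status reply
 * into an if_baudrate value:
 *
 *	for (i = 0; i < nitems(ixl_speed_type_map); i++) {
 *		if (ixl_speed_type_map[i].dev_speed == link_speed)
 *			return (ixl_speed_type_map[i].net_speed);
 *	}
 *	return (0);
 */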

static const struct ixl_aq_regs ixl_pf_aq_regs = {
	.atq_tail	= I40E_PF_ATQT,
	.atq_tail_mask	= I40E_PF_ATQT_ATQT_MASK,
	.atq_head	= I40E_PF_ATQH,
	.atq_head_mask	= I40E_PF_ATQH_ATQH_MASK,
	.atq_len	= I40E_PF_ATQLEN,
	.atq_bal	= I40E_PF_ATQBAL,
	.atq_bah	= I40E_PF_ATQBAH,
	.atq_len_enable	= I40E_PF_ATQLEN_ATQENABLE_MASK,

	.arq_tail	= I40E_PF_ARQT,
	.arq_tail_mask	= I40E_PF_ARQT_ARQT_MASK,
	.arq_head	= I40E_PF_ARQH,
	.arq_head_mask	= I40E_PF_ARQH_ARQH_MASK,
	.arq_len	= I40E_PF_ARQLEN,
	.arq_bal	= I40E_PF_ARQBAL,
	.arq_bah	= I40E_PF_ARQBAH,
	.arq_len_enable	= I40E_PF_ARQLEN_ARQENABLE_MASK,
};

#ifdef notyet
static const struct ixl_aq_regs ixl_vf_aq_regs = {
	.atq_tail	= I40E_VF_ATQT1,
	.atq_tail_mask	= I40E_VF_ATQT1_ATQT_MASK,
	.atq_head	= I40E_VF_ATQH1,
	.atq_head_mask	= I40E_VF_ATQH1_ATQH_MASK,
	.atq_len	= I40E_VF_ATQLEN1,
	.atq_bal	= I40E_VF_ATQBAL1,
	.atq_bah	= I40E_VF_ATQBAH1,
	.atq_len_enable	= I40E_VF_ATQLEN1_ATQENABLE_MASK,

	.arq_tail	= I40E_VF_ARQT1,
	.arq_tail_mask	= I40E_VF_ARQT1_ARQT_MASK,
	.arq_head	= I40E_VF_ARQH1,
	.arq_head_mask	= I40E_VF_ARQH1_ARQH_MASK,
	.arq_len	= I40E_VF_ARQLEN1,
	.arq_bal	= I40E_VF_ARQBAL1,
	.arq_bah	= I40E_VF_ARQBAH1,
	.arq_len_enable	= I40E_VF_ARQLEN1_ARQENABLE_MASK,
};
#endif

#define ixl_rd(_s, _r) \
	bus_space_read_4((_s)->sc_memt, (_s)->sc_memh, (_r))
#define ixl_wr(_s, _r, _v) \
	bus_space_write_4((_s)->sc_memt, (_s)->sc_memh, (_r), (_v))
#define ixl_barrier(_s, _r, _l, _o) \
	bus_space_barrier((_s)->sc_memt, (_s)->sc_memh, (_r), (_l), (_o))
#define ixl_intr_enable(_s) \
	ixl_wr((_s), I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_INTENA_MASK | \
	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | \
	    (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT))

#define ixl_nqueues(_sc)	(1 << (_sc)->sc_nqueues)

#ifdef __LP64__
#define ixl_dmamem_hi(_ixm)	(uint32_t)(IXL_DMA_DVA(_ixm) >> 32)
#else
#define ixl_dmamem_hi(_ixm)	0
#endif

#define ixl_dmamem_lo(_ixm)	(uint32_t)IXL_DMA_DVA(_ixm)

static inline void
ixl_aq_dva(struct ixl_aq_desc *iaq, bus_addr_t addr)
{
#ifdef __LP64__
	htolem32(&iaq->iaq_param[2], addr >> 32);
#else
	iaq->iaq_param[2] = htole32(0);
#endif
	htolem32(&iaq->iaq_param[3], addr);
}
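
/*
 * Usage sketch: pointing an admin queue command at a DMA buffer
 * before posting it (the pattern the command helpers below follow;
 * "idm" here is a hypothetical ixl_dmamem):
 *
 *	memset(&iaq, 0, sizeof(iaq));
 *	htolem16(&iaq.iaq_flags, IXL_AQ_BUF | IXL_AQ_RD);
 *	htolem16(&iaq.iaq_opcode, IXL_AQ_OP_LIST_FUNC_CAP);
 *	htolem16(&iaq.iaq_datalen, IXL_AQ_BUFLEN);
 *	ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
 */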

#if _BYTE_ORDER == _BIG_ENDIAN
#define HTOLE16(_x)	(uint16_t)(((_x) & 0xff) << 8 | ((_x) & 0xff00) >> 8)
#else
#define HTOLE16(_x)	(_x)
#endif
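
/*
 * HTOLE16 is a compile-time constant byte swap for use in places such
 * as static initialisers, where a function-like htole16() may not be
 * usable; e.g. HTOLE16(0x1234) evaluates to 0x3412 on a big-endian
 * machine and to 0x1234 on a little-endian one.
 */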

static struct rwlock ixl_sff_lock = RWLOCK_INITIALIZER("ixlsff");

static const struct pci_matchid ixl_devices[] = {
#ifdef notyet
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_VF },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_VF_HV },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X710_10G_SFP },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_40G_BP },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X710_10G_BP },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_QSFP_1 },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_QSFP_2 },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X710_10G_QSFP },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X710_10G_BASET },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_20G_BP_1 },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_20G_BP_2 },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X710_T4_10G },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XXV710_25G_BP },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XXV710_25G_SFP28 },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X722_10G_KX },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X722_10G_QSFP },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X722_10G_SFP_1 },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X722_1G },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X722_10G_T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X722_10G_SFP_2 },
};

static int
ixl_match(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid(aux, ixl_devices, nitems(ixl_devices)));
}

static void
ixl_attach(struct device *parent, struct device *self, void *aux)
{
	struct ixl_softc *sc = (struct ixl_softc *)self;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct pci_attach_args *pa = aux;
	pcireg_t memtype;
	uint32_t port, ari, func;
	uint64_t phy_types = 0;
	int tries;

	rw_init(&sc->sc_cfg_lock, "ixlcfg");

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;
	sc->sc_aq_regs = &ixl_pf_aq_regs; /* VF? */

	sc->sc_nqueues = 0; /* 1 << 0 is 1 queue */
	sc->sc_tx_ring_ndescs = 1024;
	sc->sc_rx_ring_ndescs = 1024;

	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, IXL_PCIREG);
	if (pci_mapreg_map(pa, IXL_PCIREG, memtype, 0,
	    &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems, 0)) {
		printf(": unable to map registers\n");
		return;
	}

	sc->sc_base_queue = (ixl_rd(sc, I40E_PFLAN_QALLOC) &
	    I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
	    I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;

	ixl_clear_hw(sc);
	if (ixl_pf_reset(sc) == -1) {
		/* error printed by ixl_pf_reset */
		goto unmap;
	}

	port = ixl_rd(sc, I40E_PFGEN_PORTNUM);
	port &= I40E_PFGEN_PORTNUM_PORT_NUM_MASK;
	port >>= I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
	printf(": port %u", port);

	ari = ixl_rd(sc, I40E_GLPCI_CAPSUP);
	ari &= I40E_GLPCI_CAPSUP_ARI_EN_MASK;
	ari >>= I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;

	func = ixl_rd(sc, I40E_PF_FUNC_RID);
	sc->sc_pf_id = func & (ari ? 0xff : 0x7);

	/* initialise the adminq */

	mtx_init(&sc->sc_atq_mtx, IPL_NET);

	if (ixl_dmamem_alloc(sc, &sc->sc_atq,
	    sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
		printf("\n" "%s: unable to allocate atq\n", DEVNAME(sc));
		goto unmap;
	}

	SIMPLEQ_INIT(&sc->sc_arq_idle);
	SIMPLEQ_INIT(&sc->sc_arq_live);
	if_rxr_init(&sc->sc_arq_ring, 2, IXL_AQ_NUM - 1);
	task_set(&sc->sc_arq_task, ixl_arq, sc);
	sc->sc_arq_cons = 0;
	sc->sc_arq_prod = 0;

	if (ixl_dmamem_alloc(sc, &sc->sc_arq,
	    sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
		printf("\n" "%s: unable to allocate arq\n", DEVNAME(sc));
		goto free_atq;
	}

	if (!ixl_arq_fill(sc)) {
		printf("\n" "%s: unable to fill arq descriptors\n",
		    DEVNAME(sc));
		goto free_arq;
	}

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
	    0, IXL_DMA_LEN(&sc->sc_atq),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
	    0, IXL_DMA_LEN(&sc->sc_arq),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	for (tries = 0; tries < 10; tries++) {
		int rv;

		sc->sc_atq_cons = 0;
		sc->sc_atq_prod = 0;

		ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
		ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
		ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
		ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);

		ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);

		ixl_wr(sc, sc->sc_aq_regs->atq_bal,
		    ixl_dmamem_lo(&sc->sc_atq));
		ixl_wr(sc, sc->sc_aq_regs->atq_bah,
		    ixl_dmamem_hi(&sc->sc_atq));
		ixl_wr(sc, sc->sc_aq_regs->atq_len,
		    sc->sc_aq_regs->atq_len_enable | IXL_AQ_NUM);

		ixl_wr(sc, sc->sc_aq_regs->arq_bal,
		    ixl_dmamem_lo(&sc->sc_arq));
		ixl_wr(sc, sc->sc_aq_regs->arq_bah,
		    ixl_dmamem_hi(&sc->sc_arq));
		ixl_wr(sc, sc->sc_aq_regs->arq_len,
		    sc->sc_aq_regs->arq_len_enable | IXL_AQ_NUM);

		rv = ixl_get_version(sc);
		if (rv == 0)
			break;
		if (rv != ETIMEDOUT) {
			printf(", unable to get firmware version\n");
			goto shutdown;
		}

		delaymsec(100);
	}

	ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);

	if (ixl_pxe_clear(sc) != 0) {
		/* error printed by ixl_pxe_clear */
		goto shutdown;
	}

	if (ixl_get_mac(sc) != 0) {
		/* error printed by ixl_get_mac */
		goto shutdown;
	}

	if (pci_intr_map_msi(pa, &sc->sc_ih) != 0 &&
	    pci_intr_map(pa, &sc->sc_ih) != 0) {
		printf(", unable to map interrupt\n");
		goto shutdown;
	}

	printf(", %s, address %s\n", pci_intr_string(sc->sc_pc, sc->sc_ih),
	    ether_sprintf(sc->sc_ac.ac_enaddr));

	if (ixl_hmc(sc) != 0) {
		/* error printed by ixl_hmc */
		goto shutdown;
	}

	if (ixl_lldp_shut(sc) != 0) {
		/* error printed by ixl_lldp_shut */
		goto free_hmc;
	}

	if (ixl_phy_mask_ints(sc) != 0) {
		/* error printed by ixl_phy_mask_ints */
		goto free_hmc;
	}

	if (ixl_restart_an(sc) != 0) {
		/* error printed by ixl_restart_an */
		goto free_hmc;
	}

	if (ixl_get_switch_config(sc) != 0) {
		/* error printed by ixl_get_switch_config */
		goto free_hmc;
	}

	if (ixl_get_phy_abilities(sc, &phy_types) != 0) {
		/* error printed by ixl_get_phy_abilities */
		goto free_hmc;
	}

	if (ixl_get_link_status(sc) != 0) {
		/* error printed by ixl_get_link_status */
		goto free_hmc;
	}

	if (ixl_dmamem_alloc(sc, &sc->sc_scratch,
	    sizeof(struct ixl_aq_vsi_data), 8) != 0) {
		printf("%s: unable to allocate scratch buffer\n", DEVNAME(sc));
		goto free_hmc;
	}

	if (ixl_get_vsi(sc) != 0) {
		/* error printed by ixl_get_vsi */
		goto free_hmc;
	}

	if (ixl_set_vsi(sc) != 0) {
		/* error printed by ixl_set_vsi */
		goto free_scratch;
	}

	sc->sc_ihc = pci_intr_establish(sc->sc_pc, sc->sc_ih,
	    IPL_NET | IPL_MPSAFE, ixl_intr, sc, DEVNAME(sc));
	if (sc->sc_ihc == NULL) {
		printf("%s: unable to establish interrupt handler\n",
		    DEVNAME(sc));
		goto free_scratch;
	}

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_xflags = IFXF_MPSAFE;
	ifp->if_ioctl = ixl_ioctl;
	ifp->if_qstart = ixl_start;
	ifp->if_watchdog = ixl_watchdog;
	ifp->if_hardmtu = IXL_HARDMTU;
	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
	IFQ_SET_MAXLEN(&ifp->if_snd, 1);

	ifp->if_capabilities = IFCAP_VLAN_MTU;
#if 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
	ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
	    IFCAP_CSUM_UDPv4;
#endif

	ifmedia_init(&sc->sc_media, 0, ixl_media_change, ixl_media_status);

	ixl_media_add(sc, phy_types);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);

	if_attach(ifp);
	ether_ifattach(ifp);

	if_attach_queues(ifp, ixl_nqueues(sc));
	if_attach_iqueues(ifp, ixl_nqueues(sc));

	task_set(&sc->sc_link_state_task, ixl_link_state_update, sc);
	ixl_wr(sc, I40E_PFINT_ICR0_ENA,
	    I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK |
	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK);
	ixl_wr(sc, I40E_PFINT_STAT_CTL0,
	    IXL_NOITR << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT);

	/* remove default mac filter and replace it so we can see vlans */
	ixl_remove_macvlan(sc, sc->sc_ac.ac_enaddr, 0, 0);
	ixl_remove_macvlan(sc, sc->sc_ac.ac_enaddr, 0,
	    IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
	ixl_add_macvlan(sc, sc->sc_ac.ac_enaddr, 0,
	    IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
	ixl_add_macvlan(sc, etherbroadcastaddr, 0,
	    IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);

	ixl_intr_enable(sc);

	return;
free_scratch:
	ixl_dmamem_free(sc, &sc->sc_scratch);
free_hmc:
	ixl_hmc_free(sc);
shutdown:
	ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
	ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
	ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
	ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);

	ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0);
	ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0);
	ixl_wr(sc, sc->sc_aq_regs->atq_len, 0);
1673 	ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0);
1674 	ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0);
1675 	ixl_wr(sc, sc->sc_aq_regs->arq_len, 0);
1676 
1677 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1678 	    0, IXL_DMA_LEN(&sc->sc_arq),
1679 	    BUS_DMASYNC_POSTREAD);
1680 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1681 	    0, IXL_DMA_LEN(&sc->sc_atq),
1682 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1683 
1684 	ixl_arq_unfill(sc);
1685 free_arq:
1686 	ixl_dmamem_free(sc, &sc->sc_arq);
1687 free_atq:
1688 	ixl_dmamem_free(sc, &sc->sc_atq);
1689 unmap:
1690 	bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
1691 	sc->sc_mems = 0;
1692 }
1693 
1694 static void
1695 ixl_media_add(struct ixl_softc *sc, uint64_t phy_types)
1696 {
1697 	struct ifmedia *ifm = &sc->sc_media;
1698 	const struct ixl_phy_type *itype;
1699 	unsigned int i;
1700 
1701 	for (i = 0; i < nitems(ixl_phy_type_map); i++) {
1702 		itype = &ixl_phy_type_map[i];
1703 
1704 		if (ISSET(phy_types, itype->phy_type))
1705 			ifmedia_add(ifm, IFM_ETHER | itype->ifm_type, 0, NULL);
1706 	}
1707 }
1708 
1709 static int
1710 ixl_media_change(struct ifnet *ifp)
1711 {
1712 	/* ignore? */
1713 	return (EOPNOTSUPP);
1714 }
1715 
1716 static void
1717 ixl_media_status(struct ifnet *ifp, struct ifmediareq *ifm)
1718 {
1719 	struct ixl_softc *sc = ifp->if_softc;
1720 
1721 	NET_ASSERT_LOCKED();
1722 
1723 	ifm->ifm_status = sc->sc_media_status;
1724 	ifm->ifm_active = sc->sc_media_active;
1725 }
1726 
1727 static void
1728 ixl_watchdog(struct ifnet *ifp)
1729 {
1730 
1731 }
1732 
1733 int
1734 ixl_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1735 {
1736 	struct ixl_softc *sc = (struct ixl_softc *)ifp->if_softc;
1737 	struct ifreq *ifr = (struct ifreq *)data;
1738 	uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN];
1739 	int aqerror, error = 0;
1740 
1741 	switch (cmd) {
1742 	case SIOCSIFADDR:
1743 		ifp->if_flags |= IFF_UP;
1744 		/* FALLTHROUGH */
1745 
1746 	case SIOCSIFFLAGS:
1747 		if (ISSET(ifp->if_flags, IFF_UP)) {
1748 			if (ISSET(ifp->if_flags, IFF_RUNNING))
1749 				error = ENETRESET;
1750 			else
1751 				error = ixl_up(sc);
1752 		} else {
1753 			if (ISSET(ifp->if_flags, IFF_RUNNING))
1754 				error = ixl_down(sc);
1755 		}
1756 		break;
1757 
1758 	case SIOCGIFMEDIA:
1759 	case SIOCSIFMEDIA:
1760 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
1761 		break;
1762 
1763 	case SIOCGIFRXR:
1764 		error = ixl_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
1765 		break;
1766 
1767 	case SIOCADDMULTI:
1768 		if (ether_addmulti(ifr, &sc->sc_ac) == ENETRESET) {
1769 			error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
1770 			if (error != 0)
1771 				return (error);
1772 
1773 			aqerror = ixl_add_macvlan(sc, addrlo, 0,
1774 			    IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
1775 			if (aqerror == IXL_AQ_RC_ENOSPC) {
1776 				ether_delmulti(ifr, &sc->sc_ac);
1777 				error = ENOSPC;
1778 			}
1779 
1780 			if (sc->sc_ac.ac_multirangecnt > 0) {
1781 				SET(ifp->if_flags, IFF_ALLMULTI);
1782 				error = ENETRESET;
1783 			}
1784 		}
1785 		break;
1786 
1787 	case SIOCDELMULTI:
1788 		if (ether_delmulti(ifr, &sc->sc_ac) == ENETRESET) {
1789 			error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
1790 			if (error != 0)
1791 				return (error);
1792 
1793 			ixl_remove_macvlan(sc, addrlo, 0,
1794 			    IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1795 
1796 			if (ISSET(ifp->if_flags, IFF_ALLMULTI) &&
1797 			    sc->sc_ac.ac_multirangecnt == 0) {
1798 				CLR(ifp->if_flags, IFF_ALLMULTI);
1799 				error = ENETRESET;
1800 			}
1801 		}
1802 		break;
1803 
1804 	case SIOCGIFSFFPAGE:
1805 		error = rw_enter(&ixl_sff_lock, RW_WRITE|RW_INTR);
1806 		if (error != 0)
1807 			break;
1808 
1809 		error = ixl_get_sffpage(sc, (struct if_sffpage *)data);
1810 		rw_exit(&ixl_sff_lock);
1811 		break;
1812 
1813 	default:
1814 		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
1815 		break;
1816 	}
1817 
1818 	if (error == ENETRESET)
1819 		error = ixl_iff(sc);
1820 
1821 	return (error);
1822 }
1823 
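/*
 * the hmc backing memory is carved into one region per object type; an
 * object's kva is its region base plus the index multiplied by the
 * object size recorded at hmc setup time.
 */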
1824 static inline void *
1825 ixl_hmc_kva(struct ixl_softc *sc, unsigned int type, unsigned int i)
1826 {
1827 	uint8_t *kva = IXL_DMA_KVA(&sc->sc_hmc_pd);
1828 	struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];
1829 
1830 	if (i >= e->hmc_count)
1831 		return (NULL);
1832 
1833 	kva += e->hmc_base;
1834 	kva += i * e->hmc_size;
1835 
1836 	return (kva);
1837 }
1838 
1839 static inline size_t
1840 ixl_hmc_len(struct ixl_softc *sc, unsigned int type)
1841 {
1842 	struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];
1843 
1844 	return (e->hmc_size);
1845 }
1846 
1847 static int
1848 ixl_up(struct ixl_softc *sc)
1849 {
1850 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1851 	struct ixl_rx_ring *rxr;
1852 	struct ixl_tx_ring *txr;
1853 	unsigned int nqueues, i;
1854 	uint32_t reg;
1855 	int rv = ENOMEM;
1856 
1857 	nqueues = ixl_nqueues(sc);
1858 	KASSERT(nqueues == 1); /* XXX */
1859 
1860 	rw_enter_write(&sc->sc_cfg_lock);
1861 	if (sc->sc_dead) {
1862 		rw_exit_write(&sc->sc_cfg_lock);
1863 		return (ENXIO);
1864 	}
1865 
1866 	/* allocation is the only thing that can fail, so do it up front */
1867 	for (i = 0; i < nqueues; i++) {
1868 		rxr = ixl_rxr_alloc(sc, i);
1869 		if (rxr == NULL)
1870 			goto free;
1871 
1872 		txr = ixl_txr_alloc(sc, i);
1873 		if (txr == NULL) {
1874 			ixl_rxr_free(sc, rxr);
1875 			goto free;
1876 		}
1877 
1878 		ifp->if_iqs[i]->ifiq_softc = rxr;
1879 		ifp->if_ifqs[i]->ifq_softc = txr;
1880 	}
1881 
1882 	/* XXX wait 50ms from completion of last RX queue disable */
1883 
1884 	for (i = 0; i < nqueues; i++) {
1885 		rxr = ifp->if_iqs[i]->ifiq_softc;
1886 		txr = ifp->if_ifqs[i]->ifq_softc;
1887 
1888 		ixl_txr_qdis(sc, txr, 1);
1889 
1890 		ixl_rxr_config(sc, rxr);
1891 		ixl_txr_config(sc, txr);
1892 
1893 		ixl_wr(sc, I40E_QTX_CTL(i), I40E_QTX_CTL_PF_QUEUE |
1894 		    (sc->sc_pf_id << I40E_QTX_CTL_PF_INDX_SHIFT));
1895 
1896 		ixl_wr(sc, rxr->rxr_tail, 0);
1897 		ixl_rxfill(sc, rxr);
1898 
1899 		reg = ixl_rd(sc, I40E_QRX_ENA(i));
1900 		SET(reg, I40E_QRX_ENA_QENA_REQ_MASK);
1901 		ixl_wr(sc, I40E_QRX_ENA(i), reg);
1902 
1903 		reg = ixl_rd(sc, I40E_QTX_ENA(i));
1904 		SET(reg, I40E_QTX_ENA_QENA_REQ_MASK);
1905 		ixl_wr(sc, I40E_QTX_ENA(i), reg);
1906 	}
1907 
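	/*
	 * the QENA_REQ bits set above only request a state change; poll
	 * until the hardware reports via QENA_STAT that each ring is
	 * actually running.
	 */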
1908 	for (i = 0; i < nqueues; i++) {
1909 		rxr = ifp->if_iqs[i]->ifiq_softc;
1910 		txr = ifp->if_ifqs[i]->ifq_softc;
1911 
1912 		if (ixl_rxr_enabled(sc, rxr) != 0)
1913 			goto down;
1914 
1915 		if (ixl_txr_enabled(sc, txr) != 0)
1916 			goto down;
1917 	}
1918 
1919 	SET(ifp->if_flags, IFF_RUNNING);
1920 
1921 	ixl_wr(sc, I40E_PFINT_LNKLST0,
1922 	    (I40E_INTR_NOTX_QUEUE << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
1923 	    (I40E_QUEUE_TYPE_RX << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
1924 
1925 	ixl_wr(sc, I40E_QINT_RQCTL(I40E_INTR_NOTX_QUEUE),
1926 	    (I40E_INTR_NOTX_INTR << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
1927 	    (I40E_ITR_INDEX_RX << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
1928 	    (I40E_INTR_NOTX_RX_QUEUE << I40E_QINT_RQCTL_MSIX0_INDX_SHIFT) |
1929 	    (I40E_INTR_NOTX_QUEUE << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
1930 	    (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
1931 	    I40E_QINT_RQCTL_CAUSE_ENA_MASK);
1932 
1933 	ixl_wr(sc, I40E_QINT_TQCTL(I40E_INTR_NOTX_QUEUE),
1934 	    (I40E_INTR_NOTX_INTR << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
1935 	    (I40E_ITR_INDEX_TX << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
1936 	    (I40E_INTR_NOTX_TX_QUEUE << I40E_QINT_TQCTL_MSIX0_INDX_SHIFT) |
1937 	    (I40E_QUEUE_TYPE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
1938 	    (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT) |
1939 	    I40E_QINT_TQCTL_CAUSE_ENA_MASK);
1940 
1941 	ixl_wr(sc, I40E_PFINT_ITR0(0), 0x7a);
1942 	ixl_wr(sc, I40E_PFINT_ITR0(1), 0x7a);
1943 	ixl_wr(sc, I40E_PFINT_ITR0(2), 0);
1944 
1945 	rw_exit_write(&sc->sc_cfg_lock);
1946 
1947 	return (ENETRESET);
1948 
1949 free:
1950 	for (i = 0; i < nqueues; i++) {
1951 		rxr = ifp->if_iqs[i]->ifiq_softc;
1952 		txr = ifp->if_ifqs[i]->ifq_softc;
1953 
1954 		if (rxr == NULL) {
1955 			/*
1956 			 * tx and rx get set at the same time, so if one
1957 			 * is NULL, the other is too.
1958 			 */
1959 			continue;
1960 		}
1961 
1962 		ixl_txr_free(sc, txr);
1963 		ixl_rxr_free(sc, rxr);
1964 	}
1965 	rw_exit_write(&sc->sc_cfg_lock);
1966 	return (rv);
1967 down:
1968 	rw_exit_write(&sc->sc_cfg_lock);
1969 	ixl_down(sc);
1970 	return (ETIMEDOUT);
1971 }
1972 
1973 static int
1974 ixl_iff(struct ixl_softc *sc)
1975 {
1976 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1977 	struct ixl_atq iatq;
1978 	struct ixl_aq_desc *iaq;
1979 	struct ixl_aq_vsi_promisc_param *param;
1980 
1981 	if (!ISSET(ifp->if_flags, IFF_RUNNING))
1982 		return (0);
1983 
1984 	memset(&iatq, 0, sizeof(iatq));
1985 
1986 	iaq = &iatq.iatq_desc;
1987 	iaq->iaq_opcode = htole16(IXL_AQ_OP_SET_VSI_PROMISC);
1988 
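	/*
	 * broadcast and vlan promiscuity are always on; unicast and
	 * multicast promiscuity follow IFF_PROMISC and IFF_ALLMULTI.
	 */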
1989 	param = (struct ixl_aq_vsi_promisc_param *)&iaq->iaq_param;
1990 	param->flags = htole16(IXL_AQ_VSI_PROMISC_FLAG_BCAST |
1991 	    IXL_AQ_VSI_PROMISC_FLAG_VLAN);
1992 	if (ISSET(ifp->if_flags, IFF_PROMISC)) {
1993 		param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
1994 		    IXL_AQ_VSI_PROMISC_FLAG_MCAST);
1995 	} else if (ISSET(ifp->if_flags, IFF_ALLMULTI)) {
1996 		param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_MCAST);
1997 	}
1998 	param->valid_flags = htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
1999 	    IXL_AQ_VSI_PROMISC_FLAG_MCAST | IXL_AQ_VSI_PROMISC_FLAG_BCAST |
2000 	    IXL_AQ_VSI_PROMISC_FLAG_VLAN);
2001 	param->seid = sc->sc_seid;
2002 
2003 	ixl_atq_exec(sc, &iatq, "ixliff");
2004 
2005 	if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK))
2006 		return (EIO);
2007 
2008 	return (0);
2009 }
2010 
2011 static int
2012 ixl_down(struct ixl_softc *sc)
2013 {
2014 	struct ifnet *ifp = &sc->sc_ac.ac_if;
2015 	struct ixl_rx_ring *rxr;
2016 	struct ixl_tx_ring *txr;
2017 	unsigned int nqueues, i;
2018 	uint32_t reg;
2019 	int error = 0;
2020 
2021 	nqueues = ixl_nqueues(sc);
2022 
2023 	rw_enter_write(&sc->sc_cfg_lock);
2024 
2025 	CLR(ifp->if_flags, IFF_RUNNING);
2026 
2027 	NET_UNLOCK();
2028 
2029 	/* mask interrupts */
2030 	reg = ixl_rd(sc, I40E_QINT_RQCTL(I40E_INTR_NOTX_QUEUE));
2031 	CLR(reg, I40E_QINT_RQCTL_CAUSE_ENA_MASK);
2032 	ixl_wr(sc, I40E_QINT_RQCTL(I40E_INTR_NOTX_QUEUE), reg);
2033 
2034 	reg = ixl_rd(sc, I40E_QINT_TQCTL(I40E_INTR_NOTX_QUEUE));
2035 	CLR(reg, I40E_QINT_TQCTL_CAUSE_ENA_MASK);
2036 	ixl_wr(sc, I40E_QINT_TQCTL(I40E_INTR_NOTX_QUEUE), reg);
2037 
2038 	ixl_wr(sc, I40E_PFINT_LNKLST0, I40E_QUEUE_TYPE_EOL);
2039 
2040 	/* make sure no hw-generated work is still in flight */
2041 	intr_barrier(sc->sc_ihc);
2042 	for (i = 0; i < nqueues; i++) {
2043 		rxr = ifp->if_iqs[i]->ifiq_softc;
2044 		txr = ifp->if_ifqs[i]->ifq_softc;
2045 
2046 		ixl_txr_qdis(sc, txr, 0);
2047 
2048 		ifq_barrier(ifp->if_ifqs[i]);
2049 
2050 		timeout_del_barrier(&rxr->rxr_refill);
2051 	}
2052 
2053 	/* XXX wait at least 400 usec for all tx queues in one go */
2054 	delay(500);
2055 
2056 	for (i = 0; i < nqueues; i++) {
2057 		rxr = ifp->if_iqs[i]->ifiq_softc;
2058 		txr = ifp->if_ifqs[i]->ifq_softc;
2059 
2060 		reg = ixl_rd(sc, I40E_QTX_ENA(i));
2061 		CLR(reg, I40E_QTX_ENA_QENA_REQ_MASK);
2062 		ixl_wr(sc, I40E_QTX_ENA(i), reg);
2063 
2064 		reg = ixl_rd(sc, I40E_QRX_ENA(i));
2065 		CLR(reg, I40E_QRX_ENA_QENA_REQ_MASK);
2066 		ixl_wr(sc, I40E_QRX_ENA(i), reg);
2067 	}
2068 
2069 	for (i = 0; i < nqueues; i++) {
2070 		rxr = ifp->if_iqs[i]->ifiq_softc;
2071 		txr = ifp->if_ifqs[i]->ifq_softc;
2072 
2073 		if (ixl_txr_disabled(sc, txr) != 0)
2074 			goto die;
2075 
2076 		if (ixl_rxr_disabled(sc, rxr) != 0)
2077 			goto die;
2078 	}
2079 
2080 	for (i = 0; i < nqueues; i++) {
2081 		rxr = ifp->if_iqs[i]->ifiq_softc;
2082 		txr = ifp->if_ifqs[i]->ifq_softc;
2083 
2084 		ixl_txr_unconfig(sc, txr);
2085 		ixl_rxr_unconfig(sc, rxr);
2086 
2087 		ixl_txr_clean(sc, txr);
2088 		ixl_rxr_clean(sc, rxr);
2089 
2090 		ixl_txr_free(sc, txr);
2091 		ixl_rxr_free(sc, rxr);
2092 
2093 		ifp->if_iqs[i]->ifiq_softc = NULL;
2094 		ifp->if_ifqs[i]->ifq_softc = NULL;
2095 	}
2096 
2097 out:
2098 	rw_exit_write(&sc->sc_cfg_lock);
2099 	NET_LOCK();
2100 	return (error);
2101 die:
2102 	sc->sc_dead = 1;
2103 	log(LOG_CRIT, "%s: failed to shut down rings\n", DEVNAME(sc));
2104 	error = ETIMEDOUT;
2105 	goto out;
2106 }
2107 
2108 static struct ixl_tx_ring *
2109 ixl_txr_alloc(struct ixl_softc *sc, unsigned int qid)
2110 {
2111 	struct ixl_tx_ring *txr;
2112 	struct ixl_tx_map *maps, *txm;
2113 	unsigned int i;
2114 
2115 	txr = malloc(sizeof(*txr), M_DEVBUF, M_WAITOK|M_CANFAIL);
2116 	if (txr == NULL)
2117 		return (NULL);
2118 
2119 	maps = mallocarray(sizeof(*maps),
2120 	    sc->sc_tx_ring_ndescs, M_DEVBUF, M_WAITOK|M_CANFAIL|M_ZERO);
2121 	if (maps == NULL)
2122 		goto free;
2123 
2124 	if (ixl_dmamem_alloc(sc, &txr->txr_mem,
2125 	    sizeof(struct ixl_tx_desc) * sc->sc_tx_ring_ndescs,
2126 	    IXL_TX_QUEUE_ALIGN) != 0)
2127 		goto freemap;
2128 
2129 	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2130 		txm = &maps[i];
2131 
2132 		if (bus_dmamap_create(sc->sc_dmat,
2133 		    IXL_HARDMTU, IXL_TX_PKT_DESCS, IXL_HARDMTU, 0,
2134 		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
2135 		    &txm->txm_map) != 0)
2136 			goto uncreate;
2137 
2138 		txm->txm_eop = -1;
2139 		txm->txm_m = NULL;
2140 	}
2141 
2142 	txr->txr_cons = txr->txr_prod = 0;
2143 	txr->txr_maps = maps;
2144 
2145 	txr->txr_tail = I40E_QTX_TAIL(qid);
2146 	txr->txr_qid = qid;
2147 
2148 	return (txr);
2149 
2150 uncreate:
2151 	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2152 		txm = &maps[i];
2153 
2154 		if (txm->txm_map == NULL)
2155 			continue;
2156 
2157 		bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
2158 	}
2159 
2160 	ixl_dmamem_free(sc, &txr->txr_mem);
2161 freemap:
2162 	free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_tx_ring_ndescs);
2163 free:
2164 	free(txr, M_DEVBUF, sizeof(*txr));
2165 	return (NULL);
2166 }
2167 
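/*
 * each GLLAN_TXPRE_QDIS register covers 128 queues, so the absolute
 * queue id selects the register and the remainder goes into the QINDX
 * field along with a set or clear qdis bit.
 */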
2168 static void
2169 ixl_txr_qdis(struct ixl_softc *sc, struct ixl_tx_ring *txr, int enable)
2170 {
2171 	unsigned int qid;
2172 	bus_size_t reg;
2173 	uint32_t r;
2174 
2175 	qid = txr->txr_qid + sc->sc_base_queue;
2176 	reg = I40E_GLLAN_TXPRE_QDIS(qid / 128);
2177 	qid %= 128;
2178 
2179 	r = ixl_rd(sc, reg);
2180 	CLR(r, I40E_GLLAN_TXPRE_QDIS_QINDX_MASK);
2181 	SET(r, qid << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
2182 	SET(r, enable ? I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK :
2183 	    I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK);
2184 	ixl_wr(sc, reg, r);
2185 }
2186 
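/*
 * tx queues are configured by packing an ixl_hmc_txq structure into the
 * queue's hmc context in host memory rather than through registers.
 */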
2187 static void
2188 ixl_txr_config(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2189 {
2190 	struct ixl_hmc_txq txq;
2191 	struct ixl_aq_vsi_data *data = IXL_DMA_KVA(&sc->sc_scratch);
2192 	void *hmc;
2193 
2194 	memset(&txq, 0, sizeof(txq));
2195 	txq.head = htole16(0);
2196 	txq.new_context = 1;
2197 	htolem64(&txq.base,
2198 	    IXL_DMA_DVA(&txr->txr_mem) / IXL_HMC_TXQ_BASE_UNIT);
2199 	txq.head_wb_ena = IXL_HMC_TXQ_DESC_WB;
2200 	htolem16(&txq.qlen, sc->sc_tx_ring_ndescs);
2201 	txq.tphrdesc_ena = 0;
2202 	txq.tphrpacket_ena = 0;
2203 	txq.tphwdesc_ena = 0;
2204 	txq.rdylist = data->qs_handle[0];
2205 
2206 	hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
2207 	memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX));
2208 	ixl_hmc_pack(hmc, &txq, ixl_hmc_pack_txq, nitems(ixl_hmc_pack_txq));
2209 }
2210 
2211 static void
2212 ixl_txr_unconfig(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2213 {
2214 	void *hmc;
2215 
2216 	hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
2217 	memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX));
2218 }
2219 
2220 static void
2221 ixl_txr_clean(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2222 {
2223 	struct ixl_tx_map *maps, *txm;
2224 	bus_dmamap_t map;
2225 	unsigned int i;
2226 
2227 	maps = txr->txr_maps;
2228 	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2229 		txm = &maps[i];
2230 
2231 		if (txm->txm_m == NULL)
2232 			continue;
2233 
2234 		map = txm->txm_map;
2235 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2236 		    BUS_DMASYNC_POSTWRITE);
2237 		bus_dmamap_unload(sc->sc_dmat, map);
2238 
2239 		m_freem(txm->txm_m);
2240 		txm->txm_m = NULL;
2241 	}
2242 }
2243 
2244 static int
2245 ixl_txr_enabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2246 {
2247 	bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
2248 	uint32_t reg;
2249 	int i;
2250 
2251 	for (i = 0; i < 10; i++) {
2252 		reg = ixl_rd(sc, ena);
2253 		if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK))
2254 			return (0);
2255 
2256 		delaymsec(10);
2257 	}
2258 
2259 	return (ETIMEDOUT);
2260 }
2261 
2262 static int
2263 ixl_txr_disabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2264 {
2265 	bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
2266 	uint32_t reg;
2267 	int i;
2268 
2269 	for (i = 0; i < 20; i++) {
2270 		reg = ixl_rd(sc, ena);
2271 		if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK) == 0)
2272 			return (0);
2273 
2274 		delaymsec(10);
2275 	}
2276 
2277 	return (ETIMEDOUT);
2278 }
2279 
2280 static void
2281 ixl_txr_free(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2282 {
2283 	struct ixl_tx_map *maps, *txm;
2284 	unsigned int i;
2285 
2286 	maps = txr->txr_maps;
2287 	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2288 		txm = &maps[i];
2289 
2290 		bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
2291 	}
2292 
2293 	ixl_dmamem_free(sc, &txr->txr_mem);
2294 	free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_tx_ring_ndescs);
2295 	free(txr, M_DEVBUF, sizeof(*txr));
2296 }
2297 
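/*
 * try a direct dma load first; if the mbuf chain has too many segments
 * (EFBIG), compact it with m_defrag() and retry once.
 */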
2298 static inline int
2299 ixl_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m)
2300 {
2301 	int error;
2302 
2303 	error = bus_dmamap_load_mbuf(dmat, map, m,
2304 	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT);
2305 	if (error != EFBIG)
2306 		return (error);
2307 
2308 	error = m_defrag(m, M_DONTWAIT);
2309 	if (error != 0)
2310 		return (error);
2311 
2312 	return (bus_dmamap_load_mbuf(dmat, map, m,
2313 	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT));
2314 }
2315 
2316 static void
2317 ixl_start(struct ifqueue *ifq)
2318 {
2319 	struct ifnet *ifp = ifq->ifq_if;
2320 	struct ixl_softc *sc = ifp->if_softc;
2321 	struct ixl_tx_ring *txr = ifq->ifq_softc;
2322 	struct ixl_tx_desc *ring, *txd;
2323 	struct ixl_tx_map *txm;
2324 	bus_dmamap_t map;
2325 	struct mbuf *m;
2326 	uint64_t cmd;
2327 	unsigned int prod, free, last, i;
2328 	unsigned int mask;
2329 	int post = 0;
2330 #if NBPFILTER > 0
2331 	caddr_t if_bpf;
2332 #endif
2333 
2334 	if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
2335 		ifq_purge(ifq);
2336 		return;
2337 	}
2338 
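	/*
	 * the ring size is a power of two, so the free slot count is
	 * (cons - prod) modulo the ring size.
	 */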
2339 	prod = txr->txr_prod;
2340 	free = txr->txr_cons;
2341 	if (free <= prod)
2342 		free += sc->sc_tx_ring_ndescs;
2343 	free -= prod;
2344 
2345 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2346 	    0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTWRITE);
2347 
2348 	ring = IXL_DMA_KVA(&txr->txr_mem);
2349 	mask = sc->sc_tx_ring_ndescs - 1;
2350 
2351 	for (;;) {
2352 		if (free <= IXL_TX_PKT_DESCS) {
2353 			ifq_set_oactive(ifq);
2354 			break;
2355 		}
2356 
2357 		m = ifq_dequeue(ifq);
2358 		if (m == NULL)
2359 			break;
2360 
2361 		txm = &txr->txr_maps[prod];
2362 		map = txm->txm_map;
2363 
2364 		if (ixl_load_mbuf(sc->sc_dmat, map, m) != 0) {
2365 			ifq->ifq_errors++;
2366 			m_freem(m);
2367 			continue;
2368 		}
2369 
2370 		bus_dmamap_sync(sc->sc_dmat, map, 0,
2371 		    map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2372 
2373 		for (i = 0; i < map->dm_nsegs; i++) {
2374 			txd = &ring[prod];
2375 
2376 			cmd = (uint64_t)map->dm_segs[i].ds_len <<
2377 			    IXL_TX_DESC_BSIZE_SHIFT;
2378 			cmd |= IXL_TX_DESC_DTYPE_DATA | IXL_TX_DESC_CMD_ICRC;
2379 
2380 			htolem64(&txd->addr, map->dm_segs[i].ds_addr);
2381 			htolem64(&txd->cmd, cmd);
2382 
2383 			last = prod;
2384 
2385 			prod++;
2386 			prod &= mask;
2387 		}
2388 		cmd |= IXL_TX_DESC_CMD_EOP | IXL_TX_DESC_CMD_RS;
2389 		htolem64(&txd->cmd, cmd);
2390 
2391 		txm->txm_m = m;
2392 		txm->txm_eop = last;
2393 
2394 #if NBPFILTER > 0
2395 		if_bpf = ifp->if_bpf;
2396 		if (if_bpf)
2397 			bpf_mtap_ether(if_bpf, m, BPF_DIRECTION_OUT);
2398 #endif
2399 
2400 		free -= i;
2401 		post = 1;
2402 	}
2403 
2404 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2405 	    0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREWRITE);
2406 
2407 	if (post) {
2408 		txr->txr_prod = prod;
2409 		ixl_wr(sc, txr->txr_tail, prod);
2410 	}
2411 }
2412 
2413 static int
2414 ixl_txeof(struct ixl_softc *sc, struct ifqueue *ifq)
2415 {
2416 	struct ixl_tx_ring *txr = ifq->ifq_softc;
2417 	struct ixl_tx_desc *ring, *txd;
2418 	struct ixl_tx_map *txm;
2419 	bus_dmamap_t map;
2420 	unsigned int cons, prod, last;
2421 	unsigned int mask;
2422 	uint64_t dtype;
2423 	int done = 0;
2424 
2425 	prod = txr->txr_prod;
2426 	cons = txr->txr_cons;
2427 
2428 	if (cons == prod)
2429 		return (0);
2430 
2431 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2432 	    0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTREAD);
2433 
2434 	ring = IXL_DMA_KVA(&txr->txr_mem);
2435 	mask = sc->sc_tx_ring_ndescs - 1;
2436 
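	/*
	 * hardware rewrites the DTYPE field to DONE when it has
	 * finished with a descriptor; only the EOP slot of each packet
	 * needs checking, as the ring relies on in-order completion.
	 */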
2437 	do {
2438 		txm = &txr->txr_maps[cons];
2439 		last = txm->txm_eop;
2440 		txd = &ring[last];
2441 
2442 		dtype = txd->cmd & htole64(IXL_TX_DESC_DTYPE_MASK);
2443 		if (dtype != htole64(IXL_TX_DESC_DTYPE_DONE))
2444 			break;
2445 
2446 		map = txm->txm_map;
2447 
2448 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2449 		    BUS_DMASYNC_POSTWRITE);
2450 		bus_dmamap_unload(sc->sc_dmat, map);
2451 		m_freem(txm->txm_m);
2452 
2453 		txm->txm_m = NULL;
2454 		txm->txm_eop = -1;
2455 
2456 		cons = last + 1;
2457 		cons &= mask;
2458 
2459 		done = 1;
2460 	} while (cons != prod);
2461 
2462 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2463 	    0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREREAD);
2464 
2465 	txr->txr_cons = cons;
2466 
2467 	/* ixl_enable(sc, txr->txr_msix); */
2468 
2469 	if (ifq_is_oactive(ifq))
2470 		ifq_restart(ifq);
2471 
2472 	return (done);
2473 }
2474 
2475 static struct ixl_rx_ring *
2476 ixl_rxr_alloc(struct ixl_softc *sc, unsigned int qid)
2477 {
2478 	struct ixl_rx_ring *rxr;
2479 	struct ixl_rx_map *maps, *rxm;
2480 	unsigned int i;
2481 
2482 	rxr = malloc(sizeof(*rxr), M_DEVBUF, M_WAITOK|M_CANFAIL);
2483 	if (rxr == NULL)
2484 		return (NULL);
2485 
2486 	maps = mallocarray(sizeof(*maps),
2487 	    sc->sc_rx_ring_ndescs, M_DEVBUF, M_WAITOK|M_CANFAIL|M_ZERO);
2488 	if (maps == NULL)
2489 		goto free;
2490 
2491 	if (ixl_dmamem_alloc(sc, &rxr->rxr_mem,
2492 	    sizeof(struct ixl_rx_rd_desc_16) * sc->sc_rx_ring_ndescs,
2493 	    IXL_RX_QUEUE_ALIGN) != 0)
2494 		goto freemap;
2495 
2496 	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2497 		rxm = &maps[i];
2498 
2499 		if (bus_dmamap_create(sc->sc_dmat,
2500 		    IXL_HARDMTU, 1, IXL_HARDMTU, 0,
2501 		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
2502 		    &rxm->rxm_map) != 0)
2503 			goto uncreate;
2504 
2505 		rxm->rxm_m = NULL;
2506 	}
2507 
2508 	rxr->rxr_sc = sc;
2509 	if_rxr_init(&rxr->rxr_acct, 17, sc->sc_rx_ring_ndescs - 1);
2510 	timeout_set(&rxr->rxr_refill, ixl_rxrefill, rxr);
2511 	rxr->rxr_cons = rxr->rxr_prod = 0;
2512 	rxr->rxr_m_head = NULL;
2513 	rxr->rxr_m_tail = &rxr->rxr_m_head;
2514 	rxr->rxr_maps = maps;
2515 
2516 	rxr->rxr_tail = I40E_QRX_TAIL(qid);
2517 	rxr->rxr_qid = qid;
2518 
2519 	return (rxr);
2520 
2521 uncreate:
2522 	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2523 		rxm = &maps[i];
2524 
2525 		if (rxm->rxm_map == NULL)
2526 			continue;
2527 
2528 		bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
2529 	}
2530 
2531 	ixl_dmamem_free(sc, &rxr->rxr_mem);
2532 freemap:
2533 	free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_rx_ring_ndescs);
2534 free:
2535 	free(rxr, M_DEVBUF, sizeof(*rxr));
2536 	return (NULL);
2537 }
2538 
2539 static void
2540 ixl_rxr_clean(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2541 {
2542 	struct ixl_rx_map *maps, *rxm;
2543 	bus_dmamap_t map;
2544 	unsigned int i;
2545 
2546 	timeout_del_barrier(&rxr->rxr_refill);
2547 
2548 	maps = rxr->rxr_maps;
2549 	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2550 		rxm = &maps[i];
2551 
2552 		if (rxm->rxm_m == NULL)
2553 			continue;
2554 
2555 		map = rxm->rxm_map;
2556 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2557 		    BUS_DMASYNC_POSTWRITE);
2558 		bus_dmamap_unload(sc->sc_dmat, map);
2559 
2560 		m_freem(rxm->rxm_m);
2561 		rxm->rxm_m = NULL;
2562 	}
2563 
2564 	m_freem(rxr->rxr_m_head);
2565 	rxr->rxr_m_head = NULL;
2566 	rxr->rxr_m_tail = &rxr->rxr_m_head;
2567 
2568 	rxr->rxr_prod = rxr->rxr_cons = 0;
2569 }
2570 
2571 static int
2572 ixl_rxr_enabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2573 {
2574 	bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid);
2575 	uint32_t reg;
2576 	int i;
2577 
2578 	for (i = 0; i < 10; i++) {
2579 		reg = ixl_rd(sc, ena);
2580 		if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK))
2581 			return (0);
2582 
2583 		delaymsec(10);
2584 	}
2585 
2586 	return (ETIMEDOUT);
2587 }
2588 
2589 static int
2590 ixl_rxr_disabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2591 {
2592 	bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid);
2593 	uint32_t reg;
2594 	int i;
2595 
2596 	for (i = 0; i < 20; i++) {
2597 		reg = ixl_rd(sc, ena);
2598 		if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK) == 0)
2599 			return (0);
2600 
2601 		delaymsec(10);
2602 	}
2603 
2604 	return (ETIMEDOUT);
2605 }
2606 
2607 static void
2608 ixl_rxr_config(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2609 {
2610 	struct ixl_hmc_rxq rxq;
2611 	void *hmc;
2612 
2613 	memset(&rxq, 0, sizeof(rxq));
2614 
2615 	rxq.head = htole16(0);
2616 	htolem64(&rxq.base,
2617 	    IXL_DMA_DVA(&rxr->rxr_mem) / IXL_HMC_RXQ_BASE_UNIT);
2618 	htolem16(&rxq.qlen, sc->sc_rx_ring_ndescs);
2619 	rxq.dbuff = htole16(MCLBYTES / IXL_HMC_RXQ_DBUFF_UNIT);
2620 	rxq.hbuff = 0;
2621 	rxq.dtype = IXL_HMC_RXQ_DTYPE_NOSPLIT;
2622 	rxq.dsize = IXL_HMC_RXQ_DSIZE_16;
2623 	rxq.crcstrip = 1;
2624 	rxq.l2sel = 0;
2625 	rxq.showiv = 0;
2626 	rxq.rxmax = htole16(IXL_HARDMTU);
2627 	rxq.tphrdesc_ena = 0;
2628 	rxq.tphwdesc_ena = 0;
2629 	rxq.tphdata_ena = 0;
2630 	rxq.tphhead_ena = 0;
2631 	rxq.lrxqthresh = 0;
2632 	rxq.prefena = 1;
2633 
2634 	hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid);
2635 	memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX));
2636 	ixl_hmc_pack(hmc, &rxq, ixl_hmc_pack_rxq, nitems(ixl_hmc_pack_rxq));
2637 }
2638 
2639 static void
2640 ixl_rxr_unconfig(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2641 {
2642 	void *hmc;
2643 
2644 	hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid);
2645 	memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX));
2646 }
2647 
2648 static void
2649 ixl_rxr_free(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2650 {
2651 	struct ixl_rx_map *maps, *rxm;
2652 	unsigned int i;
2653 
2654 	maps = rxr->rxr_maps;
2655 	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2656 		rxm = &maps[i];
2657 
2658 		bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
2659 	}
2660 
2661 	ixl_dmamem_free(sc, &rxr->rxr_mem);
2662 	free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_rx_ring_ndescs);
2663 	free(rxr, M_DEVBUF, sizeof(*rxr));
2664 }
2665 
2666 static int
2667 ixl_rxeof(struct ixl_softc *sc, struct ifiqueue *ifiq)
2668 {
2669 	struct ixl_rx_ring *rxr = ifiq->ifiq_softc;
2670 	struct ifnet *ifp = &sc->sc_ac.ac_if;
2671 	struct ixl_rx_wb_desc_16 *ring, *rxd;
2672 	struct ixl_rx_map *rxm;
2673 	bus_dmamap_t map;
2674 	unsigned int cons, prod;
2675 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
2676 	struct mbuf *m;
2677 	uint64_t word;
2678 	unsigned int len;
2679 	unsigned int mask;
2680 	int done = 0;
2681 
2682 	if (!ISSET(ifp->if_flags, IFF_RUNNING))
2683 		return (0);
2684 
2685 	prod = rxr->rxr_prod;
2686 	cons = rxr->rxr_cons;
2687 
2688 	if (cons == prod)
2689 		return (0);
2690 
2691 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
2692 	    0, IXL_DMA_LEN(&rxr->rxr_mem),
2693 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2694 
2695 	ring = IXL_DMA_KVA(&rxr->rxr_mem);
2696 	mask = sc->sc_rx_ring_ndescs - 1;
2697 
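	/*
	 * a packet may span several descriptors; fragments are chained
	 * through rxr_m_tail until a descriptor with EOP set completes
	 * the packet.
	 */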
2698 	do {
2699 		rxd = &ring[cons];
2700 
2701 		word = lemtoh64(&rxd->qword1);
2702 		if (!ISSET(word, IXL_RX_DESC_DD))
2703 			break;
2704 
2705 		if_rxr_put(&rxr->rxr_acct, 1);
2706 
2707 		rxm = &rxr->rxr_maps[cons];
2708 
2709 		map = rxm->rxm_map;
2710 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2711 		    BUS_DMASYNC_POSTREAD);
2712 		bus_dmamap_unload(sc->sc_dmat, map);
2713 
2714 		m = rxm->rxm_m;
2715 		rxm->rxm_m = NULL;
2716 
2717 		len = (word & IXL_RX_DESC_PLEN_MASK) >> IXL_RX_DESC_PLEN_SHIFT;
2718 		m->m_len = len;
2719 		m->m_pkthdr.len = 0;
2720 
2721 		m->m_next = NULL;
2722 		*rxr->rxr_m_tail = m;
2723 		rxr->rxr_m_tail = &m->m_next;
2724 
2725 		m = rxr->rxr_m_head;
2726 		m->m_pkthdr.len += len;
2727 
2728 		if (ISSET(word, IXL_RX_DESC_EOP)) {
2729 			if (!ISSET(word,
2730 			    IXL_RX_DESC_RXE | IXL_RX_DESC_OVERSIZE)) {
2731 				ml_enqueue(&ml, m);
2732 			} else {
2733 				ifp->if_ierrors++; /* XXX */
2734 				m_freem(m);
2735 			}
2736 
2737 			rxr->rxr_m_head = NULL;
2738 			rxr->rxr_m_tail = &rxr->rxr_m_head;
2739 		}
2740 
2741 		cons++;
2742 		cons &= mask;
2743 
2744 		done = 1;
2745 	} while (cons != prod);
2746 
2747 	if (done) {
2748 		rxr->rxr_cons = cons;
2749 		if (ifiq_input(ifiq, &ml))
2750 			if_rxr_livelocked(&rxr->rxr_acct);
2751 		ixl_rxfill(sc, rxr);
2752 	}
2753 
2754 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
2755 	    0, IXL_DMA_LEN(&rxr->rxr_mem),
2756 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2757 
2758 	return (done);
2759 }
2760 
2761 static void
2762 ixl_rxfill(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2763 {
2764 	struct ixl_rx_rd_desc_16 *ring, *rxd;
2765 	struct ixl_rx_map *rxm;
2766 	bus_dmamap_t map;
2767 	struct mbuf *m;
2768 	unsigned int prod;
2769 	unsigned int slots;
2770 	unsigned int mask;
2771 	int post = 0;
2772 
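	/*
	 * if_rxr_get() limits how many slots may be refilled, keeping
	 * cluster allocation for this ring within the accounting set up
	 * at ring allocation time.
	 */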
2773 	slots = if_rxr_get(&rxr->rxr_acct, sc->sc_rx_ring_ndescs);
2774 	if (slots == 0)
2775 		return;
2776 
2777 	prod = rxr->rxr_prod;
2778 
2779 	ring = IXL_DMA_KVA(&rxr->rxr_mem);
2780 	mask = sc->sc_rx_ring_ndescs - 1;
2781 
2782 	do {
2783 		rxm = &rxr->rxr_maps[prod];
2784 
2785 		m = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES + ETHER_ALIGN);
2786 		if (m == NULL)
2787 			break;
2788 		m->m_data += (m->m_ext.ext_size - (MCLBYTES + ETHER_ALIGN));
2789 		m->m_len = m->m_pkthdr.len = MCLBYTES + ETHER_ALIGN;
2790 
2791 		map = rxm->rxm_map;
2792 
2793 		if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
2794 		    BUS_DMA_NOWAIT) != 0) {
2795 			m_freem(m);
2796 			break;
2797 		}
2798 
2799 		rxm->rxm_m = m;
2800 
2801 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2802 		    BUS_DMASYNC_PREREAD);
2803 
2804 		rxd = &ring[prod];
2805 
2806 		htolem64(&rxd->paddr, map->dm_segs[0].ds_addr);
2807 		rxd->haddr = htole64(0);
2808 
2809 		prod++;
2810 		prod &= mask;
2811 
2812 		post = 1;
2813 	} while (--slots);
2814 
2815 	if_rxr_put(&rxr->rxr_acct, slots);
2816 
2817 	if (if_rxr_inuse(&rxr->rxr_acct) == 0)
2818 		timeout_add(&rxr->rxr_refill, 1);
2819 	else if (post) {
2820 		rxr->rxr_prod = prod;
2821 		ixl_wr(sc, rxr->rxr_tail, prod);
2822 	}
2823 }
2824 
2825 void
2826 ixl_rxrefill(void *arg)
2827 {
2828 	struct ixl_rx_ring *rxr = arg;
2829 	struct ixl_softc *sc = rxr->rxr_sc;
2830 
2831 	ixl_rxfill(sc, rxr);
2832 }
2833 
2834 static int
2835 ixl_rxrinfo(struct ixl_softc *sc, struct if_rxrinfo *ifri)
2836 {
2837 	struct ifnet *ifp = &sc->sc_ac.ac_if;
2838 	struct if_rxring_info *ifr;
2839 	struct ixl_rx_ring *ring;
2840 	int i, rv;
2841 
2842 	if (!ISSET(ifp->if_flags, IFF_RUNNING))
2843 		return (ENOTTY);
2844 
2845 	ifr = mallocarray(sizeof(*ifr), ixl_nqueues(sc), M_TEMP,
2846 	    M_WAITOK|M_CANFAIL|M_ZERO);
2847 	if (ifr == NULL)
2848 		return (ENOMEM);
2849 
2850 	for (i = 0; i < ixl_nqueues(sc); i++) {
2851 		ring = ifp->if_iqs[i]->ifiq_softc;
2852 		ifr[i].ifr_size = MCLBYTES;
2853 		ifr[i].ifr_info = ring->rxr_acct;
2854 	}
2855 
2856 	rv = if_rxr_info_ioctl(ifri, ixl_nqueues(sc), ifr);
2857 	free(ifr, M_TEMP, ixl_nqueues(sc) * sizeof(*ifr));
2858 
2859 	return (rv);
2860 }
2861 
2862 static int
2863 ixl_intr(void *xsc)
2864 {
2865 	struct ixl_softc *sc = xsc;
2866 	struct ifnet *ifp = &sc->sc_ac.ac_if;
2867 	uint32_t icr;
2868 	int rv = 0;
2869 
2870 	ixl_intr_enable(sc);
2871 	icr = ixl_rd(sc, I40E_PFINT_ICR0);
2872 
2873 	if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)) {
2874 		ixl_atq_done(sc);
2875 		task_add(systq, &sc->sc_arq_task);
2876 		rv = 1;
2877 	}
2878 
2879 	if (ISSET(icr, I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)) {
2880 		task_add(systq, &sc->sc_link_state_task);
2881 		rv = 1;
2882 	}
2883 
2884 	if (ISSET(icr, I40E_INTR_NOTX_RX_MASK))
2885 		rv |= ixl_rxeof(sc, ifp->if_iqs[0]);
2886 	if (ISSET(icr, I40E_INTR_NOTX_TX_MASK))
2887 		rv |= ixl_txeof(sc, ifp->if_ifqs[0]);
2888 
2889 	return (rv);
2890 }
2891 
2892 static void
2893 ixl_link_state_update_done(struct ixl_softc *sc, void *arg)
2894 {
2895 	/* IXL_AQ_OP_PHY_LINK_STATUS already posted to admin reply queue */
2896 }
2897 
2898 static void
2899 ixl_link_state_update(void *xsc)
2900 {
2901 	struct ixl_softc *sc = xsc;
2902 	struct ixl_aq_desc *iaq;
2903 	struct ixl_aq_link_param *param;
2904 
2905 	memset(&sc->sc_link_state_atq, 0, sizeof(sc->sc_link_state_atq));
2906 	iaq = &sc->sc_link_state_atq.iatq_desc;
2907 	iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS);
2908 	param = (struct ixl_aq_link_param *)iaq->iaq_param;
2909 	param->notify = IXL_AQ_LINK_NOTIFY;
2910 
2911 	ixl_atq_set(&sc->sc_link_state_atq, ixl_link_state_update_done, NULL);
2912 	ixl_atq_post(sc, &sc->sc_link_state_atq);
2913 }
2914 
2915 static void
2916 ixl_arq_link_status(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
2917 {
2918 	struct ifnet *ifp = &sc->sc_ac.ac_if;
2919 	int link_state;
2920 
2921 	NET_LOCK();
2922 	link_state = ixl_set_link_status(sc, iaq);
2923 	if (ifp->if_link_state != link_state) {
2924 		ifp->if_link_state = link_state;
2925 		if_link_state_change(ifp);
2926 	}
2927 	NET_UNLOCK();
2928 }
2929 
2930 #if 0
2931 static void
2932 ixl_aq_dump(const struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
2933 {
2934 	printf("%s: flags %b opcode %04x\n", DEVNAME(sc),
2935 	    lemtoh16(&iaq->iaq_flags), IXL_AQ_FLAGS_FMT,
2936 	    lemtoh16(&iaq->iaq_opcode));
2937 	printf("%s: datalen %u retval %u\n", DEVNAME(sc),
2938 	    lemtoh16(&iaq->iaq_datalen), lemtoh16(&iaq->iaq_retval));
2939 	printf("%s: cookie %016llx\n", DEVNAME(sc), iaq->iaq_cookie);
2940 	printf("%s: %08x %08x %08x %08x\n", DEVNAME(sc),
2941 	    lemtoh32(&iaq->iaq_param[0]), lemtoh32(&iaq->iaq_param[1]),
2942 	    lemtoh32(&iaq->iaq_param[2]), lemtoh32(&iaq->iaq_param[3]));
2943 }
2944 #endif
2945 
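/*
 * drain the admin receive queue: each completed descriptor is matched
 * with the buffer at the head of the live list, dispatched on its
 * opcode, and the buffer is recycled onto the idle list for refill.
 */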
2946 static void
2947 ixl_arq(void *xsc)
2948 {
2949 	struct ixl_softc *sc = xsc;
2950 	struct ixl_aq_desc *arq, *iaq;
2951 	struct ixl_aq_buf *aqb;
2952 	unsigned int cons = sc->sc_arq_cons;
2953 	unsigned int prod;
2954 	int done = 0;
2955 
2956 	prod = ixl_rd(sc, sc->sc_aq_regs->arq_head) &
2957 	    sc->sc_aq_regs->arq_head_mask;
2958 
2959 	if (cons == prod)
2960 		goto done;
2961 
2962 	arq = IXL_DMA_KVA(&sc->sc_arq);
2963 
2964 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
2965 	    0, IXL_DMA_LEN(&sc->sc_arq),
2966 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2967 
2968 	do {
2969 		iaq = &arq[cons];
2970 
2971 		aqb = SIMPLEQ_FIRST(&sc->sc_arq_live);
2972 		bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN,
2973 		    BUS_DMASYNC_POSTREAD);
2974 
2975 		switch (iaq->iaq_opcode) {
2976 		case HTOLE16(IXL_AQ_OP_PHY_LINK_STATUS):
2977 			ixl_arq_link_status(sc, iaq);
2978 			break;
2979 		}
2980 
2981 		memset(iaq, 0, sizeof(*iaq));
2982 		SIMPLEQ_INSERT_TAIL(&sc->sc_arq_idle, aqb, aqb_entry);
2983 		if_rxr_put(&sc->sc_arq_ring, 1);
2984 
2985 		cons++;
2986 		cons &= IXL_AQ_MASK;
2987 
2988 		done = 1;
2989 	} while (cons != prod);
2990 
2991 	if (done && ixl_arq_fill(sc))
2992 		ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
2993 
2994 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
2995 	    0, IXL_DMA_LEN(&sc->sc_arq),
2996 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2997 
2998 	sc->sc_arq_cons = cons;
2999 
3000 done:
3001 	ixl_intr_enable(sc);
3002 }
3003 
3004 static void
3005 ixl_atq_set(struct ixl_atq *iatq,
3006     void (*fn)(struct ixl_softc *, void *), void *arg)
3007 {
3008 	iatq->iatq_fn = fn;
3009 	iatq->iatq_arg = arg;
3010 }
3011 
3012 static void
3013 ixl_atq_post(struct ixl_softc *sc, struct ixl_atq *iatq)
3014 {
3015 	struct ixl_aq_desc *atq, *slot;
3016 	unsigned int prod;
3017 
3018 	/* assert locked */
3019 
3020 	atq = IXL_DMA_KVA(&sc->sc_atq);
3021 	prod = sc->sc_atq_prod;
3022 	slot = atq + prod;
3023 
3024 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3025 	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
3026 
3027 	*slot = iatq->iatq_desc;
3028 	slot->iaq_cookie = (uint64_t)iatq;
3029 
3030 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3031 	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
3032 
3033 	prod++;
3034 	prod &= IXL_AQ_MASK;
3035 	sc->sc_atq_prod = prod;
3036 	ixl_wr(sc, sc->sc_aq_regs->atq_tail, prod);
3037 }
3038 
3039 static void
3040 ixl_atq_done(struct ixl_softc *sc)
3041 {
3042 	struct ixl_aq_desc *atq, *slot;
3043 	struct ixl_atq *iatq;
3044 	unsigned int cons;
3045 	unsigned int prod;
3046 
3047 	prod = sc->sc_atq_prod;
3048 	cons = sc->sc_atq_cons;
3049 
3050 	if (prod == cons)
3051 		return;
3052 
3053 	atq = IXL_DMA_KVA(&sc->sc_atq);
3054 
3055 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3056 	    0, IXL_DMA_LEN(&sc->sc_atq),
3057 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3058 
3059 	do {
3060 		slot = &atq[cons];
3061 		if (!ISSET(slot->iaq_flags, htole16(IXL_AQ_DD)))
3062 			break;
3063 
3064 		iatq = (struct ixl_atq *)slot->iaq_cookie;
3065 		iatq->iatq_desc = *slot;
3066 
3067 		memset(slot, 0, sizeof(*slot));
3068 
3069 		(*iatq->iatq_fn)(sc, iatq->iatq_arg);
3070 
3071 		cons++;
3072 		cons &= IXL_AQ_MASK;
3073 	} while (cons != prod);
3074 
3075 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3076 	    0, IXL_DMA_LEN(&sc->sc_atq),
3077 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3078 
3079 	sc->sc_atq_cons = cons;
3080 }
3081 
3082 static void
3083 ixl_wakeup(struct ixl_softc *sc, void *arg)
3084 {
3085 	struct cond *c = arg;
3086 
3087 	cond_signal(c);
3088 }
3089 
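/*
 * synchronous admin command execution: post the descriptor with a
 * wakeup callback and sleep on a cond until ixl_atq_done() runs the
 * callback from interrupt context.
 */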
3090 static void
3091 ixl_atq_exec(struct ixl_softc *sc, struct ixl_atq *iatq, const char *wmesg)
3092 {
3093 	struct cond c = COND_INITIALIZER();
3094 
3095 	KASSERT(iatq->iatq_desc.iaq_cookie == 0);
3096 
3097 	ixl_atq_set(iatq, ixl_wakeup, &c);
3098 	ixl_atq_post(sc, iatq);
3099 
3100 	cond_wait(&c, wmesg);
3101 }
3102 
3103 static int
3104 ixl_atq_poll(struct ixl_softc *sc, struct ixl_aq_desc *iaq, unsigned int tm)
3105 {
3106 	struct ixl_aq_desc *atq, *slot;
3107 	unsigned int prod;
3108 	unsigned int t = 0;
3109 
3110 	atq = IXL_DMA_KVA(&sc->sc_atq);
3111 	prod = sc->sc_atq_prod;
3112 	slot = atq + prod;
3113 
3114 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3115 	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
3116 
3117 	*slot = *iaq;
3118 	slot->iaq_flags |= htole16(IXL_AQ_SI);
3119 
3120 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3121 	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
3122 
3123 	prod++;
3124 	prod &= IXL_AQ_MASK;
3125 	sc->sc_atq_prod = prod;
3126 	ixl_wr(sc, sc->sc_aq_regs->atq_tail, prod);
3127 
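	/*
	 * spin until the hardware advances the head past the slot we
	 * just posted, giving up after tm milliseconds.
	 */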
3128 	while (ixl_rd(sc, sc->sc_aq_regs->atq_head) != prod) {
3129 		delaymsec(1);
3130 
3131 		if (t++ > tm)
3132 			return (ETIMEDOUT);
3133 	}
3134 
3135 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3136 	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTREAD);
3137 	*iaq = *slot;
3138 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3139 	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREREAD);
3140 
3141 	sc->sc_atq_cons = prod;
3142 
3143 	return (0);
3144 }
3145 
3146 static int
3147 ixl_get_version(struct ixl_softc *sc)
3148 {
3149 	struct ixl_aq_desc iaq;
3150 	uint32_t fwbuild, fwver, apiver;
3151 
3152 	memset(&iaq, 0, sizeof(iaq));
3153 	iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VERSION);
3154 
3155 	if (ixl_atq_poll(sc, &iaq, 2000) != 0)
3156 		return (ETIMEDOUT);
3157 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK))
3158 		return (EIO);
3159 
3160 	fwbuild = lemtoh32(&iaq.iaq_param[1]);
3161 	fwver = lemtoh32(&iaq.iaq_param[2]);
3162 	apiver = lemtoh32(&iaq.iaq_param[3]);
3163 
3164 	printf(", FW %hu.%hu.%05u API %hu.%hu", (uint16_t)fwver,
3165 	    (uint16_t)(fwver >> 16), fwbuild, (uint16_t)apiver,
3166 	    (uint16_t)(apiver >> 16));
3167 
3168 	return (0);
3169 }
3170 
3171 static int
3172 ixl_pxe_clear(struct ixl_softc *sc)
3173 {
3174 	struct ixl_aq_desc iaq;
3175 
3176 	memset(&iaq, 0, sizeof(iaq));
3177 	iaq.iaq_opcode = htole16(IXL_AQ_OP_CLEAR_PXE_MODE);
3178 	iaq.iaq_param[0] = htole32(0x2);
3179 
3180 	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
3181 		printf(", CLEAR PXE MODE timeout\n");
3182 		return (-1);
3183 	}
3184 
3185 	switch (iaq.iaq_retval) {
3186 	case HTOLE16(IXL_AQ_RC_OK):
3187 	case HTOLE16(IXL_AQ_RC_EEXIST):
3188 		break;
3189 	default:
3190 		printf(", CLEAR PXE MODE error\n");
3191 		return (-1);
3192 	}
3193 
3194 	return (0);
3195 }
3196 
3197 static int
3198 ixl_lldp_shut(struct ixl_softc *sc)
3199 {
3200 	struct ixl_aq_desc iaq;
3201 
3202 	memset(&iaq, 0, sizeof(iaq));
3203 	iaq.iaq_opcode = htole16(IXL_AQ_OP_LLDP_STOP_AGENT);
3204 	iaq.iaq_param[0] = htole32(IXL_LLDP_SHUTDOWN);
3205 
3206 	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
3207 		printf(", STOP LLDP AGENT timeout\n");
3208 		return (-1);
3209 	}
3210 
3211 	switch (iaq.iaq_retval) {
3212 	case HTOLE16(IXL_AQ_RC_EMODE):
3213 	case HTOLE16(IXL_AQ_RC_EPERM):
3214 		/* ignore silently */
3215 	default:
3216 		break;
3217 	}
3218 
3219 	return (0);
3220 }
3221 
3222 static int
3223 ixl_get_mac(struct ixl_softc *sc)
3224 {
3225 	struct ixl_dmamem idm;
3226 	struct ixl_aq_desc iaq;
3227 	struct ixl_aq_mac_addresses *addrs;
3228 	int rv;
3229 
3230 	if (ixl_dmamem_alloc(sc, &idm, sizeof(*addrs), 0) != 0) {
3231 		printf(", unable to allocate mac addresses\n");
3232 		return (-1);
3233 	}
3234 
3235 	memset(&iaq, 0, sizeof(iaq));
3236 	iaq.iaq_flags = htole16(IXL_AQ_BUF);
3237 	iaq.iaq_opcode = htole16(IXL_AQ_OP_MAC_ADDRESS_READ);
3238 	iaq.iaq_datalen = htole16(sizeof(*addrs));
3239 	ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
3240 
3241 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3242 	    BUS_DMASYNC_PREREAD);
3243 
3244 	rv = ixl_atq_poll(sc, &iaq, 250);
3245 
3246 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3247 	    BUS_DMASYNC_POSTREAD);
3248 
3249 	if (rv != 0) {
3250 		printf(", MAC ADDRESS READ timeout\n");
3251 		rv = -1;
3252 		goto done;
3253 	}
3254 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
3255 		printf(", MAC ADDRESS READ error\n");
3256 		rv = -1;
3257 		goto done;
3258 	}
3259 
3260 	addrs = IXL_DMA_KVA(&idm);
3261 	if (!ISSET(iaq.iaq_param[0], htole32(IXL_AQ_MAC_PORT_VALID))) {
3262 		printf(", port address is not valid\n");
		rv = -1;
3263 		goto done;
3264 	}
3265 
3266 	memcpy(sc->sc_ac.ac_enaddr, addrs->port, ETHER_ADDR_LEN);
3267 	rv = 0;
3268 
3269 done:
3270 	ixl_dmamem_free(sc, &idm);
3271 	return (rv);
3272 }
3273 
3274 static int
3275 ixl_get_switch_config(struct ixl_softc *sc)
3276 {
3277 	struct ixl_dmamem idm;
3278 	struct ixl_aq_desc iaq;
3279 	struct ixl_aq_switch_config *hdr;
3280 	struct ixl_aq_switch_config_element *elms, *elm;
3281 	unsigned int nelm;
3282 	int rv;
3283 
3284 	if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) {
3285 		printf("%s: unable to allocate switch config buffer\n",
3286 		    DEVNAME(sc));
3287 		return (-1);
3288 	}
3289 
3290 	memset(&iaq, 0, sizeof(iaq));
3291 	iaq.iaq_flags = htole16(IXL_AQ_BUF |
3292 	    (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
3293 	iaq.iaq_opcode = htole16(IXL_AQ_OP_SWITCH_GET_CONFIG);
3294 	iaq.iaq_datalen = htole16(IXL_AQ_BUFLEN);
3295 	ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
3296 
3297 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3298 	    BUS_DMASYNC_PREREAD);
3299 
3300 	rv = ixl_atq_poll(sc, &iaq, 250);
3301 
3302 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3303 	    BUS_DMASYNC_POSTREAD);
3304 
3305 	if (rv != 0) {
3306 		printf("%s: GET SWITCH CONFIG timeout\n", DEVNAME(sc));
3307 		rv = -1;
3308 		goto done;
3309 	}
3310 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
3311 		printf("%s: GET SWITCH CONFIG error\n", DEVNAME(sc));
3312 		rv = -1;
3313 		goto done;
3314 	}
3315 
3316 	hdr = IXL_DMA_KVA(&idm);
3317 	elms = (struct ixl_aq_switch_config_element *)(hdr + 1);
3318 
3319 	nelm = lemtoh16(&hdr->num_reported);
3320 	if (nelm < 1) {
3321 		printf("%s: no switch config available\n", DEVNAME(sc));
3322 		rv = -1;
3323 		goto done;
3324 	}
3325 
3326 #if 0
3327 	for (i = 0; i < nelm; i++) {
3328 		elm = &elms[i];
3329 
3330 		printf("%s: type %x revision %u seid %04x\n", DEVNAME(sc),
3331 		    elm->type, elm->revision, lemtoh16(&elm->seid));
3332 		printf("%s: uplink %04x downlink %04x\n", DEVNAME(sc),
3333 		    lemtoh16(&elm->uplink_seid),
3334 		    lemtoh16(&elm->downlink_seid));
3335 		printf("%s: conntype %x scheduler %04x extra %04x\n",
3336 		    DEVNAME(sc), elm->connection_type,
3337 		    lemtoh16(&elm->scheduler_id),
3338 		    lemtoh16(&elm->element_info));
3339 	}
3340 #endif
3341 
3342 	elm = &elms[0];
3343 
3344 	sc->sc_uplink_seid = elm->uplink_seid;
3345 	sc->sc_downlink_seid = elm->downlink_seid;
3346 	sc->sc_seid = elm->seid;
3347 
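	/*
	 * the uplink and downlink seids should either both be set or
	 * both be zero; anything else indicates a misconfigured switch
	 * element.
	 */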
3348 	if ((sc->sc_uplink_seid == htole16(0)) !=
3349 	    (sc->sc_downlink_seid == htole16(0))) {
3350 		printf("%s: SEIDs are misconfigured\n", DEVNAME(sc));
3351 		rv = -1;
3352 		goto done;
3353 	}
3354 
3355 done:
3356 	ixl_dmamem_free(sc, &idm);
3357 	return (rv);
3358 }
3359 
3360 static int
3361 ixl_phy_mask_ints(struct ixl_softc *sc)
3362 {
3363 	struct ixl_aq_desc iaq;
3364 
3365 	memset(&iaq, 0, sizeof(iaq));
3366 	iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_EVENT_MASK);
3367 	iaq.iaq_param[2] = htole32(IXL_AQ_PHY_EV_MASK &
3368 	    ~(IXL_AQ_PHY_EV_LINK_UPDOWN | IXL_AQ_PHY_EV_MODULE_QUAL_FAIL |
3369 	      IXL_AQ_PHY_EV_MEDIA_NA));
3370 
3371 	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
3372 		printf("%s: SET PHY EVENT MASK timeout\n", DEVNAME(sc));
3373 		return (-1);
3374 	}
3375 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
3376 		printf("%s: SET PHY EVENT MASK error\n", DEVNAME(sc));
3377 		return (-1);
3378 	}
3379 
3380 	return (0);
3381 }
3382 
3383 static int
3384 ixl_get_phy_abilities(struct ixl_softc *sc, uint64_t *phy_types_ptr)
3385 {
3386 	struct ixl_dmamem idm;
3387 	struct ixl_aq_desc iaq;
3388 	struct ixl_aq_phy_abilities *phy;
3389 	uint64_t phy_types;
3390 	int rv;
3391 
3392 	if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) {
3393 		printf("%s: unable to allocate phy abilities buffer\n",
3394 		    DEVNAME(sc));
3395 		return (-1);
3396 	}
3397 
3398 	memset(&iaq, 0, sizeof(iaq));
3399 	iaq.iaq_flags = htole16(IXL_AQ_BUF |
3400 	    (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
3401 	iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_GET_ABILITIES);
3402 	iaq.iaq_datalen = htole16(IXL_AQ_BUFLEN);
3403 	iaq.iaq_param[0] = htole32(IXL_AQ_PHY_REPORT_INIT);
3404 	ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
3405 
3406 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3407 	    BUS_DMASYNC_PREREAD);
3408 
3409 	rv = ixl_atq_poll(sc, &iaq, 250);
3410 
3411 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3412 	    BUS_DMASYNC_POSTREAD);
3413 
3414 	if (rv != 0) {
3415 		printf("%s: GET PHY ABILITIES timeout\n", DEVNAME(sc));
3416 		rv = -1;
3417 		goto done;
3418 	}
3419 	switch (iaq.iaq_retval) {
3420 	case HTOLE16(IXL_AQ_RC_OK):
3421 		break;
3422 	case HTOLE16(IXL_AQ_RC_EIO):
3423 		printf("%s: unable to query phy types\n", DEVNAME(sc));
3424 		rv = 0;
3425 		goto done;
3426 	default:
3427 		printf("%s: GET PHY ABILITIES error\n", DEVNAME(sc));
3428 		rv = -1;
3429 		goto done;
3430 	}
3431 
3432 	phy = IXL_DMA_KVA(&idm);
3433 
3434 	phy_types = lemtoh32(&phy->phy_type);
3435 	phy_types |= (uint64_t)phy->phy_type_ext << 32;
3436 
3437 	*phy_types_ptr = phy_types;
3438 
3439 	rv = 0;
3440 
3441 done:
3442 	ixl_dmamem_free(sc, &idm);
3443 	return (rv);
3444 }
3445 
3446 static int
3447 ixl_get_link_status(struct ixl_softc *sc)
3448 {
3449 	struct ixl_aq_desc iaq;
3450 	struct ixl_aq_link_param *param;
3451 
3452 	memset(&iaq, 0, sizeof(iaq));
3453 	iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS);
3454 	param = (struct ixl_aq_link_param *)iaq.iaq_param;
3455 	param->notify = IXL_AQ_LINK_NOTIFY;
3456 
3457 	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
3458 		printf("%s: GET LINK STATUS timeout\n", DEVNAME(sc));
3459 		return (-1);
3460 	}
3461 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
3462 		printf("%s: GET LINK STATUS error\n", DEVNAME(sc));
3463 		return (0);
3464 	}
3465 
3466 	sc->sc_ac.ac_if.if_link_state = ixl_set_link_status(sc, &iaq);
3467 
3468 	return (0);
3469 }
3470 
3471 static int
3472 ixl_get_sffpage(struct ixl_softc *sc, struct if_sffpage *sff)
3473 {
3474 	uint8_t page;
3475 	size_t i;
3476 	int error;
3477 
3478 	if (sff->sff_addr == IFSFF_ADDR_EEPROM) {
3479 		error = ixl_sff_get_byte(sc, IFSFF_ADDR_EEPROM, 127, &page);
3480 		if (error != 0)
3481 			return (error);
3482 		if (page != sff->sff_page) {
3483 			error = ixl_sff_set_byte(sc, IFSFF_ADDR_EEPROM, 127,
3484 			    sff->sff_page);
3485 			if (error != 0)
3486 				return (error);
3487 		}
3488 	}
3489 
3490 	for (i = 0; i < sizeof(sff->sff_data); i++) {
3491 		error = ixl_sff_get_byte(sc, sff->sff_addr, i,
3492 		    &sff->sff_data[i]);
3493 		if (error != 0)
3494 			return (error);
3495 	}
3496 
3497 	if (sff->sff_addr == IFSFF_ADDR_EEPROM) {
3498 		if (page != sff->sff_page) {
3499 			error = ixl_sff_set_byte(sc, IFSFF_ADDR_EEPROM, 127,
3500 			    page);
3501 			if (error != 0)
3502 				return (error);
3503 		}
3504 	}
3505 
3506 	return (0);
3507 }
3508 
3509 static int
3510 ixl_sff_get_byte(struct ixl_softc *sc, uint8_t dev, uint32_t reg, uint8_t *p)
3511 {
3512 	struct ixl_atq iatq;
3513 	struct ixl_aq_desc *iaq;
3514 	struct ixl_aq_phy_reg_access *param;
3515 
3516 	memset(&iatq, 0, sizeof(iatq));
3517 	iaq = &iatq.iatq_desc;
3518 	iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_GET_REGISTER);
3519 	param = (struct ixl_aq_phy_reg_access *)iaq->iaq_param;
3520 	param->phy_iface = IXL_AQ_PHY_IF_MODULE;
3521 	param->dev_addr = dev;
3522 	htolem32(&param->reg, reg);
3523 
3524 	ixl_atq_exec(sc, &iatq, "ixlsffget");
3525 
3526 	switch (iaq->iaq_retval) {
3527 	case htole16(IXL_AQ_RC_OK):
3528 		break;
3529 	case htole16(IXL_AQ_RC_EBUSY):
3530 		return (EBUSY);
3531 	case htole16(IXL_AQ_RC_ESRCH):
3532 		return (ENODEV);
3533 	case htole16(IXL_AQ_RC_EIO):
3534 	case htole16(IXL_AQ_RC_EINVAL):
3535 	default:
3536 		return (EIO);
3537 	}
3538 
3539 	*p = lemtoh32(&param->val);
3540 
3541 	return (0);
3542 }
3543 
3545 static int
3546 ixl_sff_set_byte(struct ixl_softc *sc, uint8_t dev, uint32_t reg, uint8_t v)
3547 {
3548 	struct ixl_atq iatq;
3549 	struct ixl_aq_desc *iaq;
3550 	struct ixl_aq_phy_reg_access *param;
3551 
3552 	memset(&iatq, 0, sizeof(iatq));
3553 	iaq = &iatq.iatq_desc;
3554 	iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_REGISTER);
3555 	param = (struct ixl_aq_phy_reg_access *)iaq->iaq_param;
3556 	param->phy_iface = IXL_AQ_PHY_IF_MODULE;
3557 	param->dev_addr = dev;
3558 	htolem32(&param->reg, reg);
3559 	htolem32(&param->val, v);
3560 
3561 	ixl_atq_exec(sc, &iatq, "ixlsffset");
3562 
3563 	switch (iaq->iaq_retval) {
3564 	case htole16(IXL_AQ_RC_OK):
3565 		break;
3566 	case htole16(IXL_AQ_RC_EBUSY):
3567 		return (EBUSY);
3568 	case htole16(IXL_AQ_RC_ESRCH):
3569 		return (ENODEV);
3570 	case htole16(IXL_AQ_RC_EIO):
3571 	case htole16(IXL_AQ_RC_EINVAL):
3572 	default:
3573 		return (EIO);
3574 	}
3575 
3576 	return (0);
3577 }
3578 
3579 static int
3580 ixl_get_vsi(struct ixl_softc *sc)
3581 {
3582 	struct ixl_dmamem *vsi = &sc->sc_scratch;
3583 	struct ixl_aq_desc iaq;
3584 	struct ixl_aq_vsi_param *param;
3585 	struct ixl_aq_vsi_reply *reply;
3586 	int rv;
3587 
3588 	/* grumble, vsi info isn't "known" at compile time */
3589 
3590 	memset(&iaq, 0, sizeof(iaq));
3591 	htolem16(&iaq.iaq_flags, IXL_AQ_BUF |
3592 	    (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
3593 	iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VSI_PARAMS);
3594 	htolem16(&iaq.iaq_datalen, IXL_DMA_LEN(vsi));
3595 	ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi));
3596 
3597 	param = (struct ixl_aq_vsi_param *)iaq.iaq_param;
3598 	param->uplink_seid = sc->sc_seid;
3599 
3600 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
3601 	    BUS_DMASYNC_PREREAD);
3602 
3603 	rv = ixl_atq_poll(sc, &iaq, 250);
3604 
3605 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
3606 	    BUS_DMASYNC_POSTREAD);
3607 
3608 	if (rv != 0) {
3609 		printf("%s: GET VSI timeout\n", DEVNAME(sc));
3610 		return (-1);
3611 	}
3612 
3613 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
3614 		printf("%s: GET VSI error %u\n", DEVNAME(sc),
3615 		    lemtoh16(&iaq.iaq_retval));
3616 		return (-1);
3617 	}
3618 
3619 	reply = (struct ixl_aq_vsi_reply *)iaq.iaq_param;
3620 	sc->sc_vsi_number = reply->vsi_number;
3621 
3622 	return (0);
3623 }
3624 
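/*
 * Rewrite the VSI parameters fetched by ixl_get_vsi() above: map a
 * contiguous block of queues starting at queue 0 onto traffic class
 * 0, and set the port VLAN mode to pass all frames, tagged or not,
 * without stripping anything; VLAN handling is left to the stack.
 */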
3625 static int
3626 ixl_set_vsi(struct ixl_softc *sc)
3627 {
3628 	struct ixl_dmamem *vsi = &sc->sc_scratch;
3629 	struct ixl_aq_desc iaq;
3630 	struct ixl_aq_vsi_param *param;
3631 	struct ixl_aq_vsi_data *data = IXL_DMA_KVA(vsi);
3632 	int rv;
3633 
3634 	data->valid_sections = htole16(IXL_AQ_VSI_VALID_QUEUE_MAP |
3635 	    IXL_AQ_VSI_VALID_VLAN);
3636 
3637 	CLR(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_MASK));
3638 	SET(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_CONTIG));
3639 	data->queue_mapping[0] = htole16(0);
3640 	data->tc_mapping[0] = htole16((0 << IXL_AQ_VSI_TC_Q_OFFSET_SHIFT) |
3641 	    (sc->sc_nqueues << IXL_AQ_VSI_TC_Q_NUMBER_SHIFT));
3642 
3643 	CLR(data->port_vlan_flags,
3644 	    htole16(IXL_AQ_VSI_PVLAN_MODE_MASK | IXL_AQ_VSI_PVLAN_EMOD_MASK));
3645 	SET(data->port_vlan_flags,
3646 	    htole16(IXL_AQ_VSI_PVLAN_MODE_ALL | IXL_AQ_VSI_PVLAN_EMOD_NOTHING));
3647 
3648 	/* grumble, vsi info isn't "known" at compile time */
3649 
3650 	memset(&iaq, 0, sizeof(iaq));
3651 	htolem16(&iaq.iaq_flags, IXL_AQ_BUF | IXL_AQ_RD |
3652 	    (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
3653 	iaq.iaq_opcode = htole16(IXL_AQ_OP_UPD_VSI_PARAMS);
3654 	htolem16(&iaq.iaq_datalen, IXL_DMA_LEN(vsi));
3655 	ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi));
3656 
3657 	param = (struct ixl_aq_vsi_param *)iaq.iaq_param;
3658 	param->uplink_seid = sc->sc_seid;
3659 
3660 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
3661 	    BUS_DMASYNC_PREWRITE);
3662 
3663 	rv = ixl_atq_poll(sc, &iaq, 250);
3664 
3665 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
3666 	    BUS_DMASYNC_POSTWRITE);
3667 
3668 	if (rv != 0) {
3669 		printf("%s: UPDATE VSI timeout\n", DEVNAME(sc));
3670 		return (-1);
3671 	}
3672 
3673 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
3674 		printf("%s: UPDATE VSI error %u\n", DEVNAME(sc),
3675 		    lemtoh16(&iaq.iaq_retval));
3676 		return (-1);
3677 	}
3678 
3679 	return (0);
3680 }
3681 
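/*
 * Translate the firmware phy_type byte, which names a single bit in
 * a 64-bit PHY type space, into an entry of ixl_phy_type_map so it
 * can be reported as an ifmedia type.  Returns NULL for unknown
 * types; the >= 64 check also keeps the shift below well defined.
 */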
3682 static const struct ixl_phy_type *
3683 ixl_search_phy_type(uint8_t phy_type)
3684 {
3685 	const struct ixl_phy_type *itype;
3686 	uint64_t mask;
3687 	unsigned int i;
3688 
3689 	if (phy_type >= 64)
3690 		return (NULL);
3691 
3692 	mask = 1ULL << phy_type;
3693 
3694 	for (i = 0; i < nitems(ixl_phy_type_map); i++) {
3695 		itype = &ixl_phy_type_map[i];
3696 
3697 		if (ISSET(itype->phy_type, mask))
3698 			return (itype);
3699 	}
3700 
3701 	return (NULL);
3702 }
3703 
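/*
 * Translate the firmware link speed bitfield into a baudrate in bits
 * per second for ifmedia; returns 0 if no known speed bit is set.
 */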
3704 static uint64_t
3705 ixl_search_link_speed(uint8_t link_speed)
3706 {
3707 	const struct ixl_speed_type *type;
3708 	unsigned int i;
3709 
3710 	for (i = 0; i < nitems(ixl_speed_type_map); i++) {
3711 		type = &ixl_speed_type_map[i];
3712 
3713 		if (ISSET(type->dev_speed, link_speed))
3714 			return (type->net_speed);
3715 	}
3716 
3717 	return (0);
3718 }
3719 
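/*
 * Decode a firmware link status report into ifmedia active/status
 * words, an interface baudrate, and a link state for the stack.
 * A link that is up is always reported as full duplex.
 */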
3720 static int
3721 ixl_set_link_status(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
3722 {
3723 	const struct ixl_aq_link_status *status;
3724 	const struct ixl_phy_type *itype;
3725 
3726 	uint64_t ifm_active = IFM_ETHER;
3727 	uint64_t ifm_status = IFM_AVALID;
3728 	int link_state = LINK_STATE_DOWN;
3729 	uint64_t baudrate = 0;
3730 
3731 	status = (const struct ixl_aq_link_status *)iaq->iaq_param;
3732 	if (!ISSET(status->link_info, IXL_AQ_LINK_UP_FUNCTION))
3733 		goto done;
3734 
3735 	ifm_active |= IFM_FDX;
3736 	ifm_status |= IFM_ACTIVE;
3737 	link_state = LINK_STATE_FULL_DUPLEX;
3738 
3739 	itype = ixl_search_phy_type(status->phy_type);
3740 	if (itype != NULL)
3741 		ifm_active |= itype->ifm_type;
3742 
3743 	if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_TX))
3744 		ifm_active |= IFM_ETH_TXPAUSE;
3745 	if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_RX))
3746 		ifm_active |= IFM_ETH_RXPAUSE;
3747 
3748 	baudrate = ixl_search_link_speed(status->link_speed);
3749 
3750 done:
3751 	/* NET_ASSERT_LOCKED() except during attach */
3752 	sc->sc_media_active = ifm_active;
3753 	sc->sc_media_status = ifm_status;
3754 	sc->sc_ac.ac_if.if_baudrate = baudrate;
3755 
3756 	return (link_state);
3757 }
3758 
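/*
 * Ask the firmware to restart link autonegotiation, keeping the
 * link enabled while it does so.
 */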
3759 static int
3760 ixl_restart_an(struct ixl_softc *sc)
3761 {
3762 	struct ixl_aq_desc iaq;
3763 
3764 	memset(&iaq, 0, sizeof(iaq));
3765 	iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_RESTART_AN);
3766 	iaq.iaq_param[0] =
3767 	    htole32(IXL_AQ_PHY_RESTART_AN | IXL_AQ_PHY_LINK_ENABLE);
3768 
3769 	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
3770 		printf("%s: RESTART AN timeout\n", DEVNAME(sc));
3771 		return (-1);
3772 	}
3773 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
3774 		printf("%s: RESTART AN error\n", DEVNAME(sc));
3775 		return (-1);
3776 	}
3777 
3778 	return (0);
3779 }
3780 
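/*
 * Install a perfect match MAC+VLAN filter on this PF's switch
 * element.  The 0x8000 bit in seid0 appears to mark the SEID field
 * as valid, as in Intel's reference driver.  Note this returns the
 * raw admin queue return code rather than an errno, so callers can
 * decide for themselves which results are benign.
 */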
3781 static int
3782 ixl_add_macvlan(struct ixl_softc *sc, uint8_t *macaddr, uint16_t vlan, uint16_t flags)
3783 {
3784 	struct ixl_aq_desc iaq;
3785 	struct ixl_aq_add_macvlan *param;
3786 	struct ixl_aq_add_macvlan_elem *elem;
3787 
3788 	memset(&iaq, 0, sizeof(iaq));
3789 	iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
3790 	iaq.iaq_opcode = htole16(IXL_AQ_OP_ADD_MACVLAN);
3791 	iaq.iaq_datalen = htole16(sizeof(*elem));
3792 	ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch));
3793 
3794 	param = (struct ixl_aq_add_macvlan *)&iaq.iaq_param;
3795 	param->num_addrs = htole16(1);
3796 	param->seid0 = htole16(0x8000) | sc->sc_seid;
3797 	param->seid1 = 0;
3798 	param->seid2 = 0;
3799 
3800 	elem = IXL_DMA_KVA(&sc->sc_scratch);
3801 	memset(elem, 0, sizeof(*elem));
3802 	memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN);
3803 	elem->flags = htole16(IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH | flags);
3804 	elem->vlan = htole16(vlan);
3805 
3806 	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
3807 		printf("%s: ADD_MACVLAN timeout\n", DEVNAME(sc));
3808 		return (IXL_AQ_RC_EINVAL);
3809 	}
3810 
3811 	return (letoh16(iaq.iaq_retval));
3812 }
3813 
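/*
 * Remove a previously installed MAC+VLAN filter.  Mirrors
 * ixl_add_macvlan() above, including returning the raw admin queue
 * return code.
 */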
3814 static int
3815 ixl_remove_macvlan(struct ixl_softc *sc, uint8_t *macaddr, uint16_t vlan, uint16_t flags)
3816 {
3817 	struct ixl_aq_desc iaq;
3818 	struct ixl_aq_remove_macvlan *param;
3819 	struct ixl_aq_remove_macvlan_elem *elem;
3820 
3821 	memset(&iaq, 0, sizeof(iaq));
3822 	iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
3823 	iaq.iaq_opcode = htole16(IXL_AQ_OP_REMOVE_MACVLAN);
3824 	iaq.iaq_datalen = htole16(sizeof(*elem));
3825 	ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch));
3826 
3827 	param = (struct ixl_aq_remove_macvlan *)&iaq.iaq_param;
3828 	param->num_addrs = htole16(1);
3829 	param->seid0 = htole16(0x8000) | sc->sc_seid;
3830 	param->seid1 = 0;
3831 	param->seid2 = 0;
3832 
3833 	elem = IXL_DMA_KVA(&sc->sc_scratch);
3834 	memset(elem, 0, sizeof(*elem));
3835 	memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN);
3836 	elem->flags = htole16(IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH | flags);
3837 	elem->vlan = htole16(vlan);
3838 
3839 	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
3840 		printf("%s: REMOVE_MACVLAN timeout\n", DEVNAME(sc));
3841 		return (IXL_AQ_RC_EINVAL);
3842 	}
3843 
3844 	return (letoh16(iaq.iaq_retval));
3845 }
3846 
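/*
 * Set up the Host Memory Cache: host RAM the chip uses to store its
 * queue contexts.  Firmware advertises how many objects of each type
 * it supports and how big each object is; this sizes a single backing
 * area (sc_hmc_pd), builds page descriptors for each of its pages in
 * sc_hmc_sd, and programs a segment descriptor for each page of
 * descriptors through PFHMC_SDCMD.  The per-type base/count registers
 * then tell the chip where each object type lives within the area.
 */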
3847 static int
3848 ixl_hmc(struct ixl_softc *sc)
3849 {
3850 	struct {
3851 		uint32_t   count;
3852 		uint32_t   minsize;
3853 		bus_size_t maxcnt;
3854 		bus_size_t setoff;
3855 		bus_size_t setcnt;
3856 	} regs[] = {
3857 		{
3858 			0,
3859 			IXL_HMC_TXQ_MINSIZE,
3860 			I40E_GLHMC_LANTXOBJSZ,
3861 			I40E_GLHMC_LANTXBASE(sc->sc_pf_id),
3862 			I40E_GLHMC_LANTXCNT(sc->sc_pf_id),
3863 		},
3864 		{
3865 			0,
3866 			IXL_HMC_RXQ_MINSIZE,
3867 			I40E_GLHMC_LANRXOBJSZ,
3868 			I40E_GLHMC_LANRXBASE(sc->sc_pf_id),
3869 			I40E_GLHMC_LANRXCNT(sc->sc_pf_id),
3870 		},
3871 		{
3872 			0,
3873 			0,
3874 			I40E_GLHMC_FCOEMAX,
3875 			I40E_GLHMC_FCOEDDPBASE(sc->sc_pf_id),
3876 			I40E_GLHMC_FCOEDDPCNT(sc->sc_pf_id),
3877 		},
3878 		{
3879 			0,
3880 			0,
3881 			I40E_GLHMC_FCOEFMAX,
3882 			I40E_GLHMC_FCOEFBASE(sc->sc_pf_id),
3883 			I40E_GLHMC_FCOEFCNT(sc->sc_pf_id),
3884 		},
3885 	};
3886 	struct ixl_hmc_entry *e;
3887 	uint64_t size, dva;
3888 	uint8_t *kva;
3889 	uint64_t *sdpage;
3890 	unsigned int i;
3891 	int npages, tables;
3892 
3893 	CTASSERT(nitems(regs) <= nitems(sc->sc_hmc_entries));
3894 
3895 	regs[IXL_HMC_LAN_TX].count = regs[IXL_HMC_LAN_RX].count =
3896 	    ixl_rd(sc, I40E_GLHMC_LANQMAX);
3897 
3898 	size = 0;
3899 	for (i = 0; i < nitems(regs); i++) {
3900 		e = &sc->sc_hmc_entries[i];
3901 
3902 		e->hmc_count = regs[i].count;
3903 		e->hmc_size = 1U << ixl_rd(sc, regs[i].maxcnt);
3904 		e->hmc_base = size;
3905 
3906 		if ((e->hmc_size * 8) < regs[i].minsize) {
3907 			printf("%s: kernel hmc entry is too big\n",
3908 			    DEVNAME(sc));
3909 			return (-1);
3910 		}
3911 
3912 		size += roundup(e->hmc_size * e->hmc_count, IXL_HMC_ROUNDUP);
3913 	}
3914 	size = roundup(size, IXL_HMC_PGSIZE);
3915 	npages = size / IXL_HMC_PGSIZE;
3916 
3917 	tables = roundup(size, IXL_HMC_L2SZ) / IXL_HMC_L2SZ;
3918 
3919 	if (ixl_dmamem_alloc(sc, &sc->sc_hmc_pd, size, IXL_HMC_PGSIZE) != 0) {
3920 		printf("%s: unable to allocate hmc pd memory\n", DEVNAME(sc));
3921 		return (-1);
3922 	}
3923 
3924 	if (ixl_dmamem_alloc(sc, &sc->sc_hmc_sd, tables * IXL_HMC_PGSIZE,
3925 	    IXL_HMC_PGSIZE) != 0) {
3926 		printf("%s: unable to allocate hmc sd memory\n", DEVNAME(sc));
3927 		ixl_dmamem_free(sc, &sc->sc_hmc_pd);
3928 		return (-1);
3929 	}
3930 
3931 	kva = IXL_DMA_KVA(&sc->sc_hmc_pd);
3932 	memset(kva, 0, IXL_DMA_LEN(&sc->sc_hmc_pd));
3933 
3934 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
3935 	    0, IXL_DMA_LEN(&sc->sc_hmc_pd),
3936 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3937 
3938 	dva = IXL_DMA_DVA(&sc->sc_hmc_pd);
3939 	sdpage = IXL_DMA_KVA(&sc->sc_hmc_sd);
3940 	for (i = 0; i < npages; i++) {
3941 		htolem64(sdpage++, dva | IXL_HMC_PDVALID);
3942 
3943 		dva += IXL_HMC_PGSIZE;
3944 	}
3945 
3946 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_sd),
3947 	    0, IXL_DMA_LEN(&sc->sc_hmc_sd),
3948 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3949 
3950 	dva = IXL_DMA_DVA(&sc->sc_hmc_sd);
3951 	for (i = 0; i < tables; i++) {
3952 		uint32_t count;
3953 
3954 		KASSERT(npages >= 0);
3955 
3956 		count = (npages > IXL_HMC_PGS) ? IXL_HMC_PGS : npages;
3957 
3958 		ixl_wr(sc, I40E_PFHMC_SDDATAHIGH, dva >> 32);
3959 		ixl_wr(sc, I40E_PFHMC_SDDATALOW, dva |
3960 		    (count << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |
3961 		    (1U << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT));
3962 		ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
3963 		ixl_wr(sc, I40E_PFHMC_SDCMD,
3964 		    (1U << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | i);
3965 
3966 		npages -= IXL_HMC_PGS;
3967 		dva += IXL_HMC_PGSIZE;
3968 	}
3969 
3970 	for (i = 0; i < nitems(regs); i++) {
3971 		e = &sc->sc_hmc_entries[i];
3972 
3973 		ixl_wr(sc, regs[i].setoff, e->hmc_base / IXL_HMC_ROUNDUP);
3974 		ixl_wr(sc, regs[i].setcnt, e->hmc_count);
3975 	}
3976 
3977 	return (0);
3978 }
3979 
3980 static void
3981 ixl_hmc_free(struct ixl_softc *sc)
3982 {
3983 	ixl_dmamem_free(sc, &sc->sc_hmc_sd);
3984 	ixl_dmamem_free(sc, &sc->sc_hmc_pd);
3985 }
3986 
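/*
 * Pack a host-endian context structure (described by an array of
 * struct ixl_hmc_pack entries) into the bit-exact layout the hardware
 * expects.  Each entry copies "width" bits from byte offset "offset"
 * in the source into the destination starting at bit "lsb".  E.g. a
 * field with lsb = 12 and width = 10 lands in destination bytes 1 and
 * 2, starting four bits in.  Bytes that are only partially covered
 * are merged with |=, so the destination is expected to start zeroed.
 */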
3987 static void
3988 ixl_hmc_pack(void *d, const void *s, const struct ixl_hmc_pack *packing,
3989     unsigned int npacking)
3990 {
3991 	uint8_t *dst = d;
3992 	const uint8_t *src = s;
3993 	unsigned int i;
3994 
3995 	for (i = 0; i < npacking; i++) {
3996 		const struct ixl_hmc_pack *pack = &packing[i];
3997 		unsigned int offset = pack->lsb / 8;
3998 		unsigned int align = pack->lsb % 8;
3999 		const uint8_t *in = src + pack->offset;
4000 		uint8_t *out = dst + offset;
4001 		int width = pack->width;
4002 		unsigned int inbits = 0;
4003 
4004 		if (align) {
4005 			inbits = (*in++) << align;
4006 			*out++ |= (inbits & 0xff);
4007 			inbits >>= 8;
4008 
4009 			width -= 8 - align;
4010 		}
4011 
4012 		while (width >= 8) {
4013 			inbits |= (*in++) << align;
4014 			*out++ = (inbits & 0xff);
4015 			inbits >>= 8;
4016 
4017 			width -= 8;
4018 		}
4019 
4020 		if (width > 0) {
4021 			inbits |= (*in) << align;
4022 			*out |= (inbits & ((1 << width) - 1));
4023 		}
4024 	}
4025 }
4026 
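/*
 * Allocate a buffer for the admin receive queue: a wrapper struct,
 * an IXL_AQ_BUFLEN chunk of DMA-able memory, and a map loaded with
 * it, unwound in reverse order if any step fails.
 */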
4027 static struct ixl_aq_buf *
4028 ixl_aqb_alloc(struct ixl_softc *sc)
4029 {
4030 	struct ixl_aq_buf *aqb;
4031 
4032 	aqb = malloc(sizeof(*aqb), M_DEVBUF, M_WAITOK);
4033 	if (aqb == NULL)
4034 		return (NULL);
4035 
4036 	aqb->aqb_data = dma_alloc(IXL_AQ_BUFLEN, PR_WAITOK);
4037 	if (aqb->aqb_data == NULL)
4038 		goto free;
4039 
4040 	if (bus_dmamap_create(sc->sc_dmat, IXL_AQ_BUFLEN, 1,
4041 	    IXL_AQ_BUFLEN, 0,
4042 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
4043 	    &aqb->aqb_map) != 0)
4044 		goto dma_free;
4045 
4046 	if (bus_dmamap_load(sc->sc_dmat, aqb->aqb_map, aqb->aqb_data,
4047 	    IXL_AQ_BUFLEN, NULL, BUS_DMA_WAITOK) != 0)
4048 		goto destroy;
4049 
4050 	return (aqb);
4051 
4052 destroy:
4053 	bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
4054 dma_free:
4055 	dma_free(aqb->aqb_data, IXL_AQ_BUFLEN);
4056 free:
4057 	free(aqb, M_DEVBUF, sizeof(*aqb));
4058 
4059 	return (NULL);
4060 }
4061 
4062 static void
4063 ixl_aqb_free(struct ixl_softc *sc, struct ixl_aq_buf *aqb)
4064 {
4065 	bus_dmamap_unload(sc->sc_dmat, aqb->aqb_map);
4066 	bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
4067 	dma_free(aqb->aqb_data, IXL_AQ_BUFLEN);
4068 	free(aqb, M_DEVBUF, sizeof(*aqb));
4069 }
4070 
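/*
 * Post buffers on the admin receive queue so firmware has somewhere
 * to write events, reusing buffers from the idle list before
 * allocating new ones.  Returns nonzero if the producer index moved,
 * i.e. if the caller should update the hardware tail register.
 */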
4071 static int
4072 ixl_arq_fill(struct ixl_softc *sc)
4073 {
4074 	struct ixl_aq_buf *aqb;
4075 	struct ixl_aq_desc *arq, *iaq;
4076 	unsigned int prod = sc->sc_arq_prod;
4077 	unsigned int n;
4078 	int post = 0;
4079 
4080 	n = if_rxr_get(&sc->sc_arq_ring, IXL_AQ_NUM);
4081 	arq = IXL_DMA_KVA(&sc->sc_arq);
4082 
4083 	while (n > 0) {
4084 		aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle);
4085 		if (aqb != NULL)
4086 			SIMPLEQ_REMOVE_HEAD(&sc->sc_arq_idle, aqb_entry);
4087 		else if ((aqb = ixl_aqb_alloc(sc)) == NULL)
4088 			break;
4089 
4090 		memset(aqb->aqb_data, 0, IXL_AQ_BUFLEN);
4091 
4092 		bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN,
4093 		    BUS_DMASYNC_PREREAD);
4094 
4095 		iaq = &arq[prod];
4096 		iaq->iaq_flags = htole16(IXL_AQ_BUF |
4097 		    (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4098 		iaq->iaq_opcode = 0;
4099 		iaq->iaq_datalen = htole16(IXL_AQ_BUFLEN);
4100 		iaq->iaq_retval = 0;
4101 		iaq->iaq_cookie = 0;
4102 		iaq->iaq_param[0] = 0;
4103 		iaq->iaq_param[1] = 0;
4104 		ixl_aq_dva(iaq, aqb->aqb_map->dm_segs[0].ds_addr);
4105 
4106 		SIMPLEQ_INSERT_TAIL(&sc->sc_arq_live, aqb, aqb_entry);
4107 
4108 		prod++;
4109 		prod &= IXL_AQ_MASK;
4110 
4111 		post = 1;
4112 
4113 		n--;
4114 	}
4115 
4116 	if_rxr_put(&sc->sc_arq_ring, n);
4117 	sc->sc_arq_prod = prod;
4118 
4119 	return (post);
4120 }
4121 
4122 static void
4123 ixl_arq_unfill(struct ixl_softc *sc)
4124 {
4125 	struct ixl_aq_buf *aqb;
4126 
4127 	while ((aqb = SIMPLEQ_FIRST(&sc->sc_arq_live)) != NULL) {
4128 		SIMPLEQ_REMOVE_HEAD(&sc->sc_arq_live, aqb_entry);
4129 
4130 		bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN,
4131 		    BUS_DMASYNC_POSTREAD);
4132 		ixl_aqb_free(sc, aqb);
4133 	}
4134 }
4135 
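/*
 * Quiesce the hardware before a PF reset: mask and unhook every
 * interrupt this function owns, warn the Tx scheduler that its
 * queues are about to go away, then disable all queue interrupts
 * and the queues themselves, mirroring the quiesce sequence in
 * Intel's reference code.
 */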
4136 static void
4137 ixl_clear_hw(struct ixl_softc *sc)
4138 {
4139 	uint32_t num_queues, base_queue;
4140 	uint32_t num_pf_int;
4141 	uint32_t num_vf_int;
4142 	uint32_t num_vfs;
4143 	uint32_t i, j;
4144 	uint32_t val;
4145 	uint32_t eol = 0x7ff;
4146 
4147 	/* get number of interrupts, queues, and vfs */
4148 	val = ixl_rd(sc, I40E_GLPCI_CNF2);
4149 	num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
4150 	    I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
4151 	num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
4152 	    I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;
4153 
4154 	val = ixl_rd(sc, I40E_PFLAN_QALLOC);
4155 	base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
4156 	    I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
4157 	j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
4158 	    I40E_PFLAN_QALLOC_LASTQ_SHIFT;
4159 	if (val & I40E_PFLAN_QALLOC_VALID_MASK)
4160 		num_queues = (j - base_queue) + 1;
4161 	else
4162 		num_queues = 0;
4163 
4164 	val = ixl_rd(sc, I40E_PF_VT_PFALLOC);
4165 	i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
4166 	    I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
4167 	j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
4168 	    I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
4169 	if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
4170 		num_vfs = (j - i) + 1;
4171 	else
4172 		num_vfs = 0;
4173 
4174 	/* stop all the interrupts */
4175 	ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0);
4176 	val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
4177 	for (i = 0; i < num_pf_int - 2; i++)
4178 		ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), val);
4179 
4180 	/* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
4181 	val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
4182 	ixl_wr(sc, I40E_PFINT_LNKLST0, val);
4183 	for (i = 0; i < num_pf_int - 2; i++)
4184 		ixl_wr(sc, I40E_PFINT_LNKLSTN(i), val);
4185 	val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
4186 	for (i = 0; i < num_vfs; i++)
4187 		ixl_wr(sc, I40E_VPINT_LNKLST0(i), val);
4188 	for (i = 0; i < num_vf_int - 2; i++)
4189 		ixl_wr(sc, I40E_VPINT_LNKLSTN(i), val);
4190 
4191 	/* warn the HW of the coming Tx disables */
4192 	for (i = 0; i < num_queues; i++) {
4193 		uint32_t abs_queue_idx = base_queue + i;
4194 		uint32_t reg_block = 0;
4195 
4196 		if (abs_queue_idx >= 128) {
4197 			reg_block = abs_queue_idx / 128;
4198 			abs_queue_idx %= 128;
4199 		}
4200 
4201 		val = ixl_rd(sc, I40E_GLLAN_TXPRE_QDIS(reg_block));
4202 		val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
4203 		val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
4204 		val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
4205 
4206 		ixl_wr(sc, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
4207 	}
4208 	delaymsec(400);
4209 
4210 	/* stop all the queues */
4211 	for (i = 0; i < num_queues; i++) {
4212 		ixl_wr(sc, I40E_QINT_TQCTL(i), 0);
4213 		ixl_wr(sc, I40E_QTX_ENA(i), 0);
4214 		ixl_wr(sc, I40E_QINT_RQCTL(i), 0);
4215 		ixl_wr(sc, I40E_QRX_ENA(i), 0);
4216 	}
4217 
4218 	/* short wait for all queue disables to settle */
4219 	delaymsec(50);
4220 }
4221 
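/*
 * Reset this physical function.  If a global reset was already in
 * flight when we got here it covers us, and only the firmware-ready
 * wait is needed; otherwise trigger a PF software reset and poll
 * for it to complete.
 */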
4222 static int
4223 ixl_pf_reset(struct ixl_softc *sc)
4224 {
4225 	uint32_t cnt = 0;
4226 	uint32_t cnt1 = 0;
4227 	uint32_t reg = 0;
4228 	uint32_t grst_del;
4229 
4230 	/*
4231 	 * Poll for Global Reset steady state in case of recent GRST.
4232 	 * The grst delay value is in 100ms units, and we'll wait a
4233 	 * couple counts longer to be sure we don't just miss the end.
4234 	 */
4235 	grst_del = ixl_rd(sc, I40E_GLGEN_RSTCTL);
4236 	grst_del &= I40E_GLGEN_RSTCTL_GRSTDEL_MASK;
4237 	grst_del >>= I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
4238 	grst_del += 10;
4239 
4240 	for (cnt = 0; cnt < grst_del; cnt++) {
4241 		reg = ixl_rd(sc, I40E_GLGEN_RSTAT);
4242 		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
4243 			break;
4244 		delaymsec(100);
4245 	}
4246 	if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
4247 		printf(", Global reset polling failed to complete\n");
4248 		return (-1);
4249 	}
4250 
4251 	/* Now Wait for the FW to be ready */
4252 	/* Now wait for the FW to be ready */
4253 		reg = ixl_rd(sc, I40E_GLNVM_ULD);
4254 		reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
4255 		    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
4256 		if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
4257 		    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))
4258 			break;
4259 
4260 		delaymsec(10);
4261 	}
4262 	if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
4263 	    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
4264 		printf(", wait for FW Reset complete timed out "
4265 		    "(I40E_GLNVM_ULD = 0x%x)\n", reg);
4266 		return (-1);
4267 	}
4268 
4269 	/*
4270 	 * If there was a Global Reset in progress when we got here,
4271 	 * we don't need to do the PF Reset
4272 	 */
4273 	if (cnt == 0) {
4274 		reg = ixl_rd(sc, I40E_PFGEN_CTRL);
4275 		ixl_wr(sc, I40E_PFGEN_CTRL, reg | I40E_PFGEN_CTRL_PFSWR_MASK);
4276 		for (cnt = 0; cnt < I40E_PF_RESET_WAIT_COUNT; cnt++) {
4277 			reg = ixl_rd(sc, I40E_PFGEN_CTRL);
4278 			if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
4279 				break;
4280 			delaymsec(1);
4281 		}
4282 		if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
4283 			printf(", PF reset polling failed to complete "
4284 			    "(I40E_PFGEN_CTRL = 0x%x)\n", reg);
4285 			return (-1);
4286 		}
4287 	}
4288 
4289 	return (0);
4290 }
4291 
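/*
 * Allocate a chunk of DMA memory the usual four-step bus_dma(9) way:
 * create a map, allocate (zeroed) segments, map them into kva, and
 * load the map, unwinding in reverse on failure.
 */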
4292 static int
4293 ixl_dmamem_alloc(struct ixl_softc *sc, struct ixl_dmamem *ixm,
4294     bus_size_t size, u_int align)
4295 {
4296 	ixm->ixm_size = size;
4297 
4298 	if (bus_dmamap_create(sc->sc_dmat, ixm->ixm_size, 1,
4299 	    ixm->ixm_size, 0,
4300 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
4301 	    &ixm->ixm_map) != 0)
4302 		return (1);
4303 	if (bus_dmamem_alloc(sc->sc_dmat, ixm->ixm_size,
4304 	    align, 0, &ixm->ixm_seg, 1, &ixm->ixm_nsegs,
4305 	    BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
4306 		goto destroy;
4307 	if (bus_dmamem_map(sc->sc_dmat, &ixm->ixm_seg, ixm->ixm_nsegs,
4308 	    ixm->ixm_size, &ixm->ixm_kva, BUS_DMA_WAITOK) != 0)
4309 		goto free;
4310 	if (bus_dmamap_load(sc->sc_dmat, ixm->ixm_map, ixm->ixm_kva,
4311 	    ixm->ixm_size, NULL, BUS_DMA_WAITOK) != 0)
4312 		goto unmap;
4313 
4314 	return (0);
4315 unmap:
4316 	bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
4317 free:
4318 	bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
4319 destroy:
4320 	bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
4321 	return (1);
4322 }
4323 
4324 static void
4325 ixl_dmamem_free(struct ixl_softc *sc, struct ixl_dmamem *ixm)
4326 {
4327 	bus_dmamap_unload(sc->sc_dmat, ixm->ixm_map);
4328 	bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
4329 	bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
4330 	bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
4331 }
4332