/*	$OpenBSD: if_ixl.c,v 1.46 2019/11/19 03:33:43 yasuoka Exp $ */

/*
 * Copyright (c) 2013-2015, Intel Corporation
 * All rights reserved.

 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  1. Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  3. Neither the name of the Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived from
 *     this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2016,2017 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/timeout.h>
#include <sys/task.h>
#include <sys/syslog.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#define I40E_MASK(mask, shift)		((mask) << (shift))
#define I40E_PF_RESET_WAIT_COUNT	200
#define I40E_AQ_LARGE_BUF		512

/* bitfields for Tx queue mapping in QTX_CTL */
#define I40E_QTX_CTL_VF_QUEUE		0x0
#define I40E_QTX_CTL_VM_QUEUE		0x1
#define I40E_QTX_CTL_PF_QUEUE		0x2

#define I40E_QUEUE_TYPE_EOL		0x7ff
#define I40E_INTR_NOTX_QUEUE		0

#define I40E_QUEUE_TYPE_RX		0x0
#define I40E_QUEUE_TYPE_TX		0x1
#define I40E_QUEUE_TYPE_PE_CEQ		0x2
#define I40E_QUEUE_TYPE_UNKNOWN		0x3

#define I40E_ITR_INDEX_RX		0x0
#define I40E_ITR_INDEX_TX		0x1
#define I40E_ITR_INDEX_OTHER		0x2
#define I40E_ITR_INDEX_NONE		0x3

#include <dev/pci/if_ixlreg.h>

#define I40E_INTR_NOTX_QUEUE		0
#define I40E_INTR_NOTX_INTR		0
#define I40E_INTR_NOTX_RX_QUEUE		0
#define I40E_INTR_NOTX_TX_QUEUE		1
#define I40E_INTR_NOTX_RX_MASK		I40E_PFINT_ICR0_QUEUE_0_MASK
#define I40E_INTR_NOTX_TX_MASK		I40E_PFINT_ICR0_QUEUE_1_MASK

struct ixl_aq_desc {
	uint16_t	iaq_flags;
#define	IXL_AQ_DD		(1U << 0)
#define	IXL_AQ_CMP		(1U << 1)
#define IXL_AQ_ERR		(1U << 2)
#define IXL_AQ_VFE		(1U << 3)
#define IXL_AQ_LB		(1U << 9)
#define IXL_AQ_RD		(1U << 10)
#define IXL_AQ_VFC		(1U << 11)
#define IXL_AQ_BUF		(1U << 12)
#define IXL_AQ_SI		(1U << 13)
#define IXL_AQ_EI		(1U << 14)
#define IXL_AQ_FE		(1U << 15)
#define IXL_AQ_FLAGS_FMT	"\020" "\020FE" "\017EI" "\016SI" "\015BUF" \
				    "\014VFC" "\013RD" "\012LB" "\004VFE" \
				    "\003ERR" "\002CMP" "\001DD"

	uint16_t	iaq_opcode;

	uint16_t	iaq_datalen;
	uint16_t	iaq_retval;

	uint64_t	iaq_cookie;

	uint32_t	iaq_param[4];
/*	iaq_data_hi	iaq_param[2] */
/*	iaq_data_lo	iaq_param[3] */
} __packed __aligned(8);
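
/*
 * Illustrative sketch (not part of the original driver): IXL_AQ_FLAGS_FMT
 * above is a printf(9) "%b" format string, so a descriptor's flags can be
 * decoded for debugging along these lines:
 *
 *	printf("%s: aq flags %b\n", DEVNAME(sc),
 *	    lemtoh16(&iaq->iaq_flags), IXL_AQ_FLAGS_FMT);
 */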

/* aq commands */
#define IXL_AQ_OP_GET_VERSION		0x0001
#define IXL_AQ_OP_DRIVER_VERSION	0x0002
#define IXL_AQ_OP_QUEUE_SHUTDOWN	0x0003
#define IXL_AQ_OP_SET_PF_CONTEXT	0x0004
#define IXL_AQ_OP_GET_AQ_ERR_REASON	0x0005
#define IXL_AQ_OP_REQUEST_RESOURCE	0x0008
#define IXL_AQ_OP_RELEASE_RESOURCE	0x0009
#define IXL_AQ_OP_LIST_FUNC_CAP		0x000a
#define IXL_AQ_OP_LIST_DEV_CAP		0x000b
#define IXL_AQ_OP_MAC_ADDRESS_READ	0x0107
#define IXL_AQ_OP_CLEAR_PXE_MODE	0x0110
#define IXL_AQ_OP_SWITCH_GET_CONFIG	0x0200
#define IXL_AQ_OP_ADD_VSI		0x0210
#define IXL_AQ_OP_UPD_VSI_PARAMS	0x0211
#define IXL_AQ_OP_GET_VSI_PARAMS	0x0212
#define IXL_AQ_OP_ADD_VEB		0x0230
#define IXL_AQ_OP_UPD_VEB_PARAMS	0x0231
#define IXL_AQ_OP_GET_VEB_PARAMS	0x0232
#define IXL_AQ_OP_ADD_MACVLAN		0x0250
#define IXL_AQ_OP_REMOVE_MACVLAN	0x0251
#define IXL_AQ_OP_SET_VSI_PROMISC	0x0254
#define IXL_AQ_OP_PHY_GET_ABILITIES	0x0600
#define IXL_AQ_OP_PHY_SET_CONFIG	0x0601
#define IXL_AQ_OP_PHY_SET_MAC_CONFIG	0x0603
#define IXL_AQ_OP_PHY_RESTART_AN	0x0605
#define IXL_AQ_OP_PHY_LINK_STATUS	0x0607
#define IXL_AQ_OP_PHY_SET_EVENT_MASK	0x0613
#define IXL_AQ_OP_PHY_SET_REGISTER	0x0628
#define IXL_AQ_OP_PHY_GET_REGISTER	0x0629
#define IXL_AQ_OP_LLDP_GET_MIB		0x0a00
#define IXL_AQ_OP_LLDP_MIB_CHG_EV	0x0a01
#define IXL_AQ_OP_LLDP_ADD_TLV		0x0a02
#define IXL_AQ_OP_LLDP_UPD_TLV		0x0a03
#define IXL_AQ_OP_LLDP_DEL_TLV		0x0a04
#define IXL_AQ_OP_LLDP_STOP_AGENT	0x0a05
#define IXL_AQ_OP_LLDP_START_AGENT	0x0a06
#define IXL_AQ_OP_LLDP_GET_CEE_DCBX	0x0a07
#define IXL_AQ_OP_LLDP_SPECIFIC_AGENT	0x0a09
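
/*
 * A minimal sketch (illustrative only) of how one of these opcodes is
 * used; the real submission paths are ixl_atq_poll() and ixl_atq_post()
 * further down in this file:
 *
 *	struct ixl_aq_desc iaq;
 *
 *	memset(&iaq, 0, sizeof(iaq));
 *	iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VERSION);
 *	if (ixl_atq_poll(sc, &iaq, 250) != 0)
 *		... the command timed out ...
 */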

struct ixl_aq_mac_addresses {
	uint8_t		pf_lan[ETHER_ADDR_LEN];
	uint8_t		pf_san[ETHER_ADDR_LEN];
	uint8_t		port[ETHER_ADDR_LEN];
	uint8_t		pf_wol[ETHER_ADDR_LEN];
} __packed;

#define IXL_AQ_MAC_PF_LAN_VALID		(1U << 4)
#define IXL_AQ_MAC_PF_SAN_VALID		(1U << 5)
#define IXL_AQ_MAC_PORT_VALID		(1U << 6)
#define IXL_AQ_MAC_PF_WOL_VALID		(1U << 7)

struct ixl_aq_capability {
	uint16_t	cap_id;
#define IXL_AQ_CAP_SWITCH_MODE		0x0001
#define IXL_AQ_CAP_MNG_MODE		0x0002
#define IXL_AQ_CAP_NPAR_ACTIVE		0x0003
#define IXL_AQ_CAP_OS2BMC_CAP		0x0004
#define IXL_AQ_CAP_FUNCTIONS_VALID	0x0005
#define IXL_AQ_CAP_ALTERNATE_RAM	0x0006
#define IXL_AQ_CAP_WOL_AND_PROXY	0x0008
#define IXL_AQ_CAP_SRIOV		0x0012
#define IXL_AQ_CAP_VF			0x0013
#define IXL_AQ_CAP_VMDQ			0x0014
#define IXL_AQ_CAP_8021QBG		0x0015
#define IXL_AQ_CAP_8021QBR		0x0016
#define IXL_AQ_CAP_VSI			0x0017
#define IXL_AQ_CAP_DCB			0x0018
#define IXL_AQ_CAP_FCOE			0x0021
#define IXL_AQ_CAP_ISCSI		0x0022
#define IXL_AQ_CAP_RSS			0x0040
#define IXL_AQ_CAP_RXQ			0x0041
#define IXL_AQ_CAP_TXQ			0x0042
#define IXL_AQ_CAP_MSIX			0x0043
#define IXL_AQ_CAP_VF_MSIX		0x0044
#define IXL_AQ_CAP_FLOW_DIRECTOR	0x0045
#define IXL_AQ_CAP_1588			0x0046
#define IXL_AQ_CAP_IWARP		0x0051
#define IXL_AQ_CAP_LED			0x0061
#define IXL_AQ_CAP_SDP			0x0062
#define IXL_AQ_CAP_MDIO			0x0063
#define IXL_AQ_CAP_WSR_PROT		0x0064
#define IXL_AQ_CAP_NVM_MGMT		0x0080
#define IXL_AQ_CAP_FLEX10		0x00F1
#define IXL_AQ_CAP_CEM			0x00F2
	uint8_t		major_rev;
	uint8_t		minor_rev;
	uint32_t	number;
	uint32_t	logical_id;
	uint32_t	phys_id;
	uint8_t		_reserved[16];
} __packed __aligned(4);
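
/*
 * LIST_FUNC_CAP/LIST_DEV_CAP return an array of these structures in the
 * command buffer. A sketch of walking it (illustrative; "idm" and "ncaps"
 * are hypothetical names, with ncaps assumed to come from the reply):
 *
 *	struct ixl_aq_capability *caps = IXL_DMA_KVA(&idm);
 *
 *	for (i = 0; i < ncaps; i++) {
 *		switch (lemtoh16(&caps[i].cap_id)) { ... }
 *	}
 */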

#define IXL_LLDP_SHUTDOWN		0x1

struct ixl_aq_switch_config {
	uint16_t	num_reported;
	uint16_t	num_total;
	uint8_t		_reserved[12];
} __packed __aligned(4);

struct ixl_aq_switch_config_element {
	uint8_t		type;
#define IXL_AQ_SW_ELEM_TYPE_MAC		1
#define IXL_AQ_SW_ELEM_TYPE_PF		2
#define IXL_AQ_SW_ELEM_TYPE_VF		3
#define IXL_AQ_SW_ELEM_TYPE_EMP		4
#define IXL_AQ_SW_ELEM_TYPE_BMC		5
#define IXL_AQ_SW_ELEM_TYPE_PV		16
#define IXL_AQ_SW_ELEM_TYPE_VEB		17
#define IXL_AQ_SW_ELEM_TYPE_PA		18
#define IXL_AQ_SW_ELEM_TYPE_VSI		19
	uint8_t		revision;
#define IXL_AQ_SW_ELEM_REV_1		1
	uint16_t	seid;

	uint16_t	uplink_seid;
	uint16_t	downlink_seid;

	uint8_t		_reserved[3];
	uint8_t		connection_type;
#define IXL_AQ_CONN_TYPE_REGULAR	0x1
#define IXL_AQ_CONN_TYPE_DEFAULT	0x2
#define IXL_AQ_CONN_TYPE_CASCADED	0x3

	uint16_t	scheduler_id;
	uint16_t	element_info;
} __packed __aligned(4);

#define IXL_PHY_TYPE_SGMII		0x00
#define IXL_PHY_TYPE_1000BASE_KX	0x01
#define IXL_PHY_TYPE_10GBASE_KX4	0x02
#define IXL_PHY_TYPE_10GBASE_KR		0x03
#define IXL_PHY_TYPE_40GBASE_KR4	0x04
#define IXL_PHY_TYPE_XAUI		0x05
#define IXL_PHY_TYPE_XFI		0x06
#define IXL_PHY_TYPE_SFI		0x07
#define IXL_PHY_TYPE_XLAUI		0x08
#define IXL_PHY_TYPE_XLPPI		0x09
#define IXL_PHY_TYPE_40GBASE_CR4_CU	0x0a
#define IXL_PHY_TYPE_10GBASE_CR1_CU	0x0b
#define IXL_PHY_TYPE_10GBASE_AOC	0x0c
#define IXL_PHY_TYPE_40GBASE_AOC	0x0d
#define IXL_PHY_TYPE_100BASE_TX		0x11
#define IXL_PHY_TYPE_1000BASE_T		0x12
#define IXL_PHY_TYPE_10GBASE_T		0x13
#define IXL_PHY_TYPE_10GBASE_SR		0x14
#define IXL_PHY_TYPE_10GBASE_LR		0x15
#define IXL_PHY_TYPE_10GBASE_SFPP_CU	0x16
#define IXL_PHY_TYPE_10GBASE_CR1	0x17
#define IXL_PHY_TYPE_40GBASE_CR4	0x18
#define IXL_PHY_TYPE_40GBASE_SR4	0x19
#define IXL_PHY_TYPE_40GBASE_LR4	0x1a
#define IXL_PHY_TYPE_1000BASE_SX	0x1b
#define IXL_PHY_TYPE_1000BASE_LX	0x1c
#define IXL_PHY_TYPE_1000BASE_T_OPTICAL	0x1d
#define IXL_PHY_TYPE_20GBASE_KR2	0x1e

#define IXL_PHY_TYPE_25GBASE_KR		0x1f
#define IXL_PHY_TYPE_25GBASE_CR		0x20
#define IXL_PHY_TYPE_25GBASE_SR		0x21
#define IXL_PHY_TYPE_25GBASE_LR		0x22
#define IXL_PHY_TYPE_25GBASE_AOC	0x23
#define IXL_PHY_TYPE_25GBASE_ACC	0x24

struct ixl_aq_module_desc {
	uint8_t		oui[3];
	uint8_t		_reserved1;
	uint8_t		part_number[16];
	uint8_t		revision[4];
	uint8_t		_reserved2[8];
} __packed __aligned(4);

struct ixl_aq_phy_abilities {
	uint32_t	phy_type;

	uint8_t		link_speed;
#define IXL_AQ_PHY_LINK_SPEED_100MB	(1 << 1)
#define IXL_AQ_PHY_LINK_SPEED_1000MB	(1 << 2)
#define IXL_AQ_PHY_LINK_SPEED_10GB	(1 << 3)
#define IXL_AQ_PHY_LINK_SPEED_40GB	(1 << 4)
#define IXL_AQ_PHY_LINK_SPEED_20GB	(1 << 5)
#define IXL_AQ_PHY_LINK_SPEED_25GB	(1 << 6)
	uint8_t		abilities;
	uint16_t	eee_capability;

	uint32_t	eeer_val;

	uint8_t		d3_lpan;
	uint8_t		phy_type_ext;
#define IXL_AQ_PHY_TYPE_EXT_25G_KR	0x01
#define IXL_AQ_PHY_TYPE_EXT_25G_CR	0x02
#define IXL_AQ_PHY_TYPE_EXT_25G_SR	0x04
#define IXL_AQ_PHY_TYPE_EXT_25G_LR	0x08
	uint8_t		fec_cfg_curr_mod_ext_info;
#define IXL_AQ_ENABLE_FEC_KR		0x01
#define IXL_AQ_ENABLE_FEC_RS		0x02
#define IXL_AQ_REQUEST_FEC_KR		0x04
#define IXL_AQ_REQUEST_FEC_RS		0x08
#define IXL_AQ_ENABLE_FEC_AUTO		0x10
#define IXL_AQ_MODULE_TYPE_EXT_MASK	0xe0
#define IXL_AQ_MODULE_TYPE_EXT_SHIFT	5
	uint8_t		ext_comp_code;

	uint8_t		phy_id[4];

	uint8_t		module_type[3];
#define IXL_SFF8024_ID_SFP		0x03
#define IXL_SFF8024_ID_QSFP		0x0c
#define IXL_SFF8024_ID_QSFP_PLUS	0x0d
#define IXL_SFF8024_ID_QSFP28		0x11
	uint8_t		qualified_module_count;
#define IXL_AQ_PHY_MAX_QMS		16
	struct ixl_aq_module_desc
			qualified_module[IXL_AQ_PHY_MAX_QMS];
} __packed __aligned(4);

struct ixl_aq_link_param {
	uint8_t		notify;
#define IXL_AQ_LINK_NOTIFY	0x03
	uint8_t		_reserved1;
	uint8_t		phy;
	uint8_t		speed;
	uint8_t		status;
	uint8_t		_reserved2[11];
} __packed __aligned(4);

struct ixl_aq_vsi_param {
	uint16_t	uplink_seid;
	uint8_t		connect_type;
#define IXL_AQ_VSI_CONN_TYPE_NORMAL	(0x1)
#define IXL_AQ_VSI_CONN_TYPE_DEFAULT	(0x2)
#define IXL_AQ_VSI_CONN_TYPE_CASCADED	(0x3)
	uint8_t		_reserved1;

	uint8_t		vf_id;
	uint8_t		_reserved2;
	uint16_t	vsi_flags;
#define IXL_AQ_VSI_TYPE_SHIFT		0x0
#define IXL_AQ_VSI_TYPE_MASK		(0x3 << IXL_AQ_VSI_TYPE_SHIFT)
#define IXL_AQ_VSI_TYPE_VF		0x0
#define IXL_AQ_VSI_TYPE_VMDQ2		0x1
#define IXL_AQ_VSI_TYPE_PF		0x2
#define IXL_AQ_VSI_TYPE_EMP_MNG		0x3
#define IXL_AQ_VSI_FLAG_CASCADED_PV	0x4

	uint32_t	addr_hi;
	uint32_t	addr_lo;
} __packed __aligned(16);

struct ixl_aq_add_macvlan {
	uint16_t	num_addrs;
	uint16_t	seid0;
	uint16_t	seid1;
	uint16_t	seid2;
	uint32_t	addr_hi;
	uint32_t	addr_lo;
} __packed __aligned(16);

struct ixl_aq_add_macvlan_elem {
	uint8_t		macaddr[6];
	uint16_t	vlan;
	uint16_t	flags;
#define IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH	0x0001
#define IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN	0x0004
	uint16_t	queue;
	uint32_t	_reserved;
} __packed __aligned(16);

struct ixl_aq_remove_macvlan {
	uint16_t	num_addrs;
	uint16_t	seid0;
	uint16_t	seid1;
	uint16_t	seid2;
	uint32_t	addr_hi;
	uint32_t	addr_lo;
} __packed __aligned(16);

struct ixl_aq_remove_macvlan_elem {
	uint8_t		macaddr[6];
	uint16_t	vlan;
	uint8_t		flags;
#define IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH	0x0001
#define IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN	0x0008
	uint8_t		_reserved[7];
} __packed __aligned(16);

struct ixl_aq_vsi_reply {
	uint16_t	seid;
	uint16_t	vsi_number;

	uint16_t	vsis_used;
	uint16_t	vsis_free;

	uint32_t	addr_hi;
	uint32_t	addr_lo;
} __packed __aligned(16);

struct ixl_aq_vsi_data {
	/* first 96 bytes are written by SW */
	uint16_t	valid_sections;
#define IXL_AQ_VSI_VALID_SWITCH		(1 << 0)
#define IXL_AQ_VSI_VALID_SECURITY	(1 << 1)
#define IXL_AQ_VSI_VALID_VLAN		(1 << 2)
#define IXL_AQ_VSI_VALID_CAS_PV		(1 << 3)
#define IXL_AQ_VSI_VALID_INGRESS_UP	(1 << 4)
#define IXL_AQ_VSI_VALID_EGRESS_UP	(1 << 5)
#define IXL_AQ_VSI_VALID_QUEUE_MAP	(1 << 6)
#define IXL_AQ_VSI_VALID_QUEUE_OPT	(1 << 7)
#define IXL_AQ_VSI_VALID_OUTER_UP	(1 << 8)
#define IXL_AQ_VSI_VALID_SCHED		(1 << 9)
	/* switch section */
	uint16_t	switch_id;
#define IXL_AQ_VSI_SWITCH_ID_SHIFT	0
#define IXL_AQ_VSI_SWITCH_ID_MASK	(0xfff << IXL_AQ_VSI_SWITCH_ID_SHIFT)
#define IXL_AQ_VSI_SWITCH_NOT_STAG	(1 << 12)
#define IXL_AQ_VSI_SWITCH_LOCAL_LB	(1 << 14)

	uint8_t		_reserved1[2];
	/* security section */
	uint8_t		sec_flags;
#define IXL_AQ_VSI_SEC_ALLOW_DEST_OVRD	(1 << 0)
#define IXL_AQ_VSI_SEC_ENABLE_VLAN_CHK	(1 << 1)
#define IXL_AQ_VSI_SEC_ENABLE_MAC_CHK	(1 << 2)
	uint8_t		_reserved2;

	/* vlan section */
	uint16_t	pvid;
	uint16_t	fcoe_pvid;

	uint8_t		port_vlan_flags;
#define IXL_AQ_VSI_PVLAN_MODE_SHIFT	0
#define IXL_AQ_VSI_PVLAN_MODE_MASK	(0x3 << IXL_AQ_VSI_PVLAN_MODE_SHIFT)
#define IXL_AQ_VSI_PVLAN_MODE_TAGGED	(0x1 << IXL_AQ_VSI_PVLAN_MODE_SHIFT)
#define IXL_AQ_VSI_PVLAN_MODE_UNTAGGED	(0x2 << IXL_AQ_VSI_PVLAN_MODE_SHIFT)
#define IXL_AQ_VSI_PVLAN_MODE_ALL	(0x3 << IXL_AQ_VSI_PVLAN_MODE_SHIFT)
#define IXL_AQ_VSI_PVLAN_INSERT_PVID	(0x4 << IXL_AQ_VSI_PVLAN_MODE_SHIFT)
#define IXL_AQ_VSI_PVLAN_EMOD_SHIFT	0x3
#define IXL_AQ_VSI_PVLAN_EMOD_MASK	(0x3 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT)
#define IXL_AQ_VSI_PVLAN_EMOD_STR_BOTH	(0x0 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT)
#define IXL_AQ_VSI_PVLAN_EMOD_STR_UP	(0x1 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT)
#define IXL_AQ_VSI_PVLAN_EMOD_STR	(0x2 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT)
#define IXL_AQ_VSI_PVLAN_EMOD_NOTHING	(0x3 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT)
	uint8_t		_reserved3[3];

	/* ingress egress up section */
	uint32_t	ingress_table;
#define IXL_AQ_VSI_UP_SHIFT(_up)	((_up) * 3)
#define IXL_AQ_VSI_UP_MASK(_up)		(0x7 << IXL_AQ_VSI_UP_SHIFT(_up))
	uint32_t	egress_table;

	/* cascaded pv section */
	uint16_t	cas_pv_tag;
	uint8_t		cas_pv_flags;
#define IXL_AQ_VSI_CAS_PV_TAGX_SHIFT	0
#define IXL_AQ_VSI_CAS_PV_TAGX_MASK	(0x3 << IXL_AQ_VSI_CAS_PV_TAGX_SHIFT)
#define IXL_AQ_VSI_CAS_PV_TAGX_LEAVE	(0x0 << IXL_AQ_VSI_CAS_PV_TAGX_SHIFT)
#define IXL_AQ_VSI_CAS_PV_TAGX_REMOVE	(0x1 << IXL_AQ_VSI_CAS_PV_TAGX_SHIFT)
#define IXL_AQ_VSI_CAS_PV_TAGX_COPY	(0x2 << IXL_AQ_VSI_CAS_PV_TAGX_SHIFT)
#define IXL_AQ_VSI_CAS_PV_INSERT_TAG	(1 << 4)
#define IXL_AQ_VSI_CAS_PV_ETAG_PRUNE	(1 << 5)
#define IXL_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG \
					(1 << 6)
	uint8_t		_reserved4;

	/* queue mapping section */
	uint16_t	mapping_flags;
#define IXL_AQ_VSI_QUE_MAP_MASK		0x1
#define IXL_AQ_VSI_QUE_MAP_CONTIG	0x0
#define IXL_AQ_VSI_QUE_MAP_NONCONTIG	0x1
	uint16_t	queue_mapping[16];
#define IXL_AQ_VSI_QUEUE_SHIFT		0x0
#define IXL_AQ_VSI_QUEUE_MASK		(0x7ff << IXL_AQ_VSI_QUEUE_SHIFT)
	uint16_t	tc_mapping[8];
#define IXL_AQ_VSI_TC_Q_OFFSET_SHIFT	0
#define IXL_AQ_VSI_TC_Q_OFFSET_MASK	(0x1ff << IXL_AQ_VSI_TC_Q_OFFSET_SHIFT)
#define IXL_AQ_VSI_TC_Q_NUMBER_SHIFT	9
#define IXL_AQ_VSI_TC_Q_NUMBER_MASK	(0x7 << IXL_AQ_VSI_TC_Q_NUMBER_SHIFT)

	/* queueing option section */
	uint8_t		queueing_opt_flags;
#define IXL_AQ_VSI_QUE_OPT_MCAST_UDP_EN	(1 << 2)
#define IXL_AQ_VSI_QUE_OPT_UCAST_UDP_EN	(1 << 3)
#define IXL_AQ_VSI_QUE_OPT_TCP_EN	(1 << 4)
#define IXL_AQ_VSI_QUE_OPT_FCOE_EN	(1 << 5)
#define IXL_AQ_VSI_QUE_OPT_RSS_LUT_PF	0
#define IXL_AQ_VSI_QUE_OPT_RSS_LUT_VSI	(1 << 6)
	uint8_t		_reserved5[3];

	/* scheduler section */
	uint8_t		up_enable_bits;
	uint8_t		_reserved6;

	/* outer up section */
	uint32_t	outer_up_table; /* same as ingress/egress tables */
	uint8_t		_reserved7[8];

	/* last 32 bytes are written by FW */
	uint16_t	qs_handle[8];
#define IXL_AQ_VSI_QS_HANDLE_INVALID	0xffff
	uint16_t	stat_counter_idx;
	uint16_t	sched_id;

	uint8_t		_reserved8[12];
} __packed __aligned(8);

CTASSERT(sizeof(struct ixl_aq_vsi_data) == 128);
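
/*
 * Sketch of filling in the software half of the VSI context for a
 * contiguous queue mapping (illustrative only; roughly what the driver
 * is expected to do when it brings the interface up):
 *
 *	data->valid_sections |= htole16(IXL_AQ_VSI_VALID_QUEUE_MAP);
 *	data->mapping_flags = htole16(IXL_AQ_VSI_QUE_MAP_CONTIG);
 *	data->queue_mapping[0] = htole16(0);
 *	data->tc_mapping[0] = htole16((0 << IXL_AQ_VSI_TC_Q_OFFSET_SHIFT) |
 *	    (sc->sc_nqueues << IXL_AQ_VSI_TC_Q_NUMBER_SHIFT));
 */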

struct ixl_aq_vsi_promisc_param {
	uint16_t	flags;
	uint16_t	valid_flags;
#define IXL_AQ_VSI_PROMISC_FLAG_UCAST	(1 << 0)
#define IXL_AQ_VSI_PROMISC_FLAG_MCAST	(1 << 1)
#define IXL_AQ_VSI_PROMISC_FLAG_BCAST	(1 << 2)
#define IXL_AQ_VSI_PROMISC_FLAG_DFLT	(1 << 3)
#define IXL_AQ_VSI_PROMISC_FLAG_VLAN	(1 << 4)
#define IXL_AQ_VSI_PROMISC_FLAG_RXONLY	(1 << 15)

	uint16_t	seid;
#define IXL_AQ_VSI_PROMISC_SEID_VALID	(1 << 15)
	uint16_t	vlan;
#define IXL_AQ_VSI_PROMISC_VLAN_VALID	(1 << 15)
	uint32_t	reserved[2];
} __packed __aligned(8);

struct ixl_aq_veb_param {
	uint16_t	uplink_seid;
	uint16_t	downlink_seid;
	uint16_t	veb_flags;
#define IXL_AQ_ADD_VEB_FLOATING		(1 << 0)
#define IXL_AQ_ADD_VEB_PORT_TYPE_SHIFT	1
#define IXL_AQ_ADD_VEB_PORT_TYPE_MASK	(0x3 << IXL_AQ_ADD_VEB_PORT_TYPE_SHIFT)
#define IXL_AQ_ADD_VEB_PORT_TYPE_DEFAULT \
					(0x2 << IXL_AQ_ADD_VEB_PORT_TYPE_SHIFT)
#define IXL_AQ_ADD_VEB_PORT_TYPE_DATA	(0x4 << IXL_AQ_ADD_VEB_PORT_TYPE_SHIFT)
#define IXL_AQ_ADD_VEB_ENABLE_L2_FILTER	(1 << 3) /* deprecated */
#define IXL_AQ_ADD_VEB_DISABLE_STATS	(1 << 4)
	uint8_t		enable_tcs;
	uint8_t		_reserved[9];
} __packed __aligned(16);

struct ixl_aq_veb_reply {
	uint16_t	_reserved1;
	uint16_t	_reserved2;
	uint16_t	_reserved3;
	uint16_t	switch_seid;
	uint16_t	veb_seid;
#define IXL_AQ_VEB_ERR_FLAG_NO_VEB	(1 << 0)
#define IXL_AQ_VEB_ERR_FLAG_NO_SCHED	(1 << 1)
#define IXL_AQ_VEB_ERR_FLAG_NO_COUNTER	(1 << 2)
#define IXL_AQ_VEB_ERR_FLAG_NO_ENTRY	(1 << 3)
	uint16_t	statistic_index;
	uint16_t	vebs_used;
	uint16_t	vebs_free;
} __packed __aligned(16);

/* GET PHY ABILITIES param[0] */
#define IXL_AQ_PHY_REPORT_QUAL		(1 << 0)
#define IXL_AQ_PHY_REPORT_INIT		(1 << 1)

struct ixl_aq_phy_reg_access {
	uint8_t		phy_iface;
#define IXL_AQ_PHY_IF_INTERNAL		0
#define IXL_AQ_PHY_IF_EXTERNAL		1
#define IXL_AQ_PHY_IF_MODULE		2
	uint8_t		dev_addr;
	uint16_t	recall;
#define IXL_AQ_PHY_QSFP_DEV_ADDR	0
#define IXL_AQ_PHY_QSFP_LAST		1
	uint32_t	reg;
	uint32_t	val;
	uint32_t	_reserved2;
} __packed __aligned(16);

/* RESTART_AN param[0] */
#define IXL_AQ_PHY_RESTART_AN		(1 << 1)
#define IXL_AQ_PHY_LINK_ENABLE		(1 << 2)

struct ixl_aq_link_status { /* this occupies the iaq_param space */
	uint16_t	command_flags; /* only field set on command */
#define IXL_AQ_LSE_MASK			0x3
#define IXL_AQ_LSE_NOP			0x0
#define IXL_AQ_LSE_DISABLE		0x2
#define IXL_AQ_LSE_ENABLE		0x3
#define IXL_AQ_LSE_IS_ENABLED		0x1 /* only set in response */
	uint8_t		phy_type;
	uint8_t		link_speed;
#define IXL_AQ_LINK_SPEED_1GB		(1 << 2)
#define IXL_AQ_LINK_SPEED_10GB		(1 << 3)
#define IXL_AQ_LINK_SPEED_40GB		(1 << 4)
#define IXL_AQ_LINK_SPEED_25GB		(1 << 6)
	uint8_t		link_info;
#define IXL_AQ_LINK_UP_FUNCTION		0x01
#define IXL_AQ_LINK_FAULT		0x02
#define IXL_AQ_LINK_FAULT_TX		0x04
#define IXL_AQ_LINK_FAULT_RX		0x08
#define IXL_AQ_LINK_FAULT_REMOTE	0x10
#define IXL_AQ_LINK_UP_PORT		0x20
#define IXL_AQ_MEDIA_AVAILABLE		0x40
#define IXL_AQ_SIGNAL_DETECT		0x80
	uint8_t		an_info;
#define IXL_AQ_AN_COMPLETED		0x01
#define IXL_AQ_LP_AN_ABILITY		0x02
#define IXL_AQ_PD_FAULT			0x04
#define IXL_AQ_FEC_EN			0x08
#define IXL_AQ_PHY_LOW_POWER		0x10
#define IXL_AQ_LINK_PAUSE_TX		0x20
#define IXL_AQ_LINK_PAUSE_RX		0x40
#define IXL_AQ_QUALIFIED_MODULE		0x80

	uint8_t		ext_info;
#define IXL_AQ_LINK_PHY_TEMP_ALARM	0x01
#define IXL_AQ_LINK_XCESSIVE_ERRORS	0x02
#define IXL_AQ_LINK_TX_SHIFT		0x02
#define IXL_AQ_LINK_TX_MASK		(0x03 << IXL_AQ_LINK_TX_SHIFT)
#define IXL_AQ_LINK_TX_ACTIVE		0x00
#define IXL_AQ_LINK_TX_DRAINED		0x01
#define IXL_AQ_LINK_TX_FLUSHED		0x03
#define IXL_AQ_LINK_FORCED_40G		0x10
/* 25G Error Codes */
#define IXL_AQ_25G_NO_ERR		0x00
#define IXL_AQ_25G_NOT_PRESENT		0x01
#define IXL_AQ_25G_NVM_CRC_ERR		0x02
#define IXL_AQ_25G_SBUS_UCODE_ERR	0x03
#define IXL_AQ_25G_SERDES_UCODE_ERR	0x04
#define IXL_AQ_25G_NIMB_UCODE_ERR	0x05
	uint8_t		loopback;
	uint16_t	max_frame_size;

	uint8_t		config;
#define IXL_AQ_CONFIG_FEC_KR_ENA	0x01
#define IXL_AQ_CONFIG_FEC_RS_ENA	0x02
#define IXL_AQ_CONFIG_CRC_ENA		0x04
#define IXL_AQ_CONFIG_PACING_MASK	0x78
	uint8_t		power_desc;
#define IXL_AQ_LINK_POWER_CLASS_1	0x00
#define IXL_AQ_LINK_POWER_CLASS_2	0x01
#define IXL_AQ_LINK_POWER_CLASS_3	0x02
#define IXL_AQ_LINK_POWER_CLASS_4	0x03
#define IXL_AQ_PWR_CLASS_MASK		0x03

	uint8_t		reserved[4];
} __packed __aligned(4);
/* event mask command flags for param[2] */
#define IXL_AQ_PHY_EV_MASK		0x3ff
#define IXL_AQ_PHY_EV_LINK_UPDOWN	(1 << 1)
#define IXL_AQ_PHY_EV_MEDIA_NA		(1 << 2)
#define IXL_AQ_PHY_EV_LINK_FAULT	(1 << 3)
#define IXL_AQ_PHY_EV_PHY_TEMP_ALARM	(1 << 4)
#define IXL_AQ_PHY_EV_EXCESS_ERRORS	(1 << 5)
#define IXL_AQ_PHY_EV_SIGNAL_DETECT	(1 << 6)
#define IXL_AQ_PHY_EV_AN_COMPLETED	(1 << 7)
#define IXL_AQ_PHY_EV_MODULE_QUAL_FAIL	(1 << 8)
#define IXL_AQ_PHY_EV_PORT_TX_SUSPENDED	(1 << 9)

/* aq response codes */
#define IXL_AQ_RC_OK			0  /* success */
#define IXL_AQ_RC_EPERM			1  /* Operation not permitted */
#define IXL_AQ_RC_ENOENT		2  /* No such element */
#define IXL_AQ_RC_ESRCH			3  /* Bad opcode */
#define IXL_AQ_RC_EINTR			4  /* operation interrupted */
#define IXL_AQ_RC_EIO			5  /* I/O error */
#define IXL_AQ_RC_ENXIO			6  /* No such resource */
#define IXL_AQ_RC_E2BIG			7  /* Arg too long */
#define IXL_AQ_RC_EAGAIN		8  /* Try again */
#define IXL_AQ_RC_ENOMEM		9  /* Out of memory */
#define IXL_AQ_RC_EACCES		10 /* Permission denied */
#define IXL_AQ_RC_EFAULT		11 /* Bad address */
#define IXL_AQ_RC_EBUSY			12 /* Device or resource busy */
#define IXL_AQ_RC_EEXIST		13 /* object already exists */
#define IXL_AQ_RC_EINVAL		14 /* invalid argument */
#define IXL_AQ_RC_ENOTTY		15 /* not a typewriter */
#define IXL_AQ_RC_ENOSPC		16 /* No space or alloc failure */
#define IXL_AQ_RC_ENOSYS		17 /* function not implemented */
#define IXL_AQ_RC_ERANGE		18 /* parameter out of range */
#define IXL_AQ_RC_EFLUSHED		19 /* cmd flushed due to prev error */
#define IXL_AQ_RC_BAD_ADDR		20 /* contains a bad pointer */
#define IXL_AQ_RC_EMODE			21 /* not allowed in current mode */
#define IXL_AQ_RC_EFBIG			22 /* file too large */

struct ixl_tx_desc {
	uint64_t		addr;
	uint64_t		cmd;
#define IXL_TX_DESC_DTYPE_SHIFT		0
#define IXL_TX_DESC_DTYPE_MASK		(0xfULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_DATA		(0x0ULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_NOP		(0x1ULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_CONTEXT	(0x1ULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_FCOE_CTX	(0x2ULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_FD		(0x8ULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_DDP_CTX	(0x9ULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_FLEX_DATA	(0xbULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_FLEX_CTX_1	(0xcULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_FLEX_CTX_2	(0xdULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_DONE		(0xfULL << IXL_TX_DESC_DTYPE_SHIFT)

#define IXL_TX_DESC_CMD_SHIFT		4
#define IXL_TX_DESC_CMD_MASK		(0x3ffULL << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_EOP		(0x001 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_RS		(0x002 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_ICRC		(0x004 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_IL2TAG1		(0x008 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_DUMMY		(0x010 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_IIPT_MASK	(0x060 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_IIPT_NONIP	(0x000 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_IIPT_IPV6	(0x020 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_IIPT_IPV4	(0x040 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_IIPT_IPV4_CSUM	(0x060 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_FCOET		(0x080 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_L4T_EOFT_MASK	(0x300 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_L4T_EOFT_UNK	(0x000 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_L4T_EOFT_TCP	(0x100 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_L4T_EOFT_SCTP	(0x200 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_L4T_EOFT_UDP	(0x300 << IXL_TX_DESC_CMD_SHIFT)

#define IXL_TX_DESC_MACLEN_SHIFT	16
#define IXL_TX_DESC_MACLEN_MASK		(0x7fULL << IXL_TX_DESC_MACLEN_SHIFT)
#define IXL_TX_DESC_IPLEN_SHIFT		23
#define IXL_TX_DESC_IPLEN_MASK		(0x7fULL << IXL_TX_DESC_IPLEN_SHIFT)
#define IXL_TX_DESC_L4LEN_SHIFT		30
#define IXL_TX_DESC_L4LEN_MASK		(0xfULL << IXL_TX_DESC_L4LEN_SHIFT)
#define IXL_TX_DESC_FCLEN_SHIFT		30
#define IXL_TX_DESC_FCLEN_MASK		(0xfULL << IXL_TX_DESC_FCLEN_SHIFT)

#define IXL_TX_DESC_BSIZE_SHIFT		34
#define IXL_TX_DESC_BSIZE_MAX		0x3fffULL
#define IXL_TX_DESC_BSIZE_MASK		\
	(IXL_TX_DESC_BSIZE_MAX << IXL_TX_DESC_BSIZE_SHIFT)
} __packed __aligned(16);
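
/*
 * Sketch of a single-fragment data descriptor built from the fields
 * above (illustrative only; the last descriptor of a packet carries
 * EOP, and RS asks the hardware to report completion):
 *
 *	txd->addr = htole64(map->dm_segs[0].ds_addr);
 *	txd->cmd = htole64(IXL_TX_DESC_DTYPE_DATA |
 *	    IXL_TX_DESC_CMD_ICRC | IXL_TX_DESC_CMD_EOP | IXL_TX_DESC_CMD_RS |
 *	    ((uint64_t)map->dm_segs[0].ds_len << IXL_TX_DESC_BSIZE_SHIFT));
 */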

struct ixl_rx_rd_desc_16 {
	uint64_t		paddr; /* packet addr */
	uint64_t		haddr; /* header addr */
} __packed __aligned(16);

struct ixl_rx_rd_desc_32 {
	uint64_t		paddr; /* packet addr */
	uint64_t		haddr; /* header addr */
	uint64_t		_reserved1;
	uint64_t		_reserved2;
} __packed __aligned(16);

struct ixl_rx_wb_desc_16 {
	uint64_t		qword0;
	uint64_t		qword1;
#define IXL_RX_DESC_DD			(1 << 0)
#define IXL_RX_DESC_EOP			(1 << 1)
#define IXL_RX_DESC_L2TAG1P		(1 << 2)
#define IXL_RX_DESC_L3L4P		(1 << 3)
#define IXL_RX_DESC_CRCP		(1 << 4)
#define IXL_RX_DESC_TSYNINDX_SHIFT	5	/* TSYNINDX */
#define IXL_RX_DESC_TSYNINDX_MASK	(7 << IXL_RX_DESC_TSYNINDX_SHIFT)
#define IXL_RX_DESC_UMB_SHIFT		9
#define IXL_RX_DESC_UMB_MASK		(0x3 << IXL_RX_DESC_UMB_SHIFT)
#define IXL_RX_DESC_UMB_UCAST		(0x0 << IXL_RX_DESC_UMB_SHIFT)
#define IXL_RX_DESC_UMB_MCAST		(0x1 << IXL_RX_DESC_UMB_SHIFT)
#define IXL_RX_DESC_UMB_BCAST		(0x2 << IXL_RX_DESC_UMB_SHIFT)
#define IXL_RX_DESC_UMB_MIRROR		(0x3 << IXL_RX_DESC_UMB_SHIFT)
#define IXL_RX_DESC_FLM			(1 << 11)
#define IXL_RX_DESC_FLTSTAT_SHIFT	12
#define IXL_RX_DESC_FLTSTAT_MASK	(0x3 << IXL_RX_DESC_FLTSTAT_SHIFT)
#define IXL_RX_DESC_FLTSTAT_NODATA	(0x0 << IXL_RX_DESC_FLTSTAT_SHIFT)
#define IXL_RX_DESC_FLTSTAT_FDFILTID	(0x1 << IXL_RX_DESC_FLTSTAT_SHIFT)
#define IXL_RX_DESC_FLTSTAT_RSS		(0x3 << IXL_RX_DESC_FLTSTAT_SHIFT)
#define IXL_RX_DESC_LPBK		(1 << 14)
#define IXL_RX_DESC_IPV6EXTADD		(1 << 15)
#define IXL_RX_DESC_INT_UDP_0		(1 << 18)

#define IXL_RX_DESC_RXE			(1 << 19)
#define IXL_RX_DESC_HBO			(1 << 21)
#define IXL_RX_DESC_IPE			(1 << 22)
#define IXL_RX_DESC_L4E			(1 << 23)
#define IXL_RX_DESC_EIPE		(1 << 24)
#define IXL_RX_DESC_OVERSIZE		(1 << 25)

#define IXL_RX_DESC_PTYPE_SHIFT		30
#define IXL_RX_DESC_PTYPE_MASK		(0xffULL << IXL_RX_DESC_PTYPE_SHIFT)

#define IXL_RX_DESC_PLEN_SHIFT		38
#define IXL_RX_DESC_PLEN_MASK		(0x3fffULL << IXL_RX_DESC_PLEN_SHIFT)
#define IXL_RX_DESC_HLEN_SHIFT		42
#define IXL_RX_DESC_HLEN_MASK		(0x7ffULL << IXL_RX_DESC_HLEN_SHIFT)
} __packed __aligned(16);
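
/*
 * Decoding a write-back descriptor is a matter of masking qword1
 * (illustrative sketch of what an rxeof loop does):
 *
 *	uint64_t word = lemtoh64(&rxd->qword1);
 *
 *	if (!ISSET(word, IXL_RX_DESC_DD))
 *		... descriptor has not been written back yet ...
 *	len = (word & IXL_RX_DESC_PLEN_MASK) >> IXL_RX_DESC_PLEN_SHIFT;
 */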

struct ixl_rx_wb_desc_32 {
	uint64_t		qword0;
	uint64_t		qword1;
	uint64_t		qword2;
	uint64_t		qword3;
} __packed __aligned(16);

#define IXL_TX_PKT_DESCS		8
#define IXL_TX_QUEUE_ALIGN		128
#define IXL_RX_QUEUE_ALIGN		128

#define IXL_HARDMTU			9712 /* 9726 - ETHER_HDR_LEN */

#define IXL_PCIREG			PCI_MAPREG_START

#define IXL_ITR0			0x0
#define IXL_ITR1			0x1
#define IXL_ITR2			0x2
#define IXL_NOITR			0x2

#define IXL_AQ_NUM			256
#define IXL_AQ_MASK			(IXL_AQ_NUM - 1)
#define IXL_AQ_ALIGN			64 /* lol */
#define IXL_AQ_BUFLEN			4096

#define IXL_HMC_ROUNDUP			512
#define IXL_HMC_PGSIZE			4096
#define IXL_HMC_DVASZ			sizeof(uint64_t)
#define IXL_HMC_PGS			(IXL_HMC_PGSIZE / IXL_HMC_DVASZ)
#define IXL_HMC_L2SZ			(IXL_HMC_PGSIZE * IXL_HMC_PGS)
#define IXL_HMC_PDVALID			1ULL
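
/*
 * i.e. a 4096 byte page of 8 byte entries holds 512 page addresses, so
 * one fully populated page directory covers 512 * 4096 bytes (2MB) of
 * HMC object space.
 */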

struct ixl_aq_regs {
	bus_size_t		atq_tail;
	bus_size_t		atq_head;
	bus_size_t		atq_len;
	bus_size_t		atq_bal;
	bus_size_t		atq_bah;

	bus_size_t		arq_tail;
	bus_size_t		arq_head;
	bus_size_t		arq_len;
	bus_size_t		arq_bal;
	bus_size_t		arq_bah;

	uint32_t		atq_len_enable;
	uint32_t		atq_tail_mask;
	uint32_t		atq_head_mask;

	uint32_t		arq_len_enable;
	uint32_t		arq_tail_mask;
	uint32_t		arq_head_mask;
};

struct ixl_phy_type {
	uint64_t	phy_type;
	uint64_t	ifm_type;
};

struct ixl_speed_type {
	uint8_t		dev_speed;
	uint64_t	net_speed;
};

struct ixl_aq_buf {
	SIMPLEQ_ENTRY(ixl_aq_buf)
				 aqb_entry;
	void			*aqb_data;
	bus_dmamap_t		 aqb_map;
};
SIMPLEQ_HEAD(ixl_aq_bufs, ixl_aq_buf);

struct ixl_dmamem {
	bus_dmamap_t		ixm_map;
	bus_dma_segment_t	ixm_seg;
	int			ixm_nsegs;
	size_t			ixm_size;
	caddr_t			ixm_kva;
};
#define IXL_DMA_MAP(_ixm)	((_ixm)->ixm_map)
#define IXL_DMA_DVA(_ixm)	((_ixm)->ixm_map->dm_segs[0].ds_addr)
#define IXL_DMA_KVA(_ixm)	((void *)(_ixm)->ixm_kva)
#define IXL_DMA_LEN(_ixm)	((_ixm)->ixm_size)

struct ixl_hmc_entry {
	uint64_t		 hmc_base;
	uint32_t		 hmc_count;
	uint32_t		 hmc_size;
};

#define IXL_HMC_LAN_TX		 0
#define IXL_HMC_LAN_RX		 1
#define IXL_HMC_FCOE_CTX	 2
#define IXL_HMC_FCOE_FILTER	 3
#define IXL_HMC_COUNT		 4

struct ixl_hmc_pack {
	uint16_t		offset;
	uint16_t		width;
	uint16_t		lsb;
};

/*
 * these hmc objects have weird sizes and alignments, so these are abstract
 * representations of them that are nice for C to populate.
 *
 * the packing code relies on little-endian values being stored in the fields,
 * no high bits in the fields being set, and the fields must be packed in the
 * same order as they are in the ctx structure.
 */
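
/*
 * For example (illustrative): the rxq "qlen" entry below is
 * { offsetof(qlen), 13, 89 }, which means the low 13 bits of the value
 * stored in ixl_hmc_rxq's qlen field are copied into bits 89..101 of
 * the raw context image that ixl_hmc_pack() writes into HMC memory.
 */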

struct ixl_hmc_rxq {
	uint16_t		 head;
	uint8_t			 cpuid;
	uint64_t		 base;
#define IXL_HMC_RXQ_BASE_UNIT		128
	uint16_t		 qlen;
	uint16_t		 dbuff;
#define IXL_HMC_RXQ_DBUFF_UNIT		128
	uint8_t			 hbuff;
#define IXL_HMC_RXQ_HBUFF_UNIT		64
	uint8_t			 dtype;
#define IXL_HMC_RXQ_DTYPE_NOSPLIT	0x0
#define IXL_HMC_RXQ_DTYPE_HSPLIT	0x1
#define IXL_HMC_RXQ_DTYPE_SPLIT_ALWAYS	0x2
	uint8_t			 dsize;
#define IXL_HMC_RXQ_DSIZE_16		0
#define IXL_HMC_RXQ_DSIZE_32		1
	uint8_t			 crcstrip;
	uint8_t			 fc_ena;
	uint8_t			 l2sel;
	uint8_t			 hsplit_0;
	uint8_t			 hsplit_1;
	uint8_t			 showiv;
	uint16_t		 rxmax;
	uint8_t			 tphrdesc_ena;
	uint8_t			 tphwdesc_ena;
	uint8_t			 tphdata_ena;
	uint8_t			 tphhead_ena;
	uint8_t			 lrxqthresh;
	uint8_t			 prefena;
};

static const struct ixl_hmc_pack ixl_hmc_pack_rxq[] = {
	{ offsetof(struct ixl_hmc_rxq, head),		13,	0 },
	{ offsetof(struct ixl_hmc_rxq, cpuid),		8,	13 },
	{ offsetof(struct ixl_hmc_rxq, base),		57,	32 },
	{ offsetof(struct ixl_hmc_rxq, qlen),		13,	89 },
	{ offsetof(struct ixl_hmc_rxq, dbuff),		7,	102 },
	{ offsetof(struct ixl_hmc_rxq, hbuff),		5,	109 },
	{ offsetof(struct ixl_hmc_rxq, dtype),		2,	114 },
	{ offsetof(struct ixl_hmc_rxq, dsize),		1,	116 },
	{ offsetof(struct ixl_hmc_rxq, crcstrip),	1,	117 },
	{ offsetof(struct ixl_hmc_rxq, fc_ena),		1,	118 },
	{ offsetof(struct ixl_hmc_rxq, l2sel),		1,	119 },
	{ offsetof(struct ixl_hmc_rxq, hsplit_0),	4,	120 },
	{ offsetof(struct ixl_hmc_rxq, hsplit_1),	2,	124 },
	{ offsetof(struct ixl_hmc_rxq, showiv),		1,	127 },
	{ offsetof(struct ixl_hmc_rxq, rxmax),		14,	174 },
	{ offsetof(struct ixl_hmc_rxq, tphrdesc_ena),	1,	193 },
	{ offsetof(struct ixl_hmc_rxq, tphwdesc_ena),	1,	194 },
	{ offsetof(struct ixl_hmc_rxq, tphdata_ena),	1,	195 },
	{ offsetof(struct ixl_hmc_rxq, tphhead_ena),	1,	196 },
	{ offsetof(struct ixl_hmc_rxq, lrxqthresh),	3,	198 },
	{ offsetof(struct ixl_hmc_rxq, prefena),	1,	201 },
};

#define IXL_HMC_RXQ_MINSIZE (201 + 1)

struct ixl_hmc_txq {
	uint16_t		head;
	uint8_t			new_context;
	uint64_t		base;
#define IXL_HMC_TXQ_BASE_UNIT		128
	uint8_t			fc_ena;
	uint8_t			timesync_ena;
	uint8_t			fd_ena;
	uint8_t			alt_vlan_ena;
	uint16_t		thead_wb;
	uint8_t			cpuid;
	uint8_t			head_wb_ena;
#define IXL_HMC_TXQ_DESC_WB		0
#define IXL_HMC_TXQ_HEAD_WB		1
	uint16_t		qlen;
	uint8_t			tphrdesc_ena;
	uint8_t			tphrpacket_ena;
	uint8_t			tphwdesc_ena;
	uint64_t		head_wb_addr;
	uint32_t		crc;
	uint16_t		rdylist;
	uint8_t			rdylist_act;
};

static const struct ixl_hmc_pack ixl_hmc_pack_txq[] = {
	{ offsetof(struct ixl_hmc_txq, head),		13,	0 },
	{ offsetof(struct ixl_hmc_txq, new_context),	1,	30 },
	{ offsetof(struct ixl_hmc_txq, base),		57,	32 },
	{ offsetof(struct ixl_hmc_txq, fc_ena),		1,	89 },
	{ offsetof(struct ixl_hmc_txq, timesync_ena),	1,	90 },
	{ offsetof(struct ixl_hmc_txq, fd_ena),		1,	91 },
	{ offsetof(struct ixl_hmc_txq, alt_vlan_ena),	1,	92 },
	{ offsetof(struct ixl_hmc_txq, cpuid),		8,	96 },
/* line 1 */
	{ offsetof(struct ixl_hmc_txq, thead_wb),	13,	0 + 128 },
	{ offsetof(struct ixl_hmc_txq, head_wb_ena),	1,	32 + 128 },
	{ offsetof(struct ixl_hmc_txq, qlen),		13,	33 + 128 },
	{ offsetof(struct ixl_hmc_txq, tphrdesc_ena),	1,	46 + 128 },
	{ offsetof(struct ixl_hmc_txq, tphrpacket_ena),	1,	47 + 128 },
	{ offsetof(struct ixl_hmc_txq, tphwdesc_ena),	1,	48 + 128 },
	{ offsetof(struct ixl_hmc_txq, head_wb_addr),	64,	64 + 128 },
/* line 7 */
	{ offsetof(struct ixl_hmc_txq, crc),		32,	0 + (7*128) },
	{ offsetof(struct ixl_hmc_txq, rdylist),	10,	84 + (7*128) },
	{ offsetof(struct ixl_hmc_txq, rdylist_act),	1,	94 + (7*128) },
};

#define IXL_HMC_TXQ_MINSIZE (94 + (7*128) + 1)

struct ixl_tx_map {
	struct mbuf		*txm_m;
	bus_dmamap_t		 txm_map;
	unsigned int		 txm_eop;
};

struct ixl_tx_ring {
	unsigned int		 txr_prod;
	unsigned int		 txr_cons;

	struct ixl_tx_map	*txr_maps;
	struct ixl_dmamem	 txr_mem;

	bus_size_t		 txr_tail;
	unsigned int		 txr_qid;
};

struct ixl_rx_map {
	struct mbuf		*rxm_m;
	bus_dmamap_t		 rxm_map;
};

struct ixl_rx_ring {
	struct ixl_softc	*rxr_sc;

	struct if_rxring	 rxr_acct;
	struct timeout		 rxr_refill;

	unsigned int		 rxr_prod;
	unsigned int		 rxr_cons;

	struct ixl_rx_map	*rxr_maps;
	struct ixl_dmamem	 rxr_mem;

	struct mbuf		*rxr_m_head;
	struct mbuf		**rxr_m_tail;

	bus_size_t		 rxr_tail;
	unsigned int		 rxr_qid;
};

struct ixl_atq {
	struct ixl_aq_desc	  iatq_desc;
	void			 *iatq_arg;
	void			(*iatq_fn)(struct ixl_softc *, void *);
};
SIMPLEQ_HEAD(ixl_atq_list, ixl_atq);

struct ixl_softc {
	struct device		 sc_dev;
	struct arpcom		 sc_ac;
	struct ifmedia		 sc_media;
	uint64_t		 sc_media_status;
	uint64_t		 sc_media_active;

	pci_chipset_tag_t	 sc_pc;
	pci_intr_handle_t	 sc_ih;
	void			*sc_ihc;
	pcitag_t		 sc_tag;

	bus_dma_tag_t		 sc_dmat;
	bus_space_tag_t		 sc_memt;
	bus_space_handle_t	 sc_memh;
	bus_size_t		 sc_mems;

	uint8_t			 sc_pf_id;
	uint16_t		 sc_uplink_seid;	/* le */
	uint16_t		 sc_downlink_seid;	/* le */
	uint16_t		 sc_veb_seid;		/* le */
	uint16_t		 sc_vsi_number;		/* le */
	uint16_t		 sc_seid;
	unsigned int		 sc_base_queue;

	struct ixl_dmamem	 sc_scratch;

	const struct ixl_aq_regs *
				 sc_aq_regs;

	struct mutex		 sc_atq_mtx;
	struct ixl_dmamem	 sc_atq;
	unsigned int		 sc_atq_prod;
	unsigned int		 sc_atq_cons;

	struct ixl_dmamem	 sc_arq;
	struct task		 sc_arq_task;
	struct ixl_aq_bufs	 sc_arq_idle;
	struct ixl_aq_bufs	 sc_arq_live;
	struct if_rxring	 sc_arq_ring;
	unsigned int		 sc_arq_prod;
	unsigned int		 sc_arq_cons;

	struct task		 sc_link_state_task;
	struct ixl_atq		 sc_link_state_atq;

	struct ixl_dmamem	 sc_hmc_sd;
	struct ixl_dmamem	 sc_hmc_pd;
	struct ixl_hmc_entry	 sc_hmc_entries[IXL_HMC_COUNT];

	unsigned int		 sc_tx_ring_ndescs;
	unsigned int		 sc_rx_ring_ndescs;
	unsigned int		 sc_nqueues;	/* 1 << sc_nqueues */

	struct rwlock		 sc_cfg_lock;
	unsigned int		 sc_dead;

	struct rwlock		 sc_sff_lock;

	uint8_t			 sc_enaddr[ETHER_ADDR_LEN];
};
#define DEVNAME(_sc) ((_sc)->sc_dev.dv_xname)

#define delaymsec(_ms)	delay(1000 * (_ms))

static void	ixl_clear_hw(struct ixl_softc *);
static int	ixl_pf_reset(struct ixl_softc *);

static int	ixl_dmamem_alloc(struct ixl_softc *, struct ixl_dmamem *,
		    bus_size_t, u_int);
static void	ixl_dmamem_free(struct ixl_softc *, struct ixl_dmamem *);

static int	ixl_arq_fill(struct ixl_softc *);
static void	ixl_arq_unfill(struct ixl_softc *);

static int	ixl_atq_poll(struct ixl_softc *, struct ixl_aq_desc *,
		    unsigned int);
static void	ixl_atq_set(struct ixl_atq *,
		    void (*)(struct ixl_softc *, void *), void *);
static void	ixl_atq_post(struct ixl_softc *, struct ixl_atq *);
static void	ixl_atq_done(struct ixl_softc *);
static void	ixl_atq_exec(struct ixl_softc *, struct ixl_atq *,
		    const char *);
static int	ixl_get_version(struct ixl_softc *);
static int	ixl_pxe_clear(struct ixl_softc *);
static int	ixl_lldp_shut(struct ixl_softc *);
static int	ixl_get_mac(struct ixl_softc *);
static int	ixl_get_switch_config(struct ixl_softc *);
static int	ixl_phy_mask_ints(struct ixl_softc *);
static int	ixl_get_phy_types(struct ixl_softc *, uint64_t *);
static int	ixl_restart_an(struct ixl_softc *);
static int	ixl_hmc(struct ixl_softc *);
static void	ixl_hmc_free(struct ixl_softc *);
static int	ixl_get_vsi(struct ixl_softc *);
static int	ixl_set_vsi(struct ixl_softc *);
static int	ixl_get_link_status(struct ixl_softc *);
static int	ixl_set_link_status(struct ixl_softc *,
		    const struct ixl_aq_desc *);
static int	ixl_add_macvlan(struct ixl_softc *, uint8_t *, uint16_t,
		    uint16_t);
static int	ixl_remove_macvlan(struct ixl_softc *, uint8_t *, uint16_t,
		    uint16_t);
static void	ixl_link_state_update(void *);
static void	ixl_arq(void *);
static void	ixl_hmc_pack(void *, const void *,
		    const struct ixl_hmc_pack *, unsigned int);

static int	ixl_get_sffpage(struct ixl_softc *, struct if_sffpage *);
static int	ixl_sff_get_byte(struct ixl_softc *, uint8_t, uint32_t,
		    uint8_t *);
static int	ixl_sff_set_byte(struct ixl_softc *, uint8_t, uint32_t,
		    uint8_t);

static int	ixl_match(struct device *, void *, void *);
static void	ixl_attach(struct device *, struct device *, void *);

static void	ixl_media_add(struct ixl_softc *, uint64_t);
static int	ixl_media_change(struct ifnet *);
static void	ixl_media_status(struct ifnet *, struct ifmediareq *);
static void	ixl_watchdog(struct ifnet *);
static int	ixl_ioctl(struct ifnet *, u_long, caddr_t);
static void	ixl_start(struct ifqueue *);
static int	ixl_intr(void *);
static int	ixl_up(struct ixl_softc *);
static int	ixl_down(struct ixl_softc *);
static int	ixl_iff(struct ixl_softc *);

static struct ixl_tx_ring *
		ixl_txr_alloc(struct ixl_softc *, unsigned int);
static void	ixl_txr_qdis(struct ixl_softc *, struct ixl_tx_ring *, int);
static void	ixl_txr_config(struct ixl_softc *, struct ixl_tx_ring *);
static int	ixl_txr_enabled(struct ixl_softc *, struct ixl_tx_ring *);
static int	ixl_txr_disabled(struct ixl_softc *, struct ixl_tx_ring *);
static void	ixl_txr_unconfig(struct ixl_softc *, struct ixl_tx_ring *);
static void	ixl_txr_clean(struct ixl_softc *, struct ixl_tx_ring *);
static void	ixl_txr_free(struct ixl_softc *, struct ixl_tx_ring *);
static int	ixl_txeof(struct ixl_softc *, struct ifqueue *);

static struct ixl_rx_ring *
		ixl_rxr_alloc(struct ixl_softc *, unsigned int);
static void	ixl_rxr_config(struct ixl_softc *, struct ixl_rx_ring *);
static int	ixl_rxr_enabled(struct ixl_softc *, struct ixl_rx_ring *);
static int	ixl_rxr_disabled(struct ixl_softc *, struct ixl_rx_ring *);
static void	ixl_rxr_unconfig(struct ixl_softc *, struct ixl_rx_ring *);
static void	ixl_rxr_clean(struct ixl_softc *, struct ixl_rx_ring *);
static void	ixl_rxr_free(struct ixl_softc *, struct ixl_rx_ring *);
static int	ixl_rxeof(struct ixl_softc *, struct ifiqueue *);
static void	ixl_rxfill(struct ixl_softc *, struct ixl_rx_ring *);
static void	ixl_rxrefill(void *);
static int	ixl_rxrinfo(struct ixl_softc *, struct if_rxrinfo *);

struct cfdriver ixl_cd = {
	NULL,
	"ixl",
	DV_IFNET,
};

struct cfattach ixl_ca = {
	sizeof(struct ixl_softc),
	ixl_match,
	ixl_attach,
};

static const struct ixl_phy_type ixl_phy_type_map[] = {
	{ 1ULL << IXL_PHY_TYPE_SGMII,		IFM_1000_SGMII },
	{ 1ULL << IXL_PHY_TYPE_1000BASE_KX,	IFM_1000_KX },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_KX4,	IFM_10G_KX4 },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_KR,	IFM_10G_KR },
	{ 1ULL << IXL_PHY_TYPE_40GBASE_KR4,	IFM_40G_KR4 },
	{ 1ULL << IXL_PHY_TYPE_XAUI |
	  1ULL << IXL_PHY_TYPE_XFI,		IFM_10G_CX4 },
	{ 1ULL << IXL_PHY_TYPE_SFI,		IFM_10G_SFI },
	{ 1ULL << IXL_PHY_TYPE_XLAUI |
	  1ULL << IXL_PHY_TYPE_XLPPI,		IFM_40G_XLPPI },
	{ 1ULL << IXL_PHY_TYPE_40GBASE_CR4_CU |
	  1ULL << IXL_PHY_TYPE_40GBASE_CR4,	IFM_40G_CR4 },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_CR1_CU |
	  1ULL << IXL_PHY_TYPE_10GBASE_CR1,	IFM_10G_CR1 },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_AOC,	IFM_10G_AOC },
	{ 1ULL << IXL_PHY_TYPE_40GBASE_AOC,	IFM_40G_AOC },
	{ 1ULL << IXL_PHY_TYPE_100BASE_TX,	IFM_100_TX },
	{ 1ULL << IXL_PHY_TYPE_1000BASE_T_OPTICAL |
	  1ULL << IXL_PHY_TYPE_1000BASE_T,	IFM_1000_T },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_T,	IFM_10G_T },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_SR,	IFM_10G_SR },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_LR,	IFM_10G_LR },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_SFPP_CU,	IFM_10G_SFP_CU },
	{ 1ULL << IXL_PHY_TYPE_40GBASE_SR4,	IFM_40G_SR4 },
	{ 1ULL << IXL_PHY_TYPE_40GBASE_LR4,	IFM_40G_LR4 },
	{ 1ULL << IXL_PHY_TYPE_1000BASE_SX,	IFM_1000_SX },
	{ 1ULL << IXL_PHY_TYPE_1000BASE_LX,	IFM_1000_LX },
	{ 1ULL << IXL_PHY_TYPE_20GBASE_KR2,	IFM_20G_KR2 },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_KR,	IFM_25G_KR },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_CR,	IFM_25G_CR },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_SR,	IFM_25G_SR },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_LR,	IFM_25G_LR },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_AOC,	IFM_25G_AOC },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_ACC,	IFM_25G_CR },
};

static const struct ixl_speed_type ixl_speed_type_map[] = {
	{ IXL_AQ_LINK_SPEED_40GB,		IF_Gbps(40) },
	{ IXL_AQ_LINK_SPEED_25GB,		IF_Gbps(25) },
	{ IXL_AQ_LINK_SPEED_10GB,		IF_Gbps(10) },
	{ IXL_AQ_LINK_SPEED_1GB,		IF_Gbps(1) },
};

static const struct ixl_aq_regs ixl_pf_aq_regs = {
	.atq_tail	= I40E_PF_ATQT,
	.atq_tail_mask	= I40E_PF_ATQT_ATQT_MASK,
	.atq_head	= I40E_PF_ATQH,
	.atq_head_mask	= I40E_PF_ATQH_ATQH_MASK,
	.atq_len	= I40E_PF_ATQLEN,
	.atq_bal	= I40E_PF_ATQBAL,
	.atq_bah	= I40E_PF_ATQBAH,
	.atq_len_enable	= I40E_PF_ATQLEN_ATQENABLE_MASK,

	.arq_tail	= I40E_PF_ARQT,
	.arq_tail_mask	= I40E_PF_ARQT_ARQT_MASK,
	.arq_head	= I40E_PF_ARQH,
	.arq_head_mask	= I40E_PF_ARQH_ARQH_MASK,
	.arq_len	= I40E_PF_ARQLEN,
	.arq_bal	= I40E_PF_ARQBAL,
	.arq_bah	= I40E_PF_ARQBAH,
	.arq_len_enable	= I40E_PF_ARQLEN_ARQENABLE_MASK,
};

#define ixl_rd(_s, _r) \
	bus_space_read_4((_s)->sc_memt, (_s)->sc_memh, (_r))
#define ixl_wr(_s, _r, _v) \
	bus_space_write_4((_s)->sc_memt, (_s)->sc_memh, (_r), (_v))
#define ixl_barrier(_s, _r, _l, _o) \
	bus_space_barrier((_s)->sc_memt, (_s)->sc_memh, (_r), (_l), (_o))
#define ixl_intr_enable(_s) \
	ixl_wr((_s), I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_INTENA_MASK | \
	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | \
	    (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT))

#define ixl_nqueues(_sc)	(1 << (_sc)->sc_nqueues)

#ifdef __LP64__
#define ixl_dmamem_hi(_ixm)	(uint32_t)(IXL_DMA_DVA(_ixm) >> 32)
#else
#define ixl_dmamem_hi(_ixm)	0
#endif

#define ixl_dmamem_lo(_ixm)	(uint32_t)IXL_DMA_DVA(_ixm)

static inline void
ixl_aq_dva(struct ixl_aq_desc *iaq, bus_addr_t addr)
{
#ifdef __LP64__
	htolem32(&iaq->iaq_param[2], addr >> 32);
#else
	iaq->iaq_param[2] = htole32(0);
#endif
	htolem32(&iaq->iaq_param[3], addr);
}

#if _BYTE_ORDER == _BIG_ENDIAN
#define HTOLE16(_x)	(uint16_t)(((_x) & 0xff) << 8 | ((_x) & 0xff00) >> 8)
#else
#define HTOLE16(_x)	(_x)
#endif
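
/*
 * HTOLE16() swaps at compile time, so unlike the htole16() inline it
 * can appear in static initialisers, e.g. (illustrative only):
 *
 *	static const uint16_t flags = HTOLE16(IXL_AQ_BUF | IXL_AQ_RD);
 */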

static struct rwlock ixl_sff_lock = RWLOCK_INITIALIZER("ixlsff");

static const struct pci_matchid ixl_devices[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X710_10G_SFP },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_40G_BP },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X710_10G_BP },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_QSFP_1 },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_QSFP_2 },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X710_10G_QSFP },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X710_10G_BASET },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_20G_BP_1 },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_20G_BP_2 },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X710_T4_10G },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XXV710_25G_BP },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XXV710_25G_SFP28 },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X722_10G_KX },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X722_10G_QSFP },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X722_10G_SFP_1 },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X722_1G },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X722_10G_T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X722_10G_SFP_2 },
};

static int
ixl_match(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid(aux, ixl_devices, nitems(ixl_devices)));
}

static void
ixl_attach(struct device *parent, struct device *self, void *aux)
{
	struct ixl_softc *sc = (struct ixl_softc *)self;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct pci_attach_args *pa = aux;
	pcireg_t memtype;
	uint32_t port, ari, func;
	uint64_t phy_types = 0;
	int tries;

	rw_init(&sc->sc_cfg_lock, "ixlcfg");

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;
	sc->sc_aq_regs = &ixl_pf_aq_regs;

	sc->sc_nqueues = 0; /* 1 << 0 is 1 queue */
	sc->sc_tx_ring_ndescs = 1024;
	sc->sc_rx_ring_ndescs = 1024;

	memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, IXL_PCIREG);
	if (pci_mapreg_map(pa, IXL_PCIREG, memtype, 0,
	    &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems, 0)) {
		printf(": unable to map registers\n");
		return;
	}

	sc->sc_base_queue = (ixl_rd(sc, I40E_PFLAN_QALLOC) &
	    I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
	    I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;

	ixl_clear_hw(sc);
	if (ixl_pf_reset(sc) == -1) {
		/* error printed by ixl_pf_reset */
		goto unmap;
	}

	port = ixl_rd(sc, I40E_PFGEN_PORTNUM);
	port &= I40E_PFGEN_PORTNUM_PORT_NUM_MASK;
	port >>= I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
	printf(": port %u", port);

	ari = ixl_rd(sc, I40E_GLPCI_CAPSUP);
	ari &= I40E_GLPCI_CAPSUP_ARI_EN_MASK;
	ari >>= I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;

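	/*
	 * with ARI enabled the PCI function number is 8 bits wide
	 * rather than 3, so mask the PF id to match.
	 */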
	func = ixl_rd(sc, I40E_PF_FUNC_RID);
	sc->sc_pf_id = func & (ari ? 0xff : 0x7);

	/* initialise the adminq */

	mtx_init(&sc->sc_atq_mtx, IPL_NET);

	if (ixl_dmamem_alloc(sc, &sc->sc_atq,
	    sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
		printf("\n" "%s: unable to allocate atq\n", DEVNAME(sc));
		goto unmap;
	}

	SIMPLEQ_INIT(&sc->sc_arq_idle);
	SIMPLEQ_INIT(&sc->sc_arq_live);
	if_rxr_init(&sc->sc_arq_ring, 2, IXL_AQ_NUM - 1);
	task_set(&sc->sc_arq_task, ixl_arq, sc);
	sc->sc_arq_cons = 0;
	sc->sc_arq_prod = 0;

	if (ixl_dmamem_alloc(sc, &sc->sc_arq,
	    sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
		printf("\n" "%s: unable to allocate arq\n", DEVNAME(sc));
		goto free_atq;
	}

	if (!ixl_arq_fill(sc)) {
		printf("\n" "%s: unable to fill arq descriptors\n",
		    DEVNAME(sc));
		goto free_arq;
	}

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
	    0, IXL_DMA_LEN(&sc->sc_atq),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
	    0, IXL_DMA_LEN(&sc->sc_arq),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	for (tries = 0; tries < 10; tries++) {
		int rv;

		sc->sc_atq_cons = 0;
		sc->sc_atq_prod = 0;

		ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
		ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
		ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
		ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);

		ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);

		ixl_wr(sc, sc->sc_aq_regs->atq_bal,
		    ixl_dmamem_lo(&sc->sc_atq));
		ixl_wr(sc, sc->sc_aq_regs->atq_bah,
		    ixl_dmamem_hi(&sc->sc_atq));
		ixl_wr(sc, sc->sc_aq_regs->atq_len,
		    sc->sc_aq_regs->atq_len_enable | IXL_AQ_NUM);

		ixl_wr(sc, sc->sc_aq_regs->arq_bal,
		    ixl_dmamem_lo(&sc->sc_arq));
		ixl_wr(sc, sc->sc_aq_regs->arq_bah,
		    ixl_dmamem_hi(&sc->sc_arq));
		ixl_wr(sc, sc->sc_aq_regs->arq_len,
		    sc->sc_aq_regs->arq_len_enable | IXL_AQ_NUM);

		rv = ixl_get_version(sc);
		if (rv == 0)
			break;
		if (rv != ETIMEDOUT) {
			printf(", unable to get firmware version\n");
			goto shutdown;
		}

		delaymsec(100);
	}

	ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);

	if (ixl_pxe_clear(sc) != 0) {
		/* error printed by ixl_pxe_clear */
		goto shutdown;
	}

	if (ixl_get_mac(sc) != 0) {
		/* error printed by ixl_get_mac */
		goto shutdown;
	}

	if (pci_intr_map_msi(pa, &sc->sc_ih) != 0 &&
	    pci_intr_map(pa, &sc->sc_ih) != 0) {
		printf(", unable to map interrupt\n");
		goto shutdown;
	}

	printf(", %s, address %s\n", pci_intr_string(sc->sc_pc, sc->sc_ih),
	    ether_sprintf(sc->sc_ac.ac_enaddr));

	if (ixl_hmc(sc) != 0) {
		/* error printed by ixl_hmc */
		goto shutdown;
	}

	if (ixl_lldp_shut(sc) != 0) {
		/* error printed by ixl_lldp_shut */
		goto free_hmc;
	}

	if (ixl_phy_mask_ints(sc) != 0) {
		/* error printed by ixl_phy_mask_ints */
		goto free_hmc;
	}

	if (ixl_restart_an(sc) != 0) {
		/* error printed by ixl_restart_an */
		goto free_hmc;
	}

	if (ixl_get_switch_config(sc) != 0) {
		/* error printed by ixl_get_switch_config */
		goto free_hmc;
	}

	if (ixl_get_phy_types(sc, &phy_types) != 0) {
		/* error printed by ixl_get_phy_abilities */
		goto free_hmc;
	}

	if (ixl_get_link_status(sc) != 0) {
		/* error printed by ixl_get_link_status */
		goto free_hmc;
	}

	if (ixl_dmamem_alloc(sc, &sc->sc_scratch,
	    sizeof(struct ixl_aq_vsi_data), 8) != 0) {
		printf("%s: unable to allocate scratch buffer\n", DEVNAME(sc));
		goto free_hmc;
	}

1577 	if (ixl_get_vsi(sc) != 0) {
1578 		/* error printed by ixl_get_vsi */
1579 		goto free_hmc;
1580 	}
1581 
1582 	if (ixl_set_vsi(sc) != 0) {
1583 		/* error printed by ixl_set_vsi */
1584 		goto free_scratch;
1585 	}
1586 
1587 	sc->sc_ihc = pci_intr_establish(sc->sc_pc, sc->sc_ih,
1588 	    IPL_NET | IPL_MPSAFE, ixl_intr, sc, DEVNAME(sc));
1589 	if (sc->sc_ihc == NULL) {
1590 		printf("%s: unable to establish interrupt handler\n",
1591 		    DEVNAME(sc));
1592 		goto free_scratch;
1593 	}
1594 
1595 	ifp->if_softc = sc;
1596 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1597 	ifp->if_xflags = IFXF_MPSAFE;
1598 	ifp->if_ioctl = ixl_ioctl;
1599 	ifp->if_qstart = ixl_start;
1600 	ifp->if_watchdog = ixl_watchdog;
1601 	ifp->if_hardmtu = IXL_HARDMTU;
1602 	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
1603 	IFQ_SET_MAXLEN(&ifp->if_snd, 1);
1604 
1605 	ifp->if_capabilities = IFCAP_VLAN_MTU;
1606 #if 0
1607 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
1608 	ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
1609 	    IFCAP_CSUM_UDPv4;
1610 #endif
1611 
1612 	ifmedia_init(&sc->sc_media, 0, ixl_media_change, ixl_media_status);
1613 
1614 	ixl_media_add(sc, phy_types);
1615 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1616 	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
1617 
1618 	if_attach(ifp);
1619 	ether_ifattach(ifp);
1620 
1621 	if_attach_queues(ifp, ixl_nqueues(sc));
1622 	if_attach_iqueues(ifp, ixl_nqueues(sc));
1623 
1624 	task_set(&sc->sc_link_state_task, ixl_link_state_update, sc);
1625 	ixl_wr(sc, I40E_PFINT_ICR0_ENA,
1626 	    I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK |
1627 	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK);
1628 	ixl_wr(sc, I40E_PFINT_STAT_CTL0,
1629 	    IXL_NOITR << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT);
1630 
1631 	/* remove default mac filter and replace it so we can see vlans */
1632 	ixl_remove_macvlan(sc, sc->sc_ac.ac_enaddr, 0, 0);
1633 	ixl_remove_macvlan(sc, sc->sc_ac.ac_enaddr, 0,
1634 	    IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1635 	ixl_add_macvlan(sc, sc->sc_ac.ac_enaddr, 0,
1636 	    IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
1637 	ixl_add_macvlan(sc, etherbroadcastaddr, 0,
1638 	    IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
1639 	memcpy(sc->sc_enaddr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN);
1640 
1641 	ixl_intr_enable(sc);
1642 
1643 	return;
1644 free_scratch:
1645 	ixl_dmamem_free(sc, &sc->sc_scratch);
1646 free_hmc:
1647 	ixl_hmc_free(sc);
1648 shutdown:
1649 	ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1650 	ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1651 	ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1652 	ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1653 
1654 	ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0);
1655 	ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0);
1656 	ixl_wr(sc, sc->sc_aq_regs->atq_len, 0);
1657 
1658 	ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0);
1659 	ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0);
1660 	ixl_wr(sc, sc->sc_aq_regs->arq_len, 0);
1661 
1662 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1663 	    0, IXL_DMA_LEN(&sc->sc_arq),
1664 	    BUS_DMASYNC_POSTREAD);
1665 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1666 	    0, IXL_DMA_LEN(&sc->sc_atq),
1667 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1668 
1669 	ixl_arq_unfill(sc);
1670 free_arq:
1671 	ixl_dmamem_free(sc, &sc->sc_arq);
1672 free_atq:
1673 	ixl_dmamem_free(sc, &sc->sc_atq);
1674 unmap:
1675 	bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
1676 	sc->sc_mems = 0;
1677 }
1678 
1679 static void
1680 ixl_media_add(struct ixl_softc *sc, uint64_t phy_types)
1681 {
1682 	struct ifmedia *ifm = &sc->sc_media;
1683 	const struct ixl_phy_type *itype;
1684 	unsigned int i;
1685 
1686 	for (i = 0; i < nitems(ixl_phy_type_map); i++) {
1687 		itype = &ixl_phy_type_map[i];
1688 
1689 		if (ISSET(phy_types, itype->phy_type))
1690 			ifmedia_add(ifm, IFM_ETHER | itype->ifm_type, 0, NULL);
1691 	}
1692 }
1693 
1694 static int
1695 ixl_media_change(struct ifnet *ifp)
1696 {
1697 	/* ignore? */
1698 	return (EOPNOTSUPP);
1699 }
1700 
1701 static void
1702 ixl_media_status(struct ifnet *ifp, struct ifmediareq *ifm)
1703 {
1704 	struct ixl_softc *sc = ifp->if_softc;
1705 
1706 	NET_ASSERT_LOCKED();
1707 
1708 	ifm->ifm_status = sc->sc_media_status;
1709 	ifm->ifm_active = sc->sc_media_active;
1710 }
1711 
1712 static void
1713 ixl_watchdog(struct ifnet *ifp)
1714 {
1715 
1716 }
1717 
1718 int
1719 ixl_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1720 {
1721 	struct ixl_softc *sc = (struct ixl_softc *)ifp->if_softc;
1722 	struct ifreq *ifr = (struct ifreq *)data;
1723 	uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN];
1724 	int aqerror, error = 0;
1725 
1726 	switch (cmd) {
1727 	case SIOCSIFADDR:
1728 		ifp->if_flags |= IFF_UP;
1729 		/* FALLTHROUGH */
1730 
1731 	case SIOCSIFFLAGS:
1732 		if (ISSET(ifp->if_flags, IFF_UP)) {
1733 			if (ISSET(ifp->if_flags, IFF_RUNNING))
1734 				error = ENETRESET;
1735 			else
1736 				error = ixl_up(sc);
1737 		} else {
1738 			if (ISSET(ifp->if_flags, IFF_RUNNING))
1739 				error = ixl_down(sc);
1740 		}
1741 		break;
1742 
1743 	case SIOCGIFMEDIA:
1744 	case SIOCSIFMEDIA:
1745 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
1746 		break;
1747 
1748 	case SIOCGIFRXR:
1749 		error = ixl_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
1750 		break;
1751 
1752 	case SIOCADDMULTI:
1753 		if (ether_addmulti(ifr, &sc->sc_ac) == ENETRESET) {
1754 			error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
1755 			if (error != 0)
1756 				return (error);
1757 
1758 			aqerror = ixl_add_macvlan(sc, addrlo, 0,
1759 			    IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
1760 			if (aqerror == IXL_AQ_RC_ENOSPC) {
1761 				ether_delmulti(ifr, &sc->sc_ac);
1762 				error = ENOSPC;
1763 			}
1764 
1765 			if (sc->sc_ac.ac_multirangecnt > 0) {
1766 				SET(ifp->if_flags, IFF_ALLMULTI);
1767 				error = ENETRESET;
1768 			}
1769 		}
1770 		break;
1771 
1772 	case SIOCDELMULTI:
1773 		if (ether_delmulti(ifr, &sc->sc_ac) == ENETRESET) {
1774 			error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
1775 			if (error != 0)
1776 				return (error);
1777 
1778 			ixl_remove_macvlan(sc, addrlo, 0,
1779 			    IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1780 
1781 			if (ISSET(ifp->if_flags, IFF_ALLMULTI) &&
1782 			    sc->sc_ac.ac_multirangecnt == 0) {
1783 				CLR(ifp->if_flags, IFF_ALLMULTI);
1784 				error = ENETRESET;
1785 			}
1786 		}
1787 		break;
1788 
1789 	case SIOCGIFSFFPAGE:
1790 		error = rw_enter(&ixl_sff_lock, RW_WRITE|RW_INTR);
1791 		if (error != 0)
1792 			break;
1793 
1794 		error = ixl_get_sffpage(sc, (struct if_sffpage *)data);
1795 		rw_exit(&ixl_sff_lock);
1796 		break;
1797 
1798 	default:
1799 		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
1800 		break;
1801 	}
1802 
1803 	if (error == ENETRESET)
1804 		error = ixl_iff(sc);
1805 
1806 	return (error);
1807 }
1808 
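/*
 * The HMC backing store is a single DMA allocation carved into a region
 * per object type; each region holds hmc_count objects of hmc_size
 * bytes. Return a pointer to object i of the given type, or NULL if i
 * is out of range.
 */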
1809 static inline void *
1810 ixl_hmc_kva(struct ixl_softc *sc, unsigned int type, unsigned int i)
1811 {
1812 	uint8_t *kva = IXL_DMA_KVA(&sc->sc_hmc_pd);
1813 	struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];
1814 
1815 	if (i >= e->hmc_count)
1816 		return (NULL);
1817 
1818 	kva += e->hmc_base;
1819 	kva += i * e->hmc_size;
1820 
1821 	return (kva);
1822 }
1823 
1824 static inline size_t
1825 ixl_hmc_len(struct ixl_softc *sc, unsigned int type)
1826 {
1827 	struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];
1828 
1829 	return (e->hmc_size);
1830 }
1831 
1832 static int
1833 ixl_up(struct ixl_softc *sc)
1834 {
1835 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1836 	struct ixl_rx_ring *rxr;
1837 	struct ixl_tx_ring *txr;
1838 	unsigned int nqueues, i;
1839 	uint32_t reg;
1840 	int rv = ENOMEM;
1841 
1842 	nqueues = ixl_nqueues(sc);
1843 	KASSERT(nqueues == 1); /* XXX */
1844 
1845 	rw_enter_write(&sc->sc_cfg_lock);
1846 	if (sc->sc_dead) {
1847 		rw_exit_write(&sc->sc_cfg_lock);
1848 		return (ENXIO);
1849 	}
1850 
1851 	/* allocation is the only thing that can fail, so do it up front */
1852 	for (i = 0; i < nqueues; i++) {
1853 		rxr = ixl_rxr_alloc(sc, i);
1854 		if (rxr == NULL)
1855 			goto free;
1856 
1857 		txr = ixl_txr_alloc(sc, i);
1858 		if (txr == NULL) {
1859 			ixl_rxr_free(sc, rxr);
1860 			goto free;
1861 		}
1862 
1863 		ifp->if_iqs[i]->ifiq_softc = rxr;
1864 		ifp->if_ifqs[i]->ifq_softc = txr;
1865 	}
1866 
1867 	/* XXX wait 50ms from completion of last RX queue disable */
1868 
1869 	for (i = 0; i < nqueues; i++) {
1870 		rxr = ifp->if_iqs[i]->ifiq_softc;
1871 		txr = ifp->if_ifqs[i]->ifq_softc;
1872 
1873 		ixl_txr_qdis(sc, txr, 1);
1874 
1875 		ixl_rxr_config(sc, rxr);
1876 		ixl_txr_config(sc, txr);
1877 
1878 		ixl_wr(sc, I40E_QTX_CTL(i), I40E_QTX_CTL_PF_QUEUE |
1879 		    (sc->sc_pf_id << I40E_QTX_CTL_PF_INDX_SHIFT));
1880 
1881 		ixl_wr(sc, rxr->rxr_tail, 0);
1882 		ixl_rxfill(sc, rxr);
1883 
1884 		reg = ixl_rd(sc, I40E_QRX_ENA(i));
1885 		SET(reg, I40E_QRX_ENA_QENA_REQ_MASK);
1886 		ixl_wr(sc, I40E_QRX_ENA(i), reg);
1887 
1888 		reg = ixl_rd(sc, I40E_QTX_ENA(i));
1889 		SET(reg, I40E_QTX_ENA_QENA_REQ_MASK);
1890 		ixl_wr(sc, I40E_QTX_ENA(i), reg);
1891 	}
1892 
1893 	for (i = 0; i < nqueues; i++) {
1894 		rxr = ifp->if_iqs[i]->ifiq_softc;
1895 		txr = ifp->if_ifqs[i]->ifq_softc;
1896 
1897 		if (ixl_rxr_enabled(sc, rxr) != 0)
1898 			goto down;
1899 
1900 		if (ixl_txr_enabled(sc, txr) != 0)
1901 			goto down;
1902 	}
1903 
1904 	SET(ifp->if_flags, IFF_RUNNING);
1905 
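	/*
	 * Chain the rx and tx causes for the "other" queue onto
	 * interrupt vector 0: the linked list starts at the rx queue,
	 * the rx cause points at the tx queue, and the tx cause
	 * terminates the list with EOL.
	 */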
1906 	ixl_wr(sc, I40E_PFINT_LNKLST0,
1907 	    (I40E_INTR_NOTX_QUEUE << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
1908 	    (I40E_QUEUE_TYPE_RX << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
1909 
1910 	ixl_wr(sc, I40E_QINT_RQCTL(I40E_INTR_NOTX_QUEUE),
1911 	    (I40E_INTR_NOTX_INTR << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
1912 	    (I40E_ITR_INDEX_RX << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
1913 	    (I40E_INTR_NOTX_RX_QUEUE << I40E_QINT_RQCTL_MSIX0_INDX_SHIFT) |
1914 	    (I40E_INTR_NOTX_QUEUE << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
1915 	    (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
1916 	    I40E_QINT_RQCTL_CAUSE_ENA_MASK);
1917 
1918 	ixl_wr(sc, I40E_QINT_TQCTL(I40E_INTR_NOTX_QUEUE),
1919 	    (I40E_INTR_NOTX_INTR << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
1920 	    (I40E_ITR_INDEX_TX << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
1921 	    (I40E_INTR_NOTX_TX_QUEUE << I40E_QINT_TQCTL_MSIX0_INDX_SHIFT) |
1922 	    (I40E_QUEUE_TYPE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
1923 	    (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT) |
1924 	    I40E_QINT_TQCTL_CAUSE_ENA_MASK);
1925 
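	/* throttle the rx and tx ITRs, leave the "other" ITR at zero */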
1926 	ixl_wr(sc, I40E_PFINT_ITR0(0), 0x7a);
1927 	ixl_wr(sc, I40E_PFINT_ITR0(1), 0x7a);
1928 	ixl_wr(sc, I40E_PFINT_ITR0(2), 0);
1929 
1930 	rw_exit_write(&sc->sc_cfg_lock);
1931 
1932 	return (ENETRESET);
1933 
1934 free:
1935 	for (i = 0; i < nqueues; i++) {
1936 		rxr = ifp->if_iqs[i]->ifiq_softc;
1937 		txr = ifp->if_ifqs[i]->ifq_softc;
1938 
1939 		if (rxr == NULL) {
1940 			/*
1941 			 * tx and rx get set at the same time, so if one
1942 			 * is NULL, the other is too.
1943 			 */
1944 			continue;
1945 		}
1946 
1947 		ixl_txr_free(sc, txr);
1948 		ixl_rxr_free(sc, rxr);
1949 	}
1950 	rw_exit_write(&sc->sc_cfg_lock);
1951 	return (rv);
1952 down:
1953 	rw_exit_write(&sc->sc_cfg_lock);
1954 	ixl_down(sc);
1955 	return (ETIMEDOUT);
1956 }
1957 
1958 static int
1959 ixl_iff(struct ixl_softc *sc)
1960 {
1961 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1962 	struct ixl_atq iatq;
1963 	struct ixl_aq_desc *iaq;
1964 	struct ixl_aq_vsi_promisc_param *param;
1965 
1966 	if (!ISSET(ifp->if_flags, IFF_RUNNING))
1967 		return (0);
1968 
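	/*
	 * Sync the chip's promiscuous and allmulti state with the
	 * interface flags, and refresh the unicast filter if the
	 * lladdr was changed while the interface was running.
	 */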
1969 	memset(&iatq, 0, sizeof(iatq));
1970 
1971 	iaq = &iatq.iatq_desc;
1972 	iaq->iaq_opcode = htole16(IXL_AQ_OP_SET_VSI_PROMISC);
1973 
1974 	param = (struct ixl_aq_vsi_promisc_param *)&iaq->iaq_param;
1975 	param->flags = htole16(IXL_AQ_VSI_PROMISC_FLAG_BCAST |
1976 	    IXL_AQ_VSI_PROMISC_FLAG_VLAN);
1977 	if (ISSET(ifp->if_flags, IFF_PROMISC)) {
1978 		param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
1979 		    IXL_AQ_VSI_PROMISC_FLAG_MCAST);
1980 	} else if (ISSET(ifp->if_flags, IFF_ALLMULTI)) {
1981 		param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_MCAST);
1982 	}
1983 	param->valid_flags = htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
1984 	    IXL_AQ_VSI_PROMISC_FLAG_MCAST | IXL_AQ_VSI_PROMISC_FLAG_BCAST |
1985 	    IXL_AQ_VSI_PROMISC_FLAG_VLAN);
1986 	param->seid = sc->sc_seid;
1987 
1988 	ixl_atq_exec(sc, &iatq, "ixliff");
1989 
1990 	if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK))
1991 		return (EIO);
1992 
1993 	if (memcmp(sc->sc_enaddr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN) != 0) {
1994 		ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
1995 		    IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1996 		ixl_add_macvlan(sc, sc->sc_ac.ac_enaddr, 0,
1997 		    IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
1998 		memcpy(sc->sc_enaddr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN);
1999 	}
2000 	return (0);
2001 }
2002 
2003 static int
2004 ixl_down(struct ixl_softc *sc)
2005 {
2006 	struct ifnet *ifp = &sc->sc_ac.ac_if;
2007 	struct ixl_rx_ring *rxr;
2008 	struct ixl_tx_ring *txr;
2009 	unsigned int nqueues, i;
2010 	uint32_t reg;
2011 	int error = 0;
2012 
2013 	nqueues = ixl_nqueues(sc);
2014 
2015 	rw_enter_write(&sc->sc_cfg_lock);
2016 
2017 	CLR(ifp->if_flags, IFF_RUNNING);
2018 
2019 	NET_UNLOCK();
2020 
2021 	/* mask interrupts */
2022 	reg = ixl_rd(sc, I40E_QINT_RQCTL(I40E_INTR_NOTX_QUEUE));
2023 	CLR(reg, I40E_QINT_RQCTL_CAUSE_ENA_MASK);
2024 	ixl_wr(sc, I40E_QINT_RQCTL(I40E_INTR_NOTX_QUEUE), reg);
2025 
2026 	reg = ixl_rd(sc, I40E_QINT_TQCTL(I40E_INTR_NOTX_QUEUE));
2027 	CLR(reg, I40E_QINT_TQCTL_CAUSE_ENA_MASK);
2028 	ixl_wr(sc, I40E_QINT_TQCTL(I40E_INTR_NOTX_QUEUE), reg);
2029 
2030 	ixl_wr(sc, I40E_PFINT_LNKLST0, I40E_QUEUE_TYPE_EOL);
2031 
2032 	/* make sure no hw generated work is still in flight */
2033 	intr_barrier(sc->sc_ihc);
2034 	for (i = 0; i < nqueues; i++) {
2035 		rxr = ifp->if_iqs[i]->ifiq_softc;
2036 		txr = ifp->if_ifqs[i]->ifq_softc;
2037 
2038 		ixl_txr_qdis(sc, txr, 0);
2039 
2040 		ifq_barrier(ifp->if_ifqs[i]);
2041 
2042 		timeout_del_barrier(&rxr->rxr_refill);
2043 	}
2044 
2045 	/* XXX wait at least 400 usec for all tx queues in one go */
2046 	delay(500);
2047 
2048 	for (i = 0; i < nqueues; i++) {
2049 		rxr = ifp->if_iqs[i]->ifiq_softc;
2050 		txr = ifp->if_ifqs[i]->ifq_softc;
2051 
2052 		reg = ixl_rd(sc, I40E_QTX_ENA(i));
2053 		CLR(reg, I40E_QTX_ENA_QENA_REQ_MASK);
2054 		ixl_wr(sc, I40E_QTX_ENA(i), reg);
2055 
2056 		reg = ixl_rd(sc, I40E_QRX_ENA(i));
2057 		CLR(reg, I40E_QRX_ENA_QENA_REQ_MASK);
2058 		ixl_wr(sc, I40E_QRX_ENA(i), reg);
2059 	}
2060 
2061 	for (i = 0; i < nqueues; i++) {
2062 		rxr = ifp->if_iqs[i]->ifiq_softc;
2063 		txr = ifp->if_ifqs[i]->ifq_softc;
2064 
2065 		if (ixl_txr_disabled(sc, txr) != 0)
2066 			goto die;
2067 
2068 		if (ixl_rxr_disabled(sc, rxr) != 0)
2069 			goto die;
2070 	}
2071 
2072 	for (i = 0; i < nqueues; i++) {
2073 		rxr = ifp->if_iqs[i]->ifiq_softc;
2074 		txr = ifp->if_ifqs[i]->ifq_softc;
2075 
2076 		ixl_txr_unconfig(sc, txr);
2077 		ixl_rxr_unconfig(sc, rxr);
2078 
2079 		ixl_txr_clean(sc, txr);
2080 		ixl_rxr_clean(sc, rxr);
2081 
2082 		ixl_txr_free(sc, txr);
2083 		ixl_rxr_free(sc, rxr);
2084 
2085 		ifp->if_iqs[i]->ifiq_softc = NULL;
2086 		ifp->if_ifqs[i]->ifq_softc = NULL;
2087 	}
2088 
2089 out:
2090 	rw_exit_write(&sc->sc_cfg_lock);
2091 	NET_LOCK();
2092 	return (error);
2093 die:
2094 	sc->sc_dead = 1;
2095 	log(LOG_CRIT, "%s: failed to shut down rings\n", DEVNAME(sc));
2096 	error = ETIMEDOUT;
2097 	goto out;
2098 }
2099 
2100 static struct ixl_tx_ring *
2101 ixl_txr_alloc(struct ixl_softc *sc, unsigned int qid)
2102 {
2103 	struct ixl_tx_ring *txr;
2104 	struct ixl_tx_map *maps, *txm;
2105 	unsigned int i;
2106 
2107 	txr = malloc(sizeof(*txr), M_DEVBUF, M_WAITOK|M_CANFAIL);
2108 	if (txr == NULL)
2109 		return (NULL);
2110 
2111 	maps = mallocarray(sizeof(*maps),
2112 	    sc->sc_tx_ring_ndescs, M_DEVBUF, M_WAITOK|M_CANFAIL|M_ZERO);
2113 	if (maps == NULL)
2114 		goto free;
2115 
2116 	if (ixl_dmamem_alloc(sc, &txr->txr_mem,
2117 	    sizeof(struct ixl_tx_desc) * sc->sc_tx_ring_ndescs,
2118 	    IXL_TX_QUEUE_ALIGN) != 0)
2119 		goto freemap;
2120 
2121 	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2122 		txm = &maps[i];
2123 
2124 		if (bus_dmamap_create(sc->sc_dmat,
2125 		    IXL_HARDMTU, IXL_TX_PKT_DESCS, IXL_HARDMTU, 0,
2126 		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
2127 		    &txm->txm_map) != 0)
2128 			goto uncreate;
2129 
2130 		txm->txm_eop = -1;
2131 		txm->txm_m = NULL;
2132 	}
2133 
2134 	txr->txr_cons = txr->txr_prod = 0;
2135 	txr->txr_maps = maps;
2136 
2137 	txr->txr_tail = I40E_QTX_TAIL(qid);
2138 	txr->txr_qid = qid;
2139 
2140 	return (txr);
2141 
2142 uncreate:
2143 	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2144 		txm = &maps[i];
2145 
2146 		if (txm->txm_map == NULL)
2147 			continue;
2148 
2149 		bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
2150 	}
2151 
2152 	ixl_dmamem_free(sc, &txr->txr_mem);
2153 freemap:
2154 	free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_tx_ring_ndescs);
2155 free:
2156 	free(txr, M_DEVBUF, sizeof(*txr));
2157 	return (NULL);
2158 }
2159 
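/*
 * Request pre-enable or pre-disable of a tx queue. The
 * GLLAN_TXPRE_QDIS registers are banked 128 queues per register, so
 * select the register with qid / 128 and put qid % 128 in the QINDX
 * field along with either the set-disable or clear-disable bit.
 */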
2160 static void
2161 ixl_txr_qdis(struct ixl_softc *sc, struct ixl_tx_ring *txr, int enable)
2162 {
2163 	unsigned int qid;
2164 	bus_size_t reg;
2165 	uint32_t r;
2166 
2167 	qid = txr->txr_qid + sc->sc_base_queue;
2168 	reg = I40E_GLLAN_TXPRE_QDIS(qid / 128);
2169 	qid %= 128;
2170 
2171 	r = ixl_rd(sc, reg);
2172 	CLR(r, I40E_GLLAN_TXPRE_QDIS_QINDX_MASK);
2173 	SET(r, qid << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
2174 	SET(r, enable ? I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK :
2175 	    I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK);
2176 	ixl_wr(sc, reg, r);
2177 }
2178 
2179 static void
2180 ixl_txr_config(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2181 {
2182 	struct ixl_hmc_txq txq;
2183 	struct ixl_aq_vsi_data *data = IXL_DMA_KVA(&sc->sc_scratch);
2184 	void *hmc;
2185 
2186 	memset(&txq, 0, sizeof(txq));
2187 	txq.head = htole16(0);
2188 	txq.new_context = 1;
2189 	htolem64(&txq.base,
2190 	    IXL_DMA_DVA(&txr->txr_mem) / IXL_HMC_TXQ_BASE_UNIT);
2191 	txq.head_wb_ena = IXL_HMC_TXQ_DESC_WB;
2192 	htolem16(&txq.qlen, sc->sc_tx_ring_ndescs);
2193 	txq.tphrdesc_ena = 0;
2194 	txq.tphrpacket_ena = 0;
2195 	txq.tphwdesc_ena = 0;
2196 	txq.rdylist = data->qs_handle[0];
2197 
2198 	hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
2199 	memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX));
2200 	ixl_hmc_pack(hmc, &txq, ixl_hmc_pack_txq, nitems(ixl_hmc_pack_txq));
2201 }
2202 
2203 static void
2204 ixl_txr_unconfig(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2205 {
2206 	void *hmc;
2207 
2208 	hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
2209 	memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX));
2210 }
2211 
2212 static void
2213 ixl_txr_clean(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2214 {
2215 	struct ixl_tx_map *maps, *txm;
2216 	bus_dmamap_t map;
2217 	unsigned int i;
2218 
2219 	maps = txr->txr_maps;
2220 	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2221 		txm = &maps[i];
2222 
2223 		if (txm->txm_m == NULL)
2224 			continue;
2225 
2226 		map = txm->txm_map;
2227 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2228 		    BUS_DMASYNC_POSTWRITE);
2229 		bus_dmamap_unload(sc->sc_dmat, map);
2230 
2231 		m_freem(txm->txm_m);
2232 		txm->txm_m = NULL;
2233 	}
2234 }
2235 
2236 static int
2237 ixl_txr_enabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2238 {
2239 	bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
2240 	uint32_t reg;
2241 	int i;
2242 
2243 	for (i = 0; i < 10; i++) {
2244 		reg = ixl_rd(sc, ena);
2245 		if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK))
2246 			return (0);
2247 
2248 		delaymsec(10);
2249 	}
2250 
2251 	return (ETIMEDOUT);
2252 }
2253 
2254 static int
2255 ixl_txr_disabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2256 {
2257 	bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
2258 	uint32_t reg;
2259 	int i;
2260 
2261 	for (i = 0; i < 20; i++) {
2262 		reg = ixl_rd(sc, ena);
2263 		if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK) == 0)
2264 			return (0);
2265 
2266 		delaymsec(10);
2267 	}
2268 
2269 	return (ETIMEDOUT);
2270 }
2271 
2272 static void
2273 ixl_txr_free(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2274 {
2275 	struct ixl_tx_map *maps, *txm;
2276 	unsigned int i;
2277 
2278 	maps = txr->txr_maps;
2279 	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2280 		txm = &maps[i];
2281 
2282 		bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
2283 	}
2284 
2285 	ixl_dmamem_free(sc, &txr->txr_mem);
2286 	free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_tx_ring_ndescs);
2287 	free(txr, M_DEVBUF, sizeof(*txr));
2288 }
2289 
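/*
 * Load an mbuf chain for transmit. If it is too fragmented for the
 * map (EFBIG), compact it with m_defrag() and try exactly once more.
 */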
2290 static inline int
2291 ixl_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m)
2292 {
2293 	int error;
2294 
2295 	error = bus_dmamap_load_mbuf(dmat, map, m,
2296 	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT);
2297 	if (error != EFBIG)
2298 		return (error);
2299 
2300 	error = m_defrag(m, M_DONTWAIT);
2301 	if (error != 0)
2302 		return (error);
2303 
2304 	return (bus_dmamap_load_mbuf(dmat, map, m,
2305 	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT));
2306 }
2307 
2308 static void
2309 ixl_start(struct ifqueue *ifq)
2310 {
2311 	struct ifnet *ifp = ifq->ifq_if;
2312 	struct ixl_softc *sc = ifp->if_softc;
2313 	struct ixl_tx_ring *txr = ifq->ifq_softc;
2314 	struct ixl_tx_desc *ring, *txd;
2315 	struct ixl_tx_map *txm;
2316 	bus_dmamap_t map;
2317 	struct mbuf *m;
2318 	uint64_t cmd;
2319 	unsigned int prod, free, last, i;
2320 	unsigned int mask;
2321 	int post = 0;
2322 #if NBPFILTER > 0
2323 	caddr_t if_bpf;
2324 #endif
2325 
2326 	if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
2327 		ifq_purge(ifq);
2328 		return;
2329 	}
2330 
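	/*
	 * prod == cons means the ring is empty, so compute the free
	 * count as the distance from prod forward to cons; the loop
	 * below stops while slots remain, so prod can never catch cons.
	 */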
2331 	prod = txr->txr_prod;
2332 	free = txr->txr_cons;
2333 	if (free <= prod)
2334 		free += sc->sc_tx_ring_ndescs;
2335 	free -= prod;
2336 
2337 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2338 	    0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTWRITE);
2339 
2340 	ring = IXL_DMA_KVA(&txr->txr_mem);
2341 	mask = sc->sc_tx_ring_ndescs - 1;
2342 
2343 	for (;;) {
2344 		if (free <= IXL_TX_PKT_DESCS) {
2345 			ifq_set_oactive(ifq);
2346 			break;
2347 		}
2348 
2349 		m = ifq_dequeue(ifq);
2350 		if (m == NULL)
2351 			break;
2352 
2353 		txm = &txr->txr_maps[prod];
2354 		map = txm->txm_map;
2355 
2356 		if (ixl_load_mbuf(sc->sc_dmat, map, m) != 0) {
2357 			ifq->ifq_errors++;
2358 			m_freem(m);
2359 			continue;
2360 		}
2361 
2362 		bus_dmamap_sync(sc->sc_dmat, map, 0,
2363 		    map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2364 
2365 		for (i = 0; i < map->dm_nsegs; i++) {
2366 			txd = &ring[prod];
2367 
2368 			cmd = (uint64_t)map->dm_segs[i].ds_len <<
2369 			    IXL_TX_DESC_BSIZE_SHIFT;
2370 			cmd |= IXL_TX_DESC_DTYPE_DATA | IXL_TX_DESC_CMD_ICRC;
2371 
2372 			htolem64(&txd->addr, map->dm_segs[i].ds_addr);
2373 			htolem64(&txd->cmd, cmd);
2374 
2375 			last = prod;
2376 
2377 			prod++;
2378 			prod &= mask;
2379 		}
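		/*
		 * txd still points at the last descriptor of this
		 * packet; only it gets EOP (end of packet) and RS
		 * (report status), so completion is written back once
		 * per packet rather than per segment.
		 */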
2380 		cmd |= IXL_TX_DESC_CMD_EOP | IXL_TX_DESC_CMD_RS;
2381 		htolem64(&txd->cmd, cmd);
2382 
2383 		txm->txm_m = m;
2384 		txm->txm_eop = last;
2385 
2386 #if NBPFILTER > 0
2387 		if_bpf = ifp->if_bpf;
2388 		if (if_bpf)
2389 			bpf_mtap_ether(if_bpf, m, BPF_DIRECTION_OUT);
2390 #endif
2391 
2392 		free -= i;
2393 		post = 1;
2394 	}
2395 
2396 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2397 	    0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREWRITE);
2398 
2399 	if (post) {
2400 		txr->txr_prod = prod;
2401 		ixl_wr(sc, txr->txr_tail, prod);
2402 	}
2403 }
2404 
2405 static int
2406 ixl_txeof(struct ixl_softc *sc, struct ifqueue *ifq)
2407 {
2408 	struct ixl_tx_ring *txr = ifq->ifq_softc;
2409 	struct ixl_tx_desc *ring, *txd;
2410 	struct ixl_tx_map *txm;
2411 	bus_dmamap_t map;
2412 	unsigned int cons, prod, last;
2413 	unsigned int mask;
2414 	uint64_t dtype;
2415 	int done = 0;
2416 
2417 	prod = txr->txr_prod;
2418 	cons = txr->txr_cons;
2419 
2420 	if (cons == prod)
2421 		return (0);
2422 
2423 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2424 	    0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTREAD);
2425 
2426 	ring = IXL_DMA_KVA(&txr->txr_mem);
2427 	mask = sc->sc_tx_ring_ndescs - 1;
2428 
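	/*
	 * Only EOP descriptors were posted with RS set, so look at the
	 * descriptor in each packet's recorded EOP slot; its DTYPE
	 * field changes to DONE once the hardware has finished with
	 * the whole packet.
	 */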
2429 	do {
2430 		txm = &txr->txr_maps[cons];
2431 		last = txm->txm_eop;
2432 		txd = &ring[last];
2433 
2434 		dtype = txd->cmd & htole64(IXL_TX_DESC_DTYPE_MASK);
2435 		if (dtype != htole64(IXL_TX_DESC_DTYPE_DONE))
2436 			break;
2437 
2438 		map = txm->txm_map;
2439 
2440 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2441 		    BUS_DMASYNC_POSTWRITE);
2442 		bus_dmamap_unload(sc->sc_dmat, map);
2443 		m_freem(txm->txm_m);
2444 
2445 		txm->txm_m = NULL;
2446 		txm->txm_eop = -1;
2447 
2448 		cons = last + 1;
2449 		cons &= mask;
2450 
2451 		done = 1;
2452 	} while (cons != prod);
2453 
2454 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2455 	    0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREREAD);
2456 
2457 	txr->txr_cons = cons;
2458 
2459 	/* ixl_enable(sc, txr->txr_msix); */
2460 
2461 	if (ifq_is_oactive(ifq))
2462 		ifq_restart(ifq);
2463 
2464 	return (done);
2465 }
2466 
2467 static struct ixl_rx_ring *
2468 ixl_rxr_alloc(struct ixl_softc *sc, unsigned int qid)
2469 {
2470 	struct ixl_rx_ring *rxr;
2471 	struct ixl_rx_map *maps, *rxm;
2472 	unsigned int i;
2473 
2474 	rxr = malloc(sizeof(*rxr), M_DEVBUF, M_WAITOK|M_CANFAIL);
2475 	if (rxr == NULL)
2476 		return (NULL);
2477 
2478 	maps = mallocarray(sizeof(*maps),
2479 	    sc->sc_rx_ring_ndescs, M_DEVBUF, M_WAITOK|M_CANFAIL|M_ZERO);
2480 	if (maps == NULL)
2481 		goto free;
2482 
2483 	if (ixl_dmamem_alloc(sc, &rxr->rxr_mem,
2484 	    sizeof(struct ixl_rx_rd_desc_16) * sc->sc_rx_ring_ndescs,
2485 	    IXL_RX_QUEUE_ALIGN) != 0)
2486 		goto freemap;
2487 
2488 	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2489 		rxm = &maps[i];
2490 
2491 		if (bus_dmamap_create(sc->sc_dmat,
2492 		    IXL_HARDMTU, 1, IXL_HARDMTU, 0,
2493 		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
2494 		    &rxm->rxm_map) != 0)
2495 			goto uncreate;
2496 
2497 		rxm->rxm_m = NULL;
2498 	}
2499 
2500 	rxr->rxr_sc = sc;
2501 	if_rxr_init(&rxr->rxr_acct, 17, sc->sc_rx_ring_ndescs - 1);
2502 	timeout_set(&rxr->rxr_refill, ixl_rxrefill, rxr);
2503 	rxr->rxr_cons = rxr->rxr_prod = 0;
2504 	rxr->rxr_m_head = NULL;
2505 	rxr->rxr_m_tail = &rxr->rxr_m_head;
2506 	rxr->rxr_maps = maps;
2507 
2508 	rxr->rxr_tail = I40E_QRX_TAIL(qid);
2509 	rxr->rxr_qid = qid;
2510 
2511 	return (rxr);
2512 
2513 uncreate:
2514 	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2515 		rxm = &maps[i];
2516 
2517 		if (rxm->rxm_map == NULL)
2518 			continue;
2519 
2520 		bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
2521 	}
2522 
2523 	ixl_dmamem_free(sc, &rxr->rxr_mem);
2524 freemap:
2525 	free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_rx_ring_ndescs);
2526 free:
2527 	free(rxr, M_DEVBUF, sizeof(*rxr));
2528 	return (NULL);
2529 }
2530 
2531 static void
2532 ixl_rxr_clean(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2533 {
2534 	struct ixl_rx_map *maps, *rxm;
2535 	bus_dmamap_t map;
2536 	unsigned int i;
2537 
2538 	timeout_del_barrier(&rxr->rxr_refill);
2539 
2540 	maps = rxr->rxr_maps;
2541 	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2542 		rxm = &maps[i];
2543 
2544 		if (rxm->rxm_m == NULL)
2545 			continue;
2546 
2547 		map = rxm->rxm_map;
2548 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2549 		    BUS_DMASYNC_POSTWRITE);
2550 		bus_dmamap_unload(sc->sc_dmat, map);
2551 
2552 		m_freem(rxm->rxm_m);
2553 		rxm->rxm_m = NULL;
2554 	}
2555 
2556 	m_freem(rxr->rxr_m_head);
2557 	rxr->rxr_m_head = NULL;
2558 	rxr->rxr_m_tail = &rxr->rxr_m_head;
2559 
2560 	rxr->rxr_prod = rxr->rxr_cons = 0;
2561 }
2562 
2563 static int
2564 ixl_rxr_enabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2565 {
2566 	bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid);
2567 	uint32_t reg;
2568 	int i;
2569 
2570 	for (i = 0; i < 10; i++) {
2571 		reg = ixl_rd(sc, ena);
2572 		if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK))
2573 			return (0);
2574 
2575 		delaymsec(10);
2576 	}
2577 
2578 	return (ETIMEDOUT);
2579 }
2580 
2581 static int
2582 ixl_rxr_disabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2583 {
2584 	bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid);
2585 	uint32_t reg;
2586 	int i;
2587 
2588 	for (i = 0; i < 20; i++) {
2589 		reg = ixl_rd(sc, ena);
2590 		if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK) == 0)
2591 			return (0);
2592 
2593 		delaymsec(10);
2594 	}
2595 
2596 	return (ETIMEDOUT);
2597 }
2598 
2599 static void
2600 ixl_rxr_config(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2601 {
2602 	struct ixl_hmc_rxq rxq;
2603 	void *hmc;
2604 
2605 	memset(&rxq, 0, sizeof(rxq));
2606 
2607 	rxq.head = htole16(0);
2608 	htolem64(&rxq.base,
2609 	    IXL_DMA_DVA(&rxr->rxr_mem) / IXL_HMC_RXQ_BASE_UNIT);
2610 	htolem16(&rxq.qlen, sc->sc_rx_ring_ndescs);
2611 	rxq.dbuff = htole16(MCLBYTES / IXL_HMC_RXQ_DBUFF_UNIT);
2612 	rxq.hbuff = 0;
2613 	rxq.dtype = IXL_HMC_RXQ_DTYPE_NOSPLIT;
2614 	rxq.dsize = IXL_HMC_RXQ_DSIZE_16;
2615 	rxq.crcstrip = 1;
2616 	rxq.l2sel = 0;
2617 	rxq.showiv = 0;
2618 	rxq.rxmax = htole16(IXL_HARDMTU);
2619 	rxq.tphrdesc_ena = 0;
2620 	rxq.tphwdesc_ena = 0;
2621 	rxq.tphdata_ena = 0;
2622 	rxq.tphhead_ena = 0;
2623 	rxq.lrxqthresh = 0;
2624 	rxq.prefena = 1;
2625 
2626 	hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid);
2627 	memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX));
2628 	ixl_hmc_pack(hmc, &rxq, ixl_hmc_pack_rxq, nitems(ixl_hmc_pack_rxq));
2629 }
2630 
2631 static void
2632 ixl_rxr_unconfig(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2633 {
2634 	void *hmc;
2635 
2636 	hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid);
2637 	memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX));
2638 }
2639 
2640 static void
2641 ixl_rxr_free(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2642 {
2643 	struct ixl_rx_map *maps, *rxm;
2644 	unsigned int i;
2645 
2646 	maps = rxr->rxr_maps;
2647 	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2648 		rxm = &maps[i];
2649 
2650 		bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
2651 	}
2652 
2653 	ixl_dmamem_free(sc, &rxr->rxr_mem);
2654 	free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_rx_ring_ndescs);
2655 	free(rxr, M_DEVBUF, sizeof(*rxr));
2656 }
2657 
2658 static int
2659 ixl_rxeof(struct ixl_softc *sc, struct ifiqueue *ifiq)
2660 {
2661 	struct ixl_rx_ring *rxr = ifiq->ifiq_softc;
2662 	struct ifnet *ifp = &sc->sc_ac.ac_if;
2663 	struct ixl_rx_wb_desc_16 *ring, *rxd;
2664 	struct ixl_rx_map *rxm;
2665 	bus_dmamap_t map;
2666 	unsigned int cons, prod;
2667 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
2668 	struct mbuf *m;
2669 	uint64_t word;
2670 	unsigned int len;
2671 	unsigned int mask;
2672 	int done = 0;
2673 
2674 	if (!ISSET(ifp->if_flags, IFF_RUNNING))
2675 		return (0);
2676 
2677 	prod = rxr->rxr_prod;
2678 	cons = rxr->rxr_cons;
2679 
2680 	if (cons == prod)
2681 		return (0);
2682 
2683 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
2684 	    0, IXL_DMA_LEN(&rxr->rxr_mem),
2685 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2686 
2687 	ring = IXL_DMA_KVA(&rxr->rxr_mem);
2688 	mask = sc->sc_rx_ring_ndescs - 1;
2689 
2690 	do {
2691 		rxd = &ring[cons];
2692 
2693 		word = lemtoh64(&rxd->qword1);
2694 		if (!ISSET(word, IXL_RX_DESC_DD))
2695 			break;
2696 
2697 		if_rxr_put(&rxr->rxr_acct, 1);
2698 
2699 		rxm = &rxr->rxr_maps[cons];
2700 
2701 		map = rxm->rxm_map;
2702 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2703 		    BUS_DMASYNC_POSTREAD);
2704 		bus_dmamap_unload(sc->sc_dmat, map);
2705 
2706 		m = rxm->rxm_m;
2707 		rxm->rxm_m = NULL;
2708 
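		/*
		 * A frame may span several descriptors; append each
		 * fragment to the pending chain and only push the
		 * completed packet up (or drop it on error) once the
		 * EOP descriptor is seen.
		 */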
2709 		len = (word & IXL_RX_DESC_PLEN_MASK) >> IXL_RX_DESC_PLEN_SHIFT;
2710 		m->m_len = len;
2711 		m->m_pkthdr.len = 0;
2712 
2713 		m->m_next = NULL;
2714 		*rxr->rxr_m_tail = m;
2715 		rxr->rxr_m_tail = &m->m_next;
2716 
2717 		m = rxr->rxr_m_head;
2718 		m->m_pkthdr.len += len;
2719 
2720 		if (ISSET(word, IXL_RX_DESC_EOP)) {
2721 			if (!ISSET(word,
2722 			    IXL_RX_DESC_RXE | IXL_RX_DESC_OVERSIZE)) {
2723 				ml_enqueue(&ml, m);
2724 			} else {
2725 				ifp->if_ierrors++; /* XXX */
2726 				m_freem(m);
2727 			}
2728 
2729 			rxr->rxr_m_head = NULL;
2730 			rxr->rxr_m_tail = &rxr->rxr_m_head;
2731 		}
2732 
2733 		cons++;
2734 		cons &= mask;
2735 
2736 		done = 1;
2737 	} while (cons != prod);
2738 
2739 	if (done) {
2740 		rxr->rxr_cons = cons;
2741 		if (ifiq_input(ifiq, &ml))
2742 			if_rxr_livelocked(&rxr->rxr_acct);
2743 		ixl_rxfill(sc, rxr);
2744 	}
2745 
2746 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
2747 	    0, IXL_DMA_LEN(&rxr->rxr_mem),
2748 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2749 
2750 	return (done);
2751 }
2752 
2753 static void
2754 ixl_rxfill(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2755 {
2756 	struct ixl_rx_rd_desc_16 *ring, *rxd;
2757 	struct ixl_rx_map *rxm;
2758 	bus_dmamap_t map;
2759 	struct mbuf *m;
2760 	unsigned int prod;
2761 	unsigned int slots;
2762 	unsigned int mask;
2763 	int post = 0;
2764 
2765 	slots = if_rxr_get(&rxr->rxr_acct, sc->sc_rx_ring_ndescs);
2766 	if (slots == 0)
2767 		return;
2768 
2769 	prod = rxr->rxr_prod;
2770 
2771 	ring = IXL_DMA_KVA(&rxr->rxr_mem);
2772 	mask = sc->sc_rx_ring_ndescs - 1;
2773 
2774 	do {
2775 		rxm = &rxr->rxr_maps[prod];
2776 
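		/*
		 * Ask for a cluster with ETHER_ALIGN of slop and move
		 * m_data so that exactly MCLBYTES + ETHER_ALIGN bytes
		 * remain to the end of the cluster before mapping it.
		 */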
2777 		m = MCLGETI(NULL, M_DONTWAIT, NULL, MCLBYTES + ETHER_ALIGN);
2778 		if (m == NULL)
2779 			break;
2780 		m->m_data += (m->m_ext.ext_size - (MCLBYTES + ETHER_ALIGN));
2781 		m->m_len = m->m_pkthdr.len = MCLBYTES + ETHER_ALIGN;
2782 
2783 		map = rxm->rxm_map;
2784 
2785 		if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
2786 		    BUS_DMA_NOWAIT) != 0) {
2787 			m_freem(m);
2788 			break;
2789 		}
2790 
2791 		rxm->rxm_m = m;
2792 
2793 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2794 		    BUS_DMASYNC_PREREAD);
2795 
2796 		rxd = &ring[prod];
2797 
2798 		htolem64(&rxd->paddr, map->dm_segs[0].ds_addr);
2799 		rxd->haddr = htole64(0);
2800 
2801 		prod++;
2802 		prod &= mask;
2803 
2804 		post = 1;
2805 	} while (--slots);
2806 
2807 	if_rxr_put(&rxr->rxr_acct, slots);
2808 
2809 	if (if_rxr_inuse(&rxr->rxr_acct) == 0)
2810 		timeout_add(&rxr->rxr_refill, 1);
2811 	else if (post) {
2812 		rxr->rxr_prod = prod;
2813 		ixl_wr(sc, rxr->rxr_tail, prod);
2814 	}
2815 }
2816 
2817 void
2818 ixl_rxrefill(void *arg)
2819 {
2820 	struct ixl_rx_ring *rxr = arg;
2821 	struct ixl_softc *sc = rxr->rxr_sc;
2822 
2823 	ixl_rxfill(sc, rxr);
2824 }
2825 
2826 static int
2827 ixl_rxrinfo(struct ixl_softc *sc, struct if_rxrinfo *ifri)
2828 {
2829 	struct ifnet *ifp = &sc->sc_ac.ac_if;
2830 	struct if_rxring_info *ifr;
2831 	struct ixl_rx_ring *ring;
2832 	int i, rv;
2833 
2834 	if (!ISSET(ifp->if_flags, IFF_RUNNING))
2835 		return (ENOTTY);
2836 
2837 	ifr = mallocarray(sizeof(*ifr), ixl_nqueues(sc), M_TEMP,
2838 	    M_WAITOK|M_CANFAIL|M_ZERO);
2839 	if (ifr == NULL)
2840 		return (ENOMEM);
2841 
2842 	for (i = 0; i < ixl_nqueues(sc); i++) {
2843 		ring = ifp->if_iqs[i]->ifiq_softc;
2844 		ifr[i].ifr_size = MCLBYTES;
2845 		ifr[i].ifr_info = ring->rxr_acct;
2846 	}
2847 
2848 	rv = if_rxr_info_ioctl(ifri, ixl_nqueues(sc), ifr);
2849 	free(ifr, M_TEMP, ixl_nqueues(sc) * sizeof(*ifr));
2850 
2851 	return (rv);
2852 }
2853 
2854 static int
2855 ixl_intr(void *xsc)
2856 {
2857 	struct ixl_softc *sc = xsc;
2858 	struct ifnet *ifp = &sc->sc_ac.ac_if;
2859 	uint32_t icr;
2860 	int rv = 0;
2861 
2862 	ixl_intr_enable(sc);
2863 	icr = ixl_rd(sc, I40E_PFINT_ICR0);
2864 
2865 	if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)) {
2866 		ixl_atq_done(sc);
2867 		task_add(systq, &sc->sc_arq_task);
2868 		rv = 1;
2869 	}
2870 
2871 	if (ISSET(icr, I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)) {
2872 		task_add(systq, &sc->sc_link_state_task);
2873 		rv = 1;
2874 	}
2875 
2876 	if (ISSET(icr, I40E_INTR_NOTX_RX_MASK))
2877 		rv |= ixl_rxeof(sc, ifp->if_iqs[0]);
2878 	if (ISSET(icr, I40E_INTR_NOTX_TX_MASK))
2879 		rv |= ixl_txeof(sc, ifp->if_ifqs[0]);
2880 
2881 	return (rv);
2882 }
2883 
2884 static void
2885 ixl_link_state_update_done(struct ixl_softc *sc, void *arg)
2886 {
2887 	/* IXL_AQ_OP_PHY_LINK_STATUS already posted to admin reply queue */
2888 }
2889 
2890 static void
2891 ixl_link_state_update(void *xsc)
2892 {
2893 	struct ixl_softc *sc = xsc;
2894 	struct ixl_aq_desc *iaq;
2895 	struct ixl_aq_link_param *param;
2896 
2897 	memset(&sc->sc_link_state_atq, 0, sizeof(sc->sc_link_state_atq));
2898 	iaq = &sc->sc_link_state_atq.iatq_desc;
2899 	iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS);
2900 	param = (struct ixl_aq_link_param *)iaq->iaq_param;
2901 	param->notify = IXL_AQ_LINK_NOTIFY;
2902 
2903 	ixl_atq_set(&sc->sc_link_state_atq, ixl_link_state_update_done, NULL);
2904 	ixl_atq_post(sc, &sc->sc_link_state_atq);
2905 }
2906 
2907 static void
2908 ixl_arq_link_status(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
2909 {
2910 	struct ifnet *ifp = &sc->sc_ac.ac_if;
2911 	int link_state;
2912 
2913 	NET_LOCK();
2914 	link_state = ixl_set_link_status(sc, iaq);
2915 	if (ifp->if_link_state != link_state) {
2916 		ifp->if_link_state = link_state;
2917 		if_link_state_change(ifp);
2918 	}
2919 	NET_UNLOCK();
2920 }
2921 
2922 #if 0
2923 static void
2924 ixl_aq_dump(const struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
2925 {
2926 	printf("%s: flags %b opcode %04x\n", DEVNAME(sc),
2927 	    lemtoh16(&iaq->iaq_flags), IXL_AQ_FLAGS_FMT,
2928 	    lemtoh16(&iaq->iaq_opcode));
2929 	printf("%s: datalen %u retval %u\n", DEVNAME(sc),
2930 	    lemtoh16(&iaq->iaq_datalen), lemtoh16(&iaq->iaq_retval));
2931 	printf("%s: cookie %016llx\n", DEVNAME(sc), iaq->iaq_cookie);
2932 	printf("%s: %08x %08x %08x %08x\n", DEVNAME(sc),
2933 	    lemtoh32(&iaq->iaq_param[0]), lemtoh32(&iaq->iaq_param[1]),
2934 	    lemtoh32(&iaq->iaq_param[2]), lemtoh32(&iaq->iaq_param[3]));
2935 }
2936 #endif
2937 
2938 static void
2939 ixl_arq(void *xsc)
2940 {
2941 	struct ixl_softc *sc = xsc;
2942 	struct ixl_aq_desc *arq, *iaq;
2943 	struct ixl_aq_buf *aqb;
2944 	unsigned int cons = sc->sc_arq_cons;
2945 	unsigned int prod;
2946 	int done = 0;
2947 
2948 	prod = ixl_rd(sc, sc->sc_aq_regs->arq_head) &
2949 	    sc->sc_aq_regs->arq_head_mask;
2950 
2951 	if (cons == prod)
2952 		goto done;
2953 
2954 	arq = IXL_DMA_KVA(&sc->sc_arq);
2955 
2956 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
2957 	    0, IXL_DMA_LEN(&sc->sc_arq),
2958 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2959 
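	/*
	 * Walk the events firmware has produced, handle the ones we
	 * understand (currently only link status changes), then return
	 * the buffers and descriptors to the hardware.
	 */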
2960 	do {
2961 		iaq = &arq[cons];
2962 
2963 		aqb = SIMPLEQ_FIRST(&sc->sc_arq_live);
2964 		SIMPLEQ_REMOVE_HEAD(&sc->sc_arq_live, aqb_entry);
2965 		bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN,
2966 		    BUS_DMASYNC_POSTREAD);
2967 
2968 		switch (iaq->iaq_opcode) {
2969 		case HTOLE16(IXL_AQ_OP_PHY_LINK_STATUS):
2970 			ixl_arq_link_status(sc, iaq);
2971 			break;
2972 		}
2973 
2974 		memset(iaq, 0, sizeof(*iaq));
2975 		SIMPLEQ_INSERT_TAIL(&sc->sc_arq_idle, aqb, aqb_entry);
2976 		if_rxr_put(&sc->sc_arq_ring, 1);
2977 
2978 		cons++;
2979 		cons &= IXL_AQ_MASK;
2980 
2981 		done = 1;
2982 	} while (cons != prod);
2983 
2984 	if (done && ixl_arq_fill(sc))
2985 		ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
2986 
2987 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
2988 	    0, IXL_DMA_LEN(&sc->sc_arq),
2989 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2990 
2991 	sc->sc_arq_cons = cons;
2992 
2993 done:
2994 	ixl_intr_enable(sc);
2995 }
2996 
2997 static void
2998 ixl_atq_set(struct ixl_atq *iatq,
2999     void (*fn)(struct ixl_softc *, void *), void *arg)
3000 {
3001 	iatq->iatq_fn = fn;
3002 	iatq->iatq_arg = arg;
3003 }
3004 
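/*
 * Post a command on the admin transmit queue. The descriptor cookie
 * carries the struct ixl_atq pointer back to us, so ixl_atq_done() can
 * copy out the response and run the completion function.
 */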
3005 static void
3006 ixl_atq_post(struct ixl_softc *sc, struct ixl_atq *iatq)
3007 {
3008 	struct ixl_aq_desc *atq, *slot;
3009 	unsigned int prod;
3010 
3011 	/* assert locked */
3012 
3013 	atq = IXL_DMA_KVA(&sc->sc_atq);
3014 	prod = sc->sc_atq_prod;
3015 	slot = atq + prod;
3016 
3017 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3018 	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
3019 
3020 	*slot = iatq->iatq_desc;
3021 	slot->iaq_cookie = (uint64_t)iatq;
3022 
3023 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3024 	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
3025 
3026 	prod++;
3027 	prod &= IXL_AQ_MASK;
3028 	sc->sc_atq_prod = prod;
3029 	ixl_wr(sc, sc->sc_aq_regs->atq_tail, prod);
3030 }
3031 
3032 static void
3033 ixl_atq_done(struct ixl_softc *sc)
3034 {
3035 	struct ixl_aq_desc *atq, *slot;
3036 	struct ixl_atq *iatq;
3037 	unsigned int cons;
3038 	unsigned int prod;
3039 
3040 	prod = sc->sc_atq_prod;
3041 	cons = sc->sc_atq_cons;
3042 
3043 	if (prod == cons)
3044 		return;
3045 
3046 	atq = IXL_DMA_KVA(&sc->sc_atq);
3047 
3048 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3049 	    0, IXL_DMA_LEN(&sc->sc_atq),
3050 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3051 
3052 	do {
3053 		slot = &atq[cons];
3054 		if (!ISSET(slot->iaq_flags, htole16(IXL_AQ_DD)))
3055 			break;
3056 
3057 		iatq = (struct ixl_atq *)slot->iaq_cookie;
3058 		iatq->iatq_desc = *slot;
3059 
3060 		memset(slot, 0, sizeof(*slot));
3061 
3062 		(*iatq->iatq_fn)(sc, iatq->iatq_arg);
3063 
3064 		cons++;
3065 		cons &= IXL_AQ_MASK;
3066 	} while (cons != prod);
3067 
3068 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3069 	    0, IXL_DMA_LEN(&sc->sc_atq),
3070 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3071 
3072 	sc->sc_atq_cons = cons;
3073 }
3074 
3075 static void
3076 ixl_wakeup(struct ixl_softc *sc, void *arg)
3077 {
3078 	struct cond *c = arg;
3079 
3080 	cond_signal(c);
3081 }
3082 
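/*
 * Synchronous admin command: post the descriptor with a wakeup
 * completion and sleep on a condition variable until ixl_atq_done()
 * signals it from the interrupt path.
 */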
3083 static void
3084 ixl_atq_exec(struct ixl_softc *sc, struct ixl_atq *iatq, const char *wmesg)
3085 {
3086 	struct cond c = COND_INITIALIZER();
3087 
3088 	KASSERT(iatq->iatq_desc.iaq_cookie == 0);
3089 
3090 	ixl_atq_set(iatq, ixl_wakeup, &c);
3091 	ixl_atq_post(sc, iatq);
3092 
3093 	cond_wait(&c, wmesg);
3094 }
3095 
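/*
 * Polled admin command for use before interrupts are wired up, e.g.
 * during attach: post a single descriptor and spin in 1ms steps, for
 * up to tm milliseconds, until firmware advances the queue head past
 * our slot.
 */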
3096 static int
3097 ixl_atq_poll(struct ixl_softc *sc, struct ixl_aq_desc *iaq, unsigned int tm)
3098 {
3099 	struct ixl_aq_desc *atq, *slot;
3100 	unsigned int prod;
3101 	unsigned int t = 0;
3102 
3103 	atq = IXL_DMA_KVA(&sc->sc_atq);
3104 	prod = sc->sc_atq_prod;
3105 	slot = atq + prod;
3106 
3107 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3108 	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
3109 
3110 	*slot = *iaq;
3111 	slot->iaq_flags |= htole16(IXL_AQ_SI);
3112 
3113 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3114 	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
3115 
3116 	prod++;
3117 	prod &= IXL_AQ_MASK;
3118 	sc->sc_atq_prod = prod;
3119 	ixl_wr(sc, sc->sc_aq_regs->atq_tail, prod);
3120 
3121 	while (ixl_rd(sc, sc->sc_aq_regs->atq_head) != prod) {
3122 		delaymsec(1);
3123 
3124 		if (t++ > tm)
3125 			return (ETIMEDOUT);
3126 	}
3127 
3128 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3129 	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTREAD);
3130 	*iaq = *slot;
3131 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3132 	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREREAD);
3133 
3134 	sc->sc_atq_cons = prod;
3135 
3136 	return (0);
3137 }
3138 
3139 static int
3140 ixl_get_version(struct ixl_softc *sc)
3141 {
3142 	struct ixl_aq_desc iaq;
3143 	uint32_t fwbuild, fwver, apiver;
3144 
3145 	memset(&iaq, 0, sizeof(iaq));
3146 	iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VERSION);
3147 
3148 	if (ixl_atq_poll(sc, &iaq, 2000) != 0)
3149 		return (ETIMEDOUT);
3150 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK))
3151 		return (EIO);
3152 
3153 	fwbuild = lemtoh32(&iaq.iaq_param[1]);
3154 	fwver = lemtoh32(&iaq.iaq_param[2]);
3155 	apiver = lemtoh32(&iaq.iaq_param[3]);
3156 
3157 	printf(", FW %hu.%hu.%05u API %hu.%hu", (uint16_t)fwver,
3158 	    (uint16_t)(fwver >> 16), fwbuild, (uint16_t)apiver,
3159 	    (uint16_t)(apiver >> 16));
3160 
3161 	return (0);
3162 }
3163 
3164 static int
3165 ixl_pxe_clear(struct ixl_softc *sc)
3166 {
3167 	struct ixl_aq_desc iaq;
3168 
3169 	memset(&iaq, 0, sizeof(iaq));
3170 	iaq.iaq_opcode = htole16(IXL_AQ_OP_CLEAR_PXE_MODE);
3171 	iaq.iaq_param[0] = htole32(0x2);
3172 
3173 	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
3174 		printf(", CLEAR PXE MODE timeout\n");
3175 		return (-1);
3176 	}
3177 
3178 	switch (iaq.iaq_retval) {
3179 	case HTOLE16(IXL_AQ_RC_OK):
3180 	case HTOLE16(IXL_AQ_RC_EEXIST):
3181 		break;
3182 	default:
3183 		printf(", CLEAR PXE MODE error\n");
3184 		return (-1);
3185 	}
3186 
3187 	return (0);
3188 }
3189 
3190 static int
3191 ixl_lldp_shut(struct ixl_softc *sc)
3192 {
3193 	struct ixl_aq_desc iaq;
3194 
3195 	memset(&iaq, 0, sizeof(iaq));
3196 	iaq.iaq_opcode = htole16(IXL_AQ_OP_LLDP_STOP_AGENT);
3197 	iaq.iaq_param[0] = htole32(IXL_LLDP_SHUTDOWN);
3198 
3199 	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
3200 		printf(", STOP LLDP AGENT timeout\n");
3201 		return (-1);
3202 	}
3203 
3204 	switch (iaq.iaq_retval) {
3205 	case HTOLE16(IXL_AQ_RC_EMODE):
3206 	case HTOLE16(IXL_AQ_RC_EPERM):
3207 		/* ignore silently */
3208 	default:
3209 		break;
3210 	}
3211 
3212 	return (0);
3213 }
3214 
3215 static int
3216 ixl_get_mac(struct ixl_softc *sc)
3217 {
3218 	struct ixl_dmamem idm;
3219 	struct ixl_aq_desc iaq;
3220 	struct ixl_aq_mac_addresses *addrs;
3221 	int rv;
3222 
3223 	if (ixl_dmamem_alloc(sc, &idm, sizeof(*addrs), 0) != 0) {
3224 		printf(", unable to allocate mac addresses\n");
3225 		return (-1);
3226 	}
3227 
3228 	memset(&iaq, 0, sizeof(iaq));
3229 	iaq.iaq_flags = htole16(IXL_AQ_BUF);
3230 	iaq.iaq_opcode = htole16(IXL_AQ_OP_MAC_ADDRESS_READ);
3231 	iaq.iaq_datalen = htole16(sizeof(*addrs));
3232 	ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
3233 
3234 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3235 	    BUS_DMASYNC_PREREAD);
3236 
3237 	rv = ixl_atq_poll(sc, &iaq, 250);
3238 
3239 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3240 	    BUS_DMASYNC_POSTREAD);
3241 
3242 	if (rv != 0) {
3243 		printf(", MAC ADDRESS READ timeout\n");
3244 		rv = -1;
3245 		goto done;
3246 	}
3247 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
3248 		printf(", MAC ADDRESS READ error\n");
3249 		rv = -1;
3250 		goto done;
3251 	}
3252 
3253 	addrs = IXL_DMA_KVA(&idm);
3254 	if (!ISSET(iaq.iaq_param[0], htole32(IXL_AQ_MAC_PORT_VALID))) {
3255 		printf(", port address is not valid\n");
3256 		goto done;
3257 	}
3258 
3259 	memcpy(sc->sc_ac.ac_enaddr, addrs->port, ETHER_ADDR_LEN);
3260 	rv = 0;
3261 
3262 done:
3263 	ixl_dmamem_free(sc, &idm);
3264 	return (rv);
3265 }
3266 
3267 static int
3268 ixl_get_switch_config(struct ixl_softc *sc)
3269 {
3270 	struct ixl_dmamem idm;
3271 	struct ixl_aq_desc iaq;
3272 	struct ixl_aq_switch_config *hdr;
3273 	struct ixl_aq_switch_config_element *elms, *elm;
3274 	unsigned int nelm;
3275 	int rv;
3276 
3277 	if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) {
3278 		printf("%s: unable to allocate switch config buffer\n",
3279 		    DEVNAME(sc));
3280 		return (-1);
3281 	}
3282 
3283 	memset(&iaq, 0, sizeof(iaq));
3284 	iaq.iaq_flags = htole16(IXL_AQ_BUF |
3285 	    (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
3286 	iaq.iaq_opcode = htole16(IXL_AQ_OP_SWITCH_GET_CONFIG);
3287 	iaq.iaq_datalen = htole16(IXL_AQ_BUFLEN);
3288 	ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
3289 
3290 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3291 	    BUS_DMASYNC_PREREAD);
3292 
3293 	rv = ixl_atq_poll(sc, &iaq, 250);
3294 
3295 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3296 	    BUS_DMASYNC_POSTREAD);
3297 
3298 	if (rv != 0) {
3299 		printf("%s: GET SWITCH CONFIG timeout\n", DEVNAME(sc));
3300 		rv = -1;
3301 		goto done;
3302 	}
3303 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
3304 		printf("%s: GET SWITCH CONFIG error\n", DEVNAME(sc));
3305 		rv = -1;
3306 		goto done;
3307 	}
3308 
3309 	hdr = IXL_DMA_KVA(&idm);
3310 	elms = (struct ixl_aq_switch_config_element *)(hdr + 1);
3311 
3312 	nelm = lemtoh16(&hdr->num_reported);
3313 	if (nelm < 1) {
3314 		printf("%s: no switch config available\n", DEVNAME(sc));
3315 		rv = -1;
3316 		goto done;
3317 	}
3318 
3319 #if 0
3320 	for (i = 0; i < nelm; i++) {
3321 		elm = &elms[i];
3322 
3323 		printf("%s: type %x revision %u seid %04x\n", DEVNAME(sc),
3324 		    elm->type, elm->revision, lemtoh16(&elm->seid));
3325 		printf("%s: uplink %04x downlink %04x\n", DEVNAME(sc),
3326 		    lemtoh16(&elm->uplink_seid),
3327 		    lemtoh16(&elm->downlink_seid));
3328 		printf("%s: conntype %x scheduler %04x extra %04x\n",
3329 		    DEVNAME(sc), elm->connection_type,
3330 		    lemtoh16(&elm->scheduler_id),
3331 		    lemtoh16(&elm->element_info));
3332 	}
3333 #endif
3334 
3335 	elm = &elms[0];
3336 
3337 	sc->sc_uplink_seid = elm->uplink_seid;
3338 	sc->sc_downlink_seid = elm->downlink_seid;
3339 	sc->sc_seid = elm->seid;
3340 
3341 	if ((sc->sc_uplink_seid == htole16(0)) !=
3342 	    (sc->sc_downlink_seid == htole16(0))) {
3343 		printf("%s: SEIDs are misconfigured\n", DEVNAME(sc));
3344 		rv = -1;
3345 		goto done;
3346 	}
3347 
3348 done:
3349 	ixl_dmamem_free(sc, &idm);
3350 	return (rv);
3351 }
3352 
3353 static int
3354 ixl_phy_mask_ints(struct ixl_softc *sc)
3355 {
3356 	struct ixl_aq_desc iaq;
3357 
3358 	memset(&iaq, 0, sizeof(iaq));
3359 	iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_EVENT_MASK);
3360 	iaq.iaq_param[2] = htole32(IXL_AQ_PHY_EV_MASK &
3361 	    ~(IXL_AQ_PHY_EV_LINK_UPDOWN | IXL_AQ_PHY_EV_MODULE_QUAL_FAIL |
3362 	      IXL_AQ_PHY_EV_MEDIA_NA));
3363 
3364 	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
3365 		printf("%s: SET PHY EVENT MASK timeout\n", DEVNAME(sc));
3366 		return (-1);
3367 	}
3368 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
3369 		printf("%s: SET PHY EVENT MASK error\n", DEVNAME(sc));
3370 		return (-1);
3371 	}
3372 
3373 	return (0);
3374 }
3375 
3376 static int
3377 ixl_get_phy_abilities(struct ixl_softc *sc, struct ixl_dmamem *idm)
3378 {
3379 	struct ixl_aq_desc iaq;
3380 	int rv;
3381 
3382 	memset(&iaq, 0, sizeof(iaq));
3383 	iaq.iaq_flags = htole16(IXL_AQ_BUF |
3384 	    (IXL_DMA_LEN(idm) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
3385 	iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_GET_ABILITIES);
3386 	htolem16(&iaq.iaq_datalen, IXL_DMA_LEN(idm));
3387 	iaq.iaq_param[0] = htole32(IXL_AQ_PHY_REPORT_INIT);
3388 	ixl_aq_dva(&iaq, IXL_DMA_DVA(idm));
3389 
3390 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
3391 	    BUS_DMASYNC_PREREAD);
3392 
3393 	rv = ixl_atq_poll(sc, &iaq, 250);
3394 
3395 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
3396 	    BUS_DMASYNC_POSTREAD);
3397 
3398 	if (rv != 0)
3399 		return (-1);
3400 
3401 	return (lemtoh16(&iaq.iaq_retval));
3402 }
3403 
3404 static int
3405 ixl_get_phy_types(struct ixl_softc *sc, uint64_t *phy_types_ptr)
3406 {
3407 	struct ixl_dmamem idm;
3408 	struct ixl_aq_phy_abilities *phy;
3409 	uint64_t phy_types;
3410 	int rv;
3411 
3412 	if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) {
3413 		printf("%s: unable to allocate phy abilities buffer\n",
3414 		    DEVNAME(sc));
3415 		return (-1);
3416 	}
3417 
3418 	rv = ixl_get_phy_abilities(sc, &idm);
3419 	switch (rv) {
3420 	case -1:
3421 		printf("%s: GET PHY ABILITIES timeout\n", DEVNAME(sc));
3422 		goto done;
3423 	case IXL_AQ_RC_OK:
3424 		break;
3425 	case IXL_AQ_RC_EIO:
3426 		printf("%s: unable to query phy types\n", DEVNAME(sc));
3427 		break;
3428 	default:
3429 		printf("%s: GET PHY ABILITIES error %u\n", DEVNAME(sc), rv);
3430 		goto done;
3431 	}
3432 
3433 	phy = IXL_DMA_KVA(&idm);
3434 
3435 	phy_types = lemtoh32(&phy->phy_type);
3436 	phy_types |= (uint64_t)phy->phy_type_ext << 32;
3437 
3438 	*phy_types_ptr = phy_types;
3439 
3440 	rv = 0;
3441 
3442 done:
3443 	ixl_dmamem_free(sc, &idm);
3444 	return (rv);
3445 }
3446 
3447 /* this returns -1 on failure, or the sff module type */
3448 static int
3449 ixl_get_module_type(struct ixl_softc *sc)
3450 {
3451 	struct ixl_dmamem idm;
3452 	struct ixl_aq_phy_abilities *phy;
3453 	int rv;
3454 
3455 	if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0)
3456 		return (-1);
3457 
3458 	rv = ixl_get_phy_abilities(sc, &idm);
3459 	if (rv != IXL_AQ_RC_OK) {
3460 		rv = -1;
3461 		goto done;
3462 	}
3463 
3464 	phy = IXL_DMA_KVA(&idm);
3465 
3466 	rv = phy->module_type[0];
3467 
3468 done:
3469 	ixl_dmamem_free(sc, &idm);
3470 	return (rv);
3471 }
3472 
3473 static int
3474 ixl_get_link_status(struct ixl_softc *sc)
3475 {
3476 	struct ixl_aq_desc iaq;
3477 	struct ixl_aq_link_param *param;
3478 
3479 	memset(&iaq, 0, sizeof(iaq));
3480 	iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS);
3481 	param = (struct ixl_aq_link_param *)iaq.iaq_param;
3482 	param->notify = IXL_AQ_LINK_NOTIFY;
3483 
3484 	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
3485 		printf("%s: GET LINK STATUS timeout\n", DEVNAME(sc));
3486 		return (-1);
3487 	}
3488 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
3489 		printf("%s: GET LINK STATUS error\n", DEVNAME(sc));
3490 		return (0);
3491 	}
3492 
3493 	sc->sc_ac.ac_if.if_link_state = ixl_set_link_status(sc, &iaq);
3494 
3495 	return (0);
3496 }
3497 
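/*
 * SFP and QSFP modules address their EEPROM pages differently: the SFP
 * path selects a page by writing byte 127 of the EEPROM, while the
 * QSFP path below passes the page as the device address itself. These
 * ops hide that difference from ixl_get_sffpage().
 */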
3498 struct ixl_sff_ops {
3499 	int (*open)(struct ixl_softc *sc, struct if_sffpage *, uint8_t *);
3500 	int (*get)(struct ixl_softc *sc, struct if_sffpage *, size_t);
3501 	int (*close)(struct ixl_softc *sc, struct if_sffpage *, uint8_t);
3502 };
3503 
3504 static int
3505 ixl_sfp_open(struct ixl_softc *sc, struct if_sffpage *sff, uint8_t *page)
3506 {
3507 	int error;
3508 
3509 	if (sff->sff_addr != IFSFF_ADDR_EEPROM)
3510 		return (0);
3511 
3512 	error = ixl_sff_get_byte(sc, IFSFF_ADDR_EEPROM, 127, page);
3513 	if (error != 0)
3514 		return (error);
3515 	if (*page == sff->sff_page)
3516 		return (0);
3517 	error = ixl_sff_set_byte(sc, IFSFF_ADDR_EEPROM, 127, sff->sff_page);
3518 	if (error != 0)
3519 		return (error);
3520 
3521 	return (0);
3522 }
3523 
3524 static int
3525 ixl_sfp_get(struct ixl_softc *sc, struct if_sffpage *sff, size_t i)
3526 {
3527 	return (ixl_sff_get_byte(sc, sff->sff_addr, i, &sff->sff_data[i]));
3528 }
3529 
3530 static int
3531 ixl_sfp_close(struct ixl_softc *sc, struct if_sffpage *sff, uint8_t page)
3532 {
3533 	int error;
3534 
3535 	if (sff->sff_addr != IFSFF_ADDR_EEPROM)
3536 		return (0);
3537 
3538 	if (page == sff->sff_page)
3539 		return (0);
3540 
3541 	error = ixl_sff_set_byte(sc, IFSFF_ADDR_EEPROM, 127, page);
3542 	if (error != 0)
3543 		return (error);
3544 
3545 	return (0);
3546 }
3547 
3548 static const struct ixl_sff_ops ixl_sfp_ops = {
3549 	ixl_sfp_open,
3550 	ixl_sfp_get,
3551 	ixl_sfp_close,
3552 };
3553 
3554 static int
3555 ixl_qsfp_open(struct ixl_softc *sc, struct if_sffpage *sff, uint8_t *page)
3556 {
3557 	if (sff->sff_addr != IFSFF_ADDR_EEPROM)
3558 		return (EIO);
3559 
3560 	return (0);
3561 }
3562 
3563 static int
3564 ixl_qsfp_get(struct ixl_softc *sc, struct if_sffpage *sff, size_t i)
3565 {
3566 	return (ixl_sff_get_byte(sc, sff->sff_page, i, &sff->sff_data[i]));
3567 }
3568 
3569 static int
3570 ixl_qsfp_close(struct ixl_softc *sc, struct if_sffpage *sff, uint8_t page)
3571 {
3572 	return (0);
3573 }
3574 
3575 static const struct ixl_sff_ops ixl_qsfp_ops = {
3576 	ixl_qsfp_open,
3577 	ixl_qsfp_get,
3578 	ixl_qsfp_close,
3579 };
3580 
3581 static int
3582 ixl_get_sffpage(struct ixl_softc *sc, struct if_sffpage *sff)
3583 {
3584 	const struct ixl_sff_ops *ops;
3585 	uint8_t page;
3586 	size_t i;
3587 	int error;
3588 
3589 	switch (ixl_get_module_type(sc)) {
3590 	case -1:
3591 		return (EIO);
3592 	case IXL_SFF8024_ID_SFP:
3593 		ops = &ixl_sfp_ops;
3594 		break;
3595 	case IXL_SFF8024_ID_QSFP:
3596 	case IXL_SFF8024_ID_QSFP_PLUS:
3597 	case IXL_SFF8024_ID_QSFP28:
3598 		ops = &ixl_qsfp_ops;
3599 		break;
3600 	default:
3601 		return (EOPNOTSUPP);
3602 	}
3603 
3604 	error = (*ops->open)(sc, sff, &page);
3605 	if (error != 0)
3606 		return (error);
3607 
3608 	for (i = 0; i < sizeof(sff->sff_data); i++) {
3609 		error = (*ops->get)(sc, sff, i);
3610 		if (error != 0)
3611 			return (error);
3612 	}
3613 
3614 	error = (*ops->close)(sc, sff, page);
3615 
3616 	return (error);
3617 }
3618 
3619 static int
3620 ixl_sff_get_byte(struct ixl_softc *sc, uint8_t dev, uint32_t reg, uint8_t *p)
3621 {
3622 	struct ixl_atq iatq;
3623 	struct ixl_aq_desc *iaq;
3624 	struct ixl_aq_phy_reg_access *param;
3625 
3626 	memset(&iatq, 0, sizeof(iatq));
3627 	iaq = &iatq.iatq_desc;
3628 	iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_GET_REGISTER);
3629 	param = (struct ixl_aq_phy_reg_access *)iaq->iaq_param;
3630 	param->phy_iface = IXL_AQ_PHY_IF_MODULE;
3631 	param->dev_addr = dev;
3632 	htolem32(&param->reg, reg);
3633 
3634 	ixl_atq_exec(sc, &iatq, "ixlsffget");
3635 
3636 	if (ISSET(sc->sc_ac.ac_if.if_flags, IFF_DEBUG)) {
3637 		printf("%s: %s(dev 0x%02x, reg 0x%02x) -> %04x\n",
3638 		    DEVNAME(sc), __func__,
3639 		    dev, reg, lemtoh16(&iaq->iaq_retval));
3640 	}
3641 
3642 	switch (iaq->iaq_retval) {
3643 	case htole16(IXL_AQ_RC_OK):
3644 		break;
3645 	case htole16(IXL_AQ_RC_EBUSY):
3646 		return (EBUSY);
3647 	case htole16(IXL_AQ_RC_ESRCH):
3648 		return (ENODEV);
3649 	case htole16(IXL_AQ_RC_EIO):
3650 	case htole16(IXL_AQ_RC_EINVAL):
3651 	default:
3652 		return (EIO);
3653 	}
3654 
3655 	*p = lemtoh32(&param->val);
3656 
3657 	return (0);
3658 }
3659 
3660 static int
3661 ixl_sff_set_byte(struct ixl_softc *sc, uint8_t dev, uint32_t reg, uint8_t v)
3662 {
3663 	struct ixl_atq iatq;
3664 	struct ixl_aq_desc *iaq;
3665 	struct ixl_aq_phy_reg_access *param;
3666 
3667 	memset(&iatq, 0, sizeof(iatq));
3668 	iaq = &iatq.iatq_desc;
3669 	iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_REGISTER);
3670 	param = (struct ixl_aq_phy_reg_access *)iaq->iaq_param;
3671 	param->phy_iface = IXL_AQ_PHY_IF_MODULE;
3672 	param->dev_addr = dev;
3673 	htolem32(&param->reg, reg);
3674 	htolem32(&param->val, v);
3675 
3676 	ixl_atq_exec(sc, &iatq, "ixlsffset");
3677 
3678 	if (ISSET(sc->sc_ac.ac_if.if_flags, IFF_DEBUG)) {
3679 		printf("%s: %s(dev 0x%02x, reg 0x%02x, val 0x%02x) -> %04x\n",
3680 		    DEVNAME(sc), __func__,
3681 		    dev, reg, v, lemtoh16(&iaq->iaq_retval));
3682 	}
3683 
3684 	switch (iaq->iaq_retval) {
3685 	case htole16(IXL_AQ_RC_OK):
3686 		break;
3687 	case htole16(IXL_AQ_RC_EBUSY):
3688 		return (EBUSY);
3689 	case htole16(IXL_AQ_RC_ESRCH):
3690 		return (ENODEV);
3691 	case htole16(IXL_AQ_RC_EIO):
3692 	case htole16(IXL_AQ_RC_EINVAL):
3693 	default:
3694 		return (EIO);
3695 	}
3696 
3697 	return (0);
3698 }
3699 
3700 static int
3701 ixl_get_vsi(struct ixl_softc *sc)
3702 {
3703 	struct ixl_dmamem *vsi = &sc->sc_scratch;
3704 	struct ixl_aq_desc iaq;
3705 	struct ixl_aq_vsi_param *param;
3706 	struct ixl_aq_vsi_reply *reply;
3707 	int rv;
3708 
3709 	/* grumble, vsi info isn't "known" at compile time */
3710 
3711 	memset(&iaq, 0, sizeof(iaq));
3712 	htolem16(&iaq.iaq_flags, IXL_AQ_BUF |
3713 	    (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
3714 	iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VSI_PARAMS);
3715 	htolem16(&iaq.iaq_datalen, IXL_DMA_LEN(vsi));
3716 	ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi));
3717 
3718 	param = (struct ixl_aq_vsi_param *)iaq.iaq_param;
3719 	param->uplink_seid = sc->sc_seid;
3720 
3721 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
3722 	    BUS_DMASYNC_PREREAD);
3723 
3724 	rv = ixl_atq_poll(sc, &iaq, 250);
3725 
3726 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
3727 	    BUS_DMASYNC_POSTREAD);
3728 
3729 	if (rv != 0) {
3730 		printf("%s: GET VSI timeout\n", DEVNAME(sc));
3731 		return (-1);
3732 	}
3733 
3734 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
3735 		printf("%s: GET VSI error %u\n", DEVNAME(sc),
3736 		    lemtoh16(&iaq.iaq_retval));
3737 		return (-1);
3738 	}
3739 
3740 	reply = (struct ixl_aq_vsi_reply *)iaq.iaq_param;
3741 	sc->sc_vsi_number = reply->vsi_number;
3742 
3743 	return (0);
3744 }
3745 
3746 static int
3747 ixl_set_vsi(struct ixl_softc *sc)
3748 {
3749 	struct ixl_dmamem *vsi = &sc->sc_scratch;
3750 	struct ixl_aq_desc iaq;
3751 	struct ixl_aq_vsi_param *param;
3752 	struct ixl_aq_vsi_data *data = IXL_DMA_KVA(vsi);
3753 	int rv;
3754 
3755 	data->valid_sections = htole16(IXL_AQ_VSI_VALID_QUEUE_MAP |
3756 	    IXL_AQ_VSI_VALID_VLAN);
3757 
3758 	CLR(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_MASK));
3759 	SET(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_CONTIG));
3760 	data->queue_mapping[0] = htole16(0);
3761 	data->tc_mapping[0] = htole16((0 << IXL_AQ_VSI_TC_Q_OFFSET_SHIFT) |
3762 	    (sc->sc_nqueues << IXL_AQ_VSI_TC_Q_NUMBER_SHIFT));
3763 
3764 	CLR(data->port_vlan_flags,
3765 	    htole16(IXL_AQ_VSI_PVLAN_MODE_MASK | IXL_AQ_VSI_PVLAN_EMOD_MASK));
3766 	SET(data->port_vlan_flags,
3767 	    htole16(IXL_AQ_VSI_PVLAN_MODE_ALL | IXL_AQ_VSI_PVLAN_EMOD_NOTHING));
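	/*
	 * i.e. let both tagged and untagged frames through and leave
	 * any tag in place rather than stripping it on receive.
	 */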
3768 
3769 	/* grumble, vsi info isn't "known" at compile time */
3770 
3771 	memset(&iaq, 0, sizeof(iaq));
3772 	htolem16(&iaq.iaq_flags, IXL_AQ_BUF | IXL_AQ_RD |
3773 	    (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
3774 	iaq.iaq_opcode = htole16(IXL_AQ_OP_UPD_VSI_PARAMS);
3775 	htolem16(&iaq.iaq_datalen, IXL_DMA_LEN(vsi));
3776 	ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi));
3777 
3778 	param = (struct ixl_aq_vsi_param *)iaq.iaq_param;
3779 	param->uplink_seid = sc->sc_seid;
3780 
3781 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
3782 	    BUS_DMASYNC_PREWRITE);
3783 
3784 	rv = ixl_atq_poll(sc, &iaq, 250);
3785 
3786 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
3787 	    BUS_DMASYNC_POSTWRITE);
3788 
3789 	if (rv != 0) {
3790 		printf("%s: UPDATE VSI timeout\n", DEVNAME(sc));
3791 		return (-1);
3792 	}
3793 
3794 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
3795 		printf("%s: UPDATE VSI error %u\n", DEVNAME(sc),
3796 		    lemtoh16(&iaq.iaq_retval));
3797 		return (-1);
3798 	}
3799 
3800 	return (0);
3801 }
3802 
3803 static const struct ixl_phy_type *
3804 ixl_search_phy_type(uint8_t phy_type)
3805 {
3806 	const struct ixl_phy_type *itype;
3807 	uint64_t mask;
3808 	unsigned int i;
3809 
3810 	if (phy_type >= 64)
3811 		return (NULL);
3812 
3813 	mask = 1ULL << phy_type;
3814 
3815 	for (i = 0; i < nitems(ixl_phy_type_map); i++) {
3816 		itype = &ixl_phy_type_map[i];
3817 
3818 		if (ISSET(itype->phy_type, mask))
3819 			return (itype);
3820 	}
3821 
3822 	return (NULL);
3823 }
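/*
 * e.g. a firmware phy_type of 5 becomes the mask (1ULL << 5), which
 * is matched against the phy_type bitfields in ixl_phy_type_map to
 * recover the corresponding IFM_* media word; the concrete mapping
 * lives in the table definition earlier in this file.
 */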
3824 
3825 static uint64_t
3826 ixl_search_link_speed(uint8_t link_speed)
3827 {
3828 	const struct ixl_speed_type *type;
3829 	unsigned int i;
3830 
3831 	for (i = 0; i < nitems(ixl_speed_type_map); i++) {
3832 		type = &ixl_speed_type_map[i];
3833 
3834 		if (ISSET(type->dev_speed, link_speed))
3835 			return (type->net_speed);
3836 	}
3837 
3838 	return (0);
3839 }
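/*
 * link_speed arrives as a bitmask in the AQ link status, so a link
 * that negotiated 10Gb/s has the 10G bit set and this returns the
 * matching net_speed (e.g. IF_Gbps(10)) for if_baudrate; the bit to
 * speed pairs come from ixl_speed_type_map.
 */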
3840 
3841 static int
3842 ixl_set_link_status(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
3843 {
3844 	const struct ixl_aq_link_status *status;
3845 	const struct ixl_phy_type *itype;
3846 
3847 	uint64_t ifm_active = IFM_ETHER;
3848 	uint64_t ifm_status = IFM_AVALID;
3849 	int link_state = LINK_STATE_DOWN;
3850 	uint64_t baudrate = 0;
3851 
3852 	status = (const struct ixl_aq_link_status *)iaq->iaq_param;
3853 	if (!ISSET(status->link_info, IXL_AQ_LINK_UP_FUNCTION))
3854 		goto done;
3855 
3856 	ifm_active |= IFM_FDX;
3857 	ifm_status |= IFM_ACTIVE;
3858 	link_state = LINK_STATE_FULL_DUPLEX;
3859 
3860 	itype = ixl_search_phy_type(status->phy_type);
3861 	if (itype != NULL)
3862 		ifm_active |= itype->ifm_type;
3863 
3864 	if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_TX))
3865 		ifm_active |= IFM_ETH_TXPAUSE;
3866 	if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_RX))
3867 		ifm_active |= IFM_ETH_RXPAUSE;
3868 
3869 	baudrate = ixl_search_link_speed(status->link_speed);
3870 
3871 done:
3872 	/* NET_ASSERT_LOCKED() except during attach */
3873 	sc->sc_media_active = ifm_active;
3874 	sc->sc_media_status = ifm_status;
3875 	sc->sc_ac.ac_if.if_baudrate = baudrate;
3876 
3877 	return (link_state);
3878 }
3879 
3880 static int
3881 ixl_restart_an(struct ixl_softc *sc)
3882 {
3883 	struct ixl_aq_desc iaq;
3884 
3885 	memset(&iaq, 0, sizeof(iaq));
3886 	iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_RESTART_AN);
3887 	iaq.iaq_param[0] =
3888 	    htole32(IXL_AQ_PHY_RESTART_AN | IXL_AQ_PHY_LINK_ENABLE);
3889 
3890 	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
3891 		printf("%s: RESTART AN timeout\n", DEVNAME(sc));
3892 		return (-1);
3893 	}
3894 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
3895 		printf("%s: RESTART AN error\n", DEVNAME(sc));
3896 		return (-1);
3897 	}
3898 
3899 	return (0);
3900 }
3901 
3902 static int
3903 ixl_add_macvlan(struct ixl_softc *sc, uint8_t *macaddr, uint16_t vlan, uint16_t flags)
3904 {
3905 	struct ixl_aq_desc iaq;
3906 	struct ixl_aq_add_macvlan *param;
3907 	struct ixl_aq_add_macvlan_elem *elem;
3908 
3909 	memset(&iaq, 0, sizeof(iaq));
3910 	iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
3911 	iaq.iaq_opcode = htole16(IXL_AQ_OP_ADD_MACVLAN);
3912 	iaq.iaq_datalen = htole16(sizeof(*elem));
3913 	ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch));
3914 
3915 	param = (struct ixl_aq_add_macvlan *)&iaq.iaq_param;
3916 	param->num_addrs = htole16(1);
3917 	param->seid0 = htole16(0x8000) | sc->sc_seid;
3918 	param->seid1 = 0;
3919 	param->seid2 = 0;
3920 
3921 	elem = IXL_DMA_KVA(&sc->sc_scratch);
3922 	memset(elem, 0, sizeof(*elem));
3923 	memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN);
3924 	elem->flags = htole16(IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH | flags);
3925 	elem->vlan = htole16(vlan);
3926 
3927 	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
3928 		printf("%s: ADD_MACVLAN timeout\n", DEVNAME(sc));
3929 		return (IXL_AQ_RC_EINVAL);
3930 	}
3931 
3932 	return (letoh16(iaq.iaq_retval));
3933 }
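/*
 * Example: a sketch of installing a unicast filter for the hardware
 * address, in the style of the driver's interface-up path; the
 * IGNORE_VLAN flag name is taken from the element flag definitions,
 * so check the real callers for authoritative usage.
 *
 *	int rv;
 *
 *	rv = ixl_add_macvlan(sc, sc->sc_ac.ac_enaddr, 0,
 *	    IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
 *	if (rv != IXL_AQ_RC_OK)
 *		printf("%s: unable to add macvlan filter\n", DEVNAME(sc));
 */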
3934 
3935 static int
3936 ixl_remove_macvlan(struct ixl_softc *sc, uint8_t *macaddr, uint16_t vlan, uint16_t flags)
3937 {
3938 	struct ixl_aq_desc iaq;
3939 	struct ixl_aq_remove_macvlan *param;
3940 	struct ixl_aq_remove_macvlan_elem *elem;
3941 
3942 	memset(&iaq, 0, sizeof(iaq));
3943 	iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
3944 	iaq.iaq_opcode = htole16(IXL_AQ_OP_REMOVE_MACVLAN);
3945 	iaq.iaq_datalen = htole16(sizeof(*elem));
3946 	ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch));
3947 
3948 	param = (struct ixl_aq_remove_macvlan *)&iaq.iaq_param;
3949 	param->num_addrs = htole16(1);
3950 	param->seid0 = htole16(0x8000) | sc->sc_seid;
3951 	param->seid1 = 0;
3952 	param->seid2 = 0;
3953 
3954 	elem = IXL_DMA_KVA(&sc->sc_scratch);
3955 	memset(elem, 0, sizeof(*elem));
3956 	memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN);
3957 	elem->flags = htole16(IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH | flags);
3958 	elem->vlan = htole16(vlan);
3959 
3960 	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
3961 		printf("%s: REMOVE_MACVLAN timeout\n", DEVNAME(sc));
3962 		return (IXL_AQ_RC_EINVAL);
3963 	}
3964 
3965 	return (letoh16(iaq.iaq_retval));
3966 }
3967 
3968 static int
3969 ixl_hmc(struct ixl_softc *sc)
3970 {
3971 	struct {
3972 		uint32_t   count;
3973 		uint32_t   minsize;
3974 		bus_size_t maxcnt;
3975 		bus_size_t setoff;
3976 		bus_size_t setcnt;
3977 	} regs[] = {
3978 		{
3979 			0,
3980 			IXL_HMC_TXQ_MINSIZE,
3981 			I40E_GLHMC_LANTXOBJSZ,
3982 			I40E_GLHMC_LANTXBASE(sc->sc_pf_id),
3983 			I40E_GLHMC_LANTXCNT(sc->sc_pf_id),
3984 		},
3985 		{
3986 			0,
3987 			IXL_HMC_RXQ_MINSIZE,
3988 			I40E_GLHMC_LANRXOBJSZ,
3989 			I40E_GLHMC_LANRXBASE(sc->sc_pf_id),
3990 			I40E_GLHMC_LANRXCNT(sc->sc_pf_id),
3991 		},
3992 		{
3993 			0,
3994 			0,
3995 			I40E_GLHMC_FCOEMAX,
3996 			I40E_GLHMC_FCOEDDPBASE(sc->sc_pf_id),
3997 			I40E_GLHMC_FCOEDDPCNT(sc->sc_pf_id),
3998 		},
3999 		{
4000 			0,
4001 			0,
4002 			I40E_GLHMC_FCOEFMAX,
4003 			I40E_GLHMC_FCOEFBASE(sc->sc_pf_id),
4004 			I40E_GLHMC_FCOEFCNT(sc->sc_pf_id),
4005 		},
4006 	};
4007 	struct ixl_hmc_entry *e;
4008 	uint64_t size, dva;
4009 	uint8_t *kva;
4010 	uint64_t *sdpage;
4011 	unsigned int i;
4012 	int npages, tables;
4013 
4014 	CTASSERT(nitems(regs) <= nitems(sc->sc_hmc_entries));
4015 
4016 	regs[IXL_HMC_LAN_TX].count = regs[IXL_HMC_LAN_RX].count =
4017 	    ixl_rd(sc, I40E_GLHMC_LANQMAX);
4018 
4019 	size = 0;
4020 	for (i = 0; i < nitems(regs); i++) {
4021 		e = &sc->sc_hmc_entries[i];
4022 
4023 		e->hmc_count = regs[i].count;
4024 		e->hmc_size = 1U << ixl_rd(sc, regs[i].maxcnt);
4025 		e->hmc_base = size;
4026 
4027 		if ((e->hmc_size * 8) < regs[i].minsize) {
4028 			printf("%s: kernel hmc entry is too big\n",
4029 			    DEVNAME(sc));
4030 			return (-1);
4031 		}
4032 
4033 		size += roundup(e->hmc_size * e->hmc_count, IXL_HMC_ROUNDUP);
4034 	}
4035 	size = roundup(size, IXL_HMC_PGSIZE);
4036 	npages = size / IXL_HMC_PGSIZE;
4037 
4038 	tables = roundup(size, IXL_HMC_L2SZ) / IXL_HMC_L2SZ;
4039 
4040 	if (ixl_dmamem_alloc(sc, &sc->sc_hmc_pd, size, IXL_HMC_PGSIZE) != 0) {
4041 		printf("%s: unable to allocate hmc pd memory\n", DEVNAME(sc));
4042 		return (-1);
4043 	}
4044 
4045 	if (ixl_dmamem_alloc(sc, &sc->sc_hmc_sd, tables * IXL_HMC_PGSIZE,
4046 	    IXL_HMC_PGSIZE) != 0) {
4047 		printf("%s: unable to allocate hmc sd memory\n", DEVNAME(sc));
4048 		ixl_dmamem_free(sc, &sc->sc_hmc_pd);
4049 		return (-1);
4050 	}
4051 
4052 	kva = IXL_DMA_KVA(&sc->sc_hmc_pd);
4053 	memset(kva, 0, IXL_DMA_LEN(&sc->sc_hmc_pd));
4054 
4055 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
4056 	    0, IXL_DMA_LEN(&sc->sc_hmc_pd),
4057 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4058 
4059 	dva = IXL_DMA_DVA(&sc->sc_hmc_pd);
4060 	sdpage = IXL_DMA_KVA(&sc->sc_hmc_sd);
4061 	for (i = 0; i < npages; i++) {
4062 		htolem64(sdpage++, dva | IXL_HMC_PDVALID);
4063 
4064 		dva += IXL_HMC_PGSIZE;
4065 	}
4066 
4067 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_sd),
4068 	    0, IXL_DMA_LEN(&sc->sc_hmc_sd),
4069 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4070 
4071 	dva = IXL_DMA_DVA(&sc->sc_hmc_sd);
4072 	for (i = 0; i < tables; i++) {
4073 		uint32_t count;
4074 
4075 		KASSERT(npages >= 0);
4076 
4077 		count = (npages > IXL_HMC_PGS) ? IXL_HMC_PGS : npages;
4078 
4079 		ixl_wr(sc, I40E_PFHMC_SDDATAHIGH, dva >> 32);
4080 		ixl_wr(sc, I40E_PFHMC_SDDATALOW, dva |
4081 		    (count << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |
4082 		    (1U << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT));
4083 		ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
4084 		ixl_wr(sc, I40E_PFHMC_SDCMD,
4085 		    (1U << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | i);
4086 
4087 		npages -= IXL_HMC_PGS;
4088 		dva += IXL_HMC_PGSIZE;
4089 	}
4090 
4091 	for (i = 0; i < nitems(regs); i++) {
4092 		e = &sc->sc_hmc_entries[i];
4093 
4094 		ixl_wr(sc, regs[i].setoff, e->hmc_base / IXL_HMC_ROUNDUP);
4095 		ixl_wr(sc, regs[i].setcnt, e->hmc_count);
4096 	}
4097 
4098 	return (0);
4099 }
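/*
 * Sizing example (illustrative register values, not from any
 * particular chip): if I40E_GLHMC_LANTXOBJSZ reads 7, each TX context
 * object is 1 << 7 == 128 bytes; with a LANQMAX of 1536 queues the TX
 * object table is 192kB, rounded up to IXL_HMC_ROUNDUP before the
 * next entry's base is assigned, and the grand total is rounded up to
 * whole IXL_HMC_PGSIZE pages before the descriptors are programmed.
 */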
4100 
4101 static void
4102 ixl_hmc_free(struct ixl_softc *sc)
4103 {
4104 	ixl_dmamem_free(sc, &sc->sc_hmc_sd);
4105 	ixl_dmamem_free(sc, &sc->sc_hmc_pd);
4106 }
4107 
4108 static void
4109 ixl_hmc_pack(void *d, const void *s, const struct ixl_hmc_pack *packing,
4110     unsigned int npacking)
4111 {
4112 	uint8_t *dst = d;
4113 	const uint8_t *src = s;
4114 	unsigned int i;
4115 
4116 	for (i = 0; i < npacking; i++) {
4117 		const struct ixl_hmc_pack *pack = &packing[i];
4118 		unsigned int offset = pack->lsb / 8;
4119 		unsigned int align = pack->lsb % 8;
4120 		const uint8_t *in = src + pack->offset;
4121 		uint8_t *out = dst + offset;
4122 		int width = pack->width;
4123 		unsigned int inbits = 0;
4124 
4125 		if (align) {
4126 			inbits = (*in++) << align;
4127 			*out++ |= (inbits & 0xff);
4128 			inbits >>= 8;
4129 
4130 			width -= 8 - align;
4131 		}
4132 
4133 		while (width >= 8) {
4134 			inbits |= (*in++) << align;
4135 			*out++ = (inbits & 0xff);
4136 			inbits >>= 8;
4137 
4138 			width -= 8;
4139 		}
4140 
4141 		if (width > 0) {
4142 			inbits |= (*in) << align;
4143 			*out |= (inbits & ((1 << width) - 1));
4144 		}
4145 	}
4146 }
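/*
 * Worked example: packing an 8-bit field at lsb 4 (offset 0, align 4)
 * from a source byte of 0xab.  The align step ORs 0xb0 into the first
 * destination byte and carries 0x0a, and the tail step ORs 0x0a into
 * the low nibble of the second byte, leaving bits 11..4 of the
 * destination holding 0xab.  The destination must start out zeroed,
 * which the HMC callers arrange before packing contexts.
 */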
4147 
4148 static struct ixl_aq_buf *
4149 ixl_aqb_alloc(struct ixl_softc *sc)
4150 {
4151 	struct ixl_aq_buf *aqb;
4152 
4153 	aqb = malloc(sizeof(*aqb), M_DEVBUF, M_WAITOK);
4154 	if (aqb == NULL)
4155 		return (NULL);
4156 
4157 	aqb->aqb_data = dma_alloc(IXL_AQ_BUFLEN, PR_WAITOK);
4158 	if (aqb->aqb_data == NULL)
4159 		goto free;
4160 
4161 	if (bus_dmamap_create(sc->sc_dmat, IXL_AQ_BUFLEN, 1,
4162 	    IXL_AQ_BUFLEN, 0,
4163 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
4164 	    &aqb->aqb_map) != 0)
4165 		goto dma_free;
4166 
4167 	if (bus_dmamap_load(sc->sc_dmat, aqb->aqb_map, aqb->aqb_data,
4168 	    IXL_AQ_BUFLEN, NULL, BUS_DMA_WAITOK) != 0)
4169 		goto destroy;
4170 
4171 	return (aqb);
4172 
4173 destroy:
4174 	bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
4175 dma_free:
4176 	dma_free(aqb->aqb_data, IXL_AQ_BUFLEN);
4177 free:
4178 	free(aqb, M_DEVBUF, sizeof(*aqb));
4179 
4180 	return (NULL);
4181 }
4182 
4183 static void
4184 ixl_aqb_free(struct ixl_softc *sc, struct ixl_aq_buf *aqb)
4185 {
4186 	bus_dmamap_unload(sc->sc_dmat, aqb->aqb_map);
4187 	bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
4188 	dma_free(aqb->aqb_data, IXL_AQ_BUFLEN);
4189 	free(aqb, M_DEVBUF, sizeof(*aqb));
4190 }
4191 
4192 static int
4193 ixl_arq_fill(struct ixl_softc *sc)
4194 {
4195 	struct ixl_aq_buf *aqb;
4196 	struct ixl_aq_desc *arq, *iaq;
4197 	unsigned int prod = sc->sc_arq_prod;
4198 	unsigned int n;
4199 	int post = 0;
4200 
4201 	n = if_rxr_get(&sc->sc_arq_ring, IXL_AQ_NUM);
4202 	arq = IXL_DMA_KVA(&sc->sc_arq);
4203 
4204 	while (n > 0) {
4205 		aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle);
4206 		if (aqb != NULL)
4207 			SIMPLEQ_REMOVE_HEAD(&sc->sc_arq_idle, aqb_entry);
4208 		else if ((aqb = ixl_aqb_alloc(sc)) == NULL)
4209 			break;
4210 
4211 		memset(aqb->aqb_data, 0, IXL_AQ_BUFLEN);
4212 
4213 		bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN,
4214 		    BUS_DMASYNC_PREREAD);
4215 
4216 		iaq = &arq[prod];
4217 		iaq->iaq_flags = htole16(IXL_AQ_BUF |
4218 		    (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4219 		iaq->iaq_opcode = 0;
4220 		iaq->iaq_datalen = htole16(IXL_AQ_BUFLEN);
4221 		iaq->iaq_retval = 0;
4222 		iaq->iaq_cookie = 0;
4223 		iaq->iaq_param[0] = 0;
4224 		iaq->iaq_param[1] = 0;
4225 		ixl_aq_dva(iaq, aqb->aqb_map->dm_segs[0].ds_addr);
4226 
4227 		SIMPLEQ_INSERT_TAIL(&sc->sc_arq_live, aqb, aqb_entry);
4228 
4229 		prod++;
4230 		prod &= IXL_AQ_MASK;
4231 
4232 		post = 1;
4233 
4234 		n--;
4235 	}
4236 
4237 	if_rxr_put(&sc->sc_arq_ring, n);
4238 	sc->sc_arq_prod = prod;
4239 
4240 	return (post);
4241 }
4242 
4243 static void
4244 ixl_arq_unfill(struct ixl_softc *sc)
4245 {
4246 	struct ixl_aq_buf *aqb;
4247 
4248 	while ((aqb = SIMPLEQ_FIRST(&sc->sc_arq_live)) != NULL) {
4249 		SIMPLEQ_REMOVE_HEAD(&sc->sc_arq_live, aqb_entry);
4250 
4251 		bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN,
4252 		    BUS_DMASYNC_POSTREAD);
4253 		ixl_aqb_free(sc, aqb);
4254 	}
4255 }
4256 
4257 static void
4258 ixl_clear_hw(struct ixl_softc *sc)
4259 {
4260 	uint32_t num_queues, base_queue;
4261 	uint32_t num_pf_int;
4262 	uint32_t num_vf_int;
4263 	uint32_t num_vfs;
4264 	uint32_t i, j;
4265 	uint32_t val;
4266 	uint32_t eol = 0x7ff;
4267 
4268 	/* get number of interrupts, queues, and vfs */
4269 	val = ixl_rd(sc, I40E_GLPCI_CNF2);
4270 	num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
4271 	    I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
4272 	num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
4273 	    I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;
4274 
4275 	val = ixl_rd(sc, I40E_PFLAN_QALLOC);
4276 	base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
4277 	    I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
4278 	j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
4279 	    I40E_PFLAN_QALLOC_LASTQ_SHIFT;
4280 	if (val & I40E_PFLAN_QALLOC_VALID_MASK)
4281 		num_queues = (j - base_queue) + 1;
4282 	else
4283 		num_queues = 0;
4284 
4285 	val = ixl_rd(sc, I40E_PF_VT_PFALLOC);
4286 	i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
4287 	    I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
4288 	j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
4289 	    I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
4290 	if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
4291 		num_vfs = (j - i) + 1;
4292 	else
4293 		num_vfs = 0;
4294 
4295 	/* stop all the interrupts */
4296 	ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0);
4297 	val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
4298 	for (i = 0; i < num_pf_int - 2; i++)
4299 		ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), val);
4300 
4301 	/* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
4302 	val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
4303 	ixl_wr(sc, I40E_PFINT_LNKLST0, val);
4304 	for (i = 0; i < num_pf_int - 2; i++)
4305 		ixl_wr(sc, I40E_PFINT_LNKLSTN(i), val);
4306 	val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
4307 	for (i = 0; i < num_vfs; i++)
4308 		ixl_wr(sc, I40E_VPINT_LNKLST0(i), val);
4309 	for (i = 0; i < num_vf_int - 2; i++)
4310 		ixl_wr(sc, I40E_VPINT_LNKLSTN(i), val);
4311 
4312 	/* warn the HW of the coming Tx disables */
4313 	for (i = 0; i < num_queues; i++) {
4314 		uint32_t abs_queue_idx = base_queue + i;
4315 		uint32_t reg_block = 0;
4316 
4317 		if (abs_queue_idx >= 128) {
4318 			reg_block = abs_queue_idx / 128;
4319 			abs_queue_idx %= 128;
4320 		}
4321 
4322 		val = ixl_rd(sc, I40E_GLLAN_TXPRE_QDIS(reg_block));
4323 		val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
4324 		val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
4325 		val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
4326 
4327 		ixl_wr(sc, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
4328 	}
4329 	delaymsec(400);
4330 
4331 	/* stop all the queues */
4332 	for (i = 0; i < num_queues; i++) {
4333 		ixl_wr(sc, I40E_QINT_TQCTL(i), 0);
4334 		ixl_wr(sc, I40E_QTX_ENA(i), 0);
4335 		ixl_wr(sc, I40E_QINT_RQCTL(i), 0);
4336 		ixl_wr(sc, I40E_QRX_ENA(i), 0);
4337 	}
4338 
4339 	/* short wait for all queue disables to settle */
4340 	delaymsec(50);
4341 }
4342 
4343 static int
4344 ixl_pf_reset(struct ixl_softc *sc)
4345 {
4346 	uint32_t cnt = 0;
4347 	uint32_t cnt1 = 0;
4348 	uint32_t reg = 0;
4349 	uint32_t grst_del;
4350 
4351 	/*
4352 	 * Poll for Global Reset steady state in case of recent GRST.
4353 	 * The grst delay value is in 100ms units, and we'll wait a
4354 	 * couple counts longer to be sure we don't just miss the end.
4355 	 */
4356 	grst_del = ixl_rd(sc, I40E_GLGEN_RSTCTL);
4357 	grst_del &= I40E_GLGEN_RSTCTL_GRSTDEL_MASK;
4358 	grst_del >>= I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
4359 	grst_del += 10;
4360 
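	/*
	 * e.g. a GRSTDEL field of 30 yields 40 polls at 100ms each,
	 * i.e. up to ~4 seconds for a global reset to settle.
	 */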
4361 	for (cnt = 0; cnt < grst_del; cnt++) {
4362 		reg = ixl_rd(sc, I40E_GLGEN_RSTAT);
4363 		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
4364 			break;
4365 		delaymsec(100);
4366 	}
4367 	if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
4368 		printf(", Global reset polling failed to complete\n");
4369 		return (-1);
4370 	}
4371 
4372 	/* Now Wait for the FW to be ready */
4373 	for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
4374 		reg = ixl_rd(sc, I40E_GLNVM_ULD);
4375 		reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
4376 		    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
4377 		if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
4378 		    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))
4379 			break;
4380 
4381 		delaymsec(10);
4382 	}
4383 	if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
4384 	    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
4385 		printf(", wait for FW Reset complete timed out "
4386 		    "(I40E_GLNVM_ULD = 0x%x)\n", reg);
4387 		return (-1);
4388 	}
4389 
4390 	/*
4391 	 * If there was a Global Reset in progress when we got here,
4392 	 * we don't need to do the PF Reset
4393 	 */
4394 	if (cnt == 0) {
4395 		reg = ixl_rd(sc, I40E_PFGEN_CTRL);
4396 		ixl_wr(sc, I40E_PFGEN_CTRL, reg | I40E_PFGEN_CTRL_PFSWR_MASK);
4397 		for (cnt = 0; cnt < I40E_PF_RESET_WAIT_COUNT; cnt++) {
4398 			reg = ixl_rd(sc, I40E_PFGEN_CTRL);
4399 			if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
4400 				break;
4401 			delaymsec(1);
4402 		}
4403 		if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
4404 			printf(", PF reset polling failed to complete"
4405 			    " (I40E_PFGEN_CTRL = 0x%x)\n", reg);
4406 			return (-1);
4407 		}
4408 	}
4409 
4410 	return (0);
4411 }
4412 
4413 static int
4414 ixl_dmamem_alloc(struct ixl_softc *sc, struct ixl_dmamem *ixm,
4415     bus_size_t size, u_int align)
4416 {
4417 	ixm->ixm_size = size;
4418 
4419 	if (bus_dmamap_create(sc->sc_dmat, ixm->ixm_size, 1,
4420 	    ixm->ixm_size, 0,
4421 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
4422 	    &ixm->ixm_map) != 0)
4423 		return (1);
4424 	if (bus_dmamem_alloc(sc->sc_dmat, ixm->ixm_size,
4425 	    align, 0, &ixm->ixm_seg, 1, &ixm->ixm_nsegs,
4426 	    BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
4427 		goto destroy;
4428 	if (bus_dmamem_map(sc->sc_dmat, &ixm->ixm_seg, ixm->ixm_nsegs,
4429 	    ixm->ixm_size, &ixm->ixm_kva, BUS_DMA_WAITOK) != 0)
4430 		goto free;
4431 	if (bus_dmamap_load(sc->sc_dmat, ixm->ixm_map, ixm->ixm_kva,
4432 	    ixm->ixm_size, NULL, BUS_DMA_WAITOK) != 0)
4433 		goto unmap;
4434 
4435 	return (0);
4436 unmap:
4437 	bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
4438 free:
4439 	bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
4440 destroy:
4441 	bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
4442 	return (1);
4443 }
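/*
 * Example: the usual ixl_dmamem lifecycle, a minimal sketch in the
 * style of the scratch and ring allocations made during attach:
 *
 *	struct ixl_dmamem idm;
 *
 *	if (ixl_dmamem_alloc(sc, &idm, PAGE_SIZE, IXL_HMC_PGSIZE) != 0)
 *		return (ENOMEM);
 *	... memory comes back zeroed (BUS_DMA_ZERO); hand
 *	IXL_DMA_DVA(&idm) to the hardware and sync as needed ...
 *	ixl_dmamem_free(sc, &idm);
 */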
4444 
4445 static void
4446 ixl_dmamem_free(struct ixl_softc *sc, struct ixl_dmamem *ixm)
4447 {
4448 	bus_dmamap_unload(sc->sc_dmat, ixm->ixm_map);
4449 	bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
4450 	bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
4451 	bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
4452 }
4453