1 /*	$NetBSD: if_ixl.c,v 1.99 2024/06/29 12:11:12 riastradh Exp $	*/
2 
3 /*
4  * Copyright (c) 2013-2015, Intel Corporation
5  * All rights reserved.
6 
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions are met:
9  *
10  *  1. Redistributions of source code must retain the above copyright notice,
11  *     this list of conditions and the following disclaimer.
12  *
13  *  2. Redistributions in binary form must reproduce the above copyright
14  *     notice, this list of conditions and the following disclaimer in the
15  *     documentation and/or other materials provided with the distribution.
16  *
17  *  3. Neither the name of the Intel Corporation nor the names of its
18  *     contributors may be used to endorse or promote products derived from
19  *     this software without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 /*
35  * Copyright (c) 2016,2017 David Gwynne <dlg@openbsd.org>
36  *
37  * Permission to use, copy, modify, and distribute this software for any
38  * purpose with or without fee is hereby granted, provided that the above
39  * copyright notice and this permission notice appear in all copies.
40  *
41  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
42  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
43  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
44  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
45  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
46  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
47  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
48  */
49 
50 /*
51  * Copyright (c) 2019 Internet Initiative Japan, Inc.
52  * All rights reserved.
53  *
54  * Redistribution and use in source and binary forms, with or without
55  * modification, are permitted provided that the following conditions
56  * are met:
57  * 1. Redistributions of source code must retain the above copyright
58  *    notice, this list of conditions and the following disclaimer.
59  * 2. Redistributions in binary form must reproduce the above copyright
60  *    notice, this list of conditions and the following disclaimer in the
61  *    documentation and/or other materials provided with the distribution.
62  *
63  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
64  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
65  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
66  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
67  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
68  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
69  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
70  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
71  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
72  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
73  * POSSIBILITY OF SUCH DAMAGE.
74  */
75 
76 #include <sys/cdefs.h>
77 __KERNEL_RCSID(0, "$NetBSD: if_ixl.c,v 1.99 2024/06/29 12:11:12 riastradh Exp $");
78 
79 #ifdef _KERNEL_OPT
80 #include "opt_if_ixl.h"
81 #endif
82 
83 #include <sys/param.h>
84 #include <sys/types.h>
85 
86 #include <sys/bitops.h>
87 #include <sys/cpu.h>
88 #include <sys/device.h>
89 #include <sys/evcnt.h>
90 #include <sys/interrupt.h>
91 #include <sys/kmem.h>
92 #include <sys/module.h>
93 #include <sys/mutex.h>
94 #include <sys/pcq.h>
95 #include <sys/syslog.h>
96 #include <sys/workqueue.h>
97 #include <sys/xcall.h>
98 
99 #include <sys/bus.h>
100 
101 #include <net/bpf.h>
102 #include <net/if.h>
103 #include <net/if_dl.h>
104 #include <net/if_media.h>
105 #include <net/if_ether.h>
106 #include <net/rss_config.h>
107 
108 #include <netinet/tcp.h>	/* for struct tcphdr */
109 #include <netinet/udp.h>	/* for struct udphdr */
110 
111 #include <dev/pci/pcivar.h>
112 #include <dev/pci/pcidevs.h>
113 
114 #include <dev/pci/if_ixlreg.h>
115 #include <dev/pci/if_ixlvar.h>
116 
117 #include <prop/proplib.h>
118 
119 struct ixl_softc; /* defined */
120 
121 #define I40E_PF_RESET_WAIT_COUNT	200
122 #define I40E_AQ_LARGE_BUF		512
123 
124 /* bitfields for Tx queue mapping in QTX_CTL */
125 #define I40E_QTX_CTL_VF_QUEUE		0x0
126 #define I40E_QTX_CTL_VM_QUEUE		0x1
127 #define I40E_QTX_CTL_PF_QUEUE		0x2
128 
129 #define I40E_QUEUE_TYPE_EOL		0x7ff
130 #define I40E_INTR_NOTX_QUEUE		0
131 
132 #define I40E_QUEUE_TYPE_RX		0x0
133 #define I40E_QUEUE_TYPE_TX		0x1
134 #define I40E_QUEUE_TYPE_PE_CEQ		0x2
135 #define I40E_QUEUE_TYPE_UNKNOWN		0x3
136 
137 #define I40E_ITR_INDEX_RX		0x0
138 #define I40E_ITR_INDEX_TX		0x1
139 #define I40E_ITR_INDEX_OTHER		0x2
140 #define I40E_ITR_INDEX_NONE		0x3
141 #define IXL_ITR_RX			0x7a /* 4K intrs/sec */
142 #define IXL_ITR_TX			0x7a /* 4K intrs/sec */
143 
144 #define I40E_INTR_NOTX_QUEUE		0
145 #define I40E_INTR_NOTX_INTR		0
146 #define I40E_INTR_NOTX_RX_QUEUE		0
147 #define I40E_INTR_NOTX_TX_QUEUE		1
148 #define I40E_INTR_NOTX_RX_MASK		I40E_PFINT_ICR0_QUEUE_0_MASK
149 #define I40E_INTR_NOTX_TX_MASK		I40E_PFINT_ICR0_QUEUE_1_MASK
150 
151 #define I40E_HASH_LUT_SIZE_128		0
152 
153 #define IXL_ICR0_CRIT_ERR_MASK			\
154 	(I40E_PFINT_ICR0_PCI_EXCEPTION_MASK |	\
155 	I40E_PFINT_ICR0_ECC_ERR_MASK |		\
156 	I40E_PFINT_ICR0_PE_CRITERR_MASK)
157 
158 #define IXL_QUEUE_MAX_XL710		64
159 #define IXL_QUEUE_MAX_X722		128
160 
161 #define IXL_TX_PKT_DESCS		8
162 #define IXL_TX_PKT_MAXSIZE		(MCLBYTES * IXL_TX_PKT_DESCS)
163 #define IXL_TX_QUEUE_ALIGN		128
164 #define IXL_RX_QUEUE_ALIGN		128
165 
166 #define IXL_MCLBYTES			(MCLBYTES - ETHER_ALIGN)
167 #define IXL_MTU_ETHERLEN		ETHER_HDR_LEN		\
168 					+ ETHER_CRC_LEN		\
169 					+ ETHER_VLAN_ENCAP_LEN
170 #if 0
171 #define IXL_MAX_MTU			(9728 - IXL_MTU_ETHERLEN)
172 #else
173 /* (dbuff * 5) - ETHER_HDR_LEN - ETHER_CRC_LEN */
174 #define IXL_MAX_MTU			(9600 - IXL_MTU_ETHERLEN)
175 #endif
176 #define IXL_MIN_MTU			(ETHER_MIN_LEN - ETHER_CRC_LEN)
177 
178 #define IXL_PCIREG			PCI_MAPREG_START
179 
180 #define IXL_ITR0			0x0
181 #define IXL_ITR1			0x1
182 #define IXL_ITR2			0x2
183 #define IXL_NOITR			0x3
184 
185 #define IXL_AQ_NUM			256
186 #define IXL_AQ_MASK			(IXL_AQ_NUM - 1)
187 #define IXL_AQ_ALIGN			64 /* lol */
188 #define IXL_AQ_BUFLEN			4096
189 
190 #define IXL_HMC_ROUNDUP			512
191 #define IXL_HMC_PGSIZE			4096
192 #define IXL_HMC_DVASZ			sizeof(uint64_t)
193 #define IXL_HMC_PGS			(IXL_HMC_PGSIZE / IXL_HMC_DVASZ)
194 #define IXL_HMC_L2SZ			(IXL_HMC_PGSIZE * IXL_HMC_PGS)
195 #define IXL_HMC_PDVALID			1ULL
196 
197 #define IXL_ATQ_EXEC_TIMEOUT		(10 * hz)
198 
199 #define IXL_SRRD_SRCTL_ATTEMPTS		100000
200 
201 struct ixl_aq_regs {
202 	bus_size_t		atq_tail;
203 	bus_size_t		atq_head;
204 	bus_size_t		atq_len;
205 	bus_size_t		atq_bal;
206 	bus_size_t		atq_bah;
207 
208 	bus_size_t		arq_tail;
209 	bus_size_t		arq_head;
210 	bus_size_t		arq_len;
211 	bus_size_t		arq_bal;
212 	bus_size_t		arq_bah;
213 
214 	uint32_t		atq_len_enable;
215 	uint32_t		atq_tail_mask;
216 	uint32_t		atq_head_mask;
217 
218 	uint32_t		arq_len_enable;
219 	uint32_t		arq_tail_mask;
220 	uint32_t		arq_head_mask;
221 };
222 
223 struct ixl_phy_type {
224 	uint64_t	phy_type;
225 	uint64_t	ifm_type;
226 };
227 
228 struct ixl_speed_type {
229 	uint8_t		dev_speed;
230 	uint64_t	net_speed;
231 };
232 
233 struct ixl_hmc_entry {
234 	uint64_t		 hmc_base;
235 	uint32_t		 hmc_count;
236 	uint64_t		 hmc_size;
237 };
238 
239 enum  ixl_hmc_types {
240 	IXL_HMC_LAN_TX = 0,
241 	IXL_HMC_LAN_RX,
242 	IXL_HMC_FCOE_CTX,
243 	IXL_HMC_FCOE_FILTER,
244 	IXL_HMC_COUNT
245 };
246 
247 struct ixl_hmc_pack {
248 	uint16_t		offset;
249 	uint16_t		width;
250 	uint16_t		lsb;
251 };
252 
253 /*
254  * these hmc objects have weird sizes and alignments, so these are abstract
255  * representations of them that are nice for c to populate.
256  *
257  * the packing code relies on little-endian values being stored in the fields,
258  * no high bits in the fields being set, and the fields must be packed in the
259  * same order as they are in the ctx structure.
260  */
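
/*
 * Illustrative sketch only (kept out of the build with #if 0): one way a
 * pack table entry could be consumed.  Each entry copies "width" bits from
 * the little-endian field at byte "offset" of the abstract struct into bit
 * position "lsb" of the raw HMC context.  This is an assumption about the
 * general technique, not a copy of ixl_hmc_pack() itself.
 */
#if 0
static void
example_hmc_pack_one(uint8_t *ctx, const uint8_t *obj,
    const struct ixl_hmc_pack *p)
{
	uint16_t i;

	for (i = 0; i < p->width; i++) {
		unsigned int srcbit = i;		/* bit within the field */
		unsigned int dstbit = p->lsb + i;	/* bit within the context */

		if (obj[p->offset + srcbit / 8] & (1U << (srcbit % 8)))
			ctx[dstbit / 8] |= 1U << (dstbit % 8);
	}
}
#endif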
261 
262 struct ixl_hmc_rxq {
263 	uint16_t		 head;
264 	uint8_t			 cpuid;
265 	uint64_t		 base;
266 #define IXL_HMC_RXQ_BASE_UNIT		128
267 	uint16_t		 qlen;
268 	uint16_t		 dbuff;
269 #define IXL_HMC_RXQ_DBUFF_UNIT		128
270 	uint8_t			 hbuff;
271 #define IXL_HMC_RXQ_HBUFF_UNIT		64
272 	uint8_t			 dtype;
273 #define IXL_HMC_RXQ_DTYPE_NOSPLIT	0x0
274 #define IXL_HMC_RXQ_DTYPE_HSPLIT	0x1
275 #define IXL_HMC_RXQ_DTYPE_SPLIT_ALWAYS	0x2
276 	uint8_t			 dsize;
277 #define IXL_HMC_RXQ_DSIZE_16		0
278 #define IXL_HMC_RXQ_DSIZE_32		1
279 	uint8_t			 crcstrip;
280 	uint8_t			 fc_ena;
281 	uint8_t			 l2sel;
282 	uint8_t			 hsplit_0;
283 	uint8_t			 hsplit_1;
284 	uint8_t			 showiv;
285 	uint16_t		 rxmax;
286 	uint8_t			 tphrdesc_ena;
287 	uint8_t			 tphwdesc_ena;
288 	uint8_t			 tphdata_ena;
289 	uint8_t			 tphhead_ena;
290 	uint8_t			 lrxqthresh;
291 	uint8_t			 prefena;
292 };
293 
294 static const struct ixl_hmc_pack ixl_hmc_pack_rxq[] = {
295 	{ offsetof(struct ixl_hmc_rxq, head),		13,	0 },
296 	{ offsetof(struct ixl_hmc_rxq, cpuid),		8,	13 },
297 	{ offsetof(struct ixl_hmc_rxq, base),		57,	32 },
298 	{ offsetof(struct ixl_hmc_rxq, qlen),		13,	89 },
299 	{ offsetof(struct ixl_hmc_rxq, dbuff),		7,	102 },
300 	{ offsetof(struct ixl_hmc_rxq, hbuff),		5,	109 },
301 	{ offsetof(struct ixl_hmc_rxq, dtype),		2,	114 },
302 	{ offsetof(struct ixl_hmc_rxq, dsize),		1,	116 },
303 	{ offsetof(struct ixl_hmc_rxq, crcstrip),	1,	117 },
304 	{ offsetof(struct ixl_hmc_rxq, fc_ena),		1,	118 },
305 	{ offsetof(struct ixl_hmc_rxq, l2sel),		1,	119 },
306 	{ offsetof(struct ixl_hmc_rxq, hsplit_0),	4,	120 },
307 	{ offsetof(struct ixl_hmc_rxq, hsplit_1),	2,	124 },
308 	{ offsetof(struct ixl_hmc_rxq, showiv),		1,	127 },
309 	{ offsetof(struct ixl_hmc_rxq, rxmax),		14,	174 },
310 	{ offsetof(struct ixl_hmc_rxq, tphrdesc_ena),	1,	193 },
311 	{ offsetof(struct ixl_hmc_rxq, tphwdesc_ena),	1,	194 },
312 	{ offsetof(struct ixl_hmc_rxq, tphdata_ena),	1,	195 },
313 	{ offsetof(struct ixl_hmc_rxq, tphhead_ena),	1,	196 },
314 	{ offsetof(struct ixl_hmc_rxq, lrxqthresh),	3,	198 },
315 	{ offsetof(struct ixl_hmc_rxq, prefena),	1,	201 },
316 };
317 
318 #define IXL_HMC_RXQ_MINSIZE (201 + 1)
319 
320 struct ixl_hmc_txq {
321 	uint16_t		head;
322 	uint8_t			new_context;
323 	uint64_t		base;
324 #define IXL_HMC_TXQ_BASE_UNIT		128
325 	uint8_t			fc_ena;
326 	uint8_t			timesync_ena;
327 	uint8_t			fd_ena;
328 	uint8_t			alt_vlan_ena;
329 	uint8_t			cpuid;
330 	uint16_t		thead_wb;
331 	uint8_t			head_wb_ena;
332 #define IXL_HMC_TXQ_DESC_WB		0
333 #define IXL_HMC_TXQ_HEAD_WB		1
334 	uint16_t		qlen;
335 	uint8_t			tphrdesc_ena;
336 	uint8_t			tphrpacket_ena;
337 	uint8_t			tphwdesc_ena;
338 	uint64_t		head_wb_addr;
339 	uint32_t		crc;
340 	uint16_t		rdylist;
341 	uint8_t			rdylist_act;
342 };
343 
344 static const struct ixl_hmc_pack ixl_hmc_pack_txq[] = {
345 	{ offsetof(struct ixl_hmc_txq, head),		13,	0 },
346 	{ offsetof(struct ixl_hmc_txq, new_context),	1,	30 },
347 	{ offsetof(struct ixl_hmc_txq, base),		57,	32 },
348 	{ offsetof(struct ixl_hmc_txq, fc_ena),		1,	89 },
349 	{ offsetof(struct ixl_hmc_txq, timesync_ena),	1,	90 },
350 	{ offsetof(struct ixl_hmc_txq, fd_ena),		1,	91 },
351 	{ offsetof(struct ixl_hmc_txq, alt_vlan_ena),	1,	92 },
352 	{ offsetof(struct ixl_hmc_txq, cpuid),		8,	96 },
353 /* line 1 */
354 	{ offsetof(struct ixl_hmc_txq, thead_wb),	13,	0 + 128 },
355 	{ offsetof(struct ixl_hmc_txq, head_wb_ena),	1,	32 + 128 },
356 	{ offsetof(struct ixl_hmc_txq, qlen),		13,	33 + 128 },
357 	{ offsetof(struct ixl_hmc_txq, tphrdesc_ena),	1,	46 + 128 },
358 	{ offsetof(struct ixl_hmc_txq, tphrpacket_ena),	1,	47 + 128 },
359 	{ offsetof(struct ixl_hmc_txq, tphwdesc_ena),	1,	48 + 128 },
360 	{ offsetof(struct ixl_hmc_txq, head_wb_addr),	64,	64 + 128 },
361 /* line 7 */
362 	{ offsetof(struct ixl_hmc_txq, crc),		32,	0 + (7*128) },
363 	{ offsetof(struct ixl_hmc_txq, rdylist),	10,	84 + (7*128) },
364 	{ offsetof(struct ixl_hmc_txq, rdylist_act),	1,	94 + (7*128) },
365 };
366 
367 #define IXL_HMC_TXQ_MINSIZE (94 + (7*128) + 1)
368 
369 struct ixl_work {
370 	struct work	 ixw_cookie;
371 	void		(*ixw_func)(void *);
372 	void		*ixw_arg;
373 	unsigned int	 ixw_added;
374 };
375 #define IXL_WORKQUEUE_PRI	PRI_SOFTNET
376 
377 struct ixl_tx_map {
378 	struct mbuf		*txm_m;
379 	bus_dmamap_t		 txm_map;
380 	unsigned int		 txm_eop;
381 };
382 
383 struct ixl_tx_ring {
384 	kmutex_t		 txr_lock;
385 	struct ixl_softc	*txr_sc;
386 
387 	unsigned int		 txr_prod;
388 	unsigned int		 txr_cons;
389 
390 	struct ixl_tx_map	*txr_maps;
391 	struct ixl_dmamem	 txr_mem;
392 
393 	bus_size_t		 txr_tail;
394 	unsigned int		 txr_qid;
395 	pcq_t			*txr_intrq;
396 	void			*txr_si;
397 
398 	struct evcnt		 txr_defragged;
399 	struct evcnt		 txr_defrag_failed;
400 	struct evcnt		 txr_pcqdrop;
401 	struct evcnt		 txr_transmitdef;
402 	struct evcnt		 txr_intr;
403 	struct evcnt		 txr_defer;
404 };
405 
406 struct ixl_rx_map {
407 	struct mbuf		*rxm_m;
408 	bus_dmamap_t		 rxm_map;
409 };
410 
411 struct ixl_rx_ring {
412 	kmutex_t		 rxr_lock;
413 
414 	unsigned int		 rxr_prod;
415 	unsigned int		 rxr_cons;
416 
417 	struct ixl_rx_map	*rxr_maps;
418 	struct ixl_dmamem	 rxr_mem;
419 
420 	struct mbuf		*rxr_m_head;
421 	struct mbuf		**rxr_m_tail;
422 
423 	bus_size_t		 rxr_tail;
424 	unsigned int		 rxr_qid;
425 
426 	struct evcnt		 rxr_mgethdr_failed;
427 	struct evcnt		 rxr_mgetcl_failed;
428 	struct evcnt		 rxr_mbuf_load_failed;
429 	struct evcnt		 rxr_intr;
430 	struct evcnt		 rxr_defer;
431 };
432 
433 struct ixl_queue_pair {
434 	struct ixl_softc	*qp_sc;
435 	struct ixl_tx_ring	*qp_txr;
436 	struct ixl_rx_ring	*qp_rxr;
437 
438 	char			 qp_name[16];
439 
440 	void			*qp_si;
441 	struct work		 qp_work;
442 	bool			 qp_workqueue;
443 };
444 
445 struct ixl_atq {
446 	struct ixl_aq_desc	 iatq_desc;
447 	void			(*iatq_fn)(struct ixl_softc *,
448 				    const struct ixl_aq_desc *);
449 	bool			 iatq_inuse;
450 };
451 SIMPLEQ_HEAD(ixl_atq_list, ixl_atq);
452 
453 struct ixl_product {
454 	unsigned int	 vendor_id;
455 	unsigned int	 product_id;
456 };
457 
458 struct ixl_stats_counters {
459 	bool		 isc_has_offset;
460 	struct evcnt	 isc_crc_errors;
461 	uint64_t	 isc_crc_errors_offset;
462 	struct evcnt	 isc_illegal_bytes;
463 	uint64_t	 isc_illegal_bytes_offset;
464 	struct evcnt	 isc_rx_bytes;
465 	uint64_t	 isc_rx_bytes_offset;
466 	struct evcnt	 isc_rx_discards;
467 	uint64_t	 isc_rx_discards_offset;
468 	struct evcnt	 isc_rx_unicast;
469 	uint64_t	 isc_rx_unicast_offset;
470 	struct evcnt	 isc_rx_multicast;
471 	uint64_t	 isc_rx_multicast_offset;
472 	struct evcnt	 isc_rx_broadcast;
473 	uint64_t	 isc_rx_broadcast_offset;
474 	struct evcnt	 isc_rx_size_64;
475 	uint64_t	 isc_rx_size_64_offset;
476 	struct evcnt	 isc_rx_size_127;
477 	uint64_t	 isc_rx_size_127_offset;
478 	struct evcnt	 isc_rx_size_255;
479 	uint64_t	 isc_rx_size_255_offset;
480 	struct evcnt	 isc_rx_size_511;
481 	uint64_t	 isc_rx_size_511_offset;
482 	struct evcnt	 isc_rx_size_1023;
483 	uint64_t	 isc_rx_size_1023_offset;
484 	struct evcnt	 isc_rx_size_1522;
485 	uint64_t	 isc_rx_size_1522_offset;
486 	struct evcnt	 isc_rx_size_big;
487 	uint64_t	 isc_rx_size_big_offset;
488 	struct evcnt	 isc_rx_undersize;
489 	uint64_t	 isc_rx_undersize_offset;
490 	struct evcnt	 isc_rx_oversize;
491 	uint64_t	 isc_rx_oversize_offset;
492 	struct evcnt	 isc_rx_fragments;
493 	uint64_t	 isc_rx_fragments_offset;
494 	struct evcnt	 isc_rx_jabber;
495 	uint64_t	 isc_rx_jabber_offset;
496 	struct evcnt	 isc_tx_bytes;
497 	uint64_t	 isc_tx_bytes_offset;
498 	struct evcnt	 isc_tx_dropped_link_down;
499 	uint64_t	 isc_tx_dropped_link_down_offset;
500 	struct evcnt	 isc_tx_unicast;
501 	uint64_t	 isc_tx_unicast_offset;
502 	struct evcnt	 isc_tx_multicast;
503 	uint64_t	 isc_tx_multicast_offset;
504 	struct evcnt	 isc_tx_broadcast;
505 	uint64_t	 isc_tx_broadcast_offset;
506 	struct evcnt	 isc_tx_size_64;
507 	uint64_t	 isc_tx_size_64_offset;
508 	struct evcnt	 isc_tx_size_127;
509 	uint64_t	 isc_tx_size_127_offset;
510 	struct evcnt	 isc_tx_size_255;
511 	uint64_t	 isc_tx_size_255_offset;
512 	struct evcnt	 isc_tx_size_511;
513 	uint64_t	 isc_tx_size_511_offset;
514 	struct evcnt	 isc_tx_size_1023;
515 	uint64_t	 isc_tx_size_1023_offset;
516 	struct evcnt	 isc_tx_size_1522;
517 	uint64_t	 isc_tx_size_1522_offset;
518 	struct evcnt	 isc_tx_size_big;
519 	uint64_t	 isc_tx_size_big_offset;
520 	struct evcnt	 isc_mac_local_faults;
521 	uint64_t	 isc_mac_local_faults_offset;
522 	struct evcnt	 isc_mac_remote_faults;
523 	uint64_t	 isc_mac_remote_faults_offset;
524 	struct evcnt	 isc_link_xon_rx;
525 	uint64_t	 isc_link_xon_rx_offset;
526 	struct evcnt	 isc_link_xon_tx;
527 	uint64_t	 isc_link_xon_tx_offset;
528 	struct evcnt	 isc_link_xoff_rx;
529 	uint64_t	 isc_link_xoff_rx_offset;
530 	struct evcnt	 isc_link_xoff_tx;
531 	uint64_t	 isc_link_xoff_tx_offset;
532 	struct evcnt	 isc_vsi_rx_discards;
533 	uint64_t	 isc_vsi_rx_discards_offset;
534 	struct evcnt	 isc_vsi_rx_bytes;
535 	uint64_t	 isc_vsi_rx_bytes_offset;
536 	struct evcnt	 isc_vsi_rx_unicast;
537 	uint64_t	 isc_vsi_rx_unicast_offset;
538 	struct evcnt	 isc_vsi_rx_multicast;
539 	uint64_t	 isc_vsi_rx_multicast_offset;
540 	struct evcnt	 isc_vsi_rx_broadcast;
541 	uint64_t	 isc_vsi_rx_broadcast_offset;
542 	struct evcnt	 isc_vsi_tx_errors;
543 	uint64_t	 isc_vsi_tx_errors_offset;
544 	struct evcnt	 isc_vsi_tx_bytes;
545 	uint64_t	 isc_vsi_tx_bytes_offset;
546 	struct evcnt	 isc_vsi_tx_unicast;
547 	uint64_t	 isc_vsi_tx_unicast_offset;
548 	struct evcnt	 isc_vsi_tx_multicast;
549 	uint64_t	 isc_vsi_tx_multicast_offset;
550 	struct evcnt	 isc_vsi_tx_broadcast;
551 	uint64_t	 isc_vsi_tx_broadcast_offset;
552 };
553 
554 /*
555  * Locking notes:
556  * + fields in ixl_tx_ring are protected by txr_lock (a spin mutex), and
557  *   fields in ixl_rx_ring are protected by rxr_lock (a spin mutex).
558  *    - no more than one of these locks may be held at once.
559  * + fields named sc_atq_* in ixl_softc are protected by sc_atq_lock
560  *   (a spin mutex).
561  *    - this lock cannot be held together with txr_lock or rxr_lock.
562  * + fields named sc_arq_* are not protected by any lock.
563  *    - operations on sc_arq_* are done in a single context related to
564  *      sc_arq_task.
565  * + other fields in ixl_softc are protected by sc_cfg_lock
566  *   (an adaptive mutex).
567  *    - it must be acquired before any other lock is acquired, and it may
568  *      only be released after the other lock has been released.
569  */
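
/*
 * Lock ordering sketch (illustrative only, kept out of the build): under
 * the rules above, sc_cfg_lock is acquired first and released last, while
 * txr_lock/rxr_lock and sc_atq_lock are leaf locks that are never held at
 * the same time as each other ("txr" here stands for any tx ring).
 */
#if 0
	mutex_enter(&sc->sc_cfg_lock);		/* adaptive, taken first */
	mutex_enter(&txr->txr_lock);		/* spin, leaf lock */
	/* ... manipulate the tx ring ... */
	mutex_exit(&txr->txr_lock);
	mutex_exit(&sc->sc_cfg_lock);		/* released after the leaf lock */
#endif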
570 
571 struct ixl_softc {
572 	device_t		 sc_dev;
573 	struct ethercom		 sc_ec;
574 	bool			 sc_attached;
575 	bool			 sc_dead;
576 	uint32_t		 sc_port;
577 	struct sysctllog	*sc_sysctllog;
578 	struct workqueue	*sc_workq;
579 	struct workqueue	*sc_workq_txrx;
580 	int			 sc_stats_intval;
581 	callout_t		 sc_stats_callout;
582 	struct ixl_work		 sc_stats_task;
583 	struct ixl_stats_counters
584 				 sc_stats_counters;
585 	uint8_t			 sc_enaddr[ETHER_ADDR_LEN];
586 	struct ifmedia		 sc_media;
587 	uint64_t		 sc_media_status;
588 	uint64_t		 sc_media_active;
589 	uint64_t		 sc_phy_types;
590 	uint8_t			 sc_phy_abilities;
591 	uint8_t			 sc_phy_linkspeed;
592 	uint8_t			 sc_phy_fec_cfg;
593 	uint16_t		 sc_eee_cap;
594 	uint32_t		 sc_eeer_val;
595 	uint8_t			 sc_d3_lpan;
596 	kmutex_t		 sc_cfg_lock;
597 	enum i40e_mac_type	 sc_mac_type;
598 	uint32_t		 sc_rss_table_size;
599 	uint32_t		 sc_rss_table_entry_width;
600 	bool			 sc_txrx_workqueue;
601 	u_int			 sc_tx_process_limit;
602 	u_int			 sc_rx_process_limit;
603 	u_int			 sc_tx_intr_process_limit;
604 	u_int			 sc_rx_intr_process_limit;
605 
606 	int			 sc_cur_ec_capenable;
607 
608 	struct pci_attach_args	 sc_pa;
609 	pci_intr_handle_t	*sc_ihp;
610 	void			**sc_ihs;
611 	unsigned int		 sc_nintrs;
612 
613 	bus_dma_tag_t		 sc_dmat;
614 	bus_space_tag_t		 sc_memt;
615 	bus_space_handle_t	 sc_memh;
616 	bus_size_t		 sc_mems;
617 
618 	uint8_t			 sc_pf_id;
619 	uint16_t		 sc_uplink_seid;	/* le */
620 	uint16_t		 sc_downlink_seid;	/* le */
621 	uint16_t		 sc_vsi_number;
622 	uint16_t		 sc_vsi_stat_counter_idx;
623 	uint16_t		 sc_seid;
624 	unsigned int		 sc_base_queue;
625 
626 	pci_intr_type_t		 sc_intrtype;
627 	unsigned int		 sc_msix_vector_queue;
628 
629 	struct ixl_dmamem	 sc_scratch;
630 	struct ixl_dmamem	 sc_aqbuf;
631 
632 	const struct ixl_aq_regs *
633 				 sc_aq_regs;
634 	uint32_t		 sc_aq_flags;
635 #define IXL_SC_AQ_FLAG_RXCTL	__BIT(0)
636 #define IXL_SC_AQ_FLAG_NVMLOCK	__BIT(1)
637 #define IXL_SC_AQ_FLAG_NVMREAD	__BIT(2)
638 #define IXL_SC_AQ_FLAG_RSS	__BIT(3)
639 
640 	kmutex_t		 sc_atq_lock;
641 	kcondvar_t		 sc_atq_cv;
642 	struct ixl_dmamem	 sc_atq;
643 	unsigned int		 sc_atq_prod;
644 	unsigned int		 sc_atq_cons;
645 
646 	struct ixl_dmamem	 sc_arq;
647 	struct ixl_work		 sc_arq_task;
648 	struct ixl_aq_bufs	 sc_arq_idle;
649 	struct ixl_aq_buf	*sc_arq_live[IXL_AQ_NUM];
650 	unsigned int		 sc_arq_prod;
651 	unsigned int		 sc_arq_cons;
652 
653 	struct ixl_work		 sc_link_state_task;
654 	struct ixl_work		 sc_link_state_done_task;
655 	struct ixl_atq		 sc_link_state_atq;
656 
657 	struct ixl_dmamem	 sc_hmc_sd;
658 	struct ixl_dmamem	 sc_hmc_pd;
659 	struct ixl_hmc_entry	 sc_hmc_entries[IXL_HMC_COUNT];
660 
661 	struct if_percpuq	*sc_ipq;
662 	unsigned int		 sc_tx_ring_ndescs;
663 	unsigned int		 sc_rx_ring_ndescs;
664 	unsigned int		 sc_nqueue_pairs;
665 	unsigned int		 sc_nqueue_pairs_max;
666 	unsigned int		 sc_nqueue_pairs_device;
667 	struct ixl_queue_pair	*sc_qps;
668 	uint32_t		 sc_itr_rx;
669 	uint32_t		 sc_itr_tx;
670 
671 	struct evcnt		 sc_event_atq;
672 	struct evcnt		 sc_event_link;
673 	struct evcnt		 sc_event_ecc_err;
674 	struct evcnt		 sc_event_pci_exception;
675 	struct evcnt		 sc_event_crit_err;
676 };
677 
678 #define IXL_TXRX_PROCESS_UNLIMIT	UINT_MAX
679 #define IXL_TX_PROCESS_LIMIT		256
680 #define IXL_RX_PROCESS_LIMIT		256
681 #define IXL_TX_INTR_PROCESS_LIMIT	256
682 #define IXL_RX_INTR_PROCESS_LIMIT	0U
683 
684 #define IXL_IFCAP_RXCSUM	(IFCAP_CSUM_IPv4_Rx |	\
685 				 IFCAP_CSUM_TCPv4_Rx |	\
686 				 IFCAP_CSUM_UDPv4_Rx |	\
687 				 IFCAP_CSUM_TCPv6_Rx |	\
688 				 IFCAP_CSUM_UDPv6_Rx)
689 #define IXL_IFCAP_TXCSUM	(IFCAP_CSUM_IPv4_Tx |	\
690 				 IFCAP_CSUM_TCPv4_Tx |	\
691 				 IFCAP_CSUM_UDPv4_Tx |	\
692 				 IFCAP_CSUM_TCPv6_Tx |	\
693 				 IFCAP_CSUM_UDPv6_Tx)
694 #define IXL_CSUM_ALL_OFFLOAD	(M_CSUM_IPv4 |			\
695 				 M_CSUM_TCPv4 | M_CSUM_TCPv6 |	\
696 				 M_CSUM_UDPv4 | M_CSUM_UDPv6)
697 
698 #define delaymsec(_x)	DELAY(1000 * (_x))
699 #ifdef IXL_DEBUG
700 #define DDPRINTF(sc, fmt, args...)			\
701 do {							\
702 	if ((sc) != NULL) {				\
703 		device_printf(				\
704 		    ((struct ixl_softc *)(sc))->sc_dev,	\
705 		    "");				\
706 	}						\
707 	printf("%s:\t" fmt, __func__, ##args);		\
708 } while (0)
709 #else
710 #define DDPRINTF(sc, fmt, args...)	__nothing
711 #endif
712 #ifndef IXL_STATS_INTERVAL_MSEC
713 #define IXL_STATS_INTERVAL_MSEC	10000
714 #endif
715 #ifndef IXL_QUEUE_NUM
716 #define IXL_QUEUE_NUM		0
717 #endif
718 
719 enum ixl_link_flags {
720 	IXL_LINK_NOFLAGS	= 0,
721 	IXL_LINK_FLAG_WAITDONE	= __BIT(0),
722 };
723 
724 static bool		 ixl_param_nomsix = false;
725 static int		 ixl_param_stats_interval = IXL_STATS_INTERVAL_MSEC;
726 static int		 ixl_param_nqps_limit = IXL_QUEUE_NUM;
727 static unsigned int	 ixl_param_tx_ndescs = 512;
728 static unsigned int	 ixl_param_rx_ndescs = 512;
729 
730 static enum i40e_mac_type
731 	    ixl_mactype(pci_product_id_t);
732 static void	ixl_pci_csr_setup(pci_chipset_tag_t, pcitag_t);
733 static void	ixl_clear_hw(struct ixl_softc *);
734 static int	ixl_pf_reset(struct ixl_softc *);
735 
736 static int	ixl_dmamem_alloc(struct ixl_softc *, struct ixl_dmamem *,
737 		    bus_size_t, bus_size_t);
738 static void	ixl_dmamem_free(struct ixl_softc *, struct ixl_dmamem *);
739 
740 static int	ixl_arq_fill(struct ixl_softc *);
741 static void	ixl_arq_unfill(struct ixl_softc *);
742 
743 static int	ixl_atq_poll(struct ixl_softc *, struct ixl_aq_desc *,
744 		    unsigned int);
745 static void	ixl_atq_set(struct ixl_atq *,
746 		    void (*)(struct ixl_softc *, const struct ixl_aq_desc *));
747 static void	ixl_wakeup(struct ixl_softc *, const struct ixl_aq_desc *);
748 static int	ixl_atq_post_locked(struct ixl_softc *, struct ixl_atq *);
749 static void	ixl_atq_done(struct ixl_softc *);
750 static int	ixl_atq_exec(struct ixl_softc *, struct ixl_atq *);
751 static int	ixl_atq_exec_locked(struct ixl_softc *, struct ixl_atq *);
752 static int	ixl_get_version(struct ixl_softc *);
753 static int	ixl_get_nvm_version(struct ixl_softc *);
754 static int	ixl_get_hw_capabilities(struct ixl_softc *);
755 static int	ixl_pxe_clear(struct ixl_softc *);
756 static int	ixl_lldp_shut(struct ixl_softc *);
757 static int	ixl_get_mac(struct ixl_softc *);
758 static int	ixl_get_switch_config(struct ixl_softc *);
759 static int	ixl_phy_mask_ints(struct ixl_softc *);
760 static int	ixl_get_phy_info(struct ixl_softc *);
761 static int	ixl_set_phy_config(struct ixl_softc *, uint8_t, uint8_t, bool);
762 static int	ixl_set_phy_autoselect(struct ixl_softc *);
763 static int	ixl_restart_an(struct ixl_softc *);
764 static int	ixl_hmc(struct ixl_softc *);
765 static void	ixl_hmc_free(struct ixl_softc *);
766 static int	ixl_get_vsi(struct ixl_softc *);
767 static int	ixl_set_vsi(struct ixl_softc *);
768 static void	ixl_set_filter_control(struct ixl_softc *);
769 static int	ixl_get_link_status(struct ixl_softc *, enum ixl_link_flags);
770 static void	ixl_get_link_status_work(void *);
771 static int	ixl_get_link_status_poll(struct ixl_softc *, int *);
772 static void	ixl_get_link_status_done(struct ixl_softc *,
773 		    const struct ixl_aq_desc *);
774 static void	ixl_get_link_status_done_work(void *);
775 static int	ixl_set_link_status_locked(struct ixl_softc *,
776 		    const struct ixl_aq_desc *);
777 static uint64_t	ixl_search_link_speed(uint8_t);
778 static uint8_t	ixl_search_baudrate(uint64_t);
779 static void	ixl_config_rss(struct ixl_softc *);
780 static int	ixl_add_macvlan(struct ixl_softc *, const uint8_t *,
781 		    uint16_t, uint16_t);
782 static int	ixl_remove_macvlan(struct ixl_softc *, const uint8_t *,
783 		    uint16_t, uint16_t);
784 static void	ixl_arq(void *);
785 static void	ixl_hmc_pack(void *, const void *,
786 		    const struct ixl_hmc_pack *, unsigned int);
787 static uint32_t	ixl_rd_rx_csr(struct ixl_softc *, uint32_t);
788 static void	ixl_wr_rx_csr(struct ixl_softc *, uint32_t, uint32_t);
789 static int	ixl_rd16_nvm(struct ixl_softc *, uint16_t, uint16_t *);
790 
791 static int	ixl_match(device_t, cfdata_t, void *);
792 static void	ixl_attach(device_t, device_t, void *);
793 static int	ixl_detach(device_t, int);
794 
795 static void	ixl_media_add(struct ixl_softc *);
796 static int	ixl_media_change(struct ifnet *);
797 static void	ixl_media_status(struct ifnet *, struct ifmediareq *);
798 static int	ixl_ioctl(struct ifnet *, u_long, void *);
799 static void	ixl_start(struct ifnet *);
800 static int	ixl_transmit(struct ifnet *, struct mbuf *);
801 static void	ixl_deferred_transmit(void *);
802 static int	ixl_intr(void *);
803 static int	ixl_queue_intr(void *);
804 static int	ixl_other_intr(void *);
805 static void	ixl_handle_queue(void *);
806 static void	ixl_handle_queue_wk(struct work *, void *);
807 static void	ixl_sched_handle_queue(struct ixl_softc *,
808 		    struct ixl_queue_pair *);
809 static int	ixl_init(struct ifnet *);
810 static int	ixl_init_locked(struct ixl_softc *);
811 static void	ixl_stop(struct ifnet *, int);
812 static void	ixl_stop_locked(struct ixl_softc *);
813 static int	ixl_iff(struct ixl_softc *);
814 static int	ixl_ifflags_cb(struct ethercom *);
815 static int	ixl_setup_interrupts(struct ixl_softc *);
816 static int	ixl_establish_intx(struct ixl_softc *);
817 static int	ixl_establish_msix(struct ixl_softc *);
818 static void	ixl_enable_queue_intr(struct ixl_softc *,
819 		    struct ixl_queue_pair *);
820 static void	ixl_disable_queue_intr(struct ixl_softc *,
821 		    struct ixl_queue_pair *);
822 static void	ixl_enable_other_intr(struct ixl_softc *);
823 static void	ixl_disable_other_intr(struct ixl_softc *);
824 static void	ixl_config_queue_intr(struct ixl_softc *);
825 static void	ixl_config_other_intr(struct ixl_softc *);
826 
827 static struct ixl_tx_ring *
828 		ixl_txr_alloc(struct ixl_softc *, unsigned int);
829 static void	ixl_txr_qdis(struct ixl_softc *, struct ixl_tx_ring *, int);
830 static void	ixl_txr_config(struct ixl_softc *, struct ixl_tx_ring *);
831 static int	ixl_txr_enabled(struct ixl_softc *, struct ixl_tx_ring *);
832 static int	ixl_txr_disabled(struct ixl_softc *, struct ixl_tx_ring *);
833 static void	ixl_txr_unconfig(struct ixl_softc *, struct ixl_tx_ring *);
834 static void	ixl_txr_clean(struct ixl_softc *, struct ixl_tx_ring *);
835 static void	ixl_txr_free(struct ixl_softc *, struct ixl_tx_ring *);
836 static int	ixl_txeof(struct ixl_softc *, struct ixl_tx_ring *, u_int);
837 
838 static struct ixl_rx_ring *
839 		ixl_rxr_alloc(struct ixl_softc *, unsigned int);
840 static void	ixl_rxr_config(struct ixl_softc *, struct ixl_rx_ring *);
841 static int	ixl_rxr_enabled(struct ixl_softc *, struct ixl_rx_ring *);
842 static int	ixl_rxr_disabled(struct ixl_softc *, struct ixl_rx_ring *);
843 static void	ixl_rxr_unconfig(struct ixl_softc *, struct ixl_rx_ring *);
844 static void	ixl_rxr_clean(struct ixl_softc *, struct ixl_rx_ring *);
845 static void	ixl_rxr_free(struct ixl_softc *, struct ixl_rx_ring *);
846 static int	ixl_rxeof(struct ixl_softc *, struct ixl_rx_ring *, u_int);
847 static int	ixl_rxfill(struct ixl_softc *, struct ixl_rx_ring *);
848 
849 static struct workqueue *
850     ixl_workq_create(const char *, pri_t, int, int);
851 static void	ixl_workq_destroy(struct workqueue *);
852 static int	ixl_workqs_teardown(device_t);
853 static void	ixl_work_set(struct ixl_work *, void (*)(void *), void *);
854 static void	ixl_work_add(struct workqueue *, struct ixl_work *);
855 static void	ixl_work_wait(struct workqueue *, struct ixl_work *);
856 static void	ixl_workq_work(struct work *, void *);
857 static const struct ixl_product *
858 		ixl_lookup(const struct pci_attach_args *pa);
859 static void	ixl_link_state_update(struct ixl_softc *,
860 		    const struct ixl_aq_desc *);
861 static int	ixl_vlan_cb(struct ethercom *, uint16_t, bool);
862 static int	ixl_setup_vlan_hwfilter(struct ixl_softc *);
863 static void	ixl_teardown_vlan_hwfilter(struct ixl_softc *);
864 static int	ixl_update_macvlan(struct ixl_softc *);
865 static int	ixl_setup_interrupts(struct ixl_softc *);
866 static void	ixl_teardown_interrupts(struct ixl_softc *);
867 static int	ixl_setup_stats(struct ixl_softc *);
868 static void	ixl_teardown_stats(struct ixl_softc *);
869 static void	ixl_stats_callout(void *);
870 static void	ixl_stats_update(void *);
871 static int	ixl_setup_sysctls(struct ixl_softc *);
872 static void	ixl_teardown_sysctls(struct ixl_softc *);
873 static int	ixl_sysctl_itr_handler(SYSCTLFN_PROTO);
874 static int	ixl_queue_pairs_alloc(struct ixl_softc *);
875 static void	ixl_queue_pairs_free(struct ixl_softc *);
876 
877 static const struct ixl_phy_type ixl_phy_type_map[] = {
878 	{ 1ULL << IXL_PHY_TYPE_SGMII,		IFM_1000_SGMII },
879 	{ 1ULL << IXL_PHY_TYPE_1000BASE_KX,	IFM_1000_KX },
880 	{ 1ULL << IXL_PHY_TYPE_10GBASE_KX4,	IFM_10G_KX4 },
881 	{ 1ULL << IXL_PHY_TYPE_10GBASE_KR,	IFM_10G_KR },
882 	{ 1ULL << IXL_PHY_TYPE_40GBASE_KR4,	IFM_40G_KR4 },
883 	{ 1ULL << IXL_PHY_TYPE_XAUI |
884 	  1ULL << IXL_PHY_TYPE_XFI,		IFM_10G_CX4 },
885 	{ 1ULL << IXL_PHY_TYPE_SFI,		IFM_10G_SFI },
886 	{ 1ULL << IXL_PHY_TYPE_XLAUI |
887 	  1ULL << IXL_PHY_TYPE_XLPPI,		IFM_40G_XLPPI },
888 	{ 1ULL << IXL_PHY_TYPE_40GBASE_CR4_CU |
889 	  1ULL << IXL_PHY_TYPE_40GBASE_CR4,	IFM_40G_CR4 },
890 	{ 1ULL << IXL_PHY_TYPE_10GBASE_CR1_CU |
891 	  1ULL << IXL_PHY_TYPE_10GBASE_CR1,	IFM_10G_CR1 },
892 	{ 1ULL << IXL_PHY_TYPE_10GBASE_AOC,	IFM_10G_AOC },
893 	{ 1ULL << IXL_PHY_TYPE_40GBASE_AOC,	IFM_40G_AOC },
894 	{ 1ULL << IXL_PHY_TYPE_100BASE_TX,	IFM_100_TX },
895 	{ 1ULL << IXL_PHY_TYPE_1000BASE_T_OPTICAL |
896 	  1ULL << IXL_PHY_TYPE_1000BASE_T,	IFM_1000_T },
897 	{ 1ULL << IXL_PHY_TYPE_10GBASE_T,	IFM_10G_T },
898 	{ 1ULL << IXL_PHY_TYPE_10GBASE_SR,	IFM_10G_SR },
899 	{ 1ULL << IXL_PHY_TYPE_10GBASE_LR,	IFM_10G_LR },
900 	{ 1ULL << IXL_PHY_TYPE_10GBASE_SFPP_CU,	IFM_10G_TWINAX },
901 	{ 1ULL << IXL_PHY_TYPE_40GBASE_SR4,	IFM_40G_SR4 },
902 	{ 1ULL << IXL_PHY_TYPE_40GBASE_LR4,	IFM_40G_LR4 },
903 	{ 1ULL << IXL_PHY_TYPE_1000BASE_SX,	IFM_1000_SX },
904 	{ 1ULL << IXL_PHY_TYPE_1000BASE_LX,	IFM_1000_LX },
905 	{ 1ULL << IXL_PHY_TYPE_20GBASE_KR2,	IFM_20G_KR2 },
906 	{ 1ULL << IXL_PHY_TYPE_25GBASE_KR,	IFM_25G_KR },
907 	{ 1ULL << IXL_PHY_TYPE_25GBASE_CR,	IFM_25G_CR },
908 	{ 1ULL << IXL_PHY_TYPE_25GBASE_SR,	IFM_25G_SR },
909 	{ 1ULL << IXL_PHY_TYPE_25GBASE_LR,	IFM_25G_LR },
910 	{ 1ULL << IXL_PHY_TYPE_25GBASE_AOC,	IFM_25G_AOC },
911 	{ 1ULL << IXL_PHY_TYPE_25GBASE_ACC,	IFM_25G_ACC },
912 	{ 1ULL << IXL_PHY_TYPE_2500BASE_T_1,	IFM_2500_T },
913 	{ 1ULL << IXL_PHY_TYPE_5000BASE_T_1,	IFM_5000_T },
914 	{ 1ULL << IXL_PHY_TYPE_2500BASE_T_2,	IFM_2500_T },
915 	{ 1ULL << IXL_PHY_TYPE_5000BASE_T_2,	IFM_5000_T },
916 };
917 
918 static const struct ixl_speed_type ixl_speed_type_map[] = {
919 	{ IXL_AQ_LINK_SPEED_40GB,		IF_Gbps(40) },
920 	{ IXL_AQ_LINK_SPEED_25GB,		IF_Gbps(25) },
921 	{ IXL_AQ_LINK_SPEED_10GB,		IF_Gbps(10) },
922 	{ IXL_AQ_LINK_SPEED_5000MB,		IF_Mbps(5000) },
923 	{ IXL_AQ_LINK_SPEED_2500MB,		IF_Mbps(2500) },
924 	{ IXL_AQ_LINK_SPEED_1000MB,		IF_Mbps(1000) },
925 	{ IXL_AQ_LINK_SPEED_100MB,		IF_Mbps(100)},
926 };
927 
928 static const struct ixl_aq_regs ixl_pf_aq_regs = {
929 	.atq_tail	= I40E_PF_ATQT,
930 	.atq_tail_mask	= I40E_PF_ATQT_ATQT_MASK,
931 	.atq_head	= I40E_PF_ATQH,
932 	.atq_head_mask	= I40E_PF_ATQH_ATQH_MASK,
933 	.atq_len	= I40E_PF_ATQLEN,
934 	.atq_bal	= I40E_PF_ATQBAL,
935 	.atq_bah	= I40E_PF_ATQBAH,
936 	.atq_len_enable	= I40E_PF_ATQLEN_ATQENABLE_MASK,
937 
938 	.arq_tail	= I40E_PF_ARQT,
939 	.arq_tail_mask	= I40E_PF_ARQT_ARQT_MASK,
940 	.arq_head	= I40E_PF_ARQH,
941 	.arq_head_mask	= I40E_PF_ARQH_ARQH_MASK,
942 	.arq_len	= I40E_PF_ARQLEN,
943 	.arq_bal	= I40E_PF_ARQBAL,
944 	.arq_bah	= I40E_PF_ARQBAH,
945 	.arq_len_enable	= I40E_PF_ARQLEN_ARQENABLE_MASK,
946 };
947 
948 #define ixl_rd(_s, _r)			\
949 	bus_space_read_4((_s)->sc_memt, (_s)->sc_memh, (_r))
950 #define ixl_wr(_s, _r, _v)		\
951 	bus_space_write_4((_s)->sc_memt, (_s)->sc_memh, (_r), (_v))
952 #define ixl_barrier(_s, _r, _l, _o) \
953     bus_space_barrier((_s)->sc_memt, (_s)->sc_memh, (_r), (_l), (_o))
954 #define ixl_flush(_s)	(void)ixl_rd((_s), I40E_GLGEN_STAT)
955 #define ixl_nqueues(_sc)	(1 << ((_sc)->sc_nqueue_pairs - 1))
956 
957 CFATTACH_DECL3_NEW(ixl, sizeof(struct ixl_softc),
958     ixl_match, ixl_attach, ixl_detach, NULL, NULL, NULL,
959     DVF_DETACH_SHUTDOWN);
960 
961 static const struct ixl_product ixl_products[] = {
962 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_SFP },
963 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_KX_B },
964 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_KX_C },
965 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_QSFP_A },
966 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_QSFP_B },
967 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_QSFP_C },
968 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X710_10G_T_1 },
969 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X710_10G_T_2 },
970 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_20G_BP_1 },
971 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XL710_20G_BP_2 },
972 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X710_T4_10G },
973 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XXV710_25G_BP },
974 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_XXV710_25G_SFP28 },
975 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X722_KX },
976 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X722_QSFP },
977 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X722_SFP },
978 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X722_1G_BASET },
979 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X722_10G_BASET },
980 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X722_I_SFP },
981 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X710_10G_SFP },
982 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_X710_10G_BP },
983 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_V710_5G_T},
984 	/* required last entry */
985 	{0, 0}
986 };
987 
988 static const struct ixl_product *
989 ixl_lookup(const struct pci_attach_args *pa)
990 {
991 	const struct ixl_product *ixlp;
992 
993 	for (ixlp = ixl_products; ixlp->vendor_id != 0; ixlp++) {
994 		if (PCI_VENDOR(pa->pa_id) == ixlp->vendor_id &&
995 		    PCI_PRODUCT(pa->pa_id) == ixlp->product_id)
996 			return ixlp;
997 	}
998 
999 	return NULL;
1000 }
1001 
1002 static void
1003 ixl_intr_barrier(void)
1004 {
1005 
1006 	/* wait for all interrupt handlers to finish */
1007 	xc_barrier(0);
1008 }
1009 
1010 static int
1011 ixl_match(device_t parent, cfdata_t match, void *aux)
1012 {
1013 	const struct pci_attach_args *pa = aux;
1014 
1015 	return (ixl_lookup(pa) != NULL) ? 1 : 0;
1016 }
1017 
1018 static void
1019 ixl_attach(device_t parent, device_t self, void *aux)
1020 {
1021 	struct ixl_softc *sc;
1022 	struct pci_attach_args *pa = aux;
1023 	struct ifnet *ifp;
1024 	pcireg_t memtype;
1025 	uint32_t firstq, port, ari, func;
1026 	char xnamebuf[32];
1027 	int tries, rv, link;
1028 
1029 	sc = device_private(self);
1030 	sc->sc_dev = self;
1031 	ifp = &sc->sc_ec.ec_if;
1032 
1033 	sc->sc_pa = *pa;
1034 	sc->sc_dmat = (pci_dma64_available(pa)) ?
1035 	    pa->pa_dmat64 : pa->pa_dmat;
1036 	sc->sc_aq_regs = &ixl_pf_aq_regs;
1037 
1038 	sc->sc_mac_type = ixl_mactype(PCI_PRODUCT(pa->pa_id));
1039 
1040 	ixl_pci_csr_setup(pa->pa_pc, pa->pa_tag);
1041 
1042 	pci_aprint_devinfo(pa, "Ethernet controller");
1043 
1044 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, IXL_PCIREG);
1045 	if (pci_mapreg_map(pa, IXL_PCIREG, memtype, 0,
1046 	    &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems)) {
1047 		aprint_error(": unable to map registers\n");
1048 		return;
1049 	}
1050 
1051 	mutex_init(&sc->sc_cfg_lock, MUTEX_DEFAULT, IPL_SOFTNET);
1052 
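	/* the first hardware queue index allocated to this PF */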
1053 	firstq = ixl_rd(sc, I40E_PFLAN_QALLOC);
1054 	firstq &= I40E_PFLAN_QALLOC_FIRSTQ_MASK;
1055 	firstq >>= I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
1056 	sc->sc_base_queue = firstq;
1057 
1058 	ixl_clear_hw(sc);
1059 	if (ixl_pf_reset(sc) == -1) {
1060 		/* error printed by ixl_pf_reset */
1061 		goto unmap;
1062 	}
1063 
1064 	port = ixl_rd(sc, I40E_PFGEN_PORTNUM);
1065 	port &= I40E_PFGEN_PORTNUM_PORT_NUM_MASK;
1066 	port >>= I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
1067 	sc->sc_port = port;
1068 	aprint_normal_dev(self, "port %u", sc->sc_port);
1069 
1070 	ari = ixl_rd(sc, I40E_GLPCI_CAPSUP);
1071 	ari &= I40E_GLPCI_CAPSUP_ARI_EN_MASK;
1072 	ari >>= I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
1073 
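	/*
	 * Derive the PF id from the function/requester id register; with
	 * ARI (alternative routing-ID interpretation) enabled the function
	 * number is 8 bits wide, otherwise only 3 bits.
	 */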
1074 	func = ixl_rd(sc, I40E_PF_FUNC_RID);
1075 	sc->sc_pf_id = func & (ari ? 0xff : 0x7);
1076 
1077 	/* initialise the adminq */
1078 
1079 	mutex_init(&sc->sc_atq_lock, MUTEX_DEFAULT, IPL_NET);
1080 
1081 	if (ixl_dmamem_alloc(sc, &sc->sc_atq,
1082 	    sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
1083 		aprint_error("\n" "%s: unable to allocate atq\n",
1084 		    device_xname(self));
1085 		goto unmap;
1086 	}
1087 
1088 	SIMPLEQ_INIT(&sc->sc_arq_idle);
1089 	ixl_work_set(&sc->sc_arq_task, ixl_arq, sc);
1090 	sc->sc_arq_cons = 0;
1091 	sc->sc_arq_prod = 0;
1092 
1093 	if (ixl_dmamem_alloc(sc, &sc->sc_arq,
1094 	    sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
1095 		aprint_error("\n" "%s: unable to allocate arq\n",
1096 		    device_xname(self));
1097 		goto free_atq;
1098 	}
1099 
1100 	if (!ixl_arq_fill(sc)) {
1101 		aprint_error("\n" "%s: unable to fill arq descriptors\n",
1102 		    device_xname(self));
1103 		goto free_arq;
1104 	}
1105 
1106 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1107 	    0, IXL_DMA_LEN(&sc->sc_atq),
1108 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1109 
1110 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1111 	    0, IXL_DMA_LEN(&sc->sc_arq),
1112 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1113 
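	/*
	 * Program the admin queue rings (head/tail indices, base addresses
	 * and ring lengths) and confirm that the firmware responds by
	 * querying its version.  Retry a few times, as the firmware may
	 * still be coming up after the PF reset.
	 */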
1114 	for (tries = 0; tries < 10; tries++) {
1115 		sc->sc_atq_cons = 0;
1116 		sc->sc_atq_prod = 0;
1117 
1118 		ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1119 		ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1120 		ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1121 		ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1122 
1123 		ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
1124 
1125 		ixl_wr(sc, sc->sc_aq_regs->atq_bal,
1126 		    ixl_dmamem_lo(&sc->sc_atq));
1127 		ixl_wr(sc, sc->sc_aq_regs->atq_bah,
1128 		    ixl_dmamem_hi(&sc->sc_atq));
1129 		ixl_wr(sc, sc->sc_aq_regs->atq_len,
1130 		    sc->sc_aq_regs->atq_len_enable | IXL_AQ_NUM);
1131 
1132 		ixl_wr(sc, sc->sc_aq_regs->arq_bal,
1133 		    ixl_dmamem_lo(&sc->sc_arq));
1134 		ixl_wr(sc, sc->sc_aq_regs->arq_bah,
1135 		    ixl_dmamem_hi(&sc->sc_arq));
1136 		ixl_wr(sc, sc->sc_aq_regs->arq_len,
1137 		    sc->sc_aq_regs->arq_len_enable | IXL_AQ_NUM);
1138 
1139 		rv = ixl_get_version(sc);
1140 		if (rv == 0)
1141 			break;
1142 		if (rv != ETIMEDOUT) {
1143 			aprint_error(", unable to get firmware version\n");
1144 			goto shutdown;
1145 		}
1146 
1147 		delaymsec(100);
1148 	}
1149 
1150 	ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
1151 
1152 	if (ixl_dmamem_alloc(sc, &sc->sc_aqbuf, IXL_AQ_BUFLEN, 0) != 0) {
1153 		aprint_error_dev(self, ", unable to allocate nvm buffer\n");
1154 		goto shutdown;
1155 	}
1156 
1157 	ixl_get_nvm_version(sc);
1158 
1159 	if (sc->sc_mac_type == I40E_MAC_X722)
1160 		sc->sc_nqueue_pairs_device = IXL_QUEUE_MAX_X722;
1161 	else
1162 		sc->sc_nqueue_pairs_device = IXL_QUEUE_MAX_XL710;
1163 
1164 	rv = ixl_get_hw_capabilities(sc);
1165 	if (rv != 0) {
1166 		aprint_error(", GET HW CAPABILITIES %s\n",
1167 		    rv == ETIMEDOUT ? "timeout" : "error");
1168 		goto free_aqbuf;
1169 	}
1170 
1171 	sc->sc_nqueue_pairs_max = MIN((int)sc->sc_nqueue_pairs_device, ncpu);
1172 	if (ixl_param_nqps_limit > 0) {
1173 		sc->sc_nqueue_pairs_max = MIN((int)sc->sc_nqueue_pairs_max,
1174 		    ixl_param_nqps_limit);
1175 	}
1176 
1177 	sc->sc_nqueue_pairs = sc->sc_nqueue_pairs_max;
1178 	sc->sc_tx_ring_ndescs = ixl_param_tx_ndescs;
1179 	sc->sc_rx_ring_ndescs = ixl_param_rx_ndescs;
1180 
1181 	KASSERT(IXL_TXRX_PROCESS_UNLIMIT > sc->sc_rx_ring_ndescs);
1182 	KASSERT(IXL_TXRX_PROCESS_UNLIMIT > sc->sc_tx_ring_ndescs);
1183 	KASSERT(sc->sc_rx_ring_ndescs ==
1184 	    (1U << (fls32(sc->sc_rx_ring_ndescs) - 1)));
1185 	KASSERT(sc->sc_tx_ring_ndescs ==
1186 	    (1U << (fls32(sc->sc_tx_ring_ndescs) - 1)));
1187 
1188 	if (ixl_get_mac(sc) != 0) {
1189 		/* error printed by ixl_get_mac */
1190 		goto free_aqbuf;
1191 	}
1192 
1193 	aprint_normal("\n");
1194 	aprint_naive("\n");
1195 
1196 	aprint_normal_dev(self, "Ethernet address %s\n",
1197 	    ether_sprintf(sc->sc_enaddr));
1198 
1199 	rv = ixl_pxe_clear(sc);
1200 	if (rv != 0) {
1201 		aprint_debug_dev(self, "CLEAR PXE MODE %s\n",
1202 		    rv == ETIMEDOUT ? "timeout" : "error");
1203 	}
1204 
1205 	ixl_set_filter_control(sc);
1206 
1207 	if (ixl_hmc(sc) != 0) {
1208 		/* error printed by ixl_hmc */
1209 		goto free_aqbuf;
1210 	}
1211 
1212 	if (ixl_lldp_shut(sc) != 0) {
1213 		/* error printed by ixl_lldp_shut */
1214 		goto free_hmc;
1215 	}
1216 
1217 	if (ixl_phy_mask_ints(sc) != 0) {
1218 		/* error printed by ixl_phy_mask_ints */
1219 		goto free_hmc;
1220 	}
1221 
1222 	if (ixl_restart_an(sc) != 0) {
1223 		/* error printed by ixl_restart_an */
1224 		goto free_hmc;
1225 	}
1226 
1227 	if (ixl_get_switch_config(sc) != 0) {
1228 		/* error printed by ixl_get_switch_config */
1229 		goto free_hmc;
1230 	}
1231 
1232 	rv = ixl_get_link_status_poll(sc, NULL);
1233 	if (rv != 0) {
1234 		aprint_error_dev(self, "GET LINK STATUS %s\n",
1235 		    rv == ETIMEDOUT ? "timeout" : "error");
1236 		goto free_hmc;
1237 	}
1238 
1239 	/*
1240 	 * The FW often returns EIO for the "Get PHY Abilities" command
1241 	 * if it is issued without a delay
1242 	 */
1243 	DELAY(500);
1244 	if (ixl_get_phy_info(sc) != 0) {
1245 		/* error printed by ixl_get_phy_info */
1246 		goto free_hmc;
1247 	}
1248 
1249 	if (ixl_dmamem_alloc(sc, &sc->sc_scratch,
1250 	    sizeof(struct ixl_aq_vsi_data), 8) != 0) {
1251 		aprint_error_dev(self, "unable to allocate scratch buffer\n");
1252 		goto free_hmc;
1253 	}
1254 
1255 	rv = ixl_get_vsi(sc);
1256 	if (rv != 0) {
1257 		aprint_error_dev(self, "GET VSI %s %d\n",
1258 		    rv == ETIMEDOUT ? "timeout" : "error", rv);
1259 		goto free_scratch;
1260 	}
1261 
1262 	rv = ixl_set_vsi(sc);
1263 	if (rv != 0) {
1264 		aprint_error_dev(self, "UPDATE VSI error %s %d\n",
1265 		    rv == ETIMEDOUT ? "timeout" : "error", rv);
1266 		goto free_scratch;
1267 	}
1268 
1269 	if (ixl_queue_pairs_alloc(sc) != 0) {
1270 		/* error printed by ixl_queue_pairs_alloc */
1271 		goto free_scratch;
1272 	}
1273 
1274 	if (ixl_setup_interrupts(sc) != 0) {
1275 		/* error printed by ixl_setup_interrupts */
1276 		goto free_queue_pairs;
1277 	}
1278 
1279 	if (ixl_setup_stats(sc) != 0) {
1280 		aprint_error_dev(self, "failed to setup event counters\n");
1281 		goto teardown_intrs;
1282 	}
1283 
1284 	if (ixl_setup_sysctls(sc) != 0) {
1285 		/* error printed by ixl_setup_sysctls */
1286 		goto teardown_stats;
1287 	}
1288 
1289 	snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_cfg", device_xname(self));
1290 	sc->sc_workq = ixl_workq_create(xnamebuf, IXL_WORKQUEUE_PRI,
1291 	    IPL_NET, WQ_MPSAFE);
1292 	if (sc->sc_workq == NULL)
1293 		goto teardown_sysctls;
1294 
1295 	snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_txrx", device_xname(self));
1296 	rv = workqueue_create(&sc->sc_workq_txrx, xnamebuf, ixl_handle_queue_wk,
1297 	    sc, IXL_WORKQUEUE_PRI, IPL_NET, WQ_PERCPU | WQ_MPSAFE);
1298 	if (rv != 0) {
1299 		sc->sc_workq_txrx = NULL;
1300 		goto teardown_wqs;
1301 	}
1302 
1303 	snprintf(xnamebuf, sizeof(xnamebuf), "%s_atq_cv", device_xname(self));
1304 	cv_init(&sc->sc_atq_cv, xnamebuf);
1305 
1306 	strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
1307 
1308 	ifp->if_softc = sc;
1309 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1310 	ifp->if_extflags = IFEF_MPSAFE;
1311 	ifp->if_ioctl = ixl_ioctl;
1312 	ifp->if_start = ixl_start;
1313 	ifp->if_transmit = ixl_transmit;
1314 	ifp->if_init = ixl_init;
1315 	ifp->if_stop = ixl_stop;
1316 	IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_tx_ring_ndescs);
1317 	IFQ_SET_READY(&ifp->if_snd);
1318 	ifp->if_capabilities |= IXL_IFCAP_RXCSUM;
1319 	ifp->if_capabilities |= IXL_IFCAP_TXCSUM;
1320 #if 0
1321 	ifp->if_capabilities |= IFCAP_TSOv4 | IFCAP_TSOv6;
1322 #endif
1323 	ether_set_vlan_cb(&sc->sc_ec, ixl_vlan_cb);
1324 	sc->sc_ec.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1325 	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
1326 	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
1327 	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
1328 
1329 	sc->sc_ec.ec_capenable = sc->sc_ec.ec_capabilities;
1330 	/* Disable VLAN_HWFILTER by default */
1331 	CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
1332 
1333 	sc->sc_cur_ec_capenable = sc->sc_ec.ec_capenable;
1334 
1335 	sc->sc_ec.ec_ifmedia = &sc->sc_media;
1336 	ifmedia_init_with_lock(&sc->sc_media, IFM_IMASK, ixl_media_change,
1337 	    ixl_media_status, &sc->sc_cfg_lock);
1338 
1339 	ixl_media_add(sc);
1340 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1341 	if (ISSET(sc->sc_phy_abilities,
1342 	    (IXL_PHY_ABILITY_PAUSE_TX | IXL_PHY_ABILITY_PAUSE_RX))) {
1343 		ifmedia_add(&sc->sc_media,
1344 		    IFM_ETHER | IFM_AUTO | IFM_FLOW, 0, NULL);
1345 	}
1346 	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_NONE, 0, NULL);
1347 	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
1348 
1349 	if_initialize(ifp);
1350 
1351 	sc->sc_ipq = if_percpuq_create(ifp);
1352 	if_deferred_start_init(ifp, NULL);
1353 	ether_ifattach(ifp, sc->sc_enaddr);
1354 	ether_set_ifflags_cb(&sc->sc_ec, ixl_ifflags_cb);
1355 
1356 	rv = ixl_get_link_status_poll(sc, &link);
1357 	if (rv != 0)
1358 		link = LINK_STATE_UNKNOWN;
1359 	if_link_state_change(ifp, link);
1360 
1361 	ixl_atq_set(&sc->sc_link_state_atq, ixl_get_link_status_done);
1362 	ixl_work_set(&sc->sc_link_state_task,
1363 	    ixl_get_link_status_work, sc);
1364 	ixl_work_set(&sc->sc_link_state_done_task,
1365 	    ixl_get_link_status_done_work, sc);
1366 
1367 	ixl_config_other_intr(sc);
1368 	ixl_enable_other_intr(sc);
1369 
1370 	ixl_set_phy_autoselect(sc);
1371 
1372 	/* remove default mac filter and replace it so we can see vlans */
1373 	rv = ixl_remove_macvlan(sc, sc->sc_enaddr, 0, 0);
1374 	if (rv != ENOENT) {
1375 		aprint_debug_dev(self,
1376 		    "unable to remove macvlan %u\n", rv);
1377 	}
1378 	rv = ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
1379 	    IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1380 	if (rv != ENOENT) {
1381 		aprint_debug_dev(self,
1382 		    "unable to remove macvlan, ignore vlan %u\n", rv);
1383 	}
1384 
1385 	if (ixl_update_macvlan(sc) != 0) {
1386 		aprint_debug_dev(self,
1387 		    "couldn't enable vlan hardware filter\n");
1388 		CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
1389 		CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER);
1390 	}
1391 
1392 	sc->sc_txrx_workqueue = true;
1393 	sc->sc_tx_process_limit = IXL_TX_PROCESS_LIMIT;
1394 	sc->sc_rx_process_limit = IXL_RX_PROCESS_LIMIT;
1395 	sc->sc_tx_intr_process_limit = IXL_TX_INTR_PROCESS_LIMIT;
1396 	sc->sc_rx_intr_process_limit = IXL_RX_INTR_PROCESS_LIMIT;
1397 
1398 	ixl_stats_update(sc);
1399 	sc->sc_stats_counters.isc_has_offset = true;
1400 
1401 	if (pmf_device_register(self, NULL, NULL) != true)
1402 		aprint_debug_dev(self, "couldn't establish power handler\n");
1403 	sc->sc_itr_rx = IXL_ITR_RX;
1404 	sc->sc_itr_tx = IXL_ITR_TX;
1405 	sc->sc_attached = true;
1406 	if_register(ifp);
1407 
1408 	return;
1409 
1410 teardown_wqs:
1411 	config_finalize_register(self, ixl_workqs_teardown);
1412 teardown_sysctls:
1413 	ixl_teardown_sysctls(sc);
1414 teardown_stats:
1415 	ixl_teardown_stats(sc);
1416 teardown_intrs:
1417 	ixl_teardown_interrupts(sc);
1418 free_queue_pairs:
1419 	ixl_queue_pairs_free(sc);
1420 free_scratch:
1421 	ixl_dmamem_free(sc, &sc->sc_scratch);
1422 free_hmc:
1423 	ixl_hmc_free(sc);
1424 free_aqbuf:
1425 	ixl_dmamem_free(sc, &sc->sc_aqbuf);
1426 shutdown:
1427 	ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1428 	ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1429 	ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1430 	ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1431 
1432 	ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0);
1433 	ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0);
1434 	ixl_wr(sc, sc->sc_aq_regs->atq_len, 0);
1435 
1436 	ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0);
1437 	ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0);
1438 	ixl_wr(sc, sc->sc_aq_regs->arq_len, 0);
1439 
1440 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1441 	    0, IXL_DMA_LEN(&sc->sc_arq),
1442 	    BUS_DMASYNC_POSTREAD);
1443 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1444 	    0, IXL_DMA_LEN(&sc->sc_atq),
1445 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1446 
1447 	ixl_arq_unfill(sc);
1448 free_arq:
1449 	ixl_dmamem_free(sc, &sc->sc_arq);
1450 free_atq:
1451 	ixl_dmamem_free(sc, &sc->sc_atq);
1452 unmap:
1453 	mutex_destroy(&sc->sc_atq_lock);
1454 	bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
1455 	mutex_destroy(&sc->sc_cfg_lock);
1456 	sc->sc_mems = 0;
1457 
1458 	sc->sc_attached = false;
1459 }
1460 
1461 static int
1462 ixl_detach(device_t self, int flags)
1463 {
1464 	struct ixl_softc *sc = device_private(self);
1465 	struct ifnet *ifp = &sc->sc_ec.ec_if;
1466 
1467 	if (!sc->sc_attached)
1468 		return 0;
1469 
1470 	ixl_stop(ifp, 1);
1471 
1472 	callout_halt(&sc->sc_stats_callout, NULL);
1473 	ixl_work_wait(sc->sc_workq, &sc->sc_stats_task);
1474 
1475 	/* detach the interface before stopping the adminq because of callbacks */
1476 	ether_ifdetach(ifp);
1477 	if_detach(ifp);
1478 	ifmedia_fini(&sc->sc_media);
1479 	if_percpuq_destroy(sc->sc_ipq);
1480 
1481 	ixl_disable_other_intr(sc);
1482 	ixl_intr_barrier();
1483 	ixl_work_wait(sc->sc_workq, &sc->sc_arq_task);
1484 	ixl_work_wait(sc->sc_workq, &sc->sc_link_state_task);
1485 
1486 	if (sc->sc_workq != NULL) {
1487 		ixl_workq_destroy(sc->sc_workq);
1488 		sc->sc_workq = NULL;
1489 	}
1490 
1491 	if (sc->sc_workq_txrx != NULL) {
1492 		workqueue_destroy(sc->sc_workq_txrx);
1493 		sc->sc_workq_txrx = NULL;
1494 	}
1495 
1496 	ixl_teardown_interrupts(sc);
1497 	ixl_teardown_stats(sc);
1498 	ixl_teardown_sysctls(sc);
1499 
1500 	ixl_queue_pairs_free(sc);
1501 
1502 	ixl_dmamem_free(sc, &sc->sc_scratch);
1503 	ixl_hmc_free(sc);
1504 
1505 	/* shutdown */
1506 	ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1507 	ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1508 	ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1509 	ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1510 
1511 	ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0);
1512 	ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0);
1513 	ixl_wr(sc, sc->sc_aq_regs->atq_len, 0);
1514 
1515 	ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0);
1516 	ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0);
1517 	ixl_wr(sc, sc->sc_aq_regs->arq_len, 0);
1518 
1519 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1520 	    0, IXL_DMA_LEN(&sc->sc_arq),
1521 	    BUS_DMASYNC_POSTREAD);
1522 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1523 	    0, IXL_DMA_LEN(&sc->sc_atq),
1524 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1525 
1526 	ixl_arq_unfill(sc);
1527 
1528 	ixl_dmamem_free(sc, &sc->sc_arq);
1529 	ixl_dmamem_free(sc, &sc->sc_atq);
1530 	ixl_dmamem_free(sc, &sc->sc_aqbuf);
1531 
1532 	cv_destroy(&sc->sc_atq_cv);
1533 	mutex_destroy(&sc->sc_atq_lock);
1534 
1535 	if (sc->sc_mems != 0) {
1536 		bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
1537 		sc->sc_mems = 0;
1538 	}
1539 
1540 	mutex_destroy(&sc->sc_cfg_lock);
1541 
1542 	return 0;
1543 }
1544 
1545 static int
1546 ixl_workqs_teardown(device_t self)
1547 {
1548 	struct ixl_softc *sc = device_private(self);
1549 
1550 	if (sc->sc_workq != NULL) {
1551 		ixl_workq_destroy(sc->sc_workq);
1552 		sc->sc_workq = NULL;
1553 	}
1554 
1555 	if (sc->sc_workq_txrx != NULL) {
1556 		workqueue_destroy(sc->sc_workq_txrx);
1557 		sc->sc_workq_txrx = NULL;
1558 	}
1559 
1560 	return 0;
1561 }
1562 
1563 static int
1564 ixl_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
1565 {
1566 	struct ifnet *ifp = &ec->ec_if;
1567 	struct ixl_softc *sc = ifp->if_softc;
1568 	int rv;
1569 
1570 	if (!ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)) {
1571 		return 0;
1572 	}
1573 
1574 	if (set) {
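	/*
	 * With the VLAN hardware filter enabled, each registered VLAN id
	 * needs perfect-match MAC+VLAN filters for both the interface
	 * address and the broadcast address; drop them again when the id
	 * is unregistered.
	 */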
1575 		rv = ixl_add_macvlan(sc, sc->sc_enaddr, vid,
1576 		    IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
1577 		if (rv == 0) {
1578 			rv = ixl_add_macvlan(sc, etherbroadcastaddr,
1579 			    vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
1580 		}
1581 	} else {
1582 		rv = ixl_remove_macvlan(sc, sc->sc_enaddr, vid,
1583 		    IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
1584 		(void)ixl_remove_macvlan(sc, etherbroadcastaddr, vid,
1585 		    IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
1586 	}
1587 
1588 	return rv;
1589 }
1590 
1591 static void
1592 ixl_media_add(struct ixl_softc *sc)
1593 {
1594 	struct ifmedia *ifm = &sc->sc_media;
1595 	const struct ixl_phy_type *itype;
1596 	unsigned int i;
1597 	bool flow;
1598 
1599 	if (ISSET(sc->sc_phy_abilities,
1600 	    (IXL_PHY_ABILITY_PAUSE_TX | IXL_PHY_ABILITY_PAUSE_RX))) {
1601 		flow = true;
1602 	} else {
1603 		flow = false;
1604 	}
1605 
1606 	for (i = 0; i < __arraycount(ixl_phy_type_map); i++) {
1607 		itype = &ixl_phy_type_map[i];
1608 
1609 		if (ISSET(sc->sc_phy_types, itype->phy_type)) {
1610 			ifmedia_add(ifm,
1611 			    IFM_ETHER | IFM_FDX | itype->ifm_type, 0, NULL);
1612 
1613 			if (flow) {
1614 				ifmedia_add(ifm,
1615 				    IFM_ETHER | IFM_FDX | IFM_FLOW |
1616 				    itype->ifm_type, 0, NULL);
1617 			}
1618 
1619 			if (itype->ifm_type != IFM_100_TX)
1620 				continue;
1621 
1622 			ifmedia_add(ifm, IFM_ETHER | itype->ifm_type,
1623 			    0, NULL);
1624 			if (flow) {
1625 				ifmedia_add(ifm,
1626 				    IFM_ETHER | IFM_FLOW | itype->ifm_type,
1627 				    0, NULL);
1628 			}
1629 		}
1630 	}
1631 }
1632 
1633 static void
1634 ixl_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1635 {
1636 	struct ixl_softc *sc = ifp->if_softc;
1637 
1638 	KASSERT(mutex_owned(&sc->sc_cfg_lock));
1639 
1640 	ifmr->ifm_status = sc->sc_media_status;
1641 	ifmr->ifm_active = sc->sc_media_active;
1642 }
1643 
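/*
 * Apply a media change request.  Backplane media cannot be reconfigured.
 * Otherwise translate the requested subtype into a link speed and a set
 * of PHY abilities (including pause when IFM_FLOW is requested) and push
 * the new configuration to the firmware.
 */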
1644 static int
1645 ixl_media_change(struct ifnet *ifp)
1646 {
1647 	struct ixl_softc *sc = ifp->if_softc;
1648 	struct ifmedia *ifm = &sc->sc_media;
1649 	uint64_t ifm_active = sc->sc_media_active;
1650 	uint8_t link_speed, abilities;
1651 
1652 	switch (IFM_SUBTYPE(ifm_active)) {
1653 	case IFM_1000_SGMII:
1654 	case IFM_1000_KX:
1655 	case IFM_10G_KX4:
1656 	case IFM_10G_KR:
1657 	case IFM_40G_KR4:
1658 	case IFM_20G_KR2:
1659 	case IFM_25G_KR:
1660 		/* backplanes */
1661 		return EINVAL;
1662 	}
1663 
1664 	abilities = IXL_PHY_ABILITY_AUTONEGO | IXL_PHY_ABILITY_LINKUP;
1665 
1666 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
1667 	case IFM_AUTO:
1668 		link_speed = sc->sc_phy_linkspeed;
1669 		break;
1670 	case IFM_NONE:
1671 		link_speed = 0;
1672 		CLR(abilities, IXL_PHY_ABILITY_LINKUP);
1673 		break;
1674 	default:
1675 		link_speed = ixl_search_baudrate(
1676 		    ifmedia_baudrate(ifm->ifm_media));
1677 	}
1678 
1679 	if (ISSET(abilities, IXL_PHY_ABILITY_LINKUP)) {
1680 		if (ISSET(link_speed, sc->sc_phy_linkspeed) == 0)
1681 			return EINVAL;
1682 	}
1683 
1684 	if (ifm->ifm_media & IFM_FLOW) {
1685 		abilities |= sc->sc_phy_abilities &
1686 		    (IXL_PHY_ABILITY_PAUSE_TX | IXL_PHY_ABILITY_PAUSE_RX);
1687 	}
1688 
1689 	return ixl_set_phy_config(sc, link_speed, abilities, false);
1690 }
1691 
1692 
1693 static void
1694 ixl_del_all_multiaddr(struct ixl_softc *sc)
1695 {
1696 	struct ethercom *ec = &sc->sc_ec;
1697 	struct ether_multi *enm;
1698 	struct ether_multistep step;
1699 
1700 	ETHER_LOCK(ec);
1701 	for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1702 	    ETHER_NEXT_MULTI(step, enm)) {
1703 		ixl_remove_macvlan(sc, enm->enm_addrlo, 0,
1704 		    IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1705 	}
1706 	ETHER_UNLOCK(ec);
1707 }
1708 
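/*
 * Add a multicast filter.  A range request, or running out of filter
 * slots (ENOSPC), flushes the individual filters and falls back to
 * IFF_ALLMULTI.
 */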
1709 static int
1710 ixl_add_multi(struct ixl_softc *sc, uint8_t *addrlo, uint8_t *addrhi)
1711 {
1712 	struct ifnet *ifp = &sc->sc_ec.ec_if;
1713 	int rv;
1714 
1715 	if (ISSET(ifp->if_flags, IFF_ALLMULTI))
1716 		return 0;
1717 
1718 	if (memcmp(addrlo, addrhi, ETHER_ADDR_LEN) != 0) {
1719 		ixl_del_all_multiaddr(sc);
1720 		SET(ifp->if_flags, IFF_ALLMULTI);
1721 		return ENETRESET;
1722 	}
1723 
1724 	/* multicast addresses cannot use the VLAN HWFILTER */
1725 	rv = ixl_add_macvlan(sc, addrlo, 0,
1726 	    IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
1727 
1728 	if (rv == ENOSPC) {
1729 		ixl_del_all_multiaddr(sc);
1730 		SET(ifp->if_flags, IFF_ALLMULTI);
1731 		return ENETRESET;
1732 	}
1733 
1734 	return rv;
1735 }
1736 
1737 static int
1738 ixl_del_multi(struct ixl_softc *sc, uint8_t *addrlo, uint8_t *addrhi)
1739 {
1740 	struct ifnet *ifp = &sc->sc_ec.ec_if;
1741 	struct ethercom *ec = &sc->sc_ec;
1742 	struct ether_multi *enm, *enm_last;
1743 	struct ether_multistep step;
1744 	int error, rv = 0;
1745 
1746 	if (!ISSET(ifp->if_flags, IFF_ALLMULTI)) {
1747 		ixl_remove_macvlan(sc, addrlo, 0,
1748 		    IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1749 		return 0;
1750 	}
1751 
1752 	ETHER_LOCK(ec);
1753 	for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1754 	    ETHER_NEXT_MULTI(step, enm)) {
1755 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
1756 		    ETHER_ADDR_LEN) != 0) {
1757 			goto out;
1758 		}
1759 	}
1760 
1761 	for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1762 	    ETHER_NEXT_MULTI(step, enm)) {
1763 		error = ixl_add_macvlan(sc, enm->enm_addrlo, 0,
1764 		    IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
1765 		if (error != 0)
1766 			break;
1767 	}
1768 
1769 	if (enm != NULL) {
1770 		enm_last = enm;
1771 		for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1772 		    ETHER_NEXT_MULTI(step, enm)) {
1773 			if (enm == enm_last)
1774 				break;
1775 
1776 			ixl_remove_macvlan(sc, enm->enm_addrlo, 0,
1777 			    IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
1778 		}
1779 	} else {
1780 		CLR(ifp->if_flags, IFF_ALLMULTI);
1781 		rv = ENETRESET;
1782 	}
1783 
1784 out:
1785 	ETHER_UNLOCK(ec);
1786 	return rv;
1787 }
1788 
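/*
 * Interface ioctl handler.  MTU changes are validated and applied by
 * reinitializing the interface; SIOCADDMULTI/SIOCDELMULTI update the
 * hardware multicast filters; everything else is passed to ether_ioctl().
 * A final ENETRESET reprograms the receive filter via ixl_iff().
 */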
1789 static int
1790 ixl_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1791 {
1792 	struct ifreq *ifr = (struct ifreq *)data;
1793 	struct ixl_softc *sc = (struct ixl_softc *)ifp->if_softc;
1794 	const struct sockaddr *sa;
1795 	uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN];
1796 	int s, error = 0;
1797 	unsigned int nmtu;
1798 
1799 	switch (cmd) {
1800 	case SIOCSIFMTU:
1801 		nmtu = ifr->ifr_mtu;
1802 
1803 		if (nmtu < IXL_MIN_MTU || nmtu > IXL_MAX_MTU) {
1804 			error = EINVAL;
1805 			break;
1806 		}
1807 		if (ifp->if_mtu != nmtu) {
1808 			s = splnet();
1809 			error = ether_ioctl(ifp, cmd, data);
1810 			splx(s);
1811 			if (error == ENETRESET)
1812 				error = ixl_init(ifp);
1813 		}
1814 		break;
1815 	case SIOCADDMULTI:
1816 		sa = ifreq_getaddr(SIOCADDMULTI, ifr);
1817 		if (ether_addmulti(sa, &sc->sc_ec) == ENETRESET) {
1818 			error = ether_multiaddr(sa, addrlo, addrhi);
1819 			if (error != 0)
1820 				return error;
1821 
1822 			error = ixl_add_multi(sc, addrlo, addrhi);
1823 			if (error != 0 && error != ENETRESET) {
1824 				ether_delmulti(sa, &sc->sc_ec);
1825 				error = EIO;
1826 			}
1827 		}
1828 		break;
1829 
1830 	case SIOCDELMULTI:
1831 		sa = ifreq_getaddr(SIOCDELMULTI, ifr);
1832 		if (ether_delmulti(sa, &sc->sc_ec) == ENETRESET) {
1833 			error = ether_multiaddr(sa, addrlo, addrhi);
1834 			if (error != 0)
1835 				return error;
1836 
1837 			error = ixl_del_multi(sc, addrlo, addrhi);
1838 		}
1839 		break;
1840 
1841 	default:
1842 		s = splnet();
1843 		error = ether_ioctl(ifp, cmd, data);
1844 		splx(s);
1845 	}
1846 
1847 	if (error == ENETRESET)
1848 		error = ixl_iff(sc);
1849 
1850 	return error;
1851 }
1852 
1853 static enum i40e_mac_type
1854 ixl_mactype(pci_product_id_t id)
1855 {
1856 
1857 	switch (id) {
1858 	case PCI_PRODUCT_INTEL_XL710_SFP:
1859 	case PCI_PRODUCT_INTEL_XL710_KX_B:
1860 	case PCI_PRODUCT_INTEL_XL710_KX_C:
1861 	case PCI_PRODUCT_INTEL_XL710_QSFP_A:
1862 	case PCI_PRODUCT_INTEL_XL710_QSFP_B:
1863 	case PCI_PRODUCT_INTEL_XL710_QSFP_C:
1864 	case PCI_PRODUCT_INTEL_X710_10G_T_1:
1865 	case PCI_PRODUCT_INTEL_X710_10G_T_2:
1866 	case PCI_PRODUCT_INTEL_XL710_20G_BP_1:
1867 	case PCI_PRODUCT_INTEL_XL710_20G_BP_2:
1868 	case PCI_PRODUCT_INTEL_X710_T4_10G:
1869 	case PCI_PRODUCT_INTEL_XXV710_25G_BP:
1870 	case PCI_PRODUCT_INTEL_XXV710_25G_SFP28:
1871 	case PCI_PRODUCT_INTEL_X710_10G_SFP:
1872 	case PCI_PRODUCT_INTEL_X710_10G_BP:
1873 		return I40E_MAC_XL710;
1874 
1875 	case PCI_PRODUCT_INTEL_X722_KX:
1876 	case PCI_PRODUCT_INTEL_X722_QSFP:
1877 	case PCI_PRODUCT_INTEL_X722_SFP:
1878 	case PCI_PRODUCT_INTEL_X722_1G_BASET:
1879 	case PCI_PRODUCT_INTEL_X722_10G_BASET:
1880 	case PCI_PRODUCT_INTEL_X722_I_SFP:
1881 		return I40E_MAC_X722;
1882 	}
1883 
1884 	return I40E_MAC_GENERIC;
1885 }
1886 
1887 static void
1888 ixl_pci_csr_setup(pci_chipset_tag_t pc, pcitag_t tag)
1889 {
1890 	pcireg_t csr;
1891 
1892 	csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
1893 	csr |= (PCI_COMMAND_MASTER_ENABLE |
1894 	    PCI_COMMAND_MEM_ENABLE);
1895 	pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
1896 }
1897 
1898 static inline void *
1899 ixl_hmc_kva(struct ixl_softc *sc, enum ixl_hmc_types type, unsigned int i)
1900 {
1901 	uint8_t *kva = IXL_DMA_KVA(&sc->sc_hmc_pd);
1902 	struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];
1903 
1904 	if (i >= e->hmc_count)
1905 		return NULL;
1906 
1907 	kva += e->hmc_base;
1908 	kva += i * e->hmc_size;
1909 
1910 	return kva;
1911 }
1912 
1913 static inline size_t
1914 ixl_hmc_len(struct ixl_softc *sc, enum ixl_hmc_types type)
1915 {
1916 	struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];
1917 
1918 	return e->hmc_size;
1919 }
1920 
1921 static void
1922 ixl_enable_queue_intr(struct ixl_softc *sc, struct ixl_queue_pair *qp)
1923 {
1924 	struct ixl_rx_ring *rxr = qp->qp_rxr;
1925 
1926 	ixl_wr(sc, I40E_PFINT_DYN_CTLN(rxr->rxr_qid),
1927 	    I40E_PFINT_DYN_CTLN_INTENA_MASK |
1928 	    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1929 	    (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
1930 	ixl_flush(sc);
1931 }
1932 
1933 static void
1934 ixl_disable_queue_intr(struct ixl_softc *sc, struct ixl_queue_pair *qp)
1935 {
1936 	struct ixl_rx_ring *rxr = qp->qp_rxr;
1937 
1938 	ixl_wr(sc, I40E_PFINT_DYN_CTLN(rxr->rxr_qid),
1939 	    (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
1940 	ixl_flush(sc);
1941 }
1942 
1943 static void
1944 ixl_enable_other_intr(struct ixl_softc *sc)
1945 {
1946 
1947 	ixl_wr(sc, I40E_PFINT_DYN_CTL0,
1948 	    I40E_PFINT_DYN_CTL0_INTENA_MASK |
1949 	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
1950 	    (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
1951 	ixl_flush(sc);
1952 }
1953 
1954 static void
1955 ixl_disable_other_intr(struct ixl_softc *sc)
1956 {
1957 
1958 	ixl_wr(sc, I40E_PFINT_DYN_CTL0,
1959 	    (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT));
1960 	ixl_flush(sc);
1961 }
1962 
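/*
 * Reprogram the hardware after (re)initialization: fetch and update the
 * VSI, write the tx/rx HMC contexts for each queue pair, restore the tail
 * pointers, refill the rx rings, and enable each queue, polling until the
 * hardware reports it enabled.  Returns ETIMEDOUT if a queue never comes up.
 */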
1963 static int
1964 ixl_reinit(struct ixl_softc *sc)
1965 {
1966 	struct ixl_rx_ring *rxr;
1967 	struct ixl_tx_ring *txr;
1968 	unsigned int i;
1969 	uint32_t reg;
1970 
1971 	KASSERT(mutex_owned(&sc->sc_cfg_lock));
1972 
1973 	if (ixl_get_vsi(sc) != 0)
1974 		return EIO;
1975 
1976 	if (ixl_set_vsi(sc) != 0)
1977 		return EIO;
1978 
1979 	for (i = 0; i < sc->sc_nqueue_pairs; i++) {
1980 		txr = sc->sc_qps[i].qp_txr;
1981 		rxr = sc->sc_qps[i].qp_rxr;
1982 
1983 		ixl_txr_config(sc, txr);
1984 		ixl_rxr_config(sc, rxr);
1985 	}
1986 
1987 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
1988 	    0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_PREWRITE);
1989 
1990 	for (i = 0; i < sc->sc_nqueue_pairs; i++) {
1991 		txr = sc->sc_qps[i].qp_txr;
1992 		rxr = sc->sc_qps[i].qp_rxr;
1993 
1994 		ixl_wr(sc, I40E_QTX_CTL(i), I40E_QTX_CTL_PF_QUEUE |
1995 		    (sc->sc_pf_id << I40E_QTX_CTL_PF_INDX_SHIFT));
1996 		ixl_flush(sc);
1997 
1998 		ixl_wr(sc, txr->txr_tail, txr->txr_prod);
1999 		ixl_wr(sc, rxr->rxr_tail, rxr->rxr_prod);
2000 
2001 		/* ixl_rxfill() needs lock held */
2002 		mutex_enter(&rxr->rxr_lock);
2003 		ixl_rxfill(sc, rxr);
2004 		mutex_exit(&rxr->rxr_lock);
2005 
2006 		reg = ixl_rd(sc, I40E_QRX_ENA(i));
2007 		SET(reg, I40E_QRX_ENA_QENA_REQ_MASK);
2008 		ixl_wr(sc, I40E_QRX_ENA(i), reg);
2009 		if (ixl_rxr_enabled(sc, rxr) != 0)
2010 			goto stop;
2011 
2012 		ixl_txr_qdis(sc, txr, 1);
2013 
2014 		reg = ixl_rd(sc, I40E_QTX_ENA(i));
2015 		SET(reg, I40E_QTX_ENA_QENA_REQ_MASK);
2016 		ixl_wr(sc, I40E_QTX_ENA(i), reg);
2017 
2018 		if (ixl_txr_enabled(sc, txr) != 0)
2019 			goto stop;
2020 	}
2021 
2022 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
2023 	    0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_POSTWRITE);
2024 
2025 	return 0;
2026 
2027 stop:
2028 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
2029 	    0, IXL_DMA_LEN(&sc->sc_hmc_pd), BUS_DMASYNC_POSTWRITE);
2030 
2031 	return ETIMEDOUT;
2032 }
2033 
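/*
 * Bring the interface up with the configuration lock held: stop it if it
 * is already running, resolve VLAN capability changes, choose the number
 * of queue pairs (one unless MSI-X is in use), reinitialize the hardware,
 * enable the queue interrupts, program the receive filter and start the
 * statistics callout.
 */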
2034 static int
2035 ixl_init_locked(struct ixl_softc *sc)
2036 {
2037 	struct ifnet *ifp = &sc->sc_ec.ec_if;
2038 	unsigned int i;
2039 	int error, eccap_change;
2040 
2041 	KASSERT(mutex_owned(&sc->sc_cfg_lock));
2042 
2043 	if (ISSET(ifp->if_flags, IFF_RUNNING))
2044 		ixl_stop_locked(sc);
2045 
2046 	if (sc->sc_dead) {
2047 		return ENXIO;
2048 	}
2049 
2050 	eccap_change = sc->sc_ec.ec_capenable ^ sc->sc_cur_ec_capenable;
2051 	if (ISSET(eccap_change, ETHERCAP_VLAN_HWTAGGING))
2052 		sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWTAGGING;
2053 
2054 	if (ISSET(eccap_change, ETHERCAP_VLAN_HWFILTER)) {
2055 		if (ixl_update_macvlan(sc) == 0) {
2056 			sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWFILTER;
2057 		} else {
2058 			CLR(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
2059 			CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER);
2060 		}
2061 	}
2062 
2063 	if (sc->sc_intrtype != PCI_INTR_TYPE_MSIX)
2064 		sc->sc_nqueue_pairs = 1;
2065 	else
2066 		sc->sc_nqueue_pairs = sc->sc_nqueue_pairs_max;
2067 
2068 	error = ixl_reinit(sc);
2069 	if (error) {
2070 		ixl_stop_locked(sc);
2071 		return error;
2072 	}
2073 
2074 	SET(ifp->if_flags, IFF_RUNNING);
2075 	CLR(ifp->if_flags, IFF_OACTIVE);
2076 
2077 	ixl_config_rss(sc);
2078 	ixl_config_queue_intr(sc);
2079 
2080 	for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2081 		ixl_enable_queue_intr(sc, &sc->sc_qps[i]);
2082 	}
2083 
2084 	error = ixl_iff(sc);
2085 	if (error) {
2086 		ixl_stop_locked(sc);
2087 		return error;
2088 	}
2089 
2090 	callout_schedule(&sc->sc_stats_callout, mstohz(sc->sc_stats_intval));
2091 
2092 	return 0;
2093 }
2094 
2095 static int
2096 ixl_init(struct ifnet *ifp)
2097 {
2098 	struct ixl_softc *sc = ifp->if_softc;
2099 	int error;
2100 
2101 	mutex_enter(&sc->sc_cfg_lock);
2102 	error = ixl_init_locked(sc);
2103 	mutex_exit(&sc->sc_cfg_lock);
2104 
2105 	if (error == 0) {
2106 		error = ixl_get_link_status(sc,
2107 		    IXL_LINK_FLAG_WAITDONE);
2108 	}
2109 
2110 	return error;
2111 }
2112 
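/*
 * Program the receive filter: set the VSI promiscuous/broadcast flags via
 * an admin queue command according to IFF_PROMISC, IFF_ALLMULTI and the
 * VLAN HWFILTER state, and re-register the unicast MAC filter if the
 * link-layer address has changed.
 */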
2113 static int
2114 ixl_iff(struct ixl_softc *sc)
2115 {
2116 	struct ifnet *ifp = &sc->sc_ec.ec_if;
2117 	struct ixl_atq iatq;
2118 	struct ixl_aq_desc *iaq;
2119 	struct ixl_aq_vsi_promisc_param *param;
2120 	uint16_t flag_add, flag_del;
2121 	int error;
2122 
2123 	if (!ISSET(ifp->if_flags, IFF_RUNNING))
2124 		return 0;
2125 
2126 	memset(&iatq, 0, sizeof(iatq));
2127 
2128 	iaq = &iatq.iatq_desc;
2129 	iaq->iaq_opcode = htole16(IXL_AQ_OP_SET_VSI_PROMISC);
2130 
2131 	param = (struct ixl_aq_vsi_promisc_param *)&iaq->iaq_param;
2132 	param->flags = htole16(0);
2133 
2134 	if (!ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)
2135 	    || ISSET(ifp->if_flags, IFF_PROMISC)) {
2136 		param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_BCAST |
2137 		    IXL_AQ_VSI_PROMISC_FLAG_VLAN);
2138 	}
2139 
2140 	if (ISSET(ifp->if_flags, IFF_PROMISC)) {
2141 		param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
2142 		    IXL_AQ_VSI_PROMISC_FLAG_MCAST);
2143 	} else if (ISSET(ifp->if_flags, IFF_ALLMULTI)) {
2144 		param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_MCAST);
2145 	}
2146 	param->valid_flags = htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
2147 	    IXL_AQ_VSI_PROMISC_FLAG_MCAST | IXL_AQ_VSI_PROMISC_FLAG_BCAST |
2148 	    IXL_AQ_VSI_PROMISC_FLAG_VLAN);
2149 	param->seid = sc->sc_seid;
2150 
2151 	error = ixl_atq_exec(sc, &iatq);
2152 	if (error)
2153 		return error;
2154 
2155 	if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK))
2156 		return EIO;
2157 
2158 	if (memcmp(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN) != 0) {
2159 		if (ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER)) {
2160 			flag_add = IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH;
2161 			flag_del = IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH;
2162 		} else {
2163 			flag_add = IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN;
2164 			flag_del = IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN;
2165 		}
2166 
2167 		ixl_remove_macvlan(sc, sc->sc_enaddr, 0, flag_del);
2168 
2169 		memcpy(sc->sc_enaddr, CLLADDR(ifp->if_sadl), ETHER_ADDR_LEN);
2170 		ixl_add_macvlan(sc, sc->sc_enaddr, 0, flag_add);
2171 	}
2172 	return 0;
2173 }
2174 
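/*
 * Quiesce the interface with the configuration lock held: mark it down,
 * stop the statistics callout, disable the queue interrupts, pre-disable
 * the tx queues, clear the queue enable bits, wait for the hardware to
 * report the queues stopped, drain the per-queue work, and unconfigure
 * and clean each ring.  On timeout the device is marked dead.
 */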
2175 static void
2176 ixl_stop_locked(struct ixl_softc *sc)
2177 {
2178 	struct ifnet *ifp = &sc->sc_ec.ec_if;
2179 	struct ixl_rx_ring *rxr;
2180 	struct ixl_tx_ring *txr;
2181 	unsigned int i;
2182 	uint32_t reg;
2183 
2184 	KASSERT(mutex_owned(&sc->sc_cfg_lock));
2185 
2186 	CLR(ifp->if_flags, IFF_RUNNING | IFF_OACTIVE);
2187 	callout_stop(&sc->sc_stats_callout);
2188 
2189 	for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2190 		txr = sc->sc_qps[i].qp_txr;
2191 		rxr = sc->sc_qps[i].qp_rxr;
2192 
2193 		ixl_disable_queue_intr(sc, &sc->sc_qps[i]);
2194 
2195 		mutex_enter(&txr->txr_lock);
2196 		ixl_txr_qdis(sc, txr, 0);
2197 		mutex_exit(&txr->txr_lock);
2198 	}
2199 
2200 	/* XXX wait at least 400 usec for all tx queues in one go */
2201 	ixl_flush(sc);
2202 	DELAY(500);
2203 
2204 	for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2205 		txr = sc->sc_qps[i].qp_txr;
2206 		rxr = sc->sc_qps[i].qp_rxr;
2207 
2208 		mutex_enter(&txr->txr_lock);
2209 		reg = ixl_rd(sc, I40E_QTX_ENA(i));
2210 		CLR(reg, I40E_QTX_ENA_QENA_REQ_MASK);
2211 		ixl_wr(sc, I40E_QTX_ENA(i), reg);
2212 		mutex_exit(&txr->txr_lock);
2213 
2214 		mutex_enter(&rxr->rxr_lock);
2215 		reg = ixl_rd(sc, I40E_QRX_ENA(i));
2216 		CLR(reg, I40E_QRX_ENA_QENA_REQ_MASK);
2217 		ixl_wr(sc, I40E_QRX_ENA(i), reg);
2218 		mutex_exit(&rxr->rxr_lock);
2219 	}
2220 
2221 	/* XXX short wait for all queue disables to settle */
2222 	ixl_flush(sc);
2223 	DELAY(50);
2224 
2225 	for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2226 		txr = sc->sc_qps[i].qp_txr;
2227 		rxr = sc->sc_qps[i].qp_rxr;
2228 
2229 		mutex_enter(&txr->txr_lock);
2230 		if (ixl_txr_disabled(sc, txr) != 0) {
2231 			mutex_exit(&txr->txr_lock);
2232 			goto die;
2233 		}
2234 		mutex_exit(&txr->txr_lock);
2235 
2236 		mutex_enter(&rxr->rxr_lock);
2237 		if (ixl_rxr_disabled(sc, rxr) != 0) {
2238 			mutex_exit(&rxr->rxr_lock);
2239 			goto die;
2240 		}
2241 		mutex_exit(&rxr->rxr_lock);
2242 	}
2243 
2244 	for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2245 		sc->sc_qps[i].qp_workqueue = false;
2246 		workqueue_wait(sc->sc_workq_txrx,
2247 		    &sc->sc_qps[i].qp_work);
2248 	}
2249 
2250 	for (i = 0; i < sc->sc_nqueue_pairs; i++) {
2251 		txr = sc->sc_qps[i].qp_txr;
2252 		rxr = sc->sc_qps[i].qp_rxr;
2253 
2254 		mutex_enter(&txr->txr_lock);
2255 		ixl_txr_unconfig(sc, txr);
2256 		mutex_exit(&txr->txr_lock);
2257 
2258 		mutex_enter(&rxr->rxr_lock);
2259 		ixl_rxr_unconfig(sc, rxr);
2260 		mutex_exit(&rxr->rxr_lock);
2261 
2262 		ixl_txr_clean(sc, txr);
2263 		ixl_rxr_clean(sc, rxr);
2264 	}
2265 
2266 	return;
2267 die:
2268 	sc->sc_dead = true;
2269 	log(LOG_CRIT, "%s: failed to shut down rings\n",
2270 	    device_xname(sc->sc_dev));
2271 	return;
2272 }
2273 
2274 static void
2275 ixl_stop(struct ifnet *ifp, int disable)
2276 {
2277 	struct ixl_softc *sc = ifp->if_softc;
2278 
2279 	mutex_enter(&sc->sc_cfg_lock);
2280 	ixl_stop_locked(sc);
2281 	mutex_exit(&sc->sc_cfg_lock);
2282 }
2283 
2284 static int
2285 ixl_queue_pairs_alloc(struct ixl_softc *sc)
2286 {
2287 	struct ixl_queue_pair *qp;
2288 	unsigned int i;
2289 	size_t sz;
2290 
2291 	sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max;
2292 	sc->sc_qps = kmem_zalloc(sz, KM_SLEEP);
2293 
2294 	for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
2295 		qp = &sc->sc_qps[i];
2296 
2297 		qp->qp_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
2298 		    ixl_handle_queue, qp);
2299 		if (qp->qp_si == NULL)
2300 			goto free;
2301 
2302 		qp->qp_txr = ixl_txr_alloc(sc, i);
2303 		if (qp->qp_txr == NULL)
2304 			goto free;
2305 
2306 		qp->qp_rxr = ixl_rxr_alloc(sc, i);
2307 		if (qp->qp_rxr == NULL)
2308 			goto free;
2309 
2310 		qp->qp_sc = sc;
2311 		snprintf(qp->qp_name, sizeof(qp->qp_name),
2312 		    "%s-TXRX%d", device_xname(sc->sc_dev), i);
2313 	}
2314 
2315 	return 0;
2316 free:
2317 	if (sc->sc_qps != NULL) {
2318 		for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
2319 			qp = &sc->sc_qps[i];
2320 
2321 			if (qp->qp_txr != NULL)
2322 				ixl_txr_free(sc, qp->qp_txr);
2323 			if (qp->qp_rxr != NULL)
2324 				ixl_rxr_free(sc, qp->qp_rxr);
2325 			if (qp->qp_si != NULL)
2326 				softint_disestablish(qp->qp_si);
2327 		}
2328 
2329 		sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max;
2330 		kmem_free(sc->sc_qps, sz);
2331 		sc->sc_qps = NULL;
2332 	}
2333 
2334 	return -1;
2335 }
2336 
2337 static void
2338 ixl_queue_pairs_free(struct ixl_softc *sc)
2339 {
2340 	struct ixl_queue_pair *qp;
2341 	unsigned int i;
2342 	size_t sz;
2343 
2344 	for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
2345 		qp = &sc->sc_qps[i];
2346 		ixl_txr_free(sc, qp->qp_txr);
2347 		ixl_rxr_free(sc, qp->qp_rxr);
2348 		softint_disestablish(qp->qp_si);
2349 	}
2350 
2351 	sz = sizeof(sc->sc_qps[0]) * sc->sc_nqueue_pairs_max;
2352 	kmem_free(sc->sc_qps, sz);
2353 	sc->sc_qps = NULL;
2354 }
2355 
2356 static struct ixl_tx_ring *
2357 ixl_txr_alloc(struct ixl_softc *sc, unsigned int qid)
2358 {
2359 	struct ixl_tx_ring *txr = NULL;
2360 	struct ixl_tx_map *maps = NULL, *txm;
2361 	unsigned int i;
2362 
2363 	txr = kmem_zalloc(sizeof(*txr), KM_SLEEP);
2364 	maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_tx_ring_ndescs,
2365 	    KM_SLEEP);
2366 
2367 	if (ixl_dmamem_alloc(sc, &txr->txr_mem,
2368 	    sizeof(struct ixl_tx_desc) * sc->sc_tx_ring_ndescs,
2369 	    IXL_TX_QUEUE_ALIGN) != 0)
2370 	    goto free;
2371 
2372 	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2373 		txm = &maps[i];
2374 
2375 		if (bus_dmamap_create(sc->sc_dmat, IXL_TX_PKT_MAXSIZE,
2376 		    IXL_TX_PKT_DESCS, IXL_TX_PKT_MAXSIZE, 0,
2377 		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &txm->txm_map) != 0)
2378 			goto uncreate;
2379 
2380 		txm->txm_eop = -1;
2381 		txm->txm_m = NULL;
2382 	}
2383 
2384 	txr->txr_cons = txr->txr_prod = 0;
2385 	txr->txr_maps = maps;
2386 
2387 	txr->txr_intrq = pcq_create(sc->sc_tx_ring_ndescs, KM_NOSLEEP);
2388 	if (txr->txr_intrq == NULL)
2389 		goto uncreate;
2390 
2391 	txr->txr_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
2392 	    ixl_deferred_transmit, txr);
2393 	if (txr->txr_si == NULL)
2394 		goto destroy_pcq;
2395 
2396 	txr->txr_tail = I40E_QTX_TAIL(qid);
2397 	txr->txr_qid = qid;
2398 	txr->txr_sc = sc;
2399 	mutex_init(&txr->txr_lock, MUTEX_DEFAULT, IPL_NET);
2400 
2401 	return txr;
2402 
2403 destroy_pcq:
2404 	pcq_destroy(txr->txr_intrq);
2405 uncreate:
2406 	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2407 		txm = &maps[i];
2408 
2409 		if (txm->txm_map == NULL)
2410 			continue;
2411 
2412 		bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
2413 	}
2414 
2415 	ixl_dmamem_free(sc, &txr->txr_mem);
2416 free:
2417 	kmem_free(maps, sizeof(maps[0]) * sc->sc_tx_ring_ndescs);
2418 	kmem_free(txr, sizeof(*txr));
2419 
2420 	return NULL;
2421 }
2422 
2423 static void
2424 ixl_txr_qdis(struct ixl_softc *sc, struct ixl_tx_ring *txr, int enable)
2425 {
2426 	unsigned int qid;
2427 	bus_size_t reg;
2428 	uint32_t r;
2429 
2430 	qid = txr->txr_qid + sc->sc_base_queue;
2431 	reg = I40E_GLLAN_TXPRE_QDIS(qid / 128);
2432 	qid %= 128;
2433 
2434 	r = ixl_rd(sc, reg);
2435 	CLR(r, I40E_GLLAN_TXPRE_QDIS_QINDX_MASK);
2436 	SET(r, qid << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
2437 	SET(r, enable ? I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK :
2438 	    I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK);
2439 	ixl_wr(sc, reg, r);
2440 }
2441 
2442 static void
2443 ixl_txr_config(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2444 {
2445 	struct ixl_hmc_txq txq;
2446 	struct ixl_aq_vsi_data *data = IXL_DMA_KVA(&sc->sc_scratch);
2447 	void *hmc;
2448 
2449 	memset(&txq, 0, sizeof(txq));
2450 	txq.head = htole16(txr->txr_cons);
2451 	txq.new_context = 1;
2452 	txq.base = htole64(IXL_DMA_DVA(&txr->txr_mem) / IXL_HMC_TXQ_BASE_UNIT);
2453 	txq.head_wb_ena = IXL_HMC_TXQ_DESC_WB;
2454 	txq.qlen = htole16(sc->sc_tx_ring_ndescs);
2455 	txq.tphrdesc_ena = 0;
2456 	txq.tphrpacket_ena = 0;
2457 	txq.tphwdesc_ena = 0;
2458 	txq.rdylist = data->qs_handle[0];
2459 
2460 	hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
2461 	memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX));
2462 	ixl_hmc_pack(hmc, &txq, ixl_hmc_pack_txq,
2463 	    __arraycount(ixl_hmc_pack_txq));
2464 }
2465 
2466 static void
2467 ixl_txr_unconfig(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2468 {
2469 	void *hmc;
2470 
2471 	hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
2472 	memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX));
2473 	txr->txr_cons = txr->txr_prod = 0;
2474 }
2475 
2476 static void
2477 ixl_txr_clean(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2478 {
2479 	struct ixl_tx_map *maps, *txm;
2480 	bus_dmamap_t map;
2481 	unsigned int i;
2482 
2483 	maps = txr->txr_maps;
2484 	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2485 		txm = &maps[i];
2486 
2487 		if (txm->txm_m == NULL)
2488 			continue;
2489 
2490 		map = txm->txm_map;
2491 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2492 		    BUS_DMASYNC_POSTWRITE);
2493 		bus_dmamap_unload(sc->sc_dmat, map);
2494 
2495 		m_freem(txm->txm_m);
2496 		txm->txm_m = NULL;
2497 	}
2498 }
2499 
2500 static int
2501 ixl_txr_enabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2502 {
2503 	bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
2504 	uint32_t reg;
2505 	int i;
2506 
2507 	for (i = 0; i < 10; i++) {
2508 		reg = ixl_rd(sc, ena);
2509 		if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK))
2510 			return 0;
2511 
2512 		delaymsec(10);
2513 	}
2514 
2515 	return ETIMEDOUT;
2516 }
2517 
2518 static int
2519 ixl_txr_disabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2520 {
2521 	bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
2522 	uint32_t reg;
2523 	int i;
2524 
2525 	KASSERT(mutex_owned(&txr->txr_lock));
2526 
2527 	for (i = 0; i < 10; i++) {
2528 		reg = ixl_rd(sc, ena);
2529 		if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK) == 0)
2530 			return 0;
2531 
2532 		delaymsec(10);
2533 	}
2534 
2535 	return ETIMEDOUT;
2536 }
2537 
2538 static void
2539 ixl_txr_free(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2540 {
2541 	struct ixl_tx_map *maps, *txm;
2542 	struct mbuf *m;
2543 	unsigned int i;
2544 
2545 	softint_disestablish(txr->txr_si);
2546 
2547 	maps = txr->txr_maps;
2548 	for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2549 		txm = &maps[i];
2550 
2551 		bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
2552 	}
2553 
2554 	while ((m = pcq_get(txr->txr_intrq)) != NULL)
2555 		m_freem(m);
2556 	pcq_destroy(txr->txr_intrq);
2557 
2558 	ixl_dmamem_free(sc, &txr->txr_mem);
2559 	mutex_destroy(&txr->txr_lock);
2560 	kmem_free(maps, sizeof(maps[0]) * sc->sc_tx_ring_ndescs);
2561 	kmem_free(txr, sizeof(*txr));
2562 }
2563 
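/*
 * Load an outgoing mbuf chain into the DMA map.  If the chain has too
 * many segments (EFBIG), defragment it once and retry, counting defrag
 * successes and failures in the per-ring event counters.
 */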
2564 static inline int
2565 ixl_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf **m0,
2566     struct ixl_tx_ring *txr)
2567 {
2568 	struct mbuf *m;
2569 	int error;
2570 
2571 	KASSERT(mutex_owned(&txr->txr_lock));
2572 
2573 	m = *m0;
2574 
2575 	error = bus_dmamap_load_mbuf(dmat, map, m,
2576 	    BUS_DMA_STREAMING | BUS_DMA_WRITE | BUS_DMA_NOWAIT);
2577 	if (error != EFBIG)
2578 		return error;
2579 
2580 	m = m_defrag(m, M_DONTWAIT);
2581 	if (m != NULL) {
2582 		*m0 = m;
2583 		txr->txr_defragged.ev_count++;
2584 
2585 		error = bus_dmamap_load_mbuf(dmat, map, m,
2586 		    BUS_DMA_STREAMING | BUS_DMA_WRITE | BUS_DMA_NOWAIT);
2587 	} else {
2588 		txr->txr_defrag_failed.ev_count++;
2589 		error = ENOBUFS;
2590 	}
2591 
2592 	return error;
2593 }
2594 
2595 static inline int
2596 ixl_tx_setup_offloads(struct mbuf *m, uint64_t *cmd_txd)
2597 {
2598 	struct ether_header *eh;
2599 	size_t len;
2600 	uint64_t cmd;
2601 
2602 	cmd = 0;
2603 
2604 	eh = mtod(m, struct ether_header *);
2605 	switch (htons(eh->ether_type)) {
2606 	case ETHERTYPE_IP:
2607 	case ETHERTYPE_IPV6:
2608 		len = ETHER_HDR_LEN;
2609 		break;
2610 	case ETHERTYPE_VLAN:
2611 		len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2612 		break;
2613 	default:
2614 		len = 0;
2615 	}
2616 	cmd |= ((len >> 1) << IXL_TX_DESC_MACLEN_SHIFT);
2617 
2618 	if (m->m_pkthdr.csum_flags &
2619 	    (M_CSUM_TSOv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
2620 		cmd |= IXL_TX_DESC_CMD_IIPT_IPV4;
2621 	}
2622 	if (m->m_pkthdr.csum_flags & M_CSUM_IPv4) {
2623 		cmd |= IXL_TX_DESC_CMD_IIPT_IPV4_CSUM;
2624 	}
2625 
2626 	if (m->m_pkthdr.csum_flags &
2627 	    (M_CSUM_TSOv6 | M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
2628 		cmd |= IXL_TX_DESC_CMD_IIPT_IPV6;
2629 	}
2630 
2631 	switch (cmd & IXL_TX_DESC_CMD_IIPT_MASK) {
2632 	case IXL_TX_DESC_CMD_IIPT_IPV4:
2633 	case IXL_TX_DESC_CMD_IIPT_IPV4_CSUM:
2634 		len = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data);
2635 		break;
2636 	case IXL_TX_DESC_CMD_IIPT_IPV6:
2637 		len = M_CSUM_DATA_IPv6_IPHL(m->m_pkthdr.csum_data);
2638 		break;
2639 	default:
2640 		len = 0;
2641 	}
2642 	cmd |= ((len >> 2) << IXL_TX_DESC_IPLEN_SHIFT);
2643 
2644 	if (m->m_pkthdr.csum_flags &
2645 	    (M_CSUM_TSOv4 | M_CSUM_TSOv6 | M_CSUM_TCPv4 | M_CSUM_TCPv6)) {
2646 		len = sizeof(struct tcphdr);
2647 		cmd |= IXL_TX_DESC_CMD_L4T_EOFT_TCP;
2648 	} else if (m->m_pkthdr.csum_flags & (M_CSUM_UDPv4 | M_CSUM_UDPv6)) {
2649 		len = sizeof(struct udphdr);
2650 		cmd |= IXL_TX_DESC_CMD_L4T_EOFT_UDP;
2651 	} else {
2652 		len = 0;
2653 	}
2654 	cmd |= ((len >> 2) << IXL_TX_DESC_L4LEN_SHIFT);
2655 
2656 	*cmd_txd |= cmd;
2657 	return 0;
2658 }
2659 
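/*
 * Common transmit path for if_start and if_transmit: dequeue packets from
 * if_snd or the per-ring pcq, DMA-map each one, fill one data descriptor
 * per segment (adding checksum offload and VLAN tag insertion bits as
 * needed), mark the last segment EOP|RS, and advance the tail register.
 */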
2660 static void
2661 ixl_tx_common_locked(struct ifnet *ifp, struct ixl_tx_ring *txr,
2662     bool is_transmit)
2663 {
2664 	struct ixl_softc *sc = ifp->if_softc;
2665 	struct ixl_tx_desc *ring, *txd;
2666 	struct ixl_tx_map *txm;
2667 	bus_dmamap_t map;
2668 	struct mbuf *m;
2669 	uint64_t cmd, cmd_txd;
2670 	unsigned int prod, free, last, i;
2671 	unsigned int mask;
2672 	int post = 0;
2673 
2674 	KASSERT(mutex_owned(&txr->txr_lock));
2675 
2676 	if (!ISSET(ifp->if_flags, IFF_RUNNING)
2677 	    || (!is_transmit && ISSET(ifp->if_flags, IFF_OACTIVE))) {
2678 		if (!is_transmit)
2679 			IFQ_PURGE(&ifp->if_snd);
2680 		return;
2681 	}
2682 
2683 	prod = txr->txr_prod;
2684 	free = txr->txr_cons;
2685 	if (free <= prod)
2686 		free += sc->sc_tx_ring_ndescs;
2687 	free -= prod;
2688 
2689 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2690 	    0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTWRITE);
2691 
2692 	ring = IXL_DMA_KVA(&txr->txr_mem);
2693 	mask = sc->sc_tx_ring_ndescs - 1;
2694 	last = prod;
2695 	cmd = 0;
2696 	txd = NULL;
2697 
2698 	for (;;) {
2699 		if (free <= IXL_TX_PKT_DESCS) {
2700 			if (!is_transmit)
2701 				SET(ifp->if_flags, IFF_OACTIVE);
2702 			break;
2703 		}
2704 
2705 		if (is_transmit)
2706 			m = pcq_get(txr->txr_intrq);
2707 		else
2708 			IFQ_DEQUEUE(&ifp->if_snd, m);
2709 
2710 		if (m == NULL)
2711 			break;
2712 
2713 		txm = &txr->txr_maps[prod];
2714 		map = txm->txm_map;
2715 
2716 		if (ixl_load_mbuf(sc->sc_dmat, map, &m, txr) != 0) {
2717 			if_statinc(ifp, if_oerrors);
2718 			m_freem(m);
2719 			continue;
2720 		}
2721 
2722 		cmd_txd = 0;
2723 		if (m->m_pkthdr.csum_flags & IXL_CSUM_ALL_OFFLOAD) {
2724 			ixl_tx_setup_offloads(m, &cmd_txd);
2725 		}
2726 
2727 		if (vlan_has_tag(m)) {
2728 			uint16_t vtag;
2729 			vtag = htole16(vlan_get_tag(m));
2730 			cmd_txd |= (uint64_t)vtag <<
2731 			    IXL_TX_DESC_L2TAG1_SHIFT;
2732 			cmd_txd |= IXL_TX_DESC_CMD_IL2TAG1;
2733 		}
2734 
2735 		bus_dmamap_sync(sc->sc_dmat, map, 0,
2736 		    map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2737 
2738 		for (i = 0; i < (unsigned int)map->dm_nsegs; i++) {
2739 			txd = &ring[prod];
2740 
2741 			cmd = (uint64_t)map->dm_segs[i].ds_len <<
2742 			    IXL_TX_DESC_BSIZE_SHIFT;
2743 			cmd |= IXL_TX_DESC_DTYPE_DATA | IXL_TX_DESC_CMD_ICRC;
2744 			cmd |= cmd_txd;
2745 
2746 			txd->addr = htole64(map->dm_segs[i].ds_addr);
2747 			txd->cmd = htole64(cmd);
2748 
2749 			last = prod;
2750 
2751 			prod++;
2752 			prod &= mask;
2753 		}
2754 		cmd |= IXL_TX_DESC_CMD_EOP | IXL_TX_DESC_CMD_RS;
2755 		txd->cmd = htole64(cmd);
2756 
2757 		txm->txm_m = m;
2758 		txm->txm_eop = last;
2759 
2760 		bpf_mtap(ifp, m, BPF_D_OUT);
2761 
2762 		free -= i;
2763 		post = 1;
2764 	}
2765 
2766 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2767 	    0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREWRITE);
2768 
2769 	if (post) {
2770 		txr->txr_prod = prod;
2771 		ixl_wr(sc, txr->txr_tail, prod);
2772 	}
2773 }
2774 
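/*
 * Transmit completion: walk descriptors whose DTYPE reports DONE, unload
 * and free the mbufs, update the interface counters, and reschedule the
 * deferred transmit softint.  Returns nonzero if txlimit was exhausted
 * with work still pending.
 */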
2775 static int
2776 ixl_txeof(struct ixl_softc *sc, struct ixl_tx_ring *txr, u_int txlimit)
2777 {
2778 	struct ifnet *ifp = &sc->sc_ec.ec_if;
2779 	struct ixl_tx_desc *ring, *txd;
2780 	struct ixl_tx_map *txm;
2781 	struct mbuf *m;
2782 	bus_dmamap_t map;
2783 	unsigned int cons, prod, last;
2784 	unsigned int mask;
2785 	uint64_t dtype;
2786 	int done = 0, more = 0;
2787 
2788 	KASSERT(mutex_owned(&txr->txr_lock));
2789 
2790 	prod = txr->txr_prod;
2791 	cons = txr->txr_cons;
2792 
2793 	if (cons == prod)
2794 		return 0;
2795 
2796 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2797 	    0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTREAD);
2798 
2799 	ring = IXL_DMA_KVA(&txr->txr_mem);
2800 	mask = sc->sc_tx_ring_ndescs - 1;
2801 
2802 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
2803 
2804 	do {
2805 		if (txlimit-- <= 0) {
2806 			more = 1;
2807 			break;
2808 		}
2809 
2810 		txm = &txr->txr_maps[cons];
2811 		last = txm->txm_eop;
2812 		txd = &ring[last];
2813 
2814 		dtype = txd->cmd & htole64(IXL_TX_DESC_DTYPE_MASK);
2815 		if (dtype != htole64(IXL_TX_DESC_DTYPE_DONE))
2816 			break;
2817 
2818 		map = txm->txm_map;
2819 
2820 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2821 		    BUS_DMASYNC_POSTWRITE);
2822 		bus_dmamap_unload(sc->sc_dmat, map);
2823 
2824 		m = txm->txm_m;
2825 		if (m != NULL) {
2826 			if_statinc_ref(ifp, nsr, if_opackets);
2827 			if_statadd_ref(ifp, nsr, if_obytes, m->m_pkthdr.len);
2828 			if (ISSET(m->m_flags, M_MCAST))
2829 				if_statinc_ref(ifp, nsr, if_omcasts);
2830 			m_freem(m);
2831 		}
2832 
2833 		txm->txm_m = NULL;
2834 		txm->txm_eop = -1;
2835 
2836 		cons = last + 1;
2837 		cons &= mask;
2838 		done = 1;
2839 	} while (cons != prod);
2840 
2841 	IF_STAT_PUTREF(ifp);
2842 
2843 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2844 	    0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREREAD);
2845 
2846 	txr->txr_cons = cons;
2847 
2848 	if (done) {
2849 		softint_schedule(txr->txr_si);
2850 		if (txr->txr_qid == 0) {
2851 			CLR(ifp->if_flags, IFF_OACTIVE);
2852 			if_schedule_deferred_start(ifp);
2853 		}
2854 	}
2855 
2856 	return more;
2857 }
2858 
2859 static void
2860 ixl_start(struct ifnet *ifp)
2861 {
2862 	struct ixl_softc	*sc;
2863 	struct ixl_tx_ring	*txr;
2864 
2865 	sc = ifp->if_softc;
2866 	txr = sc->sc_qps[0].qp_txr;
2867 
2868 	mutex_enter(&txr->txr_lock);
2869 	ixl_tx_common_locked(ifp, txr, false);
2870 	mutex_exit(&txr->txr_lock);
2871 }
2872 
2873 static inline unsigned int
2874 ixl_select_txqueue(struct ixl_softc *sc, struct mbuf *m)
2875 {
2876 	u_int cpuid;
2877 
2878 	cpuid = cpu_index(curcpu());
2879 
2880 	return (unsigned int)(cpuid % sc->sc_nqueue_pairs);
2881 }
2882 
2883 static int
2884 ixl_transmit(struct ifnet *ifp, struct mbuf *m)
2885 {
2886 	struct ixl_softc *sc;
2887 	struct ixl_tx_ring *txr;
2888 	unsigned int qid;
2889 
2890 	sc = ifp->if_softc;
2891 	qid = ixl_select_txqueue(sc, m);
2892 
2893 	txr = sc->sc_qps[qid].qp_txr;
2894 
2895 	if (__predict_false(!pcq_put(txr->txr_intrq, m))) {
2896 		mutex_enter(&txr->txr_lock);
2897 		txr->txr_pcqdrop.ev_count++;
2898 		mutex_exit(&txr->txr_lock);
2899 
2900 		m_freem(m);
2901 		return ENOBUFS;
2902 	}
2903 
2904 #ifdef IXL_ALWAYS_TXDEFER
2905 	kpreempt_disable();
2906 	softint_schedule(txr->txr_si);
2907 	kpreempt_enable();
2908 #else
2909 	if (mutex_tryenter(&txr->txr_lock)) {
2910 		ixl_tx_common_locked(ifp, txr, true);
2911 		mutex_exit(&txr->txr_lock);
2912 	} else {
2913 		kpreempt_disable();
2914 		softint_schedule(txr->txr_si);
2915 		kpreempt_enable();
2916 	}
2917 #endif
2918 
2919 	return 0;
2920 }
2921 
2922 static void
2923 ixl_deferred_transmit(void *xtxr)
2924 {
2925 	struct ixl_tx_ring *txr = xtxr;
2926 	struct ixl_softc *sc = txr->txr_sc;
2927 	struct ifnet *ifp = &sc->sc_ec.ec_if;
2928 
2929 	mutex_enter(&txr->txr_lock);
2930 	txr->txr_transmitdef.ev_count++;
2931 	if (pcq_peek(txr->txr_intrq) != NULL)
2932 		ixl_tx_common_locked(ifp, txr, true);
2933 	mutex_exit(&txr->txr_lock);
2934 }
2935 
2936 static struct ixl_rx_ring *
2937 ixl_rxr_alloc(struct ixl_softc *sc, unsigned int qid)
2938 {
2939 	struct ixl_rx_ring *rxr = NULL;
2940 	struct ixl_rx_map *maps = NULL, *rxm;
2941 	unsigned int i;
2942 
2943 	rxr = kmem_zalloc(sizeof(*rxr), KM_SLEEP);
2944 	maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_rx_ring_ndescs,
2945 	    KM_SLEEP);
2946 
2947 	if (ixl_dmamem_alloc(sc, &rxr->rxr_mem,
2948 	    sizeof(struct ixl_rx_rd_desc_32) * sc->sc_rx_ring_ndescs,
2949 	    IXL_RX_QUEUE_ALIGN) != 0)
2950 		goto free;
2951 
2952 	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2953 		rxm = &maps[i];
2954 
2955 		if (bus_dmamap_create(sc->sc_dmat,
2956 		    IXL_MCLBYTES, 1, IXL_MCLBYTES, 0,
2957 		    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &rxm->rxm_map) != 0)
2958 			goto uncreate;
2959 
2960 		rxm->rxm_m = NULL;
2961 	}
2962 
2963 	rxr->rxr_cons = rxr->rxr_prod = 0;
2964 	rxr->rxr_m_head = NULL;
2965 	rxr->rxr_m_tail = &rxr->rxr_m_head;
2966 	rxr->rxr_maps = maps;
2967 
2968 	rxr->rxr_tail = I40E_QRX_TAIL(qid);
2969 	rxr->rxr_qid = qid;
2970 	mutex_init(&rxr->rxr_lock, MUTEX_DEFAULT, IPL_NET);
2971 
2972 	return rxr;
2973 
2974 uncreate:
2975 	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2976 		rxm = &maps[i];
2977 
2978 		if (rxm->rxm_map == NULL)
2979 			continue;
2980 
2981 		bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
2982 	}
2983 
2984 	ixl_dmamem_free(sc, &rxr->rxr_mem);
2985 free:
2986 	kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs);
2987 	kmem_free(rxr, sizeof(*rxr));
2988 
2989 	return NULL;
2990 }
2991 
2992 static void
2993 ixl_rxr_clean(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
2994 {
2995 	struct ixl_rx_map *maps, *rxm;
2996 	bus_dmamap_t map;
2997 	unsigned int i;
2998 
2999 	maps = rxr->rxr_maps;
3000 	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
3001 		rxm = &maps[i];
3002 
3003 		if (rxm->rxm_m == NULL)
3004 			continue;
3005 
3006 		map = rxm->rxm_map;
3007 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3008 		    BUS_DMASYNC_POSTWRITE);
3009 		bus_dmamap_unload(sc->sc_dmat, map);
3010 
3011 		m_freem(rxm->rxm_m);
3012 		rxm->rxm_m = NULL;
3013 	}
3014 
3015 	m_freem(rxr->rxr_m_head);
3016 	rxr->rxr_m_head = NULL;
3017 	rxr->rxr_m_tail = &rxr->rxr_m_head;
3018 
3019 	rxr->rxr_prod = rxr->rxr_cons = 0;
3020 }
3021 
3022 static int
3023 ixl_rxr_enabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3024 {
3025 	bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid);
3026 	uint32_t reg;
3027 	int i;
3028 
3029 	for (i = 0; i < 10; i++) {
3030 		reg = ixl_rd(sc, ena);
3031 		if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK))
3032 			return 0;
3033 
3034 		delaymsec(10);
3035 	}
3036 
3037 	return ETIMEDOUT;
3038 }
3039 
3040 static int
3041 ixl_rxr_disabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3042 {
3043 	bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid);
3044 	uint32_t reg;
3045 	int i;
3046 
3047 	KASSERT(mutex_owned(&rxr->rxr_lock));
3048 
3049 	for (i = 0; i < 10; i++) {
3050 		reg = ixl_rd(sc, ena);
3051 		if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK) == 0)
3052 			return 0;
3053 
3054 		delaymsec(10);
3055 	}
3056 
3057 	return ETIMEDOUT;
3058 }
3059 
3060 static void
3061 ixl_rxr_config(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3062 {
3063 	struct ixl_hmc_rxq rxq;
3064 	struct ifnet *ifp = &sc->sc_ec.ec_if;
3065 	uint16_t rxmax;
3066 	void *hmc;
3067 
3068 	memset(&rxq, 0, sizeof(rxq));
3069 	rxmax = ifp->if_mtu + IXL_MTU_ETHERLEN;
3070 	if (!ISSET(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_MTU))
3071 		rxmax -= ETHER_VLAN_ENCAP_LEN;
3072 
3073 	rxq.head = htole16(rxr->rxr_cons);
3074 	rxq.base = htole64(IXL_DMA_DVA(&rxr->rxr_mem) / IXL_HMC_RXQ_BASE_UNIT);
3075 	rxq.qlen = htole16(sc->sc_rx_ring_ndescs);
3076 	rxq.dbuff = htole16(IXL_MCLBYTES / IXL_HMC_RXQ_DBUFF_UNIT);
3077 	rxq.hbuff = 0;
3078 	rxq.dtype = IXL_HMC_RXQ_DTYPE_NOSPLIT;
3079 	rxq.dsize = IXL_HMC_RXQ_DSIZE_32;
3080 	rxq.crcstrip = 1;
3081 	rxq.l2sel = 1;
3082 	rxq.showiv = 1;
3083 	rxq.rxmax = htole16(rxmax);
3084 	rxq.tphrdesc_ena = 0;
3085 	rxq.tphwdesc_ena = 0;
3086 	rxq.tphdata_ena = 0;
3087 	rxq.tphhead_ena = 0;
3088 	rxq.lrxqthresh = 0;
3089 	rxq.prefena = 1;
3090 
3091 	hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid);
3092 	memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX));
3093 	ixl_hmc_pack(hmc, &rxq, ixl_hmc_pack_rxq,
3094 	    __arraycount(ixl_hmc_pack_rxq));
3095 }
3096 
3097 static void
3098 ixl_rxr_unconfig(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3099 {
3100 	void *hmc;
3101 
3102 	hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid);
3103 	memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX));
3104 	rxr->rxr_cons = rxr->rxr_prod = 0;
3105 }
3106 
3107 static void
3108 ixl_rxr_free(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3109 {
3110 	struct ixl_rx_map *maps, *rxm;
3111 	unsigned int i;
3112 
3113 	maps = rxr->rxr_maps;
3114 	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
3115 		rxm = &maps[i];
3116 
3117 		bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
3118 	}
3119 
3120 	ixl_dmamem_free(sc, &rxr->rxr_mem);
3121 	mutex_destroy(&rxr->rxr_lock);
3122 	kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs);
3123 	kmem_free(rxr, sizeof(*rxr));
3124 }
3125 
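/*
 * Translate the hardware's L3/L4 checksum status in the rx writeback
 * descriptor into mbuf csum_flags, marking bad IPv4 or TCP/UDP checksums
 * when the IPE or L4E error bits are set.
 */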
3126 static inline void
3127 ixl_rx_csum(struct mbuf *m, uint64_t qword)
3128 {
3129 	int flags_mask;
3130 
3131 	if (!ISSET(qword, IXL_RX_DESC_L3L4P)) {
3132 		/* No L3 or L4 checksum was calculated */
3133 		return;
3134 	}
3135 
3136 	switch (__SHIFTOUT(qword, IXL_RX_DESC_PTYPE_MASK)) {
3137 	case IXL_RX_DESC_PTYPE_IPV4FRAG:
3138 	case IXL_RX_DESC_PTYPE_IPV4:
3139 	case IXL_RX_DESC_PTYPE_SCTPV4:
3140 	case IXL_RX_DESC_PTYPE_ICMPV4:
3141 		flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
3142 		break;
3143 	case IXL_RX_DESC_PTYPE_TCPV4:
3144 		flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
3145 		flags_mask |= M_CSUM_TCPv4 | M_CSUM_TCP_UDP_BAD;
3146 		break;
3147 	case IXL_RX_DESC_PTYPE_UDPV4:
3148 		flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
3149 		flags_mask |= M_CSUM_UDPv4 | M_CSUM_TCP_UDP_BAD;
3150 		break;
3151 	case IXL_RX_DESC_PTYPE_TCPV6:
3152 		flags_mask = M_CSUM_TCPv6 | M_CSUM_TCP_UDP_BAD;
3153 		break;
3154 	case IXL_RX_DESC_PTYPE_UDPV6:
3155 		flags_mask = M_CSUM_UDPv6 | M_CSUM_TCP_UDP_BAD;
3156 		break;
3157 	default:
3158 		flags_mask = 0;
3159 	}
3160 
3161 	m->m_pkthdr.csum_flags |= (flags_mask & (M_CSUM_IPv4 |
3162 	    M_CSUM_TCPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv4 | M_CSUM_UDPv6));
3163 
3164 	if (ISSET(qword, IXL_RX_DESC_IPE)) {
3165 		m->m_pkthdr.csum_flags |= (flags_mask & M_CSUM_IPv4_BAD);
3166 	}
3167 
3168 	if (ISSET(qword, IXL_RX_DESC_L4E)) {
3169 		m->m_pkthdr.csum_flags |= (flags_mask & M_CSUM_TCP_UDP_BAD);
3170 	}
3171 }
3172 
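/*
 * Receive completion: consume writeback descriptors, chain fragments
 * until EOP, apply the VLAN tag and checksum status, hand good packets
 * to the stack via if_percpuq_enqueue() and count errors, then refill
 * the ring.  Returns nonzero if rxlimit was exhausted with descriptors
 * still pending.
 */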
3173 static int
3174 ixl_rxeof(struct ixl_softc *sc, struct ixl_rx_ring *rxr, u_int rxlimit)
3175 {
3176 	struct ifnet *ifp = &sc->sc_ec.ec_if;
3177 	struct ixl_rx_wb_desc_32 *ring, *rxd;
3178 	struct ixl_rx_map *rxm;
3179 	bus_dmamap_t map;
3180 	unsigned int cons, prod;
3181 	struct mbuf *m;
3182 	uint64_t word, word0;
3183 	unsigned int len;
3184 	unsigned int mask;
3185 	int done = 0, more = 0;
3186 
3187 	KASSERT(mutex_owned(&rxr->rxr_lock));
3188 
3189 	if (!ISSET(ifp->if_flags, IFF_RUNNING))
3190 		return 0;
3191 
3192 	prod = rxr->rxr_prod;
3193 	cons = rxr->rxr_cons;
3194 
3195 	if (cons == prod)
3196 		return 0;
3197 
3198 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
3199 	    0, IXL_DMA_LEN(&rxr->rxr_mem),
3200 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3201 
3202 	ring = IXL_DMA_KVA(&rxr->rxr_mem);
3203 	mask = sc->sc_rx_ring_ndescs - 1;
3204 
3205 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
3206 
3207 	do {
3208 		if (rxlimit-- <= 0) {
3209 			more = 1;
3210 			break;
3211 		}
3212 
3213 		rxd = &ring[cons];
3214 
3215 		word = le64toh(rxd->qword1);
3216 
3217 		if (!ISSET(word, IXL_RX_DESC_DD))
3218 			break;
3219 
3220 		rxm = &rxr->rxr_maps[cons];
3221 
3222 		map = rxm->rxm_map;
3223 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3224 		    BUS_DMASYNC_POSTREAD);
3225 		bus_dmamap_unload(sc->sc_dmat, map);
3226 
3227 		m = rxm->rxm_m;
3228 		rxm->rxm_m = NULL;
3229 
3230 		KASSERT(m != NULL);
3231 
3232 		len = (word & IXL_RX_DESC_PLEN_MASK) >> IXL_RX_DESC_PLEN_SHIFT;
3233 		m->m_len = len;
3234 		m->m_pkthdr.len = 0;
3235 
3236 		m->m_next = NULL;
3237 		*rxr->rxr_m_tail = m;
3238 		rxr->rxr_m_tail = &m->m_next;
3239 
3240 		m = rxr->rxr_m_head;
3241 		m->m_pkthdr.len += len;
3242 
3243 		if (ISSET(word, IXL_RX_DESC_EOP)) {
3244 			word0 = le64toh(rxd->qword0);
3245 
3246 			if (ISSET(word, IXL_RX_DESC_L2TAG1P)) {
3247 				uint16_t vtag;
3248 				vtag = __SHIFTOUT(word0, IXL_RX_DESC_L2TAG1_MASK);
3249 				vlan_set_tag(m, le16toh(vtag));
3250 			}
3251 
3252 			if ((ifp->if_capenable & IXL_IFCAP_RXCSUM) != 0)
3253 				ixl_rx_csum(m, word);
3254 
3255 			if (!ISSET(word,
3256 			    IXL_RX_DESC_RXE | IXL_RX_DESC_OVERSIZE)) {
3257 				m_set_rcvif(m, ifp);
3258 				if_statinc_ref(ifp, nsr, if_ipackets);
3259 				if_statadd_ref(ifp, nsr, if_ibytes,
3260 				    m->m_pkthdr.len);
3261 				if_percpuq_enqueue(sc->sc_ipq, m);
3262 			} else {
3263 				if_statinc_ref(ifp, nsr, if_ierrors);
3264 				m_freem(m);
3265 			}
3266 
3267 			rxr->rxr_m_head = NULL;
3268 			rxr->rxr_m_tail = &rxr->rxr_m_head;
3269 		}
3270 
3271 		cons++;
3272 		cons &= mask;
3273 
3274 		done = 1;
3275 	} while (cons != prod);
3276 
3277 	if (done) {
3278 		rxr->rxr_cons = cons;
3279 		if (ixl_rxfill(sc, rxr) == -1)
3280 			if_statinc_ref(ifp, nsr, if_iqdrops);
3281 	}
3282 
3283 	IF_STAT_PUTREF(ifp);
3284 
3285 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
3286 	    0, IXL_DMA_LEN(&rxr->rxr_mem),
3287 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3288 
3289 	return more;
3290 }
3291 
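/*
 * Refill the rx ring: allocate a cluster mbuf for each unrefreshed slot,
 * DMA-map it into the read descriptor, and write the new producer index
 * to the tail register.  Returns -1 if an allocation or mapping fails.
 */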
3292 static int
3293 ixl_rxfill(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3294 {
3295 	struct ixl_rx_rd_desc_32 *ring, *rxd;
3296 	struct ixl_rx_map *rxm;
3297 	bus_dmamap_t map;
3298 	struct mbuf *m;
3299 	unsigned int prod;
3300 	unsigned int slots;
3301 	unsigned int mask;
3302 	int post = 0, error = 0;
3303 
3304 	KASSERT(mutex_owned(&rxr->rxr_lock));
3305 
3306 	prod = rxr->rxr_prod;
3307 	slots = ixl_rxr_unrefreshed(rxr->rxr_prod, rxr->rxr_cons,
3308 	    sc->sc_rx_ring_ndescs);
3309 
3310 	ring = IXL_DMA_KVA(&rxr->rxr_mem);
3311 	mask = sc->sc_rx_ring_ndescs - 1;
3312 
3313 	if (__predict_false(slots <= 0))
3314 		return -1;
3315 
3316 	do {
3317 		rxm = &rxr->rxr_maps[prod];
3318 
3319 		MGETHDR(m, M_DONTWAIT, MT_DATA);
3320 		if (m == NULL) {
3321 			rxr->rxr_mgethdr_failed.ev_count++;
3322 			error = -1;
3323 			break;
3324 		}
3325 
3326 		MCLGET(m, M_DONTWAIT);
3327 		if (!ISSET(m->m_flags, M_EXT)) {
3328 			rxr->rxr_mgetcl_failed.ev_count++;
3329 			error = -1;
3330 			m_freem(m);
3331 			break;
3332 		}
3333 
3334 		m->m_len = m->m_pkthdr.len = MCLBYTES;
3335 		m_adj(m, ETHER_ALIGN);
3336 
3337 		map = rxm->rxm_map;
3338 
3339 		if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
3340 		    BUS_DMA_READ | BUS_DMA_NOWAIT) != 0) {
3341 			rxr->rxr_mbuf_load_failed.ev_count++;
3342 			error = -1;
3343 			m_freem(m);
3344 			break;
3345 		}
3346 
3347 		rxm->rxm_m = m;
3348 
3349 		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3350 		    BUS_DMASYNC_PREREAD);
3351 
3352 		rxd = &ring[prod];
3353 
3354 		rxd->paddr = htole64(map->dm_segs[0].ds_addr);
3355 		rxd->haddr = htole64(0);
3356 
3357 		prod++;
3358 		prod &= mask;
3359 
3360 		post = 1;
3361 
3362 	} while (--slots);
3363 
3364 	if (post) {
3365 		rxr->rxr_prod = prod;
3366 		ixl_wr(sc, rxr->rxr_tail, prod);
3367 	}
3368 
3369 	return error;
3370 }
3371 
3372 static inline int
3373 ixl_handle_queue_common(struct ixl_softc *sc, struct ixl_queue_pair *qp,
3374     u_int txlimit, struct evcnt *txevcnt,
3375     u_int rxlimit, struct evcnt *rxevcnt)
3376 {
3377 	struct ixl_tx_ring *txr = qp->qp_txr;
3378 	struct ixl_rx_ring *rxr = qp->qp_rxr;
3379 	int txmore, rxmore;
3380 	int rv;
3381 
3382 	mutex_enter(&txr->txr_lock);
3383 	txevcnt->ev_count++;
3384 	txmore = ixl_txeof(sc, txr, txlimit);
3385 	mutex_exit(&txr->txr_lock);
3386 
3387 	mutex_enter(&rxr->rxr_lock);
3388 	rxevcnt->ev_count++;
3389 	rxmore = ixl_rxeof(sc, rxr, rxlimit);
3390 	mutex_exit(&rxr->rxr_lock);
3391 
3392 	rv = txmore | (rxmore << 1);
3393 
3394 	return rv;
3395 }
3396 
3397 static void
3398 ixl_sched_handle_queue(struct ixl_softc *sc, struct ixl_queue_pair *qp)
3399 {
3400 
3401 	if (qp->qp_workqueue)
3402 		workqueue_enqueue(sc->sc_workq_txrx, &qp->qp_work, NULL);
3403 	else
3404 		softint_schedule(qp->qp_si);
3405 }
3406 
3407 static int
3408 ixl_intr(void *xsc)
3409 {
3410 	struct ixl_softc *sc = xsc;
3411 	struct ixl_tx_ring *txr;
3412 	struct ixl_rx_ring *rxr;
3413 	uint32_t icr, rxintr, txintr;
3414 	int rv = 0;
3415 	unsigned int i;
3416 
3417 	KASSERT(sc != NULL);
3418 
3419 	ixl_enable_other_intr(sc);
3420 	icr = ixl_rd(sc, I40E_PFINT_ICR0);
3421 
3422 	if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)) {
3423 		atomic_inc_64(&sc->sc_event_atq.ev_count);
3424 		ixl_atq_done(sc);
3425 		ixl_work_add(sc->sc_workq, &sc->sc_arq_task);
3426 		rv = 1;
3427 	}
3428 
3429 	if (ISSET(icr, I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)) {
3430 		atomic_inc_64(&sc->sc_event_link.ev_count);
3431 		ixl_work_add(sc->sc_workq, &sc->sc_link_state_task);
3432 		rv = 1;
3433 	}
3434 
3435 	rxintr = icr & I40E_INTR_NOTX_RX_MASK;
3436 	txintr = icr & I40E_INTR_NOTX_TX_MASK;
3437 
3438 	if (txintr || rxintr) {
3439 		for (i = 0; i < sc->sc_nqueue_pairs; i++) {
3440 			txr = sc->sc_qps[i].qp_txr;
3441 			rxr = sc->sc_qps[i].qp_rxr;
3442 
3443 			ixl_handle_queue_common(sc, &sc->sc_qps[i],
3444 			    IXL_TXRX_PROCESS_UNLIMIT, &txr->txr_intr,
3445 			    IXL_TXRX_PROCESS_UNLIMIT, &rxr->rxr_intr);
3446 		}
3447 		rv = 1;
3448 	}
3449 
3450 	return rv;
3451 }
3452 
3453 static int
3454 ixl_queue_intr(void *xqp)
3455 {
3456 	struct ixl_queue_pair *qp = xqp;
3457 	struct ixl_tx_ring *txr = qp->qp_txr;
3458 	struct ixl_rx_ring *rxr = qp->qp_rxr;
3459 	struct ixl_softc *sc = qp->qp_sc;
3460 	u_int txlimit, rxlimit;
3461 	int more;
3462 
3463 	txlimit = sc->sc_tx_intr_process_limit;
3464 	rxlimit = sc->sc_rx_intr_process_limit;
3465 	qp->qp_workqueue = sc->sc_txrx_workqueue;
3466 
3467 	more = ixl_handle_queue_common(sc, qp,
3468 	    txlimit, &txr->txr_intr, rxlimit, &rxr->rxr_intr);
3469 
3470 	if (more != 0) {
3471 		ixl_sched_handle_queue(sc, qp);
3472 	} else {
3473 		/* for ALTQ */
3474 		if (txr->txr_qid == 0)
3475 			if_schedule_deferred_start(&sc->sc_ec.ec_if);
3476 		softint_schedule(txr->txr_si);
3477 
3478 		ixl_enable_queue_intr(sc, qp);
3479 	}
3480 
3481 	return 1;
3482 }
3483 
3484 static void
3485 ixl_handle_queue_wk(struct work *wk, void *xsc)
3486 {
3487 	struct ixl_queue_pair *qp;
3488 
3489 	qp = container_of(wk, struct ixl_queue_pair, qp_work);
3490 	ixl_handle_queue(qp);
3491 }
3492 
3493 static void
3494 ixl_handle_queue(void *xqp)
3495 {
3496 	struct ixl_queue_pair *qp = xqp;
3497 	struct ixl_softc *sc = qp->qp_sc;
3498 	struct ixl_tx_ring *txr = qp->qp_txr;
3499 	struct ixl_rx_ring *rxr = qp->qp_rxr;
3500 	u_int txlimit, rxlimit;
3501 	int more;
3502 
3503 	txlimit = sc->sc_tx_process_limit;
3504 	rxlimit = sc->sc_rx_process_limit;
3505 
3506 	more = ixl_handle_queue_common(sc, qp,
3507 	    txlimit, &txr->txr_defer, rxlimit, &rxr->rxr_defer);
3508 
3509 	if (more != 0)
3510 		ixl_sched_handle_queue(sc, qp);
3511 	else
3512 		ixl_enable_queue_intr(sc, qp);
3513 }
3514 
3515 static inline void
3516 ixl_print_hmc_error(struct ixl_softc *sc, uint32_t reg)
3517 {
3518 	uint32_t hmc_idx, hmc_isvf;
3519 	uint32_t hmc_errtype, hmc_objtype, hmc_data;
3520 
3521 	hmc_idx = reg & I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK;
3522 	hmc_idx = hmc_idx >> I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT;
3523 	hmc_isvf = reg & I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK;
3524 	hmc_isvf = hmc_isvf >> I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT;
3525 	hmc_errtype = reg & I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK;
3526 	hmc_errtype = hmc_errtype >> I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT;
3527 	hmc_objtype = reg & I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK;
3528 	hmc_objtype = hmc_objtype >> I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT;
3529 	hmc_data = ixl_rd(sc, I40E_PFHMC_ERRORDATA);
3530 
3531 	device_printf(sc->sc_dev,
3532 	    "HMC Error (idx=0x%x, isvf=0x%x, err=0x%x, obj=0x%x, data=0x%x)\n",
3533 	    hmc_idx, hmc_isvf, hmc_errtype, hmc_objtype, hmc_data);
3534 }
3535 
3536 static int
3537 ixl_other_intr(void *xsc)
3538 {
3539 	struct ixl_softc *sc = xsc;
3540 	uint32_t icr, mask, reg;
3541 	int rv = 0;
3542 
3543 	icr = ixl_rd(sc, I40E_PFINT_ICR0);
3544 	mask = ixl_rd(sc, I40E_PFINT_ICR0_ENA);
3545 
3546 	if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)) {
3547 		atomic_inc_64(&sc->sc_event_atq.ev_count);
3548 		ixl_atq_done(sc);
3549 		ixl_work_add(sc->sc_workq, &sc->sc_arq_task);
3550 		rv = 1;
3551 	}
3552 
3553 	if (ISSET(icr, I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)) {
3554 		if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3555 			device_printf(sc->sc_dev, "link stat changed\n");
3556 
3557 		atomic_inc_64(&sc->sc_event_link.ev_count);
3558 		ixl_work_add(sc->sc_workq, &sc->sc_link_state_task);
3559 		rv = 1;
3560 	}
3561 
3562 	if (ISSET(icr, I40E_PFINT_ICR0_GRST_MASK)) {
3563 		CLR(mask, I40E_PFINT_ICR0_ENA_GRST_MASK);
3564 		reg = ixl_rd(sc, I40E_GLGEN_RSTAT);
3565 		reg = reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK;
3566 		reg = reg >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
3567 
3568 		device_printf(sc->sc_dev, "GRST: %s\n",
3569 		    reg == I40E_RESET_CORER ? "CORER" :
3570 		    reg == I40E_RESET_GLOBR ? "GLOBR" :
3571 		    reg == I40E_RESET_EMPR ? "EMPR" :
3572 		    "POR");
3573 	}
3574 
3575 	if (ISSET(icr, I40E_PFINT_ICR0_ECC_ERR_MASK))
3576 		atomic_inc_64(&sc->sc_event_ecc_err.ev_count);
3577 	if (ISSET(icr, I40E_PFINT_ICR0_PCI_EXCEPTION_MASK))
3578 		atomic_inc_64(&sc->sc_event_pci_exception.ev_count);
3579 	if (ISSET(icr, I40E_PFINT_ICR0_PE_CRITERR_MASK))
3580 		atomic_inc_64(&sc->sc_event_crit_err.ev_count);
3581 
3582 	if (ISSET(icr, IXL_ICR0_CRIT_ERR_MASK)) {
3583 		CLR(mask, IXL_ICR0_CRIT_ERR_MASK);
3584 		device_printf(sc->sc_dev, "critical error\n");
3585 	}
3586 
3587 	if (ISSET(icr, I40E_PFINT_ICR0_HMC_ERR_MASK)) {
3588 		reg = ixl_rd(sc, I40E_PFHMC_ERRORINFO);
3589 		if (ISSET(reg, I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK))
3590 			ixl_print_hmc_error(sc, reg);
3591 		ixl_wr(sc, I40E_PFHMC_ERRORINFO, 0);
3592 	}
3593 
3594 	ixl_wr(sc, I40E_PFINT_ICR0_ENA, mask);
3595 	ixl_flush(sc);
3596 	ixl_enable_other_intr(sc);
3597 	return rv;
3598 }
3599 
3600 static void
3601 ixl_get_link_status_done_work(void *xsc)
3602 {
3603 	struct ixl_softc *sc = xsc;
3604 	struct ixl_aq_desc *iaq, iaq_buf;
3605 
3606 	mutex_enter(&sc->sc_atq_lock);
3607 	iaq = &sc->sc_link_state_atq.iatq_desc;
3608 	iaq_buf = *iaq;
3609 	mutex_exit(&sc->sc_atq_lock);
3610 
3611 	ixl_link_state_update(sc, &iaq_buf);
3612 
3613 	mutex_enter(&sc->sc_atq_lock);
3614 	CLR(iaq->iaq_flags, htole16(IXL_AQ_DD));
3615 	ixl_wakeup(sc, iaq);
3616 	mutex_exit(&sc->sc_atq_lock);
3617 }
3618 
3619 static void
3620 ixl_get_link_status_done(struct ixl_softc *sc,
3621     const struct ixl_aq_desc *iaq)
3622 {
3623 
3624 	ixl_work_add(sc->sc_workq, &sc->sc_link_state_done_task);
3625 }
3626 
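/*
 * Post a GET LINK STATUS admin command using the preallocated
 * sc_link_state_atq slot.  With IXL_LINK_FLAG_WAITDONE the caller sleeps
 * on sc_atq_cv until the completion handler has run or the wait times
 * out; otherwise the command completes asynchronously.
 */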
3627 static int
3628 ixl_get_link_status(struct ixl_softc *sc, enum ixl_link_flags flags)
3629 {
3630 	struct ixl_atq *iatq;
3631 	struct ixl_aq_desc *iaq;
3632 	struct ixl_aq_link_param *param;
3633 	int error;
3634 
3635 	mutex_enter(&sc->sc_atq_lock);
3636 
3637 	iatq = &sc->sc_link_state_atq;
3638 	iaq = &iatq->iatq_desc;
3639 
3640 	if (!sc->sc_link_state_atq.iatq_inuse &&
3641 	    !ISSET(iaq->iaq_flags, htole16(IXL_AQ_DD))) {
3642 		memset(iaq, 0, sizeof(*iaq));
3643 		iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS);
3644 		param = (struct ixl_aq_link_param *)iaq->iaq_param;
3645 		param->notify = IXL_AQ_LINK_NOTIFY;
3646 
3647 		KASSERT(iatq->iatq_fn == ixl_get_link_status_done);
3648 		error = ixl_atq_post_locked(sc, iatq);
3649 		if (error != 0)
3650 			goto out;
3651 	} else {
3652 		/* the previous command is not completed */
3653 		error = EBUSY;
3654 	}
3655 
3656 	if (ISSET(flags, IXL_LINK_FLAG_WAITDONE)) {
3657 		do {
3658 			error = cv_timedwait(&sc->sc_atq_cv, &sc->sc_atq_lock,
3659 			    IXL_ATQ_EXEC_TIMEOUT);
3660 			if (error == EWOULDBLOCK)
3661 				break;
3662 		} while (iatq->iatq_inuse ||
3663 		    ISSET(iaq->iaq_flags, htole16(IXL_AQ_DD)));
3664 	}
3665 
3666 out:
3667 	mutex_exit(&sc->sc_atq_lock);
3668 
3669 	return error;
3670 }
3671 
3672 static void
3673 ixl_get_link_status_work(void *xsc)
3674 {
3675 	struct ixl_softc *sc = xsc;
3676 
3677 	/*
3678 	 * IXL_LINK_FLAG_WAITDONE would deadlock here because
3679 	 * ixl_get_link_status_done_work() runs on the same
3680 	 * workqueue.
3681 	 */
3682 	(void)ixl_get_link_status(sc, IXL_LINK_NOFLAGS);
3683 }
3684 
3685 static void
3686 ixl_link_state_update(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
3687 {
3688 	struct ifnet *ifp = &sc->sc_ec.ec_if;
3689 	int link_state;
3690 
3691 	mutex_enter(&sc->sc_cfg_lock);
3692 	link_state = ixl_set_link_status_locked(sc, iaq);
3693 	mutex_exit(&sc->sc_cfg_lock);
3694 
3695 	if (ifp->if_link_state != link_state)
3696 		if_link_state_change(ifp, link_state);
3697 
3698 	if (link_state != LINK_STATE_DOWN) {
3699 		kpreempt_disable();
3700 		if_schedule_deferred_start(ifp);
3701 		kpreempt_enable();
3702 	}
3703 }
3704 
3705 static void
3706 ixl_aq_dump(const struct ixl_softc *sc, const struct ixl_aq_desc *iaq,
3707     const char *msg)
3708 {
3709 	char	 buf[512];
3710 	size_t	 len;
3711 
3712 	len = sizeof(buf);
3713 	buf[--len] = '\0';
3714 
3715 	device_printf(sc->sc_dev, "%s\n", msg);
3716 	snprintb(buf, len, IXL_AQ_FLAGS_FMT, le16toh(iaq->iaq_flags));
3717 	device_printf(sc->sc_dev, "flags %s opcode %04x\n",
3718 	    buf, le16toh(iaq->iaq_opcode));
3719 	device_printf(sc->sc_dev, "datalen %u retval %u\n",
3720 	    le16toh(iaq->iaq_datalen), le16toh(iaq->iaq_retval));
3721 	device_printf(sc->sc_dev, "cookie %016" PRIx64 "\n", iaq->iaq_cookie);
3722 	device_printf(sc->sc_dev, "%08x %08x %08x %08x\n",
3723 	    le32toh(iaq->iaq_param[0]), le32toh(iaq->iaq_param[1]),
3724 	    le32toh(iaq->iaq_param[2]), le32toh(iaq->iaq_param[3]));
3725 }
3726 
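/*
 * Drain the admin receive queue: walk the descriptors between the
 * driver's consumer index and the hardware head pointer, dispatch known
 * events (currently link status updates), recycle the buffers and refill
 * the ring.
 */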
3727 static void
3728 ixl_arq(void *xsc)
3729 {
3730 	struct ixl_softc *sc = xsc;
3731 	struct ixl_aq_desc *arq, *iaq;
3732 	struct ixl_aq_buf *aqb;
3733 	unsigned int cons = sc->sc_arq_cons;
3734 	unsigned int prod;
3735 	int done = 0;
3736 
3737 	prod = ixl_rd(sc, sc->sc_aq_regs->arq_head) &
3738 	    sc->sc_aq_regs->arq_head_mask;
3739 
3740 	if (cons == prod)
3741 		goto done;
3742 
3743 	arq = IXL_DMA_KVA(&sc->sc_arq);
3744 
3745 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
3746 	    0, IXL_DMA_LEN(&sc->sc_arq),
3747 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3748 
3749 	do {
3750 		iaq = &arq[cons];
3751 		aqb = sc->sc_arq_live[cons];
3752 
3753 		KASSERT(aqb != NULL);
3754 
3755 		bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN,
3756 		    BUS_DMASYNC_POSTREAD);
3757 
3758 		if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3759 			ixl_aq_dump(sc, iaq, "arq event");
3760 
3761 		switch (iaq->iaq_opcode) {
3762 		case htole16(IXL_AQ_OP_PHY_LINK_STATUS):
3763 			ixl_link_state_update(sc, iaq);
3764 			break;
3765 		}
3766 
3767 		memset(iaq, 0, sizeof(*iaq));
3768 		sc->sc_arq_live[cons] = NULL;
3769 		SIMPLEQ_INSERT_TAIL(&sc->sc_arq_idle, aqb, aqb_entry);
3770 
3771 		cons++;
3772 		cons &= IXL_AQ_MASK;
3773 
3774 		done = 1;
3775 	} while (cons != prod);
3776 
3777 	if (done) {
3778 		sc->sc_arq_cons = cons;
3779 		ixl_arq_fill(sc);
3780 		bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
3781 		    0, IXL_DMA_LEN(&sc->sc_arq),
3782 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3783 	}
3784 
3785 done:
3786 	ixl_enable_other_intr(sc);
3787 }
3788 
3789 static void
3790 ixl_atq_set(struct ixl_atq *iatq,
3791     void (*fn)(struct ixl_softc *, const struct ixl_aq_desc *))
3792 {
3793 
3794 	iatq->iatq_fn = fn;
3795 }
3796 
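/*
 * Queue one admin command on the ATQ ring.  The descriptor's cookie
 * carries the struct ixl_atq pointer so ixl_atq_done_locked() can find
 * the completion callback.  Returns ENOMEM when the ring is full.
 */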
3797 static int
3798 ixl_atq_post_locked(struct ixl_softc *sc, struct ixl_atq *iatq)
3799 {
3800 	struct ixl_aq_desc *atq, *slot;
3801 	unsigned int prod, cons, prod_next;
3802 
3803 	/* assert locked */
3804 	KASSERT(mutex_owned(&sc->sc_atq_lock));
3805 
3806 	atq = IXL_DMA_KVA(&sc->sc_atq);
3807 	prod = sc->sc_atq_prod;
3808 	cons = sc->sc_atq_cons;
3809 	prod_next = (prod + 1) & IXL_AQ_MASK;
3810 
3811 	if (cons == prod_next)
3812 		return ENOMEM;
3813 
3814 	slot = &atq[prod];
3815 
3816 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3817 	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
3818 
3819 	KASSERT(iatq->iatq_fn != NULL);
3820 	*slot = iatq->iatq_desc;
3821 	slot->iaq_cookie = (uint64_t)((intptr_t)iatq);
3822 
3823 	if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3824 		ixl_aq_dump(sc, slot, "atq command");
3825 
3826 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3827 	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
3828 
3829 	sc->sc_atq_prod = prod_next;
3830 	ixl_wr(sc, sc->sc_aq_regs->atq_tail, sc->sc_atq_prod);
3831 	iatq->iatq_inuse = true;
3832 
3833 	return 0;
3834 }
3835 
3836 static void
3837 ixl_atq_done_locked(struct ixl_softc *sc)
3838 {
3839 	struct ixl_aq_desc *atq, *slot;
3840 	struct ixl_atq *iatq;
3841 	unsigned int cons;
3842 	unsigned int prod;
3843 
3844 	KASSERT(mutex_owned(&sc->sc_atq_lock));
3845 
3846 	prod = sc->sc_atq_prod;
3847 	cons = sc->sc_atq_cons;
3848 
3849 	if (prod == cons)
3850 		return;
3851 
3852 	atq = IXL_DMA_KVA(&sc->sc_atq);
3853 
3854 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3855 	    0, IXL_DMA_LEN(&sc->sc_atq),
3856 	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3857 
3858 	do {
3859 		slot = &atq[cons];
3860 		if (!ISSET(slot->iaq_flags, htole16(IXL_AQ_DD)))
3861 			break;
3862 
3863 		iatq = (struct ixl_atq *)((intptr_t)slot->iaq_cookie);
3864 		iatq->iatq_desc = *slot;
3865 		iatq->iatq_inuse = false;
3866 
3867 		memset(slot, 0, sizeof(*slot));
3868 
3869 		if (ISSET(sc->sc_ec.ec_if.if_flags, IFF_DEBUG))
3870 			ixl_aq_dump(sc, &iatq->iatq_desc, "atq response");
3871 
3872 		(*iatq->iatq_fn)(sc, &iatq->iatq_desc);
3873 
3874 		cons++;
3875 		cons &= IXL_AQ_MASK;
3876 	} while (cons != prod);
3877 
3878 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3879 	    0, IXL_DMA_LEN(&sc->sc_atq),
3880 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3881 
3882 	sc->sc_atq_cons = cons;
3883 }
3884 
3885 static void
3886 ixl_atq_done(struct ixl_softc *sc)
3887 {
3888 
3889 	mutex_enter(&sc->sc_atq_lock);
3890 	ixl_atq_done_locked(sc);
3891 	mutex_exit(&sc->sc_atq_lock);
3892 }
3893 
3894 static void
3895 ixl_wakeup(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
3896 {
3897 
3898 	KASSERT(mutex_owned(&sc->sc_atq_lock));
3899 
3900 	cv_broadcast(&sc->sc_atq_cv);
3901 }
3902 
3903 static int
3904 ixl_atq_exec(struct ixl_softc *sc, struct ixl_atq *iatq)
3905 {
3906 	int error;
3907 
3908 	mutex_enter(&sc->sc_atq_lock);
3909 	error = ixl_atq_exec_locked(sc, iatq);
3910 	mutex_exit(&sc->sc_atq_lock);
3911 
3912 	return error;
3913 }
3914 
3915 static int
3916 ixl_atq_exec_locked(struct ixl_softc *sc, struct ixl_atq *iatq)
3917 {
3918 	int error;
3919 
3920 	KASSERT(mutex_owned(&sc->sc_atq_lock));
3921 	KASSERT(iatq->iatq_desc.iaq_cookie == 0);
3922 
3923 	ixl_atq_set(iatq, ixl_wakeup);
3924 
3925 	error = ixl_atq_post_locked(sc, iatq);
3926 	if (error)
3927 		return error;
3928 
3929 	do {
3930 		error = cv_timedwait(&sc->sc_atq_cv, &sc->sc_atq_lock,
3931 		    IXL_ATQ_EXEC_TIMEOUT);
3932 		if (error == EWOULDBLOCK)
3933 			break;
3934 	} while (iatq->iatq_inuse);
3935 
3936 	return error;
3937 }
3938 
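/*
 * Synchronous admin command for attach/detach paths: post a single
 * descriptor and busy-wait (1 ms steps, up to "tm" ms) until the
 * hardware head pointer catches up, then copy the response back into
 * the caller's descriptor.
 */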
3939 static int
3940 ixl_atq_poll(struct ixl_softc *sc, struct ixl_aq_desc *iaq, unsigned int tm)
3941 {
3942 	struct ixl_aq_desc *atq, *slot;
3943 	unsigned int prod;
3944 	unsigned int t = 0;
3945 
3946 	mutex_enter(&sc->sc_atq_lock);
3947 
3948 	atq = IXL_DMA_KVA(&sc->sc_atq);
3949 	prod = sc->sc_atq_prod;
3950 	slot = atq + prod;
3951 
3952 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3953 	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
3954 
3955 	*slot = *iaq;
3956 	slot->iaq_flags |= htole16(IXL_AQ_SI);
3957 
3958 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3959 	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
3960 
3961 	prod++;
3962 	prod &= IXL_AQ_MASK;
3963 	sc->sc_atq_prod = prod;
3964 	ixl_wr(sc, sc->sc_aq_regs->atq_tail, prod);
3965 
3966 	while (ixl_rd(sc, sc->sc_aq_regs->atq_head) != prod) {
3967 		delaymsec(1);
3968 
3969 		if (t++ > tm) {
3970 			mutex_exit(&sc->sc_atq_lock);
3971 			return ETIMEDOUT;
3972 		}
3973 	}
3974 
3975 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3976 	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTREAD);
3977 	*iaq = *slot;
3978 	memset(slot, 0, sizeof(*slot));
3979 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3980 	    0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREREAD);
3981 
3982 	sc->sc_atq_cons = prod;
3983 
3984 	mutex_exit(&sc->sc_atq_lock);
3985 
3986 	return 0;
3987 }
3988 
3989 static int
3990 ixl_get_version(struct ixl_softc *sc)
3991 {
3992 	struct ixl_aq_desc iaq;
3993 	uint32_t fwbuild, fwver, apiver;
3994 	uint16_t api_maj_ver, api_min_ver;
3995 
3996 	memset(&iaq, 0, sizeof(iaq));
3997 	iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VERSION);
3998 
4001 	if (ixl_atq_poll(sc, &iaq, 2000) != 0)
4002 		return ETIMEDOUT;
4003 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK))
4004 		return EIO;
4005 
4006 	fwbuild = le32toh(iaq.iaq_param[1]);
4007 	fwver = le32toh(iaq.iaq_param[2]);
4008 	apiver = le32toh(iaq.iaq_param[3]);
4009 
4010 	api_maj_ver = (uint16_t)apiver;
4011 	api_min_ver = (uint16_t)(apiver >> 16);
4012 
4013 	aprint_normal(", FW %hu.%hu.%05u API %hu.%hu", (uint16_t)fwver,
4014 	    (uint16_t)(fwver >> 16), fwbuild, api_maj_ver, api_min_ver);
4015 
4016 	if (sc->sc_mac_type == I40E_MAC_X722) {
4017 		SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK |
4018 		    IXL_SC_AQ_FLAG_NVMREAD);
4019 		SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL);
4020 		SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RSS);
4021 	}
4022 
4023 #define IXL_API_VER(maj, min)	(((uint32_t)(maj) << 16) | (min))
4024 	if (IXL_API_VER(api_maj_ver, api_min_ver) >= IXL_API_VER(1, 5)) {
4025 		SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL);
4026 		SET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK);
4027 	}
4028 #undef IXL_API_VER
4029 
4030 	return 0;
4031 }
4032 
4033 static int
4034 ixl_get_nvm_version(struct ixl_softc *sc)
4035 {
4036 	uint16_t nvmver, cfg_ptr, eetrack_hi, eetrack_lo, oem_hi, oem_lo;
4037 	uint32_t eetrack, oem;
4038 	uint16_t nvm_maj_ver, nvm_min_ver, oem_build;
4039 	uint8_t oem_ver, oem_patch;
4040 
4041 	nvmver = cfg_ptr = eetrack_hi = eetrack_lo = oem_hi = oem_lo = 0;
4042 	ixl_rd16_nvm(sc, I40E_SR_NVM_DEV_STARTER_VERSION, &nvmver);
4043 	ixl_rd16_nvm(sc, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
4044 	ixl_rd16_nvm(sc, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
4045 	ixl_rd16_nvm(sc, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
4046 	ixl_rd16_nvm(sc, cfg_ptr + I40E_NVM_OEM_VER_OFF, &oem_hi);
4047 	ixl_rd16_nvm(sc, cfg_ptr + I40E_NVM_OEM_VER_OFF + 1, &oem_lo);
4048 
4049 	nvm_maj_ver = (uint16_t)__SHIFTOUT(nvmver, IXL_NVM_VERSION_HI_MASK);
4050 	nvm_min_ver = (uint16_t)__SHIFTOUT(nvmver, IXL_NVM_VERSION_LO_MASK);
4051 	eetrack = ((uint32_t)eetrack_hi << 16) | eetrack_lo;
4052 	oem = ((uint32_t)oem_hi << 16) | oem_lo;
4053 	oem_ver = __SHIFTOUT(oem, IXL_NVM_OEMVERSION_MASK);
4054 	oem_build = __SHIFTOUT(oem, IXL_NVM_OEMBUILD_MASK);
4055 	oem_patch = __SHIFTOUT(oem, IXL_NVM_OEMPATCH_MASK);
4056 
4057 	aprint_normal(" nvm %x.%02x etid %08x oem %d.%d.%d",
4058 	    nvm_maj_ver, nvm_min_ver, eetrack,
4059 	    oem_ver, oem_build, oem_patch);
4060 
4061 	return 0;
4062 }
4063 
4064 static int
4065 ixl_pxe_clear(struct ixl_softc *sc)
4066 {
4067 	struct ixl_aq_desc iaq;
4068 	int rv;
4069 
4070 	memset(&iaq, 0, sizeof(iaq));
4071 	iaq.iaq_opcode = htole16(IXL_AQ_OP_CLEAR_PXE_MODE);
4072 	iaq.iaq_param[0] = htole32(0x2);
4073 
4074 	rv = ixl_atq_poll(sc, &iaq, 250);
4075 
4076 	ixl_wr(sc, I40E_GLLAN_RCTL_0, 0x1);
4077 
4078 	if (rv != 0)
4079 		return ETIMEDOUT;
4080 
4081 	switch (iaq.iaq_retval) {
4082 	case htole16(IXL_AQ_RC_OK):
4083 	case htole16(IXL_AQ_RC_EEXIST):
4084 		break;
4085 	default:
4086 		return EIO;
4087 	}
4088 
4089 	return 0;
4090 }
4091 
4092 static int
4093 ixl_lldp_shut(struct ixl_softc *sc)
4094 {
4095 	struct ixl_aq_desc iaq;
4096 
4097 	memset(&iaq, 0, sizeof(iaq));
4098 	iaq.iaq_opcode = htole16(IXL_AQ_OP_LLDP_STOP_AGENT);
4099 	iaq.iaq_param[0] = htole32(IXL_LLDP_SHUTDOWN);
4100 
4101 	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4102 		aprint_error_dev(sc->sc_dev, "STOP LLDP AGENT timeout\n");
4103 		return -1;
4104 	}
4105 
4106 	switch (iaq.iaq_retval) {
4107 	case htole16(IXL_AQ_RC_EMODE):
4108 	case htole16(IXL_AQ_RC_EPERM):
4109 		/* ignore silently */
4110 	default:
4111 		break;
4112 	}
4113 
4114 	return 0;
4115 }
4116 
4117 static void
4118 ixl_parse_hw_capability(struct ixl_softc *sc, struct ixl_aq_capability *cap)
4119 {
4120 	uint16_t id;
4121 	uint32_t number, logical_id;
4122 
4123 	id = le16toh(cap->cap_id);
4124 	number = le32toh(cap->number);
4125 	logical_id = le32toh(cap->logical_id);
4126 
4127 	switch (id) {
4128 	case IXL_AQ_CAP_RSS:
4129 		sc->sc_rss_table_size = number;
4130 		sc->sc_rss_table_entry_width = logical_id;
4131 		break;
4132 	case IXL_AQ_CAP_RXQ:
4133 	case IXL_AQ_CAP_TXQ:
4134 		sc->sc_nqueue_pairs_device = MIN(number,
4135 		    sc->sc_nqueue_pairs_device);
4136 		break;
4137 	}
4138 }
4139 
4140 static int
4141 ixl_get_hw_capabilities(struct ixl_softc *sc)
4142 {
4143 	struct ixl_dmamem idm;
4144 	struct ixl_aq_desc iaq;
4145 	struct ixl_aq_capability *caps;
4146 	size_t i, ncaps;
4147 	bus_size_t caps_size;
4148 	uint16_t status;
4149 	int rv;
4150 
4151 	caps_size = sizeof(caps[0]) * 40;
4152 	memset(&iaq, 0, sizeof(iaq));
4153 	iaq.iaq_opcode = htole16(IXL_AQ_OP_LIST_FUNC_CAP);
4154 
4155 	do {
4156 		if (ixl_dmamem_alloc(sc, &idm, caps_size, 0) != 0) {
4157 			return -1;
4158 		}
4159 
4160 		iaq.iaq_flags = htole16(IXL_AQ_BUF |
4161 		    (caps_size > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4162 		iaq.iaq_datalen = htole16(caps_size);
4163 		ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
4164 
4165 		bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0,
4166 		    IXL_DMA_LEN(&idm), BUS_DMASYNC_PREREAD);
4167 
4168 		rv = ixl_atq_poll(sc, &iaq, 250);
4169 
4170 		bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0,
4171 		    IXL_DMA_LEN(&idm), BUS_DMASYNC_POSTREAD);
4172 
4173 		if (rv != 0) {
4174 			aprint_error(", HW capabilities timeout\n");
4175 			goto done;
4176 		}
4177 
4178 		status = le16toh(iaq.iaq_retval);
4179 
4180 		if (status == IXL_AQ_RC_ENOMEM) {
4181 			caps_size = le16toh(iaq.iaq_datalen);
4182 			ixl_dmamem_free(sc, &idm);
4183 		}
4184 	} while (status == IXL_AQ_RC_ENOMEM);
4185 
4186 	if (status != IXL_AQ_RC_OK) {
4187 		aprint_error(", HW capabilities error\n");
4188 		goto done;
4189 	}
4190 
4191 	caps = IXL_DMA_KVA(&idm);
4192 	ncaps = le16toh(iaq.iaq_param[1]);
4193 
4194 	for (i = 0; i < ncaps; i++) {
4195 		ixl_parse_hw_capability(sc, &caps[i]);
4196 	}
4197 
4198 done:
4199 	ixl_dmamem_free(sc, &idm);
4200 	return rv;
4201 }
4202 
4203 static int
4204 ixl_get_mac(struct ixl_softc *sc)
4205 {
4206 	struct ixl_dmamem idm;
4207 	struct ixl_aq_desc iaq;
4208 	struct ixl_aq_mac_addresses *addrs;
4209 	int rv;
4210 
4211 	if (ixl_dmamem_alloc(sc, &idm, sizeof(*addrs), 0) != 0) {
4212 		aprint_error(", unable to allocate mac addresses\n");
4213 		return -1;
4214 	}
4215 
4216 	memset(&iaq, 0, sizeof(iaq));
4217 	iaq.iaq_flags = htole16(IXL_AQ_BUF);
4218 	iaq.iaq_opcode = htole16(IXL_AQ_OP_MAC_ADDRESS_READ);
4219 	iaq.iaq_datalen = htole16(sizeof(*addrs));
4220 	ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
4221 
4222 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
4223 	    BUS_DMASYNC_PREREAD);
4224 
4225 	rv = ixl_atq_poll(sc, &iaq, 250);
4226 
4227 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
4228 	    BUS_DMASYNC_POSTREAD);
4229 
4230 	if (rv != 0) {
4231 		aprint_error(", MAC ADDRESS READ timeout\n");
4232 		rv = -1;
4233 		goto done;
4234 	}
4235 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4236 		aprint_error(", MAC ADDRESS READ error\n");
4237 		rv = -1;
4238 		goto done;
4239 	}
4240 
4241 	addrs = IXL_DMA_KVA(&idm);
4242 	if (!ISSET(iaq.iaq_param[0], htole32(IXL_AQ_MAC_PORT_VALID))) {
4243 		printf(", port address is not valid\n");
4244 		goto done;
4245 	}
4246 
4247 	memcpy(sc->sc_enaddr, addrs->port, ETHER_ADDR_LEN);
4248 	rv = 0;
4249 
4250 done:
4251 	ixl_dmamem_free(sc, &idm);
4252 	return rv;
4253 }
4254 
4255 static int
4256 ixl_get_switch_config(struct ixl_softc *sc)
4257 {
4258 	struct ixl_dmamem idm;
4259 	struct ixl_aq_desc iaq;
4260 	struct ixl_aq_switch_config *hdr;
4261 	struct ixl_aq_switch_config_element *elms, *elm;
4262 	unsigned int nelm, i;
4263 	int rv;
4264 
4265 	if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) {
4266 		aprint_error_dev(sc->sc_dev,
4267 		    "unable to allocate switch config buffer\n");
4268 		return -1;
4269 	}
4270 
4271 	memset(&iaq, 0, sizeof(iaq));
4272 	iaq.iaq_flags = htole16(IXL_AQ_BUF |
4273 	    (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4274 	iaq.iaq_opcode = htole16(IXL_AQ_OP_SWITCH_GET_CONFIG);
4275 	iaq.iaq_datalen = htole16(IXL_AQ_BUFLEN);
4276 	ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
4277 
4278 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
4279 	    BUS_DMASYNC_PREREAD);
4280 
4281 	rv = ixl_atq_poll(sc, &iaq, 250);
4282 
4283 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
4284 	    BUS_DMASYNC_POSTREAD);
4285 
4286 	if (rv != 0) {
4287 		aprint_error_dev(sc->sc_dev, "GET SWITCH CONFIG timeout\n");
4288 		rv = -1;
4289 		goto done;
4290 	}
4291 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4292 		aprint_error_dev(sc->sc_dev, "GET SWITCH CONFIG error\n");
4293 		rv = -1;
4294 		goto done;
4295 	}
4296 
4297 	hdr = IXL_DMA_KVA(&idm);
4298 	elms = (struct ixl_aq_switch_config_element *)(hdr + 1);
4299 
4300 	nelm = le16toh(hdr->num_reported);
4301 	if (nelm < 1) {
4302 		aprint_error_dev(sc->sc_dev, "no switch config available\n");
4303 		rv = -1;
4304 		goto done;
4305 	}
4306 
4307 	for (i = 0; i < nelm; i++) {
4308 		elm = &elms[i];
4309 
4310 		aprint_debug_dev(sc->sc_dev,
4311 		    "type %x revision %u seid %04x\n",
4312 		    elm->type, elm->revision, le16toh(elm->seid));
4313 		aprint_debug_dev(sc->sc_dev,
4314 		    "uplink %04x downlink %04x\n",
4315 		    le16toh(elm->uplink_seid),
4316 		    le16toh(elm->downlink_seid));
4317 		aprint_debug_dev(sc->sc_dev,
4318 		    "conntype %x scheduler %04x extra %04x\n",
4319 		    elm->connection_type,
4320 		    le16toh(elm->scheduler_id),
4321 		    le16toh(elm->element_info));
4322 	}
4323 
4324 	elm = &elms[0];
4325 
4326 	sc->sc_uplink_seid = elm->uplink_seid;
4327 	sc->sc_downlink_seid = elm->downlink_seid;
4328 	sc->sc_seid = elm->seid;
4329 
4330 	if ((sc->sc_uplink_seid == htole16(0)) !=
4331 	    (sc->sc_downlink_seid == htole16(0))) {
4332 		aprint_error_dev(sc->sc_dev, "SEIDs are misconfigured\n");
4333 		rv = -1;
4334 		goto done;
4335 	}
4336 
4337 done:
4338 	ixl_dmamem_free(sc, &idm);
4339 	return rv;
4340 }
4341 
4342 static int
4343 ixl_phy_mask_ints(struct ixl_softc *sc)
4344 {
4345 	struct ixl_aq_desc iaq;
4346 
4347 	memset(&iaq, 0, sizeof(iaq));
4348 	iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_EVENT_MASK);
4349 	iaq.iaq_param[2] = htole32(IXL_AQ_PHY_EV_MASK &
4350 	    ~(IXL_AQ_PHY_EV_LINK_UPDOWN | IXL_AQ_PHY_EV_MODULE_QUAL_FAIL |
4351 	      IXL_AQ_PHY_EV_MEDIA_NA));
4352 
4353 	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4354 		aprint_error_dev(sc->sc_dev, "SET PHY EVENT MASK timeout\n");
4355 		return -1;
4356 	}
4357 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4358 		aprint_error_dev(sc->sc_dev, "SET PHY EVENT MASK error\n");
4359 		return -1;
4360 	}
4361 
4362 	return 0;
4363 }
4364 
4365 static int
4366 ixl_get_phy_abilities(struct ixl_softc *sc, struct ixl_dmamem *idm)
4367 {
4368 	struct ixl_aq_desc iaq;
4369 	int rv;
4370 
4371 	memset(&iaq, 0, sizeof(iaq));
4372 	iaq.iaq_flags = htole16(IXL_AQ_BUF |
4373 	    (IXL_DMA_LEN(idm) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4374 	iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_GET_ABILITIES);
4375 	iaq.iaq_datalen = htole16(IXL_DMA_LEN(idm));
4376 	iaq.iaq_param[0] = htole32(IXL_AQ_PHY_REPORT_INIT);
4377 	ixl_aq_dva(&iaq, IXL_DMA_DVA(idm));
4378 
4379 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
4380 	    BUS_DMASYNC_PREREAD);
4381 
4382 	rv = ixl_atq_poll(sc, &iaq, 250);
4383 
4384 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
4385 	    BUS_DMASYNC_POSTREAD);
4386 
4387 	if (rv != 0)
4388 		return -1;
4389 
4390 	return le16toh(iaq.iaq_retval);
4391 }
4392 
4393 static int
4394 ixl_get_phy_info(struct ixl_softc *sc)
4395 {
4396 	struct ixl_dmamem idm;
4397 	struct ixl_aq_phy_abilities *phy;
4398 	int rv;
4399 
4400 	if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) {
4401 		aprint_error_dev(sc->sc_dev,
4402 		    "unable to allocate phy abilities buffer\n");
4403 		return -1;
4404 	}
4405 
4406 	rv = ixl_get_phy_abilities(sc, &idm);
4407 	switch (rv) {
4408 	case -1:
4409 		aprint_error_dev(sc->sc_dev, "GET PHY ABILITIES timeout\n");
4410 		goto done;
4411 	case IXL_AQ_RC_OK:
4412 		break;
4413 	case IXL_AQ_RC_EIO:
4414 		aprint_error_dev(sc->sc_dev, "unable to query phy types\n");
4415 		goto done;
4416 	default:
4417 		aprint_error_dev(sc->sc_dev,
4418 		    "GET PHY ABILITIES error %u\n", rv);
4419 		goto done;
4420 	}
4421 
4422 	phy = IXL_DMA_KVA(&idm);
4423 
4424 	sc->sc_phy_types = le32toh(phy->phy_type);
4425 	sc->sc_phy_types |= (uint64_t)le32toh(phy->phy_type_ext) << 32;
4426 
4427 	sc->sc_phy_abilities = phy->abilities;
4428 	sc->sc_phy_linkspeed = phy->link_speed;
4429 	sc->sc_phy_fec_cfg = phy->fec_cfg_curr_mod_ext_info &
4430 	    (IXL_AQ_ENABLE_FEC_KR | IXL_AQ_ENABLE_FEC_RS |
4431 	    IXL_AQ_REQUEST_FEC_KR | IXL_AQ_REQUEST_FEC_RS);
4432 	sc->sc_eee_cap = phy->eee_capability;
4433 	sc->sc_eeer_val = phy->eeer_val;
4434 	sc->sc_d3_lpan = phy->d3_lpan;
4435 
4436 	rv = 0;
4437 
4438 done:
4439 	ixl_dmamem_free(sc, &idm);
4440 	return rv;
4441 }
4442 
4443 static int
4444 ixl_set_phy_config(struct ixl_softc *sc,
4445     uint8_t link_speed, uint8_t abilities, bool polling)
4446 {
4447 	struct ixl_aq_phy_param *param;
4448 	struct ixl_atq iatq;
4449 	struct ixl_aq_desc *iaq;
4450 	int error;
4451 
4452 	memset(&iatq, 0, sizeof(iatq));
4453 
4454 	iaq = &iatq.iatq_desc;
4455 	iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_CONFIG);
4456 	param = (struct ixl_aq_phy_param *)&iaq->iaq_param;
4457 	param->phy_types = htole32((uint32_t)sc->sc_phy_types);
4458 	param->phy_type_ext = (uint8_t)(sc->sc_phy_types >> 32);
4459 	param->link_speed = link_speed;
4460 	param->abilities = abilities | IXL_AQ_PHY_ABILITY_AUTO_LINK;
4461 	param->fec_cfg = sc->sc_phy_fec_cfg;
4462 	param->eee_capability = sc->sc_eee_cap;
4463 	param->eeer_val = sc->sc_eeer_val;
4464 	param->d3_lpan = sc->sc_d3_lpan;
4465 
4466 	if (polling)
4467 		error = ixl_atq_poll(sc, iaq, 250);
4468 	else
4469 		error = ixl_atq_exec(sc, &iatq);
4470 
4471 	if (error != 0)
4472 		return error;
4473 
4474 	switch (le16toh(iaq->iaq_retval)) {
4475 	case IXL_AQ_RC_OK:
4476 		break;
4477 	case IXL_AQ_RC_EPERM:
4478 		return EPERM;
4479 	default:
4480 		return EIO;
4481 	}
4482 
4483 	return 0;
4484 }
4485 
4486 static int
4487 ixl_set_phy_autoselect(struct ixl_softc *sc)
4488 {
4489 	uint8_t link_speed, abilities;
4490 
4491 	link_speed = sc->sc_phy_linkspeed;
4492 	abilities = IXL_PHY_ABILITY_LINKUP | IXL_PHY_ABILITY_AUTONEGO;
4493 
4494 	return ixl_set_phy_config(sc, link_speed, abilities, true);
4495 }
4496 
4497 static int
4498 ixl_get_link_status_poll(struct ixl_softc *sc, int *l)
4499 {
4500 	struct ixl_aq_desc iaq;
4501 	struct ixl_aq_link_param *param;
4502 	int link;
4503 
4504 	memset(&iaq, 0, sizeof(iaq));
4505 	iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS);
4506 	param = (struct ixl_aq_link_param *)iaq.iaq_param;
4507 	param->notify = IXL_AQ_LINK_NOTIFY;
4508 
4509 	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4510 		return ETIMEDOUT;
4511 	}
4512 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4513 		return EIO;
4514 	}
4515 
4516 	/* It is unnecessary to hold lock */
4517 	link = ixl_set_link_status_locked(sc, &iaq);
4518 
4519 	if (l != NULL)
4520 		*l = link;
4521 
4522 	return 0;
4523 }
4524 
4525 static int
4526 ixl_get_vsi(struct ixl_softc *sc)
4527 {
4528 	struct ixl_dmamem *vsi = &sc->sc_scratch;
4529 	struct ixl_aq_desc iaq;
4530 	struct ixl_aq_vsi_param *param;
4531 	struct ixl_aq_vsi_reply *reply;
4532 	struct ixl_aq_vsi_data *data;
4533 	int rv;
4534 
4535 	/* grumble, vsi info isn't "known" at compile time */
4536 
4537 	memset(&iaq, 0, sizeof(iaq));
4538 	iaq.iaq_flags = htole16(IXL_AQ_BUF |
4539 	    (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4540 	iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VSI_PARAMS);
4541 	iaq.iaq_datalen = htole16(IXL_DMA_LEN(vsi));
4542 	ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi));
4543 
4544 	param = (struct ixl_aq_vsi_param *)iaq.iaq_param;
4545 	param->uplink_seid = sc->sc_seid;
4546 
4547 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4548 	    BUS_DMASYNC_PREREAD);
4549 
4550 	rv = ixl_atq_poll(sc, &iaq, 250);
4551 
4552 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4553 	    BUS_DMASYNC_POSTREAD);
4554 
4555 	if (rv != 0) {
4556 		return ETIMEDOUT;
4557 	}
4558 
4559 	switch (le16toh(iaq.iaq_retval)) {
4560 	case IXL_AQ_RC_OK:
4561 		break;
4562 	case IXL_AQ_RC_ENOENT:
4563 		return ENOENT;
4564 	case IXL_AQ_RC_EACCES:
4565 		return EACCES;
4566 	default:
4567 		return EIO;
4568 	}
4569 
4570 	reply = (struct ixl_aq_vsi_reply *)iaq.iaq_param;
4571 	sc->sc_vsi_number = le16toh(reply->vsi_number);
4572 	data = IXL_DMA_KVA(vsi);
4573 	sc->sc_vsi_stat_counter_idx = le16toh(data->stat_counter_idx);
4574 
4575 	return 0;
4576 }
4577 
4578 static int
4579 ixl_set_vsi(struct ixl_softc *sc)
4580 {
4581 	struct ixl_dmamem *vsi = &sc->sc_scratch;
4582 	struct ixl_aq_desc iaq;
4583 	struct ixl_aq_vsi_param *param;
4584 	struct ixl_aq_vsi_data *data = IXL_DMA_KVA(vsi);
4585 	unsigned int qnum;
4586 	uint16_t val;
4587 	int rv;
4588 
4589 	qnum = sc->sc_nqueue_pairs - 1;
4590 
4591 	data->valid_sections = htole16(IXL_AQ_VSI_VALID_QUEUE_MAP |
4592 	    IXL_AQ_VSI_VALID_VLAN);
4593 
4594 	CLR(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_MASK));
4595 	SET(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_CONTIG));
4596 	data->queue_mapping[0] = htole16(0);
4597 	data->tc_mapping[0] = htole16((0 << IXL_AQ_VSI_TC_Q_OFFSET_SHIFT) |
4598 	    (qnum << IXL_AQ_VSI_TC_Q_NUMBER_SHIFT));
4599 
4600 	val = le16toh(data->port_vlan_flags);
4601 	CLR(val, IXL_AQ_VSI_PVLAN_MODE_MASK | IXL_AQ_VSI_PVLAN_EMOD_MASK);
4602 	SET(val, IXL_AQ_VSI_PVLAN_MODE_ALL);
4603 
4604 	if (ISSET(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWTAGGING)) {
4605 		SET(val, IXL_AQ_VSI_PVLAN_EMOD_STR_BOTH);
4606 	} else {
4607 		SET(val, IXL_AQ_VSI_PVLAN_EMOD_NOTHING);
4608 	}
4609 
4610 	data->port_vlan_flags = htole16(val);
4611 
4612 	/* grumble, vsi info isn't "known" at compile time */
4613 
4614 	memset(&iaq, 0, sizeof(iaq));
4615 	iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD |
4616 	    (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4617 	iaq.iaq_opcode = htole16(IXL_AQ_OP_UPD_VSI_PARAMS);
4618 	iaq.iaq_datalen = htole16(IXL_DMA_LEN(vsi));
4619 	ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi));
4620 
4621 	param = (struct ixl_aq_vsi_param *)iaq.iaq_param;
4622 	param->uplink_seid = sc->sc_seid;
4623 
4624 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4625 	    BUS_DMASYNC_PREWRITE);
4626 
4627 	rv = ixl_atq_poll(sc, &iaq, 250);
4628 
4629 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4630 	    BUS_DMASYNC_POSTWRITE);
4631 
4632 	if (rv != 0) {
4633 		return ETIMEDOUT;
4634 	}
4635 
4636 	switch (le16toh(iaq.iaq_retval)) {
4637 	case IXL_AQ_RC_OK:
4638 		break;
4639 	case IXL_AQ_RC_ENOENT:
4640 		return ENOENT;
4641 	case IXL_AQ_RC_EACCES:
4642 		return EACCES;
4643 	default:
4644 		return EIO;
4645 	}
4646 
4647 	return 0;
4648 }
4649 
4650 static void
4651 ixl_set_filter_control(struct ixl_softc *sc)
4652 {
4653 	uint32_t reg;
4654 
4655 	reg = ixl_rd_rx_csr(sc, I40E_PFQF_CTL_0);
4656 
4657 	CLR(reg, I40E_PFQF_CTL_0_HASHLUTSIZE_MASK);
4658 	SET(reg, I40E_HASH_LUT_SIZE_128 << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT);
4659 
4660 	SET(reg, I40E_PFQF_CTL_0_FD_ENA_MASK);
4661 	SET(reg, I40E_PFQF_CTL_0_ETYPE_ENA_MASK);
4662 	SET(reg, I40E_PFQF_CTL_0_MACVLAN_ENA_MASK);
4663 
4664 	ixl_wr_rx_csr(sc, I40E_PFQF_CTL_0, reg);
4665 }
4666 
4667 static inline void
4668 ixl_get_default_rss_key(uint32_t *buf, size_t len)
4669 {
4670 	size_t cplen;
4671 	uint8_t rss_seed[RSS_KEYSIZE];
4672 
4673 	rss_getkey(rss_seed);
4674 	memset(buf, 0, len);
4675 
4676 	cplen = MIN(len, sizeof(rss_seed));
4677 	memcpy(buf, rss_seed, cplen);
4678 }
4679 
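/*
 * Program the RSS hash key through the admin queue.  The key is split
 * into the standard and extended portions of struct ixl_aq_rss_key_data
 * before being handed to the firmware.
 */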
4680 static int
4681 ixl_set_rss_key(struct ixl_softc *sc, uint8_t *key, size_t keylen)
4682 {
4683 	struct ixl_dmamem *idm;
4684 	struct ixl_atq iatq;
4685 	struct ixl_aq_desc *iaq;
4686 	struct ixl_aq_rss_key_param *param;
4687 	struct ixl_aq_rss_key_data *data;
4688 	size_t len, datalen, stdlen, extlen;
4689 	uint16_t vsi_id;
4690 	int rv;
4691 
4692 	memset(&iatq, 0, sizeof(iatq));
4693 	iaq = &iatq.iatq_desc;
4694 	idm = &sc->sc_aqbuf;
4695 
4696 	datalen = sizeof(*data);
4697 
4698 	/*XXX The buf size has to be less than the size of the register */
4699 	datalen = MIN(IXL_RSS_KEY_SIZE_REG * sizeof(uint32_t), datalen);
4700 
4701 	iaq->iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD |
4702 	    (datalen > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4703 	iaq->iaq_opcode = htole16(IXL_AQ_OP_RSS_SET_KEY);
4704 	iaq->iaq_datalen = htole16(datalen);
4705 
4706 	param = (struct ixl_aq_rss_key_param *)iaq->iaq_param;
4707 	vsi_id = (sc->sc_vsi_number << IXL_AQ_RSSKEY_VSI_ID_SHIFT) |
4708 	    IXL_AQ_RSSKEY_VSI_VALID;
4709 	param->vsi_id = htole16(vsi_id);
4710 
4711 	memset(IXL_DMA_KVA(idm), 0, IXL_DMA_LEN(idm));
4712 	data = IXL_DMA_KVA(idm);
4713 
4714 	len = MIN(keylen, datalen);
4715 	stdlen = MIN(sizeof(data->standard_rss_key), len);
4716 	memcpy(data->standard_rss_key, key, stdlen);
4717 	len = (len > stdlen) ? (len - stdlen) : 0;
4718 
4719 	extlen = MIN(sizeof(data->extended_hash_key), len);
4721 	memcpy(data->extended_hash_key, key + stdlen, extlen);
4722 
4723 	ixl_aq_dva(iaq, IXL_DMA_DVA(idm));
4724 
4725 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0,
4726 	    IXL_DMA_LEN(idm), BUS_DMASYNC_PREWRITE);
4727 
4728 	rv = ixl_atq_exec(sc, &iatq);
4729 
4730 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0,
4731 	    IXL_DMA_LEN(idm), BUS_DMASYNC_POSTWRITE);
4732 
4733 	if (rv != 0) {
4734 		return ETIMEDOUT;
4735 	}
4736 
4737 	if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK)) {
4738 		return EIO;
4739 	}
4740 
4741 	return 0;
4742 }
4743 
4744 static int
4745 ixl_set_rss_lut(struct ixl_softc *sc, uint8_t *lut, size_t lutlen)
4746 {
4747 	struct ixl_dmamem *idm;
4748 	struct ixl_atq iatq;
4749 	struct ixl_aq_desc *iaq;
4750 	struct ixl_aq_rss_lut_param *param;
4751 	uint16_t vsi_id;
4752 	uint8_t *data;
4753 	size_t dmalen;
4754 	int rv;
4755 
4756 	memset(&iatq, 0, sizeof(iatq));
4757 	iaq = &iatq.iatq_desc;
4758 	idm = &sc->sc_aqbuf;
4759 
4760 	dmalen = MIN(lutlen, IXL_DMA_LEN(idm));
4761 
4762 	iaq->iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD |
4763 	    (dmalen > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4764 	iaq->iaq_opcode = htole16(IXL_AQ_OP_RSS_SET_LUT);
4765 	iaq->iaq_datalen = htole16(dmalen);
4766 
4767 	memset(IXL_DMA_KVA(idm), 0, IXL_DMA_LEN(idm));
4768 	data = IXL_DMA_KVA(idm);
4769 	memcpy(data, lut, dmalen);
4770 	ixl_aq_dva(iaq, IXL_DMA_DVA(idm));
4771 
4772 	param = (struct ixl_aq_rss_lut_param *)iaq->iaq_param;
4773 	vsi_id = (sc->sc_vsi_number << IXL_AQ_RSSLUT_VSI_ID_SHIFT) |
4774 	    IXL_AQ_RSSLUT_VSI_VALID;
4775 	param->vsi_id = htole16(vsi_id);
4776 	param->flags = htole16(IXL_AQ_RSSLUT_TABLE_TYPE_PF <<
4777 	    IXL_AQ_RSSLUT_TABLE_TYPE_SHIFT);
4778 
4779 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0,
4780 	    IXL_DMA_LEN(idm), BUS_DMASYNC_PREWRITE);
4781 
4782 	rv = ixl_atq_exec(sc, &iatq);
4783 
4784 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0,
4785 	    IXL_DMA_LEN(idm), BUS_DMASYNC_POSTWRITE);
4786 
4787 	if (rv != 0) {
4788 		return ETIMEDOUT;
4789 	}
4790 
4791 	if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK)) {
4792 		return EIO;
4793 	}
4794 
4795 	return 0;
4796 }
4797 
4798 static int
4799 ixl_register_rss_key(struct ixl_softc *sc)
4800 {
4801 	uint32_t rss_seed[IXL_RSS_KEY_SIZE_REG];
4802 	int rv;
4803 	size_t i;
4804 
4805 	ixl_get_default_rss_key(rss_seed, sizeof(rss_seed));
4806 
4807 	if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RSS)) {
4808 		rv = ixl_set_rss_key(sc, (uint8_t*)rss_seed,
4809 		    sizeof(rss_seed));
4810 	} else {
4811 		rv = 0;
4812 		for (i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
4813 			ixl_wr_rx_csr(sc, I40E_PFQF_HKEY(i), rss_seed[i]);
4814 		}
4815 	}
4816 
4817 	return rv;
4818 }
4819 
4820 static void
4821 ixl_register_rss_pctype(struct ixl_softc *sc)
4822 {
4823 	uint64_t set_hena = 0;
4824 	uint32_t hena0, hena1;
4825 
4826 	/*
4827 	 * We use TCP/UDP with IPv4/IPv6 by default.
4828 	 * Note: the device can not use just IP header in each
4829 	 * TCP/UDP packets for the RSS hash calculation.
4830 	 */
4831 	if (sc->sc_mac_type == I40E_MAC_X722)
4832 		set_hena = IXL_RSS_HENA_DEFAULT_X722;
4833 	else
4834 		set_hena = IXL_RSS_HENA_DEFAULT_XL710;
4835 
4836 	hena0 = ixl_rd_rx_csr(sc, I40E_PFQF_HENA(0));
4837 	hena1 = ixl_rd_rx_csr(sc, I40E_PFQF_HENA(1));
4838 
4839 	SET(hena0, set_hena);
4840 	SET(hena1, set_hena >> 32);
4841 
4842 	ixl_wr_rx_csr(sc, I40E_PFQF_HENA(0), hena0);
4843 	ixl_wr_rx_csr(sc, I40E_PFQF_HENA(1), hena1);
4844 }
4845 
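/*
 * Build the RSS lookup table by striping the active queue pairs across
 * all table entries, then load it either via the admin queue (when
 * IXL_SC_AQ_FLAG_RSS is set) or directly through the PFQF_HLUT
 * registers.
 */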
4846 static int
4847 ixl_register_rss_hlut(struct ixl_softc *sc)
4848 {
4849 	unsigned int qid;
4850 	uint8_t hlut_buf[512], lut_mask;
4851 	uint32_t *hluts;
4852 	size_t i, hluts_num;
4853 	int rv;
4854 
4855 	lut_mask = (0x01 << sc->sc_rss_table_entry_width) - 1;
4856 
4857 	for (i = 0; i < sc->sc_rss_table_size; i++) {
4858 		qid = i % sc->sc_nqueue_pairs;
4859 		hlut_buf[i] = qid & lut_mask;
4860 	}
4861 
4862 	if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RSS)) {
4863 		rv = ixl_set_rss_lut(sc, hlut_buf, sizeof(hlut_buf));
4864 	} else {
4865 		rv = 0;
4866 		hluts = (uint32_t *)hlut_buf;
4867 		hluts_num = sc->sc_rss_table_size >> 2;
4868 		for (i = 0; i < hluts_num; i++) {
4869 			ixl_wr(sc, I40E_PFQF_HLUT(i), hluts[i]);
4870 		}
4871 		ixl_flush(sc);
4872 	}
4873 
4874 	return rv;
4875 }
4876 
4877 static void
4878 ixl_config_rss(struct ixl_softc *sc)
4879 {
4880 
4881 	KASSERT(mutex_owned(&sc->sc_cfg_lock));
4882 
4883 	ixl_register_rss_key(sc);
4884 	ixl_register_rss_pctype(sc);
4885 	ixl_register_rss_hlut(sc);
4886 }
4887 
4888 static const struct ixl_phy_type *
4889 ixl_search_phy_type(uint8_t phy_type)
4890 {
4891 	const struct ixl_phy_type *itype;
4892 	uint64_t mask;
4893 	unsigned int i;
4894 
4895 	if (phy_type >= 64)
4896 		return NULL;
4897 
4898 	mask = 1ULL << phy_type;
4899 
4900 	for (i = 0; i < __arraycount(ixl_phy_type_map); i++) {
4901 		itype = &ixl_phy_type_map[i];
4902 
4903 		if (ISSET(itype->phy_type, mask))
4904 			return itype;
4905 	}
4906 
4907 	return NULL;
4908 }
4909 
4910 static uint64_t
4911 ixl_search_link_speed(uint8_t link_speed)
4912 {
4913 	const struct ixl_speed_type *type;
4914 	unsigned int i;
4915 
4916 	for (i = 0; i < __arraycount(ixl_speed_type_map); i++) {
4917 		type = &ixl_speed_type_map[i];
4918 
4919 		if (ISSET(type->dev_speed, link_speed))
4920 			return type->net_speed;
4921 	}
4922 
4923 	return 0;
4924 }
4925 
4926 static uint8_t
4927 ixl_search_baudrate(uint64_t baudrate)
4928 {
4929 	const struct ixl_speed_type *type;
4930 	unsigned int i;
4931 
4932 	for (i = 0; i < __arraycount(ixl_speed_type_map); i++) {
4933 		type = &ixl_speed_type_map[i];
4934 
4935 		if (type->net_speed == baudrate) {
4936 			return type->dev_speed;
4937 		}
4938 	}
4939 
4940 	return 0;
4941 }
4942 
4943 static int
4944 ixl_restart_an(struct ixl_softc *sc)
4945 {
4946 	struct ixl_aq_desc iaq;
4947 
4948 	memset(&iaq, 0, sizeof(iaq));
4949 	iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_RESTART_AN);
4950 	iaq.iaq_param[0] =
4951 	    htole32(IXL_AQ_PHY_RESTART_AN | IXL_AQ_PHY_LINK_ENABLE);
4952 
4953 	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4954 		aprint_error_dev(sc->sc_dev, "RESTART AN timeout\n");
4955 		return -1;
4956 	}
4957 	if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4958 		aprint_error_dev(sc->sc_dev, "RESTART AN error\n");
4959 		return -1;
4960 	}
4961 
4962 	return 0;
4963 }
4964 
4965 static int
4966 ixl_add_macvlan(struct ixl_softc *sc, const uint8_t *macaddr,
4967     uint16_t vlan, uint16_t flags)
4968 {
4969 	struct ixl_aq_desc iaq;
4970 	struct ixl_aq_add_macvlan *param;
4971 	struct ixl_aq_add_macvlan_elem *elem;
4972 
4973 	memset(&iaq, 0, sizeof(iaq));
4974 	iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4975 	iaq.iaq_opcode = htole16(IXL_AQ_OP_ADD_MACVLAN);
4976 	iaq.iaq_datalen = htole16(sizeof(*elem));
4977 	ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch));
4978 
4979 	param = (struct ixl_aq_add_macvlan *)&iaq.iaq_param;
4980 	param->num_addrs = htole16(1);
4981 	param->seid0 = htole16(0x8000) | sc->sc_seid;
4982 	param->seid1 = 0;
4983 	param->seid2 = 0;
4984 
4985 	elem = IXL_DMA_KVA(&sc->sc_scratch);
4986 	memset(elem, 0, sizeof(*elem));
4987 	memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN);
4988 	elem->flags = htole16(IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH | flags);
4989 	elem->vlan = htole16(vlan);
4990 
4991 	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4992 		return IXL_AQ_RC_EINVAL;
4993 	}
4994 
4995 	switch (le16toh(iaq.iaq_retval)) {
4996 	case IXL_AQ_RC_OK:
4997 		break;
4998 	case IXL_AQ_RC_ENOSPC:
4999 		return ENOSPC;
5000 	case IXL_AQ_RC_ENOENT:
5001 		return ENOENT;
5002 	case IXL_AQ_RC_EACCES:
5003 		return EACCES;
5004 	case IXL_AQ_RC_EEXIST:
5005 		return EEXIST;
5006 	case IXL_AQ_RC_EINVAL:
5007 		return EINVAL;
5008 	default:
5009 		return EIO;
5010 	}
5011 
5012 	return 0;
5013 }
5014 
5015 static int
5016 ixl_remove_macvlan(struct ixl_softc *sc, const uint8_t *macaddr,
5017     uint16_t vlan, uint16_t flags)
5018 {
5019 	struct ixl_aq_desc iaq;
5020 	struct ixl_aq_remove_macvlan *param;
5021 	struct ixl_aq_remove_macvlan_elem *elem;
5022 
5023 	memset(&iaq, 0, sizeof(iaq));
5024 	iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
5025 	iaq.iaq_opcode = htole16(IXL_AQ_OP_REMOVE_MACVLAN);
5026 	iaq.iaq_datalen = htole16(sizeof(*elem));
5027 	ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch));
5028 
5029 	param = (struct ixl_aq_remove_macvlan *)&iaq.iaq_param;
5030 	param->num_addrs = htole16(1);
5031 	param->seid0 = htole16(0x8000) | sc->sc_seid;
5032 	param->seid1 = 0;
5033 	param->seid2 = 0;
5034 
5035 	elem = IXL_DMA_KVA(&sc->sc_scratch);
5036 	memset(elem, 0, sizeof(*elem));
5037 	memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN);
5038 	elem->flags = htole16(IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH | flags);
5039 	elem->vlan = htole16(vlan);
5040 
5041 	if (ixl_atq_poll(sc, &iaq, 250) != 0) {
5042 		return EINVAL;
5043 	}
5044 
5045 	switch (le16toh(iaq.iaq_retval)) {
5046 	case IXL_AQ_RC_OK:
5047 		break;
5048 	case IXL_AQ_RC_ENOENT:
5049 		return ENOENT;
5050 	case IXL_AQ_RC_EACCES:
5051 		return EACCES;
5052 	case IXL_AQ_RC_EINVAL:
5053 		return EINVAL;
5054 	default:
5055 		return EIO;
5056 	}
5057 
5058 	return 0;
5059 }
5060 
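/*
 * Set up the Host Memory Cache: size the LAN tx/rx (and unused FCoE)
 * object regions from the GLHMC registers, allocate backing pages and
 * the segment descriptor table, and point the hardware at them through
 * the PFHMC_SD* registers.
 */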
5061 static int
5062 ixl_hmc(struct ixl_softc *sc)
5063 {
5064 	struct {
5065 		uint32_t   count;
5066 		uint32_t   minsize;
5067 		bus_size_t objsiz;
5068 		bus_size_t setoff;
5069 		bus_size_t setcnt;
5070 	} regs[] = {
5071 		{
5072 			0,
5073 			IXL_HMC_TXQ_MINSIZE,
5074 			I40E_GLHMC_LANTXOBJSZ,
5075 			I40E_GLHMC_LANTXBASE(sc->sc_pf_id),
5076 			I40E_GLHMC_LANTXCNT(sc->sc_pf_id),
5077 		},
5078 		{
5079 			0,
5080 			IXL_HMC_RXQ_MINSIZE,
5081 			I40E_GLHMC_LANRXOBJSZ,
5082 			I40E_GLHMC_LANRXBASE(sc->sc_pf_id),
5083 			I40E_GLHMC_LANRXCNT(sc->sc_pf_id),
5084 		},
5085 		{
5086 			0,
5087 			0,
5088 			I40E_GLHMC_FCOEDDPOBJSZ,
5089 			I40E_GLHMC_FCOEDDPBASE(sc->sc_pf_id),
5090 			I40E_GLHMC_FCOEDDPCNT(sc->sc_pf_id),
5091 		},
5092 		{
5093 			0,
5094 			0,
5095 			I40E_GLHMC_FCOEFOBJSZ,
5096 			I40E_GLHMC_FCOEFBASE(sc->sc_pf_id),
5097 			I40E_GLHMC_FCOEFCNT(sc->sc_pf_id),
5098 		},
5099 	};
5100 	struct ixl_hmc_entry *e;
5101 	uint64_t size, dva;
5102 	uint8_t *kva;
5103 	uint64_t *sdpage;
5104 	unsigned int i;
5105 	int npages, tables;
5106 	uint32_t reg;
5107 
5108 	CTASSERT(__arraycount(regs) <= __arraycount(sc->sc_hmc_entries));
5109 
5110 	regs[IXL_HMC_LAN_TX].count = regs[IXL_HMC_LAN_RX].count =
5111 	    ixl_rd(sc, I40E_GLHMC_LANQMAX);
5112 
5113 	size = 0;
5114 	for (i = 0; i < __arraycount(regs); i++) {
5115 		e = &sc->sc_hmc_entries[i];
5116 
5117 		e->hmc_count = regs[i].count;
5118 		reg = ixl_rd(sc, regs[i].objsiz);
5119 		e->hmc_size = IXL_BIT_ULL(0x3F & reg);
5120 		e->hmc_base = size;
5121 
5122 		if ((e->hmc_size * 8) < regs[i].minsize) {
5123 			aprint_error_dev(sc->sc_dev,
5124 			    "kernel hmc entry is too big\n");
5125 			return -1;
5126 		}
5127 
5128 		size += roundup(e->hmc_size * e->hmc_count, IXL_HMC_ROUNDUP);
5129 	}
5130 	size = roundup(size, IXL_HMC_PGSIZE);
5131 	npages = size / IXL_HMC_PGSIZE;
5132 
5133 	tables = roundup(size, IXL_HMC_L2SZ) / IXL_HMC_L2SZ;
5134 
5135 	if (ixl_dmamem_alloc(sc, &sc->sc_hmc_pd, size, IXL_HMC_PGSIZE) != 0) {
5136 		aprint_error_dev(sc->sc_dev,
5137 		    "unable to allocate hmc pd memory\n");
5138 		return -1;
5139 	}
5140 
5141 	if (ixl_dmamem_alloc(sc, &sc->sc_hmc_sd, tables * IXL_HMC_PGSIZE,
5142 	    IXL_HMC_PGSIZE) != 0) {
5143 		aprint_error_dev(sc->sc_dev,
5144 		    "unable to allocate hmc sd memory\n");
5145 		ixl_dmamem_free(sc, &sc->sc_hmc_pd);
5146 		return -1;
5147 	}
5148 
5149 	kva = IXL_DMA_KVA(&sc->sc_hmc_pd);
5150 	memset(kva, 0, IXL_DMA_LEN(&sc->sc_hmc_pd));
5151 
5152 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
5153 	    0, IXL_DMA_LEN(&sc->sc_hmc_pd),
5154 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5155 
5156 	dva = IXL_DMA_DVA(&sc->sc_hmc_pd);
5157 	sdpage = IXL_DMA_KVA(&sc->sc_hmc_sd);
5158 	memset(sdpage, 0, IXL_DMA_LEN(&sc->sc_hmc_sd));
5159 
5160 	for (i = 0; (int)i < npages; i++) {
5161 		*sdpage = htole64(dva | IXL_HMC_PDVALID);
5162 		sdpage++;
5163 
5164 		dva += IXL_HMC_PGSIZE;
5165 	}
5166 
5167 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_sd),
5168 	    0, IXL_DMA_LEN(&sc->sc_hmc_sd),
5169 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
5170 
5171 	dva = IXL_DMA_DVA(&sc->sc_hmc_sd);
5172 	for (i = 0; (int)i < tables; i++) {
5173 		uint32_t count;
5174 
5175 		KASSERT(npages >= 0);
5176 
5177 		count = ((unsigned int)npages > IXL_HMC_PGS) ?
5178 		    IXL_HMC_PGS : (unsigned int)npages;
5179 
5180 		ixl_wr(sc, I40E_PFHMC_SDDATAHIGH, dva >> 32);
5181 		ixl_wr(sc, I40E_PFHMC_SDDATALOW, dva |
5182 		    (count << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |
5183 		    (1U << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT));
5184 		ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
5185 		ixl_wr(sc, I40E_PFHMC_SDCMD,
5186 		    (1U << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | i);
5187 
5188 		npages -= IXL_HMC_PGS;
5189 		dva += IXL_HMC_PGSIZE;
5190 	}
5191 
5192 	for (i = 0; i < __arraycount(regs); i++) {
5193 		e = &sc->sc_hmc_entries[i];
5194 
5195 		ixl_wr(sc, regs[i].setoff, e->hmc_base / IXL_HMC_ROUNDUP);
5196 		ixl_wr(sc, regs[i].setcnt, e->hmc_count);
5197 	}
5198 
5199 	return 0;
5200 }
5201 
5202 static void
5203 ixl_hmc_free(struct ixl_softc *sc)
5204 {
5205 	ixl_dmamem_free(sc, &sc->sc_hmc_sd);
5206 	ixl_dmamem_free(sc, &sc->sc_hmc_pd);
5207 }
5208 
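/*
 * Pack a host-order context structure into the bit-exact layout the HMC
 * expects: each ixl_hmc_pack entry copies "width" bits from "offset" in
 * the source into the destination starting at bit position "lsb".
 */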
5209 static void
5210 ixl_hmc_pack(void *d, const void *s, const struct ixl_hmc_pack *packing,
5211     unsigned int npacking)
5212 {
5213 	uint8_t *dst = d;
5214 	const uint8_t *src = s;
5215 	unsigned int i;
5216 
5217 	for (i = 0; i < npacking; i++) {
5218 		const struct ixl_hmc_pack *pack = &packing[i];
5219 		unsigned int offset = pack->lsb / 8;
5220 		unsigned int align = pack->lsb % 8;
5221 		const uint8_t *in = src + pack->offset;
5222 		uint8_t *out = dst + offset;
5223 		int width = pack->width;
5224 		unsigned int inbits = 0;
5225 
5226 		if (align) {
5227 			inbits = (*in++) << align;
5228 			*out++ |= (inbits & 0xff);
5229 			inbits >>= 8;
5230 
5231 			width -= 8 - align;
5232 		}
5233 
5234 		while (width >= 8) {
5235 			inbits |= (*in++) << align;
5236 			*out++ = (inbits & 0xff);
5237 			inbits >>= 8;
5238 
5239 			width -= 8;
5240 		}
5241 
5242 		if (width > 0) {
5243 			inbits |= (*in) << align;
5244 			*out |= (inbits & ((1 << width) - 1));
5245 		}
5246 	}
5247 }
5248 
5249 static struct ixl_aq_buf *
5250 ixl_aqb_alloc(struct ixl_softc *sc)
5251 {
5252 	struct ixl_aq_buf *aqb;
5253 
5254 	aqb = kmem_alloc(sizeof(*aqb), KM_SLEEP);
5255 
5256 	aqb->aqb_size = IXL_AQ_BUFLEN;
5257 
5258 	if (bus_dmamap_create(sc->sc_dmat, aqb->aqb_size, 1,
5259 	    aqb->aqb_size, 0,
5260 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &aqb->aqb_map) != 0)
5261 		goto free;
5262 	if (bus_dmamem_alloc(sc->sc_dmat, aqb->aqb_size,
5263 	    IXL_AQ_ALIGN, 0, &aqb->aqb_seg, 1, &aqb->aqb_nsegs,
5264 	    BUS_DMA_WAITOK) != 0)
5265 		goto destroy;
5266 	if (bus_dmamem_map(sc->sc_dmat, &aqb->aqb_seg, aqb->aqb_nsegs,
5267 	    aqb->aqb_size, &aqb->aqb_data, BUS_DMA_WAITOK) != 0)
5268 		goto dma_free;
5269 	if (bus_dmamap_load(sc->sc_dmat, aqb->aqb_map, aqb->aqb_data,
5270 	    aqb->aqb_size, NULL, BUS_DMA_WAITOK) != 0)
5271 		goto unmap;
5272 
5273 	return aqb;
5274 unmap:
5275 	bus_dmamem_unmap(sc->sc_dmat, aqb->aqb_data, aqb->aqb_size);
5276 dma_free:
5277 	bus_dmamem_free(sc->sc_dmat, &aqb->aqb_seg, 1);
5278 destroy:
5279 	bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
5280 free:
5281 	kmem_free(aqb, sizeof(*aqb));
5282 
5283 	return NULL;
5284 }
5285 
5286 static void
5287 ixl_aqb_free(struct ixl_softc *sc, struct ixl_aq_buf *aqb)
5288 {
5289 
5290 	bus_dmamap_unload(sc->sc_dmat, aqb->aqb_map);
5291 	bus_dmamem_unmap(sc->sc_dmat, aqb->aqb_data, aqb->aqb_size);
5292 	bus_dmamem_free(sc->sc_dmat, &aqb->aqb_seg, 1);
5293 	bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
5294 	kmem_free(aqb, sizeof(*aqb));
5295 }
5296 
5297 static int
5298 ixl_arq_fill(struct ixl_softc *sc)
5299 {
5300 	struct ixl_aq_buf *aqb;
5301 	struct ixl_aq_desc *arq, *iaq;
5302 	unsigned int prod = sc->sc_arq_prod;
5303 	unsigned int n;
5304 	int post = 0;
5305 
5306 	n = ixl_rxr_unrefreshed(sc->sc_arq_prod, sc->sc_arq_cons,
5307 	    IXL_AQ_NUM);
5308 	arq = IXL_DMA_KVA(&sc->sc_arq);
5309 
5310 	if (__predict_false(n <= 0))
5311 		return 0;
5312 
5313 	do {
5314 		aqb = sc->sc_arq_live[prod];
5315 		iaq = &arq[prod];
5316 
5317 		if (aqb == NULL) {
5318 			aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle);
5319 			if (aqb != NULL) {
5320 				SIMPLEQ_REMOVE(&sc->sc_arq_idle, aqb,
5321 				    ixl_aq_buf, aqb_entry);
5322 			} else if ((aqb = ixl_aqb_alloc(sc)) == NULL) {
5323 				break;
5324 			}
5325 
5326 			sc->sc_arq_live[prod] = aqb;
5327 			memset(aqb->aqb_data, 0, aqb->aqb_size);
5328 
5329 			bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0,
5330 			    aqb->aqb_size, BUS_DMASYNC_PREREAD);
5331 
5332 			iaq->iaq_flags = htole16(IXL_AQ_BUF |
5333 			    (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ?
5334 			    IXL_AQ_LB : 0));
5335 			iaq->iaq_opcode = 0;
5336 			iaq->iaq_datalen = htole16(aqb->aqb_size);
5337 			iaq->iaq_retval = 0;
5338 			iaq->iaq_cookie = 0;
5339 			iaq->iaq_param[0] = 0;
5340 			iaq->iaq_param[1] = 0;
5341 			ixl_aq_dva(iaq, aqb->aqb_map->dm_segs[0].ds_addr);
5342 		}
5343 
5344 		prod++;
5345 		prod &= IXL_AQ_MASK;
5346 
5347 		post = 1;
5348 
5349 	} while (--n);
5350 
5351 	if (post) {
5352 		sc->sc_arq_prod = prod;
5353 		ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
5354 	}
5355 
5356 	return post;
5357 }
5358 
5359 static void
5360 ixl_arq_unfill(struct ixl_softc *sc)
5361 {
5362 	struct ixl_aq_buf *aqb;
5363 	unsigned int i;
5364 
5365 	for (i = 0; i < __arraycount(sc->sc_arq_live); i++) {
5366 		aqb = sc->sc_arq_live[i];
5367 		if (aqb == NULL)
5368 			continue;
5369 
5370 		sc->sc_arq_live[i] = NULL;
5371 		bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, aqb->aqb_size,
5372 		    BUS_DMASYNC_POSTREAD);
5373 		ixl_aqb_free(sc, aqb);
5374 	}
5375 
5376 	while ((aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle)) != NULL) {
5377 		SIMPLEQ_REMOVE(&sc->sc_arq_idle, aqb,
5378 		    ixl_aq_buf, aqb_entry);
5379 		ixl_aqb_free(sc, aqb);
5380 	}
5381 }
5382 
5383 static void
5384 ixl_clear_hw(struct ixl_softc *sc)
5385 {
5386 	uint32_t num_queues, base_queue;
5387 	uint32_t num_pf_int;
5388 	uint32_t num_vf_int;
5389 	uint32_t num_vfs;
5390 	uint32_t i, j;
5391 	uint32_t val;
5392 	uint32_t eol = 0x7ff;
5393 
5394 	/* get number of interrupts, queues, and vfs */
5395 	val = ixl_rd(sc, I40E_GLPCI_CNF2);
5396 	num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
5397 	    I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
5398 	num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
5399 	    I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;
5400 
5401 	val = ixl_rd(sc, I40E_PFLAN_QALLOC);
5402 	base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
5403 	    I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
5404 	j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
5405 	    I40E_PFLAN_QALLOC_LASTQ_SHIFT;
5406 	if (val & I40E_PFLAN_QALLOC_VALID_MASK)
5407 		num_queues = (j - base_queue) + 1;
5408 	else
5409 		num_queues = 0;
5410 
5411 	val = ixl_rd(sc, I40E_PF_VT_PFALLOC);
5412 	i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
5413 	    I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
5414 	j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
5415 	    I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
5416 	if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
5417 		num_vfs = (j - i) + 1;
5418 	else
5419 		num_vfs = 0;
5420 
5421 	/* stop all the interrupts */
5422 	ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0);
5423 	ixl_flush(sc);
5424 	val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
5425 	for (i = 0; i < num_pf_int - 2; i++)
5426 		ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), val);
5427 	ixl_flush(sc);
5428 
5429 	/* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
5430 	val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
5431 	ixl_wr(sc, I40E_PFINT_LNKLST0, val);
5432 	for (i = 0; i < num_pf_int - 2; i++)
5433 		ixl_wr(sc, I40E_PFINT_LNKLSTN(i), val);
5434 	val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
5435 	for (i = 0; i < num_vfs; i++)
5436 		ixl_wr(sc, I40E_VPINT_LNKLST0(i), val);
5437 	for (i = 0; i < num_vf_int - 2; i++)
5438 		ixl_wr(sc, I40E_VPINT_LNKLSTN(i), val);
5439 
5440 	/* warn the HW of the coming Tx disables */
5441 	for (i = 0; i < num_queues; i++) {
5442 		uint32_t abs_queue_idx = base_queue + i;
5443 		uint32_t reg_block = 0;
5444 
5445 		if (abs_queue_idx >= 128) {
5446 			reg_block = abs_queue_idx / 128;
5447 			abs_queue_idx %= 128;
5448 		}
5449 
5450 		val = ixl_rd(sc, I40E_GLLAN_TXPRE_QDIS(reg_block));
5451 		val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
5452 		val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
5453 		val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
5454 
5455 		ixl_wr(sc, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
5456 	}
5457 	delaymsec(400);
5458 
5459 	/* stop all the queues */
5460 	for (i = 0; i < num_queues; i++) {
5461 		ixl_wr(sc, I40E_QINT_TQCTL(i), 0);
5462 		ixl_wr(sc, I40E_QTX_ENA(i), 0);
5463 		ixl_wr(sc, I40E_QINT_RQCTL(i), 0);
5464 		ixl_wr(sc, I40E_QRX_ENA(i), 0);
5465 	}
5466 
5467 	/* short wait for all queue disables to settle */
5468 	delaymsec(50);
5469 }
5470 
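/*
 * Perform a PF software reset: wait out any global reset already in
 * progress, wait for the firmware to report CORE/GLOBAL configuration
 * done in GLNVM_ULD, then (unless a global reset was observed, which
 * makes it unnecessary) trigger PFSWR in PFGEN_CTRL and poll for it to
 * clear.
 */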
5471 static int
5472 ixl_pf_reset(struct ixl_softc *sc)
5473 {
5474 	uint32_t cnt = 0;
5475 	uint32_t cnt1 = 0;
5476 	uint32_t reg = 0, reg0 = 0;
5477 	uint32_t grst_del;
5478 
5479 	/*
5480 	 * Poll for Global Reset steady state in case of recent GRST.
5481 	 * The grst delay value is in 100ms units, and we'll wait a
5482 	 * couple counts longer to be sure we don't just miss the end.
5483 	 */
5484 	grst_del = ixl_rd(sc, I40E_GLGEN_RSTCTL);
5485 	grst_del &= I40E_GLGEN_RSTCTL_GRSTDEL_MASK;
5486 	grst_del >>= I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
5487 
5488 	grst_del = grst_del * 20;
5489 
5490 	for (cnt = 0; cnt < grst_del; cnt++) {
5491 		reg = ixl_rd(sc, I40E_GLGEN_RSTAT);
5492 		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
5493 			break;
5494 		delaymsec(100);
5495 	}
5496 	if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
5497 		aprint_error(", Global reset polling failed to complete\n");
5498 		return -1;
5499 	}
5500 
5501 	/* Now Wait for the FW to be ready */
5502 	for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
5503 		reg = ixl_rd(sc, I40E_GLNVM_ULD);
5504 		reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
5505 		    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
5506 		if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
5507 		    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))
5508 			break;
5509 
5510 		delaymsec(10);
5511 	}
5512 	if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
5513 	    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
5514 		aprint_error(", wait for FW Reset complete timed out "
5515 		    "(I40E_GLNVM_ULD = 0x%x)\n", reg);
5516 		return -1;
5517 	}
5518 
5519 	/*
5520 	 * If there was a Global Reset in progress when we got here,
5521 	 * we don't need to do the PF Reset
5522 	 */
5523 	if (cnt == 0) {
5524 		reg = ixl_rd(sc, I40E_PFGEN_CTRL);
5525 		ixl_wr(sc, I40E_PFGEN_CTRL, reg | I40E_PFGEN_CTRL_PFSWR_MASK);
5526 		for (cnt = 0; cnt < I40E_PF_RESET_WAIT_COUNT; cnt++) {
5527 			reg = ixl_rd(sc, I40E_PFGEN_CTRL);
5528 			if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
5529 				break;
5530 			delaymsec(1);
5531 
5532 			reg0 = ixl_rd(sc, I40E_GLGEN_RSTAT);
5533 			if (reg0 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
5534 				aprint_error(", Core reset upcoming."
5535 				    " Skipping PF reset request\n");
5536 				return -1;
5537 			}
5538 		}
5539 		if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
5540 			aprint_error(", PF reset polling failed to complete"
5541 			    " (I40E_PFGEN_CTRL = 0x%x)\n", reg);
5542 			return -1;
5543 		}
5544 	}
5545 
5546 	return 0;
5547 }
5548 
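/*
 * Allocate a single-segment DMA region: create the map, allocate and
 * map the memory, load it into the map, and zero it.  On failure the
 * steps already taken are unwound in reverse order.
 */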
5549 static int
5550 ixl_dmamem_alloc(struct ixl_softc *sc, struct ixl_dmamem *ixm,
5551     bus_size_t size, bus_size_t align)
5552 {
5553 	ixm->ixm_size = size;
5554 
5555 	if (bus_dmamap_create(sc->sc_dmat, ixm->ixm_size, 1,
5556 	    ixm->ixm_size, 0,
5557 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
5558 	    &ixm->ixm_map) != 0)
5559 		return 1;
5560 	if (bus_dmamem_alloc(sc->sc_dmat, ixm->ixm_size,
5561 	    align, 0, &ixm->ixm_seg, 1, &ixm->ixm_nsegs,
5562 	    BUS_DMA_WAITOK) != 0)
5563 		goto destroy;
5564 	if (bus_dmamem_map(sc->sc_dmat, &ixm->ixm_seg, ixm->ixm_nsegs,
5565 	    ixm->ixm_size, &ixm->ixm_kva, BUS_DMA_WAITOK) != 0)
5566 		goto free;
5567 	if (bus_dmamap_load(sc->sc_dmat, ixm->ixm_map, ixm->ixm_kva,
5568 	    ixm->ixm_size, NULL, BUS_DMA_WAITOK) != 0)
5569 		goto unmap;
5570 
5571 	memset(ixm->ixm_kva, 0, ixm->ixm_size);
5572 
5573 	return 0;
5574 unmap:
5575 	bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
5576 free:
5577 	bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
5578 destroy:
5579 	bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
5580 	return 1;
5581 }
5582 
5583 static void
5584 ixl_dmamem_free(struct ixl_softc *sc, struct ixl_dmamem *ixm)
5585 {
5586 	bus_dmamap_unload(sc->sc_dmat, ixm->ixm_map);
5587 	bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
5588 	bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
5589 	bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
5590 }
5591 
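/*
 * Switch from "ignore VLAN" filters to perfect-match filters for the
 * device MAC and broadcast addresses, then add a perfect-match entry
 * for each of those addresses on every VLAN ID configured on the
 * interface.
 */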
5592 static int
5593 ixl_setup_vlan_hwfilter(struct ixl_softc *sc)
5594 {
5595 	struct ethercom *ec = &sc->sc_ec;
5596 	struct vlanid_list *vlanidp;
5597 	int rv;
5598 
5599 	ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
5600 	    IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
5601 	ixl_remove_macvlan(sc, etherbroadcastaddr, 0,
5602 	    IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
5603 
5604 	rv = ixl_add_macvlan(sc, sc->sc_enaddr, 0,
5605 	    IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
5606 	if (rv != 0)
5607 		return rv;
5608 	rv = ixl_add_macvlan(sc, etherbroadcastaddr, 0,
5609 	    IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
5610 	if (rv != 0)
5611 		return rv;
5612 
5613 	ETHER_LOCK(ec);
5614 	SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
5615 		rv = ixl_add_macvlan(sc, sc->sc_enaddr,
5616 		    vlanidp->vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
5617 		if (rv != 0)
5618 			break;
5619 		rv = ixl_add_macvlan(sc, etherbroadcastaddr,
5620 		    vlanidp->vid, IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH);
5621 		if (rv != 0)
5622 			break;
5623 	}
5624 	ETHER_UNLOCK(ec);
5625 
5626 	return rv;
5627 }
5628 
5629 static void
5630 ixl_teardown_vlan_hwfilter(struct ixl_softc *sc)
5631 {
5632 	struct vlanid_list *vlanidp;
5633 	struct ethercom *ec = &sc->sc_ec;
5634 
5635 	ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
5636 	    IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
5637 	ixl_remove_macvlan(sc, etherbroadcastaddr, 0,
5638 	    IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
5639 
5640 	ETHER_LOCK(ec);
5641 	SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
5642 		ixl_remove_macvlan(sc, sc->sc_enaddr,
5643 		    vlanidp->vid, IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
5644 		ixl_remove_macvlan(sc, etherbroadcastaddr,
5645 		    vlanidp->vid, IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH);
5646 	}
5647 	ETHER_UNLOCK(ec);
5648 
5649 	ixl_add_macvlan(sc, sc->sc_enaddr, 0,
5650 	    IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
5651 	ixl_add_macvlan(sc, etherbroadcastaddr, 0,
5652 	    IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
5653 }
5654 
5655 static int
5656 ixl_update_macvlan(struct ixl_softc *sc)
5657 {
5658 	int rv = 0;
5659 	int next_ec_capenable = sc->sc_ec.ec_capenable;
5660 
5661 	if (ISSET(next_ec_capenable, ETHERCAP_VLAN_HWFILTER)) {
5662 		rv = ixl_setup_vlan_hwfilter(sc);
5663 		if (rv != 0)
5664 			ixl_teardown_vlan_hwfilter(sc);
5665 	} else {
5666 		ixl_teardown_vlan_hwfilter(sc);
5667 	}
5668 
5669 	return rv;
5670 }
5671 
5672 static int
5673 ixl_ifflags_cb(struct ethercom *ec)
5674 {
5675 	struct ifnet *ifp = &ec->ec_if;
5676 	struct ixl_softc *sc = ifp->if_softc;
5677 	int rv, change, reset_bits;
5678 
5679 	mutex_enter(&sc->sc_cfg_lock);
5680 
5681 	change = ec->ec_capenable ^ sc->sc_cur_ec_capenable;
5682 	reset_bits = change & (ETHERCAP_VLAN_HWTAGGING | ETHERCAP_VLAN_MTU);
5683 	if (reset_bits != 0) {
5684 		sc->sc_cur_ec_capenable ^= reset_bits;
5685 		rv = ENETRESET;
5686 		goto out;
5687 	}
5688 
5689 	if (ISSET(change, ETHERCAP_VLAN_HWFILTER)) {
5690 		rv = ixl_update_macvlan(sc);
5691 		if (rv == 0) {
5692 			sc->sc_cur_ec_capenable ^= ETHERCAP_VLAN_HWFILTER;
5693 		} else {
5694 			CLR(ec->ec_capenable, ETHERCAP_VLAN_HWFILTER);
5695 			CLR(sc->sc_cur_ec_capenable, ETHERCAP_VLAN_HWFILTER);
5696 		}
5697 	}
5698 
5699 	rv = ixl_iff(sc);
5700 out:
5701 	mutex_exit(&sc->sc_cfg_lock);
5702 
5703 	return rv;
5704 }
5705 
5706 static int
5707 ixl_set_link_status_locked(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
5708 {
5709 	const struct ixl_aq_link_status *status;
5710 	const struct ixl_phy_type *itype;
5711 
5712 	uint64_t ifm_active = IFM_ETHER;
5713 	uint64_t ifm_status = IFM_AVALID;
5714 	int link_state = LINK_STATE_DOWN;
5715 	uint64_t baudrate = 0;
5716 
5717 	status = (const struct ixl_aq_link_status *)iaq->iaq_param;
5718 	if (!ISSET(status->link_info, IXL_AQ_LINK_UP_FUNCTION)) {
5719 		ifm_active |= IFM_NONE;
5720 		goto done;
5721 	}
5722 
5723 	ifm_active |= IFM_FDX;
5724 	ifm_status |= IFM_ACTIVE;
5725 	link_state = LINK_STATE_UP;
5726 
5727 	itype = ixl_search_phy_type(status->phy_type);
5728 	if (itype != NULL)
5729 		ifm_active |= itype->ifm_type;
5730 
5731 	if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_TX))
5732 		ifm_active |= IFM_ETH_TXPAUSE;
5733 	if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_RX))
5734 		ifm_active |= IFM_ETH_RXPAUSE;
5735 
5736 	baudrate = ixl_search_link_speed(status->link_speed);
5737 
5738 done:
5739 	/* sc->sc_cfg_lock held except during attach */
5740 	sc->sc_media_active = ifm_active;
5741 	sc->sc_media_status = ifm_status;
5742 
5743 	sc->sc_ec.ec_if.if_baudrate = baudrate;
5744 
5745 	return link_state;
5746 }
5747 
5748 static int
5749 ixl_establish_intx(struct ixl_softc *sc)
5750 {
5751 	pci_chipset_tag_t pc = sc->sc_pa.pa_pc;
5752 	pci_intr_handle_t *intr;
5753 	char xnamebuf[32];
5754 	char intrbuf[PCI_INTRSTR_LEN];
5755 	char const *intrstr;
5756 
5757 	KASSERT(sc->sc_nintrs == 1);
5758 
5759 	intr = &sc->sc_ihp[0];
5760 
5761 	intrstr = pci_intr_string(pc, *intr, intrbuf, sizeof(intrbuf));
5762 	snprintf(xnamebuf, sizeof(xnamebuf), "%s:legacy",
5763 	    device_xname(sc->sc_dev));
5764 
5765 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, *intr, IPL_NET, ixl_intr,
5766 	    sc, xnamebuf);
5767 
5768 	if (sc->sc_ihs[0] == NULL) {
5769 		aprint_error_dev(sc->sc_dev,
5770 		    "unable to establish interrupt at %s\n", intrstr);
5771 		return -1;
5772 	}
5773 
5774 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
5775 	return 0;
5776 }
5777 
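/*
 * Establish MSI-X handlers: vector 0 services the admin queue and
 * "other" events, the remaining vectors service one Tx/Rx queue pair
 * each.  Handlers are bound to CPUs with interrupt_distribute() when
 * possible so queue interrupts spread across CPUs.
 */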
5778 static int
5779 ixl_establish_msix(struct ixl_softc *sc)
5780 {
5781 	pci_chipset_tag_t pc = sc->sc_pa.pa_pc;
5782 	kcpuset_t *affinity;
5783 	unsigned int vector = 0;
5784 	unsigned int i;
5785 	int affinity_to, r;
5786 	char xnamebuf[32];
5787 	char intrbuf[PCI_INTRSTR_LEN];
5788 	char const *intrstr;
5789 
5790 	kcpuset_create(&affinity, false);
5791 
5792 	/* the "other" intr is mapped to vector 0 */
5793 	vector = 0;
5794 	intrstr = pci_intr_string(pc, sc->sc_ihp[vector],
5795 	    intrbuf, sizeof(intrbuf));
5796 	snprintf(xnamebuf, sizeof(xnamebuf), "%s others",
5797 	    device_xname(sc->sc_dev));
5798 	sc->sc_ihs[vector] = pci_intr_establish_xname(pc,
5799 	    sc->sc_ihp[vector], IPL_NET, ixl_other_intr,
5800 	    sc, xnamebuf);
5801 	if (sc->sc_ihs[vector] == NULL) {
5802 		aprint_error_dev(sc->sc_dev,
5803 		    "unable to establish interrupt at %s\n", intrstr);
5804 		goto fail;
5805 	}
5806 
5807 	aprint_normal_dev(sc->sc_dev, "other interrupt at %s", intrstr);
5808 
5809 	affinity_to = ncpu > (int)sc->sc_nqueue_pairs_max ? 1 : 0;
5810 	affinity_to = (affinity_to + sc->sc_nqueue_pairs_max) % ncpu;
5811 
5812 	kcpuset_zero(affinity);
5813 	kcpuset_set(affinity, affinity_to);
5814 	r = interrupt_distribute(sc->sc_ihs[vector], affinity, NULL);
5815 	if (r == 0) {
5816 		aprint_normal(", affinity to %u", affinity_to);
5817 	}
5818 	aprint_normal("\n");
5819 	vector++;
5820 
5821 	sc->sc_msix_vector_queue = vector;
5822 	affinity_to = ncpu > (int)sc->sc_nqueue_pairs_max ? 1 : 0;
5823 
5824 	for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
5825 		intrstr = pci_intr_string(pc, sc->sc_ihp[vector],
5826 		    intrbuf, sizeof(intrbuf));
5827 		snprintf(xnamebuf, sizeof(xnamebuf), "%s TXRX%d",
5828 		    device_xname(sc->sc_dev), i);
5829 
5830 		sc->sc_ihs[vector] = pci_intr_establish_xname(pc,
5831 		    sc->sc_ihp[vector], IPL_NET, ixl_queue_intr,
5832 		    (void *)&sc->sc_qps[i], xnamebuf);
5833 
5834 		if (sc->sc_ihs[vector] == NULL) {
5835 			aprint_error_dev(sc->sc_dev,
5836 			    "unable to establish interrupt at %s\n", intrstr);
5837 			goto fail;
5838 		}
5839 
5840 		aprint_normal_dev(sc->sc_dev,
5841 		    "for TXRX%d interrupt at %s", i, intrstr);
5842 
5843 		kcpuset_zero(affinity);
5844 		kcpuset_set(affinity, affinity_to);
5845 		r = interrupt_distribute(sc->sc_ihs[vector], affinity, NULL);
5846 		if (r == 0) {
5847 			aprint_normal(", affinity to %u", affinity_to);
5848 			affinity_to = (affinity_to + 1) % ncpu;
5849 		}
5850 		aprint_normal("\n");
5851 		vector++;
5852 	}
5853 
5854 	kcpuset_destroy(affinity);
5855 
5856 	return 0;
5857 fail:
5858 	for (i = 0; i < vector; i++) {
5859 		pci_intr_disestablish(pc, sc->sc_ihs[i]);
5860 	}
5861 
5862 	sc->sc_msix_vector_queue = 0;
5864 	kcpuset_destroy(affinity);
5865 
5866 	return -1;
5867 }
5868 
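/*
 * Program the per-queue interrupt cause registers.  With MSI-X each
 * queue pair gets its own vector; otherwise everything is chained onto
 * the single "NOTX" vector through the PFINT linked-list registers.
 * The Rx cause chains to the Tx queue and the Tx cause terminates the
 * list with the EOL index.
 */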
5869 static void
5870 ixl_config_queue_intr(struct ixl_softc *sc)
5871 {
5872 	unsigned int i, vector;
5873 
5874 	if (sc->sc_intrtype == PCI_INTR_TYPE_MSIX) {
5875 		vector = sc->sc_msix_vector_queue;
5876 	} else {
5877 		vector = I40E_INTR_NOTX_INTR;
5878 
5879 		ixl_wr(sc, I40E_PFINT_LNKLST0,
5880 		    (I40E_INTR_NOTX_QUEUE <<
5881 		     I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
5882 		    (I40E_QUEUE_TYPE_RX <<
5883 		     I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
5884 	}
5885 
5886 	for (i = 0; i < sc->sc_nqueue_pairs; i++) {
5887 		ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), 0);
5888 		ixl_flush(sc);
5889 
5890 		ixl_wr(sc, I40E_PFINT_LNKLSTN(i),
5891 		    ((i) << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
5892 		    (I40E_QUEUE_TYPE_RX <<
5893 		     I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
5894 
5895 		ixl_wr(sc, I40E_QINT_RQCTL(i),
5896 		    (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
5897 		    (I40E_ITR_INDEX_RX <<
5898 		     I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
5899 		    (I40E_INTR_NOTX_RX_QUEUE <<
5900 		     I40E_QINT_RQCTL_MSIX0_INDX_SHIFT) |
5901 		    (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
5902 		    (I40E_QUEUE_TYPE_TX <<
5903 		     I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
5904 		    I40E_QINT_RQCTL_CAUSE_ENA_MASK);
5905 
5906 		ixl_wr(sc, I40E_QINT_TQCTL(i),
5907 		    (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
5908 		    (I40E_ITR_INDEX_TX <<
5909 		     I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
5910 		    (I40E_INTR_NOTX_TX_QUEUE <<
5911 		     I40E_QINT_TQCTL_MSIX0_INDX_SHIFT) |
5912 		    (I40E_QUEUE_TYPE_EOL <<
5913 		     I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
5914 		    (I40E_QUEUE_TYPE_RX <<
5915 		     I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT) |
5916 		     I40E_QINT_TQCTL_CAUSE_ENA_MASK);
5917 
5918 		if (sc->sc_intrtype == PCI_INTR_TYPE_MSIX) {
5919 			ixl_wr(sc, I40E_PFINT_ITRN(I40E_ITR_INDEX_RX, i),
5920 			    sc->sc_itr_rx);
5921 			ixl_wr(sc, I40E_PFINT_ITRN(I40E_ITR_INDEX_TX, i),
5922 			    sc->sc_itr_tx);
5923 			vector++;
5924 		}
5925 	}
5926 	ixl_flush(sc);
5927 
5928 	ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_RX), sc->sc_itr_rx);
5929 	ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_TX), sc->sc_itr_tx);
5930 	ixl_flush(sc);
5931 }
5932 
5933 static void
5934 ixl_config_other_intr(struct ixl_softc *sc)
5935 {
5936 	ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0);
5937 	(void)ixl_rd(sc, I40E_PFINT_ICR0);
5938 
5939 	ixl_wr(sc, I40E_PFINT_ICR0_ENA,
5940 	    I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
5941 	    I40E_PFINT_ICR0_ENA_GRST_MASK |
5942 	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
5943 	    I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
5944 	    I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
5945 	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
5946 	    I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
5947 	    I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
5948 	    I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK);
5949 
5950 	ixl_wr(sc, I40E_PFINT_LNKLST0, 0x7FF);
5951 	ixl_wr(sc, I40E_PFINT_ITR0(I40E_ITR_INDEX_OTHER), 0);
5952 	ixl_wr(sc, I40E_PFINT_STAT_CTL0,
5953 	    (I40E_ITR_INDEX_OTHER <<
5954 	     I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT));
5955 	ixl_flush(sc);
5956 }
5957 
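/*
 * Allocate and establish interrupts: prefer MSI-X with one vector per
 * queue pair plus one for admin/other events; if MSI-X allocation or
 * establishment fails (or is disabled by the nomsix property), retry
 * with a single INTx interrupt.
 */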
5958 static int
5959 ixl_setup_interrupts(struct ixl_softc *sc)
5960 {
5961 	struct pci_attach_args *pa = &sc->sc_pa;
5962 	pci_intr_type_t max_type, intr_type;
5963 	int counts[PCI_INTR_TYPE_SIZE];
5964 	int error;
5965 	unsigned int i;
5966 	bool retry;
5967 
5968 	memset(counts, 0, sizeof(counts));
5969 	max_type = PCI_INTR_TYPE_MSIX;
5970 	/* QPs + other interrupt */
5971 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueue_pairs_max + 1;
5972 	counts[PCI_INTR_TYPE_INTX] = 1;
5973 
5974 	if (ixl_param_nomsix)
5975 		counts[PCI_INTR_TYPE_MSIX] = 0;
5976 
5977 	do {
5978 		retry = false;
5979 		error = pci_intr_alloc(pa, &sc->sc_ihp, counts, max_type);
5980 		if (error != 0) {
5981 			aprint_error_dev(sc->sc_dev,
5982 			    "couldn't map interrupt\n");
5983 			break;
5984 		}
5985 
5986 		intr_type = pci_intr_type(pa->pa_pc, sc->sc_ihp[0]);
5987 		sc->sc_nintrs = counts[intr_type];
5988 		KASSERT(sc->sc_nintrs > 0);
5989 
5990 		for (i = 0; i < sc->sc_nintrs; i++) {
5991 			pci_intr_setattr(pa->pa_pc, &sc->sc_ihp[i],
5992 			    PCI_INTR_MPSAFE, true);
5993 		}
5994 
5995 		sc->sc_ihs = kmem_zalloc(sizeof(sc->sc_ihs[0]) * sc->sc_nintrs,
5996 		    KM_SLEEP);
5997 
5998 		if (intr_type == PCI_INTR_TYPE_MSIX) {
5999 			error = ixl_establish_msix(sc);
6000 			if (error) {
6001 				counts[PCI_INTR_TYPE_MSIX] = 0;
6002 				retry = true;
6003 			}
6004 		} else if (intr_type == PCI_INTR_TYPE_INTX) {
6005 			error = ixl_establish_intx(sc);
6006 		} else {
6007 			error = -1;
6008 		}
6009 
6010 		if (error) {
6011 			kmem_free(sc->sc_ihs,
6012 			    sizeof(sc->sc_ihs[0]) * sc->sc_nintrs);
6013 			pci_intr_release(pa->pa_pc, sc->sc_ihp, sc->sc_nintrs);
6014 		} else {
6015 			sc->sc_intrtype = intr_type;
6016 		}
6017 	} while (retry);
6018 
6019 	return error;
6020 }
6021 
6022 static void
6023 ixl_teardown_interrupts(struct ixl_softc *sc)
6024 {
6025 	struct pci_attach_args *pa = &sc->sc_pa;
6026 	unsigned int i;
6027 
6028 	for (i = 0; i < sc->sc_nintrs; i++) {
6029 		pci_intr_disestablish(pa->pa_pc, sc->sc_ihs[i]);
6030 	}
6031 
6032 	pci_intr_release(pa->pa_pc, sc->sc_ihp, sc->sc_nintrs);
6033 
6034 	kmem_free(sc->sc_ihs, sizeof(sc->sc_ihs[0]) * sc->sc_nintrs);
6035 	sc->sc_ihs = NULL;
6036 	sc->sc_nintrs = 0;
6037 }
6038 
6039 static int
6040 ixl_setup_stats(struct ixl_softc *sc)
6041 {
6042 	struct ixl_queue_pair *qp;
6043 	struct ixl_tx_ring *txr;
6044 	struct ixl_rx_ring *rxr;
6045 	struct ixl_stats_counters *isc;
6046 	unsigned int i;
6047 
6048 	for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
6049 		qp = &sc->sc_qps[i];
6050 		txr = qp->qp_txr;
6051 		rxr = qp->qp_rxr;
6052 
6053 		evcnt_attach_dynamic(&txr->txr_defragged, EVCNT_TYPE_MISC,
6054 		    NULL, qp->qp_name, "m_defrag succeeded");
6055 		evcnt_attach_dynamic(&txr->txr_defrag_failed, EVCNT_TYPE_MISC,
6056 		    NULL, qp->qp_name, "m_defrag failed");
6057 		evcnt_attach_dynamic(&txr->txr_pcqdrop, EVCNT_TYPE_MISC,
6058 		    NULL, qp->qp_name, "Dropped in pcq");
6059 		evcnt_attach_dynamic(&txr->txr_transmitdef, EVCNT_TYPE_MISC,
6060 		    NULL, qp->qp_name, "Deferred transmit");
6061 		evcnt_attach_dynamic(&txr->txr_intr, EVCNT_TYPE_INTR,
6062 		    NULL, qp->qp_name, "Interrupt on queue");
6063 		evcnt_attach_dynamic(&txr->txr_defer, EVCNT_TYPE_MISC,
6064 		    NULL, qp->qp_name, "Handled queue in softint/workqueue");
6065 
6066 		evcnt_attach_dynamic(&rxr->rxr_mgethdr_failed, EVCNT_TYPE_MISC,
6067 		    NULL, qp->qp_name, "MGETHDR failed");
6068 		evcnt_attach_dynamic(&rxr->rxr_mgetcl_failed, EVCNT_TYPE_MISC,
6069 		    NULL, qp->qp_name, "MCLGET failed");
6070 		evcnt_attach_dynamic(&rxr->rxr_mbuf_load_failed,
6071 		    EVCNT_TYPE_MISC, NULL, qp->qp_name,
6072 		    "bus_dmamap_load_mbuf failed");
6073 		evcnt_attach_dynamic(&rxr->rxr_intr, EVCNT_TYPE_INTR,
6074 		    NULL, qp->qp_name, "Interrupt on queue");
6075 		evcnt_attach_dynamic(&rxr->rxr_defer, EVCNT_TYPE_MISC,
6076 		    NULL, qp->qp_name, "Handled queue in softint/workqueue");
6077 	}
6078 
6079 	evcnt_attach_dynamic(&sc->sc_event_atq, EVCNT_TYPE_INTR,
6080 	    NULL, device_xname(sc->sc_dev), "Interrupt for other events");
6081 	evcnt_attach_dynamic(&sc->sc_event_link, EVCNT_TYPE_MISC,
6082 	    NULL, device_xname(sc->sc_dev), "Link status event");
6083 	evcnt_attach_dynamic(&sc->sc_event_ecc_err, EVCNT_TYPE_MISC,
6084 	    NULL, device_xname(sc->sc_dev), "ECC error");
6085 	evcnt_attach_dynamic(&sc->sc_event_pci_exception, EVCNT_TYPE_MISC,
6086 	    NULL, device_xname(sc->sc_dev), "PCI exception");
6087 	evcnt_attach_dynamic(&sc->sc_event_crit_err, EVCNT_TYPE_MISC,
6088 	    NULL, device_xname(sc->sc_dev), "Critical error");
6089 
6090 	isc = &sc->sc_stats_counters;
6091 	evcnt_attach_dynamic(&isc->isc_crc_errors, EVCNT_TYPE_MISC,
6092 	    NULL, device_xname(sc->sc_dev), "CRC errors");
6093 	evcnt_attach_dynamic(&isc->isc_illegal_bytes, EVCNT_TYPE_MISC,
6094 	    NULL, device_xname(sc->sc_dev), "Illegal bytes");
6095 	evcnt_attach_dynamic(&isc->isc_mac_local_faults, EVCNT_TYPE_MISC,
6096 	    NULL, device_xname(sc->sc_dev), "Mac local faults");
6097 	evcnt_attach_dynamic(&isc->isc_mac_remote_faults, EVCNT_TYPE_MISC,
6098 	    NULL, device_xname(sc->sc_dev), "Mac remote faults");
6099 	evcnt_attach_dynamic(&isc->isc_link_xon_rx, EVCNT_TYPE_MISC,
6100 	    NULL, device_xname(sc->sc_dev), "Rx xon");
6101 	evcnt_attach_dynamic(&isc->isc_link_xon_tx, EVCNT_TYPE_MISC,
6102 	    NULL, device_xname(sc->sc_dev), "Tx xon");
6103 	evcnt_attach_dynamic(&isc->isc_link_xoff_rx, EVCNT_TYPE_MISC,
6104 	    NULL, device_xname(sc->sc_dev), "Rx xoff");
6105 	evcnt_attach_dynamic(&isc->isc_link_xoff_tx, EVCNT_TYPE_MISC,
6106 	    NULL, device_xname(sc->sc_dev), "Tx xoff");
6107 	evcnt_attach_dynamic(&isc->isc_rx_fragments, EVCNT_TYPE_MISC,
6108 	    NULL, device_xname(sc->sc_dev), "Rx fragments");
6109 	evcnt_attach_dynamic(&isc->isc_rx_jabber, EVCNT_TYPE_MISC,
6110 	    NULL, device_xname(sc->sc_dev), "Rx jabber");
6111 
6112 	evcnt_attach_dynamic(&isc->isc_rx_size_64, EVCNT_TYPE_MISC,
6113 	    NULL, device_xname(sc->sc_dev), "Rx size 64");
6114 	evcnt_attach_dynamic(&isc->isc_rx_size_127, EVCNT_TYPE_MISC,
6115 	    NULL, device_xname(sc->sc_dev), "Rx size 127");
6116 	evcnt_attach_dynamic(&isc->isc_rx_size_255, EVCNT_TYPE_MISC,
6117 	    NULL, device_xname(sc->sc_dev), "Rx size 255");
6118 	evcnt_attach_dynamic(&isc->isc_rx_size_511, EVCNT_TYPE_MISC,
6119 	    NULL, device_xname(sc->sc_dev), "Rx size 511");
6120 	evcnt_attach_dynamic(&isc->isc_rx_size_1023, EVCNT_TYPE_MISC,
6121 	    NULL, device_xname(sc->sc_dev), "Rx size 1023");
6122 	evcnt_attach_dynamic(&isc->isc_rx_size_1522, EVCNT_TYPE_MISC,
6123 	    NULL, device_xname(sc->sc_dev), "Rx size 1522");
6124 	evcnt_attach_dynamic(&isc->isc_rx_size_big, EVCNT_TYPE_MISC,
6125 	    NULL, device_xname(sc->sc_dev), "Rx jumbo packets");
6126 	evcnt_attach_dynamic(&isc->isc_rx_undersize, EVCNT_TYPE_MISC,
6127 	    NULL, device_xname(sc->sc_dev), "Rx under size");
6128 	evcnt_attach_dynamic(&isc->isc_rx_oversize, EVCNT_TYPE_MISC,
6129 	    NULL, device_xname(sc->sc_dev), "Rx over size");
6130 
6131 	evcnt_attach_dynamic(&isc->isc_rx_bytes, EVCNT_TYPE_MISC,
6132 	    NULL, device_xname(sc->sc_dev), "Rx bytes / port");
6133 	evcnt_attach_dynamic(&isc->isc_rx_discards, EVCNT_TYPE_MISC,
6134 	    NULL, device_xname(sc->sc_dev), "Rx discards / port");
6135 	evcnt_attach_dynamic(&isc->isc_rx_unicast, EVCNT_TYPE_MISC,
6136 	    NULL, device_xname(sc->sc_dev), "Rx unicast / port");
6137 	evcnt_attach_dynamic(&isc->isc_rx_multicast, EVCNT_TYPE_MISC,
6138 	    NULL, device_xname(sc->sc_dev), "Rx multicast / port");
6139 	evcnt_attach_dynamic(&isc->isc_rx_broadcast, EVCNT_TYPE_MISC,
6140 	    NULL, device_xname(sc->sc_dev), "Rx broadcast / port");
6141 
6142 	evcnt_attach_dynamic(&isc->isc_vsi_rx_bytes, EVCNT_TYPE_MISC,
6143 	    NULL, device_xname(sc->sc_dev), "Rx bytes / vsi");
6144 	evcnt_attach_dynamic(&isc->isc_vsi_rx_discards, EVCNT_TYPE_MISC,
6145 	    NULL, device_xname(sc->sc_dev), "Rx discards / vsi");
6146 	evcnt_attach_dynamic(&isc->isc_vsi_rx_unicast, EVCNT_TYPE_MISC,
6147 	    NULL, device_xname(sc->sc_dev), "Rx unicast / vsi");
6148 	evcnt_attach_dynamic(&isc->isc_vsi_rx_multicast, EVCNT_TYPE_MISC,
6149 	    NULL, device_xname(sc->sc_dev), "Rx multicast / vsi");
6150 	evcnt_attach_dynamic(&isc->isc_vsi_rx_broadcast, EVCNT_TYPE_MISC,
6151 	    NULL, device_xname(sc->sc_dev), "Rx broadcast / vsi");
6152 
6153 	evcnt_attach_dynamic(&isc->isc_tx_size_64, EVCNT_TYPE_MISC,
6154 	    NULL, device_xname(sc->sc_dev), "Tx size 64");
6155 	evcnt_attach_dynamic(&isc->isc_tx_size_127, EVCNT_TYPE_MISC,
6156 	    NULL, device_xname(sc->sc_dev), "Tx size 127");
6157 	evcnt_attach_dynamic(&isc->isc_tx_size_255, EVCNT_TYPE_MISC,
6158 	    NULL, device_xname(sc->sc_dev), "Tx size 255");
6159 	evcnt_attach_dynamic(&isc->isc_tx_size_511, EVCNT_TYPE_MISC,
6160 	    NULL, device_xname(sc->sc_dev), "Tx size 511");
6161 	evcnt_attach_dynamic(&isc->isc_tx_size_1023, EVCNT_TYPE_MISC,
6162 	    NULL, device_xname(sc->sc_dev), "Tx size 1023");
6163 	evcnt_attach_dynamic(&isc->isc_tx_size_1522, EVCNT_TYPE_MISC,
6164 	    NULL, device_xname(sc->sc_dev), "Tx size 1522");
6165 	evcnt_attach_dynamic(&isc->isc_tx_size_big, EVCNT_TYPE_MISC,
6166 	    NULL, device_xname(sc->sc_dev), "Tx jumbo packets");
6167 
6168 	evcnt_attach_dynamic(&isc->isc_tx_bytes, EVCNT_TYPE_MISC,
6169 	    NULL, device_xname(sc->sc_dev), "Tx bytes / port");
6170 	evcnt_attach_dynamic(&isc->isc_tx_dropped_link_down, EVCNT_TYPE_MISC,
6171 	    NULL, device_xname(sc->sc_dev),
6172 	    "Tx dropped due to link down / port");
6173 	evcnt_attach_dynamic(&isc->isc_tx_unicast, EVCNT_TYPE_MISC,
6174 	    NULL, device_xname(sc->sc_dev), "Tx unicast / port");
6175 	evcnt_attach_dynamic(&isc->isc_tx_multicast, EVCNT_TYPE_MISC,
6176 	    NULL, device_xname(sc->sc_dev), "Tx multicast / port");
6177 	evcnt_attach_dynamic(&isc->isc_tx_broadcast, EVCNT_TYPE_MISC,
6178 	    NULL, device_xname(sc->sc_dev), "Tx broadcast / port");
6179 
6180 	evcnt_attach_dynamic(&isc->isc_vsi_tx_bytes, EVCNT_TYPE_MISC,
6181 	    NULL, device_xname(sc->sc_dev), "Tx bytes / vsi");
6182 	evcnt_attach_dynamic(&isc->isc_vsi_tx_errors, EVCNT_TYPE_MISC,
6183 	    NULL, device_xname(sc->sc_dev), "Tx errors / vsi");
6184 	evcnt_attach_dynamic(&isc->isc_vsi_tx_unicast, EVCNT_TYPE_MISC,
6185 	    NULL, device_xname(sc->sc_dev), "Tx unicast / vsi");
6186 	evcnt_attach_dynamic(&isc->isc_vsi_tx_multicast, EVCNT_TYPE_MISC,
6187 	    NULL, device_xname(sc->sc_dev), "Tx multicast / vsi");
6188 	evcnt_attach_dynamic(&isc->isc_vsi_tx_broadcast, EVCNT_TYPE_MISC,
6189 	    NULL, device_xname(sc->sc_dev), "Tx broadcast / vsi");
6190 
6191 	sc->sc_stats_intval = ixl_param_stats_interval;
6192 	callout_init(&sc->sc_stats_callout, CALLOUT_MPSAFE);
6193 	callout_setfunc(&sc->sc_stats_callout, ixl_stats_callout, sc);
6194 	ixl_work_set(&sc->sc_stats_task, ixl_stats_update, sc);
6195 
6196 	return 0;
6197 }
6198 
6199 static void
6200 ixl_teardown_stats(struct ixl_softc *sc)
6201 {
6202 	struct ixl_tx_ring *txr;
6203 	struct ixl_rx_ring *rxr;
6204 	struct ixl_stats_counters *isc;
6205 	unsigned int i;
6206 
6207 	for (i = 0; i < sc->sc_nqueue_pairs_max; i++) {
6208 		txr = sc->sc_qps[i].qp_txr;
6209 		rxr = sc->sc_qps[i].qp_rxr;
6210 
6211 		evcnt_detach(&txr->txr_defragged);
6212 		evcnt_detach(&txr->txr_defrag_failed);
6213 		evcnt_detach(&txr->txr_pcqdrop);
6214 		evcnt_detach(&txr->txr_transmitdef);
6215 		evcnt_detach(&txr->txr_intr);
6216 		evcnt_detach(&txr->txr_defer);
6217 
6218 		evcnt_detach(&rxr->rxr_mgethdr_failed);
6219 		evcnt_detach(&rxr->rxr_mgetcl_failed);
6220 		evcnt_detach(&rxr->rxr_mbuf_load_failed);
6221 		evcnt_detach(&rxr->rxr_intr);
6222 		evcnt_detach(&rxr->rxr_defer);
6223 	}
6224 
6225 	isc = &sc->sc_stats_counters;
6226 	evcnt_detach(&isc->isc_crc_errors);
6227 	evcnt_detach(&isc->isc_illegal_bytes);
6228 	evcnt_detach(&isc->isc_mac_local_faults);
6229 	evcnt_detach(&isc->isc_mac_remote_faults);
6230 	evcnt_detach(&isc->isc_link_xon_rx);
6231 	evcnt_detach(&isc->isc_link_xon_tx);
6232 	evcnt_detach(&isc->isc_link_xoff_rx);
6233 	evcnt_detach(&isc->isc_link_xoff_tx);
6234 	evcnt_detach(&isc->isc_rx_fragments);
6235 	evcnt_detach(&isc->isc_rx_jabber);
6236 	evcnt_detach(&isc->isc_rx_bytes);
6237 	evcnt_detach(&isc->isc_rx_discards);
6238 	evcnt_detach(&isc->isc_rx_unicast);
6239 	evcnt_detach(&isc->isc_rx_multicast);
6240 	evcnt_detach(&isc->isc_rx_broadcast);
6241 	evcnt_detach(&isc->isc_rx_size_64);
6242 	evcnt_detach(&isc->isc_rx_size_127);
6243 	evcnt_detach(&isc->isc_rx_size_255);
6244 	evcnt_detach(&isc->isc_rx_size_511);
6245 	evcnt_detach(&isc->isc_rx_size_1023);
6246 	evcnt_detach(&isc->isc_rx_size_1522);
6247 	evcnt_detach(&isc->isc_rx_size_big);
6248 	evcnt_detach(&isc->isc_rx_undersize);
6249 	evcnt_detach(&isc->isc_rx_oversize);
6250 	evcnt_detach(&isc->isc_tx_bytes);
6251 	evcnt_detach(&isc->isc_tx_dropped_link_down);
6252 	evcnt_detach(&isc->isc_tx_unicast);
6253 	evcnt_detach(&isc->isc_tx_multicast);
6254 	evcnt_detach(&isc->isc_tx_broadcast);
6255 	evcnt_detach(&isc->isc_tx_size_64);
6256 	evcnt_detach(&isc->isc_tx_size_127);
6257 	evcnt_detach(&isc->isc_tx_size_255);
6258 	evcnt_detach(&isc->isc_tx_size_511);
6259 	evcnt_detach(&isc->isc_tx_size_1023);
6260 	evcnt_detach(&isc->isc_tx_size_1522);
6261 	evcnt_detach(&isc->isc_tx_size_big);
6262 	evcnt_detach(&isc->isc_vsi_rx_discards);
6263 	evcnt_detach(&isc->isc_vsi_rx_bytes);
6264 	evcnt_detach(&isc->isc_vsi_rx_unicast);
6265 	evcnt_detach(&isc->isc_vsi_rx_multicast);
6266 	evcnt_detach(&isc->isc_vsi_rx_broadcast);
6267 	evcnt_detach(&isc->isc_vsi_tx_errors);
6268 	evcnt_detach(&isc->isc_vsi_tx_bytes);
6269 	evcnt_detach(&isc->isc_vsi_tx_unicast);
6270 	evcnt_detach(&isc->isc_vsi_tx_multicast);
6271 	evcnt_detach(&isc->isc_vsi_tx_broadcast);
6272 
6273 	evcnt_detach(&sc->sc_event_atq);
6274 	evcnt_detach(&sc->sc_event_link);
6275 	evcnt_detach(&sc->sc_event_ecc_err);
6276 	evcnt_detach(&sc->sc_event_pci_exception);
6277 	evcnt_detach(&sc->sc_event_crit_err);
6278 
6279 	callout_destroy(&sc->sc_stats_callout);
6280 }
6281 
6282 static void
6283 ixl_stats_callout(void *xsc)
6284 {
6285 	struct ixl_softc *sc = xsc;
6286 
6287 	ixl_work_add(sc->sc_workq, &sc->sc_stats_task);
6288 	callout_schedule(&sc->sc_stats_callout, mstohz(sc->sc_stats_intval));
6289 }
6290 
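/*
 * Read a hardware statistics counter and return the increment since
 * the previous snapshot.  The counters are free-running 32-bit
 * (reg_hi == 0) or 48-bit values, so the delta is corrected for a
 * single wrap before the new value is stored as the next offset; the
 * first read (has_offset == false) only establishes the baseline.
 */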
6291 static uint64_t
6292 ixl_stat_delta(struct ixl_softc *sc, uint32_t reg_hi, uint32_t reg_lo,
6293     uint64_t *offset, bool has_offset)
6294 {
6295 	uint64_t value, delta;
6296 	int bitwidth;
6297 
6298 	bitwidth = reg_hi == 0 ? 32 : 48;
6299 
6300 	value = ixl_rd(sc, reg_lo);
6301 
6302 	if (bitwidth > 32) {
6303 		value |= ((uint64_t)ixl_rd(sc, reg_hi) << 32);
6304 	}
6305 
6306 	if (__predict_true(has_offset)) {
6307 		delta = value;
6308 		if (value < *offset)
6309 			delta += ((uint64_t)1 << bitwidth);
6310 		delta -= *offset;
6311 	} else {
6312 		delta = 0;
6313 	}
6314 	atomic_swap_64(offset, value);
6315 
6316 	return delta;
6317 }
6318 
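/*
 * Periodic statistics harvest: fold the port-level (GLPRT_*) and
 * VSI-level (GLV_*) hardware counters into the attached evcnts using
 * ixl_stat_delta() so each evcnt accumulates monotonically.
 */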
6319 static void
6320 ixl_stats_update(void *xsc)
6321 {
6322 	struct ixl_softc *sc = xsc;
6323 	struct ixl_stats_counters *isc;
6324 	uint64_t delta;
6325 
6326 	isc = &sc->sc_stats_counters;
6327 
6328 	/* errors */
6329 	delta = ixl_stat_delta(sc,
6330 	    0, I40E_GLPRT_CRCERRS(sc->sc_port),
6331 	    &isc->isc_crc_errors_offset, isc->isc_has_offset);
6332 	atomic_add_64(&isc->isc_crc_errors.ev_count, delta);
6333 
6334 	delta = ixl_stat_delta(sc,
6335 	    0, I40E_GLPRT_ILLERRC(sc->sc_port),
6336 	    &isc->isc_illegal_bytes_offset, isc->isc_has_offset);
6337 	atomic_add_64(&isc->isc_illegal_bytes.ev_count, delta);
6338 
6339 	/* rx */
6340 	delta = ixl_stat_delta(sc,
6341 	    I40E_GLPRT_GORCH(sc->sc_port), I40E_GLPRT_GORCL(sc->sc_port),
6342 	    &isc->isc_rx_bytes_offset, isc->isc_has_offset);
6343 	atomic_add_64(&isc->isc_rx_bytes.ev_count, delta);
6344 
6345 	delta = ixl_stat_delta(sc,
6346 	    0, I40E_GLPRT_RDPC(sc->sc_port),
6347 	    &isc->isc_rx_discards_offset, isc->isc_has_offset);
6348 	atomic_add_64(&isc->isc_rx_discards.ev_count, delta);
6349 
6350 	delta = ixl_stat_delta(sc,
6351 	    I40E_GLPRT_UPRCH(sc->sc_port), I40E_GLPRT_UPRCL(sc->sc_port),
6352 	    &isc->isc_rx_unicast_offset, isc->isc_has_offset);
6353 	atomic_add_64(&isc->isc_rx_unicast.ev_count, delta);
6354 
6355 	delta = ixl_stat_delta(sc,
6356 	    I40E_GLPRT_MPRCH(sc->sc_port), I40E_GLPRT_MPRCL(sc->sc_port),
6357 	    &isc->isc_rx_multicast_offset, isc->isc_has_offset);
6358 	atomic_add_64(&isc->isc_rx_multicast.ev_count, delta);
6359 
6360 	delta = ixl_stat_delta(sc,
6361 	    I40E_GLPRT_BPRCH(sc->sc_port), I40E_GLPRT_BPRCL(sc->sc_port),
6362 	    &isc->isc_rx_broadcast_offset, isc->isc_has_offset);
6363 	atomic_add_64(&isc->isc_rx_broadcast.ev_count, delta);
6364 
6365 	/* Packet size stats rx */
6366 	delta = ixl_stat_delta(sc,
6367 	    I40E_GLPRT_PRC64H(sc->sc_port), I40E_GLPRT_PRC64L(sc->sc_port),
6368 	    &isc->isc_rx_size_64_offset, isc->isc_has_offset);
6369 	atomic_add_64(&isc->isc_rx_size_64.ev_count, delta);
6370 
6371 	delta = ixl_stat_delta(sc,
6372 	    I40E_GLPRT_PRC127H(sc->sc_port), I40E_GLPRT_PRC127L(sc->sc_port),
6373 	    &isc->isc_rx_size_127_offset, isc->isc_has_offset);
6374 	atomic_add_64(&isc->isc_rx_size_127.ev_count, delta);
6375 
6376 	delta = ixl_stat_delta(sc,
6377 	    I40E_GLPRT_PRC255H(sc->sc_port), I40E_GLPRT_PRC255L(sc->sc_port),
6378 	    &isc->isc_rx_size_255_offset, isc->isc_has_offset);
6379 	atomic_add_64(&isc->isc_rx_size_255.ev_count, delta);
6380 
6381 	delta = ixl_stat_delta(sc,
6382 	    I40E_GLPRT_PRC511H(sc->sc_port), I40E_GLPRT_PRC511L(sc->sc_port),
6383 	    &isc->isc_rx_size_511_offset, isc->isc_has_offset);
6384 	atomic_add_64(&isc->isc_rx_size_511.ev_count, delta);
6385 
6386 	delta = ixl_stat_delta(sc,
6387 	    I40E_GLPRT_PRC1023H(sc->sc_port), I40E_GLPRT_PRC1023L(sc->sc_port),
6388 	    &isc->isc_rx_size_1023_offset, isc->isc_has_offset);
6389 	atomic_add_64(&isc->isc_rx_size_1023.ev_count, delta);
6390 
6391 	delta = ixl_stat_delta(sc,
6392 	    I40E_GLPRT_PRC1522H(sc->sc_port), I40E_GLPRT_PRC1522L(sc->sc_port),
6393 	    &isc->isc_rx_size_1522_offset, isc->isc_has_offset);
6394 	atomic_add_64(&isc->isc_rx_size_1522.ev_count, delta);
6395 
6396 	delta = ixl_stat_delta(sc,
6397 	    I40E_GLPRT_PRC9522H(sc->sc_port), I40E_GLPRT_PRC9522L(sc->sc_port),
6398 	    &isc->isc_rx_size_big_offset, isc->isc_has_offset);
6399 	atomic_add_64(&isc->isc_rx_size_big.ev_count, delta);
6400 
6401 	delta = ixl_stat_delta(sc,
6402 	    0, I40E_GLPRT_RUC(sc->sc_port),
6403 	    &isc->isc_rx_undersize_offset, isc->isc_has_offset);
6404 	atomic_add_64(&isc->isc_rx_undersize.ev_count, delta);
6405 
6406 	delta = ixl_stat_delta(sc,
6407 	    0, I40E_GLPRT_ROC(sc->sc_port),
6408 	    &isc->isc_rx_oversize_offset, isc->isc_has_offset);
6409 	atomic_add_64(&isc->isc_rx_oversize.ev_count, delta);
6410 
6411 	/* tx */
6412 	delta = ixl_stat_delta(sc,
6413 	    I40E_GLPRT_GOTCH(sc->sc_port), I40E_GLPRT_GOTCL(sc->sc_port),
6414 	    &isc->isc_tx_bytes_offset, isc->isc_has_offset);
6415 	atomic_add_64(&isc->isc_tx_bytes.ev_count, delta);
6416 
6417 	delta = ixl_stat_delta(sc,
6418 	    0, I40E_GLPRT_TDOLD(sc->sc_port),
6419 	    &isc->isc_tx_dropped_link_down_offset, isc->isc_has_offset);
6420 	atomic_add_64(&isc->isc_tx_dropped_link_down.ev_count, delta);
6421 
6422 	delta = ixl_stat_delta(sc,
6423 	    I40E_GLPRT_UPTCH(sc->sc_port), I40E_GLPRT_UPTCL(sc->sc_port),
6424 	    &isc->isc_tx_unicast_offset, isc->isc_has_offset);
6425 	atomic_add_64(&isc->isc_tx_unicast.ev_count, delta);
6426 
6427 	delta = ixl_stat_delta(sc,
6428 	    I40E_GLPRT_MPTCH(sc->sc_port), I40E_GLPRT_MPTCL(sc->sc_port),
6429 	    &isc->isc_tx_multicast_offset, isc->isc_has_offset);
6430 	atomic_add_64(&isc->isc_tx_multicast.ev_count, delta);
6431 
6432 	delta = ixl_stat_delta(sc,
6433 	    I40E_GLPRT_BPTCH(sc->sc_port), I40E_GLPRT_BPTCL(sc->sc_port),
6434 	    &isc->isc_tx_broadcast_offset, isc->isc_has_offset);
6435 	atomic_add_64(&isc->isc_tx_broadcast.ev_count, delta);
6436 
6437 	/* Packet size stats tx */
6438 	delta = ixl_stat_delta(sc,
6439 	    I40E_GLPRT_PTC64H(sc->sc_port), I40E_GLPRT_PTC64L(sc->sc_port),
6440 	    &isc->isc_tx_size_64_offset, isc->isc_has_offset);
6441 	atomic_add_64(&isc->isc_tx_size_64.ev_count, delta);
6442 
6443 	delta = ixl_stat_delta(sc,
6444 	    I40E_GLPRT_PTC127H(sc->sc_port), I40E_GLPRT_PTC127L(sc->sc_port),
6445 	    &isc->isc_tx_size_127_offset, isc->isc_has_offset);
6446 	atomic_add_64(&isc->isc_tx_size_127.ev_count, delta);
6447 
6448 	delta = ixl_stat_delta(sc,
6449 	    I40E_GLPRT_PTC255H(sc->sc_port), I40E_GLPRT_PTC255L(sc->sc_port),
6450 	    &isc->isc_tx_size_255_offset, isc->isc_has_offset);
6451 	atomic_add_64(&isc->isc_tx_size_255.ev_count, delta);
6452 
6453 	delta = ixl_stat_delta(sc,
6454 	    I40E_GLPRT_PTC511H(sc->sc_port), I40E_GLPRT_PTC511L(sc->sc_port),
6455 	    &isc->isc_tx_size_511_offset, isc->isc_has_offset);
6456 	atomic_add_64(&isc->isc_tx_size_511.ev_count, delta);
6457 
6458 	delta = ixl_stat_delta(sc,
6459 	    I40E_GLPRT_PTC1023H(sc->sc_port), I40E_GLPRT_PTC1023L(sc->sc_port),
6460 	    &isc->isc_tx_size_1023_offset, isc->isc_has_offset);
6461 	atomic_add_64(&isc->isc_tx_size_1023.ev_count, delta);
6462 
6463 	delta = ixl_stat_delta(sc,
6464 	    I40E_GLPRT_PTC1522H(sc->sc_port), I40E_GLPRT_PTC1522L(sc->sc_port),
6465 	    &isc->isc_tx_size_1522_offset, isc->isc_has_offset);
6466 	atomic_add_64(&isc->isc_tx_size_1522.ev_count, delta);
6467 
6468 	delta = ixl_stat_delta(sc,
6469 	    I40E_GLPRT_PTC9522H(sc->sc_port), I40E_GLPRT_PTC9522L(sc->sc_port),
6470 	    &isc->isc_tx_size_big_offset, isc->isc_has_offset);
6471 	atomic_add_64(&isc->isc_tx_size_big.ev_count, delta);
6472 
6473 	/* mac faults */
6474 	delta = ixl_stat_delta(sc,
6475 	    0, I40E_GLPRT_MLFC(sc->sc_port),
6476 	    &isc->isc_mac_local_faults_offset, isc->isc_has_offset);
6477 	atomic_add_64(&isc->isc_mac_local_faults.ev_count, delta);
6478 
6479 	delta = ixl_stat_delta(sc,
6480 	    0, I40E_GLPRT_MRFC(sc->sc_port),
6481 	    &isc->isc_mac_remote_faults_offset, isc->isc_has_offset);
6482 	atomic_add_64(&isc->isc_mac_remote_faults.ev_count, delta);
6483 
6484 	/* Flow control (LFC) stats */
6485 	delta = ixl_stat_delta(sc,
6486 	    0, I40E_GLPRT_LXONRXC(sc->sc_port),
6487 	    &isc->isc_link_xon_rx_offset, isc->isc_has_offset);
6488 	atomic_add_64(&isc->isc_link_xon_rx.ev_count, delta);
6489 
6490 	delta = ixl_stat_delta(sc,
6491 	    0, I40E_GLPRT_LXONTXC(sc->sc_port),
6492 	    &isc->isc_link_xon_tx_offset, isc->isc_has_offset);
6493 	atomic_add_64(&isc->isc_link_xon_tx.ev_count, delta);
6494 
6495 	delta = ixl_stat_delta(sc,
6496 	    0, I40E_GLPRT_LXOFFRXC(sc->sc_port),
6497 	    &isc->isc_link_xoff_rx_offset, isc->isc_has_offset);
6498 	atomic_add_64(&isc->isc_link_xoff_rx.ev_count, delta);
6499 
6500 	delta = ixl_stat_delta(sc,
6501 	    0, I40E_GLPRT_LXOFFTXC(sc->sc_port),
6502 	    &isc->isc_link_xoff_tx_offset, isc->isc_has_offset);
6503 	atomic_add_64(&isc->isc_link_xoff_tx.ev_count, delta);
6504 
6505 	/* fragments */
6506 	delta = ixl_stat_delta(sc,
6507 	    0, I40E_GLPRT_RFC(sc->sc_port),
6508 	    &isc->isc_rx_fragments_offset, isc->isc_has_offset);
6509 	atomic_add_64(&isc->isc_rx_fragments.ev_count, delta);
6510 
6511 	delta = ixl_stat_delta(sc,
6512 	    0, I40E_GLPRT_RJC(sc->sc_port),
6513 	    &isc->isc_rx_jabber_offset, isc->isc_has_offset);
6514 	atomic_add_64(&isc->isc_rx_jabber.ev_count, delta);
6515 
6516 	/* VSI rx counters */
6517 	delta = ixl_stat_delta(sc,
6518 	    0, I40E_GLV_RDPC(sc->sc_vsi_stat_counter_idx),
6519 	    &isc->isc_vsi_rx_discards_offset, isc->isc_has_offset);
6520 	atomic_add_64(&isc->isc_vsi_rx_discards.ev_count, delta);
6521 
6522 	delta = ixl_stat_delta(sc,
6523 	    I40E_GLV_GORCH(sc->sc_vsi_stat_counter_idx),
6524 	    I40E_GLV_GORCL(sc->sc_vsi_stat_counter_idx),
6525 	    &isc->isc_vsi_rx_bytes_offset, isc->isc_has_offset);
6526 	atomic_add_64(&isc->isc_vsi_rx_bytes.ev_count, delta);
6527 
6528 	delta = ixl_stat_delta(sc,
6529 	    I40E_GLV_UPRCH(sc->sc_vsi_stat_counter_idx),
6530 	    I40E_GLV_UPRCL(sc->sc_vsi_stat_counter_idx),
6531 	    &isc->isc_vsi_rx_unicast_offset, isc->isc_has_offset);
6532 	atomic_add_64(&isc->isc_vsi_rx_unicast.ev_count, delta);
6533 
6534 	delta = ixl_stat_delta(sc,
6535 	    I40E_GLV_MPRCH(sc->sc_vsi_stat_counter_idx),
6536 	    I40E_GLV_MPRCL(sc->sc_vsi_stat_counter_idx),
6537 	    &isc->isc_vsi_rx_multicast_offset, isc->isc_has_offset);
6538 	atomic_add_64(&isc->isc_vsi_rx_multicast.ev_count, delta);
6539 
6540 	delta = ixl_stat_delta(sc,
6541 	    I40E_GLV_BPRCH(sc->sc_vsi_stat_counter_idx),
6542 	    I40E_GLV_BPRCL(sc->sc_vsi_stat_counter_idx),
6543 	    &isc->isc_vsi_rx_broadcast_offset, isc->isc_has_offset);
6544 	atomic_add_64(&isc->isc_vsi_rx_broadcast.ev_count, delta);
6545 
6546 	/* VSI tx counters */
6547 	delta = ixl_stat_delta(sc,
6548 	    0, I40E_GLV_TEPC(sc->sc_vsi_stat_counter_idx),
6549 	    &isc->isc_vsi_tx_errors_offset, isc->isc_has_offset);
6550 	atomic_add_64(&isc->isc_vsi_tx_errors.ev_count, delta);
6551 
6552 	delta = ixl_stat_delta(sc,
6553 	    I40E_GLV_GOTCH(sc->sc_vsi_stat_counter_idx),
6554 	    I40E_GLV_GOTCL(sc->sc_vsi_stat_counter_idx),
6555 	    &isc->isc_vsi_tx_bytes_offset, isc->isc_has_offset);
6556 	atomic_add_64(&isc->isc_vsi_tx_bytes.ev_count, delta);
6557 
6558 	delta = ixl_stat_delta(sc,
6559 	    I40E_GLV_UPTCH(sc->sc_vsi_stat_counter_idx),
6560 	    I40E_GLV_UPTCL(sc->sc_vsi_stat_counter_idx),
6561 	    &isc->isc_vsi_tx_unicast_offset, isc->isc_has_offset);
6562 	atomic_add_64(&isc->isc_vsi_tx_unicast.ev_count, delta);
6563 
6564 	delta = ixl_stat_delta(sc,
6565 	    I40E_GLV_MPTCH(sc->sc_vsi_stat_counter_idx),
6566 	    I40E_GLV_MPTCL(sc->sc_vsi_stat_counter_idx),
6567 	    &isc->isc_vsi_tx_multicast_offset, isc->isc_has_offset);
6568 	atomic_add_64(&isc->isc_vsi_tx_multicast.ev_count, delta);
6569 
6570 	delta = ixl_stat_delta(sc,
6571 	    I40E_GLV_BPTCH(sc->sc_vsi_stat_counter_idx),
6572 	    I40E_GLV_BPTCL(sc->sc_vsi_stat_counter_idx),
6573 	    &isc->isc_vsi_tx_broadcast_offset, isc->isc_has_offset);
6574 	atomic_add_64(&isc->isc_vsi_tx_broadcast.ev_count, delta);
6575 }
6576 
6577 static int
6578 ixl_setup_sysctls(struct ixl_softc *sc)
6579 {
6580 	const char *devname;
6581 	struct sysctllog **log;
6582 	const struct sysctlnode *rnode, *rxnode, *txnode;
6583 	int error;
6584 
6585 	log = &sc->sc_sysctllog;
6586 	devname = device_xname(sc->sc_dev);
6587 
6588 	error = sysctl_createv(log, 0, NULL, &rnode,
6589 	    0, CTLTYPE_NODE, devname,
6590 	    SYSCTL_DESCR("ixl information and settings"),
6591 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
6592 	if (error)
6593 		goto out;
6594 
6595 	error = sysctl_createv(log, 0, &rnode, NULL,
6596 	    CTLFLAG_READWRITE, CTLTYPE_BOOL, "txrx_workqueue",
6597 	    SYSCTL_DESCR("Use workqueue for packet processing"),
6598 	    NULL, 0, &sc->sc_txrx_workqueue, 0, CTL_CREATE, CTL_EOL);
6599 	if (error)
6600 		goto out;
6601 
6602 	error = sysctl_createv(log, 0, &rnode, NULL,
6603 	    CTLFLAG_READONLY, CTLTYPE_INT, "stats_interval",
6604 	    SYSCTL_DESCR("Statistics collection interval in milliseconds"),
6605 	    NULL, 0, &sc->sc_stats_intval, 0, CTL_CREATE, CTL_EOL);
	if (error)
		goto out;
6606 
6607 	error = sysctl_createv(log, 0, &rnode, &rxnode,
6608 	    0, CTLTYPE_NODE, "rx",
6609 	    SYSCTL_DESCR("ixl information and settings for Rx"),
6610 	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
6611 	if (error)
6612 		goto out;
6613 
6614 	error = sysctl_createv(log, 0, &rxnode, NULL,
6615 	    CTLFLAG_READWRITE, CTLTYPE_INT, "itr",
6616 	    SYSCTL_DESCR("Interrupt Throttling"),
6617 	    ixl_sysctl_itr_handler, 0,
6618 	    (void *)sc, 0, CTL_CREATE, CTL_EOL);
6619 	if (error)
6620 		goto out;
6621 
6622 	error = sysctl_createv(log, 0, &rxnode, NULL,
6623 	    CTLFLAG_READONLY, CTLTYPE_INT, "descriptor_num",
6624 	    SYSCTL_DESCR("the number of rx descriptors"),
6625 	    NULL, 0, &sc->sc_rx_ring_ndescs, 0, CTL_CREATE, CTL_EOL);
6626 	if (error)
6627 		goto out;
6628 
6629 	error = sysctl_createv(log, 0, &rxnode, NULL,
6630 	    CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
6631 	    SYSCTL_DESCR("max number of Rx packets"
6632 	    " to process for interrupt processing"),
6633 	    NULL, 0, &sc->sc_rx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
6634 	if (error)
6635 		goto out;
6636 
6637 	error = sysctl_createv(log, 0, &rxnode, NULL,
6638 	    CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
6639 	    SYSCTL_DESCR("max number of Rx packets"
6640 	    " to process for deferred processing"),
6641 	    NULL, 0, &sc->sc_rx_process_limit, 0, CTL_CREATE, CTL_EOL);
6642 	if (error)
6643 		goto out;
6644 
6645 	error = sysctl_createv(log, 0, &rnode, &txnode,
6646 	    0, CTLTYPE_NODE, "tx",
6647 	    SYSCTL_DESCR("ixl information and settings for Tx"),
6648 	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
6649 	if (error)
6650 		goto out;
6651 
6652 	error = sysctl_createv(log, 0, &txnode, NULL,
6653 	    CTLFLAG_READWRITE, CTLTYPE_INT, "itr",
6654 	    SYSCTL_DESCR("Interrupt Throttling"),
6655 	    ixl_sysctl_itr_handler, 0,
6656 	    (void *)sc, 0, CTL_CREATE, CTL_EOL);
6657 	if (error)
6658 		goto out;
6659 
6660 	error = sysctl_createv(log, 0, &txnode, NULL,
6661 	    CTLFLAG_READONLY, CTLTYPE_INT, "descriptor_num",
6662 	    SYSCTL_DESCR("the number of tx descriptors"),
6663 	    NULL, 0, &sc->sc_tx_ring_ndescs, 0, CTL_CREATE, CTL_EOL);
6664 	if (error)
6665 		goto out;
6666 
6667 	error = sysctl_createv(log, 0, &txnode, NULL,
6668 	    CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
6669 	    SYSCTL_DESCR("max number of Tx packets"
6670 	    " to process for interrupt processing"),
6671 	    NULL, 0, &sc->sc_tx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
6672 	if (error)
6673 		goto out;
6674 
6675 	error = sysctl_createv(log, 0, &txnode, NULL,
6676 	    CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
6677 	    SYSCTL_DESCR("max number of Tx packets"
6678 	    " to process for deferred processing"),
6679 	    NULL, 0, &sc->sc_tx_process_limit, 0, CTL_CREATE, CTL_EOL);
6680 	if (error)
6681 		goto out;
6682 
6683 out:
6684 	if (error) {
6685 		aprint_error_dev(sc->sc_dev,
6686 		    "unable to create sysctl node\n");
6687 		sysctl_teardown(log);
6688 	}
6689 
6690 	return error;
6691 }
6692 
6693 static void
6694 ixl_teardown_sysctls(struct ixl_softc *sc)
6695 {
6696 
6697 	sysctl_teardown(&sc->sc_sysctllog);
6698 }
6699 
6700 static bool
6701 ixl_sysctlnode_is_rx(struct sysctlnode *node)
6702 {
6703 
6704 	if (strstr(node->sysctl_parent->sysctl_name, "rx") != NULL)
6705 		return true;
6706 
6707 	return false;
6708 }
6709 
6710 static int
6711 ixl_sysctl_itr_handler(SYSCTLFN_ARGS)
6712 {
6713 	struct sysctlnode node = *rnode;
6714 	struct ixl_softc *sc = (struct ixl_softc *)node.sysctl_data;
6715 	struct ifnet *ifp = &sc->sc_ec.ec_if;
6716 	uint32_t newitr, *itrptr;
6717 	int error;
6718 
6719 	if (ixl_sysctlnode_is_rx(&node)) {
6720 		itrptr = &sc->sc_itr_rx;
6721 	} else {
6722 		itrptr = &sc->sc_itr_tx;
6723 	}
6724 
6725 	newitr = *itrptr;
6726 	node.sysctl_data = &newitr;
6727 	node.sysctl_size = sizeof(newitr);
6728 
6729 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
6730 
6731 	if (error || newp == NULL)
6732 		return error;
6733 
6734 	/* ITRs are applied in ixl_init() for simple implementation */
6735 	if (ISSET(ifp->if_flags, IFF_RUNNING))
6736 		return EBUSY;
6737 
6738 	if (newitr > 0x07ff)
6739 		return EINVAL;
6740 
6741 	*itrptr = newitr;
6742 
6743 	return 0;
6744 }
6745 
6746 static struct workqueue *
6747 ixl_workq_create(const char *name, pri_t prio, int ipl, int flags)
6748 {
6749 	struct workqueue *wq;
6750 	int error;
6751 
6752 	error = workqueue_create(&wq, name, ixl_workq_work, NULL,
6753 	    prio, ipl, flags);
6754 
6755 	if (error)
6756 		return NULL;
6757 
6758 	return wq;
6759 }
6760 
6761 static void
6762 ixl_workq_destroy(struct workqueue *wq)
6763 {
6764 
6765 	workqueue_destroy(wq);
6766 }
6767 
6768 static void
6769 ixl_work_set(struct ixl_work *work, void (*func)(void *), void *arg)
6770 {
6771 
6772 	memset(work, 0, sizeof(*work));
6773 	work->ixw_func = func;
6774 	work->ixw_arg = arg;
6775 }
6776 
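/*
 * Enqueue a piece of deferred work at most once: the ixw_added flag is
 * claimed with a compare-and-swap, so repeated calls before the
 * handler runs (and clears the flag again) do not enqueue the same
 * work item twice.
 */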
6777 static void
6778 ixl_work_add(struct workqueue *wq, struct ixl_work *work)
6779 {
6780 	if (atomic_cas_uint(&work->ixw_added, 0, 1) != 0)
6781 		return;
6782 
6783 	kpreempt_disable();
6784 	workqueue_enqueue(wq, &work->ixw_cookie, NULL);
6785 	kpreempt_enable();
6786 }
6787 
6788 static void
6789 ixl_work_wait(struct workqueue *wq, struct ixl_work *work)
6790 {
6791 
6792 	workqueue_wait(wq, &work->ixw_cookie);
6793 }
6794 
6795 static void
6796 ixl_workq_work(struct work *wk, void *context)
6797 {
6798 	struct ixl_work *work;
6799 
6800 	work = container_of(wk, struct ixl_work, ixw_cookie);
6801 
6802 	atomic_swap_uint(&work->ixw_added, 0);
6803 	work->ixw_func(work->ixw_arg);
6804 }
6805 
6806 static int
6807 ixl_rx_ctl_read(struct ixl_softc *sc, uint32_t reg, uint32_t *rv)
6808 {
6809 	struct ixl_aq_desc iaq;
6810 
6811 	memset(&iaq, 0, sizeof(iaq));
6812 	iaq.iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_REG_READ);
6813 	iaq.iaq_param[1] = htole32(reg);
6814 
6815 	if (ixl_atq_poll(sc, &iaq, 250) != 0)
6816 		return ETIMEDOUT;
6817 
6818 	switch (le16toh(iaq.iaq_retval)) {
6819 	case IXL_AQ_RC_OK:
6820 		/* success */
6821 		break;
6822 	case IXL_AQ_RC_EACCES:
6823 		return EPERM;
6824 	case IXL_AQ_RC_EAGAIN:
6825 		return EAGAIN;
6826 	default:
6827 		return EIO;
6828 	}
6829 
6830 	*rv = le32toh(iaq.iaq_param[3]);
6831 	return 0;
6832 }
6833 
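/*
 * Read an Rx control register.  On firmware that requires it, go
 * through the RX_CTL_REG_READ admin queue command, retrying briefly on
 * EAGAIN; otherwise, or if the command keeps failing, fall back to a
 * direct register read.
 */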
6834 static uint32_t
6835 ixl_rd_rx_csr(struct ixl_softc *sc, uint32_t reg)
6836 {
6837 	uint32_t val;
6838 	int rv, retry, retry_limit;
6839 
6840 	if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL)) {
6841 		retry_limit = 5;
6842 	} else {
6843 		retry_limit = 0;
6844 	}
6845 
6846 	for (retry = 0; retry < retry_limit; retry++) {
6847 		rv = ixl_rx_ctl_read(sc, reg, &val);
6848 		if (rv == 0)
6849 			return val;
6850 		else if (rv == EAGAIN)
6851 			delaymsec(1);
6852 		else
6853 			break;
6854 	}
6855 
6856 	val = ixl_rd(sc, reg);
6857 
6858 	return val;
6859 }
6860 
6861 static int
6862 ixl_rx_ctl_write(struct ixl_softc *sc, uint32_t reg, uint32_t value)
6863 {
6864 	struct ixl_aq_desc iaq;
6865 
6866 	memset(&iaq, 0, sizeof(iaq));
6867 	iaq.iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_REG_WRITE);
6868 	iaq.iaq_param[1] = htole32(reg);
6869 	iaq.iaq_param[3] = htole32(value);
6870 
6871 	if (ixl_atq_poll(sc, &iaq, 250) != 0)
6872 		return ETIMEDOUT;
6873 
6874 	switch (le16toh(iaq.iaq_retval)) {
6875 	case IXL_AQ_RC_OK:
6876 		/* success */
6877 		break;
6878 	case IXL_AQ_RC_EACCES:
6879 		return EPERM;
6880 	case IXL_AQ_RC_EAGAIN:
6881 		return EAGAIN;
6882 	default:
6883 		return EIO;
6884 	}
6885 
6886 	return 0;
6887 }
6888 
6889 static void
6890 ixl_wr_rx_csr(struct ixl_softc *sc, uint32_t reg, uint32_t value)
6891 {
6892 	int rv, retry, retry_limit;
6893 
6894 	if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_RXCTL)) {
6895 		retry_limit = 5;
6896 	} else {
6897 		retry_limit = 0;
6898 	}
6899 
6900 	for (retry = 0; retry < retry_limit; retry++) {
6901 		rv = ixl_rx_ctl_write(sc, reg, value);
6902 		if (rv == 0)
6903 			return;
6904 		else if (rv == EAGAIN)
6905 			delaymsec(1);
6906 		else
6907 			break;
6908 	}
6909 
6910 	ixl_wr(sc, reg, value);
6911 }
6912 
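/*
 * Acquire the NVM ownership semaphore through the admin queue before
 * shadow RAM access, on firmware that requires it.  'R' requests read
 * access; anything else requests write access.
 */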
6913 static int
6914 ixl_nvm_lock(struct ixl_softc *sc, char rw)
6915 {
6916 	struct ixl_aq_desc iaq;
6917 	struct ixl_aq_req_resource_param *param;
6918 	int rv;
6919 
6920 	if (!ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK))
6921 		return 0;
6922 
6923 	memset(&iaq, 0, sizeof(iaq));
6924 	iaq.iaq_opcode = htole16(IXL_AQ_OP_REQUEST_RESOURCE);
6925 
6926 	param = (struct ixl_aq_req_resource_param *)&iaq.iaq_param;
6927 	param->resource_id = htole16(IXL_AQ_RESOURCE_ID_NVM);
6928 	if (rw == 'R') {
6929 		param->access_type = htole16(IXL_AQ_RESOURCE_ACCES_READ);
6930 	} else {
6931 		param->access_type = htole16(IXL_AQ_RESOURCE_ACCES_WRITE);
6932 	}
6933 
6934 	rv = ixl_atq_poll(sc, &iaq, 250);
6935 
6936 	if (rv != 0)
6937 		return ETIMEDOUT;
6938 
6939 	switch (le16toh(iaq.iaq_retval)) {
6940 	case IXL_AQ_RC_OK:
6941 		break;
6942 	case IXL_AQ_RC_EACCES:
6943 		return EACCES;
6944 	case IXL_AQ_RC_EBUSY:
6945 		return EBUSY;
6946 	case IXL_AQ_RC_EPERM:
6947 		return EPERM;
6948 	}
6949 
6950 	return 0;
6951 }
6952 
6953 static int
6954 ixl_nvm_unlock(struct ixl_softc *sc)
6955 {
6956 	struct ixl_aq_desc iaq;
6957 	struct ixl_aq_rel_resource_param *param;
6958 	int rv;
6959 
6960 	if (!ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMLOCK))
6961 		return 0;
6962 
6963 	memset(&iaq, 0, sizeof(iaq));
6964 	iaq.iaq_opcode = htole16(IXL_AQ_OP_RELEASE_RESOURCE);
6965 
6966 	param = (struct ixl_aq_rel_resource_param *)&iaq.iaq_param;
6967 	param->resource_id = htole16(IXL_AQ_RESOURCE_ID_NVM);
6968 
6969 	rv = ixl_atq_poll(sc, &iaq, 250);
6970 
6971 	if (rv != 0)
6972 		return ETIMEDOUT;
6973 
6974 	switch (le16toh(iaq.iaq_retval)) {
6975 	case IXL_AQ_RC_OK:
6976 		break;
6977 	default:
6978 		return EIO;
6979 	}
6980 	return 0;
6981 }
6982 
6983 static int
6984 ixl_srdone_poll(struct ixl_softc *sc)
6985 {
6986 	int wait_count;
6987 	uint32_t reg;
6988 
6989 	for (wait_count = 0; wait_count < IXL_SRRD_SRCTL_ATTEMPTS;
6990 	    wait_count++) {
6991 		reg = ixl_rd(sc, I40E_GLNVM_SRCTL);
6992 		if (ISSET(reg, I40E_GLNVM_SRCTL_DONE_MASK))
6993 			break;
6994 
6995 		delaymsec(5);
6996 	}
6997 
6998 	if (wait_count == IXL_SRRD_SRCTL_ATTEMPTS)
6999 		return -1;
7000 
7001 	return 0;
7002 }
7003 
7004 static int
7005 ixl_nvm_read_srctl(struct ixl_softc *sc, uint16_t offset, uint16_t *data)
7006 {
7007 	uint32_t reg;
7008 
7009 	if (ixl_srdone_poll(sc) != 0)
7010 		return ETIMEDOUT;
7011 
7012 	reg = ((uint32_t)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
7013 	    __BIT(I40E_GLNVM_SRCTL_START_SHIFT);
7014 	ixl_wr(sc, I40E_GLNVM_SRCTL, reg);
7015 
7016 	if (ixl_srdone_poll(sc) != 0) {
7017 		aprint_debug("NVM read error: couldn't access "
7018 		    "Shadow RAM address: 0x%x\n", offset);
7019 		return ETIMEDOUT;
7020 	}
7021 
7022 	reg = ixl_rd(sc, I40E_GLNVM_SRDATA);
7023 	*data = (uint16_t)__SHIFTOUT(reg, I40E_GLNVM_SRDATA_RDDATA_MASK);
7024 
7025 	return 0;
7026 }
7027 
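/*
 * Read from NVM through the admin queue.  The AQ command addresses the
 * shadow RAM in bytes, so the 16-bit word offset is converted to a
 * 24-bit byte offset; the data is DMAed into sc_aqbuf and copied out.
 */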
7028 static int
7029 ixl_nvm_read_aq(struct ixl_softc *sc, uint16_t offset_word,
7030     void *data, size_t len)
7031 {
7032 	struct ixl_dmamem *idm;
7033 	struct ixl_aq_desc iaq;
7034 	struct ixl_aq_nvm_param *param;
7035 	uint32_t offset_bytes;
7036 	int rv;
7037 
7038 	idm = &sc->sc_aqbuf;
7039 	if (len > IXL_DMA_LEN(idm))
7040 		return ENOMEM;
7041 
7042 	memset(IXL_DMA_KVA(idm), 0, IXL_DMA_LEN(idm));
7043 	memset(&iaq, 0, sizeof(iaq));
7044 	iaq.iaq_opcode = htole16(IXL_AQ_OP_NVM_READ);
7045 	iaq.iaq_flags = htole16(IXL_AQ_BUF |
7046 	    ((len > I40E_AQ_LARGE_BUF) ? IXL_AQ_LB : 0));
7047 	iaq.iaq_datalen = htole16(len);
7048 	ixl_aq_dva(&iaq, IXL_DMA_DVA(idm));
7049 
7050 	param = (struct ixl_aq_nvm_param *)iaq.iaq_param;
7051 	param->command_flags = IXL_AQ_NVM_LAST_CMD;
7052 	param->module_pointer = 0;
7053 	param->length = htole16(len);
7054 	offset_bytes = (uint32_t)offset_word * 2;
7055 	offset_bytes &= 0x00FFFFFF;
7056 	param->offset = htole32(offset_bytes);
7057 
7058 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
7059 	    BUS_DMASYNC_PREREAD);
7060 
7061 	rv = ixl_atq_poll(sc, &iaq, 250);
7062 
7063 	bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
7064 	    BUS_DMASYNC_POSTREAD);
7065 
7066 	if (rv != 0) {
7067 		return ETIMEDOUT;
7068 	}
7069 
7070 	switch (le16toh(iaq.iaq_retval)) {
7071 	case IXL_AQ_RC_OK:
7072 		break;
7073 	case IXL_AQ_RC_EPERM:
7074 		return EPERM;
7075 	case IXL_AQ_RC_EINVAL:
7076 		return EINVAL;
7077 	case IXL_AQ_RC_EBUSY:
7078 		return EBUSY;
7079 	case IXL_AQ_RC_EIO:
7080 	default:
7081 		return EIO;
7082 	}
7083 
7084 	memcpy(data, IXL_DMA_KVA(idm), len);
7085 
7086 	return 0;
7087 }
7088 
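/*
 * Read one 16-bit word from NVM, taking and releasing the NVM lock
 * around the access.  The admin queue read path is used when the
 * firmware supports it (IXL_SC_AQ_FLAG_NVMREAD); otherwise fall
 * back to the Shadow RAM register interface.
 */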
7089 static int
7090 ixl_rd16_nvm(struct ixl_softc *sc, uint16_t offset, uint16_t *data)
7091 {
7092 	int error;
7093 	uint16_t buf;
7094 
7095 	error = ixl_nvm_lock(sc, 'R');
7096 	if (error)
7097 		return error;
7098 
7099 	if (ISSET(sc->sc_aq_flags, IXL_SC_AQ_FLAG_NVMREAD)) {
7100 		error = ixl_nvm_read_aq(sc, offset,
7101 		    &buf, sizeof(buf));
7102 		if (error == 0)
7103 			*data = le16toh(buf);
7104 	} else {
7105 		error = ixl_nvm_read_srctl(sc, offset, &buf);
7106 		if (error == 0)
7107 			*data = buf;
7108 	}
7109 
7110 	ixl_nvm_unlock(sc);
7111 
7112 	return error;
7113 }
7114 
7115 MODULE(MODULE_CLASS_DRIVER, if_ixl, "pci");
7116 
7117 #ifdef _MODULE
7118 #include "ioconf.c"
7119 #endif
7120 
7121 #ifdef _MODULE
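/*
 * Apply tunables passed as module properties at load time:
 * "nomsix", "stats_interval", "nqps_limit", "rx_ndescs" and
 * "tx_ndescs", each with a basic sanity check.  For example
 * (illustrative values only):
 *
 *	modload -b nomsix=true -i nqps_limit=4 if_ixl
 */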
7122 static void
7123 ixl_parse_modprop(prop_dictionary_t dict)
7124 {
7125 	prop_object_t obj;
7126 	int64_t val;
7127 	uint64_t uval;
7128 
7129 	if (dict == NULL)
7130 		return;
7131 
7132 	obj = prop_dictionary_get(dict, "nomsix");
7133 	if (obj != NULL && prop_object_type(obj) == PROP_TYPE_BOOL) {
7134 		ixl_param_nomsix = prop_bool_true((prop_bool_t)obj);
7135 	}
7136 
7137 	obj = prop_dictionary_get(dict, "stats_interval");
7138 	if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
7139 		val = prop_number_signed_value((prop_number_t)obj);
7140 
7141 		/* the accepted range is arbitrary */
7142 		if (100 < val && val < 180000) {
7143 			ixl_param_stats_interval = val;
7144 		}
7145 	}
7146 
7147 	obj = prop_dictionary_get(dict, "nqps_limit");
7148 	if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
7149 		val = prop_number_signed_value((prop_number_t)obj);
7150 
7151 		if (val <= INT32_MAX)
7152 			ixl_param_nqps_limit = val;
7153 	}
7154 
7155 	obj = prop_dictionary_get(dict, "rx_ndescs");
7156 	if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
7157 		uval = prop_number_unsigned_integer_value((prop_number_t)obj);
7158 
7159 		if (uval > 8)
7160 			ixl_param_rx_ndescs = uval;
7161 	}
7162 
7163 	obj = prop_dictionary_get(dict, "tx_ndescs");
7164 	if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
7165 		uval = prop_number_unsigned_integer_value((prop_number_t)obj);
7166 
7167 		if (uval > IXL_TX_PKT_DESCS)
7168 			ixl_param_tx_ndescs = uval;
7169 	}
7170 
7171 }
7172 #endif
7173 
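/*
 * Module control entry point: attach or detach the driver's
 * autoconf glue when built as a kernel module.
 */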
7174 static int
7175 if_ixl_modcmd(modcmd_t cmd, void *opaque)
7176 {
7177 	int error = 0;
7178 
7179 #ifdef _MODULE
7180 	switch (cmd) {
7181 	case MODULE_CMD_INIT:
7182 		ixl_parse_modprop((prop_dictionary_t)opaque);
7183 		error = config_init_component(cfdriver_ioconf_if_ixl,
7184 		    cfattach_ioconf_if_ixl, cfdata_ioconf_if_ixl);
7185 		break;
7186 	case MODULE_CMD_FINI:
7187 		error = config_fini_component(cfdriver_ioconf_if_ixl,
7188 		    cfattach_ioconf_if_ixl, cfdata_ioconf_if_ixl);
7189 		break;
7190 	default:
7191 		error = ENOTTY;
7192 		break;
7193 	}
7194 #endif
7195 
7196 	return error;
7197 }
7198