1 /* $NetBSD: if_iavf.c,v 1.18 2024/06/29 12:11:12 riastradh Exp $ */
2
3 /*
4 * Copyright (c) 2013-2015, Intel Corporation
5 * All rights reserved.
6
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
12 *
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * 3. Neither the name of the Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived from
19 * this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 /*
35 * Copyright (c) 2016,2017 David Gwynne <dlg@openbsd.org>
36 * Copyright (c) 2019 Jonathan Matthew <jmatthew@openbsd.org>
37 *
38 * Permission to use, copy, modify, and distribute this software for any
39 * purpose with or without fee is hereby granted, provided that the above
40 * copyright notice and this permission notice appear in all copies.
41 *
42 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
43 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
44 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
45 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
46 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
47 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
48 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
49 */
50
51 /*
52 * Copyright (c) 2020 Internet Initiative Japan, Inc.
53 * All rights reserved.
54 *
55 * Redistribution and use in source and binary forms, with or without
56 * modification, are permitted provided that the following conditions
57 * are met:
58 * 1. Redistributions of source code must retain the above copyright
59 * notice, this list of conditions and the following disclaimer.
60 * 2. Redistributions in binary form must reproduce the above copyright
61 * notice, this list of conditions and the following disclaimer in the
62 * documentation and/or other materials provided with the distribution.
63 *
64 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
65 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
66 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
67 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
68 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
69 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
70 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
71 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
72 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
73 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
74 * POSSIBILITY OF SUCH DAMAGE.
75 */
76
77 #include <sys/cdefs.h>
78 __KERNEL_RCSID(0, "$NetBSD: if_iavf.c,v 1.18 2024/06/29 12:11:12 riastradh Exp $");
79
80 #include <sys/param.h>
81 #include <sys/types.h>
82
83 #include <sys/bitops.h>
84 #include <sys/bus.h>
85 #include <sys/cprng.h>
86 #include <sys/cpu.h>
87 #include <sys/device.h>
88 #include <sys/evcnt.h>
89 #include <sys/interrupt.h>
90 #include <sys/kmem.h>
91 #include <sys/module.h>
92 #include <sys/mutex.h>
93 #include <sys/pcq.h>
94 #include <sys/queue.h>
95 #include <sys/syslog.h>
96 #include <sys/workqueue.h>
97 #include <sys/xcall.h>
98
99 #include <net/bpf.h>
100 #include <net/if.h>
101 #include <net/if_dl.h>
102 #include <net/if_media.h>
103 #include <net/if_ether.h>
104 #include <net/rss_config.h>
105
106 #include <netinet/tcp.h> /* for struct tcphdr */
107 #include <netinet/udp.h> /* for struct udphdr */
108
109 #include <dev/pci/pcivar.h>
110 #include <dev/pci/pcidevs.h>
111
112 #include <dev/pci/if_ixlreg.h>
113 #include <dev/pci/if_ixlvar.h>
114 #include <dev/pci/if_iavfvar.h>
115
116 #include <prop/proplib.h>
117
118 #define IAVF_PCIREG PCI_MAPREG_START
119 #define IAVF_AQ_NUM 256
120 #define IAVF_AQ_MASK (IAVF_AQ_NUM-1)
121 #define IAVF_AQ_ALIGN 64
122 #define IAVF_AQ_BUFLEN 4096
123 #define I40E_AQ_LARGE_BUF 512
124 #define IAVF_VF_MAJOR 1
125 #define IAVF_VF_MINOR 1
126
127 #define IAVF_VFR_INPROGRESS 0
128 #define IAVF_VFR_COMPLETED 1
129 #define IAVF_VFR_VFACTIVE 2
130
131 #define IAVF_REG_VFR 0xdeadbeef
132
133 #define IAVF_ITR_RX 0x0
134 #define IAVF_ITR_TX 0x1
135 #define IAVF_ITR_MISC 0x2
136 #define IAVF_NOITR 0x3
137
138 #define IAVF_MTU_ETHERLEN (ETHER_HDR_LEN \
139 + ETHER_CRC_LEN)
140 #define IAVF_MAX_MTU (9600 - IAVF_MTU_ETHERLEN)
141 #define IAVF_MIN_MTU (ETHER_MIN_LEN - ETHER_CRC_LEN)
142
143 #define IAVF_WORKQUEUE_PRI PRI_SOFTNET
144
145 #define IAVF_TX_PKT_DESCS 8
146 #define IAVF_TX_QUEUE_ALIGN 128
147 #define IAVF_RX_QUEUE_ALIGN 128
148 #define IAVF_TX_PKT_MAXSIZE (MCLBYTES * IAVF_TX_PKT_DESCS)
149 #define IAVF_MCLBYTES (MCLBYTES - ETHER_ALIGN)
150
151 #define IAVF_TICK_INTERVAL (5 * hz)
152 #define IAVF_WATCHDOG_TICKS 3
153 #define IAVF_WATCHDOG_STOP 0
154
155 #define IAVF_TXRX_PROCESS_UNLIMIT UINT_MAX
156 #define IAVF_TX_PROCESS_LIMIT 256
157 #define IAVF_RX_PROCESS_LIMIT 256
158 #define IAVF_TX_INTR_PROCESS_LIMIT 256
159 #define IAVF_RX_INTR_PROCESS_LIMIT 0U
160
161 #define IAVF_EXEC_TIMEOUT 3000
162
163 #define IAVF_IFCAP_RXCSUM (IFCAP_CSUM_IPv4_Rx | \
164 IFCAP_CSUM_TCPv4_Rx | \
165 IFCAP_CSUM_UDPv4_Rx | \
166 IFCAP_CSUM_TCPv6_Rx | \
167 IFCAP_CSUM_UDPv6_Rx)
168 #define IAVF_IFCAP_TXCSUM (IFCAP_CSUM_IPv4_Tx | \
169 IFCAP_CSUM_TCPv4_Tx | \
170 IFCAP_CSUM_UDPv4_Tx | \
171 IFCAP_CSUM_TCPv6_Tx | \
172 IFCAP_CSUM_UDPv6_Tx)
173 #define IAVF_CSUM_ALL_OFFLOAD (M_CSUM_IPv4 | \
174 M_CSUM_TCPv4 | M_CSUM_TCPv6 | \
175 M_CSUM_UDPv4 | M_CSUM_UDPv6)
176
177 struct iavf_softc; /* defined */
178
179 struct iavf_module_params {
180 int debug;
181 uint32_t rx_itr;
182 uint32_t tx_itr;
183 unsigned int rx_ndescs;
184 unsigned int tx_ndescs;
185 int max_qps;
186 };
187
188 struct iavf_product {
189 unsigned int vendor_id;
190 unsigned int product_id;
191 };
192
193 struct iavf_link_speed {
194 uint64_t baudrate;
195 uint64_t media;
196 };
197
198 struct iavf_aq_regs {
199 bus_size_t atq_tail;
200 bus_size_t atq_head;
201 bus_size_t atq_len;
202 bus_size_t atq_bal;
203 bus_size_t atq_bah;
204
205 bus_size_t arq_tail;
206 bus_size_t arq_head;
207 bus_size_t arq_len;
208 bus_size_t arq_bal;
209 bus_size_t arq_bah;
210
211 uint32_t atq_len_enable;
212 uint32_t atq_tail_mask;
213 uint32_t atq_head_mask;
214
215 uint32_t arq_len_enable;
216 uint32_t arq_tail_mask;
217 uint32_t arq_head_mask;
218 };
219
220 struct iavf_work {
221 struct work ixw_cookie;
222 void (*ixw_func)(void *);
223 void *ixw_arg;
224 unsigned int ixw_added;
225 };
226
227 struct iavf_tx_map {
228 struct mbuf *txm_m;
229 bus_dmamap_t txm_map;
230 unsigned int txm_eop;
231 };
232
233 struct iavf_tx_ring {
234 unsigned int txr_qid;
235 char txr_name[16];
236
237 struct iavf_softc *txr_sc;
238 kmutex_t txr_lock;
239 pcq_t *txr_intrq;
240 void *txr_si;
241 unsigned int txr_prod;
242 unsigned int txr_cons;
243
244 struct iavf_tx_map *txr_maps;
245 struct ixl_dmamem txr_mem;
246 bus_size_t txr_tail;
247
248 int txr_watchdog;
249
250 struct evcnt txr_defragged;
251 struct evcnt txr_defrag_failed;
252 struct evcnt txr_pcqdrop;
253 struct evcnt txr_transmitdef;
254 struct evcnt txr_defer;
255 struct evcnt txr_watchdogto;
256 struct evcnt txr_intr;
257 };
258
259 struct iavf_rx_map {
260 struct mbuf *rxm_m;
261 bus_dmamap_t rxm_map;
262 };
263
264 struct iavf_rx_ring {
265 unsigned int rxr_qid;
266 char rxr_name[16];
267
268 struct iavf_softc *rxr_sc;
269 kmutex_t rxr_lock;
270
271 unsigned int rxr_prod;
272 unsigned int rxr_cons;
273
274 struct iavf_rx_map *rxr_maps;
275 struct ixl_dmamem rxr_mem;
276 bus_size_t rxr_tail;
277
278 struct mbuf *rxr_m_head;
279 struct mbuf **rxr_m_tail;
280
281 struct evcnt rxr_mgethdr_failed;
282 struct evcnt rxr_mgetcl_failed;
283 struct evcnt rxr_mbuf_load_failed;
284 struct evcnt rxr_defer;
285 struct evcnt rxr_intr;
286 };
287
288 struct iavf_queue_pair {
289 struct iavf_tx_ring *qp_txr;
290 struct iavf_rx_ring *qp_rxr;
291 struct work qp_work;
292 void *qp_si;
293 bool qp_workqueue;
294 };
295
296 struct iavf_stat_counters {
297 struct evcnt isc_rx_bytes;
298 struct evcnt isc_rx_unicast;
299 struct evcnt isc_rx_multicast;
300 struct evcnt isc_rx_broadcast;
301 struct evcnt isc_rx_discards;
302 struct evcnt isc_rx_unknown_protocol;
303 struct evcnt isc_tx_bytes;
304 struct evcnt isc_tx_unicast;
305 struct evcnt isc_tx_multicast;
306 struct evcnt isc_tx_broadcast;
307 struct evcnt isc_tx_discards;
308 struct evcnt isc_tx_errors;
309 };
310
311 /*
312 * Locking notes:
313  * + Fields in iavf_tx_ring are protected by txr_lock (a spin mutex), and
314  *   fields in iavf_rx_ring are protected by rxr_lock (a spin mutex).
315  *     - No more than one of these ring locks may be held at once.
316  * + Fields named sc_atq_*, sc_arq_*, and sc_adminq_* are protected by
317  *   sc_adminq_lock (a spin mutex).
318  *     - The lock is held while accessing sc_aq_regs
319  *       and is never held together with txr_lock or rxr_lock.
320  * + Other fields in iavf_softc are protected by sc_cfg_lock
321  *   (an adaptive mutex).
322  *     - sc_cfg_lock must be taken before acquiring any other lock.
323 *
324 * Locking order:
325 * - IFNET_LOCK => sc_cfg_lock => sc_adminq_lock
326 * - sc_cfg_lock => ETHER_LOCK => sc_adminq_lock
327 * - sc_cfg_lock => txr_lock
328 * - sc_cfg_lock => rxr_lock
329 */
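/*
 * For example (an illustrative sketch, not a routine in this driver),
 * code on the configuration path that updates a Tx ring nests the
 * locks in the order documented above:
 *
 *	mutex_enter(&sc->sc_cfg_lock);
 *	mutex_enter(&sc->sc_qps[i].qp_txr->txr_lock);
 *	... update ring state ...
 *	mutex_exit(&sc->sc_qps[i].qp_txr->txr_lock);
 *	mutex_exit(&sc->sc_cfg_lock);
 */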
330
331 struct iavf_softc {
332 device_t sc_dev;
333 enum i40e_mac_type sc_mac_type;
334 int sc_debuglevel;
335 bool sc_attached;
336 bool sc_dead;
337 kmutex_t sc_cfg_lock;
338 callout_t sc_tick;
339 struct ifmedia sc_media;
340 uint64_t sc_media_status;
341 uint64_t sc_media_active;
342 int sc_link_state;
343
344 const struct iavf_aq_regs *
345 sc_aq_regs;
346
347 struct ethercom sc_ec;
348 uint8_t sc_enaddr[ETHER_ADDR_LEN];
349 uint8_t sc_enaddr_fake[ETHER_ADDR_LEN];
350 uint8_t sc_enaddr_added[ETHER_ADDR_LEN];
351 uint8_t sc_enaddr_reset[ETHER_ADDR_LEN];
352 struct if_percpuq *sc_ipq;
353
354 struct pci_attach_args sc_pa;
355 bus_dma_tag_t sc_dmat;
356 bus_space_tag_t sc_memt;
357 bus_space_handle_t sc_memh;
358 bus_size_t sc_mems;
359 pci_intr_handle_t *sc_ihp;
360 void **sc_ihs;
361 unsigned int sc_nintrs;
362
363 uint32_t sc_major_ver;
364 uint32_t sc_minor_ver;
365 uint32_t sc_vf_id;
366 uint32_t sc_vf_cap;
367 uint16_t sc_vsi_id;
368 uint16_t sc_qset_handle;
369 uint16_t sc_max_mtu;
370 bool sc_got_vf_resources;
371 bool sc_got_irq_map;
372 unsigned int sc_max_vectors;
373
374 kmutex_t sc_adminq_lock;
375 kcondvar_t sc_adminq_cv;
376 struct ixl_dmamem sc_atq;
377 unsigned int sc_atq_prod;
378 unsigned int sc_atq_cons;
379 struct ixl_aq_bufs sc_atq_idle;
380 struct ixl_aq_bufs sc_atq_live;
381 struct ixl_dmamem sc_arq;
382 struct ixl_aq_bufs sc_arq_idle;
383 struct ixl_aq_bufs sc_arq_live;
384 unsigned int sc_arq_prod;
385 unsigned int sc_arq_cons;
386 struct iavf_work sc_arq_refill;
387 uint32_t sc_arq_opcode;
388 uint32_t sc_arq_retval;
389
390 uint32_t sc_tx_itr;
391 uint32_t sc_rx_itr;
392 unsigned int sc_tx_ring_ndescs;
393 unsigned int sc_rx_ring_ndescs;
394 unsigned int sc_nqueue_pairs;
395 unsigned int sc_nqps_alloc;
396 unsigned int sc_nqps_vsi;
397 unsigned int sc_nqps_req;
398 struct iavf_queue_pair *sc_qps;
399 bool sc_txrx_workqueue;
400 u_int sc_tx_intr_process_limit;
401 u_int sc_tx_process_limit;
402 u_int sc_rx_intr_process_limit;
403 u_int sc_rx_process_limit;
404
405 struct workqueue *sc_workq;
406 struct workqueue *sc_workq_txrx;
407 struct iavf_work sc_reset_task;
408 struct iavf_work sc_wdto_task;
409 struct iavf_work sc_req_queues_task;
410 bool sc_req_queues_retried;
411 bool sc_resetting;
412 bool sc_reset_up;
413
414 struct sysctllog *sc_sysctllog;
415 struct iavf_stat_counters
416 sc_stat_counters;
417 };
418
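/*
 * IAVF_LOG() routes messages to the autoconf aprint_*(9) functions
 * while the device is still attaching, and to log(9) prefixed with
 * the interface name once attachment has completed.
 */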
419 #define IAVF_LOG(_sc, _lvl, _fmt, _args...) \
420 do { \
421 if (!(_sc)->sc_attached) { \
422 switch (_lvl) { \
423 case LOG_ERR: \
424 case LOG_WARNING: \
425 aprint_error_dev((_sc)->sc_dev, _fmt, ##_args); \
426 break; \
427 case LOG_INFO: \
428 aprint_normal_dev((_sc)->sc_dev,_fmt, ##_args); \
429 break; \
430 case LOG_DEBUG: \
431 default: \
432 aprint_debug_dev((_sc)->sc_dev, _fmt, ##_args); \
433 } \
434 } else { \
435 struct ifnet *_ifp = &(_sc)->sc_ec.ec_if; \
436 log((_lvl), "%s: " _fmt, _ifp->if_xname, ##_args); \
437 } \
438 } while (0)
439
440 static int iavf_dmamem_alloc(bus_dma_tag_t, struct ixl_dmamem *,
441 bus_size_t, bus_size_t);
442 static void iavf_dmamem_free(bus_dma_tag_t, struct ixl_dmamem *);
443 static struct ixl_aq_buf *
444 iavf_aqb_get(struct iavf_softc *, struct ixl_aq_bufs *);
445 static struct ixl_aq_buf *
446 iavf_aqb_get_locked(struct ixl_aq_bufs *);
447 static void iavf_aqb_put_locked(struct ixl_aq_bufs *, struct ixl_aq_buf *);
448 static void iavf_aqb_clean(struct ixl_aq_bufs *, bus_dma_tag_t);
449
450 static const struct iavf_product *
451 iavf_lookup(const struct pci_attach_args *);
452 static enum i40e_mac_type
453 iavf_mactype(pci_product_id_t);
454 static void iavf_pci_csr_setup(pci_chipset_tag_t, pcitag_t);
455 static int iavf_wait_active(struct iavf_softc *);
456 static bool iavf_is_etheranyaddr(const uint8_t *);
457 static void iavf_prepare_fakeaddr(struct iavf_softc *);
458 static int iavf_replace_lla(struct ifnet *,
459 const uint8_t *, const uint8_t *);
460 static void iavf_evcnt_attach(struct evcnt *,
461 const char *, const char *);
462 static int iavf_setup_interrupts(struct iavf_softc *);
463 static void iavf_teardown_interrupts(struct iavf_softc *);
464 static int iavf_setup_sysctls(struct iavf_softc *);
465 static void iavf_teardown_sysctls(struct iavf_softc *);
466 static int iavf_setup_stats(struct iavf_softc *);
467 static void iavf_teardown_stats(struct iavf_softc *);
468 static struct workqueue *
469 iavf_workq_create(const char *, pri_t, int, int);
470 static void iavf_workq_destroy(struct workqueue *);
471 static int iavf_work_set(struct iavf_work *, void (*)(void *), void *);
472 static void iavf_work_add(struct workqueue *, struct iavf_work *);
473 static void iavf_work_wait(struct workqueue *, struct iavf_work *);
474 static unsigned int
475 iavf_calc_msix_count(struct iavf_softc *);
476 static unsigned int
477 iavf_calc_queue_pair_size(struct iavf_softc *);
478 static int iavf_queue_pairs_alloc(struct iavf_softc *);
479 static void iavf_queue_pairs_free(struct iavf_softc *);
480 static int iavf_arq_fill(struct iavf_softc *);
481 static void iavf_arq_refill(void *);
482 static int iavf_arq_poll(struct iavf_softc *, uint32_t, int);
483 static void iavf_atq_done(struct iavf_softc *);
484 static int iavf_init_admin_queue(struct iavf_softc *);
485 static void iavf_cleanup_admin_queue(struct iavf_softc *);
486 static int iavf_arq(struct iavf_softc *);
487 static int iavf_adminq_exec(struct iavf_softc *,
488 struct ixl_aq_desc *, struct ixl_aq_buf *);
489 static int iavf_adminq_poll(struct iavf_softc *,
490 struct ixl_aq_desc *, struct ixl_aq_buf *, int);
491 static int iavf_adminq_poll_locked(struct iavf_softc *,
492 struct ixl_aq_desc *, struct ixl_aq_buf *, int);
493 static int iavf_add_multi(struct iavf_softc *, uint8_t *, uint8_t *);
494 static int iavf_del_multi(struct iavf_softc *, uint8_t *, uint8_t *);
495 static void iavf_del_all_multi(struct iavf_softc *);
496
497 static int iavf_get_version(struct iavf_softc *, struct ixl_aq_buf *);
498 static int iavf_get_vf_resources(struct iavf_softc *, struct ixl_aq_buf *);
499 static int iavf_get_stats(struct iavf_softc *);
500 static int iavf_config_irq_map(struct iavf_softc *, struct ixl_aq_buf *);
501 static int iavf_config_vsi_queues(struct iavf_softc *);
502 static int iavf_config_hena(struct iavf_softc *);
503 static int iavf_config_rss_key(struct iavf_softc *);
504 static int iavf_config_rss_lut(struct iavf_softc *);
505 static int iavf_config_promisc_mode(struct iavf_softc *, int, int);
506 static int iavf_config_vlan_stripping(struct iavf_softc *, int);
507 static int iavf_config_vlan_id(struct iavf_softc *, uint16_t, uint32_t);
508 static int iavf_queue_select(struct iavf_softc *, int);
509 static int iavf_request_queues(struct iavf_softc *, unsigned int);
510 static int iavf_reset_vf(struct iavf_softc *);
511 static int iavf_eth_addr(struct iavf_softc *, const uint8_t *, uint32_t);
512 static void iavf_process_version(struct iavf_softc *,
513 struct ixl_aq_desc *, struct ixl_aq_buf *);
514 static void iavf_process_vf_resources(struct iavf_softc *,
515 struct ixl_aq_desc *, struct ixl_aq_buf *);
516 static void iavf_process_irq_map(struct iavf_softc *,
517 struct ixl_aq_desc *);
518 static void iavf_process_vc_event(struct iavf_softc *,
519 struct ixl_aq_desc *, struct ixl_aq_buf *);
520 static void iavf_process_stats(struct iavf_softc *,
521 struct ixl_aq_desc *, struct ixl_aq_buf *);
522 static void iavf_process_req_queues(struct iavf_softc *,
523 struct ixl_aq_desc *, struct ixl_aq_buf *);
524
525 static int iavf_intr(void *);
526 static int iavf_queue_intr(void *);
527 static void iavf_tick(void *);
528 static void iavf_tick_halt(void *);
529 static void iavf_reset_request(void *);
530 static void iavf_reset_start(void *);
531 static void iavf_reset(void *);
532 static void iavf_reset_finish(struct iavf_softc *);
533 static int iavf_init(struct ifnet *);
534 static int iavf_init_locked(struct iavf_softc *);
535 static void iavf_stop(struct ifnet *, int);
536 static void iavf_stop_locked(struct iavf_softc *);
537 static int iavf_ioctl(struct ifnet *, u_long, void *);
538 static void iavf_start(struct ifnet *);
539 static int iavf_transmit(struct ifnet *, struct mbuf*);
540 static int iavf_watchdog(struct iavf_tx_ring *);
541 static void iavf_watchdog_timeout(void *);
542 static int iavf_media_change(struct ifnet *);
543 static void iavf_media_status(struct ifnet *, struct ifmediareq *);
544 static int iavf_ifflags_cb(struct ethercom *);
545 static int iavf_vlan_cb(struct ethercom *, uint16_t, bool);
546 static void iavf_deferred_transmit(void *);
547 static void iavf_handle_queue(void *);
548 static void iavf_handle_queue_wk(struct work *, void *);
549 static int iavf_reinit(struct iavf_softc *);
550 static int iavf_rxfill(struct iavf_softc *, struct iavf_rx_ring *);
551 static void iavf_txr_clean(struct iavf_softc *, struct iavf_tx_ring *);
552 static void iavf_rxr_clean(struct iavf_softc *, struct iavf_rx_ring *);
553 static int iavf_txeof(struct iavf_softc *, struct iavf_tx_ring *,
554 u_int, struct evcnt *);
555 static int iavf_rxeof(struct iavf_softc *, struct iavf_rx_ring *,
556 u_int, struct evcnt *);
557 static int iavf_iff(struct iavf_softc *);
558 static int iavf_iff_locked(struct iavf_softc *);
559 static void iavf_post_request_queues(void *);
560 static int iavf_sysctl_itr_handler(SYSCTLFN_PROTO);
561
562 static int iavf_match(device_t, cfdata_t, void *);
563 static void iavf_attach(device_t, device_t, void*);
564 static int iavf_detach(device_t, int);
565 static int iavf_finalize_teardown(device_t);
566
567 CFATTACH_DECL3_NEW(iavf, sizeof(struct iavf_softc),
568 iavf_match, iavf_attach, iavf_detach, NULL, NULL, NULL,
569 DVF_DETACH_SHUTDOWN);
570
571 static const struct iavf_product iavf_products[] = {
572 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_VF },
573 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_VF_HV },
574 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_VF },
575 /* required last entry */
576 {0, 0}
577 };
578
579 static const struct iavf_link_speed iavf_link_speeds[] = {
580 { 0, 0 },
581 { IF_Mbps(100), IFM_100_TX },
582 { IF_Mbps(1000), IFM_1000_T },
583 { IF_Gbps(10), IFM_10G_T },
584 { IF_Gbps(40), IFM_40G_CR4 },
585 { IF_Gbps(20), IFM_20G_KR2 },
586 { IF_Gbps(25), IFM_25G_CR }
587 };
588
589 static const struct iavf_aq_regs iavf_aq_regs = {
590 .atq_tail = I40E_VF_ATQT1,
591 .atq_tail_mask = I40E_VF_ATQT1_ATQT_MASK,
592 .atq_head = I40E_VF_ATQH1,
593 .atq_head_mask = I40E_VF_ARQH1_ARQH_MASK,
594 .atq_len = I40E_VF_ATQLEN1,
595 .atq_bal = I40E_VF_ATQBAL1,
596 .atq_bah = I40E_VF_ATQBAH1,
597 .atq_len_enable = I40E_VF_ATQLEN1_ATQENABLE_MASK,
598
599 .arq_tail = I40E_VF_ARQT1,
600 .arq_tail_mask = I40E_VF_ARQT1_ARQT_MASK,
601 .arq_head = I40E_VF_ARQH1,
602 .arq_head_mask = I40E_VF_ARQH1_ARQH_MASK,
603 .arq_len = I40E_VF_ARQLEN1,
604 .arq_bal = I40E_VF_ARQBAL1,
605 .arq_bah = I40E_VF_ARQBAH1,
606 .arq_len_enable = I40E_VF_ARQLEN1_ARQENABLE_MASK,
607 };
608
609 static struct iavf_module_params iavf_params = {
610 .debug = 0,
611 .rx_itr = 0x07a, /* 4K intrs/sec */
612 .tx_itr = 0x07a, /* 4K intrs/sec */
613 .tx_ndescs = 512,
614 .rx_ndescs = 256,
615 .max_qps = INT_MAX,
616 };
617
618 #define delaymsec(_x) DELAY(1000 * (_x))
619 #define iavf_rd(_s, _r) \
620 bus_space_read_4((_s)->sc_memt, (_s)->sc_memh, (_r))
621 #define iavf_wr(_s, _r, _v) \
622 bus_space_write_4((_s)->sc_memt, (_s)->sc_memh, (_r), (_v))
623 #define iavf_barrier(_s, _r, _l, _o) \
624 bus_space_barrier((_s)->sc_memt, (_s)->sc_memh, (_r), (_l), (_o))
625 #define iavf_flush(_s) (void)iavf_rd((_s), I40E_VFGEN_RSTAT)
626 #define iavf_nqueues(_sc) (1 << ((_sc)->sc_nqueue_pairs - 1))
627 #define iavf_allqueues(_sc) ((1 << ((_sc)->sc_nqueue_pairs)) - 1)
628
629 static inline void
630 iavf_intr_barrier(void)
631 {
632
633 	/* wait for all running interrupt handlers to finish */
634 xc_barrier(0);
635 }
636 static inline void
637 iavf_intr_enable(struct iavf_softc *sc)
638 {
639
640 iavf_wr(sc, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL0_INTENA_MASK |
641 I40E_VFINT_DYN_CTL0_CLEARPBA_MASK |
642 (IAVF_NOITR << I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT));
643 iavf_wr(sc, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK);
644 iavf_flush(sc);
645 }
646
647 static inline void
648 iavf_intr_disable(struct iavf_softc *sc)
649 {
650
651 iavf_wr(sc, I40E_VFINT_DYN_CTL01,
652 (IAVF_NOITR << I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT));
653 iavf_wr(sc, I40E_VFINT_ICR0_ENA1, 0);
654 iavf_flush(sc);
655 }
656
657 static inline void
658 iavf_queue_intr_enable(struct iavf_softc *sc, unsigned int qid)
659 {
660
661 iavf_wr(sc, I40E_VFINT_DYN_CTLN1(qid),
662 I40E_VFINT_DYN_CTLN1_INTENA_MASK |
663 I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
664 (IAVF_NOITR << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT));
665 iavf_flush(sc);
666 }
667
668 static inline void
669 iavf_queue_intr_disable(struct iavf_softc *sc, unsigned int qid)
670 {
671
672 iavf_wr(sc, I40E_VFINT_DYN_CTLN1(qid),
673 (IAVF_NOITR << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT));
674 iavf_flush(sc);
675 }
676
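/*
 * The virtchnl opcode and return value travel in the 64-bit cookie
 * field of the admin queue descriptor; these helpers read and write
 * that field in little-endian byte order.
 */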
677 static inline void
678 iavf_aq_vc_set_opcode(struct ixl_aq_desc *iaq, uint32_t opcode)
679 {
680 struct iavf_aq_vc *vc;
681
682 vc = (struct iavf_aq_vc *)&iaq->iaq_cookie;
683 vc->iaq_vc_opcode = htole32(opcode);
684 }
685
686 static inline uint32_t
687 iavf_aq_vc_get_opcode(const struct ixl_aq_desc *iaq)
688 {
689 const struct iavf_aq_vc *vc;
690
691 vc = (const struct iavf_aq_vc *)&iaq->iaq_cookie;
692 return le32toh(vc->iaq_vc_opcode);
693 }
694
695 static inline uint32_t
696 iavf_aq_vc_get_retval(const struct ixl_aq_desc *iaq)
697 {
698 const struct iavf_aq_vc *vc;
699
700 vc = (const struct iavf_aq_vc *)&iaq->iaq_cookie;
701 return le32toh(vc->iaq_vc_retval);
702 }
703
704 static int
705 iavf_match(device_t parent, cfdata_t match, void *aux)
706 {
707 const struct pci_attach_args *pa = aux;
708
709 return (iavf_lookup(pa) != NULL) ? 1 : 0;
710 }
711
712 static void
713 iavf_attach(device_t parent, device_t self, void *aux)
714 {
715 struct iavf_softc *sc;
716 struct pci_attach_args *pa = aux;
717 struct ifnet *ifp;
718 struct ixl_aq_buf *aqb;
719 pcireg_t memtype;
720 char xnamebuf[MAXCOMLEN];
721 int error, i;
722
723 sc = device_private(self);
724 sc->sc_dev = self;
725 ifp = &sc->sc_ec.ec_if;
726
727 sc->sc_pa = *pa;
728 sc->sc_dmat = (pci_dma64_available(pa)) ? pa->pa_dmat64 : pa->pa_dmat;
729 sc->sc_aq_regs = &iavf_aq_regs;
730 sc->sc_debuglevel = iavf_params.debug;
731 sc->sc_tx_ring_ndescs = iavf_params.tx_ndescs;
732 sc->sc_rx_ring_ndescs = iavf_params.rx_ndescs;
733 sc->sc_tx_itr = iavf_params.tx_itr;
734 sc->sc_rx_itr = iavf_params.rx_itr;
735 sc->sc_nqps_req = MIN(ncpu, iavf_params.max_qps);
736 iavf_prepare_fakeaddr(sc);
737
738 sc->sc_mac_type = iavf_mactype(PCI_PRODUCT(pa->pa_id));
739 iavf_pci_csr_setup(pa->pa_pc, pa->pa_tag);
740
741 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, IAVF_PCIREG);
742 if (pci_mapreg_map(pa, IAVF_PCIREG, memtype, 0,
743 &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems)) {
744 aprint_error(": unable to map registers\n");
745 return;
746 }
747
748 if (iavf_wait_active(sc) != 0) {
749 aprint_error(": VF reset timed out\n");
750 goto unmap;
751 }
752
753 mutex_init(&sc->sc_cfg_lock, MUTEX_DEFAULT, IPL_SOFTNET);
754 mutex_init(&sc->sc_adminq_lock, MUTEX_DEFAULT, IPL_NET);
755 SIMPLEQ_INIT(&sc->sc_atq_idle);
756 SIMPLEQ_INIT(&sc->sc_atq_live);
757 SIMPLEQ_INIT(&sc->sc_arq_idle);
758 SIMPLEQ_INIT(&sc->sc_arq_live);
759 sc->sc_arq_cons = 0;
760 sc->sc_arq_prod = 0;
761 aqb = NULL;
762
763 if (iavf_dmamem_alloc(sc->sc_dmat, &sc->sc_atq,
764 sizeof(struct ixl_aq_desc) * IAVF_AQ_NUM, IAVF_AQ_ALIGN) != 0) {
765 aprint_error(": unable to allocate atq\n");
766 goto free_mutex;
767 }
768
769 if (iavf_dmamem_alloc(sc->sc_dmat, &sc->sc_arq,
770 sizeof(struct ixl_aq_desc) * IAVF_AQ_NUM, IAVF_AQ_ALIGN) != 0) {
771 aprint_error(": unable to allocate arq\n");
772 goto free_atq;
773 }
774
775 for (i = 0; i < IAVF_AQ_NUM; i++) {
776 aqb = iavf_aqb_get(sc, NULL);
777 if (aqb != NULL) {
778 iavf_aqb_put_locked(&sc->sc_arq_idle, aqb);
779 }
780 }
781 aqb = NULL;
782
783 if (!iavf_arq_fill(sc)) {
784 aprint_error(": unable to fill arq descriptors\n");
785 goto free_arq;
786 }
787
788 if (iavf_init_admin_queue(sc) != 0) {
789 aprint_error(": unable to initialize admin queue\n");
790 goto shutdown;
791 }
792
793 aqb = iavf_aqb_get(sc, NULL);
794 if (aqb == NULL) {
795 aprint_error(": unable to allocate buffer for ATQ\n");
796 goto shutdown;
797 }
798
799 error = iavf_get_version(sc, aqb);
800 switch (error) {
801 case 0:
802 break;
803 case ETIMEDOUT:
804 aprint_error(": timeout waiting for VF version\n");
805 goto shutdown;
806 case ENOTSUP:
807 aprint_error(": unsupported VF version %d\n", sc->sc_major_ver);
808 goto shutdown;
809 default:
810 		aprint_error(": unable to get VF interface version\n");
811 goto shutdown;
812 }
813
814 if (iavf_get_vf_resources(sc, aqb) != 0) {
815 aprint_error(": timeout waiting for VF resources\n");
816 goto shutdown;
817 }
818
819 aprint_normal(", VF version %d.%d%s",
820 sc->sc_major_ver, sc->sc_minor_ver,
821 (sc->sc_minor_ver > IAVF_VF_MINOR) ? "(minor mismatch)" : "");
822 aprint_normal(", VF %d, VSI %d", sc->sc_vf_id, sc->sc_vsi_id);
823 aprint_normal("\n");
824 aprint_naive("\n");
825
826 aprint_normal_dev(self, "Ethernet address %s\n",
827 ether_sprintf(sc->sc_enaddr));
828
829 if (iavf_queue_pairs_alloc(sc) != 0) {
830 goto shutdown;
831 }
832
833 if (iavf_setup_interrupts(sc) != 0) {
834 goto free_queue_pairs;
835 }
836
837 if (iavf_config_irq_map(sc, aqb) != 0) {
838 aprint_error(", timed out waiting for IRQ map response\n");
839 goto teardown_intrs;
840 }
841
842 if (iavf_setup_sysctls(sc) != 0) {
843 goto teardown_intrs;
844 }
845
846 if (iavf_setup_stats(sc) != 0) {
847 goto teardown_sysctls;
848 }
849
850 iavf_aqb_put_locked(&sc->sc_atq_idle, aqb);
851 aqb = NULL;
852
853 snprintf(xnamebuf, sizeof(xnamebuf),
854 "%s_adminq_cv", device_xname(self));
855 cv_init(&sc->sc_adminq_cv, xnamebuf);
856
857 callout_init(&sc->sc_tick, CALLOUT_MPSAFE);
858 callout_setfunc(&sc->sc_tick, iavf_tick, sc);
859
860 iavf_work_set(&sc->sc_reset_task, iavf_reset_start, sc);
861 iavf_work_set(&sc->sc_arq_refill, iavf_arq_refill, sc);
862 iavf_work_set(&sc->sc_wdto_task, iavf_watchdog_timeout, sc);
863 iavf_work_set(&sc->sc_req_queues_task, iavf_post_request_queues, sc);
864 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_cfg", device_xname(self));
865 sc->sc_workq = iavf_workq_create(xnamebuf, IAVF_WORKQUEUE_PRI,
866 IPL_NET, WQ_MPSAFE);
867 if (sc->sc_workq == NULL)
868 goto destroy_cv;
869
870 snprintf(xnamebuf, sizeof(xnamebuf), "%s_wq_txrx", device_xname(self));
871 error = workqueue_create(&sc->sc_workq_txrx, xnamebuf,
872 iavf_handle_queue_wk, sc, IAVF_WORKQUEUE_PRI, IPL_NET,
873 WQ_PERCPU|WQ_MPSAFE);
874 if (error != 0) {
875 sc->sc_workq_txrx = NULL;
876 goto teardown_wqs;
877 }
878
879 if_initialize(ifp);
880
881 strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
882
883 ifp->if_softc = sc;
884 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
885 ifp->if_extflags = IFEF_MPSAFE;
886 ifp->if_ioctl = iavf_ioctl;
887 ifp->if_start = iavf_start;
888 ifp->if_transmit = iavf_transmit;
889 ifp->if_watchdog = NULL;
890 ifp->if_init = iavf_init;
891 ifp->if_stop = iavf_stop;
892
893 IFQ_SET_MAXLEN(&ifp->if_snd, sc->sc_tx_ring_ndescs);
894 IFQ_SET_READY(&ifp->if_snd);
895 sc->sc_ipq = if_percpuq_create(ifp);
896
897 ifp->if_capabilities |= IAVF_IFCAP_RXCSUM;
898 ifp->if_capabilities |= IAVF_IFCAP_TXCSUM;
899
900 ether_set_vlan_cb(&sc->sc_ec, iavf_vlan_cb);
901 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
902 sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWFILTER;
903 sc->sc_ec.ec_capenable = sc->sc_ec.ec_capabilities;
904
905 ether_set_ifflags_cb(&sc->sc_ec, iavf_ifflags_cb);
906
907 sc->sc_ec.ec_ifmedia = &sc->sc_media;
908 ifmedia_init_with_lock(&sc->sc_media, IFM_IMASK, iavf_media_change,
909 iavf_media_status, &sc->sc_cfg_lock);
910
911 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
912 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
913
914 if_deferred_start_init(ifp, NULL);
915 ether_ifattach(ifp, sc->sc_enaddr);
916
917 sc->sc_txrx_workqueue = true;
918 sc->sc_tx_process_limit = IAVF_TX_PROCESS_LIMIT;
919 sc->sc_rx_process_limit = IAVF_RX_PROCESS_LIMIT;
920 sc->sc_tx_intr_process_limit = IAVF_TX_INTR_PROCESS_LIMIT;
921 sc->sc_rx_intr_process_limit = IAVF_RX_INTR_PROCESS_LIMIT;
922
923 if_register(ifp);
924 if_link_state_change(ifp, sc->sc_link_state);
925 iavf_intr_enable(sc);
926 if (sc->sc_nqps_vsi < sc->sc_nqps_req)
927 iavf_work_add(sc->sc_workq, &sc->sc_req_queues_task);
928 sc->sc_attached = true;
929 return;
930
931 teardown_wqs:
932 config_finalize_register(self, iavf_finalize_teardown);
933 destroy_cv:
934 cv_destroy(&sc->sc_adminq_cv);
935 callout_destroy(&sc->sc_tick);
936 iavf_teardown_stats(sc);
937 teardown_sysctls:
938 iavf_teardown_sysctls(sc);
939 teardown_intrs:
940 iavf_teardown_interrupts(sc);
941 free_queue_pairs:
942 iavf_queue_pairs_free(sc);
943 shutdown:
944 if (aqb != NULL)
945 iavf_aqb_put_locked(&sc->sc_atq_idle, aqb);
946 iavf_cleanup_admin_queue(sc);
947 iavf_aqb_clean(&sc->sc_atq_idle, sc->sc_dmat);
948 iavf_aqb_clean(&sc->sc_arq_idle, sc->sc_dmat);
949 free_arq:
950 iavf_dmamem_free(sc->sc_dmat, &sc->sc_arq);
951 free_atq:
952 iavf_dmamem_free(sc->sc_dmat, &sc->sc_atq);
953 free_mutex:
954 mutex_destroy(&sc->sc_cfg_lock);
955 mutex_destroy(&sc->sc_adminq_lock);
956 unmap:
957 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
958 sc->sc_mems = 0;
959 sc->sc_attached = false;
960 }
961
962 static int
963 iavf_detach(device_t self, int flags)
964 {
965 struct iavf_softc *sc = device_private(self);
966 struct ifnet *ifp = &sc->sc_ec.ec_if;
967
968 if (!sc->sc_attached)
969 return 0;
970
971 iavf_stop(ifp, 1);
972
973 /*
974 	 * Point the callout at a dummy handler so that it can be halted
975 	 * safely even if a workqueue entry calls callout_schedule().
976 */
977 callout_setfunc(&sc->sc_tick, iavf_tick_halt, sc);
978 iavf_work_wait(sc->sc_workq, &sc->sc_reset_task);
979 iavf_work_wait(sc->sc_workq, &sc->sc_wdto_task);
980
981 callout_halt(&sc->sc_tick, NULL);
982 callout_destroy(&sc->sc_tick);
983
984 	/* detach the interface before stopping the adminq; detach callbacks use it */
985 ether_ifdetach(ifp);
986 if_detach(ifp);
987 ifmedia_fini(&sc->sc_media);
988 if_percpuq_destroy(sc->sc_ipq);
989
990 iavf_intr_disable(sc);
991 iavf_intr_barrier();
992 iavf_work_wait(sc->sc_workq, &sc->sc_arq_refill);
993
994 mutex_enter(&sc->sc_adminq_lock);
995 iavf_cleanup_admin_queue(sc);
996 mutex_exit(&sc->sc_adminq_lock);
997 iavf_aqb_clean(&sc->sc_atq_idle, sc->sc_dmat);
998 iavf_aqb_clean(&sc->sc_arq_idle, sc->sc_dmat);
999 iavf_dmamem_free(sc->sc_dmat, &sc->sc_arq);
1000 iavf_dmamem_free(sc->sc_dmat, &sc->sc_atq);
1001 cv_destroy(&sc->sc_adminq_cv);
1002
1003 iavf_workq_destroy(sc->sc_workq);
1004 sc->sc_workq = NULL;
1005
1006 iavf_queue_pairs_free(sc);
1007 iavf_teardown_interrupts(sc);
1008 iavf_teardown_sysctls(sc);
1009 iavf_teardown_stats(sc);
1010 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
1011
1012 mutex_destroy(&sc->sc_adminq_lock);
1013 mutex_destroy(&sc->sc_cfg_lock);
1014
1015 return 0;
1016 }
1017
1018 static int
1019 iavf_finalize_teardown(device_t self)
1020 {
1021 struct iavf_softc *sc = device_private(self);
1022
1023 if (sc->sc_workq != NULL) {
1024 iavf_workq_destroy(sc->sc_workq);
1025 sc->sc_workq = NULL;
1026 }
1027
1028 if (sc->sc_workq_txrx != NULL) {
1029 workqueue_destroy(sc->sc_workq_txrx);
1030 sc->sc_workq_txrx = NULL;
1031 }
1032
1033 return 0;
1034 }
1035
1036 static int
1037 iavf_init(struct ifnet *ifp)
1038 {
1039 struct iavf_softc *sc;
1040 int rv;
1041
1042 sc = ifp->if_softc;
1043 mutex_enter(&sc->sc_cfg_lock);
1044 rv = iavf_init_locked(sc);
1045 mutex_exit(&sc->sc_cfg_lock);
1046
1047 return rv;
1048 }
1049
1050 static int
1051 iavf_init_locked(struct iavf_softc *sc)
1052 {
1053 struct ifnet *ifp = &sc->sc_ec.ec_if;
1054 unsigned int i;
1055 int error;
1056
1057 KASSERT(mutex_owned(&sc->sc_cfg_lock));
1058
1059 if (ISSET(ifp->if_flags, IFF_RUNNING))
1060 iavf_stop_locked(sc);
1061
1062 if (sc->sc_resetting)
1063 return ENXIO;
1064
1065 error = iavf_reinit(sc);
1066 if (error) {
1067 iavf_stop_locked(sc);
1068 return error;
1069 }
1070
1071 SET(ifp->if_flags, IFF_RUNNING);
1072 CLR(ifp->if_flags, IFF_OACTIVE);
1073
1074 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
1075 iavf_wr(sc, I40E_VFINT_ITRN1(IAVF_ITR_RX, i), sc->sc_rx_itr);
1076 iavf_wr(sc, I40E_VFINT_ITRN1(IAVF_ITR_TX, i), sc->sc_tx_itr);
1077 }
1078 iavf_wr(sc, I40E_VFINT_ITR01(IAVF_ITR_RX), sc->sc_rx_itr);
1079 iavf_wr(sc, I40E_VFINT_ITR01(IAVF_ITR_TX), sc->sc_tx_itr);
1080 iavf_wr(sc, I40E_VFINT_ITR01(IAVF_ITR_MISC), 0);
1081
1082 error = iavf_iff_locked(sc);
1083 if (error) {
1084 iavf_stop_locked(sc);
1085 return error;
1086 };
1087
1088 	/* ETHERCAP_VLAN_HWFILTER cannot be disabled */
1089 SET(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
1090
1091 callout_schedule(&sc->sc_tick, IAVF_TICK_INTERVAL);
1092 return 0;
1093 }
1094
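/*
 * Bring the hardware side of the interface up: post Rx buffers,
 * configure the VSI queues, hash-enable bits and RSS via virtchnl,
 * unmask the queue interrupts, and finally enable the queues.
 */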
1095 static int
1096 iavf_reinit(struct iavf_softc *sc)
1097 {
1098 struct iavf_rx_ring *rxr;
1099 struct iavf_tx_ring *txr;
1100 unsigned int i;
1101 uint32_t reg;
1102
1103 KASSERT(mutex_owned(&sc->sc_cfg_lock));
1104
1105 sc->sc_reset_up = true;
1106 sc->sc_nqueue_pairs = MIN(sc->sc_nqps_alloc, sc->sc_nintrs - 1);
1107
1108 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
1109 rxr = sc->sc_qps[i].qp_rxr;
1110 txr = sc->sc_qps[i].qp_txr;
1111
1112 iavf_rxfill(sc, rxr);
1113 txr->txr_watchdog = IAVF_WATCHDOG_STOP;
1114 }
1115
1116 if (iavf_config_vsi_queues(sc) != 0)
1117 return EIO;
1118
1119 if (iavf_config_hena(sc) != 0)
1120 return EIO;
1121
1122 iavf_config_rss_key(sc);
1123 iavf_config_rss_lut(sc);
1124
1125 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
1126 iavf_queue_intr_enable(sc, i);
1127 }
1128 /* unmask */
1129 reg = iavf_rd(sc, I40E_VFINT_DYN_CTL01);
1130 reg |= (IAVF_NOITR << I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT);
1131 iavf_wr(sc, I40E_VFINT_DYN_CTL01, reg);
1132
1133 if (iavf_queue_select(sc, IAVF_VC_OP_ENABLE_QUEUES) != 0)
1134 return EIO;
1135
1136 return 0;
1137 }
1138
1139 static void
1140 iavf_stop(struct ifnet *ifp, int disable)
1141 {
1142 struct iavf_softc *sc;
1143
1144 sc = ifp->if_softc;
1145 mutex_enter(&sc->sc_cfg_lock);
1146 iavf_stop_locked(sc);
1147 mutex_exit(&sc->sc_cfg_lock);
1148 }
1149
1150 static void
1151 iavf_stop_locked(struct iavf_softc *sc)
1152 {
1153 struct ifnet *ifp = &sc->sc_ec.ec_if;
1154 struct iavf_rx_ring *rxr;
1155 struct iavf_tx_ring *txr;
1156 uint32_t reg;
1157 unsigned int i;
1158
1159 KASSERT(mutex_owned(&sc->sc_cfg_lock));
1160
1161 CLR(ifp->if_flags, IFF_RUNNING);
1162 sc->sc_reset_up = false;
1163 callout_stop(&sc->sc_tick);
1164
1165 if (!sc->sc_resetting) {
1166 		/* disable queues */
1167 if (iavf_queue_select(sc, IAVF_VC_OP_DISABLE_QUEUES) != 0) {
1168 goto die;
1169 }
1170 }
1171
1172 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
1173 iavf_queue_intr_disable(sc, i);
1174 }
1175
1176 /* mask interrupts */
1177 reg = iavf_rd(sc, I40E_VFINT_DYN_CTL01);
1178 reg |= I40E_VFINT_DYN_CTL0_INTENA_MSK_MASK |
1179 (IAVF_NOITR << I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT);
1180 iavf_wr(sc, I40E_VFINT_DYN_CTL01, reg);
1181
1182 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
1183 rxr = sc->sc_qps[i].qp_rxr;
1184 txr = sc->sc_qps[i].qp_txr;
1185
1186 mutex_enter(&rxr->rxr_lock);
1187 iavf_rxr_clean(sc, rxr);
1188 mutex_exit(&rxr->rxr_lock);
1189
1190 mutex_enter(&txr->txr_lock);
1191 iavf_txr_clean(sc, txr);
1192 mutex_exit(&txr->txr_lock);
1193
1194 workqueue_wait(sc->sc_workq_txrx,
1195 &sc->sc_qps[i].qp_work);
1196 }
1197
1198 return;
1199 die:
1200 if (!sc->sc_dead) {
1201 sc->sc_dead = true;
1202 log(LOG_INFO, "%s: Request VF reset\n", ifp->if_xname);
1203
1204 iavf_work_set(&sc->sc_reset_task, iavf_reset_request, sc);
1205 iavf_work_add(sc->sc_workq, &sc->sc_reset_task);
1206 }
1207 log(LOG_CRIT, "%s: failed to shut down rings\n", ifp->if_xname);
1208 }
1209
1210 static int
1211 iavf_watchdog(struct iavf_tx_ring *txr)
1212 {
1213 struct iavf_softc *sc;
1214
1215 sc = txr->txr_sc;
1216
1217 mutex_enter(&txr->txr_lock);
1218
1219 if (txr->txr_watchdog == IAVF_WATCHDOG_STOP
1220 || --txr->txr_watchdog > 0) {
1221 mutex_exit(&txr->txr_lock);
1222 return 0;
1223 }
1224
1225 txr->txr_watchdog = IAVF_WATCHDOG_STOP;
1226 txr->txr_watchdogto.ev_count++;
1227 mutex_exit(&txr->txr_lock);
1228
1229 device_printf(sc->sc_dev, "watchdog timeout on queue %d\n",
1230 txr->txr_qid);
1231 return 1;
1232 }
1233
1234 static void
1235 iavf_watchdog_timeout(void *xsc)
1236 {
1237 struct iavf_softc *sc;
1238 struct ifnet *ifp;
1239
1240 sc = xsc;
1241 ifp = &sc->sc_ec.ec_if;
1242
1243 mutex_enter(&sc->sc_cfg_lock);
1244 if (ISSET(ifp->if_flags, IFF_RUNNING))
1245 iavf_init_locked(sc);
1246 mutex_exit(&sc->sc_cfg_lock);
1247 }
1248
1249 static int
1250 iavf_media_change(struct ifnet *ifp)
1251 {
1252 struct iavf_softc *sc;
1253 struct ifmedia *ifm;
1254
1255 sc = ifp->if_softc;
1256 ifm = &sc->sc_media;
1257
1258 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1259 return EINVAL;
1260
1261 switch (IFM_SUBTYPE(ifm->ifm_media)) {
1262 case IFM_AUTO:
1263 break;
1264 default:
1265 return EINVAL;
1266 }
1267
1268 return 0;
1269 }
1270
1271 static void
1272 iavf_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1273 {
1274 struct iavf_softc *sc = ifp->if_softc;
1275
1276 KASSERT(mutex_owned(&sc->sc_cfg_lock));
1277
1278 ifmr->ifm_status = sc->sc_media_status;
1279 ifmr->ifm_active = sc->sc_media_active;
1280 }
1281
1282 static int
1283 iavf_ifflags_cb(struct ethercom *ec)
1284 {
1285 struct ifnet *ifp = &ec->ec_if;
1286 struct iavf_softc *sc = ifp->if_softc;
1287
1288 	/* the VLAN hardware filter cannot be disabled */
1289 SET(ec->ec_capenable, ETHERCAP_VLAN_HWFILTER);
1290
1291 return iavf_iff(sc);
1292 }
1293
1294 static int
1295 iavf_vlan_cb(struct ethercom *ec, uint16_t vid, bool set)
1296 {
1297 struct ifnet *ifp = &ec->ec_if;
1298 struct iavf_softc *sc = ifp->if_softc;
1299 int rv;
1300
1301 mutex_enter(&sc->sc_cfg_lock);
1302
1303 if (sc->sc_resetting) {
1304 mutex_exit(&sc->sc_cfg_lock);
1305
1306 		/* all VLAN IDs have already been removed */
1307 if (!set)
1308 return 0;
1309
1310 return ENXIO;
1311 }
1312
1313 	/* ETHERCAP_VLAN_HWFILTER cannot be disabled */
1314 SET(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWFILTER);
1315
1316 if (set) {
1317 rv = iavf_config_vlan_id(sc, vid, IAVF_VC_OP_ADD_VLAN);
1318 if (!ISSET(sc->sc_ec.ec_capenable, ETHERCAP_VLAN_HWTAGGING)) {
1319 iavf_config_vlan_stripping(sc,
1320 sc->sc_ec.ec_capenable);
1321 }
1322 } else {
1323 rv = iavf_config_vlan_id(sc, vid, IAVF_VC_OP_DEL_VLAN);
1324 }
1325
1326 mutex_exit(&sc->sc_cfg_lock);
1327
1328 if (rv != 0)
1329 return EIO;
1330
1331 return 0;
1332 }
1333
1334 static int
1335 iavf_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1336 {
1337 struct ifreq *ifr = (struct ifreq *)data;
1338 struct iavf_softc *sc = (struct iavf_softc *)ifp->if_softc;
1339 const struct sockaddr *sa;
1340 uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN];
1341 int s, error = 0;
1342 unsigned int nmtu;
1343
1344 switch (cmd) {
1345 case SIOCSIFMTU:
1346 nmtu = ifr->ifr_mtu;
1347
1348 if (nmtu < IAVF_MIN_MTU || nmtu > IAVF_MAX_MTU) {
1349 error = EINVAL;
1350 break;
1351 }
1352 if (ifp->if_mtu != nmtu) {
1353 s = splnet();
1354 error = ether_ioctl(ifp, cmd, data);
1355 splx(s);
1356 if (error == ENETRESET)
1357 error = iavf_init(ifp);
1358 }
1359 break;
1360 case SIOCADDMULTI:
1361 sa = ifreq_getaddr(SIOCADDMULTI, ifr);
1362 if (ether_addmulti(sa, &sc->sc_ec) == ENETRESET) {
1363 error = ether_multiaddr(sa, addrlo, addrhi);
1364 if (error != 0)
1365 return error;
1366
1367 error = iavf_add_multi(sc, addrlo, addrhi);
1368 if (error != 0 && error != ENETRESET) {
1369 ether_delmulti(sa, &sc->sc_ec);
1370 error = EIO;
1371 }
1372 }
1373 break;
1374
1375 case SIOCDELMULTI:
1376 sa = ifreq_getaddr(SIOCDELMULTI, ifr);
1377 if (ether_delmulti(sa, &sc->sc_ec) == ENETRESET) {
1378 error = ether_multiaddr(sa, addrlo, addrhi);
1379 if (error != 0)
1380 return error;
1381
1382 error = iavf_del_multi(sc, addrlo, addrhi);
1383 }
1384 break;
1385
1386 default:
1387 s = splnet();
1388 error = ether_ioctl(ifp, cmd, data);
1389 splx(s);
1390 }
1391
1392 if (error == ENETRESET)
1393 error = iavf_iff(sc);
1394
1395 return error;
1396 }
1397
1398 static int
1399 iavf_iff(struct iavf_softc *sc)
1400 {
1401 int error;
1402
1403 mutex_enter(&sc->sc_cfg_lock);
1404 error = iavf_iff_locked(sc);
1405 mutex_exit(&sc->sc_cfg_lock);
1406
1407 return error;
1408 }
1409
1410 static int
1411 iavf_iff_locked(struct iavf_softc *sc)
1412 {
1413 struct ifnet *ifp = &sc->sc_ec.ec_if;
1414 int unicast, multicast;
1415 const uint8_t *enaddr;
1416
1417 KASSERT(mutex_owned(&sc->sc_cfg_lock));
1418
1419 if (!ISSET(ifp->if_flags, IFF_RUNNING))
1420 return 0;
1421
1422 unicast = 0;
1423 multicast = 0;
1424 if (ISSET(ifp->if_flags, IFF_PROMISC)) {
1425 unicast = 1;
1426 multicast = 1;
1427 } else if (ISSET(ifp->if_flags, IFF_ALLMULTI)) {
1428 multicast = 1;
1429 }
1430
1431 iavf_config_promisc_mode(sc, unicast, multicast);
1432
1433 iavf_config_vlan_stripping(sc, sc->sc_ec.ec_capenable);
1434
1435 enaddr = CLLADDR(ifp->if_sadl);
1436 if (memcmp(enaddr, sc->sc_enaddr_added, ETHER_ADDR_LEN) != 0) {
1437 if (!iavf_is_etheranyaddr(sc->sc_enaddr_added)) {
1438 iavf_eth_addr(sc, sc->sc_enaddr_added,
1439 IAVF_VC_OP_DEL_ETH_ADDR);
1440 }
1441 memcpy(sc->sc_enaddr_added, enaddr, ETHER_ADDR_LEN);
1442 iavf_eth_addr(sc, enaddr, IAVF_VC_OP_ADD_ETH_ADDR);
1443 }
1444
1445 return 0;
1446 }
1447
1448 static const struct iavf_product *
1449 iavf_lookup(const struct pci_attach_args *pa)
1450 {
1451 const struct iavf_product *iavfp;
1452
1453 for (iavfp = iavf_products; iavfp->vendor_id != 0; iavfp++) {
1454 if (PCI_VENDOR(pa->pa_id) == iavfp->vendor_id &&
1455 PCI_PRODUCT(pa->pa_id) == iavfp->product_id)
1456 return iavfp;
1457 }
1458
1459 return NULL;
1460 }
1461
1462 static enum i40e_mac_type
1463 iavf_mactype(pci_product_id_t id)
1464 {
1465
1466 switch (id) {
1467 case PCI_PRODUCT_INTEL_XL710_VF:
1468 case PCI_PRODUCT_INTEL_XL710_VF_HV:
1469 return I40E_MAC_VF;
1470 case PCI_PRODUCT_INTEL_X722_VF:
1471 return I40E_MAC_X722_VF;
1472 }
1473
1474 return I40E_MAC_GENERIC;
1475 }
1476
1477 static const struct iavf_link_speed *
1478 iavf_find_link_speed(struct iavf_softc *sc, uint32_t link_speed)
1479 {
1480 size_t i;
1481
1482 for (i = 0; i < __arraycount(iavf_link_speeds); i++) {
1483 if (link_speed & (1 << i))
1484 return (&iavf_link_speeds[i]);
1485 }
1486
1487 return NULL;
1488 }
1489
1490 static void
1491 iavf_pci_csr_setup(pci_chipset_tag_t pc, pcitag_t tag)
1492 {
1493 pcireg_t csr;
1494
1495 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
1496 csr |= (PCI_COMMAND_MASTER_ENABLE |
1497 PCI_COMMAND_MEM_ENABLE);
1498 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
1499 }
1500
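/*
 * Poll VFGEN_RSTAT until the PF reports that the VF reset has
 * completed (VFACTIVE or COMPLETED), waiting up to roughly a second.
 */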
1501 static int
1502 iavf_wait_active(struct iavf_softc *sc)
1503 {
1504 int tries;
1505 uint32_t reg;
1506
1507 for (tries = 0; tries < 100; tries++) {
1508 reg = iavf_rd(sc, I40E_VFGEN_RSTAT) &
1509 I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1510 if (reg == IAVF_VFR_VFACTIVE ||
1511 reg == IAVF_VFR_COMPLETED)
1512 return 0;
1513
1514 delaymsec(10);
1515 }
1516
1517 return -1;
1518 }
1519
1520 static bool
1521 iavf_is_etheranyaddr(const uint8_t *enaddr)
1522 {
1523 static const uint8_t etheranyaddr[ETHER_ADDR_LEN] = {
1524 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1525 };
1526
1527 if (memcmp(enaddr, etheranyaddr, ETHER_ADDR_LEN) != 0)
1528 return false;
1529
1530 return true;
1531 }
1532
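/*
 * Generate a random, locally administered unicast MAC address once,
 * for use as a fallback station address when the PF does not supply
 * a valid one.
 */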
1533 static void
1534 iavf_prepare_fakeaddr(struct iavf_softc *sc)
1535 {
1536 uint64_t rndval;
1537
1538 if (!iavf_is_etheranyaddr(sc->sc_enaddr_fake))
1539 return;
1540
1541 rndval = cprng_strong64();
1542
1543 memcpy(sc->sc_enaddr_fake, &rndval, sizeof(sc->sc_enaddr_fake));
1544 sc->sc_enaddr_fake[0] &= 0xFE;
1545 sc->sc_enaddr_fake[0] |= 0x02;
1546 }
1547
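/*
 * Replace the interface's active link-layer address: look up the
 * ifaddrs for the previous and new addresses, create and activate the
 * new one if it does not exist yet, and retire the old one.  Returns
 * ENETRESET if the interface is running and needs to be reinitialized.
 */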
1548 static int
1549 iavf_replace_lla(struct ifnet *ifp, const uint8_t *prev, const uint8_t *next)
1550 {
1551 union {
1552 struct sockaddr sa;
1553 struct sockaddr_dl sdl;
1554 struct sockaddr_storage ss;
1555 } u;
1556 struct psref psref_prev, psref_next;
1557 struct ifaddr *ifa_prev, *ifa_next;
1558 const struct sockaddr_dl *nsdl;
1559 int s, error;
1560
1561 KASSERT(IFNET_LOCKED(ifp));
1562
1563 error = 0;
1564 ifa_prev = ifa_next = NULL;
1565
1566 if (memcmp(prev, next, ETHER_ADDR_LEN) == 0) {
1567 goto done;
1568 }
1569
1570 if (sockaddr_dl_init(&u.sdl, sizeof(u.ss), ifp->if_index,
1571 ifp->if_type, ifp->if_xname, strlen(ifp->if_xname),
1572 prev, ETHER_ADDR_LEN) == NULL) {
1573 error = EINVAL;
1574 goto done;
1575 }
1576
1577 s = pserialize_read_enter();
1578 IFADDR_READER_FOREACH(ifa_prev, ifp) {
1579 if (sockaddr_cmp(&u.sa, ifa_prev->ifa_addr) == 0) {
1580 ifa_acquire(ifa_prev, &psref_prev);
1581 break;
1582 }
1583 }
1584 pserialize_read_exit(s);
1585
1586 if (sockaddr_dl_init(&u.sdl, sizeof(u.ss), ifp->if_index,
1587 ifp->if_type, ifp->if_xname, strlen(ifp->if_xname),
1588 next, ETHER_ADDR_LEN) == NULL) {
1589 error = EINVAL;
1590 goto done;
1591 }
1592
1593 s = pserialize_read_enter();
1594 IFADDR_READER_FOREACH(ifa_next, ifp) {
1595 if (sockaddr_cmp(&u.sa, ifa_next->ifa_addr) == 0) {
1596 ifa_acquire(ifa_next, &psref_next);
1597 break;
1598 }
1599 }
1600 pserialize_read_exit(s);
1601
1602 if (ifa_next == NULL) {
1603 nsdl = &u.sdl;
1604 ifa_next = if_dl_create(ifp, &nsdl);
1605 if (ifa_next == NULL) {
1606 error = ENOMEM;
1607 goto done;
1608 }
1609
1610 s = pserialize_read_enter();
1611 ifa_acquire(ifa_next, &psref_next);
1612 pserialize_read_exit(s);
1613
1614 sockaddr_copy(ifa_next->ifa_addr,
1615 ifa_next->ifa_addr->sa_len, &u.sa);
1616 ifa_insert(ifp, ifa_next);
1617 } else {
1618 nsdl = NULL;
1619 }
1620
1621 if (ifa_prev != NULL && ifa_prev == ifp->if_dl) {
1622 if_activate_sadl(ifp, ifa_next, nsdl);
1623 }
1624
1625 ifa_release(ifa_next, &psref_next);
1626 ifa_next = NULL;
1627
1628 if (ifa_prev != NULL && ifa_prev != ifp->if_hwdl) {
1629 ifaref(ifa_prev);
1630 ifa_release(ifa_prev, &psref_prev);
1631 ifa_remove(ifp, ifa_prev);
1632 KASSERTMSG(ifa_prev->ifa_refcnt == 1, "ifa_refcnt=%d",
1633 ifa_prev->ifa_refcnt);
1634 ifafree(ifa_prev);
1635 ifa_prev = NULL;
1636 }
1637
1638 if (ISSET(ifp->if_flags, IFF_RUNNING))
1639 error = ENETRESET;
1640
1641 done:
1642 if (ifa_prev != NULL)
1643 ifa_release(ifa_prev, &psref_prev);
1644 if (ifa_next != NULL)
1645 ifa_release(ifa_next, &psref_next);
1646
1647 return error;
1648 }
1649 static int
1650 iavf_add_multi(struct iavf_softc *sc, uint8_t *addrlo, uint8_t *addrhi)
1651 {
1652 struct ifnet *ifp = &sc->sc_ec.ec_if;
1653 int rv;
1654
1655 if (ISSET(ifp->if_flags, IFF_ALLMULTI))
1656 return 0;
1657
1658 if (memcmp(addrlo, addrhi, ETHER_ADDR_LEN) != 0) {
1659 iavf_del_all_multi(sc);
1660 SET(ifp->if_flags, IFF_ALLMULTI);
1661 return ENETRESET;
1662 }
1663
1664 rv = iavf_eth_addr(sc, addrlo, IAVF_VC_OP_ADD_ETH_ADDR);
1665
1666 if (rv == ENOSPC) {
1667 iavf_del_all_multi(sc);
1668 SET(ifp->if_flags, IFF_ALLMULTI);
1669 return ENETRESET;
1670 }
1671
1672 return rv;
1673 }
1674
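/*
 * When IFF_ALLMULTI is set, try to fall back to an explicit filter
 * list: if every remaining multicast entry is a single address and
 * all of them can be programmed, clear IFF_ALLMULTI; otherwise undo
 * the addresses that were added and keep IFF_ALLMULTI set.
 */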
1675 static int
1676 iavf_del_multi(struct iavf_softc *sc, uint8_t *addrlo, uint8_t *addrhi)
1677 {
1678 struct ifnet *ifp = &sc->sc_ec.ec_if;
1679 struct ethercom *ec = &sc->sc_ec;
1680 struct ether_multi *enm, *enm_last;
1681 struct ether_multistep step;
1682 int error, rv = 0;
1683
1684 if (!ISSET(ifp->if_flags, IFF_ALLMULTI)) {
1685 if (memcmp(addrlo, addrhi, ETHER_ADDR_LEN) != 0)
1686 return 0;
1687
1688 iavf_eth_addr(sc, addrlo, IAVF_VC_OP_DEL_ETH_ADDR);
1689 return 0;
1690 }
1691
1692 ETHER_LOCK(ec);
1693 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1694 ETHER_NEXT_MULTI(step, enm)) {
1695 if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
1696 ETHER_ADDR_LEN) != 0) {
1697 goto out;
1698 }
1699 }
1700
1701 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1702 ETHER_NEXT_MULTI(step, enm)) {
1703 error = iavf_eth_addr(sc, enm->enm_addrlo,
1704 IAVF_VC_OP_ADD_ETH_ADDR);
1705 if (error != 0)
1706 break;
1707 }
1708
1709 if (enm != NULL) {
1710 enm_last = enm;
1711 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1712 ETHER_NEXT_MULTI(step, enm)) {
1713 if (enm == enm_last)
1714 break;
1715
1716 iavf_eth_addr(sc, enm->enm_addrlo,
1717 IAVF_VC_OP_DEL_ETH_ADDR);
1718 }
1719 } else {
1720 CLR(ifp->if_flags, IFF_ALLMULTI);
1721 rv = ENETRESET;
1722 }
1723
1724 out:
1725 ETHER_UNLOCK(ec);
1726 return rv;
1727 }
1728
1729 static void
1730 iavf_del_all_multi(struct iavf_softc *sc)
1731 {
1732 struct ethercom *ec = &sc->sc_ec;
1733 struct ether_multi *enm;
1734 struct ether_multistep step;
1735
1736 ETHER_LOCK(ec);
1737 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
1738 ETHER_NEXT_MULTI(step, enm)) {
1739 iavf_eth_addr(sc, enm->enm_addrlo,
1740 IAVF_VC_OP_DEL_ETH_ADDR);
1741 }
1742 ETHER_UNLOCK(ec);
1743 }
1744
1745 static int
1746 iavf_setup_interrupts(struct iavf_softc *sc)
1747 {
1748 struct pci_attach_args *pa;
1749 kcpuset_t *affinity = NULL;
1750 char intrbuf[PCI_INTRSTR_LEN], xnamebuf[32];
1751 char const *intrstr;
1752 int counts[PCI_INTR_TYPE_SIZE];
1753 int error, affinity_to;
1754 unsigned int vector, qid, num;
1755
1756 /* queue pairs + misc interrupt */
1757 num = sc->sc_nqps_alloc + 1;
1758
1759 num = MIN(num, iavf_calc_msix_count(sc));
1760 if (num <= 0) {
1761 return -1;
1762 }
1763
1764 KASSERT(sc->sc_nqps_alloc > 0);
1765 num = MIN(num, sc->sc_nqps_alloc + 1);
1766
1767 pa = &sc->sc_pa;
1768 memset(counts, 0, sizeof(counts));
1769 counts[PCI_INTR_TYPE_MSIX] = num;
1770
1771 error = pci_intr_alloc(pa, &sc->sc_ihp, counts, PCI_INTR_TYPE_MSIX);
1772 if (error != 0) {
1773 IAVF_LOG(sc, LOG_WARNING, "couldn't allocate interrupts\n");
1774 return -1;
1775 }
1776
1777 KASSERT(pci_intr_type(pa->pa_pc, sc->sc_ihp[0]) == PCI_INTR_TYPE_MSIX);
1778
1779 if (counts[PCI_INTR_TYPE_MSIX] < 1) {
1780 IAVF_LOG(sc, LOG_ERR, "couldn't allocate interrupts\n");
1781 } else if (counts[PCI_INTR_TYPE_MSIX] != (int)num) {
1782 IAVF_LOG(sc, LOG_DEBUG,
1783 		    "requested %u interrupts, but allocated %d interrupts\n",
1784 num, counts[PCI_INTR_TYPE_MSIX]);
1785 num = counts[PCI_INTR_TYPE_MSIX];
1786 }
1787
1788 sc->sc_ihs = kmem_zalloc(sizeof(sc->sc_ihs[0]) * num, KM_NOSLEEP);
1789 if (sc->sc_ihs == NULL) {
1790 IAVF_LOG(sc, LOG_ERR,
1791 "couldn't allocate memory for interrupts\n");
1792 goto fail;
1793 }
1794
1795 /* vector #0 is Misc interrupt */
1796 vector = 0;
1797 pci_intr_setattr(pa->pa_pc, &sc->sc_ihp[vector], PCI_INTR_MPSAFE, true);
1798 intrstr = pci_intr_string(pa->pa_pc, sc->sc_ihp[vector],
1799 intrbuf, sizeof(intrbuf));
1800 snprintf(xnamebuf, sizeof(xnamebuf), "%s-Misc",
1801 device_xname(sc->sc_dev));
1802
1803 sc->sc_ihs[vector] = pci_intr_establish_xname(pa->pa_pc,
1804 sc->sc_ihp[vector], IPL_NET, iavf_intr, sc, xnamebuf);
1805 if (sc->sc_ihs[vector] == NULL) {
1806 IAVF_LOG(sc, LOG_WARNING,
1807 		    "unable to establish interrupt at %s\n", intrstr);
1808 goto fail;
1809 }
1810
1811 kcpuset_create(&affinity, false);
1812 affinity_to = 0;
1813 qid = 0;
1814 for (vector = 1; vector < num; vector++) {
1815 pci_intr_setattr(pa->pa_pc, &sc->sc_ihp[vector],
1816 PCI_INTR_MPSAFE, true);
1817 intrstr = pci_intr_string(pa->pa_pc, sc->sc_ihp[vector],
1818 intrbuf, sizeof(intrbuf));
1819 snprintf(xnamebuf, sizeof(xnamebuf), "%s-TXRX%u",
1820 device_xname(sc->sc_dev), qid);
1821
1822 sc->sc_ihs[vector] = pci_intr_establish_xname(pa->pa_pc,
1823 sc->sc_ihp[vector], IPL_NET, iavf_queue_intr,
1824 (void *)&sc->sc_qps[qid], xnamebuf);
1825 if (sc->sc_ihs[vector] == NULL) {
1826 IAVF_LOG(sc, LOG_WARNING,
1827 "unable to establish interrupt at %s\n", intrstr);
1828 goto fail;
1829 }
1830
1831 kcpuset_zero(affinity);
1832 kcpuset_set(affinity, affinity_to);
1833 error = interrupt_distribute(sc->sc_ihs[vector],
1834 affinity, NULL);
1835
1836 if (error == 0) {
1837 IAVF_LOG(sc, LOG_INFO,
1838 "for TXRX%d interrupt at %s, affinity to %d\n",
1839 qid, intrstr, affinity_to);
1840 } else {
1841 IAVF_LOG(sc, LOG_INFO,
1842 "for TXRX%d interrupt at %s\n",
1843 qid, intrstr);
1844 }
1845
1846 qid++;
1847 affinity_to = (affinity_to + 1) % ncpu;
1848 }
1849
1850 vector = 0;
1851 kcpuset_zero(affinity);
1852 kcpuset_set(affinity, affinity_to);
1853 intrstr = pci_intr_string(pa->pa_pc, sc->sc_ihp[vector],
1854 intrbuf, sizeof(intrbuf));
1855 error = interrupt_distribute(sc->sc_ihs[vector], affinity, NULL);
1856 if (error == 0) {
1857 IAVF_LOG(sc, LOG_INFO,
1858 "for Misc interrupt at %s, affinity to %d\n",
1859 intrstr, affinity_to);
1860 } else {
1861 IAVF_LOG(sc, LOG_INFO,
1862 		    "for Misc interrupt at %s\n", intrstr);
1863 }
1864
1865 kcpuset_destroy(affinity);
1866
1867 sc->sc_nintrs = num;
1868 return 0;
1869
1870 fail:
1871 if (affinity != NULL)
1872 kcpuset_destroy(affinity);
1873 for (vector = 0; vector < num; vector++) {
1874 if (sc->sc_ihs[vector] == NULL)
1875 continue;
1876 pci_intr_disestablish(pa->pa_pc, sc->sc_ihs[vector]);
1877 }
1878 kmem_free(sc->sc_ihs, sizeof(sc->sc_ihs[0]) * num);
1879 pci_intr_release(pa->pa_pc, sc->sc_ihp, num);
1880
1881 return -1;
1882 }
1883
1884 static void
1885 iavf_teardown_interrupts(struct iavf_softc *sc)
1886 {
1887 struct pci_attach_args *pa;
1888 unsigned int i;
1889
1890 if (sc->sc_ihs == NULL)
1891 return;
1892
1893 pa = &sc->sc_pa;
1894
1895 for (i = 0; i < sc->sc_nintrs; i++) {
1896 pci_intr_disestablish(pa->pa_pc, sc->sc_ihs[i]);
1897 }
1898
1899 kmem_free(sc->sc_ihs, sizeof(sc->sc_ihs[0]) * sc->sc_nintrs);
1900 sc->sc_ihs = NULL;
1901
1902 pci_intr_release(pa->pa_pc, sc->sc_ihp, sc->sc_nintrs);
1903 sc->sc_nintrs = 0;
1904 }
1905
1906 static int
1907 iavf_setup_sysctls(struct iavf_softc *sc)
1908 {
1909 const char *devname;
1910 struct sysctllog **log;
1911 const struct sysctlnode *rnode, *rxnode, *txnode;
1912 int error;
1913
1914 log = &sc->sc_sysctllog;
1915 devname = device_xname(sc->sc_dev);
1916
1917 error = sysctl_createv(log, 0, NULL, &rnode,
1918 0, CTLTYPE_NODE, devname,
1919 SYSCTL_DESCR("iavf information and settings"),
1920 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);
1921 if (error)
1922 goto out;
1923
1924 error = sysctl_createv(log, 0, &rnode, NULL,
1925 CTLFLAG_READWRITE, CTLTYPE_BOOL, "txrx_workqueue",
1926 SYSCTL_DESCR("Use workqueue for packet processing"),
1927 NULL, 0, &sc->sc_txrx_workqueue, 0, CTL_CREATE, CTL_EOL);
1928 if (error)
1929 goto out;
1930
1931 error = sysctl_createv(log, 0, &rnode, NULL,
1932 CTLFLAG_READWRITE, CTLTYPE_INT, "debug_level",
1933 SYSCTL_DESCR("Debug level"),
1934 NULL, 0, &sc->sc_debuglevel, 0, CTL_CREATE, CTL_EOL);
1935 if (error)
1936 goto out;
1937
1938 error = sysctl_createv(log, 0, &rnode, &rxnode,
1939 0, CTLTYPE_NODE, "rx",
1940 SYSCTL_DESCR("iavf information and settings for Rx"),
1941 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
1942 if (error)
1943 goto out;
1944
1945 error = sysctl_createv(log, 0, &rxnode, NULL,
1946 CTLFLAG_READWRITE, CTLTYPE_INT, "itr",
1947 SYSCTL_DESCR("Interrupt Throttling"),
1948 iavf_sysctl_itr_handler, 0,
1949 (void *)sc, 0, CTL_CREATE, CTL_EOL);
1950 if (error)
1951 goto out;
1952
1953 error = sysctl_createv(log, 0, &rxnode, NULL,
1954 CTLFLAG_READONLY, CTLTYPE_INT, "descriptor_num",
1955 	    SYSCTL_DESCR("the number of Rx descriptors"),
1956 NULL, 0, &sc->sc_rx_ring_ndescs, 0, CTL_CREATE, CTL_EOL);
1957 if (error)
1958 goto out;
1959
1960 error = sysctl_createv(log, 0, &rxnode, NULL,
1961 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
1962 SYSCTL_DESCR("max number of Rx packets"
1963 " to process for interrupt processing"),
1964 NULL, 0, &sc->sc_rx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
1965 if (error)
1966 goto out;
1967
1968 error = sysctl_createv(log, 0, &rxnode, NULL,
1969 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
1970 SYSCTL_DESCR("max number of Rx packets"
1971 " to process for deferred processing"),
1972 NULL, 0, &sc->sc_rx_process_limit, 0, CTL_CREATE, CTL_EOL);
1973 if (error)
1974 goto out;
1975
1976 error = sysctl_createv(log, 0, &rnode, &txnode,
1977 0, CTLTYPE_NODE, "tx",
1978 SYSCTL_DESCR("iavf information and settings for Tx"),
1979 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
1980 if (error)
1981 goto out;
1982
1983 error = sysctl_createv(log, 0, &txnode, NULL,
1984 CTLFLAG_READWRITE, CTLTYPE_INT, "itr",
1985 SYSCTL_DESCR("Interrupt Throttling"),
1986 iavf_sysctl_itr_handler, 0,
1987 (void *)sc, 0, CTL_CREATE, CTL_EOL);
1988 if (error)
1989 goto out;
1990
1991 error = sysctl_createv(log, 0, &txnode, NULL,
1992 CTLFLAG_READONLY, CTLTYPE_INT, "descriptor_num",
1993 SYSCTL_DESCR("the number of Tx descriptors"),
1994 NULL, 0, &sc->sc_tx_ring_ndescs, 0, CTL_CREATE, CTL_EOL);
1995 if (error)
1996 goto out;
1997
1998 error = sysctl_createv(log, 0, &txnode, NULL,
1999 CTLFLAG_READWRITE, CTLTYPE_INT, "intr_process_limit",
2000 SYSCTL_DESCR("max number of Tx packets"
2001 " to process for interrupt processing"),
2002 NULL, 0, &sc->sc_tx_intr_process_limit, 0, CTL_CREATE, CTL_EOL);
2003 if (error)
2004 goto out;
2005
2006 error = sysctl_createv(log, 0, &txnode, NULL,
2007 CTLFLAG_READWRITE, CTLTYPE_INT, "process_limit",
2008 SYSCTL_DESCR("max number of Tx packets"
2009 " to process for deferred processing"),
2010 NULL, 0, &sc->sc_tx_process_limit, 0, CTL_CREATE, CTL_EOL);
2011 if (error)
2012 goto out;
2013 out:
2014 return error;
2015 }
2016
2017 static void
2018 iavf_teardown_sysctls(struct iavf_softc *sc)
2019 {
2020
2021 sysctl_teardown(&sc->sc_sysctllog);
2022 }
2023
2024 static int
2025 iavf_setup_stats(struct iavf_softc *sc)
2026 {
2027 struct iavf_stat_counters *isc;
2028 const char *dn;
2029
2030 dn = device_xname(sc->sc_dev);
2031 isc = &sc->sc_stat_counters;
2032
2033 iavf_evcnt_attach(&isc->isc_rx_bytes, dn, "Rx bytes");
2034 iavf_evcnt_attach(&isc->isc_rx_unicast, dn, "Rx unicast");
2035 iavf_evcnt_attach(&isc->isc_rx_multicast, dn, "Rx multicast");
2036 iavf_evcnt_attach(&isc->isc_rx_broadcast, dn, "Rx broadcast");
2037 iavf_evcnt_attach(&isc->isc_rx_discards, dn, "Rx discards");
2038 iavf_evcnt_attach(&isc->isc_rx_unknown_protocol,
2039 dn, "Rx unknown protocol");
2040
2041 iavf_evcnt_attach(&isc->isc_tx_bytes, dn, "Tx bytes");
2042 iavf_evcnt_attach(&isc->isc_tx_unicast, dn, "Tx unicast");
2043 iavf_evcnt_attach(&isc->isc_tx_multicast, dn, "Tx multicast");
2044 iavf_evcnt_attach(&isc->isc_tx_broadcast, dn, "Tx broadcast");
2045 iavf_evcnt_attach(&isc->isc_tx_discards, dn, "Tx discards");
2046 iavf_evcnt_attach(&isc->isc_tx_errors, dn, "Tx errors");
2047
2048 return 0;
2049 }
2050
2051 static void
2052 iavf_teardown_stats(struct iavf_softc *sc)
2053 {
2054 struct iavf_stat_counters *isc;
2055
2056 isc = &sc->sc_stat_counters;
2057
2058 evcnt_detach(&isc->isc_rx_bytes);
2059 evcnt_detach(&isc->isc_rx_unicast);
2060 evcnt_detach(&isc->isc_rx_multicast);
2061 evcnt_detach(&isc->isc_rx_broadcast);
2062 evcnt_detach(&isc->isc_rx_discards);
2063 evcnt_detach(&isc->isc_rx_unknown_protocol);
2064
2065 evcnt_detach(&isc->isc_tx_bytes);
2066 evcnt_detach(&isc->isc_tx_unicast);
2067 evcnt_detach(&isc->isc_tx_multicast);
2068 evcnt_detach(&isc->isc_tx_broadcast);
2069 evcnt_detach(&isc->isc_tx_discards);
2070 evcnt_detach(&isc->isc_tx_errors);
2071
2072 }
2073
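/*
 * iavf_init_admin_queue --
 *	Reset the admin queue indexes, program the ATQ/ARQ base, length
 *	(with the enable bit) and tail registers, and verify the base
 *	address writes by reading them back.  Clears sc_dead on success.
 */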
2074 static int
2075 iavf_init_admin_queue(struct iavf_softc *sc)
2076 {
2077 uint32_t reg;
2078
2079 sc->sc_atq_cons = 0;
2080 sc->sc_atq_prod = 0;
2081
2082 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
2083 0, IXL_DMA_LEN(&sc->sc_atq),
2084 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2085 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
2086 0, IXL_DMA_LEN(&sc->sc_arq),
2087 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2088
2089 iavf_wr(sc, sc->sc_aq_regs->atq_head, 0);
2090 iavf_wr(sc, sc->sc_aq_regs->arq_head, 0);
2091 iavf_wr(sc, sc->sc_aq_regs->atq_tail, 0);
2092 iavf_wr(sc, sc->sc_aq_regs->arq_tail, 0);
2093
2094 iavf_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
2095
2096 iavf_wr(sc, sc->sc_aq_regs->atq_bal,
2097 ixl_dmamem_lo(&sc->sc_atq));
2098 iavf_wr(sc, sc->sc_aq_regs->atq_bah,
2099 ixl_dmamem_hi(&sc->sc_atq));
2100 iavf_wr(sc, sc->sc_aq_regs->atq_len,
2101 sc->sc_aq_regs->atq_len_enable | IAVF_AQ_NUM);
2102
2103 iavf_wr(sc, sc->sc_aq_regs->arq_bal,
2104 ixl_dmamem_lo(&sc->sc_arq));
2105 iavf_wr(sc, sc->sc_aq_regs->arq_bah,
2106 ixl_dmamem_hi(&sc->sc_arq));
2107 iavf_wr(sc, sc->sc_aq_regs->arq_len,
2108 sc->sc_aq_regs->arq_len_enable | IAVF_AQ_NUM);
2109
2110 iavf_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
2111
2112 reg = iavf_rd(sc, sc->sc_aq_regs->atq_bal);
2113 if (reg != ixl_dmamem_lo(&sc->sc_atq))
2114 goto fail;
2115
2116 reg = iavf_rd(sc, sc->sc_aq_regs->arq_bal);
2117 if (reg != ixl_dmamem_lo(&sc->sc_arq))
2118 goto fail;
2119
2120 sc->sc_dead = false;
2121 return 0;
2122
2123 fail:
2124 iavf_wr(sc, sc->sc_aq_regs->atq_len, 0);
2125 iavf_wr(sc, sc->sc_aq_regs->arq_len, 0);
2126 return -1;
2127 }
2128
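/*
 * iavf_cleanup_admin_queue --
 *	Disable the admin queues by clearing their registers, reset the
 *	ring state, and move any in-flight admin buffers back to the
 *	idle lists.
 */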
2129 static void
2130 iavf_cleanup_admin_queue(struct iavf_softc *sc)
2131 {
2132 struct ixl_aq_buf *aqb;
2133
2134 iavf_wr(sc, sc->sc_aq_regs->atq_head, 0);
2135 iavf_wr(sc, sc->sc_aq_regs->arq_head, 0);
2136 iavf_wr(sc, sc->sc_aq_regs->atq_tail, 0);
2137 iavf_wr(sc, sc->sc_aq_regs->arq_tail, 0);
2138
2139 iavf_wr(sc, sc->sc_aq_regs->atq_bal, 0);
2140 iavf_wr(sc, sc->sc_aq_regs->atq_bah, 0);
2141 iavf_wr(sc, sc->sc_aq_regs->atq_len, 0);
2142
2143 iavf_wr(sc, sc->sc_aq_regs->arq_bal, 0);
2144 iavf_wr(sc, sc->sc_aq_regs->arq_bah, 0);
2145 iavf_wr(sc, sc->sc_aq_regs->arq_len, 0);
2146 iavf_flush(sc);
2147
2148 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
2149 0, IXL_DMA_LEN(&sc->sc_arq),
2150 BUS_DMASYNC_POSTREAD);
2151 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
2152 0, IXL_DMA_LEN(&sc->sc_atq),
2153 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2154
2155 sc->sc_atq_cons = 0;
2156 sc->sc_atq_prod = 0;
2157 sc->sc_arq_cons = 0;
2158 sc->sc_arq_prod = 0;
2159
2160 memset(IXL_DMA_KVA(&sc->sc_arq), 0, IXL_DMA_LEN(&sc->sc_arq));
2161 memset(IXL_DMA_KVA(&sc->sc_atq), 0, IXL_DMA_LEN(&sc->sc_atq));
2162
2163 while ((aqb = iavf_aqb_get_locked(&sc->sc_arq_live)) != NULL) {
2164 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, aqb->aqb_size,
2165 BUS_DMASYNC_POSTREAD);
2166 iavf_aqb_put_locked(&sc->sc_arq_idle, aqb);
2167 }
2168
2169 while ((aqb = iavf_aqb_get_locked(&sc->sc_atq_live)) != NULL) {
2170 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, aqb->aqb_size,
2171 BUS_DMASYNC_POSTREAD);
2172 iavf_aqb_put_locked(&sc->sc_atq_idle, aqb);
2173 }
2174 }
2175
2176 static unsigned int
2177 iavf_calc_msix_count(struct iavf_softc *sc)
2178 {
2179 struct pci_attach_args *pa;
2180 int count;
2181
2182 pa = &sc->sc_pa;
2183 count = pci_msix_count(pa->pa_pc, pa->pa_tag);
2184 if (count < 0) {
2185 		IAVF_LOG(sc, LOG_DEBUG, "MSIX config error\n");
2186 count = 0;
2187 }
2188
2189 return MIN(sc->sc_max_vectors, (unsigned int)count);
2190 }
2191
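/*
 * iavf_calc_queue_pair_size --
 *	The number of queue pairs is bounded by the number of CPUs, the
 *	queues offered on the VSI, the usable MSI-X vectors (one is
 *	reserved for the misc interrupt) and the driver's max_qps limit.
 */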
2192 static unsigned int
2193 iavf_calc_queue_pair_size(struct iavf_softc *sc)
2194 {
2195 unsigned int nqp, nvec;
2196
2197 nvec = iavf_calc_msix_count(sc);
2198 if (sc->sc_max_vectors > 1) {
2199 		/* reserve one vector for the misc interrupt */
2200 nvec -= 1;
2201 }
2202
2203 nqp = ncpu;
2204 nqp = MIN(nqp, sc->sc_nqps_vsi);
2205 nqp = MIN(nqp, nvec);
2206 nqp = MIN(nqp, (unsigned int)iavf_params.max_qps);
2207
2208 return nqp;
2209 }
2210
2211 static struct iavf_tx_ring *
2212 iavf_txr_alloc(struct iavf_softc *sc, unsigned int qid)
2213 {
2214 struct iavf_tx_ring *txr;
2215 struct iavf_tx_map *maps;
2216 unsigned int i;
2217 int error;
2218
2219 txr = kmem_zalloc(sizeof(*txr), KM_NOSLEEP);
2220 if (txr == NULL)
2221 return NULL;
2222
2223 maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_tx_ring_ndescs,
2224 KM_NOSLEEP);
2225 if (maps == NULL)
2226 goto free_txr;
2227
2228 if (iavf_dmamem_alloc(sc->sc_dmat, &txr->txr_mem,
2229 sizeof(struct ixl_tx_desc) * sc->sc_tx_ring_ndescs,
2230 IAVF_TX_QUEUE_ALIGN) != 0) {
2231 goto free_maps;
2232 }
2233
2234 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2235 error = bus_dmamap_create(sc->sc_dmat, IAVF_TX_PKT_MAXSIZE,
2236 IAVF_TX_PKT_DESCS, IAVF_TX_PKT_MAXSIZE, 0,
2237 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &maps[i].txm_map);
2238 if (error)
2239 goto destroy_maps;
2240 }
2241
2242 txr->txr_intrq = pcq_create(sc->sc_tx_ring_ndescs, KM_NOSLEEP);
2243 if (txr->txr_intrq == NULL)
2244 goto destroy_maps;
2245
2246 txr->txr_si = softint_establish(SOFTINT_NET|SOFTINT_MPSAFE,
2247 iavf_deferred_transmit, txr);
2248 if (txr->txr_si == NULL)
2249 goto destroy_pcq;
2250
2251 snprintf(txr->txr_name, sizeof(txr->txr_name), "%s-tx%d",
2252 device_xname(sc->sc_dev), qid);
2253
2254 iavf_evcnt_attach(&txr->txr_defragged,
2255 	    txr->txr_name, "m_defrag succeeded");
2256 iavf_evcnt_attach(&txr->txr_defrag_failed,
2257 txr->txr_name, "m_defrag failed");
2258 iavf_evcnt_attach(&txr->txr_pcqdrop,
2259 txr->txr_name, "Dropped in pcq");
2260 iavf_evcnt_attach(&txr->txr_transmitdef,
2261 txr->txr_name, "Deferred transmit");
2262 iavf_evcnt_attach(&txr->txr_watchdogto,
2263 	    txr->txr_name, "Watchdog timeout on queue");
2264 iavf_evcnt_attach(&txr->txr_defer,
2265 txr->txr_name, "Handled queue in softint/workqueue");
2266
2267 evcnt_attach_dynamic(&txr->txr_intr, EVCNT_TYPE_INTR, NULL,
2268 txr->txr_name, "Interrupt on queue");
2269
2270 txr->txr_qid = qid;
2271 txr->txr_sc = sc;
2272 txr->txr_maps = maps;
2273 txr->txr_prod = txr->txr_cons = 0;
2274 txr->txr_tail = I40E_QTX_TAIL1(qid);
2275 mutex_init(&txr->txr_lock, MUTEX_DEFAULT, IPL_NET);
2276
2277 return txr;
2278 destroy_pcq:
2279 pcq_destroy(txr->txr_intrq);
2280 destroy_maps:
2281 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2282 if (maps[i].txm_map == NULL)
2283 continue;
2284 bus_dmamap_destroy(sc->sc_dmat, maps[i].txm_map);
2285 }
2286
2287 iavf_dmamem_free(sc->sc_dmat, &txr->txr_mem);
2288 free_maps:
2289 kmem_free(maps, sizeof(maps[0]) * sc->sc_tx_ring_ndescs);
2290 free_txr:
2291 kmem_free(txr, sizeof(*txr));
2292 return NULL;
2293 }
2294
2295 static void
2296 iavf_txr_free(struct iavf_softc *sc, struct iavf_tx_ring *txr)
2297 {
2298 struct iavf_tx_map *maps;
2299 unsigned int i;
2300
2301 maps = txr->txr_maps;
2302 if (maps != NULL) {
2303 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2304 if (maps[i].txm_map == NULL)
2305 continue;
2306 bus_dmamap_destroy(sc->sc_dmat, maps[i].txm_map);
2307 }
2308 kmem_free(txr->txr_maps,
2309 sizeof(maps[0]) * sc->sc_tx_ring_ndescs);
2310 txr->txr_maps = NULL;
2311 }
2312
2313 evcnt_detach(&txr->txr_defragged);
2314 evcnt_detach(&txr->txr_defrag_failed);
2315 evcnt_detach(&txr->txr_pcqdrop);
2316 evcnt_detach(&txr->txr_transmitdef);
2317 evcnt_detach(&txr->txr_watchdogto);
2318 evcnt_detach(&txr->txr_defer);
2319 evcnt_detach(&txr->txr_intr);
2320
2321 iavf_dmamem_free(sc->sc_dmat, &txr->txr_mem);
2322 softint_disestablish(txr->txr_si);
2323 pcq_destroy(txr->txr_intrq);
2324 mutex_destroy(&txr->txr_lock);
2325 kmem_free(txr, sizeof(*txr));
2326 }
2327
2328 static struct iavf_rx_ring *
2329 iavf_rxr_alloc(struct iavf_softc *sc, unsigned int qid)
2330 {
2331 struct iavf_rx_ring *rxr;
2332 struct iavf_rx_map *maps;
2333 unsigned int i;
2334 int error;
2335
2336 rxr = kmem_zalloc(sizeof(*rxr), KM_NOSLEEP);
2337 if (rxr == NULL)
2338 return NULL;
2339
2340 maps = kmem_zalloc(sizeof(maps[0]) * sc->sc_rx_ring_ndescs,
2341 KM_NOSLEEP);
2342 if (maps == NULL)
2343 goto free_rxr;
2344
2345 if (iavf_dmamem_alloc(sc->sc_dmat, &rxr->rxr_mem,
2346 sizeof(struct ixl_rx_rd_desc_32) * sc->sc_rx_ring_ndescs,
2347 IAVF_RX_QUEUE_ALIGN) != 0)
2348 goto free_maps;
2349
2350 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2351 error = bus_dmamap_create(sc->sc_dmat, IAVF_MCLBYTES,
2352 1, IAVF_MCLBYTES, 0,
2353 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &maps[i].rxm_map);
2354 if (error)
2355 goto destroy_maps;
2356 }
2357
2358 snprintf(rxr->rxr_name, sizeof(rxr->rxr_name), "%s-rx%d",
2359 device_xname(sc->sc_dev), qid);
2360
2361 iavf_evcnt_attach(&rxr->rxr_mgethdr_failed,
2362 rxr->rxr_name, "MGETHDR failed");
2363 iavf_evcnt_attach(&rxr->rxr_mgetcl_failed,
2364 rxr->rxr_name, "MCLGET failed");
2365 iavf_evcnt_attach(&rxr->rxr_mbuf_load_failed,
2366 rxr->rxr_name, "bus_dmamap_load_mbuf failed");
2367 iavf_evcnt_attach(&rxr->rxr_defer,
2368 rxr->rxr_name, "Handled queue in softint/workqueue");
2369
2370 evcnt_attach_dynamic(&rxr->rxr_intr, EVCNT_TYPE_INTR, NULL,
2371 rxr->rxr_name, "Interrupt on queue");
2372
2373 rxr->rxr_qid = qid;
2374 rxr->rxr_sc = sc;
2375 rxr->rxr_cons = rxr->rxr_prod = 0;
2376 rxr->rxr_m_head = NULL;
2377 rxr->rxr_m_tail = &rxr->rxr_m_head;
2378 rxr->rxr_maps = maps;
2379 rxr->rxr_tail = I40E_QRX_TAIL1(qid);
2380 mutex_init(&rxr->rxr_lock, MUTEX_DEFAULT, IPL_NET);
2381
2382 return rxr;
2383
2384 destroy_maps:
2385 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2386 if (maps[i].rxm_map == NULL)
2387 continue;
2388 bus_dmamap_destroy(sc->sc_dmat, maps[i].rxm_map);
2389 }
2390 iavf_dmamem_free(sc->sc_dmat, &rxr->rxr_mem);
2391 free_maps:
2392 kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs);
2393 free_rxr:
2394 kmem_free(rxr, sizeof(*rxr));
2395
2396 return NULL;
2397 }
2398
2399 static void
2400 iavf_rxr_free(struct iavf_softc *sc, struct iavf_rx_ring *rxr)
2401 {
2402 struct iavf_rx_map *maps;
2403 unsigned int i;
2404
2405 maps = rxr->rxr_maps;
2406 if (maps != NULL) {
2407 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2408 if (maps[i].rxm_map == NULL)
2409 continue;
2410 bus_dmamap_destroy(sc->sc_dmat, maps[i].rxm_map);
2411 }
2412 kmem_free(maps, sizeof(maps[0]) * sc->sc_rx_ring_ndescs);
2413 rxr->rxr_maps = NULL;
2414 }
2415
2416 evcnt_detach(&rxr->rxr_mgethdr_failed);
2417 evcnt_detach(&rxr->rxr_mgetcl_failed);
2418 evcnt_detach(&rxr->rxr_mbuf_load_failed);
2419 evcnt_detach(&rxr->rxr_defer);
2420 evcnt_detach(&rxr->rxr_intr);
2421
2422 iavf_dmamem_free(sc->sc_dmat, &rxr->rxr_mem);
2423 mutex_destroy(&rxr->rxr_lock);
2424 kmem_free(rxr, sizeof(*rxr));
2425 }
2426
2427 static int
2428 iavf_queue_pairs_alloc(struct iavf_softc *sc)
2429 {
2430 struct iavf_queue_pair *qp;
2431 unsigned int i, num;
2432
2433 num = iavf_calc_queue_pair_size(sc);
2434 if (num <= 0) {
2435 return -1;
2436 }
2437
2438 sc->sc_qps = kmem_zalloc(sizeof(sc->sc_qps[0]) * num, KM_NOSLEEP);
2439 if (sc->sc_qps == NULL) {
2440 return -1;
2441 }
2442
2443 for (i = 0; i < num; i++) {
2444 qp = &sc->sc_qps[i];
2445
2446 qp->qp_rxr = iavf_rxr_alloc(sc, i);
2447 qp->qp_txr = iavf_txr_alloc(sc, i);
2448
2449 if (qp->qp_rxr == NULL || qp->qp_txr == NULL)
2450 goto free;
2451
2452 qp->qp_si = softint_establish(SOFTINT_NET|SOFTINT_MPSAFE,
2453 iavf_handle_queue, qp);
2454 if (qp->qp_si == NULL)
2455 goto free;
2456 }
2457
2458 sc->sc_nqps_alloc = num;
2459 return 0;
2460 free:
2461 for (i = 0; i < num; i++) {
2462 qp = &sc->sc_qps[i];
2463
2464 if (qp->qp_si != NULL)
2465 softint_disestablish(qp->qp_si);
2466 if (qp->qp_rxr != NULL)
2467 iavf_rxr_free(sc, qp->qp_rxr);
2468 if (qp->qp_txr != NULL)
2469 iavf_txr_free(sc, qp->qp_txr);
2470 }
2471
2472 kmem_free(sc->sc_qps, sizeof(sc->sc_qps[0]) * num);
2473 sc->sc_qps = NULL;
2474
2475 return -1;
2476 }
2477
2478 static void
2479 iavf_queue_pairs_free(struct iavf_softc *sc)
2480 {
2481 struct iavf_queue_pair *qp;
2482 unsigned int i;
2483 size_t sz;
2484
2485 if (sc->sc_qps == NULL)
2486 return;
2487
2488 for (i = 0; i < sc->sc_nqps_alloc; i++) {
2489 qp = &sc->sc_qps[i];
2490
2491 if (qp->qp_si != NULL)
2492 softint_disestablish(qp->qp_si);
2493 if (qp->qp_rxr != NULL)
2494 iavf_rxr_free(sc, qp->qp_rxr);
2495 if (qp->qp_txr != NULL)
2496 iavf_txr_free(sc, qp->qp_txr);
2497 }
2498
2499 sz = sizeof(sc->sc_qps[0]) * sc->sc_nqps_alloc;
2500 kmem_free(sc->sc_qps, sz);
2501 sc->sc_qps = NULL;
2502 sc->sc_nqps_alloc = 0;
2503 }
2504
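/*
 * iavf_rxfill --
 *	Attach freshly allocated mbuf clusters to the empty Rx
 *	descriptors and, if any were posted, advance the hardware tail
 *	register.  Returns -1 if an allocation or DMA load failed.
 */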
2505 static int
2506 iavf_rxfill(struct iavf_softc *sc, struct iavf_rx_ring *rxr)
2507 {
2508 struct ixl_rx_rd_desc_32 *ring, *rxd;
2509 struct iavf_rx_map *rxm;
2510 bus_dmamap_t map;
2511 struct mbuf *m;
2512 unsigned int slots, prod, mask;
2513 	int error, post = 0;
2514
2515 slots = ixl_rxr_unrefreshed(rxr->rxr_prod, rxr->rxr_cons,
2516 sc->sc_rx_ring_ndescs);
2517
2518 if (slots == 0)
2519 return 0;
2520
2521 error = 0;
2522 prod = rxr->rxr_prod;
2523
2524 ring = IXL_DMA_KVA(&rxr->rxr_mem);
2525 mask = sc->sc_rx_ring_ndescs - 1;
2526
2527 do {
2528 rxm = &rxr->rxr_maps[prod];
2529
2530 MGETHDR(m, M_DONTWAIT, MT_DATA);
2531 if (m == NULL) {
2532 rxr->rxr_mgethdr_failed.ev_count++;
2533 error = -1;
2534 break;
2535 }
2536
2537 MCLGET(m, M_DONTWAIT);
2538 if (!ISSET(m->m_flags, M_EXT)) {
2539 rxr->rxr_mgetcl_failed.ev_count++;
2540 error = -1;
2541 m_freem(m);
2542 break;
2543 }
2544
2545 m->m_len = m->m_pkthdr.len = MCLBYTES;
2546 m_adj(m, ETHER_ALIGN);
2547
2548 map = rxm->rxm_map;
2549
2550 if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
2551 BUS_DMA_READ|BUS_DMA_NOWAIT) != 0) {
2552 rxr->rxr_mbuf_load_failed.ev_count++;
2553 error = -1;
2554 m_freem(m);
2555 break;
2556 }
2557
2558 rxm->rxm_m = m;
2559
2560 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2561 BUS_DMASYNC_PREREAD);
2562
2563 rxd = &ring[prod];
2564 rxd->paddr = htole64(map->dm_segs[0].ds_addr);
2565 rxd->haddr = htole64(0);
2566
2567 prod++;
2568 prod &= mask;
2569 post = 1;
2570 } while (--slots);
2571
2572 if (post) {
2573 rxr->rxr_prod = prod;
2574 iavf_wr(sc, rxr->rxr_tail, prod);
2575 }
2576
2577 return error;
2578 }
2579
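/*
 * iavf_rx_csum --
 *	Translate the Rx descriptor checksum status bits into mbuf
 *	csum_flags, selecting the applicable flags from the packet type
 *	encoded in the descriptor.
 */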
2580 static inline void
2581 iavf_rx_csum(struct mbuf *m, uint64_t qword)
2582 {
2583 int flags_mask;
2584
2585 if (!ISSET(qword, IXL_RX_DESC_L3L4P)) {
2586 /* No L3 or L4 checksum was calculated */
2587 return;
2588 }
2589
2590 switch (__SHIFTOUT(qword, IXL_RX_DESC_PTYPE_MASK)) {
2591 case IXL_RX_DESC_PTYPE_IPV4FRAG:
2592 case IXL_RX_DESC_PTYPE_IPV4:
2593 case IXL_RX_DESC_PTYPE_SCTPV4:
2594 case IXL_RX_DESC_PTYPE_ICMPV4:
2595 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
2596 break;
2597 case IXL_RX_DESC_PTYPE_TCPV4:
2598 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
2599 flags_mask |= M_CSUM_TCPv4 | M_CSUM_TCP_UDP_BAD;
2600 break;
2601 case IXL_RX_DESC_PTYPE_UDPV4:
2602 flags_mask = M_CSUM_IPv4 | M_CSUM_IPv4_BAD;
2603 flags_mask |= M_CSUM_UDPv4 | M_CSUM_TCP_UDP_BAD;
2604 break;
2605 case IXL_RX_DESC_PTYPE_TCPV6:
2606 flags_mask = M_CSUM_TCPv6 | M_CSUM_TCP_UDP_BAD;
2607 break;
2608 case IXL_RX_DESC_PTYPE_UDPV6:
2609 flags_mask = M_CSUM_UDPv6 | M_CSUM_TCP_UDP_BAD;
2610 break;
2611 default:
2612 flags_mask = 0;
2613 }
2614
2615 m->m_pkthdr.csum_flags |= (flags_mask & (M_CSUM_IPv4 |
2616 M_CSUM_TCPv4 | M_CSUM_TCPv6 | M_CSUM_UDPv4 | M_CSUM_UDPv6));
2617
2618 if (ISSET(qword, IXL_RX_DESC_IPE)) {
2619 m->m_pkthdr.csum_flags |= (flags_mask & M_CSUM_IPv4_BAD);
2620 }
2621
2622 if (ISSET(qword, IXL_RX_DESC_L4E)) {
2623 m->m_pkthdr.csum_flags |= (flags_mask & M_CSUM_TCP_UDP_BAD);
2624 }
2625 }
2626
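/*
 * iavf_rxeof --
 *	Process completed Rx descriptors up to rxlimit: reassemble
 *	chained buffers, apply VLAN and checksum offload results, pass
 *	finished packets to the stack and refill the ring.  Returns
 *	nonzero if the limit was hit with work still pending.
 */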
2627 static int
2628 iavf_rxeof(struct iavf_softc *sc, struct iavf_rx_ring *rxr, u_int rxlimit,
2629 struct evcnt *ecnt)
2630 {
2631 struct ifnet *ifp = &sc->sc_ec.ec_if;
2632 struct ixl_rx_wb_desc_32 *ring, *rxd;
2633 struct iavf_rx_map *rxm;
2634 bus_dmamap_t map;
2635 unsigned int cons, prod;
2636 struct mbuf *m;
2637 uint64_t word, word0;
2638 unsigned int len;
2639 unsigned int mask;
2640 int done = 0, more = 0;
2641
2642 KASSERT(mutex_owned(&rxr->rxr_lock));
2643
2644 if (!ISSET(ifp->if_flags, IFF_RUNNING))
2645 return 0;
2646
2647 prod = rxr->rxr_prod;
2648 cons = rxr->rxr_cons;
2649
2650 if (cons == prod)
2651 return 0;
2652
2653 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
2654 0, IXL_DMA_LEN(&rxr->rxr_mem),
2655 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2656
2657 ring = IXL_DMA_KVA(&rxr->rxr_mem);
2658 mask = sc->sc_rx_ring_ndescs - 1;
2659
2660 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
2661
2662 do {
2663 if (rxlimit-- <= 0) {
2664 more = 1;
2665 break;
2666 }
2667
2668 rxd = &ring[cons];
2669
2670 word = le64toh(rxd->qword1);
2671
2672 if (!ISSET(word, IXL_RX_DESC_DD))
2673 break;
2674
2675 rxm = &rxr->rxr_maps[cons];
2676
2677 map = rxm->rxm_map;
2678 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2679 BUS_DMASYNC_POSTREAD);
2680 bus_dmamap_unload(sc->sc_dmat, map);
2681
2682 m = rxm->rxm_m;
2683 rxm->rxm_m = NULL;
2684
2685 KASSERT(m != NULL);
2686
2687 len = (word & IXL_RX_DESC_PLEN_MASK) >> IXL_RX_DESC_PLEN_SHIFT;
2688 m->m_len = len;
2689 m->m_pkthdr.len = 0;
2690
2691 m->m_next = NULL;
2692 *rxr->rxr_m_tail = m;
2693 rxr->rxr_m_tail = &m->m_next;
2694
2695 m = rxr->rxr_m_head;
2696 m->m_pkthdr.len += len;
2697
2698 if (ISSET(word, IXL_RX_DESC_EOP)) {
2699 word0 = le64toh(rxd->qword0);
2700
2701 if (ISSET(word, IXL_RX_DESC_L2TAG1P)) {
2702 uint16_t vtag;
2703 vtag = __SHIFTOUT(word0, IXL_RX_DESC_L2TAG1_MASK);
2704 vlan_set_tag(m, le16toh(vtag));
2705 }
2706
2707 if ((ifp->if_capenable & IAVF_IFCAP_RXCSUM) != 0)
2708 iavf_rx_csum(m, word);
2709
2710 if (!ISSET(word,
2711 IXL_RX_DESC_RXE | IXL_RX_DESC_OVERSIZE)) {
2712 m_set_rcvif(m, ifp);
2713 if_statinc_ref(ifp, nsr, if_ipackets);
2714 if_statadd_ref(ifp, nsr, if_ibytes,
2715 m->m_pkthdr.len);
2716 if_percpuq_enqueue(sc->sc_ipq, m);
2717 } else {
2718 if_statinc_ref(ifp, nsr, if_ierrors);
2719 m_freem(m);
2720 }
2721
2722 rxr->rxr_m_head = NULL;
2723 rxr->rxr_m_tail = &rxr->rxr_m_head;
2724 }
2725
2726 cons++;
2727 cons &= mask;
2728
2729 done = 1;
2730 } while (cons != prod);
2731
2732 if (done) {
2733 ecnt->ev_count++;
2734 rxr->rxr_cons = cons;
2735 if (iavf_rxfill(sc, rxr) == -1)
2736 if_statinc_ref(ifp, nsr, if_iqdrops);
2737 }
2738
2739 IF_STAT_PUTREF(ifp);
2740
2741 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
2742 0, IXL_DMA_LEN(&rxr->rxr_mem),
2743 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2744
2745 return more;
2746 }
2747
2748 static void
2749 iavf_rxr_clean(struct iavf_softc *sc, struct iavf_rx_ring *rxr)
2750 {
2751 struct iavf_rx_map *maps, *rxm;
2752 bus_dmamap_t map;
2753 unsigned int i;
2754
2755 KASSERT(mutex_owned(&rxr->rxr_lock));
2756
2757 maps = rxr->rxr_maps;
2758 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
2759 rxm = &maps[i];
2760
2761 if (rxm->rxm_m == NULL)
2762 continue;
2763
2764 map = rxm->rxm_map;
2765 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2766 BUS_DMASYNC_POSTWRITE);
2767 bus_dmamap_unload(sc->sc_dmat, map);
2768
2769 m_freem(rxm->rxm_m);
2770 rxm->rxm_m = NULL;
2771 }
2772
2773 m_freem(rxr->rxr_m_head);
2774 rxr->rxr_m_head = NULL;
2775 rxr->rxr_m_tail = &rxr->rxr_m_head;
2776
2777 memset(IXL_DMA_KVA(&rxr->rxr_mem), 0, IXL_DMA_LEN(&rxr->rxr_mem));
2778 rxr->rxr_prod = rxr->rxr_cons = 0;
2779 }
2780
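/*
 * iavf_txeof --
 *	Reclaim completed Tx descriptors up to txlimit, unloading DMA
 *	maps, freeing mbufs and updating interface statistics.  Returns
 *	nonzero if the limit was hit before the ring was drained.
 */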
2781 static int
2782 iavf_txeof(struct iavf_softc *sc, struct iavf_tx_ring *txr, u_int txlimit,
2783 struct evcnt *ecnt)
2784 {
2785 struct ifnet *ifp = &sc->sc_ec.ec_if;
2786 struct ixl_tx_desc *ring, *txd;
2787 struct iavf_tx_map *txm;
2788 struct mbuf *m;
2789 bus_dmamap_t map;
2790 unsigned int cons, prod, last;
2791 unsigned int mask;
2792 uint64_t dtype;
2793 int done = 0, more = 0;
2794
2795 KASSERT(mutex_owned(&txr->txr_lock));
2796
2797 prod = txr->txr_prod;
2798 cons = txr->txr_cons;
2799
2800 if (cons == prod)
2801 return 0;
2802
2803 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2804 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTREAD);
2805
2806 ring = IXL_DMA_KVA(&txr->txr_mem);
2807 mask = sc->sc_tx_ring_ndescs - 1;
2808
2809 net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
2810
2811 do {
2812 if (txlimit-- <= 0) {
2813 more = 1;
2814 break;
2815 }
2816
2817 txm = &txr->txr_maps[cons];
2818 last = txm->txm_eop;
2819 txd = &ring[last];
2820
2821 dtype = txd->cmd & htole64(IXL_TX_DESC_DTYPE_MASK);
2822 if (dtype != htole64(IXL_TX_DESC_DTYPE_DONE))
2823 break;
2824
2825 map = txm->txm_map;
2826
2827 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2828 BUS_DMASYNC_POSTWRITE);
2829 bus_dmamap_unload(sc->sc_dmat, map);
2830
2831 m = txm->txm_m;
2832 if (m != NULL) {
2833 if_statinc_ref(ifp, nsr, if_opackets);
2834 if_statadd_ref(ifp, nsr, if_obytes, m->m_pkthdr.len);
2835 if (ISSET(m->m_flags, M_MCAST))
2836 if_statinc_ref(ifp, nsr, if_omcasts);
2837 m_freem(m);
2838 }
2839
2840 txm->txm_m = NULL;
2841 txm->txm_eop = -1;
2842
2843 cons = last + 1;
2844 cons &= mask;
2845 done = 1;
2846 } while (cons != prod);
2847
2848 IF_STAT_PUTREF(ifp);
2849
2850 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2851 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREREAD);
2852
2853 txr->txr_cons = cons;
2854
2855 if (done) {
2856 ecnt->ev_count++;
2857 softint_schedule(txr->txr_si);
2858 if (txr->txr_qid == 0) {
2859 CLR(ifp->if_flags, IFF_OACTIVE);
2860 if_schedule_deferred_start(ifp);
2861 }
2862 }
2863
2864 if (txr->txr_cons == txr->txr_prod) {
2865 txr->txr_watchdog = IAVF_WATCHDOG_STOP;
2866 }
2867
2868 return more;
2869 }
2870
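/*
 * iavf_load_mbuf --
 *	Load an mbuf chain for DMA; if it spans too many segments
 *	(EFBIG), defragment it once with m_defrag() and retry the load.
 */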
2871 static inline int
2872 iavf_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf **m0,
2873 struct iavf_tx_ring *txr)
2874 {
2875 struct mbuf *m;
2876 int error;
2877
2878 KASSERT(mutex_owned(&txr->txr_lock));
2879
2880 m = *m0;
2881
2882 error = bus_dmamap_load_mbuf(dmat, map, m,
2883 BUS_DMA_STREAMING|BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2884 if (error != EFBIG)
2885 return error;
2886
2887 m = m_defrag(m, M_DONTWAIT);
2888 if (m != NULL) {
2889 *m0 = m;
2890 txr->txr_defragged.ev_count++;
2891 error = bus_dmamap_load_mbuf(dmat, map, m,
2892 BUS_DMA_STREAMING|BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2893 } else {
2894 txr->txr_defrag_failed.ev_count++;
2895 error = ENOBUFS;
2896 }
2897
2898 return error;
2899 }
2900
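/*
 * iavf_tx_setup_offloads --
 *	Derive the Tx descriptor offload bits (MAC/IP/L4 header lengths
 *	and checksum type) from the mbuf's checksum flags and Ethernet
 *	header.
 */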
2901 static inline int
2902 iavf_tx_setup_offloads(struct mbuf *m, uint64_t *cmd_txd)
2903 {
2904 struct ether_header *eh;
2905 size_t len;
2906 uint64_t cmd;
2907
2908 cmd = 0;
2909
2910 eh = mtod(m, struct ether_header *);
2911 switch (htons(eh->ether_type)) {
2912 case ETHERTYPE_IP:
2913 case ETHERTYPE_IPV6:
2914 len = ETHER_HDR_LEN;
2915 break;
2916 case ETHERTYPE_VLAN:
2917 len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2918 break;
2919 default:
2920 len = 0;
2921 }
2922 cmd |= ((len >> 1) << IXL_TX_DESC_MACLEN_SHIFT);
2923
2924 if (m->m_pkthdr.csum_flags &
2925 (M_CSUM_TSOv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) {
2926 cmd |= IXL_TX_DESC_CMD_IIPT_IPV4;
2927 }
2928 if (m->m_pkthdr.csum_flags & M_CSUM_IPv4) {
2929 cmd |= IXL_TX_DESC_CMD_IIPT_IPV4_CSUM;
2930 }
2931
2932 if (m->m_pkthdr.csum_flags &
2933 (M_CSUM_TSOv6 | M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
2934 cmd |= IXL_TX_DESC_CMD_IIPT_IPV6;
2935 }
2936
2937 switch (cmd & IXL_TX_DESC_CMD_IIPT_MASK) {
2938 case IXL_TX_DESC_CMD_IIPT_IPV4:
2939 case IXL_TX_DESC_CMD_IIPT_IPV4_CSUM:
2940 len = M_CSUM_DATA_IPv4_IPHL(m->m_pkthdr.csum_data);
2941 break;
2942 case IXL_TX_DESC_CMD_IIPT_IPV6:
2943 len = M_CSUM_DATA_IPv6_IPHL(m->m_pkthdr.csum_data);
2944 break;
2945 default:
2946 len = 0;
2947 }
2948 cmd |= ((len >> 2) << IXL_TX_DESC_IPLEN_SHIFT);
2949
2950 if (m->m_pkthdr.csum_flags &
2951 (M_CSUM_TSOv4 | M_CSUM_TSOv6 | M_CSUM_TCPv4 | M_CSUM_TCPv6)) {
2952 len = sizeof(struct tcphdr);
2953 cmd |= IXL_TX_DESC_CMD_L4T_EOFT_TCP;
2954 } else if (m->m_pkthdr.csum_flags & (M_CSUM_UDPv4 | M_CSUM_UDPv6)) {
2955 len = sizeof(struct udphdr);
2956 cmd |= IXL_TX_DESC_CMD_L4T_EOFT_UDP;
2957 } else {
2958 len = 0;
2959 }
2960 cmd |= ((len >> 2) << IXL_TX_DESC_L4LEN_SHIFT);
2961
2962 *cmd_txd |= cmd;
2963 return 0;
2964 }
2965
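/*
 * iavf_tx_common_locked --
 *	Common transmit path shared by if_start and if_transmit:
 *	dequeue packets, load them for DMA, build the data descriptors
 *	and kick the Tx tail register when anything was posted.
 */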
2966 static void
2967 iavf_tx_common_locked(struct ifnet *ifp, struct iavf_tx_ring *txr,
2968 bool is_transmit)
2969 {
2970 struct iavf_softc *sc;
2971 struct ixl_tx_desc *ring, *txd;
2972 struct iavf_tx_map *txm;
2973 bus_dmamap_t map;
2974 struct mbuf *m;
2975 unsigned int prod, free, last, i;
2976 unsigned int mask;
2977 uint64_t cmd, cmd_txd;
2978 int post = 0;
2979
2980 KASSERT(mutex_owned(&txr->txr_lock));
2981
2982 sc = ifp->if_softc;
2983
2984 if (!ISSET(ifp->if_flags, IFF_RUNNING)
2985 || (!is_transmit && ISSET(ifp->if_flags, IFF_OACTIVE))) {
2986 if (!is_transmit)
2987 IFQ_PURGE(&ifp->if_snd);
2988 return;
2989 }
2990
2991 prod = txr->txr_prod;
2992 free = txr->txr_cons;
2993
2994 if (free <= prod)
2995 free += sc->sc_tx_ring_ndescs;
2996 free -= prod;
2997
2998 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2999 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTWRITE);
3000
3001 ring = IXL_DMA_KVA(&txr->txr_mem);
3002 mask = sc->sc_tx_ring_ndescs - 1;
3003 last = prod;
3004 cmd = 0;
3005 txd = NULL;
3006
3007 for (;;) {
3008 if (free < IAVF_TX_PKT_DESCS) {
3009 if (!is_transmit)
3010 SET(ifp->if_flags, IFF_OACTIVE);
3011 break;
3012 }
3013
3014 if (is_transmit)
3015 m = pcq_get(txr->txr_intrq);
3016 else
3017 IFQ_DEQUEUE(&ifp->if_snd, m);
3018
3019 if (m == NULL)
3020 break;
3021
3022 txm = &txr->txr_maps[prod];
3023 map = txm->txm_map;
3024
3025 if (iavf_load_mbuf(sc->sc_dmat, map, &m, txr) != 0) {
3026 if_statinc(ifp, if_oerrors);
3027 m_freem(m);
3028 continue;
3029 }
3030
3031 cmd_txd = 0;
3032 if (m->m_pkthdr.csum_flags & IAVF_CSUM_ALL_OFFLOAD) {
3033 iavf_tx_setup_offloads(m, &cmd_txd);
3034 }
3035 if (vlan_has_tag(m)) {
3036 uint16_t vtag;
3037 vtag = htole16(vlan_get_tag(m));
3038 cmd_txd |= IXL_TX_DESC_CMD_IL2TAG1 |
3039 ((uint64_t)vtag << IXL_TX_DESC_L2TAG1_SHIFT);
3040 }
3041
3042 bus_dmamap_sync(sc->sc_dmat, map, 0,
3043 map->dm_mapsize, BUS_DMASYNC_PREWRITE);
3044
3045 for (i = 0; i < (unsigned int)map->dm_nsegs; i++) {
3046 txd = &ring[prod];
3047
3048 cmd = (uint64_t)map->dm_segs[i].ds_len <<
3049 IXL_TX_DESC_BSIZE_SHIFT;
3050 cmd |= IXL_TX_DESC_DTYPE_DATA|IXL_TX_DESC_CMD_ICRC|
3051 cmd_txd;
3052
3053 txd->addr = htole64(map->dm_segs[i].ds_addr);
3054 txd->cmd = htole64(cmd);
3055
3056 last = prod;
3057 prod++;
3058 prod &= mask;
3059 }
3060
3061 cmd |= IXL_TX_DESC_CMD_EOP|IXL_TX_DESC_CMD_RS;
3062 txd->cmd = htole64(cmd);
3063 txm->txm_m = m;
3064 txm->txm_eop = last;
3065
3066 bpf_mtap(ifp, m, BPF_D_OUT);
3067 free -= i;
3068 post = 1;
3069 }
3070
3071 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
3072 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREWRITE);
3073
3074 if (post) {
3075 txr->txr_prod = prod;
3076 iavf_wr(sc, txr->txr_tail, prod);
3077 txr->txr_watchdog = IAVF_WATCHDOG_TICKS;
3078 }
3079 }
3080
3081 static inline int
3082 iavf_handle_queue_common(struct iavf_softc *sc, struct iavf_queue_pair *qp,
3083 u_int txlimit, struct evcnt *txevcnt,
3084 u_int rxlimit, struct evcnt *rxevcnt)
3085 {
3086 struct iavf_tx_ring *txr;
3087 struct iavf_rx_ring *rxr;
3088 int txmore, rxmore;
3089 int rv;
3090
3091 txr = qp->qp_txr;
3092 rxr = qp->qp_rxr;
3093
3094 mutex_enter(&txr->txr_lock);
3095 txmore = iavf_txeof(sc, txr, txlimit, txevcnt);
3096 mutex_exit(&txr->txr_lock);
3097
3098 mutex_enter(&rxr->rxr_lock);
3099 rxmore = iavf_rxeof(sc, rxr, rxlimit, rxevcnt);
3100 mutex_exit(&rxr->rxr_lock);
3101
3102 rv = txmore | (rxmore << 1);
3103
3104 return rv;
3105 }
3106
3107 static void
3108 iavf_sched_handle_queue(struct iavf_softc *sc, struct iavf_queue_pair *qp)
3109 {
3110
3111 if (qp->qp_workqueue)
3112 workqueue_enqueue(sc->sc_workq_txrx, &qp->qp_work, NULL);
3113 else
3114 softint_schedule(qp->qp_si);
3115 }
3116
3117 static void
3118 iavf_start(struct ifnet *ifp)
3119 {
3120 struct iavf_softc *sc;
3121 struct iavf_tx_ring *txr;
3122
3123 sc = ifp->if_softc;
3124 txr = sc->sc_qps[0].qp_txr;
3125
3126 mutex_enter(&txr->txr_lock);
3127 iavf_tx_common_locked(ifp, txr, false);
3128 mutex_exit(&txr->txr_lock);
3129
3130 }
3131
3132 static inline unsigned int
3133 iavf_select_txqueue(struct iavf_softc *sc, struct mbuf *m)
3134 {
3135 u_int cpuid;
3136
3137 cpuid = cpu_index(curcpu());
3138
3139 return (unsigned int)(cpuid % sc->sc_nqueue_pairs);
3140 }
3141
3142 static int
3143 iavf_transmit(struct ifnet *ifp, struct mbuf *m)
3144 {
3145 struct iavf_softc *sc;
3146 struct iavf_tx_ring *txr;
3147 unsigned int qid;
3148
3149 sc = ifp->if_softc;
3150 qid = iavf_select_txqueue(sc, m);
3151
3152 txr = sc->sc_qps[qid].qp_txr;
3153
3154 if (__predict_false(!pcq_put(txr->txr_intrq, m))) {
3155 mutex_enter(&txr->txr_lock);
3156 txr->txr_pcqdrop.ev_count++;
3157 mutex_exit(&txr->txr_lock);
3158
3159 m_freem(m);
3160 return ENOBUFS;
3161 }
3162
3163 if (mutex_tryenter(&txr->txr_lock)) {
3164 iavf_tx_common_locked(ifp, txr, true);
3165 mutex_exit(&txr->txr_lock);
3166 } else {
3167 kpreempt_disable();
3168 softint_schedule(txr->txr_si);
3169 kpreempt_enable();
3170 }
3171 return 0;
3172 }
3173
3174 static void
3175 iavf_deferred_transmit(void *xtxr)
3176 {
3177 struct iavf_tx_ring *txr;
3178 struct iavf_softc *sc;
3179 struct ifnet *ifp;
3180
3181 txr = xtxr;
3182 sc = txr->txr_sc;
3183 ifp = &sc->sc_ec.ec_if;
3184
3185 mutex_enter(&txr->txr_lock);
3186 txr->txr_transmitdef.ev_count++;
3187 if (pcq_peek(txr->txr_intrq) != NULL)
3188 iavf_tx_common_locked(ifp, txr, true);
3189 mutex_exit(&txr->txr_lock);
3190 }
3191
3192 static void
3193 iavf_txr_clean(struct iavf_softc *sc, struct iavf_tx_ring *txr)
3194 {
3195 struct iavf_tx_map *maps, *txm;
3196 bus_dmamap_t map;
3197 unsigned int i;
3198
3199 KASSERT(mutex_owned(&txr->txr_lock));
3200
3201 maps = txr->txr_maps;
3202 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
3203 txm = &maps[i];
3204
3205 if (txm->txm_m == NULL)
3206 continue;
3207
3208 map = txm->txm_map;
3209 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3210 BUS_DMASYNC_POSTWRITE);
3211 bus_dmamap_unload(sc->sc_dmat, map);
3212
3213 m_freem(txm->txm_m);
3214 txm->txm_m = NULL;
3215 }
3216
3217 memset(IXL_DMA_KVA(&txr->txr_mem), 0, IXL_DMA_LEN(&txr->txr_mem));
3218 txr->txr_prod = txr->txr_cons = 0;
3219 }
3220
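/*
 * iavf_intr --
 *	Misc (vector 0) interrupt handler: detect a VF reset in
 *	progress, service the admin queues, and drain every queue pair
 *	when queue interrupts are routed to vector 0.
 */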
3221 static int
3222 iavf_intr(void *xsc)
3223 {
3224 struct iavf_softc *sc = xsc;
3225 struct ifnet *ifp = &sc->sc_ec.ec_if;
3226 struct iavf_rx_ring *rxr;
3227 struct iavf_tx_ring *txr;
3228 uint32_t icr;
3229 unsigned int i;
3230
3231 	/* read I40E_VFINT_ICR0_ENA1 to clear status */
3232 (void)iavf_rd(sc, I40E_VFINT_ICR0_ENA1);
3233
3234 iavf_intr_enable(sc);
3235 icr = iavf_rd(sc, I40E_VFINT_ICR01);
3236
3237 if (icr == IAVF_REG_VFR) {
3238 log(LOG_INFO, "%s: VF reset in progress\n",
3239 ifp->if_xname);
3240 iavf_work_set(&sc->sc_reset_task, iavf_reset_start, sc);
3241 iavf_work_add(sc->sc_workq, &sc->sc_reset_task);
3242 return 1;
3243 }
3244
3245 if (ISSET(icr, I40E_VFINT_ICR01_ADMINQ_MASK)) {
3246 mutex_enter(&sc->sc_adminq_lock);
3247 iavf_atq_done(sc);
3248 iavf_arq(sc);
3249 mutex_exit(&sc->sc_adminq_lock);
3250 }
3251
3252 if (ISSET(icr, I40E_VFINT_ICR01_QUEUE_0_MASK)) {
3253 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
3254 rxr = sc->sc_qps[i].qp_rxr;
3255 txr = sc->sc_qps[i].qp_txr;
3256
3257 mutex_enter(&rxr->rxr_lock);
3258 while (iavf_rxeof(sc, rxr, UINT_MAX,
3259 &rxr->rxr_intr) != 0) {
3260 /* do nothing */
3261 }
3262 mutex_exit(&rxr->rxr_lock);
3263
3264 mutex_enter(&txr->txr_lock);
3265 while (iavf_txeof(sc, txr, UINT_MAX,
3266 &txr->txr_intr) != 0) {
3267 /* do nothing */
3268 }
3269 mutex_exit(&txr->txr_lock);
3270 }
3271 }
3272
3273 return 0;
3274 }
3275
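/*
 * iavf_queue_intr --
 *	Per queue pair MSI-X handler: process Tx and Rx up to the
 *	interrupt limits; reschedule via softint or workqueue if work
 *	remains, otherwise re-enable the queue interrupt.
 */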
3276 static int
3277 iavf_queue_intr(void *xqp)
3278 {
3279 struct iavf_queue_pair *qp = xqp;
3280 struct iavf_tx_ring *txr;
3281 struct iavf_rx_ring *rxr;
3282 struct iavf_softc *sc;
3283 unsigned int qid;
3284 u_int txlimit, rxlimit;
3285 int more;
3286
3287 txr = qp->qp_txr;
3288 rxr = qp->qp_rxr;
3289 sc = txr->txr_sc;
3290 qid = txr->txr_qid;
3291
3292 txlimit = sc->sc_tx_intr_process_limit;
3293 rxlimit = sc->sc_rx_intr_process_limit;
3294 qp->qp_workqueue = sc->sc_txrx_workqueue;
3295
3296 more = iavf_handle_queue_common(sc, qp,
3297 txlimit, &txr->txr_intr, rxlimit, &rxr->rxr_intr);
3298
3299 if (more != 0) {
3300 iavf_sched_handle_queue(sc, qp);
3301 } else {
3302 /* for ALTQ */
3303 if (txr->txr_qid == 0)
3304 if_schedule_deferred_start(&sc->sc_ec.ec_if);
3305 softint_schedule(txr->txr_si);
3306
3307 iavf_queue_intr_enable(sc, qid);
3308 }
3309
3310 return 0;
3311 }
3312
3313 static void
3314 iavf_handle_queue_wk(struct work *wk, void *xsc __unused)
3315 {
3316 struct iavf_queue_pair *qp;
3317
3318 qp = container_of(wk, struct iavf_queue_pair, qp_work);
3319 iavf_handle_queue(qp);
3320 }
3321
3322 static void
3323 iavf_handle_queue(void *xqp)
3324 {
3325 struct iavf_queue_pair *qp = xqp;
3326 struct iavf_tx_ring *txr;
3327 struct iavf_rx_ring *rxr;
3328 struct iavf_softc *sc;
3329 unsigned int qid;
3330 u_int txlimit, rxlimit;
3331 int more;
3332
3333 txr = qp->qp_txr;
3334 rxr = qp->qp_rxr;
3335 sc = txr->txr_sc;
3336 qid = txr->txr_qid;
3337
3338 txlimit = sc->sc_tx_process_limit;
3339 rxlimit = sc->sc_rx_process_limit;
3340
3341 more = iavf_handle_queue_common(sc, qp,
3342 txlimit, &txr->txr_defer, rxlimit, &rxr->rxr_defer);
3343
3344 if (more != 0)
3345 iavf_sched_handle_queue(sc, qp);
3346 else
3347 iavf_queue_intr_enable(sc, qid);
3348 }
3349
3350 static void
3351 iavf_tick(void *xsc)
3352 {
3353 struct iavf_softc *sc;
3354 unsigned int i;
3355 int timedout;
3356
3357 sc = xsc;
3358 timedout = 0;
3359
3360 mutex_enter(&sc->sc_cfg_lock);
3361
3362 if (sc->sc_resetting) {
3363 iavf_work_add(sc->sc_workq, &sc->sc_reset_task);
3364 mutex_exit(&sc->sc_cfg_lock);
3365 return;
3366 }
3367
3368 iavf_get_stats(sc);
3369
3370 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
3371 timedout |= iavf_watchdog(sc->sc_qps[i].qp_txr);
3372 }
3373
3374 if (timedout != 0) {
3375 iavf_work_add(sc->sc_workq, &sc->sc_wdto_task);
3376 } else {
3377 callout_schedule(&sc->sc_tick, IAVF_TICK_INTERVAL);
3378 }
3379
3380 mutex_exit(&sc->sc_cfg_lock);
3381 }
3382
3383 static void
3384 iavf_tick_halt(void *unused __unused)
3385 {
3386
3387 /* do nothing */
3388 }
3389
3390 static void
3391 iavf_reset_request(void *xsc)
3392 {
3393 struct iavf_softc *sc = xsc;
3394
3395 iavf_reset_vf(sc);
3396 iavf_reset_start(sc);
3397 }
3398
3399 static void
3400 iavf_reset_start(void *xsc)
3401 {
3402 struct iavf_softc *sc = xsc;
3403 struct ifnet *ifp = &sc->sc_ec.ec_if;
3404
3405 mutex_enter(&sc->sc_cfg_lock);
3406
3407 if (sc->sc_resetting)
3408 goto do_reset;
3409
3410 sc->sc_resetting = true;
3411 if_link_state_change(ifp, LINK_STATE_DOWN);
3412
3413 if (ISSET(ifp->if_flags, IFF_RUNNING)) {
3414 iavf_stop_locked(sc);
3415 sc->sc_reset_up = true;
3416 }
3417
3418 memcpy(sc->sc_enaddr_reset, sc->sc_enaddr, ETHER_ADDR_LEN);
3419
3420 do_reset:
3421 iavf_work_set(&sc->sc_reset_task, iavf_reset, sc);
3422
3423 mutex_exit(&sc->sc_cfg_lock);
3424
3425 iavf_reset((void *)sc);
3426 }
3427
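/*
 * iavf_reset --
 *	Recover from a VF reset: reinitialize the admin queue,
 *	renegotiate the interface version and VF resources, reallocate
 *	queue pairs and interrupts if the PF now offers more than are
 *	allocated, and reprogram the IRQ map before finishing the reset.
 */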
3428 static void
3429 iavf_reset(void *xsc)
3430 {
3431 struct iavf_softc *sc = xsc;
3432 struct ifnet *ifp = &sc->sc_ec.ec_if;
3433 struct ixl_aq_buf *aqb;
3434 bool realloc_qps, realloc_intrs;
3435
3436 mutex_enter(&sc->sc_cfg_lock);
3437
3438 mutex_enter(&sc->sc_adminq_lock);
3439 iavf_cleanup_admin_queue(sc);
3440 mutex_exit(&sc->sc_adminq_lock);
3441
3442 sc->sc_major_ver = UINT_MAX;
3443 sc->sc_minor_ver = UINT_MAX;
3444 sc->sc_got_vf_resources = 0;
3445 sc->sc_got_irq_map = 0;
3446
3447 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
3448 if (aqb == NULL)
3449 goto failed;
3450
3451 if (iavf_wait_active(sc) != 0) {
3452 log(LOG_WARNING, "%s: VF reset timed out\n",
3453 ifp->if_xname);
3454 goto failed;
3455 }
3456
3457 if (!iavf_arq_fill(sc)) {
3458 log(LOG_ERR, "%s: unable to fill arq descriptors\n",
3459 ifp->if_xname);
3460 goto failed;
3461 }
3462
3463 if (iavf_init_admin_queue(sc) != 0) {
3464 log(LOG_ERR, "%s: unable to initialize admin queue\n",
3465 ifp->if_xname);
3466 goto failed;
3467 }
3468
3469 if (iavf_get_version(sc, aqb) != 0) {
3470 log(LOG_ERR, "%s: unable to get VF interface version\n",
3471 ifp->if_xname);
3472 goto failed;
3473 }
3474
3475 if (iavf_get_vf_resources(sc, aqb) != 0) {
3476 log(LOG_ERR, "%s: timed out waiting for VF resources\n",
3477 ifp->if_xname);
3478 goto failed;
3479 }
3480
3481 if (sc->sc_nqps_alloc < iavf_calc_queue_pair_size(sc)) {
3482 realloc_qps = true;
3483 } else {
3484 realloc_qps = false;
3485 }
3486
3487 if (sc->sc_nintrs < iavf_calc_msix_count(sc)) {
3488 realloc_intrs = true;
3489 } else {
3490 realloc_intrs = false;
3491 }
3492
3493 if (realloc_qps || realloc_intrs)
3494 iavf_teardown_interrupts(sc);
3495
3496 if (realloc_qps) {
3497 iavf_queue_pairs_free(sc);
3498 if (iavf_queue_pairs_alloc(sc) != 0) {
3499 log(LOG_ERR, "%s: failed to allocate queue pairs\n",
3500 ifp->if_xname);
3501 goto failed;
3502 }
3503 }
3504
3505 if (realloc_qps || realloc_intrs) {
3506 if (iavf_setup_interrupts(sc) != 0) {
3507 sc->sc_nintrs = 0;
3508 log(LOG_ERR, "%s: failed to allocate interrupts\n",
3509 ifp->if_xname);
3510 goto failed;
3511 }
3512 log(LOG_INFO, "%s: reallocated queues\n", ifp->if_xname);
3513 }
3514
3515 if (iavf_config_irq_map(sc, aqb) != 0) {
3516 log(LOG_ERR, "%s: timed out configuring IRQ map\n",
3517 ifp->if_xname);
3518 goto failed;
3519 }
3520
3521 mutex_enter(&sc->sc_adminq_lock);
3522 iavf_aqb_put_locked(&sc->sc_atq_idle, aqb);
3523 mutex_exit(&sc->sc_adminq_lock);
3524
3525 iavf_reset_finish(sc);
3526
3527 mutex_exit(&sc->sc_cfg_lock);
3528 return;
3529
3530 failed:
3531 mutex_enter(&sc->sc_adminq_lock);
3532 iavf_cleanup_admin_queue(sc);
3533 if (aqb != NULL) {
3534 iavf_aqb_put_locked(&sc->sc_atq_idle, aqb);
3535 }
3536 mutex_exit(&sc->sc_adminq_lock);
3537 callout_schedule(&sc->sc_tick, IAVF_TICK_INTERVAL);
3538 mutex_exit(&sc->sc_cfg_lock);
3539 }
3540
3541 static void
3542 iavf_reset_finish(struct iavf_softc *sc)
3543 {
3544 struct ethercom *ec = &sc->sc_ec;
3545 struct ether_multi *enm;
3546 struct ether_multistep step;
3547 struct ifnet *ifp = &ec->ec_if;
3548 struct vlanid_list *vlanidp;
3549 uint8_t enaddr_prev[ETHER_ADDR_LEN], enaddr_next[ETHER_ADDR_LEN];
3550
3551 KASSERT(mutex_owned(&sc->sc_cfg_lock));
3552
3553 callout_stop(&sc->sc_tick);
3554
3555 iavf_intr_enable(sc);
3556
3557 if (!iavf_is_etheranyaddr(sc->sc_enaddr_added)) {
3558 iavf_eth_addr(sc, sc->sc_enaddr_added, IAVF_VC_OP_ADD_ETH_ADDR);
3559 }
3560
3561 ETHER_LOCK(ec);
3562 if (!ISSET(ifp->if_flags, IFF_ALLMULTI)) {
3563 for (ETHER_FIRST_MULTI(step, ec, enm); enm != NULL;
3564 ETHER_NEXT_MULTI(step, enm)) {
3565 iavf_add_multi(sc, enm->enm_addrlo, enm->enm_addrhi);
3566 }
3567 }
3568
3569 SIMPLEQ_FOREACH(vlanidp, &ec->ec_vids, vid_list) {
3570 ETHER_UNLOCK(ec);
3571 iavf_config_vlan_id(sc, vlanidp->vid, IAVF_VC_OP_ADD_VLAN);
3572 ETHER_LOCK(ec);
3573 }
3574 ETHER_UNLOCK(ec);
3575
3576 if (memcmp(sc->sc_enaddr, sc->sc_enaddr_reset, ETHER_ADDR_LEN) != 0) {
3577 memcpy(enaddr_prev, sc->sc_enaddr_reset, sizeof(enaddr_prev));
3578 memcpy(enaddr_next, sc->sc_enaddr, sizeof(enaddr_next));
3579 log(LOG_INFO, "%s: Ethernet address changed to %s\n",
3580 ifp->if_xname, ether_sprintf(enaddr_next));
3581
3582 mutex_exit(&sc->sc_cfg_lock);
3583 IFNET_LOCK(ifp);
3584 kpreempt_disable();
3585 /*XXX we need an API to change ethernet address. */
3586 iavf_replace_lla(ifp, enaddr_prev, enaddr_next);
3587 kpreempt_enable();
3588 IFNET_UNLOCK(ifp);
3589 mutex_enter(&sc->sc_cfg_lock);
3590 }
3591
3592 sc->sc_resetting = false;
3593
3594 if (sc->sc_reset_up) {
3595 iavf_init_locked(sc);
3596 }
3597
3598 if (sc->sc_link_state != LINK_STATE_DOWN) {
3599 if_link_state_change(ifp, sc->sc_link_state);
3600 }
3601
3602 }
3603
3604 static int
3605 iavf_dmamem_alloc(bus_dma_tag_t dmat, struct ixl_dmamem *ixm,
3606 bus_size_t size, bus_size_t align)
3607 {
3608 ixm->ixm_size = size;
3609
3610 if (bus_dmamap_create(dmat, ixm->ixm_size, 1,
3611 ixm->ixm_size, 0,
3612 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
3613 &ixm->ixm_map) != 0)
3614 return 1;
3615 if (bus_dmamem_alloc(dmat, ixm->ixm_size,
3616 align, 0, &ixm->ixm_seg, 1, &ixm->ixm_nsegs,
3617 BUS_DMA_WAITOK) != 0)
3618 goto destroy;
3619 if (bus_dmamem_map(dmat, &ixm->ixm_seg, ixm->ixm_nsegs,
3620 ixm->ixm_size, &ixm->ixm_kva, BUS_DMA_WAITOK) != 0)
3621 goto free;
3622 if (bus_dmamap_load(dmat, ixm->ixm_map, ixm->ixm_kva,
3623 ixm->ixm_size, NULL, BUS_DMA_WAITOK) != 0)
3624 goto unmap;
3625
3626 memset(ixm->ixm_kva, 0, ixm->ixm_size);
3627
3628 return 0;
3629 unmap:
3630 bus_dmamem_unmap(dmat, ixm->ixm_kva, ixm->ixm_size);
3631 free:
3632 bus_dmamem_free(dmat, &ixm->ixm_seg, 1);
3633 destroy:
3634 bus_dmamap_destroy(dmat, ixm->ixm_map);
3635 return 1;
3636 }
3637
3638 static void
3639 iavf_dmamem_free(bus_dma_tag_t dmat, struct ixl_dmamem *ixm)
3640 {
3641
3642 bus_dmamap_unload(dmat, ixm->ixm_map);
3643 bus_dmamem_unmap(dmat, ixm->ixm_kva, ixm->ixm_size);
3644 bus_dmamem_free(dmat, &ixm->ixm_seg, 1);
3645 bus_dmamap_destroy(dmat, ixm->ixm_map);
3646 }
3647
3648 static struct ixl_aq_buf *
3649 iavf_aqb_alloc(bus_dma_tag_t dmat, size_t buflen)
3650 {
3651 struct ixl_aq_buf *aqb;
3652
3653 aqb = kmem_alloc(sizeof(*aqb), KM_NOSLEEP);
3654 if (aqb == NULL)
3655 return NULL;
3656
3657 aqb->aqb_size = buflen;
3658
3659 if (bus_dmamap_create(dmat, aqb->aqb_size, 1,
3660 aqb->aqb_size, 0,
3661 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &aqb->aqb_map) != 0)
3662 goto free;
3663 if (bus_dmamem_alloc(dmat, aqb->aqb_size,
3664 IAVF_AQ_ALIGN, 0, &aqb->aqb_seg, 1, &aqb->aqb_nsegs,
3665 BUS_DMA_WAITOK) != 0)
3666 goto destroy;
3667 if (bus_dmamem_map(dmat, &aqb->aqb_seg, aqb->aqb_nsegs,
3668 aqb->aqb_size, &aqb->aqb_data, BUS_DMA_WAITOK) != 0)
3669 goto dma_free;
3670 if (bus_dmamap_load(dmat, aqb->aqb_map, aqb->aqb_data,
3671 aqb->aqb_size, NULL, BUS_DMA_WAITOK) != 0)
3672 goto unmap;
3673
3674 return aqb;
3675 unmap:
3676 bus_dmamem_unmap(dmat, aqb->aqb_data, aqb->aqb_size);
3677 dma_free:
3678 bus_dmamem_free(dmat, &aqb->aqb_seg, 1);
3679 destroy:
3680 bus_dmamap_destroy(dmat, aqb->aqb_map);
3681 free:
3682 kmem_free(aqb, sizeof(*aqb));
3683
3684 return NULL;
3685 }
3686
3687 static void
3688 iavf_aqb_free(bus_dma_tag_t dmat, struct ixl_aq_buf *aqb)
3689 {
3690
3691 bus_dmamap_unload(dmat, aqb->aqb_map);
3692 bus_dmamem_unmap(dmat, aqb->aqb_data, aqb->aqb_size);
3693 bus_dmamem_free(dmat, &aqb->aqb_seg, 1);
3694 bus_dmamap_destroy(dmat, aqb->aqb_map);
3695 kmem_free(aqb, sizeof(*aqb));
3696 }
3697
3698 static struct ixl_aq_buf *
3699 iavf_aqb_get_locked(struct ixl_aq_bufs *q)
3700 {
3701 struct ixl_aq_buf *aqb;
3702
3703 aqb = SIMPLEQ_FIRST(q);
3704 if (aqb != NULL) {
3705 SIMPLEQ_REMOVE(q, aqb, ixl_aq_buf, aqb_entry);
3706 }
3707
3708 return aqb;
3709 }
3710
3711 static struct ixl_aq_buf *
3712 iavf_aqb_get(struct iavf_softc *sc, struct ixl_aq_bufs *q)
3713 {
3714 struct ixl_aq_buf *aqb;
3715
3716 if (q != NULL) {
3717 mutex_enter(&sc->sc_adminq_lock);
3718 aqb = iavf_aqb_get_locked(q);
3719 mutex_exit(&sc->sc_adminq_lock);
3720 } else {
3721 aqb = NULL;
3722 }
3723
3724 if (aqb == NULL) {
3725 aqb = iavf_aqb_alloc(sc->sc_dmat, IAVF_AQ_BUFLEN);
3726 }
3727
3728 return aqb;
3729 }
3730
3731 static void
3732 iavf_aqb_put_locked(struct ixl_aq_bufs *q, struct ixl_aq_buf *aqb)
3733 {
3734
3735 SIMPLEQ_INSERT_TAIL(q, aqb, aqb_entry);
3736 }
3737
3738 static void
3739 iavf_aqb_clean(struct ixl_aq_bufs *q, bus_dma_tag_t dmat)
3740 {
3741 struct ixl_aq_buf *aqb;
3742
3743 while ((aqb = SIMPLEQ_FIRST(q)) != NULL) {
3744 SIMPLEQ_REMOVE(q, aqb, ixl_aq_buf, aqb_entry);
3745 iavf_aqb_free(dmat, aqb);
3746 }
3747 }
3748
3749 static const char *
3750 iavf_aq_vc_opcode_str(const struct ixl_aq_desc *iaq)
3751 {
3752
3753 switch (iavf_aq_vc_get_opcode(iaq)) {
3754 case IAVF_VC_OP_VERSION:
3755 return "GET_VERSION";
3756 case IAVF_VC_OP_RESET_VF:
3757 return "RESET_VF";
3758 case IAVF_VC_OP_GET_VF_RESOURCES:
3759 return "GET_VF_RESOURCES";
3760 case IAVF_VC_OP_CONFIG_TX_QUEUE:
3761 return "CONFIG_TX_QUEUE";
3762 case IAVF_VC_OP_CONFIG_RX_QUEUE:
3763 return "CONFIG_RX_QUEUE";
3764 case IAVF_VC_OP_CONFIG_VSI_QUEUES:
3765 return "CONFIG_VSI_QUEUES";
3766 case IAVF_VC_OP_CONFIG_IRQ_MAP:
3767 return "CONFIG_IRQ_MAP";
3768 case IAVF_VC_OP_ENABLE_QUEUES:
3769 return "ENABLE_QUEUES";
3770 case IAVF_VC_OP_DISABLE_QUEUES:
3771 return "DISABLE_QUEUES";
3772 case IAVF_VC_OP_ADD_ETH_ADDR:
3773 return "ADD_ETH_ADDR";
3774 case IAVF_VC_OP_DEL_ETH_ADDR:
3775 return "DEL_ETH_ADDR";
3776 case IAVF_VC_OP_CONFIG_PROMISC:
3777 return "CONFIG_PROMISC";
3778 case IAVF_VC_OP_GET_STATS:
3779 return "GET_STATS";
3780 case IAVF_VC_OP_EVENT:
3781 return "EVENT";
3782 case IAVF_VC_OP_CONFIG_RSS_KEY:
3783 return "CONFIG_RSS_KEY";
3784 case IAVF_VC_OP_CONFIG_RSS_LUT:
3785 return "CONFIG_RSS_LUT";
3786 case IAVF_VC_OP_GET_RSS_HENA_CAPS:
3787 		return "GET_RSS_HENA_CAPS";
3788 case IAVF_VC_OP_SET_RSS_HENA:
3789 return "SET_RSS_HENA";
3790 case IAVF_VC_OP_ENABLE_VLAN_STRIP:
3791 return "ENABLE_VLAN_STRIPPING";
3792 case IAVF_VC_OP_DISABLE_VLAN_STRIP:
3793 return "DISABLE_VLAN_STRIPPING";
3794 case IAVF_VC_OP_REQUEST_QUEUES:
3795 return "REQUEST_QUEUES";
3796 }
3797
3798 return "unknown";
3799 }
3800
3801 static void
3802 iavf_aq_dump(const struct iavf_softc *sc, const struct ixl_aq_desc *iaq,
3803 const char *msg)
3804 {
3805 char buf[512];
3806 size_t len;
3807
3808 len = sizeof(buf);
3809 buf[--len] = '\0';
3810
3811 device_printf(sc->sc_dev, "%s\n", msg);
3812 snprintb(buf, len, IXL_AQ_FLAGS_FMT, le16toh(iaq->iaq_flags));
3813 device_printf(sc->sc_dev, "flags %s opcode %04x\n",
3814 buf, le16toh(iaq->iaq_opcode));
3815 device_printf(sc->sc_dev, "datalen %u retval %u\n",
3816 le16toh(iaq->iaq_datalen), le16toh(iaq->iaq_retval));
3817 device_printf(sc->sc_dev, "vc-opcode %u (%s)\n",
3818 iavf_aq_vc_get_opcode(iaq),
3819 iavf_aq_vc_opcode_str(iaq));
3820 device_printf(sc->sc_dev, "vc-retval %u\n",
3821 iavf_aq_vc_get_retval(iaq));
3822 device_printf(sc->sc_dev, "cookie %016" PRIx64 "\n", iaq->iaq_cookie);
3823 device_printf(sc->sc_dev, "%08x %08x %08x %08x\n",
3824 le32toh(iaq->iaq_param[0]), le32toh(iaq->iaq_param[1]),
3825 le32toh(iaq->iaq_param[2]), le32toh(iaq->iaq_param[3]));
3826 }
3827
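/*
 * iavf_arq_fill --
 *	Attach idle admin receive buffers to the empty ARQ descriptors,
 *	advance the producer index and notify the hardware through the
 *	tail register.  Returns nonzero if any descriptor was armed.
 */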
3828 static int
3829 iavf_arq_fill(struct iavf_softc *sc)
3830 {
3831 struct ixl_aq_buf *aqb;
3832 struct ixl_aq_desc *arq, *iaq;
3833 unsigned int prod = sc->sc_arq_prod;
3834 unsigned int n;
3835 	int filled = 0;
3836
3837 n = ixl_rxr_unrefreshed(sc->sc_arq_prod, sc->sc_arq_cons,
3838 IAVF_AQ_NUM);
3839
3840 if (__predict_false(n <= 0))
3841 return 0;
3842
3843 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
3844 0, IXL_DMA_LEN(&sc->sc_arq),
3845 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3846
3847 arq = IXL_DMA_KVA(&sc->sc_arq);
3848
3849 do {
3850 iaq = &arq[prod];
3851
3852 if (ixl_aq_has_dva(iaq)) {
3853 /* already filled */
3854 break;
3855 }
3856
3857 aqb = iavf_aqb_get_locked(&sc->sc_arq_idle);
3858 if (aqb == NULL)
3859 break;
3860
3861 memset(aqb->aqb_data, 0, aqb->aqb_size);
3862
3863 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0,
3864 aqb->aqb_size, BUS_DMASYNC_PREREAD);
3865
3866 iaq->iaq_flags = htole16(IXL_AQ_BUF |
3867 (aqb->aqb_size > I40E_AQ_LARGE_BUF ?
3868 IXL_AQ_LB : 0));
3869 iaq->iaq_opcode = 0;
3870 iaq->iaq_datalen = htole16(aqb->aqb_size);
3871 iaq->iaq_retval = 0;
3872 iaq->iaq_cookie = 0;
3873 iaq->iaq_param[0] = 0;
3874 iaq->iaq_param[1] = 0;
3875 ixl_aq_dva(iaq, IXL_AQB_DVA(aqb));
3876 iavf_aqb_put_locked(&sc->sc_arq_live, aqb);
3877
3878 prod++;
3879 prod &= IAVF_AQ_MASK;
3880 filled = 1;
3881 } while (--n);
3882
3883 sc->sc_arq_prod = prod;
3884
3885 if (filled) {
3886 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
3887 0, IXL_DMA_LEN(&sc->sc_arq),
3888 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3889 iavf_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
3890 }
3891
3892 return filled;
3893 }
3894
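/*
 * Sleep on the admin queue condvar until a response carrying the given
 * virtchnl opcode has been processed by iavf_arq(), or the timeout
 * expires.  Called with sc_adminq_lock held.
 */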
3895 static int
3896 iavf_arq_wait(struct iavf_softc *sc, uint32_t opcode)
3897 {
3898 int error;
3899
3900 KASSERT(mutex_owned(&sc->sc_adminq_lock));
3901
3902 while ((error = cv_timedwait(&sc->sc_adminq_cv,
3903 &sc->sc_adminq_lock, mstohz(IAVF_EXEC_TIMEOUT))) == 0) {
3904 if (opcode == sc->sc_arq_opcode)
3905 break;
3906 }
3907
3908 if (error != 0 &&
3909 atomic_load_relaxed(&sc->sc_debuglevel) >= 2)
3910 device_printf(sc->sc_dev, "cv_timedwait error=%d\n", error);
3911
3912 return error;
3913 }
3914
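/*
 * Workqueue task that tops up the ARQ: refill from the idle buffer
 * list first, then allocate whatever is still missing and refill again.
 */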
3915 static void
3916 iavf_arq_refill(void *xsc)
3917 {
3918 struct iavf_softc *sc = xsc;
3919 struct ixl_aq_bufs aqbs;
3920 struct ixl_aq_buf *aqb;
3921 unsigned int n, i;
3922
3923 mutex_enter(&sc->sc_adminq_lock);
3924 iavf_arq_fill(sc);
3925 n = ixl_rxr_unrefreshed(sc->sc_arq_prod, sc->sc_arq_cons,
3926 IAVF_AQ_NUM);
3927 mutex_exit(&sc->sc_adminq_lock);
3928
3929 if (n == 0)
3930 return;
3931
3932 if (atomic_load_relaxed(&sc->sc_debuglevel) >= 1)
3933 device_printf(sc->sc_dev, "Allocate %d bufs for arq\n", n);
3934
3935 SIMPLEQ_INIT(&aqbs);
3936 for (i = 0; i < n; i++) {
3937 aqb = iavf_aqb_get(sc, NULL);
3938 if (aqb == NULL)
3939 continue;
3940 SIMPLEQ_INSERT_TAIL(&aqbs, aqb, aqb_entry);
3941 }
3942
3943 mutex_enter(&sc->sc_adminq_lock);
3944 while ((aqb = SIMPLEQ_FIRST(&aqbs)) != NULL) {
3945 SIMPLEQ_REMOVE(&aqbs, aqb, ixl_aq_buf, aqb_entry);
3946 iavf_aqb_put_locked(&sc->sc_arq_idle, aqb);
3947 }
3948 iavf_arq_fill(sc);
3949 mutex_exit(&sc->sc_adminq_lock);
3950 }
3951
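/*
 * Dispatch one received admin queue descriptor to its virtchnl handler
 * and return the virtchnl opcode it carried.
 */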
3952 static uint32_t
3953 iavf_process_arq(struct iavf_softc *sc, struct ixl_aq_desc *iaq,
3954 struct ixl_aq_buf *aqb)
3955 {
3956 uint32_t vc_retval, vc_opcode;
3957 int dbg;
3958
3959 dbg = atomic_load_relaxed(&sc->sc_debuglevel);
3960 if (dbg >= 3)
3961 iavf_aq_dump(sc, iaq, "arq proc");
3962
3963 if (dbg >= 2) {
3964 vc_retval = iavf_aq_vc_get_retval(iaq);
3965 if (vc_retval != IAVF_VC_RC_SUCCESS) {
3966 device_printf(sc->sc_dev, "%s failed=%d(arq)\n",
3967 iavf_aq_vc_opcode_str(iaq), vc_retval);
3968 }
3969 }
3970
3971 vc_opcode = iavf_aq_vc_get_opcode(iaq);
3972 switch (vc_opcode) {
3973 case IAVF_VC_OP_VERSION:
3974 iavf_process_version(sc, iaq, aqb);
3975 break;
3976 case IAVF_VC_OP_GET_VF_RESOURCES:
3977 iavf_process_vf_resources(sc, iaq, aqb);
3978 break;
3979 case IAVF_VC_OP_CONFIG_IRQ_MAP:
3980 iavf_process_irq_map(sc, iaq);
3981 break;
3982 case IAVF_VC_OP_EVENT:
3983 iavf_process_vc_event(sc, iaq, aqb);
3984 break;
3985 case IAVF_VC_OP_GET_STATS:
3986 iavf_process_stats(sc, iaq, aqb);
3987 break;
3988 case IAVF_VC_OP_REQUEST_QUEUES:
3989 iavf_process_req_queues(sc, iaq, aqb);
3990 break;
3991 }
3992
3993 return vc_opcode;
3994 }
3995
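/*
 * Busy-wait variant of ARQ processing: poll the head register and
 * handle pending descriptors until a response carrying wait_opcode has
 * been processed or the retry budget runs out.
 */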
3996 static int
3997 iavf_arq_poll(struct iavf_softc *sc, uint32_t wait_opcode, int retry)
3998 {
3999 struct ixl_aq_desc *arq, *iaq;
4000 struct ixl_aq_buf *aqb;
4001 unsigned int cons = sc->sc_arq_cons;
4002 unsigned int prod;
4003 uint32_t vc_opcode;
4004 bool received;
4005 int i;
4006
4007 for (i = 0, received = false; i < retry && !received; i++) {
4008 prod = iavf_rd(sc, sc->sc_aq_regs->arq_head);
4009 prod &= sc->sc_aq_regs->arq_head_mask;
4010
4011 if (prod == cons) {
4012 delaymsec(1);
4013 continue;
4014 }
4015
4016 if (prod >= IAVF_AQ_NUM) {
4017 return EIO;
4018 }
4019
4020 arq = IXL_DMA_KVA(&sc->sc_arq);
4021
4022 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
4023 0, IXL_DMA_LEN(&sc->sc_arq),
4024 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
4025
4026 do {
4027 iaq = &arq[cons];
4028 aqb = iavf_aqb_get_locked(&sc->sc_arq_live);
4029 KASSERT(aqb != NULL);
4030
4031 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0,
4032 IAVF_AQ_BUFLEN, BUS_DMASYNC_POSTREAD);
4033
4034 vc_opcode = iavf_process_arq(sc, iaq, aqb);
4035
4036 if (vc_opcode == wait_opcode)
4037 received = true;
4038
4039 memset(iaq, 0, sizeof(*iaq));
4040 iavf_aqb_put_locked(&sc->sc_arq_idle, aqb);
4041
4042 cons++;
4043 cons &= IAVF_AQ_MASK;
4044
4045 } while (cons != prod);
4046
4047 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
4048 0, IXL_DMA_LEN(&sc->sc_arq),
4049 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4050
4051 sc->sc_arq_cons = cons;
4052 iavf_arq_fill(sc);
4053
4054 }
4055
4056 if (!received)
4057 return ETIMEDOUT;
4058
4059 return 0;
4060 }
4061
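/*
 * Interrupt-path ARQ processing.  Handles every pending descriptor and,
 * for opcodes another thread may be sleeping on in iavf_arq_wait(),
 * records the virtchnl opcode and return value and signals the condvar.
 * Schedules a reset if the head register reads back a bogus value.
 */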
4062 static int
4063 iavf_arq(struct iavf_softc *sc)
4064 {
4065 struct ixl_aq_desc *arq, *iaq;
4066 struct ixl_aq_buf *aqb;
4067 unsigned int cons = sc->sc_arq_cons;
4068 unsigned int prod;
4069 uint32_t vc_opcode;
4070
4071 KASSERT(mutex_owned(&sc->sc_adminq_lock));
4072
4073 prod = iavf_rd(sc, sc->sc_aq_regs->arq_head);
4074 prod &= sc->sc_aq_regs->arq_head_mask;
4075
4076 	/* the head register reads back a bogus value while a reset is in progress */
4077 if (prod >= IAVF_AQ_NUM) {
4078 iavf_work_set(&sc->sc_reset_task, iavf_reset_start, sc);
4079 iavf_work_add(sc->sc_workq, &sc->sc_reset_task);
4080 return 0;
4081 }
4082
4083 if (cons == prod)
4084 return 0;
4085
4086 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
4087 0, IXL_DMA_LEN(&sc->sc_arq),
4088 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
4089
4090 arq = IXL_DMA_KVA(&sc->sc_arq);
4091
4092 do {
4093 iaq = &arq[cons];
4094 aqb = iavf_aqb_get_locked(&sc->sc_arq_live);
4095
4096 KASSERT(aqb != NULL);
4097
4098 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IAVF_AQ_BUFLEN,
4099 BUS_DMASYNC_POSTREAD);
4100
4101 vc_opcode = iavf_process_arq(sc, iaq, aqb);
4102
4103 switch (vc_opcode) {
4104 case IAVF_VC_OP_CONFIG_TX_QUEUE:
4105 case IAVF_VC_OP_CONFIG_RX_QUEUE:
4106 case IAVF_VC_OP_CONFIG_VSI_QUEUES:
4107 case IAVF_VC_OP_ENABLE_QUEUES:
4108 case IAVF_VC_OP_DISABLE_QUEUES:
4109 case IAVF_VC_OP_GET_RSS_HENA_CAPS:
4110 case IAVF_VC_OP_SET_RSS_HENA:
4111 case IAVF_VC_OP_ADD_ETH_ADDR:
4112 case IAVF_VC_OP_DEL_ETH_ADDR:
4113 case IAVF_VC_OP_CONFIG_PROMISC:
4114 case IAVF_VC_OP_ADD_VLAN:
4115 case IAVF_VC_OP_DEL_VLAN:
4116 case IAVF_VC_OP_ENABLE_VLAN_STRIP:
4117 case IAVF_VC_OP_DISABLE_VLAN_STRIP:
4118 case IAVF_VC_OP_CONFIG_RSS_KEY:
4119 case IAVF_VC_OP_CONFIG_RSS_LUT:
4120 sc->sc_arq_retval = iavf_aq_vc_get_retval(iaq);
4121 sc->sc_arq_opcode = vc_opcode;
4122 cv_signal(&sc->sc_adminq_cv);
4123 break;
4124 }
4125
4126 memset(iaq, 0, sizeof(*iaq));
4127 iavf_aqb_put_locked(&sc->sc_arq_idle, aqb);
4128
4129 cons++;
4130 cons &= IAVF_AQ_MASK;
4131 } while (cons != prod);
4132
4133 sc->sc_arq_cons = cons;
4134 iavf_work_add(sc->sc_workq, &sc->sc_arq_refill);
4135
4136 return 1;
4137 }
4138
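/*
 * Copy a command descriptor into the next admin transmit queue (ATQ)
 * slot, attach the optional data buffer, and ring the tail doorbell.
 * Returns the new producer index.
 */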
4139 static int
4140 iavf_atq_post(struct iavf_softc *sc, struct ixl_aq_desc *iaq,
4141 struct ixl_aq_buf *aqb)
4142 {
4143 struct ixl_aq_desc *atq, *slot;
4144 unsigned int prod;
4145
4146 atq = IXL_DMA_KVA(&sc->sc_atq);
4147 prod = sc->sc_atq_prod;
4148 slot = &atq[prod];
4149
4150 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
4151 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
4152
4153 *slot = *iaq;
4154 slot->iaq_flags |= htole16(IXL_AQ_SI);
4155 if (aqb != NULL) {
4156 ixl_aq_dva(slot, IXL_AQB_DVA(aqb));
4157 bus_dmamap_sync(sc->sc_dmat, IXL_AQB_MAP(aqb),
4158 0, IXL_AQB_LEN(aqb), BUS_DMASYNC_PREWRITE);
4159 iavf_aqb_put_locked(&sc->sc_atq_live, aqb);
4160 } else {
4161 ixl_aq_dva(slot, (bus_addr_t)0);
4162 }
4163
4164 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
4165 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
4166
4167 if (atomic_load_relaxed(&sc->sc_debuglevel) >= 3)
4168 iavf_aq_dump(sc, slot, "post");
4169
4170 prod++;
4171 prod &= IAVF_AQ_MASK;
4172 sc->sc_atq_prod = prod;
4173 iavf_wr(sc, sc->sc_aq_regs->atq_tail, prod);
4174 return prod;
4175 }
4176
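/*
 * Busy-wait (roughly tm milliseconds) for the hardware to catch up
 * with the ATQ producer index, then check the completion status.
 */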
4177 static int
4178 iavf_atq_poll(struct iavf_softc *sc, unsigned int tm)
4179 {
4180 struct ixl_aq_desc *atq, *slot;
4181 struct ixl_aq_desc iaq;
4182 unsigned int prod;
4183 unsigned int t;
4184 int dbg;
4185
4186 dbg = atomic_load_relaxed(&sc->sc_debuglevel);
4187 atq = IXL_DMA_KVA(&sc->sc_atq);
4188 prod = sc->sc_atq_prod;
4189 slot = &atq[prod];
4190 t = 0;
4191
4192 while (iavf_rd(sc, sc->sc_aq_regs->atq_head) != prod) {
4193 delaymsec(1);
4194
4195 if (t++ > tm) {
4196 if (dbg >= 2) {
4197 device_printf(sc->sc_dev,
4198 "atq timedout\n");
4199 }
4200 return ETIMEDOUT;
4201 }
4202 }
4203
4204 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
4205 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTREAD);
4206 iaq = *slot;
4207 memset(slot, 0, sizeof(*slot));
4208 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
4209 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREREAD);
4210
4211 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4212 if (dbg >= 2) {
4213 device_printf(sc->sc_dev,
4214 "atq retcode=0x%04x\n", le16toh(iaq.iaq_retval));
4215 }
4216 return EIO;
4217 }
4218
4219 return 0;
4220 }
4221
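/*
 * Reclaim ATQ descriptors the hardware has marked done and return their
 * data buffers to the idle list.  Called with sc_adminq_lock held.
 */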
4222 static void
4223 iavf_atq_done(struct iavf_softc *sc)
4224 {
4225 struct ixl_aq_desc *atq, *slot;
4226 struct ixl_aq_buf *aqb;
4227 unsigned int cons;
4228 unsigned int prod;
4229
4230 KASSERT(mutex_owned(&sc->sc_adminq_lock));
4231
4232 prod = sc->sc_atq_prod;
4233 cons = sc->sc_atq_cons;
4234
4235 if (prod == cons)
4236 return;
4237
4238 atq = IXL_DMA_KVA(&sc->sc_atq);
4239
4240 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
4241 0, IXL_DMA_LEN(&sc->sc_atq),
4242 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
4243
4244 do {
4245 slot = &atq[cons];
4246 if (!ISSET(slot->iaq_flags, htole16(IXL_AQ_DD)))
4247 break;
4248
4249 if (ixl_aq_has_dva(slot) &&
4250 (aqb = iavf_aqb_get_locked(&sc->sc_atq_live)) != NULL) {
4251 bus_dmamap_sync(sc->sc_dmat, IXL_AQB_MAP(aqb),
4252 0, IXL_AQB_LEN(aqb), BUS_DMASYNC_POSTWRITE);
4253 iavf_aqb_put_locked(&sc->sc_atq_idle, aqb);
4254 }
4255
4256 memset(slot, 0, sizeof(*slot));
4257
4258 cons++;
4259 cons &= IAVF_AQ_MASK;
4260 } while (cons != prod);
4261
4262 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
4263 0, IXL_DMA_LEN(&sc->sc_atq),
4264 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4265
4266 sc->sc_atq_cons = cons;
4267 }
4268
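/*
 * Post a virtchnl command and poll for both the ATQ completion and the
 * matching ARQ response without sleeping; the callers in this file use
 * this path during attach and while a reset is in progress.
 */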
4269 static int
4270 iavf_adminq_poll(struct iavf_softc *sc, struct ixl_aq_desc *iaq,
4271 struct ixl_aq_buf *aqb, int retry)
4272 {
4273 int error;
4274
4275 mutex_enter(&sc->sc_adminq_lock);
4276 error = iavf_adminq_poll_locked(sc, iaq, aqb, retry);
4277 mutex_exit(&sc->sc_adminq_lock);
4278
4279 return error;
4280 }
4281
4282 static int
4283 iavf_adminq_poll_locked(struct iavf_softc *sc,
4284 struct ixl_aq_desc *iaq, struct ixl_aq_buf *aqb, int retry)
4285 {
4286 uint32_t opcode;
4287 int error;
4288
4289 KASSERT(!sc->sc_attached || mutex_owned(&sc->sc_adminq_lock));
4290
4291 opcode = iavf_aq_vc_get_opcode(iaq);
4292
4293 iavf_atq_post(sc, iaq, aqb);
4294
4295 error = iavf_atq_poll(sc, retry);
4296
4297 	/*
4298 	 * Reclaim the aqb used by this command: it was added to
4299 	 * sc_atq_live in iavf_atq_post() and must be collected
4300 	 * whether or not the command succeeded.
4301 	 */
4302 if (aqb != NULL) {
4303 (void)iavf_aqb_get_locked(&sc->sc_atq_live);
4304 bus_dmamap_sync(sc->sc_dmat, IXL_AQB_MAP(aqb),
4305 0, IXL_AQB_LEN(aqb), BUS_DMASYNC_POSTWRITE);
4306 }
4307
4308 if (error)
4309 return error;
4310
4311 error = iavf_arq_poll(sc, opcode, retry);
4312
4313 if (error != 0 &&
4314 atomic_load_relaxed(&sc->sc_debuglevel) >= 1) {
4315 device_printf(sc->sc_dev, "%s failed=%d(polling)\n",
4316 iavf_aq_vc_opcode_str(iaq), error);
4317 }
4318
4319 return error;
4320 }
4321
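/*
 * Post a virtchnl command and sleep until iavf_arq() has processed the
 * matching response; returns the virtchnl return value, or the
 * cv_timedwait error on timeout.
 */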
4322 static int
4323 iavf_adminq_exec(struct iavf_softc *sc, struct ixl_aq_desc *iaq,
4324 struct ixl_aq_buf *aqb)
4325 {
4326 int error;
4327 uint32_t opcode;
4328
4329 opcode = iavf_aq_vc_get_opcode(iaq);
4330
4331 mutex_enter(&sc->sc_adminq_lock);
4332 iavf_atq_post(sc, iaq, aqb);
4333
4334 error = iavf_arq_wait(sc, opcode);
4335 if (error == 0) {
4336 error = sc->sc_arq_retval;
4337 if (error != IAVF_VC_RC_SUCCESS &&
4338 atomic_load_relaxed(&sc->sc_debuglevel) >= 1) {
4339 device_printf(sc->sc_dev, "%s failed=%d\n",
4340 iavf_aq_vc_opcode_str(iaq), error);
4341 }
4342 }
4343
4344 mutex_exit(&sc->sc_adminq_lock);
4345 return error;
4346 }
4347
4348 static void
4349 iavf_process_version(struct iavf_softc *sc, struct ixl_aq_desc *iaq,
4350 struct ixl_aq_buf *aqb)
4351 {
4352 struct iavf_vc_version_info *ver;
4353
4354 ver = (struct iavf_vc_version_info *)aqb->aqb_data;
4355 sc->sc_major_ver = le32toh(ver->major);
4356 sc->sc_minor_ver = le32toh(ver->minor);
4357 }
4358
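/*
 * Parse the GET_VF_RESOURCES reply: offload capabilities, maximum MTU,
 * VSI id, queue pair count and the MAC address assigned by the PF (the
 * locally generated address is kept if the PF did not provide one).
 */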
4359 static void
4360 iavf_process_vf_resources(struct iavf_softc *sc, struct ixl_aq_desc *iaq,
4361 struct ixl_aq_buf *aqb)
4362 {
4363 struct iavf_vc_vf_resource *vf_res;
4364 struct iavf_vc_vsi_resource *vsi_res;
4365 uint8_t *enaddr;
4366 int mtu, dbg;
4367 char buf[512];
4368
4369 dbg = atomic_load_relaxed(&sc->sc_debuglevel);
4370 sc->sc_got_vf_resources = 1;
4371
4372 vf_res = aqb->aqb_data;
4373 sc->sc_max_vectors = le16toh(vf_res->max_vectors);
4374 if (le16toh(vf_res->num_vsis) == 0) {
4375 if (dbg >= 1) {
4376 device_printf(sc->sc_dev, "no vsi available\n");
4377 }
4378 return;
4379 }
4380 sc->sc_vf_cap = le32toh(vf_res->offload_flags);
4381 if (dbg >= 2) {
4382 snprintb(buf, sizeof(buf),
4383 IAVF_VC_OFFLOAD_FMT, sc->sc_vf_cap);
4384 device_printf(sc->sc_dev, "VF cap=%s\n", buf);
4385 }
4386
4387 mtu = le16toh(vf_res->max_mtu);
4388 if (IAVF_MIN_MTU < mtu && mtu < IAVF_MAX_MTU) {
4389 sc->sc_max_mtu = MIN(IAVF_MAX_MTU, mtu);
4390 }
4391
4392 vsi_res = &vf_res->vsi_res[0];
4393 sc->sc_vsi_id = le16toh(vsi_res->vsi_id);
4394 sc->sc_vf_id = le32toh(iaq->iaq_param[0]);
4395 sc->sc_qset_handle = le16toh(vsi_res->qset_handle);
4396 sc->sc_nqps_vsi = le16toh(vsi_res->num_queue_pairs);
4397 if (!iavf_is_etheranyaddr(vsi_res->default_mac)) {
4398 enaddr = vsi_res->default_mac;
4399 } else {
4400 enaddr = sc->sc_enaddr_fake;
4401 }
4402 memcpy(sc->sc_enaddr, enaddr, ETHER_ADDR_LEN);
4403 }
4404
4405 static void
4406 iavf_process_irq_map(struct iavf_softc *sc, struct ixl_aq_desc *iaq)
4407 {
4408 uint32_t retval;
4409
4410 retval = iavf_aq_vc_get_retval(iaq);
4411 if (retval != IAVF_VC_RC_SUCCESS) {
4412 return;
4413 }
4414
4415 sc->sc_got_irq_map = 1;
4416 }
4417
4418 static void
4419 iavf_process_vc_event(struct iavf_softc *sc, struct ixl_aq_desc *iaq,
4420 struct ixl_aq_buf *aqb)
4421 {
4422 struct iavf_vc_pf_event *event;
4423 struct ifnet *ifp = &sc->sc_ec.ec_if;
4424 const struct iavf_link_speed *speed;
4425 int link;
4426
4427 event = aqb->aqb_data;
4428 switch (event->event) {
4429 case IAVF_VC_EVENT_LINK_CHANGE:
4430 sc->sc_media_status = IFM_AVALID;
4431 sc->sc_media_active = IFM_ETHER;
4432 link = LINK_STATE_DOWN;
4433 if (event->link_status) {
4434 link = LINK_STATE_UP;
4435 sc->sc_media_status |= IFM_ACTIVE;
4436 sc->sc_media_active |= IFM_FDX;
4437
4438 ifp->if_baudrate = 0;
4439 speed = iavf_find_link_speed(sc, event->link_speed);
4440 if (speed != NULL) {
4441 sc->sc_media_active |= speed->media;
4442 ifp->if_baudrate = speed->baudrate;
4443 }
4444 }
4445
4446 if (sc->sc_link_state != link) {
4447 sc->sc_link_state = link;
4448 if (sc->sc_attached) {
4449 if_link_state_change(ifp, link);
4450 }
4451 }
4452 break;
4453 case IAVF_VC_EVENT_RESET_IMPENDING:
4454 log(LOG_INFO, "%s: Reset warning received from the PF\n",
4455 ifp->if_xname);
4456 iavf_work_set(&sc->sc_reset_task, iavf_reset_request, sc);
4457 iavf_work_add(sc->sc_workq, &sc->sc_reset_task);
4458 break;
4459 }
4460 }
4461
4462 static void
4463 iavf_process_stats(struct iavf_softc *sc, struct ixl_aq_desc *iaq,
4464 struct ixl_aq_buf *aqb)
4465 {
4466 struct iavf_stat_counters *isc;
4467 struct i40e_eth_stats *st;
4468
4469 KASSERT(mutex_owned(&sc->sc_adminq_lock));
4470
4471 st = aqb->aqb_data;
4472 isc = &sc->sc_stat_counters;
4473
4474 isc->isc_rx_bytes.ev_count = st->rx_bytes;
4475 isc->isc_rx_unicast.ev_count = st->rx_unicast;
4476 isc->isc_rx_multicast.ev_count = st->rx_multicast;
4477 isc->isc_rx_broadcast.ev_count = st->rx_broadcast;
4478 isc->isc_rx_discards.ev_count = st->rx_discards;
4479 isc->isc_rx_unknown_protocol.ev_count = st->rx_unknown_protocol;
4480
4481 isc->isc_tx_bytes.ev_count = st->tx_bytes;
4482 isc->isc_tx_unicast.ev_count = st->tx_unicast;
4483 isc->isc_tx_multicast.ev_count = st->tx_multicast;
4484 isc->isc_tx_broadcast.ev_count = st->tx_broadcast;
4485 isc->isc_tx_discards.ev_count = st->tx_discards;
4486 isc->isc_tx_errors.ev_count = st->tx_errors;
4487 }
4488
4489 static void
4490 iavf_process_req_queues(struct iavf_softc *sc, struct ixl_aq_desc *iaq,
4491 struct ixl_aq_buf *aqb)
4492 {
4493 struct iavf_vc_res_request *req;
4494 struct ifnet *ifp;
4495 uint32_t vc_retval;
4496
4497 ifp = &sc->sc_ec.ec_if;
4498 req = aqb->aqb_data;
4499
4500 vc_retval = iavf_aq_vc_get_retval(iaq);
4501 if (vc_retval != IAVF_VC_RC_SUCCESS) {
4502 return;
4503 }
4504
4505 if (sc->sc_nqps_req < req->num_queue_pairs) {
4506 log(LOG_INFO,
4507 "%s: requested %d queues, but only %d left.\n",
4508 ifp->if_xname,
4509 sc->sc_nqps_req, req->num_queue_pairs);
4510 }
4511
4512 if (sc->sc_nqps_vsi < req->num_queue_pairs) {
4513 if (!sc->sc_req_queues_retried) {
4514 /* req->num_queue_pairs indicates max qps */
4515 sc->sc_nqps_req = req->num_queue_pairs;
4516
4517 sc->sc_req_queues_retried = true;
4518 iavf_work_add(sc->sc_workq, &sc->sc_req_queues_task);
4519 }
4520 }
4521 }
4522
4523 static int
4524 iavf_get_version(struct iavf_softc *sc, struct ixl_aq_buf *aqb)
4525 {
4526 struct ixl_aq_desc iaq;
4527 struct iavf_vc_version_info *ver;
4528 int error;
4529
4530 memset(&iaq, 0, sizeof(iaq));
4531 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4532 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4533 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_VERSION);
4534 iaq.iaq_datalen = htole16(sizeof(struct iavf_vc_version_info));
4535
4536 ver = IXL_AQB_KVA(aqb);
4537 ver->major = htole32(IAVF_VF_MAJOR);
4538 ver->minor = htole32(IAVF_VF_MINOR);
4539
4540 sc->sc_major_ver = UINT_MAX;
4541 sc->sc_minor_ver = UINT_MAX;
4542
4543 if (sc->sc_attached) {
4544 error = iavf_adminq_poll(sc, &iaq, aqb, 250);
4545 } else {
4546 error = iavf_adminq_poll_locked(sc, &iaq, aqb, 250);
4547 }
4548
4549 if (error)
4550 return -1;
4551
4552 return 0;
4553 }
4554
4555 static int
4556 iavf_get_vf_resources(struct iavf_softc *sc, struct ixl_aq_buf *aqb)
4557 {
4558 struct ixl_aq_desc iaq;
4559 uint32_t *cap, cap0;
4560 int error;
4561
4562 memset(&iaq, 0, sizeof(iaq));
4563 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4564 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4565 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_GET_VF_RESOURCES);
4566
4567 if (sc->sc_major_ver > 0) {
4568 cap0 = IAVF_VC_OFFLOAD_L2 |
4569 IAVF_VC_OFFLOAD_VLAN |
4570 IAVF_VC_OFFLOAD_RSS_PF |
4571 IAVF_VC_OFFLOAD_REQ_QUEUES;
4572
4573 cap = IXL_AQB_KVA(aqb);
4574 *cap = htole32(cap0);
4575 iaq.iaq_datalen = htole16(sizeof(*cap));
4576 }
4577
4578 sc->sc_got_vf_resources = 0;
4579 if (sc->sc_attached) {
4580 error = iavf_adminq_poll(sc, &iaq, aqb, 250);
4581 } else {
4582 error = iavf_adminq_poll_locked(sc, &iaq, aqb, 250);
4583 }
4584
4585 if (error)
4586 return -1;
4587 return 0;
4588 }
4589
4590 static int
4591 iavf_get_stats(struct iavf_softc *sc)
4592 {
4593 struct ixl_aq_desc iaq;
4594 struct ixl_aq_buf *aqb;
4595 struct iavf_vc_queue_select *qsel;
4596 int error;
4597
4598 mutex_enter(&sc->sc_adminq_lock);
4599 aqb = iavf_aqb_get_locked(&sc->sc_atq_idle);
4600 mutex_exit(&sc->sc_adminq_lock);
4601
4602 if (aqb == NULL)
4603 return ENOMEM;
4604
4605 qsel = IXL_AQB_KVA(aqb);
4606 memset(qsel, 0, sizeof(*qsel));
4607 qsel->vsi_id = htole16(sc->sc_vsi_id);
4608
4609 memset(&iaq, 0, sizeof(iaq));
4610
4611 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4612 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4613 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_GET_STATS);
4614 iaq.iaq_datalen = htole16(sizeof(*qsel));
4615
4616 if (atomic_load_relaxed(&sc->sc_debuglevel) >= 3) {
4617 device_printf(sc->sc_dev, "post GET_STATS command\n");
4618 }
4619
4620 mutex_enter(&sc->sc_adminq_lock);
4621 error = iavf_atq_post(sc, &iaq, aqb);
4622 mutex_exit(&sc->sc_adminq_lock);
4623
4624 return error;
4625 }
4626
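/*
 * Build the vector-to-queue map and send CONFIG_IRQ_MAP to the PF:
 * either a single vector covering all queues, or one vector per queue
 * pair plus a final entry for vector 0 with no queues mapped.
 */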
4627 static int
4628 iavf_config_irq_map(struct iavf_softc *sc, struct ixl_aq_buf *aqb)
4629 {
4630 struct ixl_aq_desc iaq;
4631 struct iavf_vc_vector_map *vec;
4632 struct iavf_vc_irq_map_info *map;
4633 struct iavf_rx_ring *rxr;
4634 struct iavf_tx_ring *txr;
4635 unsigned int num_vec;
4636 int error;
4637
4638 map = IXL_AQB_KVA(aqb);
4639 vec = map->vecmap;
4640 num_vec = 0;
4641
4642 if (sc->sc_nintrs == 1) {
4643 vec[0].vsi_id = htole16(sc->sc_vsi_id);
4644 vec[0].vector_id = htole16(0);
4645 vec[0].rxq_map = htole16(iavf_allqueues(sc));
4646 vec[0].txq_map = htole16(iavf_allqueues(sc));
4647 vec[0].rxitr_idx = htole16(IAVF_NOITR);
4648 		vec[0].txitr_idx = htole16(IAVF_NOITR);
4649 num_vec = 1;
4650 } else if (sc->sc_nintrs > 1) {
4651 KASSERT(sc->sc_nqps_alloc >= (sc->sc_nintrs - 1));
4652 for (; num_vec < (sc->sc_nintrs - 1); num_vec++) {
4653 rxr = sc->sc_qps[num_vec].qp_rxr;
4654 txr = sc->sc_qps[num_vec].qp_txr;
4655
4656 vec[num_vec].vsi_id = htole16(sc->sc_vsi_id);
4657 vec[num_vec].vector_id = htole16(num_vec + 1);
4658 vec[num_vec].rxq_map = htole16(__BIT(rxr->rxr_qid));
4659 vec[num_vec].txq_map = htole16(__BIT(txr->txr_qid));
4660 vec[num_vec].rxitr_idx = htole16(IAVF_ITR_RX);
4661 vec[num_vec].txitr_idx = htole16(IAVF_ITR_TX);
4662 }
4663
4664 vec[num_vec].vsi_id = htole16(sc->sc_vsi_id);
4665 vec[num_vec].vector_id = htole16(0);
4666 vec[num_vec].rxq_map = htole16(0);
4667 vec[num_vec].txq_map = htole16(0);
4668 num_vec++;
4669 }
4670
4671 map->num_vectors = htole16(num_vec);
4672
4673 memset(&iaq, 0, sizeof(iaq));
4674 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4675 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4676 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_CONFIG_IRQ_MAP);
4677 iaq.iaq_datalen = htole16(sizeof(*map) + sizeof(*vec) * num_vec);
4678
4679 if (sc->sc_attached) {
4680 error = iavf_adminq_poll(sc, &iaq, aqb, 250);
4681 } else {
4682 error = iavf_adminq_poll_locked(sc, &iaq, aqb, 250);
4683 }
4684
4685 if (error)
4686 return -1;
4687
4688 return 0;
4689 }
4690
4691 static int
4692 iavf_config_vsi_queues(struct iavf_softc *sc)
4693 {
4694 struct ifnet *ifp = &sc->sc_ec.ec_if;
4695 struct ixl_aq_desc iaq;
4696 struct ixl_aq_buf *aqb;
4697 struct iavf_vc_queue_config_info *config;
4698 struct iavf_vc_txq_info *txq;
4699 struct iavf_vc_rxq_info *rxq;
4700 struct iavf_rx_ring *rxr;
4701 struct iavf_tx_ring *txr;
4702 uint32_t rxmtu_max;
4703 unsigned int i;
4704 int error;
4705
4706 rxmtu_max = ifp->if_mtu + IAVF_MTU_ETHERLEN;
4707
4708 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
4709
4710 if (aqb == NULL)
4711 return -1;
4712
4713 config = IXL_AQB_KVA(aqb);
4714 memset(config, 0, sizeof(*config));
4715 config->vsi_id = htole16(sc->sc_vsi_id);
4716 config->num_queue_pairs = htole16(sc->sc_nqueue_pairs);
4717
4718 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
4719 rxr = sc->sc_qps[i].qp_rxr;
4720 txr = sc->sc_qps[i].qp_txr;
4721
4722 txq = &config->qpair[i].txq;
4723 txq->vsi_id = htole16(sc->sc_vsi_id);
4724 txq->queue_id = htole16(txr->txr_qid);
4725 txq->ring_len = htole16(sc->sc_tx_ring_ndescs);
4726 txq->headwb_ena = 0;
4727 txq->dma_ring_addr = htole64(IXL_DMA_DVA(&txr->txr_mem));
4728 txq->dma_headwb_addr = 0;
4729
4730 rxq = &config->qpair[i].rxq;
4731 rxq->vsi_id = htole16(sc->sc_vsi_id);
4732 rxq->queue_id = htole16(rxr->rxr_qid);
4733 rxq->ring_len = htole16(sc->sc_rx_ring_ndescs);
4734 rxq->splithdr_ena = 0;
4735 rxq->databuf_size = htole32(IAVF_MCLBYTES);
4736 rxq->max_pkt_size = htole32(rxmtu_max);
4737 rxq->dma_ring_addr = htole64(IXL_DMA_DVA(&rxr->rxr_mem));
4738 rxq->rx_split_pos = 0;
4739 }
4740
4741 memset(&iaq, 0, sizeof(iaq));
4742 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4743 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4744 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_CONFIG_VSI_QUEUES);
4745 iaq.iaq_datalen = htole16(sizeof(*config) +
4746 sizeof(config->qpair[0]) * sc->sc_nqueue_pairs);
4747
4748 error = iavf_adminq_exec(sc, &iaq, aqb);
4749 if (error != IAVF_VC_RC_SUCCESS) {
4750 return -1;
4751 }
4752
4753 return 0;
4754 }
4755
4756 static int
4757 iavf_config_hena(struct iavf_softc *sc)
4758 {
4759 struct ixl_aq_desc iaq;
4760 struct ixl_aq_buf *aqb;
4761 uint64_t *caps;
4762 int error;
4763
4764 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
4765
4766 if (aqb == NULL)
4767 return -1;
4768
4769 caps = IXL_AQB_KVA(aqb);
4770 if (sc->sc_mac_type == I40E_MAC_X722_VF)
4771 *caps = IXL_RSS_HENA_DEFAULT_X722;
4772 else
4773 *caps = IXL_RSS_HENA_DEFAULT_XL710;
4774
4775 memset(&iaq, 0, sizeof(iaq));
4776 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4777 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4778 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_SET_RSS_HENA);
4779 iaq.iaq_datalen = htole16(sizeof(*caps));
4780
4781 error = iavf_adminq_exec(sc, &iaq, aqb);
4782 if (error != IAVF_VC_RC_SUCCESS) {
4783 return -1;
4784 }
4785
4786 return 0;
4787 }
4788
4789 static inline void
4790 iavf_get_default_rss_key(uint8_t *buf, size_t len)
4791 {
4792 uint8_t rss_seed[RSS_KEYSIZE];
4793 size_t cplen;
4794
4795 cplen = MIN(len, sizeof(rss_seed));
4796 rss_getkey(rss_seed);
4797
4798 memcpy(buf, rss_seed, cplen);
4799 if (cplen < len)
4800 memset(buf + cplen, 0, len - cplen);
4801 }
4802
4803 static int
4804 iavf_config_rss_key(struct iavf_softc *sc)
4805 {
4806 struct ixl_aq_desc iaq;
4807 struct ixl_aq_buf *aqb;
4808 struct iavf_vc_rss_key *rss_key;
4809 size_t key_len;
4810 int rv;
4811
4812 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
4813 if (aqb == NULL)
4814 return -1;
4815
4816 rss_key = IXL_AQB_KVA(aqb);
4817 rss_key->vsi_id = htole16(sc->sc_vsi_id);
4818 key_len = IXL_RSS_KEY_SIZE;
4819 iavf_get_default_rss_key(rss_key->key, key_len);
4820 rss_key->key_len = key_len;
4821
4822 memset(&iaq, 0, sizeof(iaq));
4823 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4824 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4825 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_CONFIG_RSS_KEY);
4826 iaq.iaq_datalen = htole16(sizeof(*rss_key) - sizeof(rss_key->pad)
4827 + (sizeof(rss_key->key[0]) * key_len));
4828
4829 rv = iavf_adminq_exec(sc, &iaq, aqb);
4830 if (rv != IAVF_VC_RC_SUCCESS) {
4831 return -1;
4832 }
4833
4834 return 0;
4835 }
4836
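/*
 * Program the RSS lookup table, spreading the entries round-robin
 * across the active queue pairs.
 */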
4837 static int
4838 iavf_config_rss_lut(struct iavf_softc *sc)
4839 {
4840 struct ixl_aq_desc iaq;
4841 struct ixl_aq_buf *aqb;
4842 struct iavf_vc_rss_lut *rss_lut;
4843 uint8_t *lut, v;
4844 int rv, i;
4845
4846 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
4847 if (aqb == NULL)
4848 return -1;
4849
4850 rss_lut = IXL_AQB_KVA(aqb);
4851 rss_lut->vsi_id = htole16(sc->sc_vsi_id);
4852 rss_lut->lut_entries = htole16(IXL_RSS_VSI_LUT_SIZE);
4853
4854 lut = rss_lut->lut;
4855 for (i = 0; i < IXL_RSS_VSI_LUT_SIZE; i++) {
4856 v = i % sc->sc_nqueue_pairs;
4857 v &= IAVF_RSS_VSI_LUT_ENTRY_MASK;
4858 lut[i] = v;
4859 }
4860
4861 memset(&iaq, 0, sizeof(iaq));
4862 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4863 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4864 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_CONFIG_RSS_LUT);
4865 iaq.iaq_datalen = htole16(sizeof(*rss_lut) - sizeof(rss_lut->pad)
4866 + (sizeof(rss_lut->lut[0]) * IXL_RSS_VSI_LUT_SIZE));
4867
4868 rv = iavf_adminq_exec(sc, &iaq, aqb);
4869 if (rv != IAVF_VC_RC_SUCCESS) {
4870 return -1;
4871 }
4872
4873 return 0;
4874 }
4875
4876 static int
4877 iavf_queue_select(struct iavf_softc *sc, int opcode)
4878 {
4879 struct ixl_aq_desc iaq;
4880 struct ixl_aq_buf *aqb;
4881 struct iavf_vc_queue_select *qsel;
4882 int error;
4883
4884 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
4885 if (aqb == NULL)
4886 return -1;
4887
4888 qsel = IXL_AQB_KVA(aqb);
4889 qsel->vsi_id = htole16(sc->sc_vsi_id);
4890 qsel->rx_queues = htole32(iavf_allqueues(sc));
4891 qsel->tx_queues = htole32(iavf_allqueues(sc));
4892
4893 memset(&iaq, 0, sizeof(iaq));
4894 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4895 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4896 iavf_aq_vc_set_opcode(&iaq, opcode);
4897 iaq.iaq_datalen = htole16(sizeof(*qsel));
4898
4899 error = iavf_adminq_exec(sc, &iaq, aqb);
4900 if (error != IAVF_VC_RC_SUCCESS) {
4901 return -1;
4902 }
4903
4904 return 0;
4905 }
4906
4907 static int
4908 iavf_request_queues(struct iavf_softc *sc, unsigned int req_num)
4909 {
4910 struct ixl_aq_desc iaq;
4911 struct ixl_aq_buf *aqb;
4912 struct iavf_vc_res_request *req;
4913 int rv;
4914
4915 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
4916 if (aqb == NULL)
4917 return ENOMEM;
4918
4919 req = IXL_AQB_KVA(aqb);
4920 req->num_queue_pairs = req_num;
4921
4922 memset(&iaq, 0, sizeof(iaq));
4923 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4924 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4925 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_REQUEST_QUEUES);
4926 iaq.iaq_datalen = htole16(sizeof(*req));
4927
4928 mutex_enter(&sc->sc_adminq_lock);
4929 rv = iavf_atq_post(sc, &iaq, aqb);
4930 mutex_exit(&sc->sc_adminq_lock);
4931
4932 return rv;
4933 }
4934
4935 static int
4936 iavf_reset_vf(struct iavf_softc *sc)
4937 {
4938 struct ixl_aq_desc iaq;
4939 int error;
4940
4941 memset(&iaq, 0, sizeof(iaq));
4942 iaq.iaq_flags = htole16(IXL_AQ_RD);
4943 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4944 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_RESET_VF);
4945 iaq.iaq_datalen = htole16(0);
4946
4947 iavf_wr(sc, I40E_VFGEN_RSTAT, IAVF_VFR_INPROGRESS);
4948
4949 mutex_enter(&sc->sc_adminq_lock);
4950 error = iavf_atq_post(sc, &iaq, NULL);
4951 mutex_exit(&sc->sc_adminq_lock);
4952
4953 return error;
4954 }
4955
4956 static int
4957 iavf_eth_addr(struct iavf_softc *sc, const uint8_t *addr, uint32_t opcode)
4958 {
4959 struct ixl_aq_desc iaq;
4960 struct ixl_aq_buf *aqb;
4961 struct iavf_vc_eth_addr_list *addrs;
4962 struct iavf_vc_eth_addr *vcaddr;
4963 int rv;
4964
4965 KASSERT(sc->sc_attached);
4966 KASSERT(opcode == IAVF_VC_OP_ADD_ETH_ADDR ||
4967 opcode == IAVF_VC_OP_DEL_ETH_ADDR);
4968
4969 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
4970 if (aqb == NULL)
4971 return -1;
4972
4973 addrs = IXL_AQB_KVA(aqb);
4974 addrs->vsi_id = htole16(sc->sc_vsi_id);
4975 addrs->num_elements = htole16(1);
4976 vcaddr = addrs->list;
4977 memcpy(vcaddr->addr, addr, ETHER_ADDR_LEN);
4978
4979 memset(&iaq, 0, sizeof(iaq));
4980 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4981 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
4982 iavf_aq_vc_set_opcode(&iaq, opcode);
4983 iaq.iaq_datalen = htole16(sizeof(*addrs) + sizeof(*vcaddr));
4984
4985 if (sc->sc_resetting) {
4986 mutex_enter(&sc->sc_adminq_lock);
4987 rv = iavf_adminq_poll_locked(sc, &iaq, aqb, 250);
4988 iavf_aqb_put_locked(&sc->sc_atq_idle, aqb);
4989 mutex_exit(&sc->sc_adminq_lock);
4990 } else {
4991 rv = iavf_adminq_exec(sc, &iaq, aqb);
4992 }
4993
4994 if (rv != IAVF_VC_RC_SUCCESS) {
4995 return -1;
4996 }
4997
4998 return 0;
4999 }
5000
5001 static int
5002 iavf_config_promisc_mode(struct iavf_softc *sc, int unicast, int multicast)
5003 {
5004 struct ixl_aq_desc iaq;
5005 struct ixl_aq_buf *aqb;
5006 struct iavf_vc_promisc_info *promisc;
5007 int flags;
5008
5009 KASSERT(sc->sc_attached);
5010
5011 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
5012 if (aqb == NULL)
5013 return -1;
5014
5015 flags = 0;
5016 if (unicast)
5017 flags |= IAVF_FLAG_VF_UNICAST_PROMISC;
5018 if (multicast)
5019 flags |= IAVF_FLAG_VF_MULTICAST_PROMISC;
5020
5021 promisc = IXL_AQB_KVA(aqb);
5022 promisc->vsi_id = htole16(sc->sc_vsi_id);
5023 promisc->flags = htole16(flags);
5024
5025 memset(&iaq, 0, sizeof(iaq));
5026 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
5027 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
5028 iavf_aq_vc_set_opcode(&iaq, IAVF_VC_OP_CONFIG_PROMISC);
5029 iaq.iaq_datalen = htole16(sizeof(*promisc));
5030
5031 if (iavf_adminq_exec(sc, &iaq, aqb) != IAVF_VC_RC_SUCCESS) {
5032 return -1;
5033 }
5034
5035 return 0;
5036 }
5037
5038 static int
5039 iavf_config_vlan_stripping(struct iavf_softc *sc, int eccap)
5040 {
5041 struct ixl_aq_desc iaq;
5042 uint32_t opcode;
5043
5044 opcode = ISSET(eccap, ETHERCAP_VLAN_HWTAGGING) ?
5045 IAVF_VC_OP_ENABLE_VLAN_STRIP : IAVF_VC_OP_DISABLE_VLAN_STRIP;
5046
5047 memset(&iaq, 0, sizeof(iaq));
5048 iaq.iaq_flags = htole16(IXL_AQ_RD);
5049 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
5050 iavf_aq_vc_set_opcode(&iaq, opcode);
5051 iaq.iaq_datalen = htole16(0);
5052
5053 if (iavf_adminq_exec(sc, &iaq, NULL) != IAVF_VC_RC_SUCCESS) {
5054 return -1;
5055 }
5056
5057 return 0;
5058 }
5059
5060 static int
5061 iavf_config_vlan_id(struct iavf_softc *sc, uint16_t vid, uint32_t opcode)
5062 {
5063 struct ixl_aq_desc iaq;
5064 struct ixl_aq_buf *aqb;
5065 struct iavf_vc_vlan_filter *vfilter;
5066 int rv;
5067
5068 KASSERT(opcode == IAVF_VC_OP_ADD_VLAN || opcode == IAVF_VC_OP_DEL_VLAN);
5069
5070 aqb = iavf_aqb_get(sc, &sc->sc_atq_idle);
5071
5072 if (aqb == NULL)
5073 return -1;
5074
5075 vfilter = IXL_AQB_KVA(aqb);
5076 vfilter->vsi_id = htole16(sc->sc_vsi_id);
5077 vfilter->num_vlan_id = htole16(1);
5078 vfilter->vlan_id[0] = vid;
5079
5080 memset(&iaq, 0, sizeof(iaq));
5081 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
5082 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
5083 iavf_aq_vc_set_opcode(&iaq, opcode);
5084 iaq.iaq_datalen = htole16(sizeof(*vfilter) + sizeof(vid));
5085
5086 if (sc->sc_resetting) {
5087 mutex_enter(&sc->sc_adminq_lock);
5088 rv = iavf_adminq_poll_locked(sc, &iaq, aqb, 250);
5089 iavf_aqb_put_locked(&sc->sc_atq_idle, aqb);
5090 mutex_exit(&sc->sc_adminq_lock);
5091 } else {
5092 rv = iavf_adminq_exec(sc, &iaq, aqb);
5093 }
5094
5095 if (rv != IAVF_VC_RC_SUCCESS) {
5096 return -1;
5097 }
5098
5099 return 0;
5100 }
5101
5102 static void
5103 iavf_post_request_queues(void *xsc)
5104 {
5105 struct iavf_softc *sc;
5106 struct ifnet *ifp;
5107
5108 sc = xsc;
5109 ifp = &sc->sc_ec.ec_if;
5110
5111 if (!ISSET(sc->sc_vf_cap, IAVF_VC_OFFLOAD_REQ_QUEUES)) {
5112 log(LOG_DEBUG, "%s: the VF has no REQ_QUEUES capability\n",
5113 ifp->if_xname);
5114 return;
5115 }
5116
5117 log(LOG_INFO, "%s: try to change the number of queue pairs"
5118 " (vsi %u, %u allocated, request %u)\n",
5119 ifp->if_xname,
5120 sc->sc_nqps_vsi, sc->sc_nqps_alloc, sc->sc_nqps_req);
5121 iavf_request_queues(sc, sc->sc_nqps_req);
5122 }
5123
5124 static bool
5125 iavf_sysctlnode_is_rx(struct sysctlnode *node)
5126 {
5127
5128 if (strstr(node->sysctl_parent->sysctl_name, "rx") != NULL)
5129 return true;
5130
5131 return false;
5132 }
5133
5134 static int
5135 iavf_sysctl_itr_handler(SYSCTLFN_ARGS)
5136 {
5137 struct sysctlnode node = *rnode;
5138 struct iavf_softc *sc = (struct iavf_softc *)node.sysctl_data;
5139 uint32_t newitr, *itrptr;
5140 unsigned int i;
5141 int itr, error;
5142
5143 if (iavf_sysctlnode_is_rx(&node)) {
5144 itrptr = &sc->sc_rx_itr;
5145 itr = IAVF_ITR_RX;
5146 } else {
5147 itrptr = &sc->sc_tx_itr;
5148 itr = IAVF_ITR_TX;
5149 }
5150
5151 newitr = *itrptr;
5152 node.sysctl_data = &newitr;
5153 node.sysctl_size = sizeof(newitr);
5154
5155 error = sysctl_lookup(SYSCTLFN_CALL(&node));
5156 if (error || newp == NULL)
5157 return error;
5158
5159 if (newitr > 0x07FF)
5160 return EINVAL;
5161
5162 *itrptr = newitr;
5163
5164 for (i = 0; i < sc->sc_nqueue_pairs; i++) {
5165 iavf_wr(sc, I40E_VFINT_ITRN1(itr, i), *itrptr);
5166 }
5167 iavf_wr(sc, I40E_VFINT_ITR01(itr), *itrptr);
5168
5169 return 0;
5170 }
5171
5172 static void
5173 iavf_workq_work(struct work *wk, void *context)
5174 {
5175 struct iavf_work *work;
5176
5177 work = container_of(wk, struct iavf_work, ixw_cookie);
5178
5179 atomic_swap_uint(&work->ixw_added, 0);
5180 work->ixw_func(work->ixw_arg);
5181 }
5182
5183 static struct workqueue *
5184 iavf_workq_create(const char *name, pri_t prio, int ipl, int flags)
5185 {
5186 struct workqueue *wq;
5187 int error;
5188
5189 error = workqueue_create(&wq, name, iavf_workq_work, NULL,
5190 prio, ipl, flags);
5191
5192 if (error)
5193 return NULL;
5194
5195 return wq;
5196 }
5197
5198 static void
5199 iavf_workq_destroy(struct workqueue *wq)
5200 {
5201
5202 workqueue_destroy(wq);
5203 }
5204
5205 static int
5206 iavf_work_set(struct iavf_work *work, void (*func)(void *), void *arg)
5207 {
5208
5209 if (work->ixw_added != 0)
5210 return -1;
5211
5212 memset(work, 0, sizeof(*work));
5213 work->ixw_func = func;
5214 work->ixw_arg = arg;
5215
5216 return 0;
5217 }
5218
5219 static void
5220 iavf_work_add(struct workqueue *wq, struct iavf_work *work)
5221 {
5222 if (atomic_cas_uint(&work->ixw_added, 0, 1) != 0)
5223 return;
5224
5225 kpreempt_disable();
5226 workqueue_enqueue(wq, &work->ixw_cookie, NULL);
5227 kpreempt_enable();
5228 }
5229
5230 static void
5231 iavf_work_wait(struct workqueue *wq, struct iavf_work *work)
5232 {
5233
5234 workqueue_wait(wq, &work->ixw_cookie);
5235 }
5236
5237 static void
5238 iavf_evcnt_attach(struct evcnt *ec,
5239 const char *n0, const char *n1)
5240 {
5241
5242 evcnt_attach_dynamic(ec, EVCNT_TYPE_MISC,
5243 NULL, n0, n1);
5244 }
5245
5246 MODULE(MODULE_CLASS_DRIVER, if_iavf, "pci");
5247
5248 #ifdef _MODULE
5249 #include "ioconf.c"
5250 #endif
5251
5252 #ifdef _MODULE
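/*
 * Parse module properties supplied at modload time (debug_level,
 * max_qps, tx_itr, rx_itr, tx_ndescs, rx_ndescs) and apply them to the
 * global iavf_params defaults.
 */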
5253 static void
5254 iavf_parse_modprop(prop_dictionary_t dict)
5255 {
5256 prop_object_t obj;
5257 int64_t val;
5258 uint32_t n;
5259
5260 if (dict == NULL)
5261 return;
5262
5263 obj = prop_dictionary_get(dict, "debug_level");
5264 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
5265 val = prop_number_signed_value((prop_number_t)obj);
5266
5267 if (val > 0) {
5268 iavf_params.debug = val;
5269 printf("iavf: debug level=%d\n", iavf_params.debug);
5270 }
5271 }
5272
5273 obj = prop_dictionary_get(dict, "max_qps");
5274 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
5275 val = prop_number_signed_value((prop_number_t)obj);
5276
5277 if (val < 1 || val > I40E_MAX_VF_QUEUES) {
5278 			printf("iavf: invalid queue size (1 <= n <= %d)\n",
5279 I40E_MAX_VF_QUEUES);
5280 } else {
5281 iavf_params.max_qps = val;
5282 printf("iavf: request queue pair = %u\n",
5283 iavf_params.max_qps);
5284 }
5285 }
5286
5287 obj = prop_dictionary_get(dict, "tx_itr");
5288 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
5289 val = prop_number_signed_value((prop_number_t)obj);
5290 if (val > 0x07FF) {
5291 			printf("iavf: TX ITR too big (%" PRId64 " <= %d)\n",
5292 val, 0x7FF);
5293 } else {
5294 iavf_params.tx_itr = val;
5295 			printf("iavf: TX ITR = 0x%" PRIx32 "\n",
5296 iavf_params.tx_itr);
5297 }
5298 }
5299
5300 obj = prop_dictionary_get(dict, "rx_itr");
5301 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
5302 val = prop_number_signed_value((prop_number_t)obj);
5303 if (val > 0x07FF) {
5304 			printf("iavf: RX ITR too big (%" PRId64 " <= %d)\n",
5305 val, 0x7FF);
5306 } else {
5307 iavf_params.rx_itr = val;
5308 			printf("iavf: RX ITR = 0x%" PRIx32 "\n",
5309 iavf_params.rx_itr);
5310 }
5311 }
5312
5313 obj = prop_dictionary_get(dict, "tx_ndescs");
5314 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
5315 val = prop_number_signed_value((prop_number_t)obj);
5316 n = 1U << (fls32(val) - 1);
5317 if (val != (int64_t) n) {
5318 			printf("iavf: TX desc invalid size "
5319 "(%" PRId64 " != %" PRIu32 ")\n", val, n);
5320 } else if (val > (8192 - 32)) {
5321 			printf("iavf: Tx desc too big (%" PRId64 " > %d)\n",
5322 val, (8192 - 32));
5323 } else {
5324 iavf_params.tx_ndescs = val;
5325 			printf("iavf: TX descriptors = 0x%04x\n",
5326 iavf_params.tx_ndescs);
5327 }
5328 }
5329
5330 obj = prop_dictionary_get(dict, "rx_ndescs");
5331 if (obj != NULL && prop_object_type(obj) == PROP_TYPE_NUMBER) {
5332 val = prop_number_signed_value((prop_number_t)obj);
5333 n = 1U << (fls32(val) - 1);
5334 if (val != (int64_t) n) {
5335 			printf("iavf: RX desc invalid size "
5336 "(%" PRId64 " != %" PRIu32 ")\n", val, n);
5337 } else if (val > (8192 - 32)) {
5338 			printf("iavf: Rx desc too big (%" PRId64 " > %d)\n",
5339 val, (8192 - 32));
5340 } else {
5341 iavf_params.rx_ndescs = val;
5342 			printf("iavf: RX descriptors = 0x%04x\n",
5343 iavf_params.rx_ndescs);
5344 }
5345 }
5346 }
5347 #endif
5348
5349 static int
5350 if_iavf_modcmd(modcmd_t cmd, void *opaque)
5351 {
5352 int error = 0;
5353
5354 #ifdef _MODULE
5355 switch (cmd) {
5356 case MODULE_CMD_INIT:
5357 iavf_parse_modprop((prop_dictionary_t)opaque);
5358 error = config_init_component(cfdriver_ioconf_if_iavf,
5359 cfattach_ioconf_if_iavf, cfdata_ioconf_if_iavf);
5360 break;
5361 case MODULE_CMD_FINI:
5362 error = config_fini_component(cfdriver_ioconf_if_iavf,
5363 cfattach_ioconf_if_iavf, cfdata_ioconf_if_iavf);
5364 break;
5365 default:
5366 error = ENOTTY;
5367 break;
5368 }
5369 #endif
5370
5371 return error;
5372 }
5373