1 /*	$OpenBSD: if_ixv.c,v 1.1 2024/11/02 04:37:20 yasuoka Exp $	*/
2 
3 /******************************************************************************
4 
5   Copyright (c) 2001-2017, Intel Corporation
6   All rights reserved.
7 
8   Redistribution and use in source and binary forms, with or without
9   modification, are permitted provided that the following conditions are met:
10 
11    1. Redistributions of source code must retain the above copyright notice,
12       this list of conditions and the following disclaimer.
13 
14    2. Redistributions in binary form must reproduce the above copyright
15       notice, this list of conditions and the following disclaimer in the
16       documentation and/or other materials provided with the distribution.
17 
18    3. Neither the name of the Intel Corporation nor the names of its
19       contributors may be used to endorse or promote products derived from
20       this software without specific prior written permission.
21 
22   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32   POSSIBILITY OF SUCH DAMAGE.
33 
34 ******************************************************************************/
35 
36 #include <dev/pci/if_ix.h>
37 #include <dev/pci/ixgbe_type.h>
38 #include <dev/pci/ixgbe.h>
39 
40 /************************************************************************
41  * Driver version
42  ************************************************************************/
43 char ixv_driver_version[] = "1.5.32";
44 
45 /************************************************************************
46  * PCI Device ID Table
47  *
48  *   Used by ixv_probe to select the devices to attach to
49  *
50  *   { Vendor ID, Device ID }
51  ************************************************************************/
52 const struct pci_matchid ixv_devices[] = {
53 	{PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599VF},
54 	{PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X540_VF},
55 	{PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550_VF},
56 	{PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_X_VF},
57 	{PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_A_VF}
58 };
59 
60 /************************************************************************
61  * Function prototypes
62  ************************************************************************/
63 static int	ixv_probe(struct device *, void *, void *);
64 static void	ixv_identify_hardware(struct ix_softc *sc);
65 static void	ixv_attach(struct device *, struct device *, void *);
66 static int	ixv_detach(struct device *, int);
67 static int	ixv_ioctl(struct ifnet *, u_long, caddr_t);
68 static void	ixv_watchdog(struct ifnet *);
69 static void	ixv_init(struct ix_softc *);
70 static void	ixv_stop(void *);
71 static int	ixv_allocate_msix(struct ix_softc *);
72 static void	ixv_setup_interface(struct device *, struct ix_softc *);
73 static int	ixv_negotiate_api(struct ix_softc *);
74 
75 static void	ixv_initialize_transmit_units(struct ix_softc *);
76 static void	ixv_initialize_receive_units(struct ix_softc *);
77 static void	ixv_initialize_rss_mapping(struct ix_softc *);
78 
79 static void	ixv_enable_intr(struct ix_softc *);
80 static void	ixv_disable_intr(struct ix_softc *);
81 static void	ixv_iff(struct ix_softc *);
82 static void	ixv_set_ivar(struct ix_softc *, uint8_t, uint8_t, int8_t);
83 static void	ixv_configure_ivars(struct ix_softc *);
84 static uint8_t *ixv_mc_array_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);
85 
86 static void	ixv_setup_vlan_support(struct ix_softc *);
87 
88 /* The MSI-X Interrupt handlers */
89 static int	ixv_msix_que(void *);
90 static int	ixv_msix_mbx(void *);
91 
92 /* Functions shared between ixv and ix. */
93 void    ixgbe_start(struct ifqueue *ifq);
94 int	ixgbe_activate(struct device *, int);
95 int	ixgbe_allocate_queues(struct ix_softc *);
96 int	ixgbe_setup_transmit_structures(struct ix_softc *);
97 int	ixgbe_setup_receive_structures(struct ix_softc *);
98 void	ixgbe_free_transmit_structures(struct ix_softc *);
99 void	ixgbe_free_receive_structures(struct ix_softc *);
100 int	ixgbe_txeof(struct ix_txring *);
101 int	ixgbe_rxeof(struct ix_rxring *);
102 void	ixgbe_rxrefill(void *);
103 void    ixgbe_update_link_status(struct ix_softc *);
104 int     ixgbe_allocate_pci_resources(struct ix_softc *);
105 void    ixgbe_free_pci_resources(struct ix_softc *);
106 void	ixgbe_media_status(struct ifnet *, struct ifmediareq *);
107 int	ixgbe_media_change(struct ifnet *);
108 void	ixgbe_add_media_types(struct ix_softc *);
109 int	ixgbe_get_sffpage(struct ix_softc *, struct if_sffpage *);
110 int	ixgbe_rxrinfo(struct ix_softc *, struct if_rxrinfo *);
111 
112 #if NKSTAT > 0
113 static void	ixv_kstats(struct ix_softc *);
114 static void	ixv_rxq_kstats(struct ix_softc *, struct ix_rxring *);
115 static void	ixv_txq_kstats(struct ix_softc *, struct ix_txring *);
116 static void	ixv_kstats_tick(void *);
117 #endif
118 
119 /************************************************************************
120  * Value Definitions
121  ************************************************************************/
122 /*
123   Default value for the Extended Interrupt Throttling Register.
124   128 * 2.048 usec will be the minimum interrupt interval for a 10GbE link.
125   The minimum interrupt interval can be set from 0 to 2044 in increments of 4.
126  */
127 #define IXGBE_EITR_DEFAULT              128
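
/*
 * Per the note above, the default of 128 corresponds to a minimum
 * interrupt interval of 128 * 2.048 usec ~= 262 usec, i.e. at most
 * roughly 3800 interrupts per second per vector on a 10GbE link.
 */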
128 
129 /*********************************************************************
130  *  OpenBSD Device Interface Entry Points
131  *********************************************************************/
132 
133 struct cfdriver ixv_cd = {
134 	NULL, "ixv", DV_IFNET
135 };
136 
137 const struct cfattach ixv_ca = {
138 	sizeof(struct ix_softc), ixv_probe, ixv_attach, ixv_detach,
139 	ixgbe_activate
140 };
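
/*
 * Autoconf(9) glue: ixv_cd names the "ixv" network interface class
 * (DV_IFNET) and ixv_ca binds the match/attach/detach/activate entry
 * points to it.  The corresponding kernel config hook would look
 * something like the sketch below (illustrative only, not copied
 * from the tree):
 *
 *	device	ixv: ether, ifnet, ifmedia, intrmap, stoeplitz
 *	attach	ixv at pci
 *	file	dev/pci/if_ixv.c	ixv
 */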
141 
142 /************************************************************************
143  * ixv_probe - Device identification routine
144  *
145  *   Determines if the driver should be loaded on
146  *   adapter based on its PCI vendor/device ID.
147  *
148  *   return nonzero on a supported device, 0 otherwise
149  ************************************************************************/
150 static int
151 ixv_probe(struct device *parent, void *match, void *aux)
152 {
153 	INIT_DEBUGOUT("ixv_probe: begin");
154 
155 	return (pci_matchbyid((struct pci_attach_args *)aux, ixv_devices,
156 	    nitems(ixv_devices)));
157 }
158 
159 /*********************************************************************
160  *
161  *  Determine hardware revision.
162  *
163  **********************************************************************/
164 static void
165 ixv_identify_hardware(struct ix_softc *sc)
166 {
167 	struct ixgbe_osdep	*os = &sc->osdep;
168 	struct pci_attach_args	*pa = &os->os_pa;
169 	uint32_t		 reg;
170 
171 	/* Save off the information about this board */
172 	sc->hw.vendor_id = PCI_VENDOR(pa->pa_id);
173 	sc->hw.device_id = PCI_PRODUCT(pa->pa_id);
174 
175 	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
176 	sc->hw.revision_id = PCI_REVISION(reg);
177 
178 	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
179 	sc->hw.subsystem_vendor_id = PCI_VENDOR(reg);
180 	sc->hw.subsystem_device_id = PCI_PRODUCT(reg);
181 
182 	sc->num_segs = IXGBE_82599_SCATTER;
183 }
184 
185 /************************************************************************
186  * ixv_attach - Device initialization routine
187  *
188  *   Called when the driver is being loaded.
189  *   Identifies the type of hardware, allocates all resources
190  *   and initializes the hardware.
191  *
192  *   Returns nothing; on failure the allocated resources are freed
193  ************************************************************************/
194 static void
195 ixv_attach(struct device *parent, struct device *self, void *aux)
196 {
197 	struct pci_attach_args	*pa = (struct pci_attach_args *)aux;
198 	struct ix_softc		*sc = (struct ix_softc *)self;
199 	struct ixgbe_hw *hw;
200 	int	error;
201 
202 	INIT_DEBUGOUT("ixv_attach: begin");
203 
204 	sc->osdep.os_sc = sc;
205 	sc->osdep.os_pa = *pa;
206 
207 	rw_init(&sc->sfflock, "ixvsff");
208 
209 	/* Allocate, clear, and link in our adapter structure */
210 	sc->dev = *self;
211 	sc->hw.back = sc;
212 	hw = &sc->hw;
213 
214 	/* Set the default number of TX and RX descriptors */
215 	sc->num_tx_desc = DEFAULT_TXD;
216 	sc->num_rx_desc = DEFAULT_RXD;
217 
218 	ixv_identify_hardware(sc);
219 
220 #if NKSTAT > 0
221 	ixv_kstats(sc);
222 #endif
223 
224 	/* Allocate multicast array memory */
225 	sc->mta = mallocarray(IXGBE_ETH_LENGTH_OF_ADDRESS,
226 	    IXGBE_MAX_MULTICAST_ADDRESSES_VF, M_DEVBUF, M_NOWAIT);
227 	if (sc->mta == NULL) {
228 		printf("Can not allocate multicast setup array\n");
229 		return;
230 	}
231 
232 	/* Do base PCI setup - map BAR0 */
233 	if (ixgbe_allocate_pci_resources(sc)) {
234 		printf("ixgbe_allocate_pci_resources() failed!\n");
235 		goto err_out;
236 	}
237 
238 	/* Allocate our TX/RX Queues */
239 	if (ixgbe_allocate_queues(sc)) {
240 		printf("ixgbe_allocate_queues() failed!\n");
241 		goto err_out;
242 	}
243 
244 	/* A subset of set_mac_type */
245 	switch (hw->device_id) {
246 	case IXGBE_DEV_ID_82599_VF:
247 		hw->mac.type = ixgbe_mac_82599_vf;
248 		break;
249 	case IXGBE_DEV_ID_X540_VF:
250 		hw->mac.type = ixgbe_mac_X540_vf;
251 		break;
252 	case IXGBE_DEV_ID_X550_VF:
253 		hw->mac.type = ixgbe_mac_X550_vf;
254 		break;
255 	case IXGBE_DEV_ID_X550EM_X_VF:
256 		hw->mac.type = ixgbe_mac_X550EM_x_vf;
257 		break;
258 	case IXGBE_DEV_ID_X550EM_A_VF:
259 		hw->mac.type = ixgbe_mac_X550EM_a_vf;
260 		break;
261 	default:
262 		/* Shouldn't get here since probe succeeded */
263 		printf("Unknown device ID!\n");
264 		goto err_out;
265 	}
266 
267 	/* Initialize the shared code */
268 	if (ixgbe_init_ops_vf(hw)) {
269 		printf("ixgbe_init_ops_vf() failed!\n");
270 		goto err_out;
271 	}
272 
273 	/* Setup the mailbox */
274 	ixgbe_init_mbx_params_vf(hw);
275 
276 	/* Set the right number of segments */
277 	sc->num_segs = IXGBE_82599_SCATTER;
278 
279 	error = hw->mac.ops.reset_hw(hw);
280 	switch (error) {
281 	case 0:
282 		break;
283 	case IXGBE_ERR_RESET_FAILED:
284 		printf("...reset_hw() failure: Reset Failed!\n");
285 		goto err_out;
286 	default:
287 		printf("...reset_hw() failed with error %d\n",
288 		    error);
289 		goto err_out;
290 	}
291 
292 	error = hw->mac.ops.init_hw(hw);
293 	if (error) {
294 		printf("...init_hw() failed with error %d\n",
295 		    error);
296 		goto err_out;
297 	}
298 
299 	/* Negotiate mailbox API version */
300 	if (ixv_negotiate_api(sc)) {
301 		printf("Mailbox API negotiation failed during attach!\n");
302 		goto err_out;
303 	}
304 
305 	/* If no mac address was assigned, make a random one */
306 	if (memcmp(hw->mac.addr, etheranyaddr, ETHER_ADDR_LEN) == 0) {
307 		ether_fakeaddr(&sc->arpcom.ac_if);
308 		bcopy(sc->arpcom.ac_enaddr, hw->mac.addr, ETHER_ADDR_LEN);
309 		bcopy(sc->arpcom.ac_enaddr, hw->mac.perm_addr, ETHER_ADDR_LEN);
310 	} else
311 		bcopy(hw->mac.addr, sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
312 
313 	/* Setup OS specific network interface */
314 	ixv_setup_interface(self, sc);
315 
316 	/* Setup MSI-X */
317 	if (ixv_allocate_msix(sc)) {
318 		printf("ixv_allocate_msix() failed!\n");
319 		goto err_late;
320 	}
321 
322 	/* Check if VF was disabled by PF */
323 	if (hw->mac.ops.get_link_state(hw, &sc->link_enabled)) {
324 		/* PF is not capable of controlling VF state. Enable the link. */
325 		sc->link_enabled = TRUE;
326 	}
327 
328 	/* Set an initial default flow control value */
329 	sc->fc = ixgbe_fc_full;
330 
331 	INIT_DEBUGOUT("ixv_attach: end");
332 
333 	return;
334 
335 err_late:
336 	ixgbe_free_transmit_structures(sc);
337 	ixgbe_free_receive_structures(sc);
338 err_out:
339 	ixgbe_free_pci_resources(sc);
340 	free(sc->mta, M_DEVBUF, IXGBE_ETH_LENGTH_OF_ADDRESS *
341 	     IXGBE_MAX_MULTICAST_ADDRESSES_VF);
342 } /* ixv_attach */
343 
344 /************************************************************************
345  * ixv_detach - Device removal routine
346  *
347  *   Called when the driver is being removed.
348  *   Stops the adapter and deallocates all the resources
349  *   that were allocated for driver operation.
350  *
351  *   return 0 on success, positive on failure
352  ************************************************************************/
353 static int
354 ixv_detach(struct device *self, int flags)
355 {
356 	struct ix_softc *sc = (struct ix_softc *)self;
357 	struct ifnet *ifp = &sc->arpcom.ac_if;
358 
359 	INIT_DEBUGOUT("ixv_detach: begin");
360 
361 	ixv_stop(sc);
362 	ether_ifdetach(ifp);
363 	if_detach(ifp);
364 
365 	free(sc->mta, M_DEVBUF, IXGBE_ETH_LENGTH_OF_ADDRESS *
366 	     IXGBE_MAX_MULTICAST_ADDRESSES_VF);
367 
368 	ixgbe_free_pci_resources(sc);
369 
370 	ixgbe_free_transmit_structures(sc);
371 	ixgbe_free_receive_structures(sc);
372 
373 	return (0);
374 } /* ixv_detach */
375 
376 /*********************************************************************
377  *  Watchdog entry point
378  *
379  **********************************************************************/
380 static void
381 ixv_watchdog(struct ifnet * ifp)
382 {
383 	struct ix_softc *sc = (struct ix_softc *)ifp->if_softc;
384 	struct ix_txring *txr = sc->tx_rings;
385 	struct ixgbe_hw *hw = &sc->hw;
386 	int		tx_hang = FALSE;
387 	int		i;
388 
389 	/*
390 	 * The timer is set to 5 every time ixgbe_start() queues a packet.
391 	 * Anytime all descriptors are clean the timer is set to 0.
392 	 */
393 	for (i = 0; i < sc->num_queues; i++, txr++) {
394 		if (txr->watchdog_timer == 0 || --txr->watchdog_timer)
395 			continue;
396 		else {
397 			tx_hang = TRUE;
398 			break;
399 		}
400 	}
401 	if (tx_hang == FALSE)
402 		return;
403 
404 
405 	printf("%s: Watchdog timeout -- resetting\n", ifp->if_xname);
406 	for (i = 0, txr = sc->tx_rings; i < sc->num_queues; i++, txr++) {
407 		printf("%s: Queue(%d) tdh = %d, hw tdt = %d\n", ifp->if_xname, i,
408 		    IXGBE_READ_REG(hw, IXGBE_VFTDH(i)),
409 		    IXGBE_READ_REG(hw, txr->tail));
410 		printf("%s: TX(%d) Next TX to Clean = %d\n", ifp->if_xname,
411 		    i, txr->next_to_clean);
412 	}
413 	ifp->if_flags &= ~IFF_RUNNING;
414 
415 	ixv_init(sc);
416 }
417 
418 /************************************************************************
419  * ixv_init - Init entry point
420  *
421  *   Used in two ways: It is used by the stack as an init entry
422  *   point in network interface structure. It is also used
423  *   by the driver as a hw/sw initialization routine to get
424  *   to a consistent state.
425  *
426  *   Returns nothing; on failure the interface is stopped
427  ************************************************************************/
428 static void
429 ixv_init(struct ix_softc *sc)
430 {
431 	struct ifnet    *ifp = &sc->arpcom.ac_if;
432 	struct ixgbe_hw *hw = &sc->hw;
433 	struct ix_queue *que = sc->queues;
434 	uint32_t        mask;
435 	int             i, s, error = 0;
436 
437 	INIT_DEBUGOUT("ixv_init: begin");
438 
439 	s = splnet();
440 
441 	hw->adapter_stopped = FALSE;
442 	hw->mac.ops.stop_adapter(hw);
443 
444 	/* reprogram the RAR[0] in case user changed it. */
445 	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
446 
447 	/* Get the latest MAC address; the user may have set an LAA */
448 	bcopy(sc->arpcom.ac_enaddr, sc->hw.mac.addr,
449 	    IXGBE_ETH_LENGTH_OF_ADDRESS);
450 
451 	sc->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
452 
453 	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);
454 
455 	/* Prepare transmit descriptors and buffers */
456 	if (ixgbe_setup_transmit_structures(sc)) {
457 		printf("Could not setup transmit structures\n");
458 		ixv_stop(sc);
459 		splx(s);
460 		return;
461 	}
462 
463 	/* Reset VF and renegotiate mailbox API version */
464 	hw->mac.ops.reset_hw(hw);
465 	error = ixv_negotiate_api(sc);
466 	if (error) {
467 		printf("Mailbox API negotiation failed in init!\n");
468 		splx(s);
469 		return;
470 	}
471 
472 	ixv_initialize_transmit_units(sc);
473 
474 	/* Setup Multicast table */
475 	ixv_iff(sc);
476 
477 	/* Use 2k clusters, even for jumbo frames */
478 	sc->rx_mbuf_sz = MCLBYTES + ETHER_ALIGN;
479 
480 	/* Prepare receive descriptors and buffers */
481 	if (ixgbe_setup_receive_structures(sc)) {
482 		printf("Could not setup receive structures\n");
483 		ixv_stop(sc);
484 		splx(s);
485 		return;
486 	}
487 
488 	/* Configure RX settings */
489 	ixv_initialize_receive_units(sc);
490 
491 	/* Set up VLAN offload and filter */
492 	ixv_setup_vlan_support(sc);
493 
494 	/* Set up MSI-X routing */
495 	ixv_configure_ivars(sc);
496 
497 	/* Set up auto-mask */
498 	mask = (1 << sc->linkvec);
499 	for (i = 0; i < sc->num_queues; i++, que++)
500 		mask |= (1 << que->msix);
501 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, mask);
502 
503 	/* Set moderation on the Link interrupt */
504 	IXGBE_WRITE_REG(&sc->hw, IXGBE_VTEITR(sc->linkvec),
505 			IXGBE_LINK_ITR);
506 
507 	/* Config/Enable Link */
508 	error = hw->mac.ops.get_link_state(hw, &sc->link_enabled);
509 	if (error) {
510 		/* PF is not capable of controlling VF state. Enable the link. */
511 		sc->link_enabled = TRUE;
512 	} else if (sc->link_enabled == FALSE)
513 		printf("VF is disabled by PF\n");
514 
515 	hw->mac.ops.check_link(hw, &sc->link_speed, &sc->link_up,
516 	    FALSE);
517 
518 	/* And now turn on interrupts */
519 	ixv_enable_intr(sc);
520 
521 	/* Now inform the stack we're ready */
522 	ifp->if_flags |= IFF_RUNNING;
523 	for (i = 0; i < sc->num_queues; i++)
524 		ifq_clr_oactive(ifp->if_ifqs[i]);
525 
526 	splx(s);
527 } /* ixv_init */
528 
529 /*
530  * MSI-X Interrupt Handlers and Tasklets
531  */
532 
533 static inline void
534 ixv_enable_queue(struct ix_softc *sc, uint32_t vector)
535 {
536 	struct ixgbe_hw *hw = &sc->hw;
537 	uint32_t             queue = 1 << vector;
538 	uint32_t             mask;
539 
540 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
541 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
542 } /* ixv_enable_queue */
543 
544 static inline void
545 ixv_disable_queue(struct ix_softc *sc, uint32_t vector)
546 {
547 	struct ixgbe_hw *hw = &sc->hw;
548 	uint64_t             queue = (1ULL << vector);
549 	uint32_t             mask;
550 
551 	mask = (IXGBE_EIMS_RTX_QUEUE & queue);
552 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
553 } /* ixv_disable_queue */
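
/*
 * VTEIMS is a write-1-to-set interrupt mask-enable register and VTEIMC
 * is its write-1-to-clear counterpart, so the two helpers above can
 * flip a single vector without a read-modify-write race.  For example,
 * vector 1 yields queue = 0x2, and because IXGBE_EIMS_RTX_QUEUE covers
 * only the queue bits, writing mask 0x2 to VTEIMC masks queue vector 1
 * while the mailbox vector stays armed.
 */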
554 
555 /************************************************************************
556  * ixv_msix_que - MSI-X Queue Interrupt Service routine
557  ************************************************************************/
558 static int
559 ixv_msix_que(void *arg)
560 {
561 	struct ix_queue  *que = arg;
562 	struct ix_softc  *sc = que->sc;
563 	struct ifnet     *ifp = &sc->arpcom.ac_if;
564 	struct ix_txring *txr = que->txr;
565 	struct ix_rxring *rxr = que->rxr;
566 
567 	if ((ifp->if_flags & IFF_RUNNING) == 0)
568 		return 1;
569 
570 	ixv_disable_queue(sc, que->msix);
571 
572 	ixgbe_rxeof(rxr);
573 	ixgbe_txeof(txr);
574 	ixgbe_rxrefill(rxr);
575 
576 	/* Reenable this interrupt */
577 	ixv_enable_queue(sc, que->msix);
578 
579 	return 1;
580 } /* ixv_msix_que */
581 
582 
583 /************************************************************************
584  * ixv_msix_mbx
585  ************************************************************************/
586 static int
587 ixv_msix_mbx(void *arg)
588 {
589 	struct ix_softc  *sc = arg;
590 	struct ixgbe_hw *hw = &sc->hw;
591 
592 	sc->hw.mac.get_link_status = TRUE;
593 	KERNEL_LOCK();
594 	ixgbe_update_link_status(sc);
595 	KERNEL_UNLOCK();
596 
597 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << sc->linkvec));
598 
599 
600 	return 1;
601 } /* ixv_msix_mbx */
602 
603 /************************************************************************
604  * ixv_negotiate_api
605  *
606  *   Negotiate the Mailbox API with the PF;
607  *   start with the most featured API first.
608  ************************************************************************/
609 static int
610 ixv_negotiate_api(struct ix_softc *sc)
611 {
612 	struct ixgbe_hw *hw = &sc->hw;
613 	int             mbx_api[] = { ixgbe_mbox_api_12,
614 	                              ixgbe_mbox_api_11,
615 	                              ixgbe_mbox_api_10,
616 	                              ixgbe_mbox_api_unknown };
617 	int             i = 0;
618 
619 	while (mbx_api[i] != ixgbe_mbox_api_unknown) {
620 		if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
621 			return (0);
622 		i++;
623 	}
624 
625 	return (EINVAL);
626 } /* ixv_negotiate_api */
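
/*
 * Note that this negotiation runs both at attach time and again from
 * ixv_init(): the VF reset performed there invalidates the previously
 * negotiated API version, which is why ixv_init() calls this routine
 * right after hw->mac.ops.reset_hw().
 */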
627 
628 
629 /************************************************************************
630  * ixv_iff - Multicast Update
631  *
632  *   Called whenever multicast address list is updated.
633  ************************************************************************/
634 static void
635 ixv_iff(struct ix_softc *sc)
636 {
637 	struct ifnet       *ifp = &sc->arpcom.ac_if;
638 	struct ixgbe_hw    *hw = &sc->hw;
639 	struct arpcom      *ac = &sc->arpcom;
640 	uint8_t            *mta, *update_ptr;
641 	struct ether_multi *enm;
642 	struct ether_multistep step;
643 	int                xcast_mode, mcnt = 0;
644 
645 	IOCTL_DEBUGOUT("ixv_iff: begin");
646 
647 	mta = sc->mta;
648 	bzero(mta, sizeof(uint8_t) * IXGBE_ETH_LENGTH_OF_ADDRESS *
649 	      IXGBE_MAX_MULTICAST_ADDRESSES_VF);
650 
651 	ifp->if_flags &= ~IFF_ALLMULTI;
652 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0 ||
653 	    ac->ac_multicnt > IXGBE_MAX_MULTICAST_ADDRESSES_VF) {
654 		ifp->if_flags |= IFF_ALLMULTI;
655 	} else {
656 		ETHER_FIRST_MULTI(step, &sc->arpcom, enm);
657 		while (enm != NULL) {
658 			bcopy(enm->enm_addrlo,
659 			      &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
660 			      IXGBE_ETH_LENGTH_OF_ADDRESS);
661 			mcnt++;
662 
663 			ETHER_NEXT_MULTI(step, enm);
664 		}
665 
666 		update_ptr = mta;
667 		hw->mac.ops.update_mc_addr_list(hw, update_ptr, mcnt,
668 						ixv_mc_array_itr, TRUE);
669 	}
670 
671 	/* request the most inclusive mode we need */
672 	if (ISSET(ifp->if_flags, IFF_PROMISC))
673 		xcast_mode = IXGBEVF_XCAST_MODE_PROMISC;
674 	else if (ISSET(ifp->if_flags, IFF_ALLMULTI))
675 		xcast_mode = IXGBEVF_XCAST_MODE_ALLMULTI;
676 	else if (ISSET(ifp->if_flags, (IFF_BROADCAST | IFF_MULTICAST)))
677 		xcast_mode = IXGBEVF_XCAST_MODE_MULTI;
678 	else
679 		xcast_mode = IXGBEVF_XCAST_MODE_NONE;
680 
681 	hw->mac.ops.update_xcast_mode(hw, xcast_mode);
682 
683 
684 } /* ixv_iff */
685 
686 /************************************************************************
687  * ixv_mc_array_itr
688  *
689  *   An iterator function needed by the multicast shared code.
690  *   It feeds the shared code routine the addresses in the
691  *   array of ixv_iff() one by one.
692  ************************************************************************/
693 static uint8_t *
694 ixv_mc_array_itr(struct ixgbe_hw *hw, uint8_t **update_ptr, uint32_t *vmdq)
695 {
696 	uint8_t *mta = *update_ptr;
697 
698 	*vmdq = 0;
699 	*update_ptr = mta + IXGBE_ETH_LENGTH_OF_ADDRESS;
700 
701 	return (mta);
702 } /* ixv_mc_array_itr */
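
/*
 * A sketch of how the shared mailbox code consumes this iterator (the
 * loop below is illustrative only; the real one lives in the shared
 * VF code, and "next" is the function pointer that ixv_iff() passes
 * to update_mc_addr_list):
 *
 *	uint8_t *ptr = mta;
 *	for (i = 0; i < mcnt; i++) {
 *		addr = next(hw, &ptr, &vmdq);
 *		(addr points at the current 6-byte entry, ptr has
 *		 advanced to the next one; addr is then packed into
 *		 the mailbox message for the PF)
 *	}
 */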
703 
704 /************************************************************************
705  * ixv_stop - Stop the hardware
706  *
707  *   Disables all traffic on the adapter by issuing a
708  *   global reset on the MAC and deallocates TX/RX buffers.
709  ************************************************************************/
710 static void
711 ixv_stop(void *arg)
712 {
713 	struct ix_softc  *sc = arg;
714 	struct ifnet *ifp = &sc->arpcom.ac_if;
715 	struct ixgbe_hw *hw = &sc->hw;
716 	int i;
717 
718 	INIT_DEBUGOUT("ixv_stop: begin\n");
719 #if NKSTAT > 0
720 	timeout_del(&sc->sc_kstat_tmo);
721 #endif
722 	ixv_disable_intr(sc);
723 
724 
725 	/* Tell the stack that the interface is no longer active */
726 	ifp->if_flags &= ~IFF_RUNNING;
727 
728 	hw->mac.ops.reset_hw(hw);
729 	sc->hw.adapter_stopped = FALSE;
730 	hw->mac.ops.stop_adapter(hw);
731 
732 	/* reprogram the RAR[0] in case user changed it. */
733 	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
734 
735 	intr_barrier(sc->tag);
736 	for (i = 0; i < sc->num_queues; i++) {
737 		struct ifqueue *ifq = ifp->if_ifqs[i];
738 		ifq_barrier(ifq);
739 		ifq_clr_oactive(ifq);
740 
741 		if (sc->queues[i].tag != NULL)
742 			intr_barrier(sc->queues[i].tag);
743 		timeout_del(&sc->rx_rings[i].rx_refill);
744 	}
745 
746 	KASSERT((ifp->if_flags & IFF_RUNNING) == 0);
747 
748 	/* Should we really clear all structures on stop? */
749 	ixgbe_free_transmit_structures(sc);
750 	ixgbe_free_receive_structures(sc);
751 
752 	ixgbe_update_link_status(sc);
753 } /* ixv_stop */
754 
755 /************************************************************************
756  * ixv_setup_interface
757  *
758  *   Setup networking device structure and register an interface.
759  ************************************************************************/
760 static void
761 ixv_setup_interface(struct device *dev, struct ix_softc *sc)
762 {
763 	struct ifnet *ifp;
764 	int i;
765 
766 	ifp = &sc->arpcom.ac_if;
767 
768 	strlcpy(ifp->if_xname, sc->dev.dv_xname, IFNAMSIZ);
769 	ifp->if_softc = sc;
770 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
771 	ifp->if_xflags = IFXF_MPSAFE;
772 	ifp->if_ioctl = ixv_ioctl;
773 	ifp->if_qstart = ixgbe_start;
774 	ifp->if_timer = 0;
775 	ifp->if_watchdog = ixv_watchdog;
776 	ifp->if_hardmtu = IXGBE_MAX_FRAME_SIZE -
777 	    ETHER_HDR_LEN - ETHER_CRC_LEN;
778 	ifq_init_maxlen(&ifp->if_snd, sc->num_tx_desc - 1);
779 
780 	ifp->if_capabilities = IFCAP_VLAN_MTU;
781 
782 #if NVLAN > 0
783 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
784 #endif
785 
786 	ifp->if_capabilities |= IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
787 	ifp->if_capabilities |= IFCAP_CSUM_TCPv6 | IFCAP_CSUM_UDPv6;
788 	ifp->if_capabilities |= IFCAP_CSUM_IPv4;
789 
790 	/*
791 	 * Specify the media types supported by this sc and register
792 	 * callbacks to update media and link information
793 	 */
794 	ifmedia_init(&sc->media, IFM_IMASK, ixgbe_media_change,
795 	    ixgbe_media_status);
796 	ixgbe_add_media_types(sc);
797 	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
798 
799 	if_attach(ifp);
800 	ether_ifattach(ifp);
801 
802 	if_attach_queues(ifp, sc->num_queues);
803 	if_attach_iqueues(ifp, sc->num_queues);
804 	for (i = 0; i < sc->num_queues; i++) {
805 		struct ifqueue *ifq = ifp->if_ifqs[i];
806 		struct ifiqueue *ifiq = ifp->if_iqs[i];
807 		struct ix_txring *txr = &sc->tx_rings[i];
808 		struct ix_rxring *rxr = &sc->rx_rings[i];
809 
810 		ifq->ifq_softc = txr;
811 		txr->ifq = ifq;
812 
813 		ifiq->ifiq_softc = rxr;
814 		rxr->ifiq = ifiq;
815 
816 #if NKSTAT > 0
817 		ixv_txq_kstats(sc, txr);
818 		ixv_rxq_kstats(sc, rxr);
819 #endif
820 	}
821 
822 	sc->max_frame_size = IXGBE_MAX_FRAME_SIZE;
823 } /* ixv_setup_interface */
824 
825 /************************************************************************
826  * ixv_initialize_transmit_units - Enable transmit unit.
827  ************************************************************************/
828 static void
829 ixv_initialize_transmit_units(struct ix_softc *sc)
830 {
831 	struct ifnet     *ifp = &sc->arpcom.ac_if;
832 	struct ix_txring *txr;
833 	struct ixgbe_hw  *hw = &sc->hw;
834 	uint64_t tdba;
835 	uint32_t txctrl, txdctl;
836 	int i;
837 
838 	for (i = 0; i < sc->num_queues; i++) {
839 		txr = &sc->tx_rings[i];
840 		tdba = txr->txdma.dma_map->dm_segs[0].ds_addr;
841 
842 		/* Set WTHRESH to 8, burst writeback */
843 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
844 		txdctl |= (8 << 16);
845 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
846 
847 		/* Set Tx Tail register */
848 		txr->tail = IXGBE_VFTDT(i);
849 
850 		/* Set the HW Tx Head and Tail indices */
851 		IXGBE_WRITE_REG(&sc->hw, IXGBE_VFTDH(i), 0);
852 		IXGBE_WRITE_REG(&sc->hw, txr->tail, 0);
853 
854 		/* Setup Transmit Descriptor Cmd Settings */
855 		txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
856 		txr->queue_status = IXGBE_QUEUE_IDLE;
857 		txr->watchdog_timer = 0;
858 
859 		/* Set Ring parameters */
860 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
861 		    (tdba & 0x00000000ffffffffULL));
862 		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
863 		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
864 		    sc->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
865 		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
866 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
867 		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
868 
869 		/* Now enable */
870 		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
871 		txdctl |= IXGBE_TXDCTL_ENABLE;
872 		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
873 	}
874 	ifp->if_timer = 0;
875 
876 	return;
877 } /* ixv_initialize_transmit_units */
878 
879 /************************************************************************
880  * ixv_initialize_rss_mapping
881  ************************************************************************/
882 static void
883 ixv_initialize_rss_mapping(struct ix_softc *sc)
884 {
885 	struct ixgbe_hw *hw = &sc->hw;
886 	uint32_t             reta = 0, mrqc, rss_key[10];
887 	int             queue_id;
888 	int             i, j;
889 
890 	/* Fetch the system Toeplitz key to seed the hash function */
891 	stoeplitz_to_key(&rss_key, sizeof(rss_key));
892 
893 	/* Now fill out hash function seeds */
894 	for (i = 0; i < 10; i++)
895 		IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);
896 
897 	/* Set up the redirection table */
898 	for (i = 0, j = 0; i < 64; i++, j++) {
899 		if (j == sc->num_queues)
900 			j = 0;
901 
902 		/*
903 		 * Pick the RSS bucket id for the given indirection
904 		 * entry: assign the queues round-robin, capped at
905 		 * the number of configured buckets (num_queues.)
906 		 */
907 		queue_id = j % sc->num_queues;
908 
909 		/*
910 		 * The low 8 bits are for hash value (n+0);
911 		 * The next 8 bits are for hash value (n+1), etc.
912 		 */
913 		reta >>= 8;
914 		reta |= ((uint32_t)queue_id) << 24;
915 		if ((i & 3) == 3) {
916 			IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
917 			reta = 0;
918 		}
919 	}
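
	/*
	 * Worked example: with num_queues == 2 the loop above assigns
	 * queue ids 0,1,0,1,... so every VFRETA(n) register is written
	 * as 0x01000100 (indirection entry 4n in bits 7:0, entry 4n+3
	 * in bits 31:24).
	 */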
920 
921 	/*
922 	 * Disable UDP - IP fragments aren't currently being handled
923 	 * and so we end up with a mix of 2-tuple and 4-tuple
924 	 * traffic.
925 	 */
926 	mrqc = IXGBE_MRQC_RSSEN
927 		| IXGBE_MRQC_RSS_FIELD_IPV4
928 		| IXGBE_MRQC_RSS_FIELD_IPV4_TCP
929 		| IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
930 		| IXGBE_MRQC_RSS_FIELD_IPV6_EX
931 		| IXGBE_MRQC_RSS_FIELD_IPV6
932 		| IXGBE_MRQC_RSS_FIELD_IPV6_TCP
933 	;
934 	IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
935 } /* ixv_initialize_rss_mapping */
936 
937 
938 /************************************************************************
939  * ixv_initialize_receive_units - Setup receive registers and features.
940  ************************************************************************/
941 static void
942 ixv_initialize_receive_units(struct ix_softc *sc)
943 {
944 	struct ix_rxring *rxr = sc->rx_rings;
945 	struct ixgbe_hw  *hw = &sc->hw;
946 	uint64_t          rdba;
947 	uint32_t          reg, rxdctl, bufsz, psrtype;
948 	int               i, j, k;
949 
950 	bufsz = (sc->rx_mbuf_sz - ETHER_ALIGN) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
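	/*
	 * E.g. with the default 2k clusters set up in ixv_init()
	 * (rx_mbuf_sz = MCLBYTES + ETHER_ALIGN = 2050), bufsz works
	 * out to (2050 - 2) >> 10 = 2, since SRRCTL.BSIZEPKT counts
	 * packet buffer space in 1 KB units.
	 */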
951 
952 	psrtype = IXGBE_PSRTYPE_TCPHDR
953 	        | IXGBE_PSRTYPE_UDPHDR
954 	        | IXGBE_PSRTYPE_IPV4HDR
955 	        | IXGBE_PSRTYPE_IPV6HDR
956 	        | IXGBE_PSRTYPE_L2HDR;
957 
958 	if (sc->num_queues > 1)
959 		psrtype |= 1 << 29;
960 
961 	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
962 
963 	/* Tell PF our max_frame size */
964 	if (ixgbevf_rlpml_set_vf(hw, sc->max_frame_size) != 0) {
965 		printf("There is a problem with the PF setup."
966 		       "  It is likely the receive unit for this VF will not function correctly.\n");
967 	}
968 
969 	for (i = 0; i < sc->num_queues; i++, rxr++) {
970 		rdba = rxr->rxdma.dma_map->dm_segs[0].ds_addr;
971 
972 		/* Disable the queue */
973 		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
974 		rxdctl &= ~IXGBE_RXDCTL_ENABLE;
975 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
976 		for (j = 0; j < 10; j++) {
977 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
978 			    IXGBE_RXDCTL_ENABLE)
979 				msec_delay(1);
980 			else
981 				break;
982 		}
983 
984 		/* Setup the Base and Length of the Rx Descriptor Ring */
985 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
986 		    (rdba & 0x00000000ffffffffULL));
987 		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i), (rdba >> 32));
988 		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
989 		    sc->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
990 
991 		/* Capture Rx Tail index */
992 		rxr->tail = IXGBE_VFRDT(rxr->me);
993 
994 		/* Reset the ring indices */
995 		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
996 		IXGBE_WRITE_REG(hw, rxr->tail, 0);
997 
998 		/* Set up the SRRCTL register */
999 		reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
1000 		reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
1001 		reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
1002 		reg |= bufsz;
1003 		reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1004 		IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
1005 
1006 		/* Do the queue enabling last */
1007 		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
1008 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
1009 		for (k = 0; k < 10; k++) {
1010 			if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
1011 			    IXGBE_RXDCTL_ENABLE)
1012 				break;
1013 			msec_delay(1);
1014 		}
1015 
1016 		/* Set the Tail Pointer */
1017 		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
1018 				sc->num_rx_desc - 1);
1019 	}
1020 
1021 	/*
1022 	 * Do not touch RSS and RETA settings for older hardware
1023 	 * as those are shared among PF and all VF.
1024 	 */
1025 	if (sc->hw.mac.type >= ixgbe_mac_X550_vf)
1026 		ixv_initialize_rss_mapping(sc);
1027 
1028 	return;
1029 } /* ixv_initialize_receive_units */
1030 
1031 /************************************************************************
1032  * ixv_setup_vlan_support
1033  ************************************************************************/
1034 static void
1035 ixv_setup_vlan_support(struct ix_softc *sc)
1036 {
1037 	struct ixgbe_hw *hw = &sc->hw;
1038 	uint32_t         ctrl, vid, vfta, retry;
1039 	int              i, j;
1040 
1041 	/*
1042 	 * We get here through init, meaning a soft
1043 	 * reset, which has already cleared the VFTA
1044 	 * and other state, so if no VLANs have been
1045 	 * registered, do nothing.
1046 	 */
1047 	if (sc->num_vlans == 0)
1048 		return;
1049 
1050 	/* Enable the queues */
1051 	for (i = 0; i < sc->num_queues; i++) {
1052 		ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
1053 		ctrl |= IXGBE_RXDCTL_VME;
1054 		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
1055 		/*
1056 		 * Let Rx path know that it needs to store VLAN tag
1057 		 * as part of extra mbuf info.
1058 		 */
1059 	}
1060 
1061 	/*
1062 	 * A soft reset zeroes out the VFTA, so
1063 	 * we need to repopulate it now.
1064 	 */
1065 	for (i = 0; i < IXGBE_VFTA_SIZE; i++) {
1066 		if (sc->shadow_vfta[i] == 0)
1067 			continue;
1068 		vfta = sc->shadow_vfta[i];
1069 		/*
1070 		 * Reconstruct the VLAN IDs from
1071 		 * the bits set in each 32-bit
1072 		 * word of the shadow array.
1073 		 */
1074 		for (j = 0; j < 32; j++) {
1075 			retry = 0;
1076 			if ((vfta & (1 << j)) == 0)
1077 				continue;
1078 			vid = (i * 32) + j;
1079 			/* Call the shared code mailbox routine */
1080 			while (hw->mac.ops.set_vfta(hw, vid, 0, TRUE, FALSE)) {
1081 				if (++retry > 5)
1082 					break;
1083 			}
1084 		}
1085 	}
1086 } /* ixv_setup_vlan_support */
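
/*
 * Shadow VFTA arithmetic, by way of example: VLAN id 100 is tracked
 * in shadow_vfta[100 / 32] = shadow_vfta[3], bit 100 % 32 = 4, and
 * the loop above recovers it as vid = (3 * 32) + 4 = 100 before
 * handing it to the set_vfta mailbox op.
 */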
1087 
1088 /************************************************************************
1089  * ixv_enable_intr
1090  ************************************************************************/
1091 static void
1092 ixv_enable_intr(struct ix_softc *sc)
1093 {
1094 	struct ixgbe_hw *hw = &sc->hw;
1095 	struct ix_queue *que = sc->queues;
1096 	uint32_t         mask;
1097 	int              i;
1098 
1099 	/* For VTEIAC */
1100 	mask = (1 << sc->linkvec);
1101 	for (i = 0; i < sc->num_queues; i++, que++)
1102 		mask |= (1 << que->msix);
1103 	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
1104 
1105 	/* For VTEIMS */
1106 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << sc->linkvec));
1107 	que = sc->queues;
1108 	for (i = 0; i < sc->num_queues; i++, que++)
1109 		ixv_enable_queue(sc, que->msix);
1110 
1111 	IXGBE_WRITE_FLUSH(hw);
1112 
1113 	return;
1114 } /* ixv_enable_intr */
1115 
1116 /************************************************************************
1117  * ixv_disable_intr
1118  ************************************************************************/
1119 static void
1120 ixv_disable_intr(struct ix_softc *sc)
1121 {
1122 	IXGBE_WRITE_REG(&sc->hw, IXGBE_VTEIAC, 0);
1123 	IXGBE_WRITE_REG(&sc->hw, IXGBE_VTEIMC, ~0);
1124 	IXGBE_WRITE_FLUSH(&sc->hw);
1125 
1126 	return;
1127 } /* ixv_disable_intr */
1128 
1129 /************************************************************************
1130  * ixv_set_ivar
1131  *
1132  *   Setup the correct IVAR register for a particular MSI-X interrupt
1133  *    - entry is the register array entry
1134  *    - vector is the MSI-X vector for this queue
1135  *    - type is RX/TX/MISC
1136  ************************************************************************/
1137 static void
1138 ixv_set_ivar(struct ix_softc *sc, uint8_t entry, uint8_t vector, int8_t type)
1139 {
1140 	struct ixgbe_hw *hw = &sc->hw;
1141 	uint32_t             ivar, index;
1142 
1143 	vector |= IXGBE_IVAR_ALLOC_VAL;
1144 
1145 	if (type == -1) { /* MISC IVAR */
1146 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
1147 		ivar &= ~0xFF;
1148 		ivar |= vector;
1149 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
1150 	} else {          /* RX/TX IVARS */
1151 		index = (16 * (entry & 1)) + (8 * type);
1152 		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
1153 		ivar &= ~(0xFF << index);
1154 		ivar |= (vector << index);
1155 		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
1156 	}
1157 } /* ixv_set_ivar */
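
/*
 * IVAR layout, by way of example: the RX entry (type 0) for queue 3
 * computes index = (16 * (3 & 1)) + (8 * 0) = 16, i.e. bits 23:16 of
 * VTIVAR(3 >> 1) = VTIVAR(1); the matching TX entry (type 1) lands
 * 8 bits higher, at bits 31:24 of the same register.
 */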
1158 
1159 /************************************************************************
1160  * ixv_configure_ivars
1161  ************************************************************************/
1162 static void
1163 ixv_configure_ivars(struct ix_softc *sc)
1164 {
1165 	struct ix_queue *que = sc->queues;
1166 	int              i;
1167 
1168 	for (i = 0; i < sc->num_queues; i++, que++) {
1169 		/* First the RX queue entry */
1170 		ixv_set_ivar(sc, i, que->msix, 0);
1171 		/* ... and the TX */
1172 		ixv_set_ivar(sc, i, que->msix, 1);
1173 		/* Set an initial value in EITR */
1174 		IXGBE_WRITE_REG(&sc->hw, IXGBE_VTEITR(que->msix),
1175 		    IXGBE_EITR_DEFAULT);
1176 	}
1177 
1178 	/* For the mailbox interrupt */
1179 	ixv_set_ivar(sc, 1, sc->linkvec, -1);
1180 } /* ixv_configure_ivars */
1181 
1182 /************************************************************************
1183  * ixv_ioctl - Ioctl entry point
1184  *
1185  *   Called when the user wants to configure the interface.
1186  *
1187  *   return 0 on success, positive on failure
1188  ************************************************************************/
1189 static int
1190 ixv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
1191 {
1192 	struct ix_softc *sc = ifp->if_softc;
1193 	struct ifreq   *ifr = (struct ifreq *)data;
1194 	int		s, error = 0;
1195 
1196 	s = splnet();
1197 
1198 	switch (command) {
1199 	case SIOCSIFADDR:
1200 		IOCTL_DEBUGOUT("ioctl: SIOCxIFADDR (Get/Set Interface Addr)");
1201 		ifp->if_flags |= IFF_UP;
1202 		if (!(ifp->if_flags & IFF_RUNNING))
1203 			ixv_init(sc);
1204 		break;
1205 
1206 	case SIOCSIFFLAGS:
1207 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
1208 		if (ifp->if_flags & IFF_UP) {
1209 			if (ifp->if_flags & IFF_RUNNING)
1210 				error = ENETRESET;
1211 			else
1212 				ixv_init(sc);
1213 		} else {
1214 			if (ifp->if_flags & IFF_RUNNING)
1215 				ixv_stop(sc);
1216 		}
1217 		break;
1218 
1219 	case SIOCSIFMEDIA:
1220 	case SIOCGIFMEDIA:
1221 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
1222 		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
1223 		break;
1224 
1225 	case SIOCGIFRXR:
1226 		error = ixgbe_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
1227 		break;
1228 
1229 	default:
1230 		error = ether_ioctl(ifp, &sc->arpcom, command, data);
1231 	}
1232 
1233 	switch (error) {
1234 	case 0:
1235 		if (command == SIOCSIFMTU)
1236 			ixv_init(sc);
1237 		break;
1238 	case ENETRESET:
1239 		if (ifp->if_flags & IFF_RUNNING) {
1240 			ixv_disable_intr(sc);
1241 			ixv_iff(sc);
1242 			ixv_enable_intr(sc);
1243 		}
1244 		error = 0;
1245 	}
1246 
1247 	splx(s);
1248 	return (error);
1249 } /* ixv_ioctl */
1250 
1251 /************************************************************************
1252  * ixv_allocate_msix - Setup MSI-X Interrupt resources and handlers
1253  ************************************************************************/
1254 static int
1255 ixv_allocate_msix(struct ix_softc *sc)
1256 {
1257 	struct ixgbe_osdep      *os = &sc->osdep;
1258 	struct pci_attach_args  *pa  = &os->os_pa;
1259 	int                      i = 0, error = 0, off;
1260 	struct ix_queue         *que;
1261 	pci_intr_handle_t       ih;
1262 	pcireg_t                reg;
1263 
1264 	for (i = 0, que = sc->queues; i < sc->num_queues; i++, que++) {
1265 		if (pci_intr_map_msix(pa, i, &ih)) {
1266 			printf("ixv_allocate_msix: "
1267 			       "pci_intr_map_msix vec %d failed\n", i);
1268 			error = ENOMEM;
1269 			goto fail;
1270 		}
1271 
1272 		que->tag = pci_intr_establish_cpu(pa->pa_pc, ih,
1273 			IPL_NET | IPL_MPSAFE, intrmap_cpu(sc->sc_intrmap, i),
1274 			ixv_msix_que, que, que->name);
1275 		if (que->tag == NULL) {
1276 			printf("ixv_allocate_msix: "
1277 			       "pci_intr_establish vec %d failed\n", i);
1278 			error = ENOMEM;
1279 			goto fail;
1280 		}
1281 
1282 		que->msix = i;
1283 	}
1284 
1285 	/* and Mailbox */
1286 	if (pci_intr_map_msix(pa, i, &ih)) {
1287 		printf("ixv_allocate_msix: "
1288 		       "pci_intr_map_msix mbox vector failed\n");
1289 		error = ENOMEM;
1290 		goto fail;
1291 	}
1292 
1293 	sc->tag = pci_intr_establish(pa->pa_pc, ih, IPL_NET | IPL_MPSAFE,
1294 			ixv_msix_mbx, sc, sc->dev.dv_xname);
1295 	if (sc->tag == NULL) {
1296 		printf("ixv_allocate_msix: "
1297 		       "pci_intr_establish mbox vector failed\n");
1298 		error = ENOMEM;
1299 		goto fail;
1300 	}
1301 	sc->linkvec = i;
1302 
1303 	/*
1304 	 * Due to a broken design, QEMU will fail to properly
1305 	 * enable MSI-X for the guest unless all vectors in
1306 	 * the table are set up, so we must rewrite the
1307 	 * ENABLE bit in the MSI-X control register again at
1308 	 * this point to get the device fully initialized.
1309 	 */
1310 	if (sc->hw.mac.type == ixgbe_mac_82599_vf) {
1311 		pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSIX, &off, NULL);
1312 		reg = pci_conf_read(pa->pa_pc, pa->pa_tag, off);
1313 		pci_conf_write(pa->pa_pc, pa->pa_tag, off, reg | PCI_MSIX_MC_MSIXE);
1314 	}
1315 
1316 	printf(", %s, %d queue%s\n", pci_intr_string(pa->pa_pc, ih),
1317 	    i, (i > 1) ? "s" : "");
1318 
1319 	return (0);
1320 
1321 fail:
1322 	for (que = sc->queues; i > 0; i--, que++) {
1323 		if (que->tag == NULL)
1324 			continue;
1325 		pci_intr_disestablish(pa->pa_pc, que->tag);
1326 		que->tag = NULL;
1327 	}
1328 	return (error);
1329 } /* ixv_allocate_msix */
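
/*
 * Resulting vector layout: MSI-X vectors 0..num_queues-1 service the
 * RX/TX queue pairs and vector num_queues (saved as sc->linkvec)
 * services the PF mailbox, matching the IVAR programming done in
 * ixv_configure_ivars().
 */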
1330 
1331 #if NKSTAT > 0
1332 enum ixv_counter_idx {
1333 	ixv_good_packets_received_count,
1334 	ixv_good_packets_transmitted_count,
1335 	ixv_good_octets_received_count,
1336 	ixv_good_octets_transmitted_count,
1337 	ixv_multicast_packets_received_count,
1338 
1339 	ixv_counter_num,
1340 };
1341 
1342 CTASSERT(KSTAT_KV_U_PACKETS <= 0xff);
1343 CTASSERT(KSTAT_KV_U_BYTES <= 0xff);
1344 
1345 struct ixv_counter {
1346 	char			 name[KSTAT_KV_NAMELEN];
1347 	uint32_t		 reg;
1348 	uint8_t			 width;
1349 	uint8_t			 unit;
1350 };
1351 
1352 static const struct ixv_counter ixv_counters[ixv_counter_num] = {
1353 	[ixv_good_packets_received_count] = { "rx good",  IXGBE_VFGPRC, 32, KSTAT_KV_U_PACKETS },
1354 	[ixv_good_packets_transmitted_count] = { "tx good",  IXGBE_VFGPTC, 32, KSTAT_KV_U_PACKETS },
1355 	[ixv_good_octets_received_count] = { "rx total",  IXGBE_VFGORC_LSB, 36, KSTAT_KV_U_BYTES },
1356 	[ixv_good_octets_transmitted_count] = { "tx total",  IXGBE_VFGOTC_LSB, 36, KSTAT_KV_U_BYTES },
1357 	[ixv_multicast_packets_received_count] = { "rx mcast",  IXGBE_VFMPRC, 32, KSTAT_KV_U_PACKETS },
1358 };
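
/*
 * The 36-bit entries (the octet counters) name only their LSB
 * register here; ixv_kstats_read() assumes the MSB half lives 4
 * bytes above it and widens the pair via ixv_read36(hw, reg, reg + 4).
 */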
1359 
1360 struct ixv_rxq_kstats {
1361 	struct kstat_kv	qprc;
1362 	struct kstat_kv	qbrc;
1363 	struct kstat_kv	qprdc;
1364 };
1365 
1366 static const struct ixv_rxq_kstats ixv_rxq_kstats_tpl = {
1367 	KSTAT_KV_UNIT_INITIALIZER("packets",
1368 	    KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS),
1369 	KSTAT_KV_UNIT_INITIALIZER("bytes",
1370 	    KSTAT_KV_T_COUNTER64, KSTAT_KV_U_BYTES),
1371 	KSTAT_KV_UNIT_INITIALIZER("qdrops",
1372 	    KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS),
1373 };
1374 
1375 struct ixv_txq_kstats {
1376 	struct kstat_kv	qptc;
1377 	struct kstat_kv	qbtc;
1378 };
1379 
1380 static const struct ixv_txq_kstats ixv_txq_kstats_tpl = {
1381 	KSTAT_KV_UNIT_INITIALIZER("packets",
1382 	    KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS),
1383 	KSTAT_KV_UNIT_INITIALIZER("bytes",
1384 	    KSTAT_KV_T_COUNTER64, KSTAT_KV_U_BYTES),
1385 };
1386 
1387 static int	ixv_kstats_read(struct kstat *ks);
1388 static int	ixv_rxq_kstats_read(struct kstat *ks);
1389 static int	ixv_txq_kstats_read(struct kstat *ks);
1390 
1391 static void
1392 ixv_kstats(struct ix_softc *sc)
1393 {
1394 	struct kstat *ks;
1395 	struct kstat_kv *kvs;
1396 	unsigned int i;
1397 
1398 	mtx_init(&sc->sc_kstat_mtx, IPL_SOFTCLOCK);
1399 	timeout_set(&sc->sc_kstat_tmo, ixv_kstats_tick, sc);
1400 
1401 	ks = kstat_create(sc->dev.dv_xname, 0, "ixv-stats", 0,
1402 	    KSTAT_T_KV, 0);
1403 	if (ks == NULL)
1404 		return;
1405 
1406 	kvs = mallocarray(nitems(ixv_counters), sizeof(*kvs),
1407 	    M_DEVBUF, M_WAITOK|M_ZERO);
1408 
1409 	for (i = 0; i < nitems(ixv_counters); i++) {
1410 		const struct ixv_counter *ixc = &ixv_counters[i];
1411 
1412 		kstat_kv_unit_init(&kvs[i], ixc->name,
1413 		    KSTAT_KV_T_COUNTER64, ixc->unit);
1414 	}
1415 
1416 	kstat_set_mutex(ks, &sc->sc_kstat_mtx);
1417 	ks->ks_softc = sc;
1418 	ks->ks_data = kvs;
1419 	ks->ks_datalen = nitems(ixv_counters) * sizeof(*kvs);
1420 	ks->ks_read = ixv_kstats_read;
1421 
1422 	sc->sc_kstat = ks;
1423 	kstat_install(ks);
1424 }
1425 
1426 static void
1427 ixv_rxq_kstats(struct ix_softc *sc, struct ix_rxring *rxr)
1428 {
1429 	struct ixv_rxq_kstats *stats;
1430 	struct kstat *ks;
1431 
1432 	ks = kstat_create(sc->dev.dv_xname, 0, "ixv-rxq", rxr->me,
1433 	    KSTAT_T_KV, 0);
1434 	if (ks == NULL)
1435 		return;
1436 
1437 	stats = malloc(sizeof(*stats), M_DEVBUF, M_WAITOK|M_ZERO);
1438 	*stats = ixv_rxq_kstats_tpl;
1439 
1440 	kstat_set_mutex(ks, &sc->sc_kstat_mtx);
1441 	ks->ks_softc = rxr;
1442 	ks->ks_data = stats;
1443 	ks->ks_datalen = sizeof(*stats);
1444 	ks->ks_read = ixv_rxq_kstats_read;
1445 
1446 	rxr->kstat = ks;
1447 	kstat_install(ks);
1448 }
1449 
1450 static void
1451 ixv_txq_kstats(struct ix_softc *sc, struct ix_txring *txr)
1452 {
1453 	struct ixv_txq_kstats *stats;
1454 	struct kstat *ks;
1455 
1456 	ks = kstat_create(sc->dev.dv_xname, 0, "ixv-txq", txr->me,
1457 	    KSTAT_T_KV, 0);
1458 	if (ks == NULL)
1459 		return;
1460 
1461 	stats = malloc(sizeof(*stats), M_DEVBUF, M_WAITOK|M_ZERO);
1462 	*stats = ixv_txq_kstats_tpl;
1463 
1464 	kstat_set_mutex(ks, &sc->sc_kstat_mtx);
1465 	ks->ks_softc = txr;
1466 	ks->ks_data = stats;
1467 	ks->ks_datalen = sizeof(*stats);
1468 	ks->ks_read = ixv_txq_kstats_read;
1469 
1470 	txr->kstat = ks;
1471 	kstat_install(ks);
1472 }
1473 
1474 /**********************************************************************
1475  *
1476  *  Update the board statistics counters.
1477  *
1478  **********************************************************************/
1479 
1480 static void
1481 ixv_kstats_tick(void *arg)
1482 {
1483 	struct ix_softc *sc = arg;
1484 	int i;
1485 
1486 	timeout_add_sec(&sc->sc_kstat_tmo, 1);
1487 
1488 	mtx_enter(&sc->sc_kstat_mtx);
1489 	ixv_kstats_read(sc->sc_kstat);
1490 	for (i = 0; i < sc->num_queues; i++) {
1491 		ixv_rxq_kstats_read(sc->rx_rings[i].kstat);
1492 		ixv_txq_kstats_read(sc->tx_rings[i].kstat);
1493 	}
1494 	mtx_leave(&sc->sc_kstat_mtx);
1495 }
1496 
1497 static uint64_t
1498 ixv_read36(struct ixgbe_hw *hw, bus_size_t loreg, bus_size_t hireg)
1499 {
1500 	uint64_t lo, hi;
1501 
1502 	lo = IXGBE_READ_REG(hw, loreg);
1503 	hi = IXGBE_READ_REG(hw, hireg);
1504 
1505 	return (((hi & 0xf) << 32) | lo);
1506 }
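
/*
 * E.g. lo = 0x89abcdef and hi = 0x5 combine to 0x589abcdef; only the
 * low nibble of the high register is kept because these hardware
 * counters are 36 bits wide.
 */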
1507 
1508 static int
1509 ixv_kstats_read(struct kstat *ks)
1510 {
1511 	struct ix_softc *sc = ks->ks_softc;
1512 	struct kstat_kv *kvs = ks->ks_data;
1513 	struct ixgbe_hw	*hw = &sc->hw;
1514 	unsigned int i;
1515 
1516 	for (i = 0; i < nitems(ixv_counters); i++) {
1517 		const struct ixv_counter *ixc = &ixv_counters[i];
1518 		uint32_t reg = ixc->reg;
1519 		uint64_t v;
1520 
1521 		if (reg == 0)
1522 			continue;
1523 
1524 		if (ixc->width > 32)
1525 			v = ixv_read36(hw, reg, reg + 4);
1526 		else
1527 			v = IXGBE_READ_REG(hw, reg);
1528 
1529 		kstat_kv_u64(&kvs[i]) = v;
1530 	}
1531 
1532 	getnanouptime(&ks->ks_updated);
1533 
1534 	return (0);
1535 }
1536 
1537 static int
1538 ixv_rxq_kstats_read(struct kstat *ks)
1539 {
1540 	struct ixv_rxq_kstats *stats = ks->ks_data;
1541 	struct ix_rxring *rxr = ks->ks_softc;
1542 	struct ix_softc *sc = rxr->sc;
1543 	struct ixgbe_hw	*hw = &sc->hw;
1544 	uint32_t i = rxr->me;
1545 
1546 	kstat_kv_u64(&stats->qprc) += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
1547 	kstat_kv_u64(&stats->qprdc) += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
1548 	kstat_kv_u64(&stats->qbrc) +=
1549 		ixv_read36(hw, IXGBE_QBRC_L(i), IXGBE_QBRC_H(i));
1550 
1551 	getnanouptime(&ks->ks_updated);
1552 
1553 	return (0);
1554 }
1555 
1556 static int
1557 ixv_txq_kstats_read(struct kstat *ks)
1558 {
1559 	struct ixv_txq_kstats *stats = ks->ks_data;
1560 	struct ix_txring *txr = ks->ks_softc;
1561 	struct ix_softc *sc = txr->sc;
1562 	struct ixgbe_hw	*hw = &sc->hw;
1563 	uint32_t i = txr->me;
1564 
1565 	kstat_kv_u64(&stats->qptc) += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
1566 	kstat_kv_u64(&stats->qbtc) +=
1567 		ixv_read36(hw, IXGBE_QBTC_L(i), IXGBE_QBTC_H(i));
1568 
1569 	getnanouptime(&ks->ks_updated);
1570 
1571 	return (0);
1572 }
1573 #endif /* NKSTAT > 0 */
1574