xref: /openbsd-src/sys/dev/pci/if_ix.c (revision 1a8dbaac879b9f3335ad7fb25429ce63ac1d6bac)
1 /*	$OpenBSD: if_ix.c,v 1.172 2020/07/18 07:18:22 dlg Exp $	*/
2 
3 /******************************************************************************
4 
5   Copyright (c) 2001-2013, Intel Corporation
6   All rights reserved.
7 
8   Redistribution and use in source and binary forms, with or without
9   modification, are permitted provided that the following conditions are met:
10 
11    1. Redistributions of source code must retain the above copyright notice,
12       this list of conditions and the following disclaimer.
13 
14    2. Redistributions in binary form must reproduce the above copyright
15       notice, this list of conditions and the following disclaimer in the
16       documentation and/or other materials provided with the distribution.
17 
18    3. Neither the name of the Intel Corporation nor the names of its
19       contributors may be used to endorse or promote products derived from
20       this software without specific prior written permission.
21 
22   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32   POSSIBILITY OF SUCH DAMAGE.
33 
34 ******************************************************************************/
35 /* FreeBSD: src/sys/dev/ixgbe/ixgbe.c 251964 Jun 18 21:28:19 2013 UTC */
36 
37 #include <dev/pci/if_ix.h>
38 #include <dev/pci/ixgbe_type.h>
39 
40 /*********************************************************************
41  *  Driver version
42  *********************************************************************/
43 /* char ixgbe_driver_version[] = "2.5.13"; */
44 
45 /*********************************************************************
46  *  PCI Device ID Table
47  *
48  *  Used by ixgbe_probe() to match the supported adapters
49  *********************************************************************/
50 
51 const struct pci_matchid ixgbe_devices[] = {
52 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598 },
53 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598_BX },
54 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AF_DUAL },
55 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AF },
56 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AT },
57 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AT2 },
58 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AT_DUAL },
59 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598EB_CX4 },
60 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598EB_CX4_DUAL },
61 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598EB_XF_LR },
62 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598EB_SFP },
63 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598_SR_DUAL_EM },
64 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598_DA_DUAL },
65 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_KX4 },
66 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_KX4_MEZZ },
67 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_XAUI },
68 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_COMBO_BP },
69 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_BPLANE_FCOE },
70 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_CX4 },
71 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_T3_LOM },
72 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_SFP },
73 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_SFP_EM },
74 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_SFP_SF_QP },
75 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_SFP_SF2 },
76 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_SFP_FCOE },
77 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599EN_SFP },
78 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_QSFP_SF_QP },
79 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X540T },
80 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X540T1 },
81 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550T },
82 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550T1 },
83 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_X_KX4 },
84 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_X_KR },
85 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_X_SFP },
86 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_X_10G_T },
87 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_X_1G_T },
88 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_A_KR },
89 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_A_KR_L },
90 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_A_SFP_N },
91 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_A_SFP },
92 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_A_SGMII },
93 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_A_SGMII_L },
94 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_A_10G_T },
95 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_A_1G_T },
96 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_A_1G_T_L }
97 };
98 
99 /*********************************************************************
100  *  Function prototypes
101  *********************************************************************/
102 int	ixgbe_probe(struct device *, void *, void *);
103 void	ixgbe_attach(struct device *, struct device *, void *);
104 int	ixgbe_detach(struct device *, int);
105 void	ixgbe_start(struct ifqueue *);
106 int	ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
107 int	ixgbe_rxrinfo(struct ix_softc *, struct if_rxrinfo *);
108 int	ixgbe_get_sffpage(struct ix_softc *, struct if_sffpage *);
109 void	ixgbe_watchdog(struct ifnet *);
110 void	ixgbe_init(void *);
111 void	ixgbe_stop(void *);
112 void	ixgbe_media_status(struct ifnet *, struct ifmediareq *);
113 int	ixgbe_media_change(struct ifnet *);
114 void	ixgbe_identify_hardware(struct ix_softc *);
115 int	ixgbe_allocate_pci_resources(struct ix_softc *);
116 int	ixgbe_allocate_legacy(struct ix_softc *);
117 int	ixgbe_allocate_msix(struct ix_softc *);
118 void	ixgbe_setup_msix(struct ix_softc *);
119 int	ixgbe_allocate_queues(struct ix_softc *);
120 void	ixgbe_free_pci_resources(struct ix_softc *);
121 void	ixgbe_local_timer(void *);
122 void	ixgbe_setup_interface(struct ix_softc *);
123 void	ixgbe_config_gpie(struct ix_softc *);
124 void	ixgbe_config_delay_values(struct ix_softc *);
125 void	ixgbe_add_media_types(struct ix_softc *);
126 void	ixgbe_config_link(struct ix_softc *);
127 
128 int	ixgbe_allocate_transmit_buffers(struct tx_ring *);
129 int	ixgbe_setup_transmit_structures(struct ix_softc *);
130 int	ixgbe_setup_transmit_ring(struct tx_ring *);
131 void	ixgbe_initialize_transmit_units(struct ix_softc *);
132 void	ixgbe_free_transmit_structures(struct ix_softc *);
133 void	ixgbe_free_transmit_buffers(struct tx_ring *);
134 
135 int	ixgbe_allocate_receive_buffers(struct rx_ring *);
136 int	ixgbe_setup_receive_structures(struct ix_softc *);
137 int	ixgbe_setup_receive_ring(struct rx_ring *);
138 void	ixgbe_initialize_receive_units(struct ix_softc *);
139 void	ixgbe_free_receive_structures(struct ix_softc *);
140 void	ixgbe_free_receive_buffers(struct rx_ring *);
141 void	ixgbe_initialize_rss_mapping(struct ix_softc *);
142 int	ixgbe_rxfill(struct rx_ring *);
143 void	ixgbe_rxrefill(void *);
144 
145 int	ixgbe_intr(struct ix_softc *sc);
146 void	ixgbe_enable_intr(struct ix_softc *);
147 void	ixgbe_disable_intr(struct ix_softc *);
148 int	ixgbe_txeof(struct tx_ring *);
149 int	ixgbe_rxeof(struct rx_ring *);
150 void	ixgbe_rx_checksum(uint32_t, struct mbuf *, uint32_t);
151 void	ixgbe_iff(struct ix_softc *);
152 void	ixgbe_map_queue_statistics(struct ix_softc *);
153 void	ixgbe_update_link_status(struct ix_softc *);
154 int	ixgbe_get_buf(struct rx_ring *, int);
155 int	ixgbe_encap(struct tx_ring *, struct mbuf *);
156 int	ixgbe_dma_malloc(struct ix_softc *, bus_size_t,
157 		    struct ixgbe_dma_alloc *, int);
158 void	ixgbe_dma_free(struct ix_softc *, struct ixgbe_dma_alloc *);
159 int	ixgbe_tx_ctx_setup(struct tx_ring *, struct mbuf *, uint32_t *,
160 	    uint32_t *);
161 int	ixgbe_tso_setup(struct tx_ring *, struct mbuf *, uint32_t *,
162 	    uint32_t *);
163 void	ixgbe_set_ivar(struct ix_softc *, uint8_t, uint8_t, int8_t);
164 void	ixgbe_configure_ivars(struct ix_softc *);
165 uint8_t	*ixgbe_mc_array_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);
166 
167 void	ixgbe_setup_vlan_hw_support(struct ix_softc *);
168 
169 /* Support for pluggable optic modules */
170 void	ixgbe_handle_mod(struct ix_softc *);
171 void	ixgbe_handle_msf(struct ix_softc *);
172 void	ixgbe_handle_phy(struct ix_softc *);
173 
174 /* Legacy (single vector) interrupt handler */
175 int	ixgbe_legacy_intr(void *);
176 void	ixgbe_enable_queue(struct ix_softc *, uint32_t);
177 void	ixgbe_enable_queues(struct ix_softc *);
178 void	ixgbe_disable_queue(struct ix_softc *, uint32_t);
179 void	ixgbe_rearm_queue(struct ix_softc *, uint32_t);
180 
181 /* MSI-X (multiple vector) interrupt handlers */
182 int	ixgbe_link_intr(void *);
183 int	ixgbe_queue_intr(void *);
184 
185 #if NKSTAT > 0
186 static void	ix_kstats(struct ix_softc *);
187 static void	ix_rxq_kstats(struct ix_softc *, struct rx_ring *);
188 static void	ix_txq_kstats(struct ix_softc *, struct tx_ring *);
189 static void	ix_kstats_tick(void *);
190 #endif
191 
192 /*********************************************************************
193  *  OpenBSD Device Interface Entry Points
194  *********************************************************************/
195 
196 struct cfdriver ix_cd = {
197 	NULL, "ix", DV_IFNET
198 };
199 
200 struct cfattach ix_ca = {
201 	sizeof(struct ix_softc), ixgbe_probe, ixgbe_attach, ixgbe_detach
202 };
203 
204 int ixgbe_smart_speed = ixgbe_smart_speed_on;
205 int ixgbe_enable_msix = 1;
206 
207 /*********************************************************************
208  *  Device identification routine
209  *
210  *  ixgbe_probe determines whether the driver should be loaded on the
211  *  adapter, based on the PCI vendor/device ID of the adapter.
212  *
213  *  return 0 on success, positive on failure
214  *********************************************************************/
215 
216 int
217 ixgbe_probe(struct device *parent, void *match, void *aux)
218 {
219 	INIT_DEBUGOUT("ixgbe_probe: begin");
220 
221 	return (pci_matchbyid((struct pci_attach_args *)aux, ixgbe_devices,
222 	    nitems(ixgbe_devices)));
223 }
224 
225 /*********************************************************************
226  *  Device initialization routine
227  *
228  *  The attach entry point is called when the driver is being loaded.
229  *  This routine identifies the type of hardware, allocates all resources
230  *  and initializes the hardware.
231  *
232  *  return 0 on success, positive on failure
233  *********************************************************************/
234 
235 void
236 ixgbe_attach(struct device *parent, struct device *self, void *aux)
237 {
238 	struct pci_attach_args	*pa = (struct pci_attach_args *)aux;
239 	struct ix_softc		*sc = (struct ix_softc *)self;
240 	int			 error = 0;
241 	uint16_t		 csum;
242 	uint32_t			 ctrl_ext;
243 	struct ixgbe_hw		*hw = &sc->hw;
244 
245 	INIT_DEBUGOUT("ixgbe_attach: begin");
246 
247 	sc->osdep.os_sc = sc;
248 	sc->osdep.os_pa = *pa;
249 
250 	rw_init(&sc->sfflock, "ixsff");
251 
252 #if NKSTAT > 0
253 	ix_kstats(sc);
254 #endif
255 
256 	/* Determine hardware revision */
257 	ixgbe_identify_hardware(sc);
258 
259 	/* Default descriptor counts for the TX and RX rings */
260 	sc->num_tx_desc = DEFAULT_TXD;
261 	sc->num_rx_desc = DEFAULT_RXD;
262 
263 	/* Do base PCI setup - map BAR0 */
264 	if (ixgbe_allocate_pci_resources(sc))
265 		goto err_out;
266 
267 	/* Allocate our TX/RX Queues */
268 	if (ixgbe_allocate_queues(sc))
269 		goto err_out;
270 
271 	/* Allocate multicast array memory. */
272 	sc->mta = mallocarray(IXGBE_ETH_LENGTH_OF_ADDRESS,
273 	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
274 	if (sc->mta == NULL) {
275 		printf(": Can not allocate multicast setup array\n");
276 		goto err_late;
277 	}
278 
279 	/* Initialize the shared code */
280 	error = ixgbe_init_shared_code(hw);
281 	if (error) {
282 		printf(": Unable to initialize the shared code\n");
283 		goto err_late;
284 	}
285 
286 	/* Make sure we have a good EEPROM before we read from it */
287 	if (sc->hw.eeprom.ops.validate_checksum(&sc->hw, &csum) < 0) {
288 		printf(": The EEPROM Checksum Is Not Valid\n");
289 		goto err_late;
290 	}
291 
292 	error = ixgbe_init_hw(hw);
293 	if (error == IXGBE_ERR_EEPROM_VERSION) {
294 		printf(": This device is a pre-production adapter/"
295 		    "LOM.  Please be aware there may be issues associated "
296 		    "with your hardware.\nIf you are experiencing problems "
297 		    "please contact your Intel or hardware representative "
298 		    "who provided you with this hardware.\n");
299 	} else if (error && (error != IXGBE_ERR_SFP_NOT_PRESENT &&
300 	    error != IXGBE_ERR_SFP_NOT_SUPPORTED)) {
301 		printf(": Hardware Initialization Failure\n");
302 		goto err_late;
303 	}
304 
305 	bcopy(sc->hw.mac.addr, sc->arpcom.ac_enaddr,
306 	    IXGBE_ETH_LENGTH_OF_ADDRESS);
307 
308 	if (sc->sc_intrmap)
309 		error = ixgbe_allocate_msix(sc);
310 	else
311 		error = ixgbe_allocate_legacy(sc);
312 	if (error)
313 		goto err_late;
314 
315 	/* Enable the optics for 82599 SFP+ fiber */
316 	if (sc->hw.mac.ops.enable_tx_laser)
317 		sc->hw.mac.ops.enable_tx_laser(&sc->hw);
318 
319 	/* Enable power to the phy */
320 	if (hw->phy.ops.set_phy_power)
321 		hw->phy.ops.set_phy_power(&sc->hw, TRUE);
322 
323 	/* Setup OS specific network interface */
324 	ixgbe_setup_interface(sc);
325 
326 	/* Get the PCI-E bus info and determine LAN ID */
327 	hw->mac.ops.get_bus_info(hw);
328 
329 	/* Set an initial default flow control value */
330 	sc->fc = ixgbe_fc_full;
331 
332 	/* let hardware know driver is loaded */
333 	ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
334 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
335 	IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
336 
337 	printf(", address %s\n", ether_sprintf(sc->hw.mac.addr));
338 
339 	INIT_DEBUGOUT("ixgbe_attach: end");
340 	return;
341 
342 err_late:
343 	ixgbe_free_transmit_structures(sc);
344 	ixgbe_free_receive_structures(sc);
345 err_out:
346 	ixgbe_free_pci_resources(sc);
347 	free(sc->mta, M_DEVBUF, IXGBE_ETH_LENGTH_OF_ADDRESS *
348 	    MAX_NUM_MULTICAST_ADDRESSES);
349 }
350 
351 /*********************************************************************
352  *  Device removal routine
353  *
354  *  The detach entry point is called when the driver is being removed.
355  *  This routine stops the adapter and deallocates all the resources
356  *  that were allocated for driver operation.
357  *
358  *  return 0 on success, positive on failure
359  *********************************************************************/
360 
361 int
362 ixgbe_detach(struct device *self, int flags)
363 {
364 	struct ix_softc *sc = (struct ix_softc *)self;
365 	struct ifnet *ifp = &sc->arpcom.ac_if;
366 	uint32_t	ctrl_ext;
367 
368 	INIT_DEBUGOUT("ixgbe_detach: begin");
369 
370 	ixgbe_stop(sc);
371 
372 	/* let hardware know driver is unloading */
373 	ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
374 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
375 	IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
376 
377 	ether_ifdetach(ifp);
378 	if_detach(ifp);
379 
380 	ixgbe_free_pci_resources(sc);
381 
382 	ixgbe_free_transmit_structures(sc);
383 	ixgbe_free_receive_structures(sc);
384 	free(sc->mta, M_DEVBUF, IXGBE_ETH_LENGTH_OF_ADDRESS *
385 	    MAX_NUM_MULTICAST_ADDRESSES);
386 
387 	/* XXX kstat */
388 
389 	return (0);
390 }
391 
392 /*********************************************************************
393  *  Transmit entry point
394  *
395  *  ixgbe_start is called by the stack to initiate a transmit.
396  *  The driver will remain in this routine as long as there are
397  *  packets to transmit and transmit resources are available.
398  *  If resources are not available, the stack is notified and
399  *  the packet is requeued.
400  **********************************************************************/
401 
402 void
403 ixgbe_start(struct ifqueue *ifq)
404 {
405 	struct ifnet		*ifp = ifq->ifq_if;
406 	struct ix_softc		*sc = ifp->if_softc;
407 	struct tx_ring		*txr = ifq->ifq_softc;
408 	struct mbuf  		*m_head;
409 	unsigned int		 head, free, used;
410 	int			 post = 0;
411 
412 	if (!sc->link_up)
413 		return;
414 
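	/*
	 * Work out how many descriptors are free: the distance from the
	 * producer index (next_avail_desc) back to the consumer index
	 * (next_to_clean), modulo the ring size.  For example, with 256
	 * descriptors, head 250 and next_to_clean 10, (10 + 256) - 250
	 * leaves 16 free slots.
	 */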
415 	head = txr->next_avail_desc;
416 	free = txr->next_to_clean;
417 	if (free <= head)
418 		free += sc->num_tx_desc;
419 	free -= head;
420 
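	/*
	 * Make sure the index reads above are ordered before the
	 * descriptor ring is inspected and refilled below; see the
	 * corresponding membar_producer() in ixgbe_encap().
	 */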
421 	membar_consumer();
422 
423 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
424 	    0, txr->txdma.dma_map->dm_mapsize,
425 	    BUS_DMASYNC_POSTWRITE);
426 
427 	for (;;) {
428 		/* Check that we have the minimal number of TX descriptors. */
429 		if (free <= IXGBE_TX_OP_THRESHOLD) {
430 			ifq_set_oactive(ifq);
431 			break;
432 		}
433 
434 		m_head = ifq_dequeue(ifq);
435 		if (m_head == NULL)
436 			break;
437 
438 		used = ixgbe_encap(txr, m_head);
439 		if (used == 0) {
440 			m_freem(m_head);
441 			continue;
442 		}
443 
444 		free -= used;
445 
446 #if NBPFILTER > 0
447 		if (ifp->if_bpf)
448 			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
449 #endif
450 
451 		/* Set timeout in case hardware has problems transmitting */
452 		txr->watchdog_timer = IXGBE_TX_TIMEOUT;
453 		ifp->if_timer = IXGBE_TX_TIMEOUT;
454 
455 		post = 1;
456 	}
457 
458 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
459 	    0, txr->txdma.dma_map->dm_mapsize,
460 	    BUS_DMASYNC_PREWRITE);
461 
462 	/*
463 	 * Advance the Transmit Descriptor Tail (TDT); this tells the
464 	 * hardware that the queued frames are available to transmit.
465 	 */
466 	if (post)
467 		IXGBE_WRITE_REG(&sc->hw, IXGBE_TDT(txr->me),
468 		    txr->next_avail_desc);
469 }
470 
471 /*********************************************************************
472  *  Ioctl entry point
473  *
474  *  ixgbe_ioctl is called when the user wants to configure the
475  *  interface.
476  *
477  *  return 0 on success, positive on failure
478  **********************************************************************/
479 
480 int
481 ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
482 {
483 	struct ix_softc	*sc = ifp->if_softc;
484 	struct ifreq	*ifr = (struct ifreq *) data;
485 	int		s, error = 0;
486 
487 	s = splnet();
488 
489 	switch (command) {
490 	case SIOCSIFADDR:
491 		IOCTL_DEBUGOUT("ioctl: SIOCxIFADDR (Get/Set Interface Addr)");
492 		ifp->if_flags |= IFF_UP;
493 		if (!(ifp->if_flags & IFF_RUNNING))
494 			ixgbe_init(sc);
495 		break;
496 
497 	case SIOCSIFFLAGS:
498 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
499 		if (ifp->if_flags & IFF_UP) {
500 			if (ifp->if_flags & IFF_RUNNING)
501 				error = ENETRESET;
502 			else
503 				ixgbe_init(sc);
504 		} else {
505 			if (ifp->if_flags & IFF_RUNNING)
506 				ixgbe_stop(sc);
507 		}
508 		break;
509 
510 	case SIOCSIFMEDIA:
511 	case SIOCGIFMEDIA:
512 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
513 		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
514 		break;
515 
516 	case SIOCGIFRXR:
517 		error = ixgbe_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
518 		break;
519 
520 	case SIOCGIFSFFPAGE:
521 		error = rw_enter(&sc->sfflock, RW_WRITE|RW_INTR);
522 		if (error != 0)
523 			break;
524 
525 		error = ixgbe_get_sffpage(sc, (struct if_sffpage *)data);
526 		rw_exit(&sc->sfflock);
527 		break;
528 
529 	default:
530 		error = ether_ioctl(ifp, &sc->arpcom, command, data);
531 	}
532 
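	/*
	 * ENETRESET means only the receive filter state changed: briefly
	 * mask interrupts, reprogram the promiscuous/multicast filters via
	 * ixgbe_iff() and unmask again, without a full reinitialization.
	 */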
533 	if (error == ENETRESET) {
534 		if (ifp->if_flags & IFF_RUNNING) {
535 			ixgbe_disable_intr(sc);
536 			ixgbe_iff(sc);
537 			ixgbe_enable_intr(sc);
538 			ixgbe_enable_queues(sc);
539 		}
540 		error = 0;
541 	}
542 
543 	splx(s);
544 	return (error);
545 }
546 
547 int
548 ixgbe_get_sffpage(struct ix_softc *sc, struct if_sffpage *sff)
549 {
550 	struct ixgbe_hw *hw = &sc->hw;
551 	uint32_t swfw_mask = hw->phy.phy_semaphore_mask;
552 	uint8_t page;
553 	size_t i;
554 	int error = EIO;
555 
556 	if (hw->phy.type == ixgbe_phy_fw)
557 		return (ENODEV);
558 
559 	if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
560 		return (EBUSY); /* XXX */
561 
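	/*
	 * Byte 127 of the module EEPROM (I2C address 0xA0) acts as the
	 * page-select register: remember the current page, switch to the
	 * page the caller asked for, and restore the original page once
	 * the data has been read.
	 */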
562 	if (sff->sff_addr == IFSFF_ADDR_EEPROM) {
563 		if (hw->phy.ops.read_i2c_byte_unlocked(hw, 127,
564 		    IFSFF_ADDR_EEPROM, &page))
565 			goto error;
566 		if (page != sff->sff_page &&
567 		    hw->phy.ops.write_i2c_byte_unlocked(hw, 127,
568 		    IFSFF_ADDR_EEPROM, sff->sff_page))
569 			goto error;
570 	}
571 
572 	for (i = 0; i < sizeof(sff->sff_data); i++) {
573 		if (hw->phy.ops.read_i2c_byte_unlocked(hw, i,
574 		    sff->sff_addr, &sff->sff_data[i]))
575 			goto error;
576 	}
577 
578 	if (sff->sff_addr == IFSFF_ADDR_EEPROM) {
579 		if (page != sff->sff_page &&
580 		    hw->phy.ops.write_i2c_byte_unlocked(hw, 127,
581 		    IFSFF_ADDR_EEPROM, page))
582 			goto error;
583 	}
584 
585 	error = 0;
586 error:
587 	hw->mac.ops.release_swfw_sync(hw, swfw_mask);
588 	return (error);
589 }
590 
591 int
592 ixgbe_rxrinfo(struct ix_softc *sc, struct if_rxrinfo *ifri)
593 {
594 	struct if_rxring_info *ifr, ifr1;
595 	struct rx_ring *rxr;
596 	int error, i;
597 	u_int n = 0;
598 
599 	if (sc->num_queues > 1) {
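	/*
	 * With a single queue the ring info fits in the on-stack ifr1;
	 * only multi-queue configurations need a temporary allocation.
	 */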
600 		if ((ifr = mallocarray(sc->num_queues, sizeof(*ifr), M_DEVBUF,
601 		    M_WAITOK | M_ZERO)) == NULL)
602 			return (ENOMEM);
603 	} else
604 		ifr = &ifr1;
605 
606 	for (i = 0; i < sc->num_queues; i++) {
607 		rxr = &sc->rx_rings[i];
608 		ifr[n].ifr_size = sc->rx_mbuf_sz;
609 		snprintf(ifr[n].ifr_name, sizeof(ifr[n].ifr_name), "%d", i);
610 		ifr[n].ifr_info = rxr->rx_ring;
611 		n++;
612 	}
613 
614 	error = if_rxr_info_ioctl(ifri, sc->num_queues, ifr);
615 
616 	if (sc->num_queues > 1)
617 		free(ifr, M_DEVBUF, sc->num_queues * sizeof(*ifr));
618 	return (error);
619 }
620 
621 /*********************************************************************
622  *  Watchdog entry point
623  *
624  **********************************************************************/
625 
626 void
627 ixgbe_watchdog(struct ifnet * ifp)
628 {
629 	struct ix_softc *sc = (struct ix_softc *)ifp->if_softc;
630 	struct tx_ring *txr = sc->tx_rings;
631 	struct ixgbe_hw *hw = &sc->hw;
632 	int		tx_hang = FALSE;
633 	int		i;
634 
635 	/*
636 	 * The timer is set to IXGBE_TX_TIMEOUT every time ixgbe_start() queues a packet.
637 	 * Any time all descriptors are clean the timer is reset to 0.
638 	 */
639 	for (i = 0; i < sc->num_queues; i++, txr++) {
640 		if (txr->watchdog_timer == 0 || --txr->watchdog_timer)
641 			continue;
642 		else {
643 			tx_hang = TRUE;
644 			break;
645 		}
646 	}
647 	if (tx_hang == FALSE)
648 		return;
649 
650 	/*
651 	 * If we are in this routine because of pause frames, then don't
652 	 * reset the hardware.
653 	 */
654 	if (!(IXGBE_READ_REG(hw, IXGBE_TFCS) & IXGBE_TFCS_TXON)) {
655 		for (i = 0; i < sc->num_queues; i++, txr++)
656 			txr->watchdog_timer = IXGBE_TX_TIMEOUT;
657 		ifp->if_timer = IXGBE_TX_TIMEOUT;
658 		return;
659 	}
660 
661 
662 	printf("%s: Watchdog timeout -- resetting\n", ifp->if_xname);
663 	for (i = 0; i < sc->num_queues; i++, txr++) {
664 		printf("%s: Queue(%d) tdh = %d, hw tdt = %d\n", ifp->if_xname, i,
665 		    IXGBE_READ_REG(hw, IXGBE_TDH(i)),
666 		    IXGBE_READ_REG(hw, IXGBE_TDT(i)));
667 		printf("%s: TX(%d) Next TX to Clean = %d\n", ifp->if_xname,
668 		    i, txr->next_to_clean);
669 	}
670 	ifp->if_flags &= ~IFF_RUNNING;
671 
672 	ixgbe_init(sc);
673 }
674 
675 /*********************************************************************
676  *  Init entry point
677  *
678  *  This routine is used in two ways. It is used by the stack as
679  *  init entry point in network interface structure. It is also used
680  *  by the driver as a hw/sw initialization routine to get to a
681  *  consistent state.
682  *
683  *  return 0 on success, positive on failure
684  **********************************************************************/
685 #define IXGBE_MHADD_MFS_SHIFT 16
686 
687 void
688 ixgbe_init(void *arg)
689 {
690 	struct ix_softc	*sc = (struct ix_softc *)arg;
691 	struct ifnet	*ifp = &sc->arpcom.ac_if;
692 	struct rx_ring	*rxr = sc->rx_rings;
693 	uint32_t	 k, txdctl, rxdctl, rxctrl, mhadd, itr;
694 	int		 i, s, err;
695 
696 	INIT_DEBUGOUT("ixgbe_init: begin");
697 
698 	s = splnet();
699 
700 	ixgbe_stop(sc);
701 
702 	/* reprogram the RAR[0] in case user changed it. */
703 	ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, IXGBE_RAH_AV);
704 
705 	/* Get the latest mac address, User can use a LAA */
706 	bcopy(sc->arpcom.ac_enaddr, sc->hw.mac.addr,
707 	      IXGBE_ETH_LENGTH_OF_ADDRESS);
708 	ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, 1);
709 	sc->hw.addr_ctrl.rar_used_count = 1;
710 
711 	/* Prepare transmit descriptors and buffers */
712 	if (ixgbe_setup_transmit_structures(sc)) {
713 		printf("%s: Could not setup transmit structures\n",
714 		    ifp->if_xname);
715 		ixgbe_stop(sc);
716 		splx(s);
717 		return;
718 	}
719 
720 	ixgbe_init_hw(&sc->hw);
721 	ixgbe_initialize_transmit_units(sc);
722 
723 	/* Use 2k clusters, even for jumbo frames */
724 	sc->rx_mbuf_sz = MCLBYTES + ETHER_ALIGN;
725 
726 	/* Prepare receive descriptors and buffers */
727 	if (ixgbe_setup_receive_structures(sc)) {
728 		printf("%s: Could not setup receive structures\n",
729 		    ifp->if_xname);
730 		ixgbe_stop(sc);
731 		splx(s);
732 		return;
733 	}
734 
735 	/* Configure RX settings */
736 	ixgbe_initialize_receive_units(sc);
737 
738 	/* Enable SDP & MSIX interrupts based on adapter */
739 	ixgbe_config_gpie(sc);
740 
741 	/* Program promiscuous mode and multicast filters. */
742 	ixgbe_iff(sc);
743 
744 	/* Set MRU size */
745 	mhadd = IXGBE_READ_REG(&sc->hw, IXGBE_MHADD);
746 	mhadd &= ~IXGBE_MHADD_MFS_MASK;
747 	mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
748 	IXGBE_WRITE_REG(&sc->hw, IXGBE_MHADD, mhadd);
749 
750 	/* Now enable all the queues */
751 	for (i = 0; i < sc->num_queues; i++) {
752 		txdctl = IXGBE_READ_REG(&sc->hw, IXGBE_TXDCTL(i));
753 		txdctl |= IXGBE_TXDCTL_ENABLE;
754 		/* Set WTHRESH to 8, burst writeback */
755 		txdctl |= (8 << 16);
756 		/*
757 		 * When the internal queue falls below PTHRESH (16),
758 		 * start prefetching as long as there are at least
759 		 * HTHRESH (1) buffers ready.
760 		 */
761 		txdctl |= (16 << 0) | (1 << 8);
762 		IXGBE_WRITE_REG(&sc->hw, IXGBE_TXDCTL(i), txdctl);
763 	}
764 
765 	for (i = 0; i < sc->num_queues; i++) {
766 		rxdctl = IXGBE_READ_REG(&sc->hw, IXGBE_RXDCTL(i));
767 		if (sc->hw.mac.type == ixgbe_mac_82598EB) {
768 			/*
769 			 * PTHRESH = 21
770 			 * HTHRESH = 4
771 			 * WTHRESH = 8
772 			 */
773 			rxdctl &= ~0x3FFFFF;
774 			rxdctl |= 0x080420;
775 		}
776 		rxdctl |= IXGBE_RXDCTL_ENABLE;
777 		IXGBE_WRITE_REG(&sc->hw, IXGBE_RXDCTL(i), rxdctl);
778 		for (k = 0; k < 10; k++) {
779 			if (IXGBE_READ_REG(&sc->hw, IXGBE_RXDCTL(i)) &
780 			    IXGBE_RXDCTL_ENABLE)
781 				break;
782 			else
783 				msec_delay(1);
784 		}
785 		IXGBE_WRITE_FLUSH(&sc->hw);
786 		IXGBE_WRITE_REG(&sc->hw, IXGBE_RDT(i), rxr->last_desc_filled);
787 	}
788 
789 	/* Set up VLAN support and filter */
790 	ixgbe_setup_vlan_hw_support(sc);
791 
792 	/* Enable Receive engine */
793 	rxctrl = IXGBE_READ_REG(&sc->hw, IXGBE_RXCTRL);
794 	if (sc->hw.mac.type == ixgbe_mac_82598EB)
795 		rxctrl |= IXGBE_RXCTRL_DMBYPS;
796 	rxctrl |= IXGBE_RXCTRL_RXEN;
797 	sc->hw.mac.ops.enable_rx_dma(&sc->hw, rxctrl);
798 
799 	/* Set up MSI/X routing */
800 	if (sc->sc_intrmap) {
801 		ixgbe_configure_ivars(sc);
802 		/* Set up auto-mask */
803 		if (sc->hw.mac.type == ixgbe_mac_82598EB)
804 			IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
805 		else {
806 			IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
807 			IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
808 		}
809 	} else {  /* Simple settings for Legacy/MSI */
810 		ixgbe_set_ivar(sc, 0, 0, 0);
811 		ixgbe_set_ivar(sc, 0, 0, 1);
812 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
813 	}
814 
815 	/* Check on any SFP devices that need to be kick-started */
816 	if (sc->hw.phy.type == ixgbe_phy_none) {
817 		err = sc->hw.phy.ops.identify(&sc->hw);
818 		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
819 			printf("Unsupported SFP+ module type was detected.\n");
820 			splx(s);
821 			return;
822 		}
823 	}
824 
825 	/* Setup interrupt moderation */
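	/*
	 * The EITR interval is derived from the target interrupt rate
	 * (IXGBE_INTS_PER_SEC) and masked down to the bits of the interval
	 * field; on MACs newer than the 82598 the low-latency moderation
	 * and counter write-disable bits are also set.
	 */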
826 	itr = (4000000 / IXGBE_INTS_PER_SEC) & 0xff8;
827 	if (sc->hw.mac.type != ixgbe_mac_82598EB)
828 		itr |= IXGBE_EITR_LLI_MOD | IXGBE_EITR_CNT_WDIS;
829 	IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(0), itr);
830 
831 	if (sc->sc_intrmap) {
832 		/* Set moderation on the Link interrupt */
833 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(sc->linkvec),
834 		    IXGBE_LINK_ITR);
835 	}
836 
837 	/* Enable power to the phy */
838 	if (sc->hw.phy.ops.set_phy_power)
839 		sc->hw.phy.ops.set_phy_power(&sc->hw, TRUE);
840 
841 	/* Config/Enable Link */
842 	ixgbe_config_link(sc);
843 
844 	/* Hardware Packet Buffer & Flow Control setup */
845 	ixgbe_config_delay_values(sc);
846 
847 	/* Initialize the FC settings */
848 	sc->hw.mac.ops.start_hw(&sc->hw);
849 
850 	/* And now turn on interrupts */
851 	ixgbe_enable_intr(sc);
852 	ixgbe_enable_queues(sc);
853 
854 	/* Now inform the stack we're ready */
855 	ifp->if_flags |= IFF_RUNNING;
856 	for (i = 0; i < sc->num_queues; i++)
857 		ifq_clr_oactive(ifp->if_ifqs[i]);
858 
859 #if NKSTAT > 0
860 	ix_kstats_tick(sc);
861 #endif
862 
863 	splx(s);
864 }
865 
866 void
867 ixgbe_config_gpie(struct ix_softc *sc)
868 {
869 	struct ixgbe_hw	*hw = &sc->hw;
870 	uint32_t gpie;
871 
872 	gpie = IXGBE_READ_REG(&sc->hw, IXGBE_GPIE);
873 
874 	/* Fan Failure Interrupt */
875 	if (hw->device_id == IXGBE_DEV_ID_82598AT)
876 		gpie |= IXGBE_SDP1_GPIEN;
877 
878 	if (sc->hw.mac.type == ixgbe_mac_82599EB) {
879 		/* Add for Module detection */
880 		gpie |= IXGBE_SDP2_GPIEN;
881 
882 		/* Media ready */
883 		if (hw->device_id != IXGBE_DEV_ID_82599_QSFP_SF_QP)
884 			gpie |= IXGBE_SDP1_GPIEN;
885 
886 		/*
887 		 * Set LL interval to max to reduce the number of low latency
888 		 * interrupts hitting the card when the ring is getting full.
889 		 */
890 		gpie |= 0xf << IXGBE_GPIE_LLI_DELAY_SHIFT;
891 	}
892 
893 	if (sc->hw.mac.type == ixgbe_mac_X540 ||
894 	    sc->hw.mac.type == ixgbe_mac_X550EM_x ||
895 	    sc->hw.mac.type == ixgbe_mac_X550EM_a) {
896 		/*
897 		 * Thermal Failure Detection (X540)
898 		 * Link Detection (X552 SFP+, X552/X557-AT)
899 		 */
900 		gpie |= IXGBE_SDP0_GPIEN_X540;
901 
902 		/*
903 		 * Set LL interval to max to reduce the number of low latency
904 		 * interrupts hitting the card when the ring is getting full.
905 		 */
906 		gpie |= 0xf << IXGBE_GPIE_LLI_DELAY_SHIFT;
907 	}
908 
909 	if (sc->sc_intrmap) {
910 		/* Enable Enhanced MSIX mode */
911 		gpie |= IXGBE_GPIE_MSIX_MODE;
912 		gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
913 		    IXGBE_GPIE_OCD;
914 	}
915 
916 	IXGBE_WRITE_REG(&sc->hw, IXGBE_GPIE, gpie);
917 }
918 
919 /*
920  * Requires sc->max_frame_size to be set.
921  */
922 void
923 ixgbe_config_delay_values(struct ix_softc *sc)
924 {
925 	struct ixgbe_hw *hw = &sc->hw;
926 	uint32_t rxpb, frame, size, tmp;
927 
928 	frame = sc->max_frame_size;
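	/*
	 * The DV/LOW_DV macros compute the flow control delay values in
	 * bits for this frame size; IXGBE_BT2KB() converts them to
	 * kilobytes so they can be used together with the RX packet buffer
	 * size to program the high and low watermarks.
	 */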
929 
930 	/* Calculate High Water */
931 	switch (hw->mac.type) {
932 	case ixgbe_mac_X540:
933 	case ixgbe_mac_X550:
934 	case ixgbe_mac_X550EM_x:
935 	case ixgbe_mac_X550EM_a:
936 		tmp = IXGBE_DV_X540(frame, frame);
937 		break;
938 	default:
939 		tmp = IXGBE_DV(frame, frame);
940 		break;
941 	}
942 	size = IXGBE_BT2KB(tmp);
943 	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
944 	hw->fc.high_water[0] = rxpb - size;
945 
946 	/* Now calculate Low Water */
947 	switch (hw->mac.type) {
948 	case ixgbe_mac_X540:
949 	case ixgbe_mac_X550:
950 	case ixgbe_mac_X550EM_x:
951 	case ixgbe_mac_X550EM_a:
952 		tmp = IXGBE_LOW_DV_X540(frame);
953 		break;
954 	default:
955 		tmp = IXGBE_LOW_DV(frame);
956 		break;
957 	}
958 	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
959 
960 	hw->fc.requested_mode = sc->fc;
961 	hw->fc.pause_time = IXGBE_FC_PAUSE;
962 	hw->fc.send_xon = TRUE;
963 }
964 
965 /*
966  * MSI-X interrupt mask helpers
967  */
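/*
 * Queue interrupts are masked per MSI-X vector: the 82598 keeps all
 * queue bits in a single EIMS/EIMC register, while later MACs split
 * the 64-bit queue mask across the two EIMS_EX/EIMC_EX registers.
 */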
968 void
969 ixgbe_enable_queue(struct ix_softc *sc, uint32_t vector)
970 {
971 	uint64_t queue = 1ULL << vector;
972 	uint32_t mask;
973 
974 	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
975 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
976 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMS, mask);
977 	} else {
978 		mask = (queue & 0xFFFFFFFF);
979 		if (mask)
980 			IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMS_EX(0), mask);
981 		mask = (queue >> 32);
982 		if (mask)
983 			IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMS_EX(1), mask);
984 	}
985 }
986 
987 void
988 ixgbe_enable_queues(struct ix_softc *sc)
989 {
990 	struct ix_queue *que;
991 	int i;
992 
993 	for (i = 0, que = sc->queues; i < sc->num_queues; i++, que++)
994 		ixgbe_enable_queue(sc, que->msix);
995 }
996 
997 void
998 ixgbe_disable_queue(struct ix_softc *sc, uint32_t vector)
999 {
1000 	uint64_t queue = 1ULL << vector;
1001 	uint32_t mask;
1002 
1003 	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
1004 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
1005 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, mask);
1006 	} else {
1007 		mask = (queue & 0xFFFFFFFF);
1008 		if (mask)
1009 			IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(0), mask);
1010 		mask = (queue >> 32);
1011 		if (mask)
1012 			IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(1), mask);
1013 	}
1014 }
1015 
1016 /*
1017  * MSIX Interrupt Handlers
1018  */
1019 int
1020 ixgbe_link_intr(void *vsc)
1021 {
1022 	struct ix_softc	*sc = (struct ix_softc *)vsc;
1023 
1024 	return ixgbe_intr(sc);
1025 }
1026 
1027 int
1028 ixgbe_queue_intr(void *vque)
1029 {
1030 	struct ix_queue *que = vque;
1031 	struct ix_softc	*sc = que->sc;
1032 	struct ifnet	*ifp = &sc->arpcom.ac_if;
1033 	struct rx_ring	*rxr = que->rxr;
1034 	struct tx_ring	*txr = que->txr;
1035 
1036 	if (ISSET(ifp->if_flags, IFF_RUNNING)) {
1037 		ixgbe_rxeof(rxr);
1038 		ixgbe_txeof(txr);
1039 		ixgbe_rxrefill(rxr);
1040 	}
1041 
1042 	ixgbe_enable_queue(sc, que->msix);
1043 
1044 	return (1);
1045 }
1046 
1047 /*********************************************************************
1048  *
1049  *  Legacy Interrupt Service routine
1050  *
1051  **********************************************************************/
1052 
1053 int
1054 ixgbe_legacy_intr(void *arg)
1055 {
1056 	struct ix_softc	*sc = (struct ix_softc *)arg;
1057 	struct ifnet	*ifp = &sc->arpcom.ac_if;
1058 	struct rx_ring	*rxr = sc->rx_rings;
1059 	struct tx_ring	*txr = sc->tx_rings;
1060 	int rv;
1061 
1062 	rv = ixgbe_intr(sc);
1063 	if (rv == 0) {
1064 		ixgbe_enable_queues(sc);
1065 		return (0);
1066 	}
1067 
1068 	if (ISSET(ifp->if_flags, IFF_RUNNING)) {
1069 		ixgbe_rxeof(rxr);
1070 		ixgbe_txeof(txr);
1071 		ixgbe_rxrefill(rxr);
1072 	}
1073 
1074 	ixgbe_enable_queues(sc);
1075 	return (rv);
1076 }
1077 
1078 int
1079 ixgbe_intr(struct ix_softc *sc)
1080 {
1081 	struct ifnet	*ifp = &sc->arpcom.ac_if;
1082 	struct ixgbe_hw	*hw = &sc->hw;
1083 	uint32_t	 reg_eicr, mod_mask, msf_mask;
1084 
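	/*
	 * Take a snapshot of the pending interrupt causes; individual
	 * cause bits are acknowledged below by writing them back to EICR
	 * where the hardware does not clear them on read.
	 */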
1085 	reg_eicr = IXGBE_READ_REG(&sc->hw, IXGBE_EICR);
1086 	if (reg_eicr == 0) {
1087 		ixgbe_enable_intr(sc);
1088 		return (0);
1089 	}
1090 
1091 	/* Link status change */
1092 	if (reg_eicr & IXGBE_EICR_LSC) {
1093 		KERNEL_LOCK();
1094 		ixgbe_update_link_status(sc);
1095 		KERNEL_UNLOCK();
1096 	}
1097 
1098 	if (hw->mac.type != ixgbe_mac_82598EB) {
1099 		if (reg_eicr & IXGBE_EICR_ECC) {
1100 			printf("%s: CRITICAL: ECC ERROR!! "
1101 			    "Please Reboot!!\n", sc->dev.dv_xname);
1102 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
1103 		}
1104 		/* Check for over temp condition */
1105 		if (reg_eicr & IXGBE_EICR_TS) {
1106 			printf("%s: CRITICAL: OVER TEMP!! "
1107 			    "PHY IS SHUT DOWN!!\n", ifp->if_xname);
1108 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
1109 		}
1110 	}
1111 
1112 	/* Pluggable optics-related interrupt */
1113 	if (ixgbe_is_sfp(hw)) {
1114 		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) {
1115 			mod_mask = IXGBE_EICR_GPI_SDP0_X540;
1116 			msf_mask = IXGBE_EICR_GPI_SDP1_X540;
1117 		} else if (hw->mac.type == ixgbe_mac_X540 ||
1118 		    hw->mac.type == ixgbe_mac_X550 ||
1119 		    hw->mac.type == ixgbe_mac_X550EM_x) {
1120 			mod_mask = IXGBE_EICR_GPI_SDP2_X540;
1121 			msf_mask = IXGBE_EICR_GPI_SDP1_X540;
1122 		} else {
1123 			mod_mask = IXGBE_EICR_GPI_SDP2;
1124 			msf_mask = IXGBE_EICR_GPI_SDP1;
1125 		}
1126 		if (reg_eicr & mod_mask) {
1127 			/* Clear the interrupt */
1128 			IXGBE_WRITE_REG(hw, IXGBE_EICR, mod_mask);
1129 			KERNEL_LOCK();
1130 			ixgbe_handle_mod(sc);
1131 			KERNEL_UNLOCK();
1132 		} else if ((hw->phy.media_type != ixgbe_media_type_copper) &&
1133 		    (reg_eicr & msf_mask)) {
1134 			/* Clear the interrupt */
1135 			IXGBE_WRITE_REG(hw, IXGBE_EICR, msf_mask);
1136 			KERNEL_LOCK();
1137 			ixgbe_handle_msf(sc);
1138 			KERNEL_UNLOCK();
1139 		}
1140 	}
1141 
1142 	/* Check for fan failure */
1143 	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
1144 	    (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
1145 		printf("%s: CRITICAL: FAN FAILURE!! "
1146 		    "REPLACE IMMEDIATELY!!\n", ifp->if_xname);
1147 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1148 	}
1149 
1150 	/* External PHY interrupt */
1151 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
1152 	    (reg_eicr & IXGBE_EICR_GPI_SDP0_X540)) {
1153 		/* Clear the interrupt */
1154 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
1155 		KERNEL_LOCK();
1156 		ixgbe_handle_phy(sc);
1157 		KERNEL_UNLOCK();
1158 	}
1159 
1160 	return (1);
1161 }
1162 
1163 /*********************************************************************
1164  *
1165  *  Media Ioctl callback
1166  *
1167  *  This routine is called whenever the user queries the status of
1168  *  the interface using ifconfig.
1169  *
1170  **********************************************************************/
1171 void
1172 ixgbe_media_status(struct ifnet * ifp, struct ifmediareq *ifmr)
1173 {
1174 	struct ix_softc *sc = ifp->if_softc;
1175 	uint64_t layer;
1176 
1177 	ifmr->ifm_active = IFM_ETHER;
1178 	ifmr->ifm_status = IFM_AVALID;
1179 
1180 	INIT_DEBUGOUT("ixgbe_media_status: begin");
1181 	ixgbe_update_link_status(sc);
1182 
1183 	if (!LINK_STATE_IS_UP(ifp->if_link_state))
1184 		return;
1185 
1186 	ifmr->ifm_status |= IFM_ACTIVE;
1187 	layer = sc->phy_layer;
1188 
1189 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
1190 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
1191 	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
1192 	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
1193 		switch (sc->link_speed) {
1194 		case IXGBE_LINK_SPEED_10GB_FULL:
1195 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
1196 			break;
1197 		case IXGBE_LINK_SPEED_1GB_FULL:
1198 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1199 			break;
1200 		case IXGBE_LINK_SPEED_100_FULL:
1201 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
1202 			break;
1203 		case IXGBE_LINK_SPEED_10_FULL:
1204 			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
1205 			break;
1206 		}
1207 	}
1208 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1209 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
1210 		switch (sc->link_speed) {
1211 		case IXGBE_LINK_SPEED_10GB_FULL:
1212 			ifmr->ifm_active |= IFM_10G_SFP_CU | IFM_FDX;
1213 			break;
1214 		}
1215 	}
1216 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1217 		switch (sc->link_speed) {
1218 		case IXGBE_LINK_SPEED_10GB_FULL:
1219 			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
1220 			break;
1221 		case IXGBE_LINK_SPEED_1GB_FULL:
1222 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
1223 			break;
1224 		}
1225 	}
1226 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
1227 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
1228 		switch (sc->link_speed) {
1229 		case IXGBE_LINK_SPEED_10GB_FULL:
1230 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
1231 			break;
1232 		case IXGBE_LINK_SPEED_1GB_FULL:
1233 			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
1234 			break;
1235 		}
1236 	}
1237 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
1238 		switch (sc->link_speed) {
1239 		case IXGBE_LINK_SPEED_10GB_FULL:
1240 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
1241 			break;
1242 		}
1243 	}
1244 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1245 		switch (sc->link_speed) {
1246 		case IXGBE_LINK_SPEED_10GB_FULL:
1247 			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
1248 			break;
1249 		case IXGBE_LINK_SPEED_2_5GB_FULL:
1250 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
1251 			break;
1252 		case IXGBE_LINK_SPEED_1GB_FULL:
1253 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
1254 			break;
1255 		}
1256 	} else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
1257 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
1258 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1259 		switch (sc->link_speed) {
1260 		case IXGBE_LINK_SPEED_10GB_FULL:
1261 			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
1262 			break;
1263 		case IXGBE_LINK_SPEED_2_5GB_FULL:
1264 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
1265 			break;
1266 		case IXGBE_LINK_SPEED_1GB_FULL:
1267 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
1268 			break;
1269 		}
1270 	}
1271 
1272 	switch (sc->hw.fc.current_mode) {
1273 	case ixgbe_fc_tx_pause:
1274 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
1275 		break;
1276 	case ixgbe_fc_rx_pause:
1277 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
1278 		break;
1279 	case ixgbe_fc_full:
1280 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE |
1281 		    IFM_ETH_TXPAUSE;
1282 		break;
1283 	default:
1284 		ifmr->ifm_active &= ~(IFM_FLOW | IFM_ETH_RXPAUSE |
1285 		    IFM_ETH_TXPAUSE);
1286 		break;
1287 	}
1288 }
1289 
1290 /*********************************************************************
1291  *
1292  *  Media Ioctl callback
1293  *
1294  *  This routine is called when the user changes speed/duplex using
1295  *  media/mediaopt options with ifconfig.
1296  *
1297  **********************************************************************/
1298 int
1299 ixgbe_media_change(struct ifnet *ifp)
1300 {
1301 	struct ix_softc	*sc = ifp->if_softc;
1302 	struct ixgbe_hw	*hw = &sc->hw;
1303 	struct ifmedia	*ifm = &sc->media;
1304 	ixgbe_link_speed speed = 0;
1305 
1306 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1307 		return (EINVAL);
1308 
1309 	if (hw->phy.media_type == ixgbe_media_type_backplane)
1310 		return (ENODEV);
1311 
1312 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
1313 		case IFM_AUTO:
1314 		case IFM_10G_T:
1315 			speed |= IXGBE_LINK_SPEED_100_FULL;
1316 			speed |= IXGBE_LINK_SPEED_1GB_FULL;
1317 			speed |= IXGBE_LINK_SPEED_10GB_FULL;
1318 			break;
1319 		case IFM_10G_SR:
1320 		case IFM_10G_KR:
1321 		case IFM_10G_LR:
1322 		case IFM_10G_LRM:
1323 		case IFM_10G_CX4:
1324 		case IFM_10G_KX4:
1325 			speed |= IXGBE_LINK_SPEED_1GB_FULL;
1326 			speed |= IXGBE_LINK_SPEED_10GB_FULL;
1327 			break;
1328 		case IFM_10G_SFP_CU:
1329 			speed |= IXGBE_LINK_SPEED_10GB_FULL;
1330 			break;
1331 		case IFM_1000_T:
1332 			speed |= IXGBE_LINK_SPEED_100_FULL;
1333 			speed |= IXGBE_LINK_SPEED_1GB_FULL;
1334 			break;
1335 		case IFM_1000_LX:
1336 		case IFM_1000_SX:
1337 		case IFM_1000_CX:
1338 		case IFM_1000_KX:
1339 			speed |= IXGBE_LINK_SPEED_1GB_FULL;
1340 			break;
1341 		case IFM_100_TX:
1342 			speed |= IXGBE_LINK_SPEED_100_FULL;
1343 			break;
1344 		case IFM_10_T:
1345 			speed |= IXGBE_LINK_SPEED_10_FULL;
1346 			break;
1347 		default:
1348 			return (EINVAL);
1349 	}
1350 
1351 	hw->mac.autotry_restart = TRUE;
1352 	hw->mac.ops.setup_link(hw, speed, TRUE);
1353 
1354 	return (0);
1355 }
1356 
1357 /*********************************************************************
1358  *
1359  *  This routine maps the mbufs to tx descriptors, allowing the
1360  *  TX engine to transmit the packets.
1361  *  	- returns the number of descriptors used, 0 on failure
1362  *
1363  **********************************************************************/
1364 
1365 int
1366 ixgbe_encap(struct tx_ring *txr, struct mbuf *m_head)
1367 {
1368 	struct ix_softc *sc = txr->sc;
1369 	uint32_t	olinfo_status = 0, cmd_type_len;
1370 	int             i, j, ntxc;
1371 	int		first, last = 0;
1372 	bus_dmamap_t	map;
1373 	struct ixgbe_tx_buf *txbuf;
1374 	union ixgbe_adv_tx_desc *txd = NULL;
1375 
1376 	/* Basic descriptor defines */
1377 	cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
1378 	    IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
1379 
1380 #if NVLAN > 0
1381 	if (m_head->m_flags & M_VLANTAG)
1382 		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
1383 #endif
1384 
1385 	/*
1386 	 * Important to capture the first descriptor
1387 	 * used because it will contain the index of
1388 	 * the one we tell the hardware to report back
1389 	 */
1390 	first = txr->next_avail_desc;
1391 	txbuf = &txr->tx_buffers[first];
1392 	map = txbuf->map;
1393 
1394 	/*
1395 	 * Map the packet for DMA.
1396 	 */
1397 	switch (bus_dmamap_load_mbuf(txr->txdma.dma_tag, map,
1398 	    m_head, BUS_DMA_NOWAIT)) {
1399 	case 0:
1400 		break;
1401 	case EFBIG:
1402 		if (m_defrag(m_head, M_NOWAIT) == 0 &&
1403 		    bus_dmamap_load_mbuf(txr->txdma.dma_tag, map,
1404 		     m_head, BUS_DMA_NOWAIT) == 0)
1405 			break;
1406 		/* FALLTHROUGH */
1407 	default:
1408 		return (0);
1409 	}
1410 
1411 	/*
1412 	 * Set the appropriate offload context;
1413 	 * this becomes the first descriptor.
1414 	 */
1415 	ntxc = ixgbe_tx_ctx_setup(txr, m_head, &cmd_type_len, &olinfo_status);
1416 	if (ntxc == -1)
1417 		goto xmit_fail;
1418 
1419 	i = txr->next_avail_desc + ntxc;
1420 	if (i >= sc->num_tx_desc)
1421 		i -= sc->num_tx_desc;
1422 
1423 	for (j = 0; j < map->dm_nsegs; j++) {
1424 		txd = &txr->tx_base[i];
1425 
1426 		txd->read.buffer_addr = htole64(map->dm_segs[j].ds_addr);
1427 		txd->read.cmd_type_len = htole32(txr->txd_cmd |
1428 		    cmd_type_len | map->dm_segs[j].ds_len);
1429 		txd->read.olinfo_status = htole32(olinfo_status);
1430 		last = i; /* descriptor that will get completion IRQ */
1431 
1432 		if (++i == sc->num_tx_desc)
1433 			i = 0;
1434 	}
1435 
1436 	txd->read.cmd_type_len |=
1437 	    htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
1438 
1439 	bus_dmamap_sync(txr->txdma.dma_tag, map, 0, map->dm_mapsize,
1440 	    BUS_DMASYNC_PREWRITE);
1441 
1442 	/* Set the index of the descriptor that will be marked done */
1443 	txbuf->m_head = m_head;
1444 	txbuf->eop_index = last;
1445 
1446 	membar_producer();
1447 
1448 	txr->next_avail_desc = i;
1449 
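	/*
	 * Report the total number of descriptors consumed: the offload
	 * context descriptor(s), if any, plus one data descriptor per DMA
	 * segment.  The caller subtracts this from its free count.
	 */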
1450 	return (ntxc + j);
1451 
1452 xmit_fail:
1453 	bus_dmamap_unload(txr->txdma.dma_tag, txbuf->map);
1454 	return (0);
1455 }
1456 
1457 void
1458 ixgbe_iff(struct ix_softc *sc)
1459 {
1460 	struct ifnet *ifp = &sc->arpcom.ac_if;
1461 	struct arpcom *ac = &sc->arpcom;
1462 	uint32_t	fctrl;
1463 	uint8_t	*mta;
1464 	uint8_t	*update_ptr;
1465 	struct ether_multi *enm;
1466 	struct ether_multistep step;
1467 	int	mcnt = 0;
1468 
1469 	IOCTL_DEBUGOUT("ixgbe_iff: begin");
1470 
1471 	mta = sc->mta;
1472 	bzero(mta, sizeof(uint8_t) * IXGBE_ETH_LENGTH_OF_ADDRESS *
1473 	    MAX_NUM_MULTICAST_ADDRESSES);
1474 
1475 	fctrl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
1476 	fctrl &= ~(IXGBE_FCTRL_MPE | IXGBE_FCTRL_UPE);
1477 	ifp->if_flags &= ~IFF_ALLMULTI;
1478 
1479 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0 ||
1480 	    ac->ac_multicnt > MAX_NUM_MULTICAST_ADDRESSES) {
1481 		ifp->if_flags |= IFF_ALLMULTI;
1482 		fctrl |= IXGBE_FCTRL_MPE;
1483 		if (ifp->if_flags & IFF_PROMISC)
1484 			fctrl |= IXGBE_FCTRL_UPE;
1485 	} else {
1486 		ETHER_FIRST_MULTI(step, &sc->arpcom, enm);
1487 		while (enm != NULL) {
1488 			bcopy(enm->enm_addrlo,
1489 			    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1490 			    IXGBE_ETH_LENGTH_OF_ADDRESS);
1491 			mcnt++;
1492 
1493 			ETHER_NEXT_MULTI(step, enm);
1494 		}
1495 
1496 		update_ptr = mta;
1497 		sc->hw.mac.ops.update_mc_addr_list(&sc->hw, update_ptr, mcnt,
1498 		    ixgbe_mc_array_itr, TRUE);
1499 	}
1500 
1501 	IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, fctrl);
1502 }
1503 
1504 /*
1505  * This is an iterator function needed by the multicast shared code.
1506  * It simply feeds the shared code routine the addresses collected in
1507  * the array by ixgbe_iff(), one by one.
1508  */
1509 uint8_t *
1510 ixgbe_mc_array_itr(struct ixgbe_hw *hw, uint8_t **update_ptr, uint32_t *vmdq)
1511 {
1512 	uint8_t *addr = *update_ptr;
1513 	uint8_t *newptr;
1514 	*vmdq = 0;
1515 
1516 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1517 	*update_ptr = newptr;
1518 	return addr;
1519 }
1520 
1521 void
1522 ixgbe_update_link_status(struct ix_softc *sc)
1523 {
1524 	struct ifnet	*ifp = &sc->arpcom.ac_if;
1525 	int		link_state = LINK_STATE_DOWN;
1526 
1527 	ixgbe_check_link(&sc->hw, &sc->link_speed, &sc->link_up, 0);
1528 
1529 	ifp->if_baudrate = 0;
1530 	if (sc->link_up) {
1531 		link_state = LINK_STATE_FULL_DUPLEX;
1532 
1533 		switch (sc->link_speed) {
1534 		case IXGBE_LINK_SPEED_UNKNOWN:
1535 			ifp->if_baudrate = 0;
1536 			break;
1537 		case IXGBE_LINK_SPEED_100_FULL:
1538 			ifp->if_baudrate = IF_Mbps(100);
1539 			break;
1540 		case IXGBE_LINK_SPEED_1GB_FULL:
1541 			ifp->if_baudrate = IF_Gbps(1);
1542 			break;
1543 		case IXGBE_LINK_SPEED_10GB_FULL:
1544 			ifp->if_baudrate = IF_Gbps(10);
1545 			break;
1546 		}
1547 
1548 		/* Update any Flow Control changes */
1549 		sc->hw.mac.ops.fc_enable(&sc->hw);
1550 	}
1551 	if (ifp->if_link_state != link_state) {
1552 		ifp->if_link_state = link_state;
1553 		if_link_state_change(ifp);
1554 	}
1555 }
1556 
1557 
1558 /*********************************************************************
1559  *
1560  *  This routine disables all traffic on the adapter by issuing a
1561  *  global reset on the MAC and deallocates TX/RX buffers.
1562  *
1563  **********************************************************************/
1564 
1565 void
1566 ixgbe_stop(void *arg)
1567 {
1568 	struct ix_softc *sc = arg;
1569 	struct ifnet   *ifp = &sc->arpcom.ac_if;
1570 	int i;
1571 
1572 	/* Tell the stack that the interface is no longer active */
1573 	ifp->if_flags &= ~IFF_RUNNING;
1574 
1575 #if NKSTAT > 0
1576 	timeout_del(&sc->sc_kstat_tmo);
1577 #endif
1578 
1579 	INIT_DEBUGOUT("ixgbe_stop: begin\n");
1580 	ixgbe_disable_intr(sc);
1581 
1582 	sc->hw.mac.ops.reset_hw(&sc->hw);
1583 	sc->hw.adapter_stopped = FALSE;
1584 	sc->hw.mac.ops.stop_adapter(&sc->hw);
1585 	if (sc->hw.mac.type == ixgbe_mac_82599EB)
1586 		sc->hw.mac.ops.stop_mac_link_on_d3(&sc->hw);
1587 	/* Turn off the laser */
1588 	if (sc->hw.mac.ops.disable_tx_laser)
1589 		sc->hw.mac.ops.disable_tx_laser(&sc->hw);
1590 
1591 	/* reprogram the RAR[0] in case user changed it. */
1592 	ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, IXGBE_RAH_AV);
1593 
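	/*
	 * Wait for any interrupt handlers and transmit starts that are
	 * still running before the rings are torn down below.
	 */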
1594 	intr_barrier(sc->tag);
1595 	for (i = 0; i < sc->num_queues; i++) {
1596 		struct ifqueue *ifq = ifp->if_ifqs[i];
1597 		ifq_barrier(ifq);
1598 		ifq_clr_oactive(ifq);
1599 
1600 		intr_barrier(sc->queues[i].tag);
1601 		timeout_del(&sc->rx_rings[i].rx_refill);
1602 	}
1603 
1604 	KASSERT((ifp->if_flags & IFF_RUNNING) == 0);
1605 
1606 	/* Should we really clear all structures on stop? */
1607 	ixgbe_free_transmit_structures(sc);
1608 	ixgbe_free_receive_structures(sc);
1609 }
1610 
1611 
1612 /*********************************************************************
1613  *
1614  *  Determine hardware revision.
1615  *
1616  **********************************************************************/
1617 void
1618 ixgbe_identify_hardware(struct ix_softc *sc)
1619 {
1620 	struct ixgbe_osdep	*os = &sc->osdep;
1621 	struct pci_attach_args	*pa = &os->os_pa;
1622 	uint32_t		 reg;
1623 
1624 	/* Save off the information about this board */
1625 	sc->hw.vendor_id = PCI_VENDOR(pa->pa_id);
1626 	sc->hw.device_id = PCI_PRODUCT(pa->pa_id);
1627 
1628 	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
1629 	sc->hw.revision_id = PCI_REVISION(reg);
1630 
1631 	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
1632 	sc->hw.subsystem_vendor_id = PCI_VENDOR(reg);
1633 	sc->hw.subsystem_device_id = PCI_PRODUCT(reg);
1634 
1635 	/* We need this here to set the num_segs below */
1636 	ixgbe_set_mac_type(&sc->hw);
1637 
1638 	/* Pick up the 82599 and VF settings */
1639 	if (sc->hw.mac.type != ixgbe_mac_82598EB)
1640 		sc->hw.phy.smart_speed = ixgbe_smart_speed;
1641 	sc->num_segs = IXGBE_82599_SCATTER;
1642 }
1643 
1644 /*********************************************************************
1645  *
1646  *  Setup the Legacy or MSI Interrupt handler
1647  *
1648  **********************************************************************/
1649 int
1650 ixgbe_allocate_legacy(struct ix_softc *sc)
1651 {
1652 	struct ixgbe_osdep	*os = &sc->osdep;
1653 	struct pci_attach_args	*pa = &os->os_pa;
1654 	const char		*intrstr = NULL;
1655 	pci_chipset_tag_t	pc = pa->pa_pc;
1656 	pci_intr_handle_t	ih;
1657 
1658 	/* We allocate a single interrupt resource */
1659 	if (pci_intr_map_msi(pa, &ih) != 0 &&
1660 	    pci_intr_map(pa, &ih) != 0) {
1661 		printf(": couldn't map interrupt\n");
1662 		return (ENXIO);
1663 	}
1664 
1665 #if 0
1666 	/* XXX */
1667 	/* Tasklets for Link, SFP and Multispeed Fiber */
1668 	TASK_INIT(&sc->link_task, 0, ixgbe_handle_link, sc);
1669 	TASK_INIT(&sc->mod_task, 0, ixgbe_handle_mod, sc);
1670 	TASK_INIT(&sc->msf_task, 0, ixgbe_handle_msf, sc);
1671 #endif
1672 
1673 	intrstr = pci_intr_string(pc, ih);
1674 	sc->tag = pci_intr_establish(pc, ih, IPL_NET | IPL_MPSAFE,
1675 	    ixgbe_legacy_intr, sc, sc->dev.dv_xname);
1676 	if (sc->tag == NULL) {
1677 		printf(": couldn't establish interrupt");
1678 		if (intrstr != NULL)
1679 			printf(" at %s", intrstr);
1680 		printf("\n");
1681 		return (ENXIO);
1682 	}
1683 	printf(": %s", intrstr);
1684 
1685 	/* For simplicity in the handlers */
1686 	sc->que_mask = IXGBE_EIMS_ENABLE_MASK;
1687 
1688 	return (0);
1689 }
1690 
1691 /*********************************************************************
1692  *
1693  *  Setup the MSI-X Interrupt handlers
1694  *
1695  **********************************************************************/
1696 int
1697 ixgbe_allocate_msix(struct ix_softc *sc)
1698 {
1699 	struct ixgbe_osdep	*os = &sc->osdep;
1700 	struct pci_attach_args	*pa  = &os->os_pa;
1701 	int                      i = 0, error = 0;
1702 	struct ix_queue         *que;
1703 	pci_intr_handle_t	ih;
1704 
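	/*
	 * One MSI-X vector is established per queue; the vector after the
	 * last queue is reserved for link state and other admin events and
	 * is set up separately below.
	 */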
1705 	for (i = 0, que = sc->queues; i < sc->num_queues; i++, que++) {
1706 		if (pci_intr_map_msix(pa, i, &ih)) {
1707 			printf("ixgbe_allocate_msix: "
1708 			    "pci_intr_map_msix vec %d failed\n", i);
1709 			error = ENOMEM;
1710 			goto fail;
1711 		}
1712 
1713 		que->tag = pci_intr_establish_cpu(pa->pa_pc, ih,
1714 		    IPL_NET | IPL_MPSAFE, intrmap_cpu(sc->sc_intrmap, i),
1715 		    ixgbe_queue_intr, que, que->name);
1716 		if (que->tag == NULL) {
1717 			printf("ixgbe_allocate_msix: "
1718 			    "pci_intr_establish vec %d failed\n", i);
1719 			error = ENOMEM;
1720 			goto fail;
1721 		}
1722 
1723 		que->msix = i;
1724 	}
1725 
1726 	/* Now the link status/control last MSI-X vector */
1727 	if (pci_intr_map_msix(pa, i, &ih)) {
1728 		printf("ixgbe_allocate_msix: "
1729 		    "pci_intr_map_msix link vector failed\n");
1730 		error = ENOMEM;
1731 		goto fail;
1732 	}
1733 
1734 	sc->tag = pci_intr_establish(pa->pa_pc, ih, IPL_NET | IPL_MPSAFE,
1735 	    ixgbe_link_intr, sc, sc->dev.dv_xname);
1736 	if (sc->tag == NULL) {
1737 		printf("ixgbe_allocate_msix: "
1738 		    "pci_intr_establish link vector failed\n");
1739 		error = ENOMEM;
1740 		goto fail;
1741 	}
1742 	sc->linkvec = i;
1743 	printf(", %s, %d queue%s", pci_intr_string(pa->pa_pc, ih),
1744 	    i, (i > 1) ? "s" : "");
1745 
1746 	return (0);
1747 fail:
1748 	for (que = sc->queues; i > 0; i--, que++) {
1749 		if (que->tag == NULL)
1750 			continue;
1751 		pci_intr_disestablish(pa->pa_pc, que->tag);
1752 		que->tag = NULL;
1753 	}
1754 
1755 	return (error);
1756 }
1757 
1758 void
1759 ixgbe_setup_msix(struct ix_softc *sc)
1760 {
1761 	struct ixgbe_osdep	*os = &sc->osdep;
1762 	struct pci_attach_args	*pa = &os->os_pa;
1763 	int			 nmsix;
1764 	unsigned int		 maxq;
1765 
1766 	if (!ixgbe_enable_msix)
1767 		return;
1768 
1769 	nmsix = pci_intr_msix_count(pa->pa_pc, pa->pa_tag);
1770 	if (nmsix <= 1)
1771 		return;
1772 
1773 	/* give one vector to events */
1774 	nmsix--;
1775 
1776 	/* XXX the number of queues is limited to what we can keep stats on */
1777 	maxq = (sc->hw.mac.type == ixgbe_mac_82598EB) ? 8 : 16;
1778 
1779 	sc->sc_intrmap = intrmap_create(&sc->dev, nmsix, maxq, 0);
1780 	sc->num_queues = intrmap_count(sc->sc_intrmap);
1781 }
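
/*
 * Editor's note (illustrative sketch, not part of the driver): the MSI-X
 * layout produced above is simply "one vector per queue, then one final
 * vector for link/admin events", which is what ixgbe_allocate_msix()
 * programs.  The hypothetical helpers below only restate that layout;
 * the names ix_vec_is_link() and ix_vec_queue() are invented for this
 * example and do not exist in the driver.
 */
#if 0
static inline int
ix_vec_is_link(const struct ix_softc *sc, int vec)
{
	/* queue vectors are 0 .. num_queues - 1; the last one is link */
	return (vec == sc->num_queues);
}

static inline struct ix_queue *
ix_vec_queue(struct ix_softc *sc, int vec)
{
	if (vec < 0 || vec >= sc->num_queues)
		return (NULL);
	return (&sc->queues[vec]);
}
#endif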
1782 
1783 int
1784 ixgbe_allocate_pci_resources(struct ix_softc *sc)
1785 {
1786 	struct ixgbe_osdep	*os = &sc->osdep;
1787 	struct pci_attach_args	*pa = &os->os_pa;
1788 	int			 val;
1789 
1790 	val = pci_conf_read(pa->pa_pc, pa->pa_tag, PCIR_BAR(0));
1791 	if (PCI_MAPREG_TYPE(val) != PCI_MAPREG_TYPE_MEM) {
1792 		printf(": mmba is not mem space\n");
1793 		return (ENXIO);
1794 	}
1795 
1796 	if (pci_mapreg_map(pa, PCIR_BAR(0), PCI_MAPREG_MEM_TYPE(val), 0,
1797 	    &os->os_memt, &os->os_memh, &os->os_membase, &os->os_memsize, 0)) {
1798 		printf(": cannot find mem space\n");
1799 		return (ENXIO);
1800 	}
1801 	sc->hw.hw_addr = (uint8_t *)os->os_membase;
1802 
1803 	/* Legacy defaults */
1804 	sc->num_queues = 1;
1805 	sc->hw.back = os;
1806 
1807 	/* Now set up MSI-X; this determines how many queue vectors we can use. */
1808 	ixgbe_setup_msix(sc);
1809 
1810 	return (0);
1811 }
1812 
1813 void
1814 ixgbe_free_pci_resources(struct ix_softc * sc)
1815 {
1816 	struct ixgbe_osdep	*os = &sc->osdep;
1817 	struct pci_attach_args	*pa = &os->os_pa;
1818 	struct ix_queue *que = sc->queues;
1819 	int i;
1820 
1821 	/* Release all msix queue resources: */
1822 	for (i = 0; i < sc->num_queues; i++, que++) {
1823 		if (que->tag)
1824 			pci_intr_disestablish(pa->pa_pc, que->tag);
1825 		que->tag = NULL;
1826 	}
1827 
1828 	if (sc->tag)
1829 		pci_intr_disestablish(pa->pa_pc, sc->tag);
1830 	sc->tag = NULL;
1831 	if (os->os_membase != 0)
1832 		bus_space_unmap(os->os_memt, os->os_memh, os->os_memsize);
1833 	os->os_membase = 0;
1834 }
1835 
1836 /*********************************************************************
1837  *
1838  *  Setup networking device structure and register an interface.
1839  *
1840  **********************************************************************/
1841 void
1842 ixgbe_setup_interface(struct ix_softc *sc)
1843 {
1844 	struct ifnet   *ifp = &sc->arpcom.ac_if;
1845 	int i;
1846 
1847 	strlcpy(ifp->if_xname, sc->dev.dv_xname, IFNAMSIZ);
1848 	ifp->if_softc = sc;
1849 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1850 	ifp->if_xflags = IFXF_MPSAFE;
1851 	ifp->if_ioctl = ixgbe_ioctl;
1852 	ifp->if_qstart = ixgbe_start;
1853 	ifp->if_timer = 0;
1854 	ifp->if_watchdog = ixgbe_watchdog;
1855 	ifp->if_hardmtu = IXGBE_MAX_FRAME_SIZE -
1856 	    ETHER_HDR_LEN - ETHER_CRC_LEN;
1857 	ifq_set_maxlen(&ifp->if_snd, sc->num_tx_desc - 1);
1858 
1859 	ifp->if_capabilities = IFCAP_VLAN_MTU;
1860 
1861 #if NVLAN > 0
1862 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
1863 #endif
1864 
1865 	ifp->if_capabilities |= IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
1866 
1867 	/*
1868 	 * Specify the media types supported by this sc and register
1869 	 * callbacks to update media and link information
1870 	 */
1871 	ifmedia_init(&sc->media, IFM_IMASK, ixgbe_media_change,
1872 	    ixgbe_media_status);
1873 	ixgbe_add_media_types(sc);
1874 	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
1875 
1876 	if_attach(ifp);
1877 	ether_ifattach(ifp);
1878 
1879 	if_attach_queues(ifp, sc->num_queues);
1880 	if_attach_iqueues(ifp, sc->num_queues);
1881 	for (i = 0; i < sc->num_queues; i++) {
1882 		struct ifqueue *ifq = ifp->if_ifqs[i];
1883 		struct ifiqueue *ifiq = ifp->if_iqs[i];
1884 		struct tx_ring *txr = &sc->tx_rings[i];
1885 		struct rx_ring *rxr = &sc->rx_rings[i];
1886 
1887 		ifq->ifq_softc = txr;
1888 		txr->ifq = ifq;
1889 
1890 		ifiq->ifiq_softc = rxr;
1891 		rxr->ifiq = ifiq;
1892 
1893 #if NKSTAT > 0
1894 		ix_txq_kstats(sc, txr);
1895 		ix_rxq_kstats(sc, rxr);
1896 #endif
1897 	}
1898 
1899 	sc->max_frame_size = IXGBE_MAX_FRAME_SIZE;
1900 }
1901 
1902 void
1903 ixgbe_add_media_types(struct ix_softc *sc)
1904 {
1905 	struct ixgbe_hw	*hw = &sc->hw;
1906 	uint64_t layer;
1907 
1908 	sc->phy_layer = hw->mac.ops.get_supported_physical_layer(hw);
1909 	layer = sc->phy_layer;
1910 
1911 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
1912 		ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_T, 0, NULL);
1913 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
1914 		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1915 	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1916 		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1917 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1918 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1919 		ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_SFP_CU, 0, NULL);
1920 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1921 		ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
1922 		if (hw->phy.multispeed_fiber)
1923 			ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_LX, 0,
1924 			    NULL);
1925 	}
1926 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1927 		ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1928 		if (hw->phy.multispeed_fiber)
1929 			ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX, 0,
1930 			    NULL);
1931 	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1932 		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
1933 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1934 		ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1935 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1936 		ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
1937 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
1938 		ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
1939 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1940 		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
1941 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
1942 		ifmedia_add(&sc->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
1943 
1944 	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
1945 		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0,
1946 		    NULL);
1947 		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1948 	}
1949 
1950 	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1951 }
1952 
1953 void
1954 ixgbe_config_link(struct ix_softc *sc)
1955 {
1956 	uint32_t	autoneg, err = 0;
1957 	bool		negotiate;
1958 
1959 	if (ixgbe_is_sfp(&sc->hw)) {
1960 		if (sc->hw.phy.multispeed_fiber) {
1961 			sc->hw.mac.ops.setup_sfp(&sc->hw);
1962 			if (sc->hw.mac.ops.enable_tx_laser)
1963 				sc->hw.mac.ops.enable_tx_laser(&sc->hw);
1964 			ixgbe_handle_msf(sc);
1965 		} else
1966 			ixgbe_handle_mod(sc);
1967 	} else {
1968 		if (sc->hw.mac.ops.check_link)
1969 			err = sc->hw.mac.ops.check_link(&sc->hw, &autoneg,
1970 			    &sc->link_up, FALSE);
1971 		if (err)
1972 			return;
1973 		autoneg = sc->hw.phy.autoneg_advertised;
1974 		if ((!autoneg) && (sc->hw.mac.ops.get_link_capabilities))
1975 			err = sc->hw.mac.ops.get_link_capabilities(&sc->hw,
1976 			    &autoneg, &negotiate);
1977 		if (err)
1978 			return;
1979 		if (sc->hw.mac.ops.setup_link)
1980 			sc->hw.mac.ops.setup_link(&sc->hw,
1981 			    autoneg, sc->link_up);
1982 	}
1983 }
1984 
1985 /********************************************************************
1986  * Manage DMA'able memory.
1987  *******************************************************************/
1988 int
1989 ixgbe_dma_malloc(struct ix_softc *sc, bus_size_t size,
1990 		struct ixgbe_dma_alloc *dma, int mapflags)
1991 {
1992 	struct ifnet		*ifp = &sc->arpcom.ac_if;
1993 	struct ixgbe_osdep	*os = &sc->osdep;
1994 	int			 r;
1995 
1996 	dma->dma_tag = os->os_pa.pa_dmat;
1997 	r = bus_dmamap_create(dma->dma_tag, size, 1,
1998 	    size, 0, BUS_DMA_NOWAIT, &dma->dma_map);
1999 	if (r != 0) {
2000 		printf("%s: ixgbe_dma_malloc: bus_dmamap_create failed; "
2001 		       "error %u\n", ifp->if_xname, r);
2002 		goto fail_0;
2003 	}
2004 
2005 	r = bus_dmamem_alloc(dma->dma_tag, size, PAGE_SIZE, 0, &dma->dma_seg,
2006 	    1, &dma->dma_nseg, BUS_DMA_NOWAIT);
2007 	if (r != 0) {
2008 		printf("%s: ixgbe_dma_malloc: bus_dmamem_alloc failed; "
2009 		       "error %u\n", ifp->if_xname, r);
2010 		goto fail_1;
2011 	}
2012 
2013 	r = bus_dmamem_map(dma->dma_tag, &dma->dma_seg, dma->dma_nseg, size,
2014 	    &dma->dma_vaddr, BUS_DMA_NOWAIT);
2015 	if (r != 0) {
2016 		printf("%s: ixgbe_dma_malloc: bus_dmamem_map failed; "
2017 		       "error %u\n", ifp->if_xname, r);
2018 		goto fail_2;
2019 	}
2020 
2021 	r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
2022 	    size, NULL, mapflags | BUS_DMA_NOWAIT);
2023 	if (r != 0) {
2024 		printf("%s: ixgbe_dma_malloc: bus_dmamap_load failed; "
2025 		       "error %u\n", ifp->if_xname, r);
2026 		goto fail_3;
2027 	}
2028 
2029 	dma->dma_size = size;
2030 	return (0);
2031 fail_3:
2032 	bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size);
2033 fail_2:
2034 	bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
2035 fail_1:
2036 	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
2037 fail_0:
2038 	dma->dma_map = NULL;
2039 	dma->dma_tag = NULL;
2040 	return (r);
2041 }
2042 
2043 void
2044 ixgbe_dma_free(struct ix_softc *sc, struct ixgbe_dma_alloc *dma)
2045 {
2046 	if (dma->dma_tag == NULL)
2047 		return;
2048 
2049 	if (dma->dma_map != NULL) {
2050 		bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0,
2051 		    dma->dma_map->dm_mapsize,
2052 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2053 		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
2054 		bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, dma->dma_size);
2055 		bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
2056 		bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
2057 		dma->dma_map = NULL;
2058 	}
2059 }
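
/*
 * Editor's note (illustrative sketch, not compiled): ixgbe_dma_malloc()
 * above wraps the usual four-step bus_dma sequence (bus_dmamap_create,
 * bus_dmamem_alloc, bus_dmamem_map, bus_dmamap_load) and ixgbe_dma_free()
 * unwinds it in reverse.  A caller typically uses the pair as in the
 * hypothetical function below; ix_example_ring_alloc() and its descriptor
 * count of 256 are invented for this example only.
 */
#if 0
static int
ix_example_ring_alloc(struct ix_softc *sc, struct ixgbe_dma_alloc *dma)
{
	bus_size_t ring_bytes;

	/* 256 descriptors is an arbitrary size chosen for the example */
	ring_bytes = roundup2(256 * sizeof(union ixgbe_adv_tx_desc),
	    DBA_ALIGN);
	if (ixgbe_dma_malloc(sc, ring_bytes, dma, BUS_DMA_NOWAIT) != 0)
		return (ENOMEM);

	/*
	 * dma->dma_vaddr is the kernel mapping of the ring;
	 * dma->dma_map->dm_segs[0].ds_addr is the bus address to program
	 * into TDBAL/TDBAH (or RDBAL/RDBAH on the receive side).
	 */
	return (0);
}
#endif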
2060 
2061 
2062 /*********************************************************************
2063  *
2064  *  Allocate memory for the transmit and receive rings, and then
2065  *  the descriptors associated with each, called only once at attach.
2066  *
2067  **********************************************************************/
2068 int
2069 ixgbe_allocate_queues(struct ix_softc *sc)
2070 {
2071 	struct ifnet	*ifp = &sc->arpcom.ac_if;
2072 	struct ix_queue *que;
2073 	struct tx_ring *txr;
2074 	struct rx_ring *rxr;
2075 	int rsize, tsize;
2076 	int txconf = 0, rxconf = 0, i;
2077 
2078 	/* First allocate the top level queue structs */
2079 	if (!(sc->queues = mallocarray(sc->num_queues,
2080 	    sizeof(struct ix_queue), M_DEVBUF, M_NOWAIT | M_ZERO))) {
2081 		printf("%s: Unable to allocate queue memory\n", ifp->if_xname);
2082 		goto fail;
2083 	}
2084 
2085 	/* Then allocate the TX ring struct memory */
2086 	if (!(sc->tx_rings = mallocarray(sc->num_queues,
2087 	    sizeof(struct tx_ring), M_DEVBUF, M_NOWAIT | M_ZERO))) {
2088 		printf("%s: Unable to allocate TX ring memory\n", ifp->if_xname);
2089 		goto fail;
2090 	}
2091 
2092 	/* Next allocate the RX */
2093 	if (!(sc->rx_rings = mallocarray(sc->num_queues,
2094 	    sizeof(struct rx_ring), M_DEVBUF, M_NOWAIT | M_ZERO))) {
2095 		printf("%s: Unable to allocate RX ring memory\n", ifp->if_xname);
2096 		goto rx_fail;
2097 	}
2098 
2099 	/* For the ring itself */
2100 	tsize = roundup2(sc->num_tx_desc *
2101 	    sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
2102 
2103 	/*
2104 	 * Now set up the TX queues.  txconf is needed to handle the
2105 	 * possibility that things fail midcourse and we need to
2106 	 * unwind the allocations gracefully.
2107 	 */
2108 	for (i = 0; i < sc->num_queues; i++, txconf++) {
2109 		/* Set up some basics */
2110 		txr = &sc->tx_rings[i];
2111 		txr->sc = sc;
2112 		txr->me = i;
2113 
2114 		if (ixgbe_dma_malloc(sc, tsize,
2115 		    &txr->txdma, BUS_DMA_NOWAIT)) {
2116 			printf("%s: Unable to allocate TX Descriptor memory\n",
2117 			    ifp->if_xname);
2118 			goto err_tx_desc;
2119 		}
2120 		txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
2121 		bzero((void *)txr->tx_base, tsize);
2122 	}
2123 
2124 	/*
2125 	 * Next the RX queues...
2126 	 */
2127 	rsize = roundup2(sc->num_rx_desc *
2128 	    sizeof(union ixgbe_adv_rx_desc), 4096);
2129 	for (i = 0; i < sc->num_queues; i++, rxconf++) {
2130 		rxr = &sc->rx_rings[i];
2131 		/* Set up some basics */
2132 		rxr->sc = sc;
2133 		rxr->me = i;
2134 		timeout_set(&rxr->rx_refill, ixgbe_rxrefill, rxr);
2135 
2136 		if (ixgbe_dma_malloc(sc, rsize,
2137 			&rxr->rxdma, BUS_DMA_NOWAIT)) {
2138 			printf("%s: Unable to allocate RX Descriptor memory\n",
2139 			    ifp->if_xname);
2140 			goto err_rx_desc;
2141 		}
2142 		rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
2143 		bzero((void *)rxr->rx_base, rsize);
2144 	}
2145 
2146 	/*
2147 	 * Finally set up the queue holding structs
2148 	 */
2149 	for (i = 0; i < sc->num_queues; i++) {
2150 		que = &sc->queues[i];
2151 		que->sc = sc;
2152 		que->txr = &sc->tx_rings[i];
2153 		que->rxr = &sc->rx_rings[i];
2154 		snprintf(que->name, sizeof(que->name), "%s:%d",
2155 		    sc->dev.dv_xname, i);
2156 	}
2157 
2158 	return (0);
2159 
2160 err_rx_desc:
2161 	for (rxr = sc->rx_rings; rxconf > 0; rxr++, rxconf--)
2162 		ixgbe_dma_free(sc, &rxr->rxdma);
2163 err_tx_desc:
2164 	for (txr = sc->tx_rings; txconf > 0; txr++, txconf--)
2165 		ixgbe_dma_free(sc, &txr->txdma);
2166 	free(sc->rx_rings, M_DEVBUF, sc->num_queues * sizeof(struct rx_ring));
2167 	sc->rx_rings = NULL;
2168 rx_fail:
2169 	free(sc->tx_rings, M_DEVBUF, sc->num_queues * sizeof(struct tx_ring));
2170 	sc->tx_rings = NULL;
2171 fail:
2172 	return (ENOMEM);
2173 }
2174 
2175 /*********************************************************************
2176  *
2177  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
2178  *  the information needed to transmit a packet on the wire. This is
2179  *  called only once at attach, setup is done every reset.
2180  *  called only once at attach; setup is done on every reset.
2181  **********************************************************************/
2182 int
2183 ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
2184 {
2185 	struct ix_softc 	*sc = txr->sc;
2186 	struct ifnet		*ifp = &sc->arpcom.ac_if;
2187 	struct ixgbe_tx_buf	*txbuf;
2188 	int			 error, i;
2189 
2190 	if (!(txr->tx_buffers = mallocarray(sc->num_tx_desc,
2191 	    sizeof(struct ixgbe_tx_buf), M_DEVBUF, M_NOWAIT | M_ZERO))) {
2192 		printf("%s: Unable to allocate tx_buffer memory\n",
2193 		    ifp->if_xname);
2194 		error = ENOMEM;
2195 		goto fail;
2196 	}
2197 	txr->txtag = txr->txdma.dma_tag;
2198 
2199 	/* Create the descriptor buffer dma maps */
2200 	for (i = 0; i < sc->num_tx_desc; i++) {
2201 		txbuf = &txr->tx_buffers[i];
2202 		error = bus_dmamap_create(txr->txdma.dma_tag, IXGBE_TSO_SIZE,
2203 			    sc->num_segs, PAGE_SIZE, 0,
2204 			    BUS_DMA_NOWAIT, &txbuf->map);
2205 
2206 		if (error != 0) {
2207 			printf("%s: Unable to create TX DMA map\n",
2208 			    ifp->if_xname);
2209 			goto fail;
2210 		}
2211 	}
2212 
2213 	return 0;
2214 fail:
2215 	return (error);
2216 }
2217 
2218 /*********************************************************************
2219  *
2220  *  Initialize a transmit ring.
2221  *
2222  **********************************************************************/
2223 int
2224 ixgbe_setup_transmit_ring(struct tx_ring *txr)
2225 {
2226 	struct ix_softc		*sc = txr->sc;
2227 	int			 error;
2228 
2229 	/* Now allocate transmit buffers for the ring */
2230 	if ((error = ixgbe_allocate_transmit_buffers(txr)) != 0)
2231 		return (error);
2232 
2233 	/* Clear the old ring contents */
2234 	bzero((void *)txr->tx_base,
2235 	      (sizeof(union ixgbe_adv_tx_desc)) * sc->num_tx_desc);
2236 
2237 	/* Reset indices */
2238 	txr->next_avail_desc = 0;
2239 	txr->next_to_clean = 0;
2240 
2241 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2242 	    0, txr->txdma.dma_map->dm_mapsize,
2243 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2244 
2245 	return (0);
2246 }
2247 
2248 /*********************************************************************
2249  *
2250  *  Initialize all transmit rings.
2251  *
2252  **********************************************************************/
2253 int
2254 ixgbe_setup_transmit_structures(struct ix_softc *sc)
2255 {
2256 	struct tx_ring *txr = sc->tx_rings;
2257 	int		i, error;
2258 
2259 	for (i = 0; i < sc->num_queues; i++, txr++) {
2260 		if ((error = ixgbe_setup_transmit_ring(txr)) != 0)
2261 			goto fail;
2262 	}
2263 
2264 	return (0);
2265 fail:
2266 	ixgbe_free_transmit_structures(sc);
2267 	return (error);
2268 }
2269 
2270 /*********************************************************************
2271  *
2272  *  Enable transmit unit.
2273  *
2274  **********************************************************************/
2275 void
2276 ixgbe_initialize_transmit_units(struct ix_softc *sc)
2277 {
2278 	struct ifnet	*ifp = &sc->arpcom.ac_if;
2279 	struct tx_ring	*txr;
2280 	struct ixgbe_hw	*hw = &sc->hw;
2281 	int		 i;
2282 	uint64_t	 tdba;
2283 	uint32_t	 txctrl;
2284 
2285 	/* Setup the Base and Length of the Tx Descriptor Ring */
2286 
2287 	for (i = 0; i < sc->num_queues; i++) {
2288 		txr = &sc->tx_rings[i];
2289 
2290 		/* Setup descriptor base address */
2291 		tdba = txr->txdma.dma_map->dm_segs[0].ds_addr;
2292 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),
2293 		       (tdba & 0x00000000ffffffffULL));
2294 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
2295 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
2296 		    sc->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
2297 
2298 		/* Setup the HW Tx Head and Tail descriptor pointers */
2299 		IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
2300 		IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
2301 
2302 		/* Setup Transmit Descriptor Cmd Settings */
2303 		txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
2304 		txr->queue_status = IXGBE_QUEUE_IDLE;
2305 		txr->watchdog_timer = 0;
2306 
2307 		/* Disable Head Writeback */
2308 		switch (hw->mac.type) {
2309 		case ixgbe_mac_82598EB:
2310 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
2311 			break;
2312 		case ixgbe_mac_82599EB:
2313 		case ixgbe_mac_X540:
2314 		default:
2315 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
2316 			break;
2317 		}
2318 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
2319 		switch (hw->mac.type) {
2320 		case ixgbe_mac_82598EB:
2321 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
2322 			break;
2323 		case ixgbe_mac_82599EB:
2324 		case ixgbe_mac_X540:
2325 		default:
2326 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), txctrl);
2327 			break;
2328 		}
2329 	}
2330 	ifp->if_timer = 0;
2331 
2332 	if (hw->mac.type != ixgbe_mac_82598EB) {
2333 		uint32_t dmatxctl, rttdcs;
2334 		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2335 		dmatxctl |= IXGBE_DMATXCTL_TE;
2336 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
2337 		/* Disable arbiter to set MTQC */
2338 		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2339 		rttdcs |= IXGBE_RTTDCS_ARBDIS;
2340 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2341 		IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
2342 		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
2343 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2344 	}
2345 }
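
/*
 * Editor's note (worked example): the ring base programmed above is a
 * 64-bit bus address split across two 32-bit registers, the low word in
 * TDBAL and the high word in TDBAH.  For a base of 0x0000000123456000
 * the writes amount to
 *
 *	IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i), 0x23456000);
 *	IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), 0x00000001);
 *
 * with RDBAL/RDBAH playing the same role on the receive side.
 */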
2346 
2347 /*********************************************************************
2348  *
2349  *  Free all transmit rings.
2350  *
2351  **********************************************************************/
2352 void
2353 ixgbe_free_transmit_structures(struct ix_softc *sc)
2354 {
2355 	struct tx_ring *txr = sc->tx_rings;
2356 	int		i;
2357 
2358 	for (i = 0; i < sc->num_queues; i++, txr++)
2359 		ixgbe_free_transmit_buffers(txr);
2360 }
2361 
2362 /*********************************************************************
2363  *
2364  *  Free transmit ring related data structures.
2365  *
2366  **********************************************************************/
2367 void
2368 ixgbe_free_transmit_buffers(struct tx_ring *txr)
2369 {
2370 	struct ix_softc *sc = txr->sc;
2371 	struct ixgbe_tx_buf *tx_buffer;
2372 	int             i;
2373 
2374 	INIT_DEBUGOUT("free_transmit_ring: begin");
2375 
2376 	if (txr->tx_buffers == NULL)
2377 		return;
2378 
2379 	tx_buffer = txr->tx_buffers;
2380 	for (i = 0; i < sc->num_tx_desc; i++, tx_buffer++) {
2381 		if (tx_buffer->map != NULL && tx_buffer->map->dm_nsegs > 0) {
2382 			bus_dmamap_sync(txr->txdma.dma_tag, tx_buffer->map,
2383 			    0, tx_buffer->map->dm_mapsize,
2384 			    BUS_DMASYNC_POSTWRITE);
2385 			bus_dmamap_unload(txr->txdma.dma_tag,
2386 			    tx_buffer->map);
2387 		}
2388 		if (tx_buffer->m_head != NULL) {
2389 			m_freem(tx_buffer->m_head);
2390 			tx_buffer->m_head = NULL;
2391 		}
2392 		if (tx_buffer->map != NULL) {
2393 			bus_dmamap_destroy(txr->txdma.dma_tag,
2394 			    tx_buffer->map);
2395 			tx_buffer->map = NULL;
2396 		}
2397 	}
2398 
2399 	if (txr->tx_buffers != NULL)
2400 		free(txr->tx_buffers, M_DEVBUF,
2401 		    sc->num_tx_desc * sizeof(struct ixgbe_tx_buf));
2402 	txr->tx_buffers = NULL;
2403 	txr->txtag = NULL;
2404 }
2405 
2406 /*********************************************************************
2407  *
2408  *  Advanced Context Descriptor setup for VLAN or CSUM
2409  *
2410  **********************************************************************/
2411 
2412 int
2413 ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp,
2414     uint32_t *cmd_type_len, uint32_t *olinfo_status)
2415 {
2416 	struct ixgbe_adv_tx_context_desc *TXD;
2417 	struct ixgbe_tx_buf *tx_buffer;
2418 #if NVLAN > 0
2419 	struct ether_vlan_header *eh;
2420 #else
2421 	struct ether_header *eh;
2422 #endif
2423 	struct ip *ip;
2424 #ifdef notyet
2425 	struct ip6_hdr *ip6;
2426 #endif
2427 	struct mbuf *m;
2428 	int	ipoff;
2429 	uint32_t vlan_macip_lens = 0, type_tucmd_mlhl = 0;
2430 	int 	ehdrlen, ip_hlen = 0;
2431 	uint16_t etype;
2432 	uint8_t	ipproto = 0;
2433 	int	offload = TRUE;
2434 	int	ctxd = txr->next_avail_desc;
2435 #if NVLAN > 0
2436 	uint16_t vtag = 0;
2437 #endif
2438 
2439 #ifdef notyet
2440 	/* First check if TSO is to be used */
2441 	if (mp->m_pkthdr.csum_flags & CSUM_TSO)
2442 		return (ixgbe_tso_setup(txr, mp, cmd_type_len, olinfo_status));
2443 #endif
2444 
2445 	if ((mp->m_pkthdr.csum_flags & (M_TCP_CSUM_OUT | M_UDP_CSUM_OUT)) == 0)
2446 		offload = FALSE;
2447 
2448 	/* Indicate the whole packet as payload when not doing TSO */
2449 	*olinfo_status |= mp->m_pkthdr.len << IXGBE_ADVTXD_PAYLEN_SHIFT;
2450 
2451 	/* Now ready a context descriptor */
2452 	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
2453 	tx_buffer = &txr->tx_buffers[ctxd];
2454 
2455 	/*
2456 	 * In advanced descriptors the vlan tag must
2457 	 * be placed into the descriptor itself. Hence
2458 	 * we need to make one even if not doing offloads.
2459 	 */
2460 #if NVLAN > 0
2461 	if (mp->m_flags & M_VLANTAG) {
2462 		vtag = mp->m_pkthdr.ether_vtag;
2463 		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
2464 	} else
2465 #endif
2466 	if (offload == FALSE)
2467 		return (0);	/* No need for CTX */
2468 
2469 	/*
2470 	 * Determine where frame payload starts.
2471 	 * Jump over vlan headers if already present,
2472 	 * helpful for QinQ too.
2473 	 */
2474 	if (mp->m_len < sizeof(struct ether_header))
2475 		return (-1);
2476 #if NVLAN > 0
2477 	eh = mtod(mp, struct ether_vlan_header *);
2478 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2479 		if (mp->m_len < sizeof(struct ether_vlan_header))
2480 			return (-1);
2481 		etype = ntohs(eh->evl_proto);
2482 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2483 	} else {
2484 		etype = ntohs(eh->evl_encap_proto);
2485 		ehdrlen = ETHER_HDR_LEN;
2486 	}
2487 #else
2488 	eh = mtod(mp, struct ether_header *);
2489 	etype = ntohs(eh->ether_type);
2490 	ehdrlen = ETHER_HDR_LEN;
2491 #endif
2492 
2493 	/* Set the ether header length */
2494 	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
2495 
2496 	switch (etype) {
2497 	case ETHERTYPE_IP:
2498 		if (mp->m_pkthdr.len < ehdrlen + sizeof(*ip))
2499 			return (-1);
2500 		m = m_getptr(mp, ehdrlen, &ipoff);
2501 		KASSERT(m != NULL && m->m_len - ipoff >= sizeof(*ip));
2502 		ip = (struct ip *)(m->m_data + ipoff);
2503 		ip_hlen = ip->ip_hl << 2;
2504 		ipproto = ip->ip_p;
2505 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
2506 		break;
2507 #ifdef notyet
2508 	case ETHERTYPE_IPV6:
2509 		if (mp->m_pkthdr.len < ehdrlen + sizeof(*ip6))
2510 			return (-1);
2511 		m = m_getptr(mp, ehdrlen, &ipoff);
2512 		KASSERT(m != NULL && m->m_len - ipoff >= sizeof(*ip6));
2513 		ip6 = (struct ip6_hdr *)(m->m_data + ipoff);
2514 		ip_hlen = sizeof(*ip6);
2515 		/* XXX-BZ this will go badly in case of ext hdrs. */
2516 		ipproto = ip6->ip6_nxt;
2517 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
2518 		break;
2519 #endif
2520 	default:
2521 		offload = FALSE;
2522 		break;
2523 	}
2524 
2525 	vlan_macip_lens |= ip_hlen;
2526 	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
2527 
2528 	switch (ipproto) {
2529 	case IPPROTO_TCP:
2530 		if (mp->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
2531 			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2532 		break;
2533 	case IPPROTO_UDP:
2534 		if (mp->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
2535 			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
2536 		break;
2537 	default:
2538 		offload = FALSE;
2539 		break;
2540 	}
2541 
2542 	if (offload) /* For the TX descriptor setup */
2543 		*olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
2544 
2545 	/* Now copy bits into descriptor */
2546 	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
2547 	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
2548 	TXD->seqnum_seed = htole32(0);
2549 	TXD->mss_l4len_idx = htole32(0);
2550 
2551 	tx_buffer->m_head = NULL;
2552 	tx_buffer->eop_index = -1;
2553 
2554 	return (1);
2555 }
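
/*
 * Editor's note (worked example): the context descriptor built above
 * packs the VLAN tag, MAC header length and IP header length into one
 * 32-bit word.  For an untagged IPv4/TCP frame with a 14-byte Ethernet
 * header and a 20-byte IP header the assembled fields would be
 *
 *	vlan_macip_lens = (ETHER_HDR_LEN << IXGBE_ADVTXD_MACLEN_SHIFT) | 20;
 *	type_tucmd_mlhl = IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT |
 *	    IXGBE_ADVTXD_TUCMD_IPV4 | IXGBE_ADVTXD_TUCMD_L4T_TCP;
 *
 * and the data descriptors that follow carry IXGBE_TXD_POPTS_TXSM << 8
 * in olinfo_status so the hardware inserts the TCP checksum.
 */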
2556 
2557 /**********************************************************************
2558  *
2559  *  Examine each tx_buffer in the used queue. If the hardware is done
2560  *  processing the packet then free associated resources. The
2561  *  tx_buffer is put back on the free queue.
2562  *
2563  **********************************************************************/
2564 int
2565 ixgbe_txeof(struct tx_ring *txr)
2566 {
2567 	struct ix_softc			*sc = txr->sc;
2568 	struct ifqueue			*ifq = txr->ifq;
2569 	struct ifnet			*ifp = &sc->arpcom.ac_if;
2570 	unsigned int			 head, tail, last;
2571 	struct ixgbe_tx_buf		*tx_buffer;
2572 	struct ixgbe_legacy_tx_desc	*tx_desc;
2573 
2574 	if (!ISSET(ifp->if_flags, IFF_RUNNING))
2575 		return FALSE;
2576 
2577 	head = txr->next_avail_desc;
2578 	tail = txr->next_to_clean;
2579 
2580 	membar_consumer();
2581 
2582 	if (head == tail)
2583 		return (FALSE);
2584 
2585 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2586 	    0, txr->txdma.dma_map->dm_mapsize,
2587 	    BUS_DMASYNC_POSTREAD);
2588 
2589 	for (;;) {
2590 		tx_buffer = &txr->tx_buffers[tail];
2591 		last = tx_buffer->eop_index;
2592 		tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
2593 
2594 		if (!ISSET(tx_desc->upper.fields.status, IXGBE_TXD_STAT_DD))
2595 			break;
2596 
2597 		bus_dmamap_sync(txr->txdma.dma_tag, tx_buffer->map,
2598 		    0, tx_buffer->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2599 		bus_dmamap_unload(txr->txdma.dma_tag, tx_buffer->map);
2600 		m_freem(tx_buffer->m_head);
2601 
2602 		tx_buffer->m_head = NULL;
2603 		tx_buffer->eop_index = -1;
2604 
2605 		tail = last + 1;
2606 		if (tail == sc->num_tx_desc)
2607 			tail = 0;
2608 		if (head == tail) {
2609 			/* All clean, turn off the timer */
2610 			ifp->if_timer = 0;
2611 			break;
2612 		}
2613 	}
2614 
2615 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2616 	    0, txr->txdma.dma_map->dm_mapsize,
2617 	    BUS_DMASYNC_PREREAD);
2618 
2619 	membar_producer();
2620 
2621 	txr->next_to_clean = tail;
2622 
2623 	if (ifq_is_oactive(ifq))
2624 		ifq_restart(ifq);
2625 
2626 	return TRUE;
2627 }
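
/*
 * Editor's note (illustrative sketch, not part of the driver): the
 * producer index (next_avail_desc) and consumer index (next_to_clean)
 * wrap modulo num_tx_desc, with head == tail meaning "ring empty" as
 * checked at the top of ixgbe_txeof() above.  A hypothetical helper for
 * the number of descriptors produced but not yet cleaned would be:
 */
#if 0
static inline unsigned int
ix_txr_pending(const struct tx_ring *txr)
{
	unsigned int head = txr->next_avail_desc;
	unsigned int tail = txr->next_to_clean;

	if (head >= tail)
		return (head - tail);
	return (head + txr->sc->num_tx_desc - tail);
}
#endif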
2628 
2629 /*********************************************************************
2630  *
2631  *  Get a buffer from system mbuf buffer pool.
2632  *
2633  **********************************************************************/
2634 int
2635 ixgbe_get_buf(struct rx_ring *rxr, int i)
2636 {
2637 	struct ix_softc		*sc = rxr->sc;
2638 	struct ixgbe_rx_buf	*rxbuf;
2639 	struct mbuf		*mp;
2640 	int			error;
2641 	union ixgbe_adv_rx_desc	*rxdesc;
2642 
2643 	rxbuf = &rxr->rx_buffers[i];
2644 	rxdesc = &rxr->rx_base[i];
2645 	if (rxbuf->buf) {
2646 		printf("%s: ixgbe_get_buf: slot %d already has an mbuf\n",
2647 		    sc->dev.dv_xname, i);
2648 		return (ENOBUFS);
2649 	}
2650 
2651 	/* needed in any case, so preallocate since this one will fail for sure */
2652 	mp = MCLGETI(NULL, M_DONTWAIT, NULL, sc->rx_mbuf_sz);
2653 	if (!mp)
2654 		return (ENOBUFS);
2655 
2656 	mp->m_data += (mp->m_ext.ext_size - sc->rx_mbuf_sz);
2657 	mp->m_len = mp->m_pkthdr.len = sc->rx_mbuf_sz;
2658 
2659 	error = bus_dmamap_load_mbuf(rxr->rxdma.dma_tag, rxbuf->map,
2660 	    mp, BUS_DMA_NOWAIT);
2661 	if (error) {
2662 		m_freem(mp);
2663 		return (error);
2664 	}
2665 
2666 	bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map,
2667 	    0, rxbuf->map->dm_mapsize, BUS_DMASYNC_PREREAD);
2668 	rxbuf->buf = mp;
2669 
2670 	rxdesc->read.pkt_addr = htole64(rxbuf->map->dm_segs[0].ds_addr);
2671 
2672 	return (0);
2673 }
2674 
2675 /*********************************************************************
2676  *
2677  *  Allocate memory for rx_buffer structures. Since we use one
2678  *  rx_buffer per received packet, the maximum number of rx_buffers
2679  *  that we'll need is equal to the number of receive descriptors
2680  *  that we've allocated.
2681  *
2682  **********************************************************************/
2683 int
2684 ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
2685 {
2686 	struct ix_softc		*sc = rxr->sc;
2687 	struct ifnet		*ifp = &sc->arpcom.ac_if;
2688 	struct ixgbe_rx_buf 	*rxbuf;
2689 	int			i, error;
2690 
2691 	if (!(rxr->rx_buffers = mallocarray(sc->num_rx_desc,
2692 	    sizeof(struct ixgbe_rx_buf), M_DEVBUF, M_NOWAIT | M_ZERO))) {
2693 		printf("%s: Unable to allocate rx_buffer memory\n",
2694 		    ifp->if_xname);
2695 		error = ENOMEM;
2696 		goto fail;
2697 	}
2698 
2699 	rxbuf = rxr->rx_buffers;
2700 	for (i = 0; i < sc->num_rx_desc; i++, rxbuf++) {
2701 		error = bus_dmamap_create(rxr->rxdma.dma_tag, 16 * 1024, 1,
2702 		    16 * 1024, 0, BUS_DMA_NOWAIT, &rxbuf->map);
2703 		if (error) {
2704 			printf("%s: Unable to create RX packet DMA map\n",
2705 			    ifp->if_xname);
2706 			goto fail;
2707 		}
2708 	}
2709 	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 0,
2710 	    rxr->rxdma.dma_map->dm_mapsize,
2711 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2712 
2713 	return (0);
2714 
2715 fail:
2716 	return (error);
2717 }
2718 
2719 /*********************************************************************
2720  *
2721  *  Initialize a receive ring and its buffers.
2722  *
2723  **********************************************************************/
2724 int
2725 ixgbe_setup_receive_ring(struct rx_ring *rxr)
2726 {
2727 	struct ix_softc		*sc = rxr->sc;
2728 	struct ifnet		*ifp = &sc->arpcom.ac_if;
2729 	int			 rsize, error;
2730 
2731 	rsize = roundup2(sc->num_rx_desc *
2732 	    sizeof(union ixgbe_adv_rx_desc), 4096);
2733 	/* Clear the ring contents */
2734 	bzero((void *)rxr->rx_base, rsize);
2735 
2736 	if ((error = ixgbe_allocate_receive_buffers(rxr)) != 0)
2737 		return (error);
2738 
2739 	/* Setup our descriptor indices */
2740 	rxr->next_to_check = 0;
2741 	rxr->last_desc_filled = sc->num_rx_desc - 1;
2742 
2743 	if_rxr_init(&rxr->rx_ring, 2 * ((ifp->if_hardmtu / MCLBYTES) + 1),
2744 	    sc->num_rx_desc - 1);
2745 
2746 	ixgbe_rxfill(rxr);
2747 	if (if_rxr_inuse(&rxr->rx_ring) == 0) {
2748 		printf("%s: unable to fill any rx descriptors\n",
2749 		    sc->dev.dv_xname);
2750 		return (ENOBUFS);
2751 	}
2752 
2753 	return (0);
2754 }
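
/*
 * Editor's note (worked example, assuming the usual 2KB MCLBYTES and the
 * driver's roughly 9.5KB hard MTU): the low watermark handed to
 * if_rxr_init() above is 2 * ((if_hardmtu / MCLBYTES) + 1), i.e. enough
 * clusters to hold two maximum-sized frames.  With those assumed values
 * that works out to 2 * (4 + 1) = 10 descriptors kept filled even under
 * mbuf pressure, while the high watermark is the full ring minus one slot.
 */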
2755 
2756 int
2757 ixgbe_rxfill(struct rx_ring *rxr)
2758 {
2759 	struct ix_softc *sc = rxr->sc;
2760 	int		 post = 0;
2761 	u_int		 slots;
2762 	int		 i;
2763 
2764 	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2765 	    0, rxr->rxdma.dma_map->dm_mapsize,
2766 	    BUS_DMASYNC_POSTWRITE);
2767 
2768 	i = rxr->last_desc_filled;
2769 	for (slots = if_rxr_get(&rxr->rx_ring, sc->num_rx_desc);
2770 	    slots > 0; slots--) {
2771 		if (++i == sc->num_rx_desc)
2772 			i = 0;
2773 
2774 		if (ixgbe_get_buf(rxr, i) != 0)
2775 			break;
2776 
2777 		rxr->last_desc_filled = i;
2778 		post = 1;
2779 	}
2780 
2781 	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2782 	    0, rxr->rxdma.dma_map->dm_mapsize,
2783 	    BUS_DMASYNC_PREWRITE);
2784 
2785 	if_rxr_put(&rxr->rx_ring, slots);
2786 
2787 	return (post);
2788 }
2789 
2790 void
2791 ixgbe_rxrefill(void *xrxr)
2792 {
2793 	struct rx_ring *rxr = xrxr;
2794 	struct ix_softc *sc = rxr->sc;
2795 
2796 	if (ixgbe_rxfill(rxr)) {
2797 		/* Advance the Rx Queue "Tail Pointer" */
2798 		IXGBE_WRITE_REG(&sc->hw, IXGBE_RDT(rxr->me),
2799 		    rxr->last_desc_filled);
2800 	} else if (if_rxr_inuse(&rxr->rx_ring) == 0)
2801 		timeout_add(&rxr->rx_refill, 1);
2802 
2803 }
2804 
2805 /*********************************************************************
2806  *
2807  *  Initialize all receive rings.
2808  *
2809  **********************************************************************/
2810 int
2811 ixgbe_setup_receive_structures(struct ix_softc *sc)
2812 {
2813 	struct rx_ring *rxr = sc->rx_rings;
2814 	int i;
2815 
2816 	for (i = 0; i < sc->num_queues; i++, rxr++)
2817 		if (ixgbe_setup_receive_ring(rxr))
2818 			goto fail;
2819 
2820 	return (0);
2821 fail:
2822 	ixgbe_free_receive_structures(sc);
2823 	return (ENOBUFS);
2824 }
2825 
2826 /*********************************************************************
2827  *
2828  *  Setup receive registers and features.
2829  *
2830  **********************************************************************/
2831 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
2832 
2833 void
2834 ixgbe_initialize_receive_units(struct ix_softc *sc)
2835 {
2836 	struct rx_ring	*rxr = sc->rx_rings;
2837 	struct ixgbe_hw	*hw = &sc->hw;
2838 	uint32_t	bufsz, fctrl, srrctl, rxcsum;
2839 	uint32_t	hlreg;
2840 	int		i;
2841 
2842 	/*
2843 	 * Make sure receives are disabled while
2844 	 * setting up the descriptor ring
2845 	 */
2846 	ixgbe_disable_rx(hw);
2847 
2848 	/* Enable broadcasts */
2849 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2850 	fctrl |= IXGBE_FCTRL_BAM;
2851 	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
2852 		fctrl |= IXGBE_FCTRL_DPF;
2853 		fctrl |= IXGBE_FCTRL_PMCF;
2854 	}
2855 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2856 
2857 	/* Always enable jumbo frame reception */
2858 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2859 	hlreg |= IXGBE_HLREG0_JUMBOEN;
2860 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
2861 
2862 	bufsz = (sc->rx_mbuf_sz - ETHER_ALIGN) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2863 
2864 	for (i = 0; i < sc->num_queues; i++, rxr++) {
2865 		uint64_t rdba = rxr->rxdma.dma_map->dm_segs[0].ds_addr;
2866 
2867 		/* Setup the Base and Length of the Rx Descriptor Ring */
2868 		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i),
2869 			       (rdba & 0x00000000ffffffffULL));
2870 		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
2871 		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i),
2872 		    sc->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
2873 
2874 		/* Set up the SRRCTL register */
2875 		srrctl = bufsz | IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2876 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
2877 
2878 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
2879 		IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
2880 		IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
2881 	}
2882 
2883 	if (sc->hw.mac.type != ixgbe_mac_82598EB) {
2884 		uint32_t psrtype = IXGBE_PSRTYPE_TCPHDR |
2885 			      IXGBE_PSRTYPE_UDPHDR |
2886 			      IXGBE_PSRTYPE_IPV4HDR |
2887 			      IXGBE_PSRTYPE_IPV6HDR;
2888 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
2889 	}
2890 
2891 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2892 	rxcsum &= ~IXGBE_RXCSUM_PCSD;
2893 
2894 	ixgbe_initialize_rss_mapping(sc);
2895 
2896 	/* Setup RSS */
2897 	if (sc->num_queues > 1) {
2898 		/* RSS and RX IPP Checksum are mutually exclusive */
2899 		rxcsum |= IXGBE_RXCSUM_PCSD;
2900 	}
2901 
2902 	/* Map QPRC/QPRDC/QPTC on a per queue basis */
2903 	ixgbe_map_queue_statistics(sc);
2904 
2905 	/* This is useful for calculating UDP/IP fragment checksums */
2906 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
2907 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
2908 
2909 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2910 }
2911 
2912 void
2913 ixgbe_initialize_rss_mapping(struct ix_softc *sc)
2914 {
2915 	struct ixgbe_hw	*hw = &sc->hw;
2916 	uint32_t reta = 0, mrqc, rss_key[10];
2917 	int i, j, queue_id, table_size, index_mult;
2918 
2919 	/* set up random bits */
2920 	stoeplitz_to_key(&rss_key, sizeof(rss_key));
2921 
2922 	/* Set multiplier for RETA setup and table size based on MAC */
2923 	index_mult = 0x1;
2924 	table_size = 128;
2925 	switch (sc->hw.mac.type) {
2926 	case ixgbe_mac_82598EB:
2927 		index_mult = 0x11;
2928 		break;
2929 	case ixgbe_mac_X550:
2930 	case ixgbe_mac_X550EM_x:
2931 	case ixgbe_mac_X550EM_a:
2932 		table_size = 512;
2933 		break;
2934 	default:
2935 		break;
2936 	}
2937 
2938 	/* Set up the redirection table */
2939 	for (i = 0, j = 0; i < table_size; i++, j++) {
2940 		if (j == sc->num_queues) j = 0;
2941 		queue_id = (j * index_mult);
2942 		/*
2943 		 * The low 8 bits are for hash value (n+0);
2944 		 * The next 8 bits are for hash value (n+1), etc.
2945 		 */
2946 		reta = reta >> 8;
2947 		reta = reta | ( ((uint32_t) queue_id) << 24);
2948 		if ((i & 3) == 3) {
2949 			if (i < 128)
2950 				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2951 			else
2952 				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
2953 				    reta);
2954 			reta = 0;
2955 		}
2956 	}
2957 
2958 	/* Now fill our hash function seeds */
2959 	for (i = 0; i < 10; i++)
2960 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
2961 
2962 	/*
2963 	 * Disable UDP hashing - IP fragments aren't currently being handled
2964 	 * and so we end up with a mix of 2-tuple and 4-tuple
2965 	 * traffic.
2966 	 */
2967 	mrqc = IXGBE_MRQC_RSSEN
2968 	     | IXGBE_MRQC_RSS_FIELD_IPV4
2969 	     | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
2970 	     | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
2971 	     | IXGBE_MRQC_RSS_FIELD_IPV6_EX
2972 	     | IXGBE_MRQC_RSS_FIELD_IPV6
2973 	     | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
2974 	;
2975 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2976 }
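
/*
 * Editor's note (worked example): each 32-bit RETA register holds four
 * 8-bit queue indices, lowest byte first, which is why the loop above
 * shifts the accumulator right by 8, ORs the new queue id into the top
 * byte and flushes every fourth entry.  With four queues and
 * index_mult == 1 the first register written works out to
 *
 *	RETA(0) = (3 << 24) | (2 << 16) | (1 << 8) | 0 = 0x03020100
 *
 * so hash results 0..3 are redirected to queues 0..3 in order.
 */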
2977 
2978 /*********************************************************************
2979  *
2980  *  Free all receive rings.
2981  *
2982  **********************************************************************/
2983 void
2984 ixgbe_free_receive_structures(struct ix_softc *sc)
2985 {
2986 	struct rx_ring *rxr;
2987 	int		i;
2988 
2989 	for (i = 0, rxr = sc->rx_rings; i < sc->num_queues; i++, rxr++)
2990 		if_rxr_init(&rxr->rx_ring, 0, 0);
2991 
2992 	for (i = 0, rxr = sc->rx_rings; i < sc->num_queues; i++, rxr++)
2993 		ixgbe_free_receive_buffers(rxr);
2994 }
2995 
2996 /*********************************************************************
2997  *
2998  *  Free receive ring data structures
2999  *
3000  **********************************************************************/
3001 void
3002 ixgbe_free_receive_buffers(struct rx_ring *rxr)
3003 {
3004 	struct ix_softc		*sc;
3005 	struct ixgbe_rx_buf	*rxbuf;
3006 	int			 i;
3007 
3008 	sc = rxr->sc;
3009 	if (rxr->rx_buffers != NULL) {
3010 		for (i = 0; i < sc->num_rx_desc; i++) {
3011 			rxbuf = &rxr->rx_buffers[i];
3012 			if (rxbuf->buf != NULL) {
3013 				bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map,
3014 				    0, rxbuf->map->dm_mapsize,
3015 				    BUS_DMASYNC_POSTREAD);
3016 				bus_dmamap_unload(rxr->rxdma.dma_tag,
3017 				    rxbuf->map);
3018 				m_freem(rxbuf->buf);
3019 				rxbuf->buf = NULL;
3020 			}
3021 			bus_dmamap_destroy(rxr->rxdma.dma_tag, rxbuf->map);
3022 			rxbuf->map = NULL;
3023 		}
3024 		free(rxr->rx_buffers, M_DEVBUF,
3025 		    sc->num_rx_desc * sizeof(struct ixgbe_rx_buf));
3026 		rxr->rx_buffers = NULL;
3027 	}
3028 }
3029 
3030 /*********************************************************************
3031  *
3032  *  This routine executes in interrupt context. It replenishes
3033  *  the mbufs in the descriptor ring and passes data which has been
3034  *  DMA'd into host memory up to the network stack.
3035  *
3036  *********************************************************************/
3037 int
3038 ixgbe_rxeof(struct rx_ring *rxr)
3039 {
3040 	struct ix_softc 	*sc = rxr->sc;
3041 	struct ifnet   		*ifp = &sc->arpcom.ac_if;
3042 	struct mbuf_list	 ml = MBUF_LIST_INITIALIZER();
3043 	struct mbuf    		*mp, *sendmp;
3044 	uint8_t		    	 eop = 0;
3045 	uint16_t		 len, vtag;
3046 	uint32_t		 staterr = 0, ptype;
3047 	struct ixgbe_rx_buf	*rxbuf, *nxbuf;
3048 	union ixgbe_adv_rx_desc	*rxdesc;
3049 	size_t			 dsize = sizeof(union ixgbe_adv_rx_desc);
3050 	int			 i, nextp;
3051 
3052 	if (!ISSET(ifp->if_flags, IFF_RUNNING))
3053 		return FALSE;
3054 
3055 	i = rxr->next_to_check;
3056 	while (if_rxr_inuse(&rxr->rx_ring) > 0) {
3057 		uint32_t hash, hashtype;
3058 
3059 		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
3060 		    dsize * i, dsize, BUS_DMASYNC_POSTREAD);
3061 
3062 		rxdesc = &rxr->rx_base[i];
3063 		staterr = letoh32(rxdesc->wb.upper.status_error);
3064 		if (!ISSET(staterr, IXGBE_RXD_STAT_DD)) {
3065 			bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
3066 			    dsize * i, dsize,
3067 			    BUS_DMASYNC_PREREAD);
3068 			break;
3069 		}
3070 
3071 		/* Zero out the receive descriptors status  */
3072 		/* Zero out the receive descriptor's status */
3073 		rxbuf = &rxr->rx_buffers[i];
3074 
3075 		/* pull the mbuf off the ring */
3076 		bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map, 0,
3077 		    rxbuf->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
3078 		bus_dmamap_unload(rxr->rxdma.dma_tag, rxbuf->map);
3079 
3080 		mp = rxbuf->buf;
3081 		len = letoh16(rxdesc->wb.upper.length);
3082 		ptype = letoh32(rxdesc->wb.lower.lo_dword.data) &
3083 		    IXGBE_RXDADV_PKTTYPE_MASK;
3084 		vtag = letoh16(rxdesc->wb.upper.vlan);
3085 		eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
3086 		hash = lemtoh32(&rxdesc->wb.lower.hi_dword.rss);
3087 		hashtype = lemtoh32(&rxdesc->wb.lower.lo_dword.data) &
3088 		    IXGBE_RXDADV_RSSTYPE_MASK;
3089 
3090 		if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
3091 			if (rxbuf->fmp) {
3092 				m_freem(rxbuf->fmp);
3093 				rxbuf->fmp = NULL;
3094 			}
3095 
3096 			m_freem(mp);
3097 			rxbuf->buf = NULL;
3098 			goto next_desc;
3099 		}
3100 
3101 		if (mp == NULL) {
3102 			panic("%s: ixgbe_rxeof: NULL mbuf in slot %d "
3103 			    "(nrx %d, filled %d)", sc->dev.dv_xname,
3104 			    i, if_rxr_inuse(&rxr->rx_ring),
3105 			    rxr->last_desc_filled);
3106 		}
3107 
3108 		/* Currently no HW RSC support on the 82599 */
3109 		if (!eop) {
3110 			/*
3111 			 * Figure out the next descriptor of this frame.
3112 			 */
3113 			nextp = i + 1;
3114 			if (nextp == sc->num_rx_desc)
3115 				nextp = 0;
3116 			nxbuf = &rxr->rx_buffers[nextp];
3117 			/* prefetch(nxbuf); */
3118 		}
3119 
3120 		/*
3121 		 * Rather than using the fmp/lmp global pointers
3122 		 * we now keep the head of a packet chain in the
3123 		 * buffer struct and pass this along from one
3124 		 * descriptor to the next, until we get EOP.
3125 		 */
3126 		mp->m_len = len;
3127 		/*
3128 		 * See if there is a stored head
3129 		 * that determines what we are
3130 		 */
3131 		sendmp = rxbuf->fmp;
3132 		rxbuf->buf = rxbuf->fmp = NULL;
3133 
3134 		if (sendmp != NULL) /* secondary frag */
3135 			sendmp->m_pkthdr.len += mp->m_len;
3136 		else {
3137 			/* first desc of a non-ps chain */
3138 			sendmp = mp;
3139 			sendmp->m_pkthdr.len = mp->m_len;
3140 #if NVLAN > 0
3141 			if (staterr & IXGBE_RXD_STAT_VP) {
3142 				sendmp->m_pkthdr.ether_vtag = vtag;
3143 				sendmp->m_flags |= M_VLANTAG;
3144 			}
3145 #endif
3146 		}
3147 
3148 		/* Pass the head pointer on */
3149 		if (eop == 0) {
3150 			nxbuf->fmp = sendmp;
3151 			sendmp = NULL;
3152 			mp->m_next = nxbuf->buf;
3153 		} else { /* Sending this frame? */
3154 			ixgbe_rx_checksum(staterr, sendmp, ptype);
3155 
3156 			if (hashtype != IXGBE_RXDADV_RSSTYPE_NONE) {
3157 				sendmp->m_pkthdr.ph_flowid = hash;
3158 				SET(sendmp->m_pkthdr.csum_flags, M_FLOWID);
3159 			}
3160 
3161 			ml_enqueue(&ml, sendmp);
3162 		}
3163 next_desc:
3164 		if_rxr_put(&rxr->rx_ring, 1);
3165 		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
3166 		    dsize * i, dsize,
3167 		    BUS_DMASYNC_PREREAD);
3168 
3169 		/* Advance our pointers to the next descriptor. */
3170 		if (++i == sc->num_rx_desc)
3171 			i = 0;
3172 	}
3173 	rxr->next_to_check = i;
3174 
3175 	if (ifiq_input(rxr->ifiq, &ml))
3176 		if_rxr_livelocked(&rxr->rx_ring);
3177 
3178 	if (!(staterr & IXGBE_RXD_STAT_DD))
3179 		return FALSE;
3180 
3181 	return TRUE;
3182 }
3183 
3184 /*********************************************************************
3185  *
3186  *  Verify that the hardware indicated that the checksum is valid.
3187  *  Inform the stack of the checksum status so that the stack
3188  *  doesn't spend time verifying it again.
3189  *
3190  *********************************************************************/
3191 void
3192 ixgbe_rx_checksum(uint32_t staterr, struct mbuf * mp, uint32_t ptype)
3193 {
3194 	uint16_t status = (uint16_t) staterr;
3195 	uint8_t  errors = (uint8_t) (staterr >> 24);
3196 
3197 	if (status & IXGBE_RXD_STAT_IPCS) {
3198 		if (!(errors & IXGBE_RXD_ERR_IPE)) {
3199 			/* IP Checksum Good */
3200 			mp->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
3201 		} else
3202 			mp->m_pkthdr.csum_flags = 0;
3203 	}
3204 	if (status & IXGBE_RXD_STAT_L4CS) {
3205 		if (!(errors & IXGBE_RXD_ERR_TCPE))
3206 			mp->m_pkthdr.csum_flags |=
3207 				M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
3208 	}
3209 }
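
/*
 * Editor's note: "staterr" carries the status flags checked here in its
 * low bits and the error flags in its top byte, so the split above is
 * simply status = staterr & 0xffff and errors = staterr >> 24.  Both
 * M_TCP_CSUM_IN_OK and M_UDP_CSUM_IN_OK are set on a good L4 checksum
 * because the descriptor only says "L4 checksum OK" without naming the
 * protocol; the stack only consults the flag matching the packet.
 */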
3210 
3211 void
3212 ixgbe_setup_vlan_hw_support(struct ix_softc *sc)
3213 {
3214 	uint32_t	ctrl;
3215 	int		i;
3216 
3217 	/*
3218 	 * A soft reset zeroes out the VFTA, so
3219 	 * we need to repopulate it now.
3220 	 */
3221 	for (i = 0; i < IXGBE_VFTA_SIZE; i++) {
3222 		if (sc->shadow_vfta[i] != 0)
3223 			IXGBE_WRITE_REG(&sc->hw, IXGBE_VFTA(i),
3224 			    sc->shadow_vfta[i]);
3225 	}
3226 
3227 	ctrl = IXGBE_READ_REG(&sc->hw, IXGBE_VLNCTRL);
3228 #if 0
3229 	/* Enable the Filter Table if enabled */
3230 	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
3231 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
3232 		ctrl |= IXGBE_VLNCTRL_VFE;
3233 	}
3234 #endif
3235 	if (sc->hw.mac.type == ixgbe_mac_82598EB)
3236 		ctrl |= IXGBE_VLNCTRL_VME;
3237 	IXGBE_WRITE_REG(&sc->hw, IXGBE_VLNCTRL, ctrl);
3238 
3239 	/* On the 82599 the VLAN enable is per-queue in RXDCTL */
3240 	if (sc->hw.mac.type != ixgbe_mac_82598EB) {
3241 		for (i = 0; i < sc->num_queues; i++) {
3242 			ctrl = IXGBE_READ_REG(&sc->hw, IXGBE_RXDCTL(i));
3243 			ctrl |= IXGBE_RXDCTL_VME;
3244 			IXGBE_WRITE_REG(&sc->hw, IXGBE_RXDCTL(i), ctrl);
3245 		}
3246 	}
3247 }
3248 
3249 void
3250 ixgbe_enable_intr(struct ix_softc *sc)
3251 {
3252 	struct ixgbe_hw *hw = &sc->hw;
3253 	uint32_t	mask, fwsm;
3254 
3255 	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3256 	/* Enable Fan Failure detection */
3257 	if (hw->device_id == IXGBE_DEV_ID_82598AT)
3258 		    mask |= IXGBE_EIMS_GPI_SDP1;
3259 
3260 	switch (sc->hw.mac.type) {
3261 	case ixgbe_mac_82599EB:
3262 		mask |= IXGBE_EIMS_ECC;
3263 		/* Temperature sensor on some adapters */
3264 		mask |= IXGBE_EIMS_GPI_SDP0;
3265 		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
3266 		mask |= IXGBE_EIMS_GPI_SDP1;
3267 		mask |= IXGBE_EIMS_GPI_SDP2;
3268 		break;
3269 	case ixgbe_mac_X540:
3270 		mask |= IXGBE_EIMS_ECC;
3271 		/* Detect if Thermal Sensor is enabled */
3272 		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3273 		if (fwsm & IXGBE_FWSM_TS_ENABLED)
3274 			mask |= IXGBE_EIMS_TS;
3275 		break;
3276 	case ixgbe_mac_X550:
3277 	case ixgbe_mac_X550EM_x:
3278 	case ixgbe_mac_X550EM_a:
3279 		mask |= IXGBE_EIMS_ECC;
3280 		/* MAC thermal sensor is automatically enabled */
3281 		mask |= IXGBE_EIMS_TS;
3282 		/* Some devices use SDP0 for important information */
3283 		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3284 		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3285 			mask |= IXGBE_EIMS_GPI_SDP0_X540;
3286 	default:
3287 		break;
3288 	}
3289 
3290 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3291 
3292 	/* With MSI-X we use auto clear */
3293 	if (sc->sc_intrmap) {
3294 		mask = IXGBE_EIMS_ENABLE_MASK;
3295 		/* Don't autoclear Link */
3296 		mask &= ~IXGBE_EIMS_OTHER;
3297 		mask &= ~IXGBE_EIMS_LSC;
3298 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3299 	}
3300 
3301 	IXGBE_WRITE_FLUSH(hw);
3302 }
3303 
3304 void
3305 ixgbe_disable_intr(struct ix_softc *sc)
3306 {
3307 	if (sc->sc_intrmap)
3308 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAC, 0);
3309 	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
3310 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, ~0);
3311 	} else {
3312 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, 0xFFFF0000);
3313 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(0), ~0);
3314 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(1), ~0);
3315 	}
3316 	IXGBE_WRITE_FLUSH(&sc->hw);
3317 }
3318 
3319 uint16_t
3320 ixgbe_read_pci_cfg(struct ixgbe_hw *hw, uint32_t reg)
3321 {
3322 	struct pci_attach_args	*pa;
3323 	uint32_t value;
3324 	int high = 0;
3325 
3326 	if (reg & 0x2) {
3327 		high = 1;
3328 		reg &= ~0x2;
3329 	}
3330 	pa = &((struct ixgbe_osdep *)hw->back)->os_pa;
3331 	value = pci_conf_read(pa->pa_pc, pa->pa_tag, reg);
3332 
3333 	if (high)
3334 		value >>= 16;
3335 
3336 	return (value & 0xffff);
3337 }
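
/*
 * Editor's note (worked example): PCI config space is accessed 32 bits
 * at a time, so a 16-bit register whose offset has bit 1 set lives in
 * the upper half of the aligned dword.  Reading a hypothetical offset
 * 0xA2, for instance, becomes
 *
 *	(pci_conf_read(pa->pa_pc, pa->pa_tag, 0xA0) >> 16) & 0xffff;
 *
 * which is exactly the high/low handling above and the read/modify/write
 * in ixgbe_write_pci_cfg() below.
 */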
3338 
3339 void
3340 ixgbe_write_pci_cfg(struct ixgbe_hw *hw, uint32_t reg, uint16_t value)
3341 {
3342 	struct pci_attach_args	*pa;
3343 	uint32_t rv;
3344 	int high = 0;
3345 
3346 	/* Read/modify/write, since config space is accessed 32 bits at a time. */
3347 	if (reg & 0x2) {
3348 		high = 1;
3349 		reg &= ~0x2;
3350 	}
3351 	pa = &((struct ixgbe_osdep *)hw->back)->os_pa;
3352 	rv = pci_conf_read(pa->pa_pc, pa->pa_tag, reg);
3353 	if (!high)
3354 		rv = (rv & 0xffff0000) | value;
3355 	else
3356 		rv = (rv & 0xffff) | ((uint32_t)value << 16);
3357 	pci_conf_write(pa->pa_pc, pa->pa_tag, reg, rv);
3358 }
3359 
3360 /*
3361  * Setup the correct IVAR register for a particular MSIX interrupt
3362  *   (yes this is all very magic and confusing :)
3363  *  - entry is the register array entry
3364  *  - vector is the MSIX vector for this queue
3365  *  - type is RX/TX/MISC
3366  */
3367 void
3368 ixgbe_set_ivar(struct ix_softc *sc, uint8_t entry, uint8_t vector, int8_t type)
3369 {
3370 	struct ixgbe_hw *hw = &sc->hw;
3371 	uint32_t ivar, index;
3372 
3373 	vector |= IXGBE_IVAR_ALLOC_VAL;
3374 
3375 	switch (hw->mac.type) {
3376 
3377 	case ixgbe_mac_82598EB:
3378 		if (type == -1)
3379 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3380 		else
3381 			entry += (type * 64);
3382 		index = (entry >> 2) & 0x1F;
3383 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3384 		ivar &= ~(0xFF << (8 * (entry & 0x3)));
3385 		ivar |= (vector << (8 * (entry & 0x3)));
3386 		IXGBE_WRITE_REG(&sc->hw, IXGBE_IVAR(index), ivar);
3387 		break;
3388 
3389 	case ixgbe_mac_82599EB:
3390 	case ixgbe_mac_X540:
3391 	case ixgbe_mac_X550:
3392 	case ixgbe_mac_X550EM_x:
3393 	case ixgbe_mac_X550EM_a:
3394 		if (type == -1) { /* MISC IVAR */
3395 			index = (entry & 1) * 8;
3396 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3397 			ivar &= ~(0xFF << index);
3398 			ivar |= (vector << index);
3399 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3400 		} else {	/* RX/TX IVARS */
3401 			index = (16 * (entry & 1)) + (8 * type);
3402 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3403 			ivar &= ~(0xFF << index);
3404 			ivar |= (vector << index);
3405 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3406 		}
3407 
3408 	default:
3409 		break;
3410 	}
3411 }
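
/*
 * Editor's note (worked example): on the 82599 and newer MACs each IVAR
 * register covers two queues, one byte per cause: bits 0-7 RX and 8-15
 * TX of the even queue, bits 16-23 RX and 24-31 TX of the odd queue.
 * That is what "index = (16 * (entry & 1)) + (8 * type)" selects above.
 * Pointing the RX cause of queue 3 (entry 3, type 0) at MSI-X vector 3,
 * for example, touches IVAR(1) bits 16-23 and writes
 * (3 | IXGBE_IVAR_ALLOC_VAL) into that byte.
 */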
3412 
3413 void
3414 ixgbe_configure_ivars(struct ix_softc *sc)
3415 {
3416 	struct ix_queue *que = sc->queues;
3417 	uint32_t newitr;
3418 	int i;
3419 
3420 	newitr = (4000000 / IXGBE_INTS_PER_SEC) & 0x0FF8;
3421 
3422 	for (i = 0; i < sc->num_queues; i++, que++) {
3423 		/* First the RX queue entry */
3424 		ixgbe_set_ivar(sc, i, que->msix, 0);
3425 		/* ... and the TX */
3426 		ixgbe_set_ivar(sc, i, que->msix, 1);
3427 		/* Set an Initial EITR value */
3428 		IXGBE_WRITE_REG(&sc->hw,
3429 		    IXGBE_EITR(que->msix), newitr);
3430 	}
3431 
3432 	/* For the Link interrupt */
3433 	ixgbe_set_ivar(sc, 1, sc->linkvec, -1);
3434 }
3435 
3436 /*
3437  * SFP module interrupts handler
3438  */
3439 void
3440 ixgbe_handle_mod(struct ix_softc *sc)
3441 {
3442 	struct ixgbe_hw *hw = &sc->hw;
3443 	uint32_t err;
3444 
3445 	err = hw->phy.ops.identify_sfp(hw);
3446 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3447 		printf("%s: Unsupported SFP+ module type was detected!\n",
3448 		    sc->dev.dv_xname);
3449 		return;
3450 	}
3451 	err = hw->mac.ops.setup_sfp(hw);
3452 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3453 		printf("%s: Setup failure - unsupported SFP+ module type!\n",
3454 		    sc->dev.dv_xname);
3455 		return;
3456 	}
3457 
3458 	ixgbe_handle_msf(sc);
3459 }
3460 
3461 
3462 /*
3463  * MSF (multispeed fiber) interrupts handler
3464  */
3465 void
3466 ixgbe_handle_msf(struct ix_softc *sc)
3467 {
3468 	struct ixgbe_hw *hw = &sc->hw;
3469 	uint32_t autoneg;
3470 	bool negotiate;
3471 
3472 	autoneg = hw->phy.autoneg_advertised;
3473 	if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) {
3474 		if (hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate))
3475 			return;
3476 	}
3477 	if (hw->mac.ops.setup_link)
3478 		hw->mac.ops.setup_link(hw, autoneg, TRUE);
3479 
3480 	ifmedia_delete_instance(&sc->media, IFM_INST_ANY);
3481 	ixgbe_add_media_types(sc);
3482 	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
3483 }
3484 
3485 /*
3486  * External PHY interrupts handler
3487  */
3488 void
3489 ixgbe_handle_phy(struct ix_softc *sc)
3490 {
3491 	struct ixgbe_hw *hw = &sc->hw;
3492 	int error;
3493 
3494 	error = hw->phy.ops.handle_lasi(hw);
3495 	if (error == IXGBE_ERR_OVERTEMP)
3496 		printf("%s: CRITICAL: EXTERNAL PHY OVER TEMP!! "
3497 		    " PHY will downshift to lower power state!\n",
3498 		    sc->dev.dv_xname);
3499 	else if (error)
3500 		printf("%s: Error handling LASI interrupt: %d\n",
3501 		    sc->dev.dv_xname, error);
3502 
3503 }
3504 
3505 #if NKSTAT > 0
3506 enum ix_counter_idx {
3507 	ix_counter_crcerrs,
3508 	ix_counter_lxontxc,
3509 	ix_counter_lxonrxc,
3510 	ix_counter_lxofftxc,
3511 	ix_counter_lxoffrxc,
3512 	ix_counter_prc64,
3513 	ix_counter_prc127,
3514 	ix_counter_prc255,
3515 	ix_counter_prc511,
3516 	ix_counter_prc1023,
3517 	ix_counter_prc1522,
3518 	ix_counter_gptc,
3519 	ix_counter_gorc,
3520 	ix_counter_gotc,
3521 	ix_counter_ruc,
3522 	ix_counter_rfc,
3523 	ix_counter_roc,
3524 	ix_counter_rjc,
3525 	ix_counter_tor,
3526 	ix_counter_tpr,
3527 	ix_counter_tpt,
3528 	ix_counter_gprc,
3529 	ix_counter_bprc,
3530 	ix_counter_mprc,
3531 	ix_counter_ptc64,
3532 	ix_counter_ptc127,
3533 	ix_counter_ptc255,
3534 	ix_counter_ptc511,
3535 	ix_counter_ptc1023,
3536 	ix_counter_ptc1522,
3537 	ix_counter_mptc,
3538 	ix_counter_bptc,
3539 
3540 	ix_counter_num,
3541 };
3542 
3543 CTASSERT(KSTAT_KV_U_PACKETS <= 0xff);
3544 CTASSERT(KSTAT_KV_U_BYTES <= 0xff);
3545 
3546 struct ix_counter {
3547 	char			 name[KSTAT_KV_NAMELEN];
3548 	uint32_t		 reg;
3549 	uint8_t			 width;
3550 	uint8_t			 unit;
3551 };
3552 
3553 static const struct ix_counter ix_counters[ix_counter_num] = {
3554 	[ix_counter_crcerrs] = {	"crc errs",	IXGBE_CRCERRS,	32,
3555 					    KSTAT_KV_U_PACKETS },
3556 	[ix_counter_lxontxc] = {	"tx link xon",	IXGBE_LXONTXC,	32,
3557 					    KSTAT_KV_U_PACKETS },
3558 	[ix_counter_lxonrxc] = {	"rx link xon",	0,		32,
3559 					    KSTAT_KV_U_PACKETS },
3560 	[ix_counter_lxofftxc] = {	"tx link xoff",	IXGBE_LXOFFTXC,	32,
3561 					    KSTAT_KV_U_PACKETS },
3562 	[ix_counter_lxoffrxc] = {	"rx link xoff",	0,		32,
3563 					    KSTAT_KV_U_PACKETS },
3564 	[ix_counter_prc64] = {		"rx 64B",	IXGBE_PRC64,	32,
3565 					    KSTAT_KV_U_PACKETS },
3566 	[ix_counter_prc127] = {		"rx 65-127B",	IXGBE_PRC127,	32,
3567 					    KSTAT_KV_U_PACKETS },
3568 	[ix_counter_prc255] = {		"rx 128-255B",	IXGBE_PRC255,	32,
3569 					    KSTAT_KV_U_PACKETS },
3570 	[ix_counter_prc511] = {		"rx 256-511B",	IXGBE_PRC511,	32,
3571 					    KSTAT_KV_U_PACKETS },
3572 	[ix_counter_prc1023] = {	"rx 512-1023B",	IXGBE_PRC1023,	32,
3573 					    KSTAT_KV_U_PACKETS },
3574 	[ix_counter_prc1522] = {	"rx 1024-maxB",	IXGBE_PRC1522,	32,
3575 					    KSTAT_KV_U_PACKETS },
3576 	[ix_counter_gptc] = {		"tx good",	IXGBE_GPTC,	32,
3577 					    KSTAT_KV_U_PACKETS },
3578 	[ix_counter_gorc] = {		"rx good",	IXGBE_GORCL,	36,
3579 					    KSTAT_KV_U_BYTES },
3580 	[ix_counter_gotc] = {		"tx good",	IXGBE_GOTCL,	36,
3581 					    KSTAT_KV_U_BYTES },
3582 	[ix_counter_ruc] = {		"rx undersize",	IXGBE_RUC,	32,
3583 					    KSTAT_KV_U_PACKETS },
3584 	[ix_counter_rfc] = {		"rx fragment",	IXGBE_RFC,	32,
3585 					    KSTAT_KV_U_PACKETS },
3586 	[ix_counter_roc] = {		"rx oversize",	IXGBE_ROC,	32,
3587 					    KSTAT_KV_U_PACKETS },
3588 	[ix_counter_rjc] = {		"rx jabber",	IXGBE_RJC,	32,
3589 					    KSTAT_KV_U_PACKETS },
3590 	[ix_counter_tor] = {		"rx total",	IXGBE_TORL,	36,
3591 					    KSTAT_KV_U_BYTES },
3592 	[ix_counter_tpr] = {		"rx total",	IXGBE_TPR,	32,
3593 					    KSTAT_KV_U_PACKETS },
3594 	[ix_counter_tpt] = {		"tx total",	IXGBE_TPT,	32,
3595 					    KSTAT_KV_U_PACKETS },
3596 	[ix_counter_gprc] = {		"rx good",	IXGBE_GPRC,	32,
3597 					    KSTAT_KV_U_PACKETS },
3598 	[ix_counter_bprc] = {		"rx bcast",	IXGBE_BPRC,	32,
3599 					    KSTAT_KV_U_PACKETS },
3600 	[ix_counter_mprc] = {		"rx mcast",	IXGBE_MPRC,	32,
3601 					    KSTAT_KV_U_PACKETS },
3602 	[ix_counter_ptc64] = {		"tx 64B",	IXGBE_PTC64,	32,
3603 					    KSTAT_KV_U_PACKETS },
3604 	[ix_counter_ptc127] = {		"tx 65-127B",	IXGBE_PTC127,	32,
3605 					    KSTAT_KV_U_PACKETS },
3606 	[ix_counter_ptc255] = {		"tx 128-255B",	IXGBE_PTC255,	32,
3607 					    KSTAT_KV_U_PACKETS },
3608 	[ix_counter_ptc511] = {		"tx 256-511B",	IXGBE_PTC511,	32,
3609 					    KSTAT_KV_U_PACKETS },
3610 	[ix_counter_ptc1023] = {	"tx 512-1023B",	IXGBE_PTC1023,	32,
3611 					    KSTAT_KV_U_PACKETS },
3612 	[ix_counter_ptc1522] = {	"tx 1024-maxB",	IXGBE_PTC1522,	32,
3613 					    KSTAT_KV_U_PACKETS },
3614 	[ix_counter_mptc] = {		"tx mcast",	IXGBE_MPTC,	32,
3615 					    KSTAT_KV_U_PACKETS },
3616 	[ix_counter_bptc] = {		"tx bcast",	IXGBE_BPTC,	32,
3617 					    KSTAT_KV_U_PACKETS },
3618 };
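
/*
 * The table above is purely a register map: each entry pairs a kstat key
 * name with the MAC statistics register it is accumulated from, the usable
 * counter width (32 or 36 bits) and the kstat unit.  The entries with
 * reg == 0 (rx link xon/xoff) live at different offsets on 82598 vs. later
 * MACs and are handled explicitly in ix_kstats_read().  Once installed, the
 * counters can be inspected from userland with kstat(1) by matching on the
 * "ix-stats" name for the ix(4) instance of interest (the exact matching
 * syntax depends on kstat(1)).
 */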
3619 
3620 struct ix_rxq_kstats {
3621 	struct kstat_kv	qprc;
3622 	struct kstat_kv	qbrc;
3623 	struct kstat_kv	qprdc;
3624 };
3625 
3626 static const struct ix_rxq_kstats ix_rxq_kstats_tpl = {
3627 	KSTAT_KV_UNIT_INITIALIZER("packets",
3628 	    KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS),
3629 	KSTAT_KV_UNIT_INITIALIZER("bytes",
3630 	    KSTAT_KV_T_COUNTER64, KSTAT_KV_U_BYTES),
3631 	KSTAT_KV_UNIT_INITIALIZER("qdrops",
3632 	    KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS),
3633 };
3634 
3635 struct ix_txq_kstats {
3636 	struct kstat_kv	qptc;
3637 	struct kstat_kv	qbtc;
3638 };
3639 
3640 static const struct ix_txq_kstats ix_txq_kstats_tpl = {
3641 	KSTAT_KV_UNIT_INITIALIZER("packets",
3642 	    KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS),
3643 	KSTAT_KV_UNIT_INITIALIZER("bytes",
3644 	    KSTAT_KV_T_COUNTER64, KSTAT_KV_U_BYTES),
3645 };
3646 
3647 static int	ix_kstats_read(struct kstat *ks);
3648 static int	ix_rxq_kstats_read(struct kstat *ks);
3649 static int	ix_txq_kstats_read(struct kstat *ks);
3650 
3651 static void
3652 ix_kstats(struct ix_softc *sc)
3653 {
3654 	struct kstat *ks;
3655 	struct kstat_kv *kvs;
3656 	unsigned int i;
3657 
3658 	mtx_init(&sc->sc_kstat_mtx, IPL_SOFTCLOCK);
3659 	timeout_set(&sc->sc_kstat_tmo, ix_kstats_tick, sc);
3660 
3661 	ks = kstat_create(sc->dev.dv_xname, 0, "ix-stats", 0,
3662 	    KSTAT_T_KV, 0);
3663 	if (ks == NULL)
3664 		return;
3665 
3666 	kvs = mallocarray(nitems(ix_counters), sizeof(*kvs),
3667 	    M_DEVBUF, M_WAITOK|M_ZERO);
3668 
3669 	for (i = 0; i < nitems(ix_counters); i++) {
3670 		const struct ix_counter *ixc = &ix_counters[i];
3671 
3672 		kstat_kv_unit_init(&kvs[i], ixc->name,
3673 		    KSTAT_KV_T_COUNTER64, ixc->unit);
3674 	}
3675 
3676 	kstat_set_mutex(ks, &sc->sc_kstat_mtx);
3677 	ks->ks_softc = sc;
3678 	ks->ks_data = kvs;
3679 	ks->ks_datalen = nitems(ix_counters) * sizeof(*kvs);
3680 	ks->ks_read = ix_kstats_read;
3681 
3682 	sc->sc_kstat = ks;
3683 	kstat_install(ks);
3684 }
3685 
3686 static void
3687 ix_rxq_kstats(struct ix_softc *sc, struct rx_ring *rxr)
3688 {
3689 	struct ix_rxq_kstats *stats;
3690 	struct kstat *ks;
3691 
3692 	ks = kstat_create(sc->dev.dv_xname, 0, "ix-rxq", rxr->me,
3693 	    KSTAT_T_KV, 0);
3694 	if (ks == NULL)
3695 		return;
3696 
3697 	stats = malloc(sizeof(*stats), M_DEVBUF, M_WAITOK|M_ZERO);
3698 	*stats = ix_rxq_kstats_tpl;
3699 
3700 	kstat_set_mutex(ks, &sc->sc_kstat_mtx);
3701 	ks->ks_softc = rxr;
3702 	ks->ks_data = stats;
3703 	ks->ks_datalen = sizeof(*stats);
3704 	ks->ks_read = ix_rxq_kstats_read;
3705 
3706 	rxr->kstat = ks;
3707 	kstat_install(ks);
3708 }
3709 
3710 static void
3711 ix_txq_kstats(struct ix_softc *sc, struct tx_ring *txr)
3712 {
3713 	struct ix_txq_kstats *stats;
3714 	struct kstat *ks;
3715 
3716 	ks = kstat_create(sc->dev.dv_xname, 0, "ix-txq", txr->me,
3717 	    KSTAT_T_KV, 0);
3718 	if (ks == NULL)
3719 		return;
3720 
3721 	stats = malloc(sizeof(*stats), M_DEVBUF, M_WAITOK|M_ZERO);
3722 	*stats = ix_txq_kstats_tpl;
3723 
3724 	kstat_set_mutex(ks, &sc->sc_kstat_mtx);
3725 	ks->ks_softc = txr;
3726 	ks->ks_data = stats;
3727 	ks->ks_datalen = sizeof(*stats);
3728 	ks->ks_read = ix_txq_kstats_read;
3729 
3730 	txr->kstat = ks;
3731 	kstat_install(ks);
3732 }
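
/*
 * Each rx and tx ring gets its own "ix-rxq"/"ix-txq" kstat, with the ring
 * index (rxr->me / txr->me) used as the kstat unit number, so e.g. a
 * 4-queue device exports ix-rxq:0..3 and ix-txq:0..3 alongside the single
 * ix-stats kstat.  All of them share sc_kstat_mtx, the same mutex the
 * 1-second accumulation tick below takes.
 */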
3733 
3734 /**********************************************************************
3735  *
3736  *  Update the board statistics counters.
3737  *
3738  **********************************************************************/
3739 
3740 static void
3741 ix_kstats_tick(void *arg)
3742 {
3743 	struct ix_softc *sc = arg;
3744 	int i;
3745 
3746 	timeout_add_sec(&sc->sc_kstat_tmo, 1);
3747 
3748 	mtx_enter(&sc->sc_kstat_mtx);
3749 	ix_kstats_read(sc->sc_kstat);
3750 	for (i = 0; i < sc->num_queues; i++) {
3751 		ix_rxq_kstats_read(sc->rx_rings[i].kstat);
3752 		ix_txq_kstats_read(sc->tx_rings[i].kstat);
3753 	}
3754 	mtx_leave(&sc->sc_kstat_mtx);
3755 }
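
/*
 * Back-of-the-envelope rationale for the 1-second tick (an assumption, not
 * taken from the datasheet): the widest hardware counters used here are
 * 36-bit byte counts, i.e. at most 2^36 - 1 ~= 68.7 GB, which a 10Gb/s port
 * (~1.25 GB/s) can wrap in roughly 55 seconds; the 32-bit packet counters
 * wrap at minimum-frame line rate (~14.88 Mpps) in roughly
 * 2^32 / 14.88e6 ~= 290 seconds.  Folding them into the 64-bit kstat
 * accumulators once per second therefore stays comfortably ahead of any
 * counter wrap.
 */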
3756 
3757 static uint64_t
3758 ix_read36(struct ixgbe_hw *hw, bus_size_t loreg, bus_size_t hireg)
3759 {
3760 	uint64_t lo, hi;
3761 
3762 	lo = IXGBE_READ_REG(hw, loreg);
3763 	hi = IXGBE_READ_REG(hw, hireg);
3764 
3765 	return (((hi & 0xf) << 32) | lo);
3766 }
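
/*
 * Worked example for ix_read36(): with the low register reading 0xffffffff
 * and the high register reading 0xf, the result is 0xfffffffff (2^36 - 1).
 * Only the low four bits of the high register are kept because these
 * counters are 36 bits wide (see the width column in ix_counters); the
 * remaining high bits are treated as reserved here.
 */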
3767 
3768 static int
3769 ix_kstats_read(struct kstat *ks)
3770 {
3771 	struct ix_softc *sc = ks->ks_softc;
3772 	struct kstat_kv *kvs = ks->ks_data;
3773 	struct ixgbe_hw	*hw = &sc->hw;
3774 	unsigned int i;
3775 
3776 	for (i = 0; i < nitems(ix_counters); i++) {
3777 		const struct ix_counter *ixc = &ix_counters[i];
3778 		uint32_t reg = ixc->reg;
3779 		uint64_t v;
3780 
3781 		if (reg == 0)
3782 			continue;
3783 
3784 		if (ixc->width > 32) {
3785 			if (sc->hw.mac.type == ixgbe_mac_82598EB)
3786 				v = IXGBE_READ_REG(hw, reg + 4);
3787 			else
3788 				v = ix_read36(hw, reg, reg + 4);
3789 		} else
3790 			v = IXGBE_READ_REG(hw, reg);
3791 
3792 		kstat_kv_u64(&kvs[i]) += v;
3793 	}
3794 
3795 	/* handle the exceptions */
3796 	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
3797 		kstat_kv_u64(&kvs[ix_counter_lxonrxc]) +=
3798 		    IXGBE_READ_REG(hw, IXGBE_LXONRXC);
3799 		kstat_kv_u64(&kvs[ix_counter_lxoffrxc]) +=
3800 		    IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
3801 	} else {
3802 		kstat_kv_u64(&kvs[ix_counter_lxonrxc]) +=
3803 		    IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
3804 		kstat_kv_u64(&kvs[ix_counter_lxoffrxc]) +=
3805 		    IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
3806 	}
3807 
3808 	getnanouptime(&ks->ks_updated);
3809 
3810 	return (0);
3811 }
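
/*
 * The two table entries with reg == 0 are filled in by the exception block
 * above: the rx link xon/xoff counts sit at IXGBE_LXONRXC/IXGBE_LXOFFRXC on
 * 82598 and at IXGBE_LXONRXCNT/IXGBE_LXOFFRXCNT on later MACs.  Likewise
 * the 36-bit counters are read via the high register alone on 82598 but via
 * the L/H pair elsewhere.  The += accumulation relies on the statistics
 * registers clearing on read, the usual behaviour for these MACs.
 */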
3812 
3813 int
3814 ix_rxq_kstats_read(struct kstat *ks)
3815 {
3816 	struct ix_rxq_kstats *stats = ks->ks_data;
3817 	struct rx_ring *rxr = ks->ks_softc;
3818 	struct ix_softc *sc = rxr->sc;
3819 	struct ixgbe_hw	*hw = &sc->hw;
3820 	uint32_t i = rxr->me;
3821 
3822 	kstat_kv_u64(&stats->qprc) += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
3823 	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
3824 		kstat_kv_u64(&stats->qprdc) +=
3825 		    IXGBE_READ_REG(hw, IXGBE_RNBC(i));
3826 		kstat_kv_u64(&stats->qbrc) +=
3827 		    IXGBE_READ_REG(hw, IXGBE_QBRC(i));
3828 	} else {
3829 		kstat_kv_u64(&stats->qprdc) +=
3830 		    IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
3831 		kstat_kv_u64(&stats->qbrc) +=
3832 		    ix_read36(hw, IXGBE_QBRC_L(i), IXGBE_QBRC_H(i));
3833 	}
3834 
3835 	getnanouptime(&ks->ks_updated);
3836 
3837 	return (0);
3838 }
3839 
3840 int
3841 ix_txq_kstats_read(struct kstat *ks)
3842 {
3843 	struct ix_txq_kstats *stats = ks->ks_data;
3844 	struct tx_ring *txr = ks->ks_softc;
3845 	struct ix_softc *sc = txr->sc;
3846 	struct ixgbe_hw	*hw = &sc->hw;
3847 	uint32_t i = txr->me;
3848 
3849 	kstat_kv_u64(&stats->qptc) += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
3850 	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
3851 		kstat_kv_u64(&stats->qbtc) +=
3852 		    IXGBE_READ_REG(hw, IXGBE_QBTC(i));
3853 	} else {
3854 		kstat_kv_u64(&stats->qbtc) +=
3855 		    ix_read36(hw, IXGBE_QBTC_L(i), IXGBE_QBTC_H(i));
3856 	}
3857 
3858 	getnanouptime(&ks->ks_updated);
3859 
3860 	return (0);
3861 }
3862 #endif /* NKSTAT > 0 */
3863 
3864 void
3865 ixgbe_map_queue_statistics(struct ix_softc *sc)
3866 {
3867 	int i;
3868 	uint32_t r;
3869 
3870 	for (i = 0; i < 32; i++) {
3871 		/*
3872 		 * Queues 0-15 are mapped 1:1
3873 		 * Queue 0 -> Counter 0
3874 		 * Queue 1 -> Counter 1
3875 		 * Queue 2 -> Counter 2....
3876 		 * Queues 16-127 are mapped to Counter 0
3877 		 */
3878 		if (i < 4) {
3879 			r = (i * 4 + 0);
3880 			r |= (i * 4 + 1) << 8;
3881 			r |= (i * 4 + 2) << 16;
3882 			r |= (i * 4 + 3) << 24;
3883 		} else
3884 			r = 0;
3885 
3886 		IXGBE_WRITE_REG(&sc->hw, IXGBE_RQSMR(i), r);
3887 		IXGBE_WRITE_REG(&sc->hw, IXGBE_TQSM(i), r);
3888 	}
3889 }
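
/*
 * Worked example of the mapping above: each RQSMR/TQSM register holds four
 * per-queue map fields, one byte apart.  For i == 0 the value written is
 * 0 | (1 << 8) | (2 << 16) | (3 << 24) == 0x03020100, i.e. queues 0-3 map
 * to counters 0-3; i == 1 writes 0x07060504, and so on up to i == 3
 * (0x0f0e0d0c), covering queues 0-15.  Registers 4-31 are written with 0,
 * so queues 16-127 all share counter 0.
 */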
3890