1 /*	$OpenBSD: if_ix.c,v 1.157 2019/04/10 09:55:02 dlg Exp $	*/
2 
3 /******************************************************************************
4 
5   Copyright (c) 2001-2013, Intel Corporation
6   All rights reserved.
7 
8   Redistribution and use in source and binary forms, with or without
9   modification, are permitted provided that the following conditions are met:
10 
11    1. Redistributions of source code must retain the above copyright notice,
12       this list of conditions and the following disclaimer.
13 
14    2. Redistributions in binary form must reproduce the above copyright
15       notice, this list of conditions and the following disclaimer in the
16       documentation and/or other materials provided with the distribution.
17 
18    3. Neither the name of the Intel Corporation nor the names of its
19       contributors may be used to endorse or promote products derived from
20       this software without specific prior written permission.
21 
22   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32   POSSIBILITY OF SUCH DAMAGE.
33 
34 ******************************************************************************/
35 /* FreeBSD: src/sys/dev/ixgbe/ixgbe.c 251964 Jun 18 21:28:19 2013 UTC */
36 
37 #include <dev/pci/if_ix.h>
38 #include <dev/pci/ixgbe_type.h>
39 
40 /*********************************************************************
41  *  Driver version
42  *********************************************************************/
43 /* char ixgbe_driver_version[] = "2.5.13"; */
44 
45 /*********************************************************************
46  *  PCI Device ID Table
47  *
48  *  Used by ixgbe_probe to match supported adapters.
49  *********************************************************************/
50 
51 const struct pci_matchid ixgbe_devices[] = {
52 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598 },
53 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598_BX },
54 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AF_DUAL },
55 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AF },
56 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AT },
57 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AT2 },
58 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AT_DUAL },
59 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598EB_CX4 },
60 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598EB_CX4_DUAL },
61 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598EB_XF_LR },
62 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598EB_SFP },
63 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598_SR_DUAL_EM },
64 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598_DA_DUAL },
65 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_KX4 },
66 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_KX4_MEZZ },
67 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_XAUI },
68 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_COMBO_BP },
69 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_BPLANE_FCOE },
70 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_CX4 },
71 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_T3_LOM },
72 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_SFP },
73 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_SFP_EM },
74 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_SFP_SF_QP },
75 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_SFP_SF2 },
76 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_SFP_FCOE },
77 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599EN_SFP },
78 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_QSFP_SF_QP },
79 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X540T },
80 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X540T1 },
81 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550T },
82 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550T1 },
83 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_X_KX4 },
84 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_X_KR },
85 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_X_SFP },
86 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_X_10G_T },
87 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_X_1G_T },
88 };
89 
90 /*********************************************************************
91  *  Function prototypes
92  *********************************************************************/
93 int	ixgbe_probe(struct device *, void *, void *);
94 void	ixgbe_attach(struct device *, struct device *, void *);
95 int	ixgbe_detach(struct device *, int);
96 void	ixgbe_start(struct ifqueue *);
97 int	ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
98 int	ixgbe_rxrinfo(struct ix_softc *, struct if_rxrinfo *);
99 int	ixgbe_get_sffpage(struct ix_softc *, struct if_sffpage *);
100 void	ixgbe_watchdog(struct ifnet *);
101 void	ixgbe_init(void *);
102 void	ixgbe_stop(void *);
103 void	ixgbe_media_status(struct ifnet *, struct ifmediareq *);
104 int	ixgbe_media_change(struct ifnet *);
105 void	ixgbe_identify_hardware(struct ix_softc *);
106 int	ixgbe_allocate_pci_resources(struct ix_softc *);
107 int	ixgbe_allocate_legacy(struct ix_softc *);
108 int	ixgbe_allocate_queues(struct ix_softc *);
109 void	ixgbe_free_pci_resources(struct ix_softc *);
110 void	ixgbe_local_timer(void *);
111 void	ixgbe_setup_interface(struct ix_softc *);
112 void	ixgbe_config_gpie(struct ix_softc *);
113 void	ixgbe_config_delay_values(struct ix_softc *);
114 void	ixgbe_add_media_types(struct ix_softc *);
115 void	ixgbe_config_link(struct ix_softc *);
116 
117 int	ixgbe_allocate_transmit_buffers(struct tx_ring *);
118 int	ixgbe_setup_transmit_structures(struct ix_softc *);
119 int	ixgbe_setup_transmit_ring(struct tx_ring *);
120 void	ixgbe_initialize_transmit_units(struct ix_softc *);
121 void	ixgbe_free_transmit_structures(struct ix_softc *);
122 void	ixgbe_free_transmit_buffers(struct tx_ring *);
123 
124 int	ixgbe_allocate_receive_buffers(struct rx_ring *);
125 int	ixgbe_setup_receive_structures(struct ix_softc *);
126 int	ixgbe_setup_receive_ring(struct rx_ring *);
127 void	ixgbe_initialize_receive_units(struct ix_softc *);
128 void	ixgbe_free_receive_structures(struct ix_softc *);
129 void	ixgbe_free_receive_buffers(struct rx_ring *);
130 void	ixgbe_initialize_rss_mapping(struct ix_softc *);
131 int	ixgbe_rxfill(struct rx_ring *);
132 void	ixgbe_rxrefill(void *);
133 
134 void	ixgbe_enable_intr(struct ix_softc *);
135 void	ixgbe_disable_intr(struct ix_softc *);
136 void	ixgbe_update_stats_counters(struct ix_softc *);
137 int	ixgbe_txeof(struct tx_ring *);
138 int	ixgbe_rxeof(struct ix_queue *);
139 void	ixgbe_rx_checksum(uint32_t, struct mbuf *, uint32_t);
140 void	ixgbe_iff(struct ix_softc *);
141 #ifdef IX_DEBUG
142 void	ixgbe_print_hw_stats(struct ix_softc *);
143 #endif
144 void	ixgbe_update_link_status(struct ix_softc *);
145 int	ixgbe_get_buf(struct rx_ring *, int);
146 int	ixgbe_encap(struct tx_ring *, struct mbuf *);
147 int	ixgbe_dma_malloc(struct ix_softc *, bus_size_t,
148 		    struct ixgbe_dma_alloc *, int);
149 void	ixgbe_dma_free(struct ix_softc *, struct ixgbe_dma_alloc *);
150 int	ixgbe_tx_ctx_setup(struct tx_ring *, struct mbuf *, uint32_t *,
151 	    uint32_t *);
152 int	ixgbe_tso_setup(struct tx_ring *, struct mbuf *, uint32_t *,
153 	    uint32_t *);
154 void	ixgbe_set_ivar(struct ix_softc *, uint8_t, uint8_t, int8_t);
155 void	ixgbe_configure_ivars(struct ix_softc *);
156 uint8_t	*ixgbe_mc_array_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);
157 
158 void	ixgbe_setup_vlan_hw_support(struct ix_softc *);
159 
160 /* Support for pluggable optic modules */
161 void	ixgbe_setup_optics(struct ix_softc *);
162 void	ixgbe_handle_mod(struct ix_softc *);
163 void	ixgbe_handle_msf(struct ix_softc *);
164 void	ixgbe_handle_phy(struct ix_softc *);
165 
166 /* Legacy (single vector) interrupt handler */
167 int	ixgbe_intr(void *);
168 void	ixgbe_enable_queue(struct ix_softc *, uint32_t);
169 void	ixgbe_disable_queue(struct ix_softc *, uint32_t);
170 void	ixgbe_rearm_queue(struct ix_softc *, uint32_t);
171 
172 /*********************************************************************
173  *  OpenBSD Device Interface Entry Points
174  *********************************************************************/
175 
176 struct cfdriver ix_cd = {
177 	NULL, "ix", DV_IFNET
178 };
179 
180 struct cfattach ix_ca = {
181 	sizeof(struct ix_softc), ixgbe_probe, ixgbe_attach, ixgbe_detach
182 };
183 
184 int ixgbe_smart_speed = ixgbe_smart_speed_on;
185 
186 /*********************************************************************
187  *  Device identification routine
188  *
189  *  ixgbe_probe determines if the driver should be loaded on
190  *  adapter based on PCI vendor/device id of the adapter.
191  *
192  *  return 0 on success, positive on failure
193  *********************************************************************/
194 
195 int
196 ixgbe_probe(struct device *parent, void *match, void *aux)
197 {
198 	INIT_DEBUGOUT("ixgbe_probe: begin");
199 
200 	return (pci_matchbyid((struct pci_attach_args *)aux, ixgbe_devices,
201 	    nitems(ixgbe_devices)));
202 }
203 
204 /*********************************************************************
205  *  Device initialization routine
206  *
207  *  The attach entry point is called when the driver is being loaded.
208  *  This routine identifies the type of hardware, allocates all resources
209  *  and initializes the hardware.
210  *
211  *  return 0 on success, positive on failure
212  *********************************************************************/
213 
214 void
215 ixgbe_attach(struct device *parent, struct device *self, void *aux)
216 {
217 	struct pci_attach_args	*pa = (struct pci_attach_args *)aux;
218 	struct ix_softc		*sc = (struct ix_softc *)self;
219 	int			 error = 0;
220 	uint16_t		 csum;
221 	uint32_t			 ctrl_ext;
222 	struct ixgbe_hw		*hw = &sc->hw;
223 
224 	INIT_DEBUGOUT("ixgbe_attach: begin");
225 
226 	sc->osdep.os_sc = sc;
227 	sc->osdep.os_pa = *pa;
228 
229 	rw_init(&sc->sfflock, "ixsff");
230 
231 	/* Set up the timer callout */
232 	timeout_set(&sc->timer, ixgbe_local_timer, sc);
233 	timeout_set(&sc->rx_refill, ixgbe_rxrefill, sc);
234 
235 	/* Determine hardware revision */
236 	ixgbe_identify_hardware(sc);
237 
238 	/* Default number of TX and RX descriptors */
239 	sc->num_tx_desc = DEFAULT_TXD;
240 	sc->num_rx_desc = DEFAULT_RXD;
241 
242 	/* Do base PCI setup - map BAR0 */
243 	if (ixgbe_allocate_pci_resources(sc))
244 		goto err_out;
245 
246 	/* Allocate our TX/RX Queues */
247 	if (ixgbe_allocate_queues(sc))
248 		goto err_out;
249 
250 	/* Allocate multicast array memory. */
251 	sc->mta = mallocarray(IXGBE_ETH_LENGTH_OF_ADDRESS,
252 	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
253 	if (sc->mta == NULL) {
254 		printf(": Can not allocate multicast setup array\n");
255 		goto err_late;
256 	}
257 
258 	/* Initialize the shared code */
259 	error = ixgbe_init_shared_code(hw);
260 	if (error) {
261 		printf(": Unable to initialize the shared code\n");
262 		goto err_late;
263 	}
264 
265 	/* Make sure we have a good EEPROM before we read from it */
266 	if (sc->hw.eeprom.ops.validate_checksum(&sc->hw, &csum) < 0) {
267 		printf(": The EEPROM Checksum Is Not Valid\n");
268 		goto err_late;
269 	}
270 
271 	error = ixgbe_init_hw(hw);
272 	if (error == IXGBE_ERR_EEPROM_VERSION) {
273 		printf(": This device is a pre-production adapter/"
274 		    "LOM.  Please be aware there may be issues associated "
275 		    "with your hardware.\nIf you are experiencing problems "
276 		    "please contact your Intel or hardware representative "
277 		    "who provided you with this hardware.\n");
278 	} else if (error && (error != IXGBE_ERR_SFP_NOT_PRESENT &&
279 	    error != IXGBE_ERR_SFP_NOT_SUPPORTED)) {
280 		printf(": Hardware Initialization Failure\n");
281 		goto err_late;
282 	}
283 
284 	/* Detect and set physical type */
285 	ixgbe_setup_optics(sc);
286 
287 	bcopy(sc->hw.mac.addr, sc->arpcom.ac_enaddr,
288 	    IXGBE_ETH_LENGTH_OF_ADDRESS);
289 
290 	error = ixgbe_allocate_legacy(sc);
291 	if (error)
292 		goto err_late;
293 
294 	/* Enable the optics for 82599 SFP+ fiber */
295 	if (sc->hw.mac.ops.enable_tx_laser)
296 		sc->hw.mac.ops.enable_tx_laser(&sc->hw);
297 
298 	/* Enable power to the phy */
299 	if (hw->phy.ops.set_phy_power)
300 		hw->phy.ops.set_phy_power(&sc->hw, TRUE);
301 
302 	/* Setup OS specific network interface */
303 	ixgbe_setup_interface(sc);
304 
305 	/* Initialize statistics */
306 	ixgbe_update_stats_counters(sc);
307 
308 	/* Get the PCI-E bus info and determine LAN ID */
309 	hw->mac.ops.get_bus_info(hw);
310 
311 	/* Set an initial default flow control value */
312 	sc->fc = ixgbe_fc_full;
313 
314 	/* let hardware know driver is loaded */
315 	ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
316 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
317 	IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
318 
319 	printf(", address %s\n", ether_sprintf(sc->hw.mac.addr));
320 
321 	INIT_DEBUGOUT("ixgbe_attach: end");
322 	return;
323 
324 err_late:
325 	ixgbe_free_transmit_structures(sc);
326 	ixgbe_free_receive_structures(sc);
327 err_out:
328 	ixgbe_free_pci_resources(sc);
329 	free(sc->mta, M_DEVBUF, IXGBE_ETH_LENGTH_OF_ADDRESS *
330 	    MAX_NUM_MULTICAST_ADDRESSES);
331 }
332 
333 /*********************************************************************
334  *  Device removal routine
335  *
336  *  The detach entry point is called when the driver is being removed.
337  *  This routine stops the adapter and deallocates all the resources
338  *  that were allocated for driver operation.
339  *
340  *  return 0 on success, positive on failure
341  *********************************************************************/
342 
343 int
344 ixgbe_detach(struct device *self, int flags)
345 {
346 	struct ix_softc *sc = (struct ix_softc *)self;
347 	struct ifnet *ifp = &sc->arpcom.ac_if;
348 	uint32_t	ctrl_ext;
349 
350 	INIT_DEBUGOUT("ixgbe_detach: begin");
351 
352 	ixgbe_stop(sc);
353 
354 	/* let hardware know driver is unloading */
355 	ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
356 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
357 	IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
358 
359 	ether_ifdetach(ifp);
360 	if_detach(ifp);
361 
362 	timeout_del(&sc->timer);
363 	timeout_del(&sc->rx_refill);
364 	ixgbe_free_pci_resources(sc);
365 
366 	ixgbe_free_transmit_structures(sc);
367 	ixgbe_free_receive_structures(sc);
368 	free(sc->mta, M_DEVBUF, IXGBE_ETH_LENGTH_OF_ADDRESS *
369 	    MAX_NUM_MULTICAST_ADDRESSES);
370 
371 	return (0);
372 }
373 
374 /*********************************************************************
375  *  Transmit entry point
376  *
377  *  ixgbe_start is called by the stack to initiate a transmit.
378  *  The driver will remain in this routine as long as there are
379  *  packets to transmit and transmit resources are available.
380  *  When transmit resources run out, the interface queue is marked
381  *  oactive and the remaining packets stay queued.
382  **********************************************************************/
383 
384 void
385 ixgbe_start(struct ifqueue *ifq)
386 {
387 	struct ifnet		*ifp = ifq->ifq_if;
388 	struct ix_softc		*sc = ifp->if_softc;
389 	struct tx_ring		*txr = sc->tx_rings;
390 	struct mbuf  		*m_head;
391 	unsigned int		 head, free, used;
392 	int			 post = 0;
393 
394 	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(ifq))
395 		return;
396 	if (!sc->link_up)
397 		return;
398 
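	/*
	 * Compute the free descriptor count: the distance from the producer
	 * index (next_avail_desc) back to the consumer index (next_to_clean),
	 * wrapping around the ring.
	 */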
399 	head = txr->next_avail_desc;
400 	free = txr->next_to_clean;
401 	if (free <= head)
402 		free += sc->num_tx_desc;
403 	free -= head;
404 
405 	membar_consumer();
406 
407 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
408 	    0, txr->txdma.dma_map->dm_mapsize,
409 	    BUS_DMASYNC_POSTWRITE);
410 
411 	for (;;) {
412 		/* Check that we have the minimal number of TX descriptors. */
413 		if (free <= IXGBE_TX_OP_THRESHOLD) {
414 			ifq_set_oactive(ifq);
415 			break;
416 		}
417 
418 		m_head = ifq_dequeue(ifq);
419 		if (m_head == NULL)
420 			break;
421 
422 		used = ixgbe_encap(txr, m_head);
423 		if (used == 0) {
424 			m_freem(m_head);
425 			continue;
426 		}
427 
428 		free -= used;
429 
430 #if NBPFILTER > 0
431 		if (ifp->if_bpf)
432 			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
433 #endif
434 
435 		/* Set timeout in case hardware has problems transmitting */
436 		txr->watchdog_timer = IXGBE_TX_TIMEOUT;
437 		ifp->if_timer = IXGBE_TX_TIMEOUT;
438 
439 		post = 1;
440 	}
441 
442 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
443 	    0, txr->txdma.dma_map->dm_mapsize,
444 	    BUS_DMASYNC_PREWRITE);
445 
446 	/*
447 	 * Advance the Transmit Descriptor Tail (Tdt), this tells the
448 	 * hardware that this frame is available to transmit.
449 	 */
450 	if (post)
451 		IXGBE_WRITE_REG(&sc->hw, IXGBE_TDT(txr->me),
452 		    txr->next_avail_desc);
453 }
454 
455 /*********************************************************************
456  *  Ioctl entry point
457  *
458  *  ixgbe_ioctl is called when the user wants to configure the
459  *  interface.
460  *
461  *  return 0 on success, positive on failure
462  **********************************************************************/
463 
464 int
465 ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
466 {
467 	struct ix_softc	*sc = ifp->if_softc;
468 	struct ifreq	*ifr = (struct ifreq *) data;
469 	int		s, error = 0;
470 
471 	s = splnet();
472 
473 	switch (command) {
474 	case SIOCSIFADDR:
475 		IOCTL_DEBUGOUT("ioctl: SIOCxIFADDR (Get/Set Interface Addr)");
476 		ifp->if_flags |= IFF_UP;
477 		if (!(ifp->if_flags & IFF_RUNNING))
478 			ixgbe_init(sc);
479 		break;
480 
481 	case SIOCSIFFLAGS:
482 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
483 		if (ifp->if_flags & IFF_UP) {
484 			if (ifp->if_flags & IFF_RUNNING)
485 				error = ENETRESET;
486 			else
487 				ixgbe_init(sc);
488 		} else {
489 			if (ifp->if_flags & IFF_RUNNING)
490 				ixgbe_stop(sc);
491 		}
492 		break;
493 
494 	case SIOCSIFMEDIA:
495 	case SIOCGIFMEDIA:
496 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
497 		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
498 		break;
499 
500 	case SIOCGIFRXR:
501 		error = ixgbe_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
502 		break;
503 
504 	case SIOCGIFSFFPAGE:
505 		error = rw_enter(&sc->sfflock, RW_WRITE|RW_INTR);
506 		if (error != 0)
507 			break;
508 
509 		error = ixgbe_get_sffpage(sc, (struct if_sffpage *)data);
510 		rw_exit(&sc->sfflock);
511 		break;
512 
513 	default:
514 		error = ether_ioctl(ifp, &sc->arpcom, command, data);
515 	}
516 
517 	if (error == ENETRESET) {
518 		if (ifp->if_flags & IFF_RUNNING) {
519 			ixgbe_disable_intr(sc);
520 			ixgbe_iff(sc);
521 			ixgbe_enable_intr(sc);
522 		}
523 		error = 0;
524 	}
525 
526 	splx(s);
527 	return (error);
528 }
529 
530 int
531 ixgbe_get_sffpage(struct ix_softc *sc, struct if_sffpage *sff)
532 {
533 	struct ixgbe_hw *hw = &sc->hw;
534 	uint32_t swfw_mask = hw->phy.phy_semaphore_mask;
535 	uint8_t page;
536 	size_t i;
537 	int error = EIO;
538 
539 	if (hw->phy.type == ixgbe_phy_fw)
540 		return (ENODEV);
541 
542 	if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
543 		return (EBUSY); /* XXX */
544 
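	/*
	 * Byte 127 is the page select: remember the page currently mapped
	 * in, switch to the requested page if it differs, and restore the
	 * original page once the dump below completes.
	 */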
545 	if (sff->sff_addr == IFSFF_ADDR_EEPROM) {
546 		if (hw->phy.ops.read_i2c_byte_unlocked(hw, 127,
547 		    IFSFF_ADDR_EEPROM, &page))
548 			goto error;
549 		if (page != sff->sff_page &&
550 		    hw->phy.ops.write_i2c_byte_unlocked(hw, 127,
551 		    IFSFF_ADDR_EEPROM, sff->sff_page))
552 			goto error;
553 	}
554 
555 	for (i = 0; i < sizeof(sff->sff_data); i++) {
556 		if (hw->phy.ops.read_i2c_byte_unlocked(hw, i,
557 		    sff->sff_addr, &sff->sff_data[i]))
558 			goto error;
559 	}
560 
561 	if (sff->sff_addr == IFSFF_ADDR_EEPROM) {
562 		if (page != sff->sff_page &&
563 		    hw->phy.ops.write_i2c_byte_unlocked(hw, 127,
564 		    IFSFF_ADDR_EEPROM, page))
565 			goto error;
566 	}
567 
568 	error = 0;
569 error:
570 	hw->mac.ops.release_swfw_sync(hw, swfw_mask);
571 	return (error);
572 }
573 
574 int
575 ixgbe_rxrinfo(struct ix_softc *sc, struct if_rxrinfo *ifri)
576 {
577 	struct if_rxring_info *ifr, ifr1;
578 	struct rx_ring *rxr;
579 	int error, i;
580 	u_int n = 0;
581 
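	/* A single queue uses the on-stack ifr1 and skips the allocation. */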
582 	if (sc->num_queues > 1) {
583 		if ((ifr = mallocarray(sc->num_queues, sizeof(*ifr), M_DEVBUF,
584 		    M_WAITOK | M_ZERO)) == NULL)
585 			return (ENOMEM);
586 	} else
587 		ifr = &ifr1;
588 
589 	for (i = 0; i < sc->num_queues; i++) {
590 		rxr = &sc->rx_rings[i];
591 		ifr[n].ifr_size = sc->rx_mbuf_sz;
592 		snprintf(ifr[n].ifr_name, sizeof(ifr[n].ifr_name), "/%d", i);
593 		ifr[n].ifr_info = rxr->rx_ring;
594 		n++;
595 	}
596 
597 	error = if_rxr_info_ioctl(ifri, sc->num_queues, ifr);
598 
599 	if (sc->num_queues > 1)
600 		free(ifr, M_DEVBUF, sc->num_queues * sizeof(*ifr));
601 	return (error);
602 }
603 
604 /*********************************************************************
605  *  Watchdog entry point
606  *
607  **********************************************************************/
608 
609 void
610 ixgbe_watchdog(struct ifnet * ifp)
611 {
612 	struct ix_softc *sc = (struct ix_softc *)ifp->if_softc;
613 	struct tx_ring *txr = sc->tx_rings;
614 	struct ixgbe_hw *hw = &sc->hw;
615 	int		tx_hang = FALSE;
616 	int		i;
617 
618 	/*
619 	 * The timer is set to IXGBE_TX_TIMEOUT every time ixgbe_start()
620 	 * queues a packet.  Whenever all descriptors are clean it is set to 0.
621 	 */
622 	for (i = 0; i < sc->num_queues; i++, txr++) {
623 		if (txr->watchdog_timer == 0 || --txr->watchdog_timer)
624 			continue;
625 		else {
626 			tx_hang = TRUE;
627 			break;
628 		}
629 	}
630 	if (tx_hang == FALSE)
631 		return;
632 
633 	/*
634 	 * If we are in this routine because of pause frames, then don't
635 	 * reset the hardware.
636 	 */
637 	if (!(IXGBE_READ_REG(hw, IXGBE_TFCS) & IXGBE_TFCS_TXON)) {
638 		for (i = 0; i < sc->num_queues; i++, txr++)
639 			txr->watchdog_timer = IXGBE_TX_TIMEOUT;
640 		ifp->if_timer = IXGBE_TX_TIMEOUT;
641 		return;
642 	}
643 
644 
645 	printf("%s: Watchdog timeout -- resetting\n", ifp->if_xname);
646 	for (i = 0; i < sc->num_queues; i++, txr++) {
647 		printf("%s: Queue(%d) tdh = %d, hw tdt = %d\n", ifp->if_xname, i,
648 		    IXGBE_READ_REG(hw, IXGBE_TDH(i)),
649 		    IXGBE_READ_REG(hw, IXGBE_TDT(i)));
650 		printf("%s: TX(%d) Next TX to Clean = %d\n", ifp->if_xname,
651 		    i, txr->next_to_clean);
652 	}
653 	ifp->if_flags &= ~IFF_RUNNING;
654 	sc->watchdog_events++;
655 
656 	ixgbe_init(sc);
657 }
658 
659 /*********************************************************************
660  *  Init entry point
661  *
662  *  This routine is used in two ways. It is used by the stack as
663  *  the init entry point in the network interface structure. It is also used
664  *  by the driver as a hw/sw initialization routine to get to a
665  *  consistent state.
666  *
667  *  On failure the adapter is stopped and an error is logged.
668  **********************************************************************/
669 #define IXGBE_MHADD_MFS_SHIFT 16
670 
671 void
672 ixgbe_init(void *arg)
673 {
674 	struct ix_softc	*sc = (struct ix_softc *)arg;
675 	struct ifnet	*ifp = &sc->arpcom.ac_if;
676 	struct rx_ring	*rxr = sc->rx_rings;
677 	uint32_t	 k, txdctl, rxdctl, rxctrl, mhadd, itr;
678 	int		 i, s, err;
679 
680 	INIT_DEBUGOUT("ixgbe_init: begin");
681 
682 	s = splnet();
683 
684 	ixgbe_stop(sc);
685 
686 	/* reprogram the RAR[0] in case user changed it. */
687 	ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, IXGBE_RAH_AV);
688 
689 	/* Get the latest MAC address; the user may have configured a LAA */
690 	bcopy(sc->arpcom.ac_enaddr, sc->hw.mac.addr,
691 	      IXGBE_ETH_LENGTH_OF_ADDRESS);
692 	ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, 1);
693 	sc->hw.addr_ctrl.rar_used_count = 1;
694 
695 	/* Prepare transmit descriptors and buffers */
696 	if (ixgbe_setup_transmit_structures(sc)) {
697 		printf("%s: Could not setup transmit structures\n",
698 		    ifp->if_xname);
699 		ixgbe_stop(sc);
700 		splx(s);
701 		return;
702 	}
703 
704 	ixgbe_init_hw(&sc->hw);
705 	ixgbe_initialize_transmit_units(sc);
706 
707 	/* Use 2k clusters, even for jumbo frames */
708 	sc->rx_mbuf_sz = MCLBYTES + ETHER_ALIGN;
709 
710 	/* Prepare receive descriptors and buffers */
711 	if (ixgbe_setup_receive_structures(sc)) {
712 		printf("%s: Could not setup receive structures\n",
713 		    ifp->if_xname);
714 		ixgbe_stop(sc);
715 		splx(s);
716 		return;
717 	}
718 
719 	/* Configure RX settings */
720 	ixgbe_initialize_receive_units(sc);
721 
722 	/* Enable SDP & MSIX interrupts based on adapter */
723 	ixgbe_config_gpie(sc);
724 
725 	/* Program promiscuous mode and multicast filters. */
726 	ixgbe_iff(sc);
727 
728 	/* Set MRU size */
729 	mhadd = IXGBE_READ_REG(&sc->hw, IXGBE_MHADD);
730 	mhadd &= ~IXGBE_MHADD_MFS_MASK;
731 	mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
732 	IXGBE_WRITE_REG(&sc->hw, IXGBE_MHADD, mhadd);
733 
734 	/* Now enable all the queues */
735 	for (i = 0; i < sc->num_queues; i++) {
736 		txdctl = IXGBE_READ_REG(&sc->hw, IXGBE_TXDCTL(i));
737 		txdctl |= IXGBE_TXDCTL_ENABLE;
738 		/* Set WTHRESH to 8, burst writeback */
739 		txdctl |= (8 << 16);
740 		/*
741 		 * When the internal queue falls below PTHRESH (16),
742 		 * start prefetching as long as there are at least
743 		 * HTHRESH (1) buffers ready.
744 		 */
745 		txdctl |= (16 << 0) | (1 << 8);
746 		IXGBE_WRITE_REG(&sc->hw, IXGBE_TXDCTL(i), txdctl);
747 	}
748 
749 	for (i = 0; i < sc->num_queues; i++) {
750 		rxdctl = IXGBE_READ_REG(&sc->hw, IXGBE_RXDCTL(i));
751 		if (sc->hw.mac.type == ixgbe_mac_82598EB) {
752 			/*
753 			 * PTHRESH = 21
754 			 * HTHRESH = 4
755 			 * WTHRESH = 8
756 			 */
757 			rxdctl &= ~0x3FFFFF;
758 			rxdctl |= 0x080420;
759 		}
760 		rxdctl |= IXGBE_RXDCTL_ENABLE;
761 		IXGBE_WRITE_REG(&sc->hw, IXGBE_RXDCTL(i), rxdctl);
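		/* Wait up to ~10ms for the hardware to report the queue enabled. */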
762 		for (k = 0; k < 10; k++) {
763 			if (IXGBE_READ_REG(&sc->hw, IXGBE_RXDCTL(i)) &
764 			    IXGBE_RXDCTL_ENABLE)
765 				break;
766 			else
767 				msec_delay(1);
768 		}
769 		IXGBE_WRITE_FLUSH(&sc->hw);
770 		IXGBE_WRITE_REG(&sc->hw, IXGBE_RDT(i), rxr->last_desc_filled);
771 	}
772 
773 	/* Set up VLAN support and filter */
774 	ixgbe_setup_vlan_hw_support(sc);
775 
776 	/* Enable Receive engine */
777 	rxctrl = IXGBE_READ_REG(&sc->hw, IXGBE_RXCTRL);
778 	if (sc->hw.mac.type == ixgbe_mac_82598EB)
779 		rxctrl |= IXGBE_RXCTRL_DMBYPS;
780 	rxctrl |= IXGBE_RXCTRL_RXEN;
781 	sc->hw.mac.ops.enable_rx_dma(&sc->hw, rxctrl);
782 
783 	timeout_add_sec(&sc->timer, 1);
784 
785 	/* Set up MSI/X routing */
786 	if (sc->msix > 1) {
787 		ixgbe_configure_ivars(sc);
788 		/* Set up auto-mask */
789 		if (sc->hw.mac.type == ixgbe_mac_82598EB)
790 			IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
791 		else {
792 			IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
793 			IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
794 		}
795 	} else {  /* Simple settings for Legacy/MSI */
796 		ixgbe_set_ivar(sc, 0, 0, 0);
797 		ixgbe_set_ivar(sc, 0, 0, 1);
798 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
799 	}
800 
801 	/* Check on any SFP devices that need to be kick-started */
802 	if (sc->hw.phy.type == ixgbe_phy_none) {
803 		err = sc->hw.phy.ops.identify(&sc->hw);
804 		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
805 			printf("Unsupported SFP+ module type was detected.\n");
806 			splx(s);
807 			return;
808 		}
809 	}
810 
811 	/* Setup interrupt moderation */
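	/* The low three EITR bits are not part of the interval field, hence the 0xff8 mask. */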
812 	itr = (4000000 / IXGBE_INTS_PER_SEC) & 0xff8;
813 	if (sc->hw.mac.type != ixgbe_mac_82598EB)
814 		itr |= IXGBE_EITR_LLI_MOD | IXGBE_EITR_CNT_WDIS;
815 	IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(0), itr);
816 
817 	/* Enable power to the phy */
818 	if (sc->hw.phy.ops.set_phy_power)
819 		sc->hw.phy.ops.set_phy_power(&sc->hw, TRUE);
820 
821 	/* Config/Enable Link */
822 	ixgbe_config_link(sc);
823 
824 	/* Hardware Packet Buffer & Flow Control setup */
825 	ixgbe_config_delay_values(sc);
826 
827 	/* Initialize the FC settings */
828 	sc->hw.mac.ops.start_hw(&sc->hw);
829 
830 	/* And now turn on interrupts */
831 	ixgbe_enable_intr(sc);
832 
833 	/* Now inform the stack we're ready */
834 	ifp->if_flags |= IFF_RUNNING;
835 	ifq_clr_oactive(&ifp->if_snd);
836 
837 	splx(s);
838 }
839 
840 void
841 ixgbe_config_gpie(struct ix_softc *sc)
842 {
843 	struct ixgbe_hw	*hw = &sc->hw;
844 	uint32_t gpie;
845 
846 	gpie = IXGBE_READ_REG(&sc->hw, IXGBE_GPIE);
847 
848 	/* Fan Failure Interrupt */
849 	if (hw->device_id == IXGBE_DEV_ID_82598AT)
850 		gpie |= IXGBE_SDP1_GPIEN;
851 
852 	if (sc->hw.mac.type == ixgbe_mac_82599EB) {
853 		/* Add for Module detection */
854 		gpie |= IXGBE_SDP2_GPIEN;
855 
856 		/* Media ready */
857 		if (hw->device_id != IXGBE_DEV_ID_82599_QSFP_SF_QP)
858 			gpie |= IXGBE_SDP1_GPIEN;
859 
860 		/*
861 		 * Set LL interval to max to reduce the number of low latency
862 		 * interrupts hitting the card when the ring is getting full.
863 		 */
864 		gpie |= 0xf << IXGBE_GPIE_LLI_DELAY_SHIFT;
865 	}
866 
867 	if (sc->hw.mac.type == ixgbe_mac_X540 ||
868 	    hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
869 	    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
870 		/*
871 		 * Thermal Failure Detection (X540)
872 		 * Link Detection (X552 SFP+, X552/X557-AT)
873 		 */
874 		gpie |= IXGBE_SDP0_GPIEN_X540;
875 
876 		/*
877 		 * Set LL interval to max to reduce the number of low latency
878 		 * interrupts hitting the card when the ring is getting full.
879 		 */
880 		gpie |= 0xf << IXGBE_GPIE_LLI_DELAY_SHIFT;
881 	}
882 
883 	if (sc->msix > 1) {
884 		/* Enable Enhanced MSIX mode */
885 		gpie |= IXGBE_GPIE_MSIX_MODE;
886 		gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
887 		    IXGBE_GPIE_OCD;
888 	}
889 
890 	IXGBE_WRITE_REG(&sc->hw, IXGBE_GPIE, gpie);
891 }
892 
893 /*
894  * Requires sc->max_frame_size to be set.
895  */
896 void
897 ixgbe_config_delay_values(struct ix_softc *sc)
898 {
899 	struct ixgbe_hw *hw = &sc->hw;
900 	uint32_t rxpb, frame, size, tmp;
901 
902 	frame = sc->max_frame_size;
903 
904 	/* Calculate High Water */
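	/*
	 * The delay value (DV) approximates how much data can still arrive
	 * after a pause frame is sent; high water is the RX packet buffer
	 * size minus that allowance, both in KB.
	 */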
905 	switch (hw->mac.type) {
906 	case ixgbe_mac_X540:
907 	case ixgbe_mac_X550:
908 	case ixgbe_mac_X550EM_x:
909 		tmp = IXGBE_DV_X540(frame, frame);
910 		break;
911 	default:
912 		tmp = IXGBE_DV(frame, frame);
913 		break;
914 	}
915 	size = IXGBE_BT2KB(tmp);
916 	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
917 	hw->fc.high_water[0] = rxpb - size;
918 
919 	/* Now calculate Low Water */
920 	switch (hw->mac.type) {
921 	case ixgbe_mac_X540:
922 	case ixgbe_mac_X550:
923 	case ixgbe_mac_X550EM_x:
924 		tmp = IXGBE_LOW_DV_X540(frame);
925 		break;
926 	default:
927 		tmp = IXGBE_LOW_DV(frame);
928 		break;
929 	}
930 	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
931 
932 	hw->fc.requested_mode = sc->fc;
933 	hw->fc.pause_time = IXGBE_FC_PAUSE;
934 	hw->fc.send_xon = TRUE;
935 }
936 
937 /*
938  * MSIX Interrupt Handlers
939  */
940 void
941 ixgbe_enable_queue(struct ix_softc *sc, uint32_t vector)
942 {
943 	uint64_t queue = 1ULL << vector;
944 	uint32_t mask;
945 
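	/*
	 * The 82598 has a single 32-bit EIMS register; later MACs split
	 * the per-queue bits across the two EIMS_EX registers.
	 */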
946 	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
947 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
948 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMS, mask);
949 	} else {
950 		mask = (queue & 0xFFFFFFFF);
951 		if (mask)
952 			IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMS_EX(0), mask);
953 		mask = (queue >> 32);
954 		if (mask)
955 			IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMS_EX(1), mask);
956 	}
957 }
958 
959 void
960 ixgbe_disable_queue(struct ix_softc *sc, uint32_t vector)
961 {
962 	uint64_t queue = 1ULL << vector;
963 	uint32_t mask;
964 
965 	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
966 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
967 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, mask);
968 	} else {
969 		mask = (queue & 0xFFFFFFFF);
970 		if (mask)
971 			IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(0), mask);
972 		mask = (queue >> 32);
973 		if (mask)
974 			IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(1), mask);
975 	}
976 }
977 
978 /*********************************************************************
979  *
980  *  Legacy Interrupt Service routine
981  *
982  **********************************************************************/
983 
984 int
985 ixgbe_intr(void *arg)
986 {
987 	struct ix_softc	*sc = (struct ix_softc *)arg;
988 	struct ix_queue *que = sc->queues;
989 	struct ifnet	*ifp = &sc->arpcom.ac_if;
990 	struct tx_ring	*txr = sc->tx_rings;
991 	struct ixgbe_hw	*hw = &sc->hw;
992 	uint32_t	 reg_eicr, mod_mask, msf_mask;
993 	int		 i, refill = 0;
994 
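	/* In legacy/MSI mode, reading EICR also acknowledges the pending causes. */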
995 	reg_eicr = IXGBE_READ_REG(&sc->hw, IXGBE_EICR);
996 	if (reg_eicr == 0) {
997 		ixgbe_enable_intr(sc);
998 		return (0);
999 	}
1000 
1001 	if (ISSET(ifp->if_flags, IFF_RUNNING)) {
1002 		ixgbe_rxeof(que);
1003 		ixgbe_txeof(txr);
1004 		refill = 1;
1005 	}
1006 
1007 	if (refill) {
1008 		if (ixgbe_rxfill(que->rxr)) {
1009 			/* Advance the Rx Queue "Tail Pointer" */
1010 			IXGBE_WRITE_REG(&sc->hw, IXGBE_RDT(que->rxr->me),
1011 			    que->rxr->last_desc_filled);
1012 		} else
1013 			timeout_add(&sc->rx_refill, 1);
1014 	}
1015 
1016 	/* Link status change */
1017 	if (reg_eicr & IXGBE_EICR_LSC) {
1018 		KERNEL_LOCK();
1019 		ixgbe_update_link_status(sc);
1020 		KERNEL_UNLOCK();
1021 		ifq_start(&ifp->if_snd);
1022 	}
1023 
1024 	if (hw->mac.type != ixgbe_mac_82598EB) {
1025 		if (reg_eicr & IXGBE_EICR_ECC) {
1026 			printf("%s: CRITICAL: ECC ERROR!! "
1027 			    "Please Reboot!!\n", sc->dev.dv_xname);
1028 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
1029 		}
1030 		/* Check for over temp condition */
1031 		if (reg_eicr & IXGBE_EICR_TS) {
1032 			printf("%s: CRITICAL: OVER TEMP!! "
1033 			    "PHY IS SHUT DOWN!!\n", ifp->if_xname);
1034 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
1035 		}
1036 	}
1037 
1038 	/* Pluggable optics-related interrupt */
1039 	if (ixgbe_is_sfp(hw)) {
1040 		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) {
1041 			mod_mask = IXGBE_EICR_GPI_SDP0_X540;
1042 			msf_mask = IXGBE_EICR_GPI_SDP1_X540;
1043 		} else if (hw->mac.type == ixgbe_mac_X540 ||
1044 		    hw->mac.type == ixgbe_mac_X550 ||
1045 		    hw->mac.type == ixgbe_mac_X550EM_x) {
1046 			mod_mask = IXGBE_EICR_GPI_SDP2_X540;
1047 			msf_mask = IXGBE_EICR_GPI_SDP1_X540;
1048 		} else {
1049 			mod_mask = IXGBE_EICR_GPI_SDP2;
1050 			msf_mask = IXGBE_EICR_GPI_SDP1;
1051 		}
1052 		if (reg_eicr & mod_mask) {
1053 			/* Clear the interrupt */
1054 			IXGBE_WRITE_REG(hw, IXGBE_EICR, mod_mask);
1055 			KERNEL_LOCK();
1056 			ixgbe_handle_mod(sc);
1057 			KERNEL_UNLOCK();
1058 		} else if ((hw->phy.media_type != ixgbe_media_type_copper) &&
1059 		    (reg_eicr & msf_mask)) {
1060 			/* Clear the interrupt */
1061 			IXGBE_WRITE_REG(hw, IXGBE_EICR, msf_mask);
1062 			KERNEL_LOCK();
1063 			ixgbe_handle_msf(sc);
1064 			KERNEL_UNLOCK();
1065 		}
1066 	}
1067 
1068 	/* Check for fan failure */
1069 	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
1070 	    (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
1071 		printf("%s: CRITICAL: FAN FAILURE!! "
1072 		    "REPLACE IMMEDIATELY!!\n", ifp->if_xname);
1073 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1074 	}
1075 
1076 	/* External PHY interrupt */
1077 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
1078 	    (reg_eicr & IXGBE_EICR_GPI_SDP0_X540)) {
1079 		/* Clear the interrupt */
1080 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
1081 		KERNEL_LOCK();
1082 		ixgbe_handle_phy(sc);
1083 		KERNEL_UNLOCK();
1084 	}
1085 
1086 	for (i = 0; i < sc->num_queues; i++, que++)
1087 		ixgbe_enable_queue(sc, que->msix);
1088 
1089 	return (1);
1090 }
1091 
1092 /*********************************************************************
1093  *
1094  *  Media Ioctl callback
1095  *
1096  *  This routine is called whenever the user queries the status of
1097  *  the interface using ifconfig.
1098  *
1099  **********************************************************************/
1100 void
1101 ixgbe_media_status(struct ifnet * ifp, struct ifmediareq *ifmr)
1102 {
1103 	struct ix_softc *sc = ifp->if_softc;
1104 
1105 	ifmr->ifm_active = IFM_ETHER;
1106 	ifmr->ifm_status = IFM_AVALID;
1107 
1108 	INIT_DEBUGOUT("ixgbe_media_status: begin");
1109 	ixgbe_update_link_status(sc);
1110 
1111 	if (LINK_STATE_IS_UP(ifp->if_link_state)) {
1112 		ifmr->ifm_status |= IFM_ACTIVE;
1113 
1114 		switch (sc->link_speed) {
1115 		case IXGBE_LINK_SPEED_100_FULL:
1116 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
1117 			break;
1118 		case IXGBE_LINK_SPEED_1GB_FULL:
1119 			switch (sc->optics) {
1120 			case IFM_10G_SR: /* multi-speed fiber */
1121 				ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
1122 				break;
1123 			case IFM_10G_LR: /* multi-speed fiber */
1124 				ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
1125 				break;
1126 			default:
1127 				ifmr->ifm_active |= sc->optics | IFM_FDX;
1128 				break;
1129 			}
1130 			break;
1131 		case IXGBE_LINK_SPEED_10GB_FULL:
1132 			ifmr->ifm_active |= sc->optics | IFM_FDX;
1133 			break;
1134 		}
1135 
1136 		switch (sc->hw.fc.current_mode) {
1137 		case ixgbe_fc_tx_pause:
1138 			ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
1139 			break;
1140 		case ixgbe_fc_rx_pause:
1141 			ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
1142 			break;
1143 		case ixgbe_fc_full:
1144 			ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE |
1145 			    IFM_ETH_TXPAUSE;
1146 			break;
1147 		default:
1148 			ifmr->ifm_active &= ~(IFM_FLOW | IFM_ETH_RXPAUSE |
1149 			    IFM_ETH_TXPAUSE);
1150 			break;
1151 		}
1152 	}
1153 }
1154 
1155 /*********************************************************************
1156  *
1157  *  Media Ioctl callback
1158  *
1159  *  This routine is called when the user changes speed/duplex using
1160  *  media/mediaopt options with ifconfig.
1161  *
1162  **********************************************************************/
1163 int
1164 ixgbe_media_change(struct ifnet *ifp)
1165 {
1166 	struct ix_softc	*sc = ifp->if_softc;
1167 	struct ixgbe_hw	*hw = &sc->hw;
1168 	struct ifmedia	*ifm = &sc->media;
1169 	ixgbe_link_speed speed = 0;
1170 
1171 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1172 		return (EINVAL);
1173 
1174 	if (hw->phy.media_type == ixgbe_media_type_backplane)
1175 		return (ENODEV);
1176 
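	/*
	 * The cases below fall through on purpose so each media type
	 * accumulates every link speed it can negotiate.
	 */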
1177 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
1178 		case IFM_AUTO:
1179 		case IFM_10G_T:
1180 			speed |= IXGBE_LINK_SPEED_100_FULL;
1181 		case IFM_10G_SR: /* KR, too */
1182 		case IFM_10G_LR:
1183 		case IFM_10G_CX4: /* KX4 */
1184 			speed |= IXGBE_LINK_SPEED_1GB_FULL;
1185 		case IFM_10G_SFP_CU:
1186 			speed |= IXGBE_LINK_SPEED_10GB_FULL;
1187 			break;
1188 		case IFM_1000_T:
1189 			speed |= IXGBE_LINK_SPEED_100_FULL;
1190 		case IFM_1000_LX:
1191 		case IFM_1000_SX:
1192 		case IFM_1000_CX: /* KX */
1193 			speed |= IXGBE_LINK_SPEED_1GB_FULL;
1194 			break;
1195 		case IFM_100_TX:
1196 			speed |= IXGBE_LINK_SPEED_100_FULL;
1197 			break;
1198 		default:
1199 			return (EINVAL);
1200 	}
1201 
1202 	hw->mac.autotry_restart = TRUE;
1203 	hw->mac.ops.setup_link(hw, speed, TRUE);
1204 
1205 	return (0);
1206 }
1207 
1208 /*********************************************************************
1209  *
1210  *  This routine maps the mbufs to tx descriptors, allowing the
1211  *  TX engine to transmit the packets.
1212  *  	- returns the number of descriptors used, or 0 on failure
1213  *
1214  **********************************************************************/
1215 
1216 int
1217 ixgbe_encap(struct tx_ring *txr, struct mbuf *m_head)
1218 {
1219 	struct ix_softc *sc = txr->sc;
1220 	uint32_t	olinfo_status = 0, cmd_type_len;
1221 	int             i, j, ntxc;
1222 	int		first, last = 0;
1223 	bus_dmamap_t	map;
1224 	struct ixgbe_tx_buf *txbuf;
1225 	union ixgbe_adv_tx_desc *txd = NULL;
1226 
1227 	/* Basic descriptor defines */
1228 	cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
1229 	    IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
1230 
1231 #if NVLAN > 0
1232 	if (m_head->m_flags & M_VLANTAG)
1233 		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
1234 #endif
1235 
1236 	/*
1237 	 * Important to capture the first descriptor
1238 	 * used because it will contain the index of
1239 	 * the one we tell the hardware to report back
1240 	 */
1241 	first = txr->next_avail_desc;
1242 	txbuf = &txr->tx_buffers[first];
1243 	map = txbuf->map;
1244 
1245 	/*
1246 	 * Map the packet for DMA.
1247 	 */
1248 	switch (bus_dmamap_load_mbuf(txr->txdma.dma_tag, map,
1249 	    m_head, BUS_DMA_NOWAIT)) {
1250 	case 0:
1251 		break;
1252 	case EFBIG:
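		/*
		 * The chain has too many segments for the map: compact it
		 * with m_defrag() and retry the load once.
		 */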
1253 		if (m_defrag(m_head, M_NOWAIT) == 0 &&
1254 		    bus_dmamap_load_mbuf(txr->txdma.dma_tag, map,
1255 		     m_head, BUS_DMA_NOWAIT) == 0)
1256 			break;
1257 		/* FALLTHROUGH */
1258 	default:
1259 		sc->no_tx_dma_setup++;
1260 		return (0);
1261 	}
1262 
1263 	/*
1264 	 * Set the appropriate offload context;
1265 	 * this becomes the first descriptor.
1266 	 */
1267 	ntxc = ixgbe_tx_ctx_setup(txr, m_head, &cmd_type_len, &olinfo_status);
1268 	if (ntxc == -1)
1269 		goto xmit_fail;
1270 
1271 	i = txr->next_avail_desc + ntxc;
1272 	if (i >= sc->num_tx_desc)
1273 		i -= sc->num_tx_desc;
1274 
1275 	for (j = 0; j < map->dm_nsegs; j++) {
1276 		txd = &txr->tx_base[i];
1277 
1278 		txd->read.buffer_addr = htole64(map->dm_segs[j].ds_addr);
1279 		txd->read.cmd_type_len = htole32(txr->txd_cmd |
1280 		    cmd_type_len | map->dm_segs[j].ds_len);
1281 		txd->read.olinfo_status = htole32(olinfo_status);
1282 		last = i; /* descriptor that will get completion IRQ */
1283 
1284 		if (++i == sc->num_tx_desc)
1285 			i = 0;
1286 	}
1287 
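	/*
	 * Close the packet: EOP marks the last descriptor and RS asks the
	 * hardware to report status so ixgbe_txeof() can reclaim it.
	 */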
1288 	txd->read.cmd_type_len |=
1289 	    htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
1290 
1291 	bus_dmamap_sync(txr->txdma.dma_tag, map, 0, map->dm_mapsize,
1292 	    BUS_DMASYNC_PREWRITE);
1293 
1294 	/* Set the index of the descriptor that will be marked done */
1295 	txbuf->m_head = m_head;
1296 	txbuf->eop_index = last;
1297 
1298 	membar_producer();
1299 
1300 	txr->next_avail_desc = i;
1301 
1302 	++txr->tx_packets;
1303 	return (ntxc + j);
1304 
1305 xmit_fail:
1306 	bus_dmamap_unload(txr->txdma.dma_tag, txbuf->map);
1307 	return (0);
1308 }
1309 
1310 void
1311 ixgbe_iff(struct ix_softc *sc)
1312 {
1313 	struct ifnet *ifp = &sc->arpcom.ac_if;
1314 	struct arpcom *ac = &sc->arpcom;
1315 	uint32_t	fctrl;
1316 	uint8_t	*mta;
1317 	uint8_t	*update_ptr;
1318 	struct ether_multi *enm;
1319 	struct ether_multistep step;
1320 	int	mcnt = 0;
1321 
1322 	IOCTL_DEBUGOUT("ixgbe_iff: begin");
1323 
1324 	mta = sc->mta;
1325 	bzero(mta, sizeof(uint8_t) * IXGBE_ETH_LENGTH_OF_ADDRESS *
1326 	    MAX_NUM_MULTICAST_ADDRESSES);
1327 
1328 	fctrl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
1329 	fctrl &= ~(IXGBE_FCTRL_MPE | IXGBE_FCTRL_UPE);
1330 	ifp->if_flags &= ~IFF_ALLMULTI;
1331 
1332 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0 ||
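	/*
	 * Fall back to receiving all multicast frames when the filter cannot
	 * express the configuration: promiscuous mode, address ranges, or
	 * more groups than the hardware table can hold.
	 */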
1333 	    ac->ac_multicnt > MAX_NUM_MULTICAST_ADDRESSES) {
1334 		ifp->if_flags |= IFF_ALLMULTI;
1335 		fctrl |= IXGBE_FCTRL_MPE;
1336 		if (ifp->if_flags & IFF_PROMISC)
1337 			fctrl |= IXGBE_FCTRL_UPE;
1338 	} else {
1339 		ETHER_FIRST_MULTI(step, &sc->arpcom, enm);
1340 		while (enm != NULL) {
1341 			bcopy(enm->enm_addrlo,
1342 			    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1343 			    IXGBE_ETH_LENGTH_OF_ADDRESS);
1344 			mcnt++;
1345 
1346 			ETHER_NEXT_MULTI(step, enm);
1347 		}
1348 
1349 		update_ptr = mta;
1350 		sc->hw.mac.ops.update_mc_addr_list(&sc->hw, update_ptr, mcnt,
1351 		    ixgbe_mc_array_itr, TRUE);
1352 	}
1353 
1354 	IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, fctrl);
1355 }
1356 
1357 /*
1358  * This iterator function is required by the multicast shared code.
1359  * It feeds that code the addresses collected into the mta array by
1360  * ixgbe_iff(), one at a time.
1361  */
1362 uint8_t *
1363 ixgbe_mc_array_itr(struct ixgbe_hw *hw, uint8_t **update_ptr, uint32_t *vmdq)
1364 {
1365 	uint8_t *addr = *update_ptr;
1366 	uint8_t *newptr;
1367 	*vmdq = 0;
1368 
1369 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1370 	*update_ptr = newptr;
1371 	return addr;
1372 }
1373 
1374 void
1375 ixgbe_local_timer(void *arg)
1376 {
1377 	struct ix_softc *sc = arg;
1378 #ifdef IX_DEBUG
1379 	struct ifnet	*ifp = &sc->arpcom.ac_if;
1380 #endif
1381 	int		 s;
1382 
1383 	s = splnet();
1384 
1385 	ixgbe_update_stats_counters(sc);
1386 
1387 #ifdef IX_DEBUG
1388 	if ((ifp->if_flags & (IFF_RUNNING|IFF_DEBUG)) ==
1389 	    (IFF_RUNNING|IFF_DEBUG))
1390 		ixgbe_print_hw_stats(sc);
1391 #endif
1392 
1393 	timeout_add_sec(&sc->timer, 1);
1394 
1395 	splx(s);
1396 }
1397 
1398 void
1399 ixgbe_update_link_status(struct ix_softc *sc)
1400 {
1401 	struct ifnet	*ifp = &sc->arpcom.ac_if;
1402 	int		link_state = LINK_STATE_DOWN;
1403 
1404 	ixgbe_check_link(&sc->hw, &sc->link_speed, &sc->link_up, 0);
1405 
1406 	ifp->if_baudrate = 0;
1407 	if (sc->link_up) {
1408 		link_state = LINK_STATE_FULL_DUPLEX;
1409 
1410 		switch (sc->link_speed) {
1411 		case IXGBE_LINK_SPEED_UNKNOWN:
1412 			ifp->if_baudrate = 0;
1413 			break;
1414 		case IXGBE_LINK_SPEED_100_FULL:
1415 			ifp->if_baudrate = IF_Mbps(100);
1416 			break;
1417 		case IXGBE_LINK_SPEED_1GB_FULL:
1418 			ifp->if_baudrate = IF_Gbps(1);
1419 			break;
1420 		case IXGBE_LINK_SPEED_10GB_FULL:
1421 			ifp->if_baudrate = IF_Gbps(10);
1422 			break;
1423 		}
1424 
1425 		/* Update any Flow Control changes */
1426 		sc->hw.mac.ops.fc_enable(&sc->hw);
1427 	}
1428 	if (ifp->if_link_state != link_state) {
1429 		ifp->if_link_state = link_state;
1430 		if_link_state_change(ifp);
1431 	}
1432 }
1433 
1434 
1435 /*********************************************************************
1436  *
1437  *  This routine disables all traffic on the adapter by issuing a
1438  *  global reset on the MAC and deallocates TX/RX buffers.
1439  *
1440  **********************************************************************/
1441 
1442 void
1443 ixgbe_stop(void *arg)
1444 {
1445 	struct ix_softc *sc = arg;
1446 	struct ifnet   *ifp = &sc->arpcom.ac_if;
1447 
1448 	/* Tell the stack that the interface is no longer active */
1449 	ifp->if_flags &= ~IFF_RUNNING;
1450 
1451 	INIT_DEBUGOUT("ixgbe_stop: begin\n");
1452 	ixgbe_disable_intr(sc);
1453 
1454 	sc->hw.mac.ops.reset_hw(&sc->hw);
1455 	sc->hw.adapter_stopped = FALSE;
1456 	sc->hw.mac.ops.stop_adapter(&sc->hw);
1457 	if (sc->hw.mac.type == ixgbe_mac_82599EB)
1458 		sc->hw.mac.ops.stop_mac_link_on_d3(&sc->hw);
1459 	/* Turn off the laser */
1460 	if (sc->hw.mac.ops.disable_tx_laser)
1461 		sc->hw.mac.ops.disable_tx_laser(&sc->hw);
1462 	timeout_del(&sc->timer);
1463 	timeout_del(&sc->rx_refill);
1464 
1465 	/* reprogram the RAR[0] in case user changed it. */
1466 	ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, IXGBE_RAH_AV);
1467 
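	/*
	 * Wait for any transmit start and the interrupt handler to drain
	 * before the rings are torn down below.
	 */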
1468 	ifq_barrier(&ifp->if_snd);
1469 	intr_barrier(sc->tag);
1470 
1471 	KASSERT((ifp->if_flags & IFF_RUNNING) == 0);
1472 
1473 	ifq_clr_oactive(&ifp->if_snd);
1474 
1475 	/* Should we really clear all structures on stop? */
1476 	ixgbe_free_transmit_structures(sc);
1477 	ixgbe_free_receive_structures(sc);
1478 }
1479 
1480 
1481 /*********************************************************************
1482  *
1483  *  Determine hardware revision.
1484  *
1485  **********************************************************************/
1486 void
1487 ixgbe_identify_hardware(struct ix_softc *sc)
1488 {
1489 	struct ixgbe_osdep	*os = &sc->osdep;
1490 	struct pci_attach_args	*pa = &os->os_pa;
1491 	uint32_t		 reg;
1492 
1493 	/* Save off the information about this board */
1494 	sc->hw.vendor_id = PCI_VENDOR(pa->pa_id);
1495 	sc->hw.device_id = PCI_PRODUCT(pa->pa_id);
1496 
1497 	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
1498 	sc->hw.revision_id = PCI_REVISION(reg);
1499 
1500 	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
1501 	sc->hw.subsystem_vendor_id = PCI_VENDOR(reg);
1502 	sc->hw.subsystem_device_id = PCI_PRODUCT(reg);
1503 
1504 	/* We need this here to set the num_segs below */
1505 	ixgbe_set_mac_type(&sc->hw);
1506 
1507 	/* Pick up the 82599 and VF settings */
1508 	if (sc->hw.mac.type != ixgbe_mac_82598EB)
1509 		sc->hw.phy.smart_speed = ixgbe_smart_speed;
1510 	sc->num_segs = IXGBE_82599_SCATTER;
1511 }
1512 
1513 /*********************************************************************
1514  *
1515  *  Determine optic type
1516  *
1517  **********************************************************************/
1518 void
1519 ixgbe_setup_optics(struct ix_softc *sc)
1520 {
1521 	struct ixgbe_hw *hw = &sc->hw;
1522 	int		layer;
1523 
1524 	layer = hw->mac.ops.get_supported_physical_layer(hw);
1525 
1526 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
1527 		sc->optics = IFM_10G_T;
1528 	else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
1529 		sc->optics = IFM_1000_T;
1530 	else if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1531 		sc->optics = IFM_100_TX;
1532 	else if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1533 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1534 		sc->optics = IFM_10G_SFP_CU;
1535 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR ||
1536 	    layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
1537 		sc->optics = IFM_10G_LR;
1538 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR)
1539 		sc->optics = IFM_10G_SR;
1540 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
1541 	    layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1542 		sc->optics = IFM_10G_CX4;
1543 	else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1544 		sc->optics = IFM_1000_SX;
1545 	else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_LX)
1546 		sc->optics = IFM_1000_LX;
1547 	else
1548 		sc->optics = IFM_AUTO;
1549 }
1550 
1551 /*********************************************************************
1552  *
1553  *  Setup the Legacy or MSI Interrupt handler
1554  *
1555  **********************************************************************/
1556 int
1557 ixgbe_allocate_legacy(struct ix_softc *sc)
1558 {
1559 	struct ixgbe_osdep	*os = &sc->osdep;
1560 	struct pci_attach_args	*pa = &os->os_pa;
1561 	const char		*intrstr = NULL;
1562 	pci_chipset_tag_t	pc = pa->pa_pc;
1563 	pci_intr_handle_t	ih;
1564 
1565 	/* We allocate a single interrupt resource */
1566 	if (pci_intr_map_msi(pa, &ih) != 0 &&
1567 	    pci_intr_map(pa, &ih) != 0) {
1568 		printf(": couldn't map interrupt\n");
1569 		return (ENXIO);
1570 	}
1571 
1572 #if 0
1573 	/* XXX */
1574 	/* Tasklets for Link, SFP and Multispeed Fiber */
1575 	TASK_INIT(&sc->link_task, 0, ixgbe_handle_link, sc);
1576 	TASK_INIT(&sc->mod_task, 0, ixgbe_handle_mod, sc);
1577 	TASK_INIT(&sc->msf_task, 0, ixgbe_handle_msf, sc);
1578 #endif
1579 
1580 	intrstr = pci_intr_string(pc, ih);
1581 	sc->tag = pci_intr_establish(pc, ih, IPL_NET | IPL_MPSAFE,
1582 	    ixgbe_intr, sc, sc->dev.dv_xname);
1583 	if (sc->tag == NULL) {
1584 		printf(": couldn't establish interrupt");
1585 		if (intrstr != NULL)
1586 			printf(" at %s", intrstr);
1587 		printf("\n");
1588 		return (ENXIO);
1589 	}
1590 	printf(": %s", intrstr);
1591 
1592 	/* For simplicity in the handlers */
1593 	sc->que_mask = IXGBE_EIMS_ENABLE_MASK;
1594 
1595 	return (0);
1596 }
1597 
1598 int
1599 ixgbe_allocate_pci_resources(struct ix_softc *sc)
1600 {
1601 	struct ixgbe_osdep	*os = &sc->osdep;
1602 	struct pci_attach_args	*pa = &os->os_pa;
1603 	int			 val;
1604 
1605 	val = pci_conf_read(pa->pa_pc, pa->pa_tag, PCIR_BAR(0));
1606 	if (PCI_MAPREG_TYPE(val) != PCI_MAPREG_TYPE_MEM) {
1607 		printf(": mmba is not mem space\n");
1608 		return (ENXIO);
1609 	}
1610 
1611 	if (pci_mapreg_map(pa, PCIR_BAR(0), PCI_MAPREG_MEM_TYPE(val), 0,
1612 	    &os->os_memt, &os->os_memh, &os->os_membase, &os->os_memsize, 0)) {
1613 		printf(": cannot find mem space\n");
1614 		return (ENXIO);
1615 	}
1616 	sc->hw.hw_addr = (uint8_t *)os->os_membase;
1617 
1618 	/* Legacy defaults */
1619 	sc->num_queues = 1;
1620 	sc->hw.back = os;
1621 
1622 #ifdef notyet
1623 	/* Now setup MSI or MSI/X, return us the number of supported vectors. */
1624 	sc->msix = ixgbe_setup_msix(sc);
1625 #endif
1626 
1627 	return (0);
1628 }
1629 
1630 void
1631 ixgbe_free_pci_resources(struct ix_softc * sc)
1632 {
1633 	struct ixgbe_osdep	*os = &sc->osdep;
1634 	struct pci_attach_args	*pa = &os->os_pa;
1635 	struct ix_queue *que = sc->queues;
1636 	int i;
1637 
1638 	/* Release all msix queue resources: */
1639 	for (i = 0; i < sc->num_queues; i++, que++) {
1640 		if (que->tag)
1641 			pci_intr_disestablish(pa->pa_pc, que->tag);
1642 		que->tag = NULL;
1643 	}
1644 
1645 	if (sc->tag)
1646 		pci_intr_disestablish(pa->pa_pc, sc->tag);
1647 	sc->tag = NULL;
1648 	if (os->os_membase != 0)
1649 		bus_space_unmap(os->os_memt, os->os_memh, os->os_memsize);
1650 	os->os_membase = 0;
1651 }
1652 
1653 /*********************************************************************
1654  *
1655  *  Setup networking device structure and register an interface.
1656  *
1657  **********************************************************************/
1658 void
1659 ixgbe_setup_interface(struct ix_softc *sc)
1660 {
1661 	struct ifnet   *ifp = &sc->arpcom.ac_if;
1662 
1663 	strlcpy(ifp->if_xname, sc->dev.dv_xname, IFNAMSIZ);
1664 	ifp->if_softc = sc;
1665 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1666 	ifp->if_xflags = IFXF_MPSAFE;
1667 	ifp->if_ioctl = ixgbe_ioctl;
1668 	ifp->if_qstart = ixgbe_start;
1669 	ifp->if_timer = 0;
1670 	ifp->if_watchdog = ixgbe_watchdog;
1671 	ifp->if_hardmtu = IXGBE_MAX_FRAME_SIZE -
1672 	    ETHER_HDR_LEN - ETHER_CRC_LEN;
1673 	IFQ_SET_MAXLEN(&ifp->if_snd, sc->num_tx_desc - 1);
1674 
1675 	ifp->if_capabilities = IFCAP_VLAN_MTU;
1676 
1677 #if NVLAN > 0
1678 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
1679 #endif
1680 
1681 #ifdef IX_CSUM_OFFLOAD
1682 	ifp->if_capabilities |= IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
1683 #endif
1684 
1685 	/*
1686 	 * Specify the media types supported by this sc and register
1687 	 * callbacks to update media and link information
1688 	 */
1689 	ifmedia_init(&sc->media, IFM_IMASK, ixgbe_media_change,
1690 	    ixgbe_media_status);
1691 	ixgbe_add_media_types(sc);
1692 	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
1693 
1694 	if_attach(ifp);
1695 	ether_ifattach(ifp);
1696 
1697 	sc->max_frame_size = IXGBE_MAX_FRAME_SIZE;
1698 }
1699 
1700 void
1701 ixgbe_add_media_types(struct ix_softc *sc)
1702 {
1703 	struct ixgbe_hw	*hw = &sc->hw;
1704 	int		layer;
1705 
1706 	layer = hw->mac.ops.get_supported_physical_layer(hw);
1707 
1708 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
1709 		ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_T, 0, NULL);
1710 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
1711 		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1712 	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1713 		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1714 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1715 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1716 		ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_SFP_CU, 0, NULL);
1717 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1718 		ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
1719 		if (hw->phy.multispeed_fiber)
1720 			ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_LX, 0,
1721 			    NULL);
1722 	}
1723 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1724 		ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1725 		if (hw->phy.multispeed_fiber)
1726 			ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX, 0,
1727 			    NULL);
1728 	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1729 		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
1730 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1731 		ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1732 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1733 		ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1734 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
1735 		ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1736 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1737 		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
1738 
1739 	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
1740 		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0,
1741 		    NULL);
1742 		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1743 	}
1744 
1745 	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1746 }
1747 
1748 void
1749 ixgbe_config_link(struct ix_softc *sc)
1750 {
1751 	uint32_t	autoneg, err = 0;
1752 	bool		negotiate;
1753 
1754 	if (ixgbe_is_sfp(&sc->hw)) {
1755 		if (sc->hw.phy.multispeed_fiber) {
1756 			sc->hw.mac.ops.setup_sfp(&sc->hw);
1757 			if (sc->hw.mac.ops.enable_tx_laser)
1758 				sc->hw.mac.ops.enable_tx_laser(&sc->hw);
1759 			ixgbe_handle_msf(sc);
1760 		} else
1761 			ixgbe_handle_mod(sc);
1762 	} else {
1763 		if (sc->hw.mac.ops.check_link)
1764 			err = sc->hw.mac.ops.check_link(&sc->hw, &autoneg,
1765 			    &sc->link_up, FALSE);
1766 		if (err)
1767 			return;
1768 		autoneg = sc->hw.phy.autoneg_advertised;
1769 		if ((!autoneg) && (sc->hw.mac.ops.get_link_capabilities))
1770 			err = sc->hw.mac.ops.get_link_capabilities(&sc->hw,
1771 			    &autoneg, &negotiate);
1772 		if (err)
1773 			return;
1774 		if (sc->hw.mac.ops.setup_link)
1775 			sc->hw.mac.ops.setup_link(&sc->hw,
1776 			    autoneg, sc->link_up);
1777 	}
1778 }
1779 
1780 /********************************************************************
1781  * Manage DMA'able memory.
1782  *******************************************************************/
1783 int
1784 ixgbe_dma_malloc(struct ix_softc *sc, bus_size_t size,
1785 		struct ixgbe_dma_alloc *dma, int mapflags)
1786 {
1787 	struct ifnet		*ifp = &sc->arpcom.ac_if;
1788 	struct ixgbe_osdep	*os = &sc->osdep;
1789 	int			 r;
1790 
1791 	dma->dma_tag = os->os_pa.pa_dmat;
1792 	r = bus_dmamap_create(dma->dma_tag, size, 1,
1793 	    size, 0, BUS_DMA_NOWAIT, &dma->dma_map);
1794 	if (r != 0) {
1795 		printf("%s: ixgbe_dma_malloc: bus_dmamap_create failed; "
1796 		       "error %u\n", ifp->if_xname, r);
1797 		goto fail_0;
1798 	}
1799 
1800 	r = bus_dmamem_alloc(dma->dma_tag, size, PAGE_SIZE, 0, &dma->dma_seg,
1801 	    1, &dma->dma_nseg, BUS_DMA_NOWAIT);
1802 	if (r != 0) {
1803 		printf("%s: ixgbe_dma_malloc: bus_dmamem_alloc failed; "
1804 		       "error %u\n", ifp->if_xname, r);
1805 		goto fail_1;
1806 	}
1807 
1808 	r = bus_dmamem_map(dma->dma_tag, &dma->dma_seg, dma->dma_nseg, size,
1809 	    &dma->dma_vaddr, BUS_DMA_NOWAIT);
1810 	if (r != 0) {
1811 		printf("%s: ixgbe_dma_malloc: bus_dmamem_map failed; "
1812 		       "error %u\n", ifp->if_xname, r);
1813 		goto fail_2;
1814 	}
1815 
1816 	r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
1817 	    size, NULL, mapflags | BUS_DMA_NOWAIT);
1818 	if (r != 0) {
1819 		printf("%s: ixgbe_dma_malloc: bus_dmamap_load failed; "
1820 		       "error %u\n", ifp->if_xname, r);
1821 		goto fail_3;
1822 	}
1823 
1824 	dma->dma_size = size;
1825 	return (0);
1826 fail_3:
1827 	bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size);
1828 fail_2:
1829 	bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
1830 fail_1:
1831 	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
1832 fail_0:
1833 	dma->dma_map = NULL;
1834 	dma->dma_tag = NULL;
1835 	return (r);
1836 }
1837 
1838 void
1839 ixgbe_dma_free(struct ix_softc *sc, struct ixgbe_dma_alloc *dma)
1840 {
1841 	if (dma->dma_tag == NULL)
1842 		return;
1843 
1844 	if (dma->dma_map != NULL) {
1845 		bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0,
1846 		    dma->dma_map->dm_mapsize,
1847 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1848 		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1849 		bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, dma->dma_size);
1850 		bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
1851 		bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
1852 		dma->dma_map = NULL;
1853 	}
1854 }
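
/*
 * Illustrative sketch only (not part of the driver): the usual pairing of
 * the two helpers above.  A descriptor ring is allocated once at attach,
 * the bus address handed to the hardware comes from the loaded DMA map,
 * and the same structure is later torn down with ixgbe_dma_free():
 *
 *	struct ixgbe_dma_alloc ring;
 *
 *	if (ixgbe_dma_malloc(sc, size, &ring, BUS_DMA_NOWAIT) == 0) {
 *		uint64_t pa = ring.dma_map->dm_segs[0].ds_addr;
 *		// program pa into TDBAL/TDBAH or RDBAL/RDBAH
 *		ixgbe_dma_free(sc, &ring);	// on error or detach
 *	}
 */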
1855 
1856 
1857 /*********************************************************************
1858  *
1859  *  Allocate memory for the transmit and receive rings, and then
1860  *  the descriptors associated with each, called only once at attach.
1861  *
1862  **********************************************************************/
1863 int
1864 ixgbe_allocate_queues(struct ix_softc *sc)
1865 {
1866 	struct ifnet	*ifp = &sc->arpcom.ac_if;
1867 	struct ix_queue *que;
1868 	struct tx_ring *txr;
1869 	struct rx_ring *rxr;
1870 	int rsize, tsize;
1871 	int txconf = 0, rxconf = 0, i;
1872 
1873 	/* First allocate the top level queue structs */
1874 	if (!(sc->queues = mallocarray(sc->num_queues,
1875 	    sizeof(struct ix_queue), M_DEVBUF, M_NOWAIT | M_ZERO))) {
1876 		printf("%s: Unable to allocate queue memory\n", ifp->if_xname);
1877 		goto fail;
1878 	}
1879 
1880 	/* Then allocate the TX ring struct memory */
1881 	if (!(sc->tx_rings = mallocarray(sc->num_queues,
1882 	    sizeof(struct tx_ring), M_DEVBUF, M_NOWAIT | M_ZERO))) {
1883 		printf("%s: Unable to allocate TX ring memory\n", ifp->if_xname);
1884 		goto fail;
1885 	}
1886 
1887 	/* Next allocate the RX */
1888 	if (!(sc->rx_rings = mallocarray(sc->num_queues,
1889 	    sizeof(struct rx_ring), M_DEVBUF, M_NOWAIT | M_ZERO))) {
1890 		printf("%s: Unable to allocate RX ring memory\n", ifp->if_xname);
1891 		goto rx_fail;
1892 	}
1893 
1894 	/* For the ring itself */
1895 	tsize = roundup2(sc->num_tx_desc *
1896 	    sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
1897 
1898 	/*
1899 	 * Now set up the TX queues; txconf is needed to handle the
1900 	 * possibility that things fail midcourse and we need to
1901 	 * unwind the allocations gracefully
1902 	 */
1903 	for (i = 0; i < sc->num_queues; i++, txconf++) {
1904 		/* Set up some basics */
1905 		txr = &sc->tx_rings[i];
1906 		txr->sc = sc;
1907 		txr->me = i;
1908 
1909 		if (ixgbe_dma_malloc(sc, tsize,
1910 		    &txr->txdma, BUS_DMA_NOWAIT)) {
1911 			printf("%s: Unable to allocate TX Descriptor memory\n",
1912 			    ifp->if_xname);
1913 			goto err_tx_desc;
1914 		}
1915 		txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
1916 		bzero((void *)txr->tx_base, tsize);
1917 	}
1918 
1919 	/*
1920 	 * Next the RX queues...
1921 	 */
1922 	rsize = roundup2(sc->num_rx_desc *
1923 	    sizeof(union ixgbe_adv_rx_desc), 4096);
1924 	for (i = 0; i < sc->num_queues; i++, rxconf++) {
1925 		rxr = &sc->rx_rings[i];
1926 		/* Set up some basics */
1927 		rxr->sc = sc;
1928 		rxr->me = i;
1929 
1930 		if (ixgbe_dma_malloc(sc, rsize,
1931 			&rxr->rxdma, BUS_DMA_NOWAIT)) {
1932 			printf("%s: Unable to allocate RX Descriptor memory\n",
1933 			    ifp->if_xname);
1934 			goto err_rx_desc;
1935 		}
1936 		rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
1937 		bzero((void *)rxr->rx_base, rsize);
1938 	}
1939 
1940 	/*
1941 	 * Finally set up the queue holding structs
1942 	 */
1943 	for (i = 0; i < sc->num_queues; i++) {
1944 		que = &sc->queues[i];
1945 		que->sc = sc;
1946 		que->txr = &sc->tx_rings[i];
1947 		que->rxr = &sc->rx_rings[i];
1948 	}
1949 
1950 	return (0);
1951 
1952 err_rx_desc:
1953 	for (rxr = sc->rx_rings; rxconf > 0; rxr++, rxconf--)
1954 		ixgbe_dma_free(sc, &rxr->rxdma);
1955 err_tx_desc:
1956 	for (txr = sc->tx_rings; txconf > 0; txr++, txconf--)
1957 		ixgbe_dma_free(sc, &txr->txdma);
1958 	free(sc->rx_rings, M_DEVBUF, sc->num_queues * sizeof(struct rx_ring));
1959 	sc->rx_rings = NULL;
1960 rx_fail:
1961 	free(sc->tx_rings, M_DEVBUF, sc->num_queues * sizeof(struct tx_ring));
1962 	sc->tx_rings = NULL;
1963 fail:
1964 	return (ENOMEM);
1965 }
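
/*
 * Explanatory note (illustrative, not compiled): the txconf/rxconf counters
 * above record how many rings were successfully set up, so the error path
 * unwinds exactly those.  For example, with num_queues = 4, if the DMA
 * allocation for RX ring 2 fails, rxconf == 2 and txconf == 4; err_rx_desc
 * then frees RX rings 0 and 1, falls through to err_tx_desc which frees TX
 * rings 0-3, and finally both ring arrays are freed before returning ENOMEM.
 */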
1966 
1967 /*********************************************************************
1968  *
1969  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
1970  *  the information needed to transmit a packet on the wire. This is
1971  *  called only once at attach, setup is done every reset.
1972  *
1973  **********************************************************************/
1974 int
1975 ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
1976 {
1977 	struct ix_softc 	*sc = txr->sc;
1978 	struct ifnet		*ifp = &sc->arpcom.ac_if;
1979 	struct ixgbe_tx_buf	*txbuf;
1980 	int			 error, i;
1981 
1982 	if (!(txr->tx_buffers = mallocarray(sc->num_tx_desc,
1983 	    sizeof(struct ixgbe_tx_buf), M_DEVBUF, M_NOWAIT | M_ZERO))) {
1984 		printf("%s: Unable to allocate tx_buffer memory\n",
1985 		    ifp->if_xname);
1986 		error = ENOMEM;
1987 		goto fail;
1988 	}
1989 	txr->txtag = txr->txdma.dma_tag;
1990 
1991 	/* Create the descriptor buffer dma maps */
1992 	for (i = 0; i < sc->num_tx_desc; i++) {
1993 		txbuf = &txr->tx_buffers[i];
1994 		error = bus_dmamap_create(txr->txdma.dma_tag, IXGBE_TSO_SIZE,
1995 			    sc->num_segs, PAGE_SIZE, 0,
1996 			    BUS_DMA_NOWAIT, &txbuf->map);
1997 
1998 		if (error != 0) {
1999 			printf("%s: Unable to create TX DMA map\n",
2000 			    ifp->if_xname);
2001 			goto fail;
2002 		}
2003 	}
2004 
2005 	return 0;
2006 fail:
2007 	return (error);
2008 }
2009 
2010 /*********************************************************************
2011  *
2012  *  Initialize a transmit ring.
2013  *
2014  **********************************************************************/
2015 int
2016 ixgbe_setup_transmit_ring(struct tx_ring *txr)
2017 {
2018 	struct ix_softc		*sc = txr->sc;
2019 	int			 error;
2020 
2021 	/* Now allocate transmit buffers for the ring */
2022 	if ((error = ixgbe_allocate_transmit_buffers(txr)) != 0)
2023 		return (error);
2024 
2025 	/* Clear the old ring contents */
2026 	bzero((void *)txr->tx_base,
2027 	      (sizeof(union ixgbe_adv_tx_desc)) * sc->num_tx_desc);
2028 
2029 	/* Reset indices */
2030 	txr->next_avail_desc = 0;
2031 	txr->next_to_clean = 0;
2032 
2033 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2034 	    0, txr->txdma.dma_map->dm_mapsize,
2035 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2036 
2037 	return (0);
2038 }
2039 
2040 /*********************************************************************
2041  *
2042  *  Initialize all transmit rings.
2043  *
2044  **********************************************************************/
2045 int
2046 ixgbe_setup_transmit_structures(struct ix_softc *sc)
2047 {
2048 	struct tx_ring *txr = sc->tx_rings;
2049 	int		i, error;
2050 
2051 	for (i = 0; i < sc->num_queues; i++, txr++) {
2052 		if ((error = ixgbe_setup_transmit_ring(txr)) != 0)
2053 			goto fail;
2054 	}
2055 
2056 	return (0);
2057 fail:
2058 	ixgbe_free_transmit_structures(sc);
2059 	return (error);
2060 }
2061 
2062 /*********************************************************************
2063  *
2064  *  Enable transmit unit.
2065  *
2066  **********************************************************************/
2067 void
2068 ixgbe_initialize_transmit_units(struct ix_softc *sc)
2069 {
2070 	struct ifnet	*ifp = &sc->arpcom.ac_if;
2071 	struct tx_ring	*txr;
2072 	struct ixgbe_hw	*hw = &sc->hw;
2073 	int		 i;
2074 	uint64_t	 tdba;
2075 	uint32_t	 txctrl;
2076 
2077 	/* Setup the Base and Length of the Tx Descriptor Ring */
2078 
2079 	for (i = 0; i < sc->num_queues; i++) {
2080 		txr = &sc->tx_rings[i];
2081 
2082 		/* Setup descriptor base address */
2083 		tdba = txr->txdma.dma_map->dm_segs[0].ds_addr;
2084 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),
2085 		       (tdba & 0x00000000ffffffffULL));
2086 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
2087 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
2088 		    sc->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
2089 
2090 		/* Setup the HW Tx Head and Tail descriptor pointers */
2091 		IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
2092 		IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
2093 
2094 		/* Setup Transmit Descriptor Cmd Settings */
2095 		txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
2096 		txr->queue_status = IXGBE_QUEUE_IDLE;
2097 		txr->watchdog_timer = 0;
2098 
2099 		/* Disable Head Writeback */
2100 		switch (hw->mac.type) {
2101 		case ixgbe_mac_82598EB:
2102 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
2103 			break;
2104 		case ixgbe_mac_82599EB:
2105 		case ixgbe_mac_X540:
2106 		default:
2107 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
2108 			break;
2109 		}
2110 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
2111 		switch (hw->mac.type) {
2112 		case ixgbe_mac_82598EB:
2113 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
2114 			break;
2115 		case ixgbe_mac_82599EB:
2116 		case ixgbe_mac_X540:
2117 		default:
2118 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), txctrl);
2119 			break;
2120 		}
2121 	}
2122 	ifp->if_timer = 0;
2123 
2124 	if (hw->mac.type != ixgbe_mac_82598EB) {
2125 		uint32_t dmatxctl, rttdcs;
2126 		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2127 		dmatxctl |= IXGBE_DMATXCTL_TE;
2128 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
2129 		/* Disable arbiter to set MTQC */
2130 		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2131 		rttdcs |= IXGBE_RTTDCS_ARBDIS;
2132 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2133 		IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
2134 		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
2135 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2136 	}
2137 }
2138 
2139 /*********************************************************************
2140  *
2141  *  Free all transmit rings.
2142  *
2143  **********************************************************************/
2144 void
2145 ixgbe_free_transmit_structures(struct ix_softc *sc)
2146 {
2147 	struct tx_ring *txr = sc->tx_rings;
2148 	int		i;
2149 
2150 	for (i = 0; i < sc->num_queues; i++, txr++)
2151 		ixgbe_free_transmit_buffers(txr);
2152 }
2153 
2154 /*********************************************************************
2155  *
2156  *  Free transmit ring related data structures.
2157  *
2158  **********************************************************************/
2159 void
2160 ixgbe_free_transmit_buffers(struct tx_ring *txr)
2161 {
2162 	struct ix_softc *sc = txr->sc;
2163 	struct ixgbe_tx_buf *tx_buffer;
2164 	int             i;
2165 
2166 	INIT_DEBUGOUT("free_transmit_ring: begin");
2167 
2168 	if (txr->tx_buffers == NULL)
2169 		return;
2170 
2171 	tx_buffer = txr->tx_buffers;
2172 	for (i = 0; i < sc->num_tx_desc; i++, tx_buffer++) {
2173 		if (tx_buffer->map != NULL && tx_buffer->map->dm_nsegs > 0) {
2174 			bus_dmamap_sync(txr->txdma.dma_tag, tx_buffer->map,
2175 			    0, tx_buffer->map->dm_mapsize,
2176 			    BUS_DMASYNC_POSTWRITE);
2177 			bus_dmamap_unload(txr->txdma.dma_tag,
2178 			    tx_buffer->map);
2179 		}
2180 		if (tx_buffer->m_head != NULL) {
2181 			m_freem(tx_buffer->m_head);
2182 			tx_buffer->m_head = NULL;
2183 		}
2184 		if (tx_buffer->map != NULL) {
2185 			bus_dmamap_destroy(txr->txdma.dma_tag,
2186 			    tx_buffer->map);
2187 			tx_buffer->map = NULL;
2188 		}
2189 	}
2190 
2191 	if (txr->tx_buffers != NULL)
2192 		free(txr->tx_buffers, M_DEVBUF,
2193 		    sc->num_tx_desc * sizeof(struct ixgbe_tx_buf));
2194 	txr->tx_buffers = NULL;
2195 	txr->txtag = NULL;
2196 }
2197 
2198 /*********************************************************************
2199  *
2200  *  Advanced Context Descriptor setup for VLAN or CSUM
2201  *
2202  **********************************************************************/
2203 
2204 int
2205 ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp,
2206     uint32_t *cmd_type_len, uint32_t *olinfo_status)
2207 {
2208 	struct ixgbe_adv_tx_context_desc *TXD;
2209 	struct ixgbe_tx_buf *tx_buffer;
2210 #if NVLAN > 0
2211 	struct ether_vlan_header *eh;
2212 #else
2213 	struct ether_header *eh;
2214 #endif
2215 	struct ip *ip;
2216 #ifdef notyet
2217 	struct ip6_hdr *ip6;
2218 #endif
2219 	struct mbuf *m;
2220 	int	ipoff;
2221 	uint32_t vlan_macip_lens = 0, type_tucmd_mlhl = 0;
2222 	int 	ehdrlen, ip_hlen = 0;
2223 	uint16_t etype;
2224 	uint8_t	ipproto = 0;
2225 	int	offload = TRUE;
2226 	int	ctxd = txr->next_avail_desc;
2227 #if NVLAN > 0
2228 	uint16_t vtag = 0;
2229 #endif
2230 
2231 #ifdef notyet
2232 	/* First check if TSO is to be used */
2233 	if (mp->m_pkthdr.csum_flags & CSUM_TSO)
2234 		return (ixgbe_tso_setup(txr, mp, cmd_type_len, olinfo_status));
2235 #endif
2236 
2237 	if ((mp->m_pkthdr.csum_flags & (M_TCP_CSUM_OUT | M_UDP_CSUM_OUT)) == 0)
2238 		offload = FALSE;
2239 
2240 	/* Indicate the whole packet as payload when not doing TSO */
2241 	*olinfo_status |= mp->m_pkthdr.len << IXGBE_ADVTXD_PAYLEN_SHIFT;
2242 
2243 	/* Now ready a context descriptor */
2244 	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
2245 	tx_buffer = &txr->tx_buffers[ctxd];
2246 
2247 	/*
2248 	 * In advanced descriptors the vlan tag must
2249 	 * be placed into the descriptor itself. Hence
2250 	 * we need to make one even if not doing offloads.
2251 	 */
2252 #if NVLAN > 0
2253 	if (mp->m_flags & M_VLANTAG) {
2254 		vtag = mp->m_pkthdr.ether_vtag;
2255 		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
2256 	} else
2257 #endif
2258 	if (offload == FALSE)
2259 		return (0);	/* No need for CTX */
2260 
2261 	/*
2262 	 * Determine where frame payload starts.
2263 	 * Jump over vlan headers if already present,
2264 	 * helpful for QinQ too.
2265 	 */
2266 	if (mp->m_len < sizeof(struct ether_header))
2267 		return (-1);
2268 #if NVLAN > 0
2269 	eh = mtod(mp, struct ether_vlan_header *);
2270 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2271 		if (mp->m_len < sizeof(struct ether_vlan_header))
2272 			return (-1);
2273 		etype = ntohs(eh->evl_proto);
2274 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2275 	} else {
2276 		etype = ntohs(eh->evl_encap_proto);
2277 		ehdrlen = ETHER_HDR_LEN;
2278 	}
2279 #else
2280 	eh = mtod(mp, struct ether_header *);
2281 	etype = ntohs(eh->ether_type);
2282 	ehdrlen = ETHER_HDR_LEN;
2283 #endif
2284 
2285 	/* Set the ether header length */
2286 	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
2287 
2288 	switch (etype) {
2289 	case ETHERTYPE_IP:
2290 		if (mp->m_pkthdr.len < ehdrlen + sizeof(*ip))
2291 			return (-1);
2292 		m = m_getptr(mp, ehdrlen, &ipoff);
2293 		KASSERT(m != NULL && m->m_len - ipoff >= sizeof(*ip));
2294 		ip = (struct ip *)(m->m_data + ipoff);
2295 		ip_hlen = ip->ip_hl << 2;
2296 		ipproto = ip->ip_p;
2297 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
2298 		break;
2299 #ifdef notyet
2300 	case ETHERTYPE_IPV6:
2301 		if (mp->m_pkthdr.len < ehdrlen + sizeof(*ip6))
2302 			return (-1);
2303 		m = m_getptr(mp, ehdrlen, &ipoff);
2304 		KASSERT(m != NULL && m->m_len - ipoff >= sizeof(*ip6));
2305 		ip6 = (struct ip6_hdr *)(m->m_data + ipoff);
2306 		ip_hlen = sizeof(*ip6);
2307 		/* XXX-BZ this will go badly in case of ext hdrs. */
2308 		ipproto = ip6->ip6_nxt;
2309 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
2310 		break;
2311 #endif
2312 	default:
2313 		offload = FALSE;
2314 		break;
2315 	}
2316 
2317 	vlan_macip_lens |= ip_hlen;
2318 	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
2319 
2320 	switch (ipproto) {
2321 	case IPPROTO_TCP:
2322 		if (mp->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
2323 			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2324 		break;
2325 	case IPPROTO_UDP:
2326 		if (mp->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
2327 			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
2328 		break;
2329 	default:
2330 		offload = FALSE;
2331 		break;
2332 	}
2333 
2334 	if (offload) /* For the TX descriptor setup */
2335 		*olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
2336 
2337 	/* Now copy bits into descriptor */
2338 	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
2339 	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
2340 	TXD->seqnum_seed = htole32(0);
2341 	TXD->mss_l4len_idx = htole32(0);
2342 
2343 	tx_buffer->m_head = NULL;
2344 	tx_buffer->eop_index = -1;
2345 
2346 	return (1);
2347 }
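
/*
 * Worked example (illustrative only, assuming the usual definitions in
 * ixgbe_type.h: IXGBE_ADVTXD_MACLEN_SHIFT == 9, IXGBE_ADVTXD_VLAN_SHIFT == 16):
 * for an IPv4/TCP packet with a 14 byte ethernet header, a 20 byte IP
 * header and VLAN tag 100, the context descriptor ends up with
 *
 *	vlan_macip_lens = (100 << 16) | (14 << 9) | 20 = 0x00641c14
 *
 * and type_tucmd_mlhl carries DCMD_DEXT | DTYP_CTXT | TUCMD_IPV4 |
 * TUCMD_L4T_TCP.
 */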
2348 
2349 /**********************************************************************
2350  *
2351  *  Examine each tx_buffer in the used queue. If the hardware is done
2352  *  processing the packet, then free the associated resources. The
2353  *  tx_buffer is put back on the free queue.
2354  *
2355  **********************************************************************/
2356 int
2357 ixgbe_txeof(struct tx_ring *txr)
2358 {
2359 	struct ix_softc			*sc = txr->sc;
2360 	struct ifnet			*ifp = &sc->arpcom.ac_if;
2361 	unsigned int			 head, tail, last;
2362 	struct ixgbe_tx_buf		*tx_buffer;
2363 	struct ixgbe_legacy_tx_desc	*tx_desc;
2364 
2365 	if (!ISSET(ifp->if_flags, IFF_RUNNING))
2366 		return FALSE;
2367 
2368 	head = txr->next_avail_desc;
2369 	tail = txr->next_to_clean;
2370 
2371 	membar_consumer();
2372 
2373 	if (head == tail)
2374 		return (FALSE);
2375 
2376 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2377 	    0, txr->txdma.dma_map->dm_mapsize,
2378 	    BUS_DMASYNC_POSTREAD);
2379 
2380 	for (;;) {
2381 		tx_buffer = &txr->tx_buffers[tail];
2382 		last = tx_buffer->eop_index;
2383 		tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
2384 
2385 		if (!ISSET(tx_desc->upper.fields.status, IXGBE_TXD_STAT_DD))
2386 			break;
2387 
2388 		bus_dmamap_sync(txr->txdma.dma_tag, tx_buffer->map,
2389 		    0, tx_buffer->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2390 		bus_dmamap_unload(txr->txdma.dma_tag, tx_buffer->map);
2391 		m_freem(tx_buffer->m_head);
2392 
2393 		tx_buffer->m_head = NULL;
2394 		tx_buffer->eop_index = -1;
2395 
2396 		tail = last + 1;
2397 		if (tail == sc->num_tx_desc)
2398 			tail = 0;
2399 		if (head == tail) {
2400 			/* All clean, turn off the timer */
2401 			ifp->if_timer = 0;
2402 			break;
2403 		}
2404 	}
2405 
2406 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2407 	    0, txr->txdma.dma_map->dm_mapsize,
2408 	    BUS_DMASYNC_PREREAD);
2409 
2410 	membar_producer();
2411 
2412 	txr->next_to_clean = tail;
2413 
2414 	if (ifq_is_oactive(&ifp->if_snd))
2415 		ifq_restart(&ifp->if_snd);
2416 
2417 	return TRUE;
2418 }
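
/*
 * Explanatory note: next_avail_desc is advanced by the transmit side
 * (the producer) and next_to_clean only by this routine (the consumer),
 * so the two can run concurrently without a lock; the membar_consumer()/
 * membar_producer() pair orders the index accesses against the descriptor
 * and buffer accesses.  Whole packets are freed at a time by jumping to
 * eop_index, the last descriptor of each frame, once that descriptor
 * reports IXGBE_TXD_STAT_DD.
 */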
2419 
2420 /*********************************************************************
2421  *
2422  *  Get a buffer from system mbuf buffer pool.
2423  *
2424  **********************************************************************/
2425 int
2426 ixgbe_get_buf(struct rx_ring *rxr, int i)
2427 {
2428 	struct ix_softc		*sc = rxr->sc;
2429 	struct ixgbe_rx_buf	*rxbuf;
2430 	struct mbuf		*mp;
2431 	int			error;
2432 	union ixgbe_adv_rx_desc	*rxdesc;
2433 
2434 	rxbuf = &rxr->rx_buffers[i];
2435 	rxdesc = &rxr->rx_base[i];
2436 	if (rxbuf->buf) {
2437 		printf("%s: ixgbe_get_buf: slot %d already has an mbuf\n",
2438 		    sc->dev.dv_xname, i);
2439 		return (ENOBUFS);
2440 	}
2441 
2442 	/* an mbuf is needed in any case, so preallocate it up front */
2443 	mp = MCLGETI(NULL, M_DONTWAIT, NULL, sc->rx_mbuf_sz);
2444 	if (!mp)
2445 		return (ENOBUFS);
2446 
2447 	mp->m_data += (mp->m_ext.ext_size - sc->rx_mbuf_sz);
2448 	mp->m_len = mp->m_pkthdr.len = sc->rx_mbuf_sz;
2449 
2450 	error = bus_dmamap_load_mbuf(rxr->rxdma.dma_tag, rxbuf->map,
2451 	    mp, BUS_DMA_NOWAIT);
2452 	if (error) {
2453 		m_freem(mp);
2454 		return (error);
2455 	}
2456 
2457 	bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map,
2458 	    0, rxbuf->map->dm_mapsize, BUS_DMASYNC_PREREAD);
2459 	rxbuf->buf = mp;
2460 
2461 	rxdesc->read.pkt_addr = htole64(rxbuf->map->dm_segs[0].ds_addr);
2462 
2463 	return (0);
2464 }
2465 
2466 /*********************************************************************
2467  *
2468  *  Allocate memory for rx_buffer structures. Since we use one
2469  *  rx_buffer per received packet, the maximum number of rx_buffer's
2470  *  that we'll need is equal to the number of receive descriptors
2471  *  that we've allocated.
2472  *
2473  **********************************************************************/
2474 int
2475 ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
2476 {
2477 	struct ix_softc		*sc = rxr->sc;
2478 	struct ifnet		*ifp = &sc->arpcom.ac_if;
2479 	struct ixgbe_rx_buf 	*rxbuf;
2480 	int			i, error;
2481 
2482 	if (!(rxr->rx_buffers = mallocarray(sc->num_rx_desc,
2483 	    sizeof(struct ixgbe_rx_buf), M_DEVBUF, M_NOWAIT | M_ZERO))) {
2484 		printf("%s: Unable to allocate rx_buffer memory\n",
2485 		    ifp->if_xname);
2486 		error = ENOMEM;
2487 		goto fail;
2488 	}
2489 
2490 	rxbuf = rxr->rx_buffers;
2491 	for (i = 0; i < sc->num_rx_desc; i++, rxbuf++) {
2492 		error = bus_dmamap_create(rxr->rxdma.dma_tag, 16 * 1024, 1,
2493 		    16 * 1024, 0, BUS_DMA_NOWAIT, &rxbuf->map);
2494 		if (error) {
2495 			printf("%s: Unable to create RX DMA map\n",
2496 			    ifp->if_xname);
2497 			goto fail;
2498 		}
2499 	}
2500 	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 0,
2501 	    rxr->rxdma.dma_map->dm_mapsize,
2502 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2503 
2504 	return (0);
2505 
2506 fail:
2507 	return (error);
2508 }
2509 
2510 /*********************************************************************
2511  *
2512  *  Initialize a receive ring and its buffers.
2513  *
2514  **********************************************************************/
2515 int
2516 ixgbe_setup_receive_ring(struct rx_ring *rxr)
2517 {
2518 	struct ix_softc		*sc = rxr->sc;
2519 	struct ifnet		*ifp = &sc->arpcom.ac_if;
2520 	int			 rsize, error;
2521 
2522 	rsize = roundup2(sc->num_rx_desc *
2523 	    sizeof(union ixgbe_adv_rx_desc), 4096);
2524 	/* Clear the ring contents */
2525 	bzero((void *)rxr->rx_base, rsize);
2526 
2527 	if ((error = ixgbe_allocate_receive_buffers(rxr)) != 0)
2528 		return (error);
2529 
2530 	/* Setup our descriptor indices */
2531 	rxr->next_to_check = 0;
2532 	rxr->last_desc_filled = sc->num_rx_desc - 1;
2533 
2534 	if_rxr_init(&rxr->rx_ring, 2 * ((ifp->if_hardmtu / MCLBYTES) + 1),
2535 	    sc->num_rx_desc - 1);
2536 
2537 	ixgbe_rxfill(rxr);
2538 	if (if_rxr_inuse(&rxr->rx_ring) == 0) {
2539 		printf("%s: unable to fill any rx descriptors\n",
2540 		    sc->dev.dv_xname);
2541 		return (ENOBUFS);
2542 	}
2543 
2544 	return (0);
2545 }
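
/*
 * Explanatory note: the second argument to if_rxr_init() above is the low
 * watermark, sized here at roughly two maximum-sized frames' worth of
 * MCLBYTES clusters, and the third is the high watermark, the whole ring
 * minus one descriptor.
 */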
2546 
2547 int
2548 ixgbe_rxfill(struct rx_ring *rxr)
2549 {
2550 	struct ix_softc *sc = rxr->sc;
2551 	int		 post = 0;
2552 	u_int		 slots;
2553 	int		 i;
2554 
2555 	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2556 	    0, rxr->rxdma.dma_map->dm_mapsize,
2557 	    BUS_DMASYNC_POSTWRITE);
2558 
2559 	i = rxr->last_desc_filled;
2560 	for (slots = if_rxr_get(&rxr->rx_ring, sc->num_rx_desc);
2561 	    slots > 0; slots--) {
2562 		if (++i == sc->num_rx_desc)
2563 			i = 0;
2564 
2565 		if (ixgbe_get_buf(rxr, i) != 0)
2566 			break;
2567 
2568 		rxr->last_desc_filled = i;
2569 		post = 1;
2570 	}
2571 
2572 	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2573 	    0, rxr->rxdma.dma_map->dm_mapsize,
2574 	    BUS_DMASYNC_PREWRITE);
2575 
2576 	if_rxr_put(&rxr->rx_ring, slots);
2577 
2578 	return (post);
2579 }
2580 
2581 void
2582 ixgbe_rxrefill(void *xsc)
2583 {
2584 	struct ix_softc *sc = xsc;
2585 	struct ix_queue *que = sc->queues;
2586 	int s;
2587 
2588 	s = splnet();
2589 	if (ixgbe_rxfill(que->rxr)) {
2590 		/* Advance the Rx Queue "Tail Pointer" */
2591 		IXGBE_WRITE_REG(&sc->hw, IXGBE_RDT(que->rxr->me),
2592 		    que->rxr->last_desc_filled);
2593 	} else
2594 		timeout_add(&sc->rx_refill, 1);
2595 	splx(s);
2596 }
2597 
2598 /*********************************************************************
2599  *
2600  *  Initialize all receive rings.
2601  *
2602  **********************************************************************/
2603 int
2604 ixgbe_setup_receive_structures(struct ix_softc *sc)
2605 {
2606 	struct rx_ring *rxr = sc->rx_rings;
2607 	int i;
2608 
2609 	for (i = 0; i < sc->num_queues; i++, rxr++)
2610 		if (ixgbe_setup_receive_ring(rxr))
2611 			goto fail;
2612 
2613 	return (0);
2614 fail:
2615 	ixgbe_free_receive_structures(sc);
2616 	return (ENOBUFS);
2617 }
2618 
2619 /*********************************************************************
2620  *
2621  *  Setup receive registers and features.
2622  *
2623  **********************************************************************/
2624 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
2625 
2626 void
2627 ixgbe_initialize_receive_units(struct ix_softc *sc)
2628 {
2629 	struct rx_ring	*rxr = sc->rx_rings;
2630 	struct ixgbe_hw	*hw = &sc->hw;
2631 	uint32_t	bufsz, fctrl, srrctl, rxcsum;
2632 	uint32_t	hlreg;
2633 	int		i;
2634 
2635 	/*
2636 	 * Make sure receives are disabled while
2637 	 * setting up the descriptor ring
2638 	 */
2639 	ixgbe_disable_rx(hw);
2640 
2641 	/* Enable broadcasts */
2642 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2643 	fctrl |= IXGBE_FCTRL_BAM;
2644 	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
2645 		fctrl |= IXGBE_FCTRL_DPF;
2646 		fctrl |= IXGBE_FCTRL_PMCF;
2647 	}
2648 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2649 
2650 	/* Always enable jumbo frame reception */
2651 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2652 	hlreg |= IXGBE_HLREG0_JUMBOEN;
2653 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
2654 
2655 	bufsz = (sc->rx_mbuf_sz - ETHER_ALIGN) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2656 
2657 	for (i = 0; i < sc->num_queues; i++, rxr++) {
2658 		uint64_t rdba = rxr->rxdma.dma_map->dm_segs[0].ds_addr;
2659 
2660 		/* Setup the Base and Length of the Rx Descriptor Ring */
2661 		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i),
2662 			       (rdba & 0x00000000ffffffffULL));
2663 		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
2664 		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i),
2665 		    sc->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
2666 
2667 		/* Set up the SRRCTL register */
2668 		srrctl = bufsz | IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2669 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
2670 
2671 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
2672 		IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
2673 		IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
2674 	}
2675 
2676 	if (sc->hw.mac.type != ixgbe_mac_82598EB) {
2677 		uint32_t psrtype = IXGBE_PSRTYPE_TCPHDR |
2678 			      IXGBE_PSRTYPE_UDPHDR |
2679 			      IXGBE_PSRTYPE_IPV4HDR |
2680 			      IXGBE_PSRTYPE_IPV6HDR;
2681 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
2682 	}
2683 
2684 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2685 	rxcsum &= ~IXGBE_RXCSUM_PCSD;
2686 
2687 	/* Setup RSS */
2688 	if (sc->num_queues > 1) {
2689 		ixgbe_initialize_rss_mapping(sc);
2690 
2691 		/* RSS and RX IPP Checksum are mutually exclusive */
2692 		rxcsum |= IXGBE_RXCSUM_PCSD;
2693 	}
2694 
2695 	/* This is useful for calculating UDP/IP fragment checksums */
2696 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
2697 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
2698 
2699 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2700 }
2701 
2702 void
2703 ixgbe_initialize_rss_mapping(struct ix_softc *sc)
2704 {
2705 	struct ixgbe_hw	*hw = &sc->hw;
2706 	uint32_t reta = 0, mrqc, rss_key[10];
2707 	int i, j, queue_id, table_size, index_mult;
2708 
2709 	/* set up random bits */
2710 	arc4random_buf(&rss_key, sizeof(rss_key));
2711 
2712 	/* Set multiplier for RETA setup and table size based on MAC */
2713 	index_mult = 0x1;
2714 	table_size = 128;
2715 	switch (sc->hw.mac.type) {
2716 	case ixgbe_mac_82598EB:
2717 		index_mult = 0x11;
2718 		break;
2719 	case ixgbe_mac_X550:
2720 	case ixgbe_mac_X550EM_x:
2721 		table_size = 512;
2722 		break;
2723 	default:
2724 		break;
2725 	}
2726 
2727 	/* Set up the redirection table */
2728 	for (i = 0, j = 0; i < table_size; i++, j++) {
2729 		if (j == sc->num_queues) j = 0;
2730 		queue_id = (j * index_mult);
2731 		/*
2732 		 * The low 8 bits are for hash value (n+0);
2733 		 * The next 8 bits are for hash value (n+1), etc.
2734 		 */
2735 		reta = reta >> 8;
2736 		reta = reta | ( ((uint32_t) queue_id) << 24);
2737 		if ((i & 3) == 3) {
2738 			if (i < 128)
2739 				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2740 			else
2741 				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
2742 				    reta);
2743 			reta = 0;
2744 		}
2745 	}
2746 
2747 	/* Now fill our hash function seeds */
2748 	for (i = 0; i < 10; i++)
2749 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
2750 
2751 	/*
2752 	 * Disable UDP - IP fragments aren't currently being handled
2753 	 * and so we end up with a mix of 2-tuple and 4-tuple
2754 	 * traffic.
2755 	 */
2756 	mrqc = IXGBE_MRQC_RSSEN
2757 	     | IXGBE_MRQC_RSS_FIELD_IPV4
2758 	     | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
2759 	     | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
2760 	     | IXGBE_MRQC_RSS_FIELD_IPV6_EX
2761 	     | IXGBE_MRQC_RSS_FIELD_IPV6
2762 	     | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
2763 	;
2764 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2765 }
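
/*
 * Worked example (illustrative only): with num_queues == 4 and
 * index_mult == 1, the loop above cycles queue ids 0,1,2,3,0,... and packs
 * four 8-bit entries per register, oldest in the low byte.  The first write
 * is therefore
 *
 *	RETA(0) = 0x03020100
 *
 * i.e. hash values 0-3 map to queues 0-3, and the pattern repeats across
 * the rest of the table.
 */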
2766 
2767 /*********************************************************************
2768  *
2769  *  Free all receive rings.
2770  *
2771  **********************************************************************/
2772 void
2773 ixgbe_free_receive_structures(struct ix_softc *sc)
2774 {
2775 	struct rx_ring *rxr;
2776 	int		i;
2777 
2778 	for (i = 0, rxr = sc->rx_rings; i < sc->num_queues; i++, rxr++)
2779 		if_rxr_init(&rxr->rx_ring, 0, 0);
2780 
2781 	for (i = 0, rxr = sc->rx_rings; i < sc->num_queues; i++, rxr++)
2782 		ixgbe_free_receive_buffers(rxr);
2783 }
2784 
2785 /*********************************************************************
2786  *
2787  *  Free receive ring data structures
2788  *
2789  **********************************************************************/
2790 void
2791 ixgbe_free_receive_buffers(struct rx_ring *rxr)
2792 {
2793 	struct ix_softc		*sc;
2794 	struct ixgbe_rx_buf	*rxbuf;
2795 	int			 i;
2796 
2797 	sc = rxr->sc;
2798 	if (rxr->rx_buffers != NULL) {
2799 		for (i = 0; i < sc->num_rx_desc; i++) {
2800 			rxbuf = &rxr->rx_buffers[i];
2801 			if (rxbuf->buf != NULL) {
2802 				bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map,
2803 				    0, rxbuf->map->dm_mapsize,
2804 				    BUS_DMASYNC_POSTREAD);
2805 				bus_dmamap_unload(rxr->rxdma.dma_tag,
2806 				    rxbuf->map);
2807 				m_freem(rxbuf->buf);
2808 				rxbuf->buf = NULL;
2809 			}
2810 			bus_dmamap_destroy(rxr->rxdma.dma_tag, rxbuf->map);
2811 			rxbuf->map = NULL;
2812 		}
2813 		free(rxr->rx_buffers, M_DEVBUF,
2814 		    sc->num_rx_desc * sizeof(struct ixgbe_rx_buf));
2815 		rxr->rx_buffers = NULL;
2816 	}
2817 }
2818 
2819 /*********************************************************************
2820  *
2821  *  This routine executes in interrupt context. It replenishes
2822  *  the mbufs in the descriptor ring and passes data which has
2823  *  been DMA'ed into host memory to the upper layer.
2824  *
2825  *********************************************************************/
2826 int
2827 ixgbe_rxeof(struct ix_queue *que)
2828 {
2829 	struct ix_softc 	*sc = que->sc;
2830 	struct rx_ring		*rxr = que->rxr;
2831 	struct ifnet   		*ifp = &sc->arpcom.ac_if;
2832 	struct mbuf_list	 ml = MBUF_LIST_INITIALIZER();
2833 	struct mbuf    		*mp, *sendmp;
2834 	uint8_t		    	 eop = 0;
2835 	uint16_t		 len, vtag;
2836 	uint32_t		 staterr = 0, ptype;
2837 	struct ixgbe_rx_buf	*rxbuf, *nxbuf;
2838 	union ixgbe_adv_rx_desc	*rxdesc;
2839 	size_t			 dsize = sizeof(union ixgbe_adv_rx_desc);
2840 	int			 i, nextp;
2841 
2842 	if (!ISSET(ifp->if_flags, IFF_RUNNING))
2843 		return FALSE;
2844 
2845 	i = rxr->next_to_check;
2846 	while (if_rxr_inuse(&rxr->rx_ring) > 0) {
2847 		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2848 		    dsize * i, dsize, BUS_DMASYNC_POSTREAD);
2849 
2850 		rxdesc = &rxr->rx_base[i];
2851 		staterr = letoh32(rxdesc->wb.upper.status_error);
2852 		if (!ISSET(staterr, IXGBE_RXD_STAT_DD)) {
2853 			bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2854 			    dsize * i, dsize,
2855 			    BUS_DMASYNC_PREREAD);
2856 			break;
2857 		}
2858 
2859 		/* Zero out the receive descriptor's status */
2860 		rxdesc->wb.upper.status_error = 0;
2861 		rxbuf = &rxr->rx_buffers[i];
2862 
2863 		/* pull the mbuf off the ring */
2864 		bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map, 0,
2865 		    rxbuf->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
2866 		bus_dmamap_unload(rxr->rxdma.dma_tag, rxbuf->map);
2867 
2868 		mp = rxbuf->buf;
2869 		len = letoh16(rxdesc->wb.upper.length);
2870 		ptype = letoh32(rxdesc->wb.lower.lo_dword.data) &
2871 		    IXGBE_RXDADV_PKTTYPE_MASK;
2872 		vtag = letoh16(rxdesc->wb.upper.vlan);
2873 		eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
2874 
2875 		if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
2876 			sc->dropped_pkts++;
2877 
2878 			if (rxbuf->fmp) {
2879 				m_freem(rxbuf->fmp);
2880 				rxbuf->fmp = NULL;
2881 			}
2882 
2883 			m_freem(mp);
2884 			rxbuf->buf = NULL;
2885 			goto next_desc;
2886 		}
2887 
2888 		if (mp == NULL) {
2889 			panic("%s: ixgbe_rxeof: NULL mbuf in slot %d "
2890 			    "(nrx %d, filled %d)", sc->dev.dv_xname,
2891 			    i, if_rxr_inuse(&rxr->rx_ring),
2892 			    rxr->last_desc_filled);
2893 		}
2894 
2895 		/* Currently no HW RSC support of 82599 */
2896 		if (!eop) {
2897 			/*
2898 			 * Figure out the next descriptor of this frame.
2899 			 */
2900 			nextp = i + 1;
2901 			if (nextp == sc->num_rx_desc)
2902 				nextp = 0;
2903 			nxbuf = &rxr->rx_buffers[nextp];
2904 			/* prefetch(nxbuf); */
2905 		}
2906 
2907 		/*
2908 		 * Rather than using the fmp/lmp global pointers
2909 		 * we now keep the head of a packet chain in the
2910 		 * buffer struct and pass this along from one
2911 		 * descriptor to the next, until we get EOP.
2912 		 */
2913 		mp->m_len = len;
2914 		/*
2915 		 * See if there is a stored head from a previous
2916 		 * descriptor; if so, this mbuf continues that frame.
2917 		 */
2918 		sendmp = rxbuf->fmp;
2919 		rxbuf->buf = rxbuf->fmp = NULL;
2920 
2921 		if (sendmp != NULL) /* secondary frag */
2922 			sendmp->m_pkthdr.len += mp->m_len;
2923 		else {
2924 			/* first desc of a non-ps chain */
2925 			sendmp = mp;
2926 			sendmp->m_pkthdr.len = mp->m_len;
2927 #if NVLAN > 0
2928 			if (staterr & IXGBE_RXD_STAT_VP) {
2929 				sendmp->m_pkthdr.ether_vtag = vtag;
2930 				sendmp->m_flags |= M_VLANTAG;
2931 			}
2932 #endif
2933 		}
2934 
2935 		/* Pass the head pointer on */
2936 		if (eop == 0) {
2937 			nxbuf->fmp = sendmp;
2938 			sendmp = NULL;
2939 			mp->m_next = nxbuf->buf;
2940 		} else { /* Sending this frame? */
2941 			rxr->rx_packets++;
2942 			/* capture data for AIM */
2943 			rxr->bytes += sendmp->m_pkthdr.len;
2944 			rxr->rx_bytes += sendmp->m_pkthdr.len;
2945 
2946 			ixgbe_rx_checksum(staterr, sendmp, ptype);
2947 
2948 			ml_enqueue(&ml, sendmp);
2949 		}
2950 next_desc:
2951 		if_rxr_put(&rxr->rx_ring, 1);
2952 		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2953 		    dsize * i, dsize,
2954 		    BUS_DMASYNC_PREREAD);
2955 
2956 		/* Advance our pointers to the next descriptor. */
2957 		if (++i == sc->num_rx_desc)
2958 			i = 0;
2959 	}
2960 	rxr->next_to_check = i;
2961 
2962 	if_input(ifp, &ml);
2963 
2964 	if (!(staterr & IXGBE_RXD_STAT_DD))
2965 		return FALSE;
2966 
2967 	return TRUE;
2968 }
2969 
2970 /*********************************************************************
2971  *
2972  *  Verify that the hardware indicated that the checksum is valid.
2973  *  Inform the stack about the checksum status so that the stack
2974  *  doesn't spend time verifying it again.
2975  *
2976  *********************************************************************/
2977 void
2978 ixgbe_rx_checksum(uint32_t staterr, struct mbuf * mp, uint32_t ptype)
2979 {
2980 	uint16_t status = (uint16_t) staterr;
2981 	uint8_t  errors = (uint8_t) (staterr >> 24);
2982 
2983 	if (status & IXGBE_RXD_STAT_IPCS) {
2984 		if (!(errors & IXGBE_RXD_ERR_IPE)) {
2985 			/* IP Checksum Good */
2986 			mp->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
2987 		} else
2988 			mp->m_pkthdr.csum_flags = 0;
2989 	}
2990 	if (status & IXGBE_RXD_STAT_L4CS) {
2991 		if (!(errors & IXGBE_RXD_ERR_TCPE))
2992 			mp->m_pkthdr.csum_flags |=
2993 				M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
2994 	}
2995 }
2996 
2997 void
2998 ixgbe_setup_vlan_hw_support(struct ix_softc *sc)
2999 {
3000 	uint32_t	ctrl;
3001 	int		i;
3002 
3003 	/*
3004 	 * A soft reset zeroes out the VFTA, so
3005 	 * we need to repopulate it now.
3006 	 */
3007 	for (i = 0; i < IXGBE_VFTA_SIZE; i++) {
3008 		if (sc->shadow_vfta[i] != 0)
3009 			IXGBE_WRITE_REG(&sc->hw, IXGBE_VFTA(i),
3010 			    sc->shadow_vfta[i]);
3011 	}
3012 
3013 	ctrl = IXGBE_READ_REG(&sc->hw, IXGBE_VLNCTRL);
3014 #if 0
3015 	/* Enable the Filter Table if enabled */
3016 	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
3017 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
3018 		ctrl |= IXGBE_VLNCTRL_VFE;
3019 	}
3020 #endif
3021 	if (sc->hw.mac.type == ixgbe_mac_82598EB)
3022 		ctrl |= IXGBE_VLNCTRL_VME;
3023 	IXGBE_WRITE_REG(&sc->hw, IXGBE_VLNCTRL, ctrl);
3024 
3025 	/* On 82599 the VLAN enable is per/queue in RXDCTL */
3026 	if (sc->hw.mac.type != ixgbe_mac_82598EB) {
3027 		for (i = 0; i < sc->num_queues; i++) {
3028 			ctrl = IXGBE_READ_REG(&sc->hw, IXGBE_RXDCTL(i));
3029 			ctrl |= IXGBE_RXDCTL_VME;
3030 			IXGBE_WRITE_REG(&sc->hw, IXGBE_RXDCTL(i), ctrl);
3031 		}
3032 	}
3033 }
3034 
3035 void
3036 ixgbe_enable_intr(struct ix_softc *sc)
3037 {
3038 	struct ixgbe_hw *hw = &sc->hw;
3039 	struct ix_queue *que = sc->queues;
3040 	uint32_t	mask, fwsm;
3041 	int i;
3042 
3043 	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3044 	/* Enable Fan Failure detection */
3045 	if (hw->device_id == IXGBE_DEV_ID_82598AT)
3046 		mask |= IXGBE_EIMS_GPI_SDP1;
3047 
3048 	switch (sc->hw.mac.type) {
3049 	case ixgbe_mac_82599EB:
3050 		mask |= IXGBE_EIMS_ECC;
3051 		/* Temperature sensor on some adapters */
3052 		mask |= IXGBE_EIMS_GPI_SDP0;
3053 		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
3054 		mask |= IXGBE_EIMS_GPI_SDP1;
3055 		mask |= IXGBE_EIMS_GPI_SDP2;
3056 		break;
3057 	case ixgbe_mac_X540:
3058 		mask |= IXGBE_EIMS_ECC;
3059 		/* Detect if Thermal Sensor is enabled */
3060 		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3061 		if (fwsm & IXGBE_FWSM_TS_ENABLED)
3062 			mask |= IXGBE_EIMS_TS;
3063 		break;
3064 	case ixgbe_mac_X550:
3065 	case ixgbe_mac_X550EM_x:
3066 		mask |= IXGBE_EIMS_ECC;
3067 		/* MAC thermal sensor is automatically enabled */
3068 		mask |= IXGBE_EIMS_TS;
3069 		/* Some devices use SDP0 for important information */
3070 		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3071 		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3072 			mask |= IXGBE_EIMS_GPI_SDP0_X540;
3073 	default:
3074 		break;
3075 	}
3076 
3077 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3078 
3079 	/* With MSI-X we use auto clear */
3080 	if (sc->msix > 1) {
3081 		mask = IXGBE_EIMS_ENABLE_MASK;
3082 		/* Don't autoclear Link */
3083 		mask &= ~IXGBE_EIMS_OTHER;
3084 		mask &= ~IXGBE_EIMS_LSC;
3085 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3086 	}
3087 
3088 	/*
3089 	 * Now enable all queues; this is done separately to
3090 	 * allow for handling the extended (beyond 32) MSIX
3091 	 * vectors that can be used by the 82599
3092 	 */
3093 	for (i = 0; i < sc->num_queues; i++, que++)
3094 		ixgbe_enable_queue(sc, que->msix);
3095 
3096 	IXGBE_WRITE_FLUSH(hw);
3097 }
3098 
3099 void
3100 ixgbe_disable_intr(struct ix_softc *sc)
3101 {
3102 	if (sc->msix > 1)
3103 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAC, 0);
3104 	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
3105 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, ~0);
3106 	} else {
3107 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, 0xFFFF0000);
3108 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(0), ~0);
3109 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(1), ~0);
3110 	}
3111 	IXGBE_WRITE_FLUSH(&sc->hw);
3112 }
3113 
3114 uint16_t
3115 ixgbe_read_pci_cfg(struct ixgbe_hw *hw, uint32_t reg)
3116 {
3117 	struct pci_attach_args	*pa;
3118 	uint32_t value;
3119 	int high = 0;
3120 
3121 	if (reg & 0x2) {
3122 		high = 1;
3123 		reg &= ~0x2;
3124 	}
3125 	pa = &((struct ixgbe_osdep *)hw->back)->os_pa;
3126 	value = pci_conf_read(pa->pa_pc, pa->pa_tag, reg);
3127 
3128 	if (high)
3129 		value >>= 16;
3130 
3131 	return (value & 0xffff);
3132 }
3133 
3134 void
3135 ixgbe_write_pci_cfg(struct ixgbe_hw *hw, uint32_t reg, uint16_t value)
3136 {
3137 	struct pci_attach_args	*pa;
3138 	uint32_t rv;
3139 	int high = 0;
3140 
3141 	/* Need to do read/mask/write... because 16 vs 32 bit!!! */
3142 	if (reg & 0x2) {
3143 		high = 1;
3144 		reg &= ~0x2;
3145 	}
3146 	pa = &((struct ixgbe_osdep *)hw->back)->os_pa;
3147 	rv = pci_conf_read(pa->pa_pc, pa->pa_tag, reg);
3148 	if (!high)
3149 		rv = (rv & 0xffff0000) | value;
3150 	else
3151 		rv = (rv & 0xffff) | ((uint32_t)value << 16);
3152 	pci_conf_write(pa->pa_pc, pa->pa_tag, reg, rv);
3153 }
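
/*
 * Worked example (illustrative only): pci_conf_read()/pci_conf_write()
 * operate on 32-bit dwords, so a 16-bit write to an odd word offset such
 * as reg 0xa6 becomes: high = 1, reg = 0xa4, and the new dword is
 *
 *	rv = (old & 0x0000ffff) | ((uint32_t)value << 16);
 *
 * which preserves the half-word at 0xa4 while replacing the one at 0xa6.
 */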
3154 
3155 /*
3156  * Setup the correct IVAR register for a particular MSIX interrupt
3157  *   (yes this is all very magic and confusing :)
3158  *  - entry is the register array entry
3159  *  - vector is the MSIX vector for this queue
3160  *  - type is RX/TX/MISC
3161  */
3162 void
3163 ixgbe_set_ivar(struct ix_softc *sc, uint8_t entry, uint8_t vector, int8_t type)
3164 {
3165 	struct ixgbe_hw *hw = &sc->hw;
3166 	uint32_t ivar, index;
3167 
3168 	vector |= IXGBE_IVAR_ALLOC_VAL;
3169 
3170 	switch (hw->mac.type) {
3171 
3172 	case ixgbe_mac_82598EB:
3173 		if (type == -1)
3174 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3175 		else
3176 			entry += (type * 64);
3177 		index = (entry >> 2) & 0x1F;
3178 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3179 		ivar &= ~(0xFF << (8 * (entry & 0x3)));
3180 		ivar |= (vector << (8 * (entry & 0x3)));
3181 		IXGBE_WRITE_REG(&sc->hw, IXGBE_IVAR(index), ivar);
3182 		break;
3183 
3184 	case ixgbe_mac_82599EB:
3185 	case ixgbe_mac_X540:
3186 	case ixgbe_mac_X550:
3187 	case ixgbe_mac_X550EM_x:
3188 		if (type == -1) { /* MISC IVAR */
3189 			index = (entry & 1) * 8;
3190 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3191 			ivar &= ~(0xFF << index);
3192 			ivar |= (vector << index);
3193 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3194 		} else {	/* RX/TX IVARS */
3195 			index = (16 * (entry & 1)) + (8 * type);
3196 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3197 			ivar &= ~(0xFF << index);
3198 			ivar |= (vector << index);
3199 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3200 		}
3201 		break;
3202 	default:
3203 		break;
3204 	}
3205 }
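
/*
 * Worked example (illustrative only): on 82599-class MACs, mapping the RX
 * interrupt of queue 5 (entry 5, type 0) to MSI-X vector 5 computes
 *
 *	index = 16 * (5 & 1) + 8 * 0 = 16
 *
 * so byte 2 of IVAR(5 >> 1) == IVAR(2) is set to (5 | IXGBE_IVAR_ALLOC_VAL).
 * For the misc causes (type == -1) only the low two bytes of IVAR_MISC are
 * used, selected by (entry & 1) * 8.
 */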
3206 
3207 void
3208 ixgbe_configure_ivars(struct ix_softc *sc)
3209 {
3210 #ifdef notyet
3211 	struct ix_queue *que = sc->queues;
3212 	uint32_t newitr;
3213 	int i;
3214 
3215 	if (ixgbe_max_interrupt_rate > 0)
3216 		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3217 	else
3218 		newitr = 0;
3219 
3220 	for (i = 0; i < sc->num_queues; i++, que++) {
3221 		/* First the RX queue entry */
3222 		ixgbe_set_ivar(sc, i, que->msix, 0);
3223 		/* ... and the TX */
3224 		ixgbe_set_ivar(sc, i, que->msix, 1);
3225 		/* Set an Initial EITR value */
3226 		IXGBE_WRITE_REG(&sc->hw,
3227 		    IXGBE_EITR(que->msix), newitr);
3228 	}
3229 
3230 	/* For the Link interrupt */
3231 	ixgbe_set_ivar(sc, 1, sc->linkvec, -1);
3232 #endif
3233 }
3234 
3235 /*
3236  * SFP module interrupts handler
3237  */
3238 void
3239 ixgbe_handle_mod(struct ix_softc *sc)
3240 {
3241 	struct ixgbe_hw *hw = &sc->hw;
3242 	uint32_t err;
3243 
3244 	err = hw->phy.ops.identify_sfp(hw);
3245 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3246 		printf("%s: Unsupported SFP+ module type was detected!\n",
3247 		    sc->dev.dv_xname);
3248 		return;
3249 	}
3250 	err = hw->mac.ops.setup_sfp(hw);
3251 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3252 		printf("%s: Setup failure - unsupported SFP+ module type!\n",
3253 		    sc->dev.dv_xname);
3254 		return;
3255 	}
3256 	/* Set the optics type so system reports correctly */
3257 	ixgbe_setup_optics(sc);
3258 
3259 	ixgbe_handle_msf(sc);
3260 }
3261 
3262 
3263 /*
3264  * MSF (multispeed fiber) interrupts handler
3265  */
3266 void
3267 ixgbe_handle_msf(struct ix_softc *sc)
3268 {
3269 	struct ixgbe_hw *hw = &sc->hw;
3270 	uint32_t autoneg;
3271 	bool negotiate;
3272 
3273 	autoneg = hw->phy.autoneg_advertised;
3274 	if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) {
3275 		if (hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate))
3276 			return;
3277 	}
3278 	if (hw->mac.ops.setup_link)
3279 		hw->mac.ops.setup_link(hw, autoneg, TRUE);
3280 
3281 	ifmedia_delete_instance(&sc->media, IFM_INST_ANY);
3282 	ixgbe_add_media_types(sc);
3283 	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
3284 }
3285 
3286 /*
3287  * External PHY interrupts handler
3288  */
3289 void
3290 ixgbe_handle_phy(struct ix_softc *sc)
3291 {
3292 	struct ixgbe_hw *hw = &sc->hw;
3293 	int error;
3294 
3295 	error = hw->phy.ops.handle_lasi(hw);
3296 	if (error == IXGBE_ERR_OVERTEMP)
3297 		printf("%s: CRITICAL: EXTERNAL PHY OVER TEMP!! "
3298 		    "PHY will downshift to lower power state!\n",
3299 		    sc->dev.dv_xname);
3300 	else if (error)
3301 		printf("%s: Error handling LASI interrupt: %d\n",
3302 		    sc->dev.dv_xname, error);
3303 
3304 }
3305 
3306 /**********************************************************************
3307  *
3308  *  Update the board statistics counters.
3309  *
3310  **********************************************************************/
3311 void
3312 ixgbe_update_stats_counters(struct ix_softc *sc)
3313 {
3314 	struct ifnet	*ifp = &sc->arpcom.ac_if;
3315 	struct ixgbe_hw	*hw = &sc->hw;
3316 	uint64_t	total_missed_rx = 0;
3317 #ifdef IX_DEBUG
3318 	uint32_t	missed_rx = 0, bprc, lxon, lxoff, total;
3319 	int		i;
3320 #endif
3321 
3322 	sc->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
3323 	sc->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
3324 
3325 #ifdef IX_DEBUG
3326 	for (i = 0; i < 8; i++) {
3327 		uint32_t mp;
3328 		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
3329 		/* missed_rx tallies misses for the gprc workaround */
3330 		missed_rx += mp;
3331 		/* global total per queue */
3332 		sc->stats.mpc[i] += mp;
3333 		/* running comprehensive total for stats display */
3334 		total_missed_rx += sc->stats.mpc[i];
3335 		if (hw->mac.type == ixgbe_mac_82598EB)
3336 			sc->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
3337 	}
3338 
3339 	/* Hardware workaround, gprc counts missed packets */
3340 	sc->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
3341 	sc->stats.gprc -= missed_rx;
3342 
3343 	if (hw->mac.type != ixgbe_mac_82598EB) {
3344 		sc->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
3345 		    ((uint64_t)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
3346 		sc->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
3347 		    ((uint64_t)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
3348 		sc->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
3349 		    ((uint64_t)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
3350 		sc->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
3351 		sc->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
3352 	} else {
3353 		sc->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
3354 		sc->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
3355 		/* 82598 only has a counter in the high register */
3356 		sc->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
3357 		sc->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
3358 		sc->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
3359 	}
3360 
3361 	/*
3362 	 * Workaround: mprc hardware is incorrectly counting
3363 	 * broadcasts, so for now we subtract those.
3364 	 */
3365 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
3366 	sc->stats.bprc += bprc;
3367 	sc->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
3368 	if (hw->mac.type == ixgbe_mac_82598EB)
3369 		sc->stats.mprc -= bprc;
3370 
3371 	sc->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
3372 	sc->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
3373 	sc->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
3374 	sc->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
3375 	sc->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
3376 	sc->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
3377 	sc->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
3378 
3379 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
3380 	sc->stats.lxontxc += lxon;
3381 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
3382 	sc->stats.lxofftxc += lxoff;
3383 	total = lxon + lxoff;
3384 
3385 	sc->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
3386 	sc->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
3387 	sc->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
3388 	sc->stats.gptc -= total;
3389 	sc->stats.mptc -= total;
3390 	sc->stats.ptc64 -= total;
3391 	sc->stats.gotc -= total * ETHER_MIN_LEN;
3392 
3393 	sc->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
3394 	sc->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
3395 	sc->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
3396 	sc->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
3397 	sc->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
3398 	sc->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
3399 	sc->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
3400 	sc->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
3401 	sc->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
3402 	sc->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
3403 #endif
3404 
3405 	/* Fill out the OS statistics structure */
3406 	ifp->if_collisions = 0;
3407 	ifp->if_oerrors = sc->watchdog_events;
3408 	ifp->if_ierrors = total_missed_rx + sc->stats.crcerrs + sc->stats.rlec;
3409 }
3410 
3411 #ifdef IX_DEBUG
3412 /**********************************************************************
3413  *
3414  *  This routine is called only when ixgbe_display_debug_stats is enabled.
3415  *  This routine provides a way to take a look at important statistics
3416  *  maintained by the driver and hardware.
3417  *
3418  **********************************************************************/
3419 void
3420 ixgbe_print_hw_stats(struct ix_softc * sc)
3421 {
3422 	struct ifnet   *ifp = &sc->arpcom.ac_if;
3423 
3424 	printf("%s: missed pkts %llu, rx len errs %llu, crc errs %llu, "
3425 	    "dropped pkts %lu, watchdog timeouts %ld, "
3426 	    "XON rx %llu, XON tx %llu, XOFF rx %llu, XOFF tx %llu, "
3427 	    "total pkts rx %llu, good pkts rx %llu, good pkts tx %llu, "
3428 	    "tso tx %lu\n",
3429 	    ifp->if_xname,
3430 	    (long long)sc->stats.mpc[0],
3431 	    (long long)sc->stats.roc + (long long)sc->stats.ruc,
3432 	    (long long)sc->stats.crcerrs,
3433 	    sc->dropped_pkts,
3434 	    sc->watchdog_events,
3435 	    (long long)sc->stats.lxonrxc,
3436 	    (long long)sc->stats.lxontxc,
3437 	    (long long)sc->stats.lxoffrxc,
3438 	    (long long)sc->stats.lxofftxc,
3439 	    (long long)sc->stats.tpr,
3440 	    (long long)sc->stats.gprc,
3441 	    (long long)sc->stats.gptc,
3442 	    sc->tso_tx);
3443 }
3444 #endif
3445