xref: /openbsd-src/sys/dev/pci/if_ix.c (revision 4c1e55dc91edd6e69ccc60ce855900fbc12cf34f)
1 /*	$OpenBSD: if_ix.c,v 1.65 2012/07/06 11:08:44 mikeb Exp $	*/
2 
3 /******************************************************************************
4 
5   Copyright (c) 2001-2008, Intel Corporation
6   All rights reserved.
7 
8   Redistribution and use in source and binary forms, with or without
9   modification, are permitted provided that the following conditions are met:
10 
11    1. Redistributions of source code must retain the above copyright notice,
12       this list of conditions and the following disclaimer.
13 
14    2. Redistributions in binary form must reproduce the above copyright
15       notice, this list of conditions and the following disclaimer in the
16       documentation and/or other materials provided with the distribution.
17 
18    3. Neither the name of the Intel Corporation nor the names of its
19       contributors may be used to endorse or promote products derived from
20       this software without specific prior written permission.
21 
22   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32   POSSIBILITY OF SUCH DAMAGE.
33 
34 ******************************************************************************/
35 /*$FreeBSD: src/sys/dev/ixgbe/ixgbe.c,v 1.5 2008/05/16 18:46:30 jfv Exp $*/
36 
37 #include <dev/pci/if_ix.h>
38 #include <dev/pci/ixgbe_type.h>
39 
40 /*********************************************************************
41  *  Driver version
42  *********************************************************************/
43 
44 #define IXGBE_DRIVER_VERSION	"1.4.4"
45 
46 /*********************************************************************
47  *  PCI Device ID Table
48  *
49  *  Used by probe to select devices to load on
50  *********************************************************************/
51 
52 const struct pci_matchid ixgbe_devices[] = {
53 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598 },
54 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598_BX },
55 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AF_DUAL },
56 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AF },
57 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AT },
58 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AT2 },
59 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AT_DUAL },
60 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598EB_CX4 },
61 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598EB_CX4_DUAL },
62 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598EB_XF_LR },
63 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598EB_SFP },
64 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598_SR_DUAL_EM },
65 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598_DA_DUAL },
66 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_KX4 },
67 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_KX4_MEZZ },
68 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_XAUI },
69 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_COMBO_BACKPLANE },
70 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_BPLANE_FCOE },
71 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_CX4 },
72 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_T3_LOM },
73 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_SFP },
74 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_SFP_EM },
75 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_SFP_FCOE }
76 #if 0
77 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599VF }
78 #endif
79 };
80 
81 /*********************************************************************
82  *  Function prototypes
83  *********************************************************************/
84 int	ixgbe_probe(struct device *, void *, void *);
85 void	ixgbe_attach(struct device *, struct device *, void *);
86 int	ixgbe_detach(struct device *, int);
87 void	ixgbe_start(struct ifnet *);
88 void	ixgbe_start_locked(struct tx_ring *, struct ifnet *);
89 int	ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
90 void	ixgbe_watchdog(struct ifnet *);
91 void	ixgbe_init(void *);
92 void	ixgbe_stop(void *);
93 void	ixgbe_media_status(struct ifnet *, struct ifmediareq *);
94 int	ixgbe_media_change(struct ifnet *);
95 void	ixgbe_identify_hardware(struct ix_softc *);
96 int	ixgbe_allocate_pci_resources(struct ix_softc *);
97 int	ixgbe_allocate_legacy(struct ix_softc *);
98 int	ixgbe_allocate_queues(struct ix_softc *);
99 void	ixgbe_free_pci_resources(struct ix_softc *);
100 void	ixgbe_local_timer(void *);
101 void	ixgbe_setup_interface(struct ix_softc *);
102 void	ixgbe_config_link(struct ix_softc *sc);
103 
104 int	ixgbe_allocate_transmit_buffers(struct tx_ring *);
105 int	ixgbe_setup_transmit_structures(struct ix_softc *);
106 int	ixgbe_setup_transmit_ring(struct tx_ring *);
107 void	ixgbe_initialize_transmit_units(struct ix_softc *);
108 void	ixgbe_free_transmit_structures(struct ix_softc *);
109 void	ixgbe_free_transmit_buffers(struct tx_ring *);
110 
111 int	ixgbe_allocate_receive_buffers(struct rx_ring *);
112 int	ixgbe_setup_receive_structures(struct ix_softc *);
113 int	ixgbe_setup_receive_ring(struct rx_ring *);
114 void	ixgbe_initialize_receive_units(struct ix_softc *);
115 void	ixgbe_free_receive_structures(struct ix_softc *);
116 void	ixgbe_free_receive_buffers(struct rx_ring *);
117 int	ixgbe_rxfill(struct rx_ring *);
118 void	ixgbe_rxrefill(void *);
119 
120 void	ixgbe_enable_intr(struct ix_softc *);
121 void	ixgbe_disable_intr(struct ix_softc *);
122 void	ixgbe_update_stats_counters(struct ix_softc *);
123 int	ixgbe_txeof(struct tx_ring *);
124 int	ixgbe_rxeof(struct ix_queue *, int);
125 void	ixgbe_rx_checksum(uint32_t, struct mbuf *, uint32_t);
126 void	ixgbe_iff(struct ix_softc *);
127 #ifdef IX_DEBUG
128 void	ixgbe_print_hw_stats(struct ix_softc *);
129 #endif
130 void	ixgbe_update_link_status(struct ix_softc *);
131 int	ixgbe_get_buf(struct rx_ring *, int);
132 int	ixgbe_encap(struct tx_ring *, struct mbuf *);
133 int	ixgbe_dma_malloc(struct ix_softc *, bus_size_t,
134 		    struct ixgbe_dma_alloc *, int);
135 void	ixgbe_dma_free(struct ix_softc *, struct ixgbe_dma_alloc *);
136 int	ixgbe_tx_ctx_setup(struct tx_ring *, struct mbuf *);
137 int	ixgbe_tso_setup(struct tx_ring *, struct mbuf *, uint32_t *);
138 void	ixgbe_set_ivar(struct ix_softc *, uint8_t, uint8_t, int8_t);
139 void	ixgbe_configure_ivars(struct ix_softc *);
140 uint8_t	*ixgbe_mc_array_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);
141 
142 void	ixgbe_setup_vlan_hw_support(struct ix_softc *);
143 
144 /* Support for pluggable optic modules */
145 int	ixgbe_sfp_probe(struct ix_softc *);
146 void	ixgbe_setup_optics(struct ix_softc *);
147 
148 /* Legacy (single vector interrupt handler */
149 int	ixgbe_legacy_irq(void *);
150 void	ixgbe_enable_queue(struct ix_softc *, uint32_t);
151 void	ixgbe_disable_queue(struct ix_softc *, uint32_t);
152 void	ixgbe_rearm_queue(struct ix_softc *, uint32_t);
153 void	ixgbe_handle_que(void *, int);
154 
155 /*********************************************************************
156  *  OpenBSD Device Interface Entry Points
157  *********************************************************************/
158 
159 struct cfdriver ix_cd = {
160 	NULL, "ix", DV_IFNET
161 };
162 
163 struct cfattach ix_ca = {
164 	sizeof(struct ix_softc), ixgbe_probe, ixgbe_attach, ixgbe_detach
165 };
166 
167 int ixgbe_smart_speed = ixgbe_smart_speed_on;
168 
169 /*********************************************************************
170  *  Device identification routine
171  *
172  *  ixgbe_probe determines if the driver should be loaded on
173  *  adapter based on PCI vendor/device id of the adapter.
174  *
175  *  return 0 on success, positive on failure
176  *********************************************************************/
177 
178 int
179 ixgbe_probe(struct device *parent, void *match, void *aux)
180 {
181 	INIT_DEBUGOUT("ixgbe_probe: begin");
182 
183 	return (pci_matchbyid((struct pci_attach_args *)aux, ixgbe_devices,
184 	    nitems(ixgbe_devices)));
185 }
186 
187 /*********************************************************************
188  *  Device initialization routine
189  *
190  *  The attach entry point is called when the driver is being loaded.
191  *  This routine identifies the type of hardware, allocates all resources
192  *  and initializes the hardware.
193  *
194  *  return 0 on success, positive on failure
195  *********************************************************************/
196 
197 void
198 ixgbe_attach(struct device *parent, struct device *self, void *aux)
199 {
200 	struct pci_attach_args	*pa = (struct pci_attach_args *)aux;
201 	struct ix_softc		*sc = (struct ix_softc *)self;
202 	int			 error = 0;
203 	uint16_t		 csum;
204 	uint32_t			 ctrl_ext;
205 	struct ixgbe_hw		*hw = &sc->hw;
206 
207 	INIT_DEBUGOUT("ixgbe_attach: begin");
208 
209 	sc->osdep.os_sc = sc;
210 	sc->osdep.os_pa = *pa;
211 
212 	/* Core Lock Init*/
213 	mtx_init(&sc->core_mtx, IPL_NET);
214 
215 	/* Set up the timer callout */
216 	timeout_set(&sc->timer, ixgbe_local_timer, sc);
217 	timeout_set(&sc->rx_refill, ixgbe_rxrefill, sc);
218 
219 	/* Determine hardware revision */
220 	ixgbe_identify_hardware(sc);
221 
222 	/* Indicate to RX setup to use Jumbo Clusters */
223 	sc->num_tx_desc = DEFAULT_TXD;
224 	sc->num_rx_desc = DEFAULT_RXD;
225 	sc->rx_process_limit = 100;	// XXX
226 
227 	/* Do base PCI setup - map BAR0 */
228 	if (ixgbe_allocate_pci_resources(sc))
229 		goto err_out;
230 
231 	/* Allocate our TX/RX Queues */
232 	if (ixgbe_allocate_queues(sc))
233 		goto err_out;
234 
235 	/* Allocate multicast array memory. */
236 	sc->mta = malloc(sizeof(uint8_t) * IXGBE_ETH_LENGTH_OF_ADDRESS *
237 	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
238 	if (sc->mta == 0) {
239 		printf(": Can not allocate multicast setup array\n");
240 		goto err_late;
241 	}
242 
243 	/* Initialize the shared code */
244 	switch (hw->mac.type) {
245 	case ixgbe_mac_82598EB:
246 		error = ixgbe_init_ops_82598(hw);
247 		break;
248 	case ixgbe_mac_82599EB:
249 		error = ixgbe_init_ops_82599(hw);
250 		break;
251 #if 0
252 	case ixgbe_mac_82599_vf:
253 		error = ixgbe_init_ops_vf(hw);
254 		break;
255 #endif
256 	default:
257 		error = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
258 		break;
259 	}
260 	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
261 		/*
262 		 * No optics in this port, set up
263 		 * so the timer routine will probe
264 		 * for later insertion.
265 		 */
266 		sc->sfp_probe = TRUE;
267 		error = 0;
268 	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
269 		printf(": Unsupported SFP+ module detected!\n");
270 		goto err_late;
271 	} else if (error) {
272 		printf(": Unable to initialize the shared code\n");
273 		goto err_late;
274 	}
275 
276 	/* Make sure we have a good EEPROM before we read from it */
277 	if (sc->hw.eeprom.ops.validate_checksum(&sc->hw, &csum) < 0) {
278 		printf(": The EEPROM Checksum Is Not Valid\n");
279 		goto err_late;
280 	}
281 
282 	/* Get Hardware Flow Control setting */
283 	hw->fc.requested_mode = ixgbe_fc_full;
284 	hw->fc.pause_time = IXGBE_FC_PAUSE;
285 	hw->fc.low_water = IXGBE_FC_LO;
286 	hw->fc.high_water = IXGBE_FC_HI;
287 	hw->fc.send_xon = TRUE;
288 
289 	error = sc->hw.mac.ops.init_hw(hw);
290 	if (error == IXGBE_ERR_EEPROM_VERSION) {
291 		printf(": This device is a pre-production adapter/"
292 		    "LOM.  Please be aware there may be issues associated "
293 		    "with your hardware.\n If you are experiencing problems "
294 		    "please contact your Intel or hardware representative "
295 		    "who provided you with this hardware.\n");
296 	} else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
297 		printf("Unsupported SFP+ Module\n");
298 	}
299 
300 	if (error) {
301 		printf(": Hardware Initialization Failure\n");
302 		goto err_late;
303 	}
304 
305 	bcopy(sc->hw.mac.addr, sc->arpcom.ac_enaddr,
306 	    IXGBE_ETH_LENGTH_OF_ADDRESS);
307 
308 	/* XXX sc->msix > 1 && ixgbe_allocate_msix() */
309 	error = ixgbe_allocate_legacy(sc);
310 	if (error)
311 		goto err_late;
312 
313 	/* Setup OS specific network interface */
314 	ixgbe_setup_interface(sc);
315 
316 	/* Initialize statistics */
317 	ixgbe_update_stats_counters(sc);
318 
319 	/* Print PCIE bus type/speed/width info */
320 	hw->mac.ops.get_bus_info(hw);
321 
322 	/* let hardware know driver is loaded */
323 	ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
324 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
325 	IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
326 
327 	printf(", address %s\n", ether_sprintf(sc->hw.mac.addr));
328 
329 	INIT_DEBUGOUT("ixgbe_attach: end");
330 	return;
331 
332 err_late:
333 	ixgbe_free_transmit_structures(sc);
334 	ixgbe_free_receive_structures(sc);
335 err_out:
336 	ixgbe_free_pci_resources(sc);
337 	free(sc->mta, M_DEVBUF);
338 }
339 
340 /*********************************************************************
341  *  Device removal routine
342  *
343  *  The detach entry point is called when the driver is being removed.
344  *  This routine stops the adapter and deallocates all the resources
345  *  that were allocated for driver operation.
346  *
347  *  return 0 on success, positive on failure
348  *********************************************************************/
349 
350 int
351 ixgbe_detach(struct device *self, int flags)
352 {
353 	struct ix_softc *sc = (struct ix_softc *)self;
354 	struct ifnet *ifp = &sc->arpcom.ac_if;
355 	uint32_t	ctrl_ext;
356 
357 	INIT_DEBUGOUT("ixgbe_detach: begin");
358 
359 	ixgbe_stop(sc);
360 
361 	/* let hardware know driver is unloading */
362 	ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
363 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
364 	IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
365 
366 	ether_ifdetach(ifp);
367 	if_detach(ifp);
368 
369 	timeout_del(&sc->timer);
370 	timeout_del(&sc->rx_refill);
371 	ixgbe_free_pci_resources(sc);
372 
373 	ixgbe_free_transmit_structures(sc);
374 	ixgbe_free_receive_structures(sc);
375 	free(sc->mta, M_DEVBUF);
376 
377 	return (0);
378 }
379 
380 /*********************************************************************
381  *  Transmit entry point
382  *
383  *  ixgbe_start is called by the stack to initiate a transmit.
384  *  The driver will remain in this routine as long as there are
385  *  packets to transmit and transmit resources are available.
386  *  In case resources are not available stack is notified and
387  *  the packet is requeued.
388  **********************************************************************/
389 
390 void
391 ixgbe_start_locked(struct tx_ring *txr, struct ifnet * ifp)
392 {
393 	struct mbuf  		*m_head;
394 	struct ix_softc		*sc = txr->sc;
395 	int			 post = 0;
396 
397 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
398 		return;
399 
400 	if (!sc->link_active)
401 		return;
402 
403 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0,
404 	    txr->txdma.dma_map->dm_mapsize,
405 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
406 
407 	for (;;) {
408 		IFQ_POLL(&ifp->if_snd, m_head);
409 		if (m_head == NULL)
410 			break;
411 
412 		if (ixgbe_encap(txr, m_head)) {
413 			ifp->if_flags |= IFF_OACTIVE;
414 			break;
415 		}
416 
417 		IFQ_DEQUEUE(&ifp->if_snd, m_head);
418 
419 #if NBPFILTER > 0
420 		if (ifp->if_bpf)
421 			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
422 #endif
423 
424 		/* Set timeout in case hardware has problems transmitting */
425 		txr->watchdog_timer = IXGBE_TX_TIMEOUT;
426 		ifp->if_timer = IXGBE_TX_TIMEOUT;
427 
428 		post = 1;
429 	}
430 
431         bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
432 	    0, txr->txdma.dma_map->dm_mapsize,
433             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
434 
435 	/*
436 	 * Advance the Transmit Descriptor Tail (Tdt), this tells the
437 	 * hardware that this frame is available to transmit.
438 	 */
439 	if (post)
440 		IXGBE_WRITE_REG(&sc->hw, IXGBE_TDT(txr->me),
441 		    txr->next_avail_desc);
442 }
443 
444 
445 void
446 ixgbe_start(struct ifnet *ifp)
447 {
448 	struct ix_softc *sc = ifp->if_softc;
449 	struct tx_ring	*txr = sc->tx_rings;
450 	uint32_t queue = 0;
451 
452 #if 0
453 	/*
454 	 * This is really just here for testing
455 	 * TX multiqueue, ultimately what is
456 	 * needed is the flow support in the stack
457 	 * and appropriate logic here to deal with
458 	 * it. -jfv
459 	 */
460 	if (sc->num_queues > 1)
461 		queue = (curcpu % sc->num_queues);
462 #endif
463 
464 	txr = &sc->tx_rings[queue];
465 
466 	if (ifp->if_flags & IFF_RUNNING)
467 		ixgbe_start_locked(txr, ifp);
468 
469 	return;
470 }
471 
472 /*********************************************************************
473  *  Ioctl entry point
474  *
475  *  ixgbe_ioctl is called when the user wants to configure the
476  *  interface.
477  *
478  *  return 0 on success, positive on failure
479  **********************************************************************/
480 
481 int
482 ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
483 {
484 	struct ix_softc	*sc = ifp->if_softc;
485 	struct ifaddr	*ifa = (struct ifaddr *) data;
486 	struct ifreq	*ifr = (struct ifreq *) data;
487 	int		s, error = 0;
488 
489 	s = splnet();
490 
491 	switch (command) {
492 	case SIOCSIFADDR:
493 		IOCTL_DEBUGOUT("ioctl: SIOCxIFADDR (Get/Set Interface Addr)");
494 		ifp->if_flags |= IFF_UP;
495 		if (!(ifp->if_flags & IFF_RUNNING))
496 			ixgbe_init(sc);
497 #ifdef INET
498 		if (ifa->ifa_addr->sa_family == AF_INET)
499 			arp_ifinit(&sc->arpcom, ifa);
500 #endif
501 		break;
502 
503 	case SIOCSIFMTU:
504 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
505 		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ifp->if_hardmtu)
506 			error = EINVAL;
507 		else if (ifp->if_mtu != ifr->ifr_mtu) {
508 			ifp->if_mtu = ifr->ifr_mtu;
509 			sc->max_frame_size =
510 				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
511 			ixgbe_init(sc);
512 		}
513 		break;
514 
515 	case SIOCSIFFLAGS:
516 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
517 		if (ifp->if_flags & IFF_UP) {
518 			if (ifp->if_flags & IFF_RUNNING)
519 				error = ENETRESET;
520 			else
521 				ixgbe_init(sc);
522 		} else {
523 			if (ifp->if_flags & IFF_RUNNING)
524 				ixgbe_stop(sc);
525 		}
526 		break;
527 
528 	case SIOCSIFMEDIA:
529 	case SIOCGIFMEDIA:
530 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
531 		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
532 		break;
533 
534 	default:
535 		error = ether_ioctl(ifp, &sc->arpcom, command, data);
536 	}
537 
538 	if (error == ENETRESET) {
539 		if (ifp->if_flags & IFF_RUNNING) {
540 			ixgbe_disable_intr(sc);
541 			ixgbe_iff(sc);
542 			ixgbe_enable_intr(sc);
543 		}
544 		error = 0;
545 	}
546 
547 	splx(s);
548 	return (error);
549 }
550 
551 /*********************************************************************
552  *  Watchdog entry point
553  *
554  *  This routine is called by the local timer
555  *  to detect hardware hangs .
556  *
557  **********************************************************************/
558 
559 void
560 ixgbe_watchdog(struct ifnet * ifp)
561 {
562 	struct ix_softc *sc = (struct ix_softc *)ifp->if_softc;
563 	struct tx_ring *txr = sc->tx_rings;
564 	struct ixgbe_hw *hw = &sc->hw;
565 	int		tx_hang = FALSE;
566 	int		i;
567 
568         /*
569          * The timer is set to 5 every time ixgbe_start() queues a packet.
570          * Then ixgbe_txeof() keeps resetting to 5 as long as it cleans at
571          * least one descriptor.
572          * Finally, anytime all descriptors are clean the timer is
573          * set to 0.
574          */
575 	for (i = 0; i < sc->num_queues; i++, txr++) {
576         	if (txr->watchdog_timer == 0 || --txr->watchdog_timer)
577                 	continue;
578 		else {
579 			tx_hang = TRUE;
580 			break;
581 		}
582 	}
583 	if (tx_hang == FALSE)
584 		return;
585 
586 	/*
587 	 * If we are in this routine because of pause frames, then don't
588 	 * reset the hardware.
589 	 */
590 	if (IXGBE_READ_REG(hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF) {
591 		for (i = 0; i < sc->num_queues; i++, txr++)
592 			txr->watchdog_timer = IXGBE_TX_TIMEOUT;
593 		ifp->if_timer = IXGBE_TX_TIMEOUT;
594 		return;
595 	}
596 
597 
598 	printf("%s: Watchdog timeout -- resetting\n", ifp->if_xname);
599 	for (i = 0; i < sc->num_queues; i++, txr++) {
600 		printf("%s: Queue(%d) tdh = %d, hw tdt = %d\n", ifp->if_xname, i,
601 		    IXGBE_READ_REG(hw, IXGBE_TDH(i)),
602 		    IXGBE_READ_REG(hw, IXGBE_TDT(i)));
603 		printf("%s: TX(%d) desc avail = %d, Next TX to Clean = %d\n", ifp->if_xname,
604 		    i, txr->tx_avail, txr->next_to_clean);
605 	}
606 	ifp->if_flags &= ~IFF_RUNNING;
607 	sc->watchdog_events++;
608 
609 	ixgbe_init(sc);
610 	return;
611 }
612 
613 /*********************************************************************
614  *  Init entry point
615  *
616  *  This routine is used in two ways. It is used by the stack as
617  *  init entry point in network interface structure. It is also used
618  *  by the driver as a hw/sw initialization routine to get to a
619  *  consistent state.
620  *
621  *  return 0 on success, positive on failure
622  **********************************************************************/
623 #define IXGBE_MHADD_MFS_SHIFT 16
624 
625 void
626 ixgbe_init(void *arg)
627 {
628 	struct ix_softc	*sc = (struct ix_softc *)arg;
629 	struct ifnet	*ifp = &sc->arpcom.ac_if;
630 	struct rx_ring	*rxr = sc->rx_rings;
631 	uint32_t	 k, txdctl, rxdctl, rxctrl, mhadd, gpie, itr;
632 	int		 i, s, err;
633 
634 	INIT_DEBUGOUT("ixgbe_init: begin");
635 
636 	s = splnet();
637 
638 	ixgbe_stop(sc);
639 
640 	/* reprogram the RAR[0] in case user changed it. */
641 	ixgbe_hw(&sc->hw, set_rar, 0, sc->hw.mac.addr, 0, IXGBE_RAH_AV);
642 
643 	/* Get the latest mac address, User can use a LAA */
644 	bcopy(sc->arpcom.ac_enaddr, sc->hw.mac.addr,
645 	      IXGBE_ETH_LENGTH_OF_ADDRESS);
646 	ixgbe_hw(&sc->hw, set_rar, 0, sc->hw.mac.addr, 0, 1);
647 	sc->hw.addr_ctrl.rar_used_count = 1;
648 
649 	/* Prepare transmit descriptors and buffers */
650 	if (ixgbe_setup_transmit_structures(sc)) {
651 		printf("%s: Could not setup transmit structures\n",
652 		    ifp->if_xname);
653 		ixgbe_stop(sc);
654 		splx(s);
655 		return;
656 	}
657 
658 	ixgbe_hw0(&sc->hw, init_hw);
659 	ixgbe_initialize_transmit_units(sc);
660 
661 	/* Determine the correct buffer size for jumbo/headersplit */
662 	if (sc->max_frame_size <= 2048)
663 		sc->rx_mbuf_sz = MCLBYTES;
664 	else if (sc->max_frame_size <= 4096)
665 		sc->rx_mbuf_sz = 4096;
666 	else if (sc->max_frame_size <= 9216)
667 		sc->rx_mbuf_sz = 9216;
668 	else
669 		sc->rx_mbuf_sz = 16 * 1024;
670 
671 	/* Prepare receive descriptors and buffers */
672 	if (ixgbe_setup_receive_structures(sc)) {
673 		printf("%s: Could not setup receive structures\n",
674 		    ifp->if_xname);
675 		ixgbe_stop(sc);
676 		splx(s);
677 		return;
678 	}
679 
680 	/* Configure RX settings */
681 	ixgbe_initialize_receive_units(sc);
682 
683 	/* Program promiscuous mode and multicast filters. */
684 	ixgbe_iff(sc);
685 
686 	gpie = IXGBE_READ_REG(&sc->hw, IXGBE_GPIE);
687 
688 	/* Enable Fan Failure Interrupt */
689 	gpie |= IXGBE_SDP1_GPIEN;
690 
691 	if (sc->hw.mac.type == ixgbe_mac_82599EB) {
692 		/* Add for Thermal detection */
693 		gpie |= IXGBE_SDP2_GPIEN;
694 
695 		/*
696 		 * Set LL interval to max to reduce the number of low latency
697 		 * interrupts hitting the card when the ring is getting full.
698 		 */
699 		gpie |= 0xf << IXGBE_GPIE_LLI_DELAY_SHIFT;
700 	}
701 
702 	if (sc->msix > 1) {
703 		/* Enable Enhanced MSIX mode */
704 		gpie |= IXGBE_GPIE_MSIX_MODE;
705 		gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
706 		    IXGBE_GPIE_OCD;
707 	}
708 	IXGBE_WRITE_REG(&sc->hw, IXGBE_GPIE, gpie);
709 
710 	/* Set MTU size */
711 	if (ifp->if_mtu > ETHERMTU) {
712 		mhadd = IXGBE_READ_REG(&sc->hw, IXGBE_MHADD);
713 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
714 		mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
715 		IXGBE_WRITE_REG(&sc->hw, IXGBE_MHADD, mhadd);
716 	}
717 
718 	/* Now enable all the queues */
719 
720 	for (i = 0; i < sc->num_queues; i++) {
721 		txdctl = IXGBE_READ_REG(&sc->hw, IXGBE_TXDCTL(i));
722 		txdctl |= IXGBE_TXDCTL_ENABLE;
723 		/* Set WTHRESH to 8, burst writeback */
724 		txdctl |= (8 << 16);
725 		/*
726 		 * When the internal queue falls below PTHRESH (16),
727 		 * start prefetching as long as there are at least
728 		 * HTHRESH (1) buffers ready.
729 		 */
730 		txdctl |= (16 << 0) | (1 << 8);
731 		IXGBE_WRITE_REG(&sc->hw, IXGBE_TXDCTL(i), txdctl);
732 	}
733 
734 	for (i = 0; i < sc->num_queues; i++) {
735 		rxdctl = IXGBE_READ_REG(&sc->hw, IXGBE_RXDCTL(i));
736 		if (sc->hw.mac.type == ixgbe_mac_82598EB) {
737 			/*
738 			 * PTHRESH = 21
739 			 * HTHRESH = 4
740 			 * WTHRESH = 8
741 			 */
742 			rxdctl &= ~0x3FFFFF;
743 			rxdctl |= 0x080420;
744 		}
745 		rxdctl |= IXGBE_RXDCTL_ENABLE;
746 		IXGBE_WRITE_REG(&sc->hw, IXGBE_RXDCTL(i), rxdctl);
747 		for (k = 0; k < 10; k++) {
748 			if (IXGBE_READ_REG(&sc->hw, IXGBE_RXDCTL(i)) &
749 			    IXGBE_RXDCTL_ENABLE)
750 				break;
751 			else
752 				msec_delay(1);
753 		}
754 		/* XXX wmb() : memory barrier */
755 		IXGBE_WRITE_REG(&sc->hw, IXGBE_RDT(i), rxr->last_desc_filled);
756 	}
757 
758 	/* Set up VLAN support and filter */
759 	ixgbe_setup_vlan_hw_support(sc);
760 
761 	/* Enable Receive engine */
762 	rxctrl = IXGBE_READ_REG(&sc->hw, IXGBE_RXCTRL);
763 	if (sc->hw.mac.type == ixgbe_mac_82598EB)
764 		rxctrl |= IXGBE_RXCTRL_DMBYPS;
765 	rxctrl |= IXGBE_RXCTRL_RXEN;
766 	ixgbe_hw(&sc->hw, enable_rx_dma, rxctrl);
767 
768 	timeout_add_sec(&sc->timer, 1);
769 
770 #ifdef MSI
771 	/* Set up MSI/X routing */
772 	if (ixgbe_enable_msix) {
773 		ixgbe_configure_ivars(sc);
774 		/* Set up auto-mask */
775 		if (sc->hw.mac.type == ixgbe_mac_82598EB)
776 			IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
777 		else {
778 			IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
779 			IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
780 		}
781 	} else  /* Simple settings for Legacy/MSI */
782 #else
783 	{
784 		ixgbe_set_ivar(sc, 0, 0, 0);
785 		ixgbe_set_ivar(sc, 0, 0, 1);
786 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
787 	}
788 #endif
789 
790 #ifdef IXGBE_FDIR
791 	/* Init Flow director */
792 	if (sc->hw.mac.type != ixgbe_mac_82598EB)
793 		ixgbe_init_fdir_signature_82599(&sc->hw, fdir_pballoc);
794 #endif
795 
796 	/*
797 	 * Check on any SFP devices that
798 	 * need to be kick-started
799 	 */
800 	if (sc->hw.phy.type == ixgbe_phy_none) {
801 		err = sc->hw.phy.ops.identify(&sc->hw);
802 		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
803 			printf("Unsupported SFP+ module type was detected.\n");
804 			splx(s);
805 			return;
806 		}
807 	}
808 
809 	/* Setup interrupt moderation */
810 	if (sc->hw.mac.type == ixgbe_mac_82598EB)
811 		itr = (8000000 / IXGBE_INTS_PER_SEC) & 0xff8;
812 	else {
813 		itr = (4000000 / IXGBE_INTS_PER_SEC) & 0xff8;
814 		itr |= IXGBE_EITR_LLI_MOD | IXGBE_EITR_CNT_WDIS;
815 	}
816 	IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(0), itr);
817 
818 	/* Config/Enable Link */
819 	ixgbe_config_link(sc);
820 
821 	/* And now turn on interrupts */
822 	ixgbe_enable_intr(sc);
823 
824 	/* Now inform the stack we're ready */
825 	ifp->if_flags |= IFF_RUNNING;
826 	ifp->if_flags &= ~IFF_OACTIVE;
827 
828 	splx(s);
829 }
830 
831 /*
832  * MSIX Interrupt Handlers
833  */
834 void
835 ixgbe_enable_queue(struct ix_softc *sc, uint32_t vector)
836 {
837 	uint64_t queue = 1ULL << vector;
838 	uint32_t mask;
839 
840 	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
841 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
842 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMS, mask);
843 	} else {
844 		mask = (queue & 0xFFFFFFFF);
845 		if (mask)
846 			IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMS_EX(0), mask);
847 		mask = (queue >> 32);
848 		if (mask)
849 			IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMS_EX(1), mask);
850 	}
851 }
852 
853 void
854 ixgbe_disable_queue(struct ix_softc *sc, uint32_t vector)
855 {
856 	uint64_t queue = 1ULL << vector;
857 	uint32_t mask;
858 
859 	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
860 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
861 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, mask);
862 	} else {
863 		mask = (queue & 0xFFFFFFFF);
864 		if (mask)
865 			IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(0), mask);
866 		mask = (queue >> 32);
867 		if (mask)
868 			IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(1), mask);
869 	}
870 }
871 
872 void
873 ixgbe_rearm_queue(struct ix_softc *sc, uint32_t vector)
874 {
875         uint64_t queue = 1ULL << vector;
876 	uint32_t mask;
877 
878 	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
879 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
880 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EICS, mask);
881 	} else {
882 		mask = (queue & 0xFFFFFFFF);
883 		if (mask)
884 			IXGBE_WRITE_REG(&sc->hw, IXGBE_EICS_EX(0), mask);
885 		mask = (queue >> 32);
886 		if (mask)
887 			IXGBE_WRITE_REG(&sc->hw, IXGBE_EICS_EX(1), mask);
888 	}
889 }
890 
891 void
892 ixgbe_handle_que(void *context, int pending)
893 {
894         struct ix_queue *que = context;
895 	struct ix_softc *sc = que->sc;
896 	struct tx_ring	*txr = que->txr;
897 	struct ifnet	*ifp = &que->sc->arpcom.ac_if;
898 
899 	if (ifp->if_flags & IFF_RUNNING) {
900 		ixgbe_rxeof(que, -1 /* XXX sc->rx_process_limit */);
901 		ixgbe_txeof(txr);
902 
903 		if (ixgbe_rxfill(que->rxr)) {
904 			/* Advance the Rx Queue "Tail Pointer" */
905 			IXGBE_WRITE_REG(&sc->hw, IXGBE_RDT(que->rxr->me),
906 			    que->rxr->last_desc_filled);
907 		}
908 
909 		if (!IFQ_IS_EMPTY(&ifp->if_snd))
910 			ixgbe_start_locked(txr, ifp);
911 	}
912 
913 	/* Reenable this interrupt */
914 	ixgbe_enable_queue(que->sc, que->msix);
915 }
916 
917 /*********************************************************************
918  *
919  *  Legacy Interrupt Service routine
920  *
921  **********************************************************************/
922 
923 int
924 ixgbe_legacy_irq(void *arg)
925 {
926 	struct ix_softc	*sc = (struct ix_softc *)arg;
927 	struct ix_queue *que = sc->queues;
928 	struct ifnet	*ifp = &sc->arpcom.ac_if;
929 	struct tx_ring	*txr = sc->tx_rings;
930 	struct ixgbe_hw	*hw = &sc->hw;
931 	uint32_t	 reg_eicr;
932 	int		 i, refill = 0;
933 
934 	reg_eicr = IXGBE_READ_REG(&sc->hw, IXGBE_EICR);
935 	if (reg_eicr == 0) {
936 		ixgbe_enable_intr(sc);
937 		return (0);
938 	}
939 
940 	++que->irqs;
941 	if (ifp->if_flags & IFF_RUNNING) {
942 		ixgbe_rxeof(que, -1);
943 		ixgbe_txeof(txr);
944 		refill = 1;
945 	}
946 
947 	/* Check for fan failure */
948 	if ((hw->phy.media_type == ixgbe_media_type_copper) &&
949 	    (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
950                 printf("\n%s: CRITICAL: FAN FAILURE!! "
951 		    "REPLACE IMMEDIATELY!!\n", ifp->if_xname);
952 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMS,
953 		    IXGBE_EICR_GPI_SDP1);
954 	}
955 
956 	/* Link status change */
957 	if (reg_eicr & IXGBE_EICR_LSC) {
958 		timeout_del(&sc->timer);
959 	        ixgbe_update_link_status(sc);
960 		timeout_add_sec(&sc->timer, 1);
961 	}
962 
963 	if (refill) {
964 		if (ixgbe_rxfill(que->rxr)) {
965 			/* Advance the Rx Queue "Tail Pointer" */
966 			IXGBE_WRITE_REG(&sc->hw, IXGBE_RDT(que->rxr->me),
967 			    que->rxr->last_desc_filled);
968 		} else
969 			timeout_add(&sc->rx_refill, 1);
970 	}
971 
972 	if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
973 		ixgbe_start_locked(txr, ifp);
974 
975 	for (i = 0; i < sc->num_queues; i++, que++)
976 		ixgbe_enable_queue(sc, que->msix);
977 
978 	return (1);
979 }
980 
981 /*********************************************************************
982  *
983  *  Media Ioctl callback
984  *
985  *  This routine is called whenever the user queries the status of
986  *  the interface using ifconfig.
987  *
988  **********************************************************************/
989 void
990 ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
991 {
992 	struct ix_softc *sc = ifp->if_softc;
993 
994 	ifmr->ifm_active = IFM_ETHER;
995 	ifmr->ifm_status = IFM_AVALID;
996 
997 	INIT_DEBUGOUT("ixgbe_media_status: begin");
998 	ixgbe_update_link_status(sc);
999 
1000 	if (LINK_STATE_IS_UP(ifp->if_link_state)) {
1001 		ifmr->ifm_status |= IFM_ACTIVE;
1002 
1003 		switch (sc->link_speed) {
1004 		case IXGBE_LINK_SPEED_1GB_FULL:
1005 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1006 			break;
1007 		case IXGBE_LINK_SPEED_10GB_FULL:
1008 			ifmr->ifm_active |= sc->optics | IFM_FDX;
1009 			break;
1010 		}
1011 	}
1012 }
1013 
1014 /*********************************************************************
1015  *
1016  *  Media Ioctl callback
1017  *
1018  *  This routine is called when the user changes speed/duplex using
1019  *  media/mediopt option with ifconfig.
1020  *
1021  **********************************************************************/
1022 int
1023 ixgbe_media_change(struct ifnet * ifp)
1024 {
1025 	/* ignore */
1026 	return (0);
1027 }
1028 
1029 /*********************************************************************
1030  *
1031  *  This routine maps the mbufs to tx descriptors.
1032  *    WARNING: while this code is using an MQ style infrastructure,
1033  *    it would NOT work as is with more than 1 queue.
1034  *
1035  *  return 0 on success, positive on failure
1036  **********************************************************************/
1037 
1038 int
1039 ixgbe_encap(struct tx_ring *txr, struct mbuf *m_head)
1040 {
1041 	struct ix_softc *sc = txr->sc;
1042 	uint32_t	olinfo_status = 0, cmd_type_len = 0;
1043 	int             i, j, error;
1044 	int		first, last = 0;
1045 	bus_dmamap_t	map;
1046 	struct ixgbe_tx_buf *txbuf;
1047 	union ixgbe_adv_tx_desc *txd = NULL;
1048 	uint32_t	paylen = 0;
1049 
1050 	/* Basic descriptor defines */
1051         cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
1052         cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
1053 
1054 #if NVLAN > 0
1055 	if (m_head->m_flags & M_VLANTAG)
1056 		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
1057 #endif
1058 
1059 	/*
1060 	 * Force a cleanup if number of TX descriptors
1061 	 * available is below the threshold. If it fails
1062 	 * to get above, then abort transmit.
1063 	 */
1064 	if (txr->tx_avail <= IXGBE_TX_CLEANUP_THRESHOLD) {
1065 		ixgbe_txeof(txr);
1066 		/* Make sure things have improved */
1067 		if (txr->tx_avail <= IXGBE_TX_OP_THRESHOLD)
1068 			return (ENOBUFS);
1069 	}
1070 
1071         /*
1072          * Important to capture the first descriptor
1073          * used because it will contain the index of
1074          * the one we tell the hardware to report back
1075          */
1076         first = txr->next_avail_desc;
1077 	txbuf = &txr->tx_buffers[first];
1078 	map = txbuf->map;
1079 
1080 	/*
1081 	 * Map the packet for DMA.
1082 	 */
1083 	error = bus_dmamap_load_mbuf(txr->txdma.dma_tag, map,
1084 	    m_head, BUS_DMA_NOWAIT);
1085 	/* XXX EFBIG */
1086 	if (error == ENOMEM) {
1087 		sc->no_tx_dma_setup++;
1088 		return (error);
1089 	} else if (error != 0) {
1090 		sc->no_tx_dma_setup++;
1091 		return (error);
1092 	}
1093 
1094 	/* Make certain there are enough descriptors */
1095 	if (map->dm_nsegs > txr->tx_avail - 2) {
1096 		error = ENOBUFS;
1097 		goto xmit_fail;
1098 	}
1099 
1100 	/*
1101 	 * Set the appropriate offload context
1102 	 * this becomes the first descriptor of
1103 	 * a packet.
1104 	 */
1105 #ifdef notyet
1106 	if (ixgbe_tso_setup(txr, m_head, &paylen)) {
1107 		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
1108 		olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
1109 		olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
1110 		olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
1111 		++sc->tso_tx;
1112 	} else
1113 #endif
1114 	if (ixgbe_tx_ctx_setup(txr, m_head))
1115 		olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
1116 
1117 	/* Record payload length */
1118 	if (paylen == 0)
1119 		olinfo_status |= m_head->m_pkthdr.len <<
1120 		    IXGBE_ADVTXD_PAYLEN_SHIFT;
1121 
1122 	i = txr->next_avail_desc;
1123 	for (j = 0; j < map->dm_nsegs; j++) {
1124 		txbuf = &txr->tx_buffers[i];
1125 		txd = &txr->tx_base[i];
1126 
1127 		txd->read.buffer_addr = htole64(map->dm_segs[j].ds_addr);
1128 		txd->read.cmd_type_len = htole32(txr->txd_cmd |
1129 		    cmd_type_len | map->dm_segs[j].ds_len);
1130 		txd->read.olinfo_status = htole32(olinfo_status);
1131 		last = i; /* descriptor that will get completion IRQ */
1132 
1133 		if (++i == sc->num_tx_desc)
1134 			i = 0;
1135 
1136 		txbuf->m_head = NULL;
1137 		txbuf->eop_index = -1;
1138 	}
1139 
1140 	txd->read.cmd_type_len |=
1141 	    htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
1142 	txr->tx_avail -= map->dm_nsegs;
1143 	txr->next_avail_desc = i;
1144 
1145 	txbuf->m_head = m_head;
1146 	/* swap maps because last tx descriptor is tracking all the data */
1147 	txr->tx_buffers[first].map = txbuf->map;
1148 	txbuf->map = map;
1149 	bus_dmamap_sync(txr->txdma.dma_tag, map, 0, map->dm_mapsize,
1150 	    BUS_DMASYNC_PREWRITE);
1151 
1152         /* Set the index of the descriptor that will be marked done */
1153         txbuf = &txr->tx_buffers[first];
1154 	txbuf->eop_index = last;
1155 
1156 	++txr->tx_packets;
1157 	return (0);
1158 
1159 xmit_fail:
1160 	bus_dmamap_unload(txr->txdma.dma_tag, txbuf->map);
1161 	return (error);
1162 
1163 }
1164 
1165 void
1166 ixgbe_iff(struct ix_softc *sc)
1167 {
1168 	struct ifnet *ifp = &sc->arpcom.ac_if;
1169 	struct arpcom *ac = &sc->arpcom;
1170 	uint32_t	fctrl;
1171 	uint8_t	*mta;
1172 	uint8_t	*update_ptr;
1173 	struct ether_multi *enm;
1174 	struct ether_multistep step;
1175 	int	mcnt = 0;
1176 
1177 	IOCTL_DEBUGOUT("ixgbe_iff: begin");
1178 
1179 	mta = sc->mta;
1180 	bzero(mta, sizeof(uint8_t) * IXGBE_ETH_LENGTH_OF_ADDRESS *
1181 	    MAX_NUM_MULTICAST_ADDRESSES);
1182 
1183 	fctrl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
1184 	fctrl &= ~(IXGBE_FCTRL_MPE | IXGBE_FCTRL_UPE);
1185 	ifp->if_flags &= ~IFF_ALLMULTI;
1186 
1187 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0 ||
1188 	    ac->ac_multicnt > MAX_NUM_MULTICAST_ADDRESSES) {
1189 		ifp->if_flags |= IFF_ALLMULTI;
1190 		fctrl |= IXGBE_FCTRL_MPE;
1191 		if (ifp->if_flags & IFF_PROMISC)
1192 			fctrl |= IXGBE_FCTRL_UPE;
1193 	} else {
1194 		ETHER_FIRST_MULTI(step, &sc->arpcom, enm);
1195 		while (enm != NULL) {
1196 			bcopy(enm->enm_addrlo,
1197 			    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1198 			    IXGBE_ETH_LENGTH_OF_ADDRESS);
1199 			mcnt++;
1200 
1201 			ETHER_NEXT_MULTI(step, enm);
1202 		}
1203 
1204 		update_ptr = mta;
1205 		ixgbe_hw(&sc->hw, update_mc_addr_list,
1206 		    update_ptr, mcnt, ixgbe_mc_array_itr);
1207 	}
1208 
1209 	IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, fctrl);
1210 }
1211 
1212 /*
1213  * This is an iterator function now needed by the multicast
1214  * shared code. It simply feeds the shared code routine the
1215  * addresses in the array of ixgbe_iff() one by one.
1216  */
1217 uint8_t *
1218 ixgbe_mc_array_itr(struct ixgbe_hw *hw, uint8_t **update_ptr, uint32_t *vmdq)
1219 {
1220 	uint8_t *addr = *update_ptr;
1221 	uint8_t *newptr;
1222 	*vmdq = 0;
1223 
1224 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1225 	*update_ptr = newptr;
1226 	return addr;
1227 }
1228 
1229 
1230 /*********************************************************************
1231  *  Timer routine
1232  *
1233  *  This routine checks for link status,updates statistics,
1234  *  and runs the watchdog timer.
1235  *
1236  **********************************************************************/
1237 
1238 void
1239 ixgbe_local_timer(void *arg)
1240 {
1241 	struct ix_softc *sc = arg;
1242 #ifdef IX_DEBUG
1243 	struct ifnet	*ifp = &sc->arpcom.ac_if;
1244 #endif
1245 	int		 s;
1246 
1247 	s = splnet();
1248 
1249 	/* Check for pluggable optics */
1250 	if (sc->sfp_probe)
1251 		if (!ixgbe_sfp_probe(sc))
1252 			goto out; /* Nothing to do */
1253 
1254 	ixgbe_update_link_status(sc);
1255 	ixgbe_update_stats_counters(sc);
1256 
1257 out:
1258 #ifdef IX_DEBUG
1259 	if ((ifp->if_flags & (IFF_RUNNING|IFF_DEBUG)) ==
1260 	    (IFF_RUNNING|IFF_DEBUG))
1261 		ixgbe_print_hw_stats(sc);
1262 #endif
1263 	timeout_add_sec(&sc->timer, 1);
1264 
1265 	splx(s);
1266 }
1267 
1268 void
1269 ixgbe_update_link_status(struct ix_softc *sc)
1270 {
1271 	int link_up = FALSE;
1272 	struct ifnet *ifp = &sc->arpcom.ac_if;
1273 	struct tx_ring *txr = sc->tx_rings;
1274 	int		link_state;
1275 	int		i;
1276 
1277 	ixgbe_hw(&sc->hw, check_link, &sc->link_speed, &link_up, 0);
1278 
1279 	link_state = link_up ? LINK_STATE_FULL_DUPLEX : LINK_STATE_DOWN;
1280 
1281 	if (ifp->if_link_state != link_state) {
1282 		sc->link_active = link_up;
1283 		ifp->if_link_state = link_state;
1284 		if_link_state_change(ifp);
1285 	}
1286 
1287 	if (LINK_STATE_IS_UP(ifp->if_link_state)) {
1288 		switch (sc->link_speed) {
1289 		case IXGBE_LINK_SPEED_UNKNOWN:
1290 			ifp->if_baudrate = 0;
1291 			break;
1292 		case IXGBE_LINK_SPEED_100_FULL:
1293 			ifp->if_baudrate = IF_Mbps(100);
1294 			break;
1295 		case IXGBE_LINK_SPEED_1GB_FULL:
1296 			ifp->if_baudrate = IF_Gbps(1);
1297 			break;
1298 		case IXGBE_LINK_SPEED_10GB_FULL:
1299 			ifp->if_baudrate = IF_Gbps(10);
1300 			break;
1301 		}
1302 	} else {
1303 		ifp->if_baudrate = 0;
1304 		ifp->if_timer = 0;
1305 		for (i = 0; i < sc->num_queues; i++)
1306 			txr[i].watchdog_timer = FALSE;
1307 	}
1308 
1309 
1310 	return;
1311 }
1312 
1313 
1314 
1315 /*********************************************************************
1316  *
1317  *  This routine disables all traffic on the sc by issuing a
1318  *  global reset on the MAC and deallocates TX/RX buffers.
1319  *
1320  **********************************************************************/
1321 
1322 void
1323 ixgbe_stop(void *arg)
1324 {
1325 	struct ix_softc *sc = arg;
1326 	struct ifnet   *ifp = &sc->arpcom.ac_if;
1327 
1328 	/* Tell the stack that the interface is no longer active */
1329 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1330 
1331 	INIT_DEBUGOUT("ixgbe_stop: begin\n");
1332 	ixgbe_disable_intr(sc);
1333 
1334 	ixgbe_hw0(&sc->hw, reset_hw);
1335 	sc->hw.adapter_stopped = FALSE;
1336 	ixgbe_hw0(&sc->hw, stop_adapter);
1337 	/* Turn off the laser */
1338 	if (sc->hw.phy.multispeed_fiber)
1339 		ixgbe_hw0(&sc->hw, disable_tx_laser);
1340 	timeout_del(&sc->timer);
1341 	timeout_del(&sc->rx_refill);
1342 
1343 	/* reprogram the RAR[0] in case user changed it. */
1344 	ixgbe_hw(&sc->hw, set_rar, 0, sc->hw.mac.addr, 0, IXGBE_RAH_AV);
1345 
1346 	/* Should we really clear all structures on stop? */
1347 	ixgbe_free_transmit_structures(sc);
1348 	ixgbe_free_receive_structures(sc);
1349 }
1350 
1351 
1352 /*********************************************************************
1353  *
1354  *  Determine hardware revision.
1355  *
1356  **********************************************************************/
1357 void
1358 ixgbe_identify_hardware(struct ix_softc *sc)
1359 {
1360 	struct ixgbe_osdep	*os = &sc->osdep;
1361 	struct pci_attach_args	*pa = &os->os_pa;
1362 	uint32_t		 reg;
1363 
1364 	/* Save off the information about this board */
1365 	sc->hw.vendor_id = PCI_VENDOR(pa->pa_id);
1366 	sc->hw.device_id = PCI_PRODUCT(pa->pa_id);
1367 
1368 	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
1369 	sc->hw.revision_id = PCI_REVISION(reg);
1370 
1371 	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
1372 	sc->hw.subsystem_vendor_id = PCI_VENDOR(reg);
1373 	sc->hw.subsystem_device_id = PCI_PRODUCT(reg);
1374 
1375 	switch (sc->hw.device_id) {
1376 	case PCI_PRODUCT_INTEL_82598:
1377 	case PCI_PRODUCT_INTEL_82598AF_DUAL:
1378 	case PCI_PRODUCT_INTEL_82598_DA_DUAL:
1379 	case PCI_PRODUCT_INTEL_82598AF:
1380 	case PCI_PRODUCT_INTEL_82598_SR_DUAL_EM:
1381 	case PCI_PRODUCT_INTEL_82598EB_SFP:
1382 		sc->hw.mac.type = ixgbe_mac_82598EB;
1383 		sc->optics = IFM_10G_SR;
1384 		break;
1385 	case PCI_PRODUCT_INTEL_82598EB_CX4_DUAL:
1386 	case PCI_PRODUCT_INTEL_82598EB_CX4:
1387 		sc->hw.mac.type = ixgbe_mac_82598EB;
1388 		sc->optics = IFM_10G_CX4;
1389 		break;
1390 	case PCI_PRODUCT_INTEL_82598EB_XF_LR:
1391 		sc->hw.mac.type = ixgbe_mac_82598EB;
1392 		sc->optics = IFM_10G_LR;
1393 		break;
1394 	case PCI_PRODUCT_INTEL_82598AT:
1395 	case PCI_PRODUCT_INTEL_82598AT2:
1396 	case PCI_PRODUCT_INTEL_82598AT_DUAL:
1397 		sc->hw.mac.type = ixgbe_mac_82598EB;
1398 		sc->optics = IFM_10G_T;
1399 		break;
1400 	case PCI_PRODUCT_INTEL_82598_BX:
1401 		sc->hw.mac.type = ixgbe_mac_82598EB;
1402 		sc->optics = IFM_AUTO;
1403 		break;
1404 	case PCI_PRODUCT_INTEL_82599_SFP:
1405 	case PCI_PRODUCT_INTEL_82599_SFP_EM:
1406 	case PCI_PRODUCT_INTEL_82599_SFP_FCOE:
1407 		sc->hw.mac.type = ixgbe_mac_82599EB;
1408 		sc->optics = IFM_10G_SR;
1409 		sc->hw.phy.smart_speed = ixgbe_smart_speed;
1410 		break;
1411 	case PCI_PRODUCT_INTEL_82599_KX4:
1412 	case PCI_PRODUCT_INTEL_82599_KX4_MEZZ:
1413 	case PCI_PRODUCT_INTEL_82599_CX4:
1414 		sc->hw.mac.type = ixgbe_mac_82599EB;
1415 		sc->optics = IFM_10G_CX4;
1416 		sc->hw.phy.smart_speed = ixgbe_smart_speed;
1417 		break;
1418 	case PCI_PRODUCT_INTEL_82599_T3_LOM:
1419 		sc->hw.mac.type = ixgbe_mac_82599EB;
1420 		sc->optics = IFM_10G_T;
1421 		sc->hw.phy.smart_speed = ixgbe_smart_speed;
1422 		break;
1423 	case PCI_PRODUCT_INTEL_82599_XAUI:
1424 	case PCI_PRODUCT_INTEL_82599_COMBO_BACKPLANE:
1425 	case PCI_PRODUCT_INTEL_82599_BPLANE_FCOE:
1426 		sc->hw.mac.type = ixgbe_mac_82599EB;
1427 		sc->optics = IFM_AUTO;
1428 		sc->hw.phy.smart_speed = ixgbe_smart_speed;
1429 		break;
1430 	case PCI_PRODUCT_INTEL_82599VF:
1431 		sc->hw.mac.type = ixgbe_mac_82599_vf;
1432 		sc->optics = IFM_AUTO;
1433 		sc->hw.phy.smart_speed = ixgbe_smart_speed;
1434 		break;
1435 	default:
1436 		sc->optics = IFM_AUTO;
1437 		break;
1438 	}
1439 }
1440 
1441 /*********************************************************************
1442  *
1443  *  Determine optic type
1444  *
1445  **********************************************************************/
1446 void
1447 ixgbe_setup_optics(struct ix_softc *sc)
1448 {
1449 	struct ixgbe_hw *hw = &sc->hw;
1450 	int		layer;
1451 
1452 	layer = ixgbe_hw(hw, get_supported_physical_layer);
1453 	switch (layer) {
1454 		case IXGBE_PHYSICAL_LAYER_10GBASE_T:
1455 			sc->optics = IFM_10G_T;
1456 			break;
1457 		case IXGBE_PHYSICAL_LAYER_1000BASE_T:
1458 			sc->optics = IFM_1000_T;
1459 			break;
1460 		case IXGBE_PHYSICAL_LAYER_10GBASE_LR:
1461 		case IXGBE_PHYSICAL_LAYER_10GBASE_LRM:
1462 			sc->optics = IFM_10G_LR;
1463 			break;
1464 		case IXGBE_PHYSICAL_LAYER_10GBASE_SR:
1465 			sc->optics = IFM_10G_SR;
1466 			break;
1467 		case IXGBE_PHYSICAL_LAYER_10GBASE_KX4:
1468 		case IXGBE_PHYSICAL_LAYER_10GBASE_CX4:
1469 			sc->optics = IFM_10G_CX4;
1470 			break;
1471 		case IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU:
1472 			sc->optics = IFM_10G_SFP_CU;
1473 			break;
1474 		case IXGBE_PHYSICAL_LAYER_1000BASE_KX:
1475 		case IXGBE_PHYSICAL_LAYER_10GBASE_KR:
1476 		case IXGBE_PHYSICAL_LAYER_10GBASE_XAUI:
1477 		case IXGBE_PHYSICAL_LAYER_UNKNOWN:
1478 		default:
1479 			sc->optics = IFM_ETHER | IFM_AUTO;
1480 			break;
1481 	}
1482 	return;
1483 }
1484 
1485 /*********************************************************************
1486  *
1487  *  Setup the Legacy or MSI Interrupt handler
1488  *
1489  **********************************************************************/
1490 int
1491 ixgbe_allocate_legacy(struct ix_softc *sc)
1492 {
1493 	struct ifnet		*ifp = &sc->arpcom.ac_if;
1494 	struct ixgbe_osdep	*os = &sc->osdep;
1495 	struct pci_attach_args	*pa = &os->os_pa;
1496 	const char		*intrstr = NULL;
1497 	pci_chipset_tag_t	pc = pa->pa_pc;
1498 	pci_intr_handle_t	ih;
1499 
1500 	/* We allocate a single interrupt resource */
1501 	if (pci_intr_map_msi(pa, &ih) != 0 &&
1502 	    pci_intr_map(pa, &ih) != 0) {
1503 		printf(": couldn't map interrupt\n");
1504 		return (ENXIO);
1505 	}
1506 
1507 #if 0
1508 	/* XXX */
1509 	/* Tasklets for Link, SFP and Multispeed Fiber */
1510 	TASK_INIT(&sc->link_task, 0, ixgbe_handle_link, sc);
1511 	TASK_INIT(&sc->mod_task, 0, ixgbe_handle_mod, sc);
1512 	TASK_INIT(&sc->msf_task, 0, ixgbe_handle_msf, sc);
1513 #endif
1514 
1515 	intrstr = pci_intr_string(pc, ih);
1516 	sc->tag = pci_intr_establish(pc, ih, IPL_NET,
1517 	    ixgbe_legacy_irq, sc, ifp->if_xname);
1518 	if (sc->tag == NULL) {
1519 		printf(": couldn't establish interrupt");
1520 		if (intrstr != NULL)
1521 			printf(" at %s", intrstr);
1522 		printf("\n");
1523 		return (ENXIO);
1524 	}
1525 	printf(": %s", intrstr);
1526 
1527 	/* For simplicity in the handlers */
1528 	sc->que_mask = IXGBE_EIMS_ENABLE_MASK;
1529 
1530 	return (0);
1531 }
1532 
1533 int
1534 ixgbe_allocate_pci_resources(struct ix_softc *sc)
1535 {
1536 	struct ixgbe_osdep	*os = &sc->osdep;
1537 	struct pci_attach_args	*pa = &os->os_pa;
1538 	int			 val;
1539 
1540 	val = pci_conf_read(pa->pa_pc, pa->pa_tag, PCIR_BAR(0));
1541 	if (PCI_MAPREG_TYPE(val) != PCI_MAPREG_TYPE_MEM &&
1542 	    PCI_MAPREG_TYPE(val) != PCI_MAPREG_MEM_TYPE_64BIT) {
1543 		printf(": mmba is not mem space\n");
1544 		return (ENXIO);
1545 	}
1546 
1547 	if (pci_mapreg_map(pa, PCIR_BAR(0), PCI_MAPREG_MEM_TYPE(val), 0,
1548 	    &os->os_memt, &os->os_memh, &os->os_membase, &os->os_memsize, 0)) {
1549 		printf(": cannot find mem space\n");
1550 		return (ENXIO);
1551 	}
1552 	sc->hw.hw_addr = (uint8_t *)os->os_membase;
1553 
1554 	/* Legacy defaults */
1555 	sc->num_queues = 1;
1556 	sc->hw.back = os;
1557 
1558 #ifdef notyet
1559 	/* Now setup MSI or MSI/X, return us the number of supported vectors. */
1560 	sc->msix = ixgbe_setup_msix(sc);
1561 #endif
1562 
1563 	return (0);
1564 }
1565 
1566 void
1567 ixgbe_free_pci_resources(struct ix_softc * sc)
1568 {
1569 	struct ixgbe_osdep	*os = &sc->osdep;
1570 	struct pci_attach_args	*pa = &os->os_pa;
1571 	struct ix_queue *que = sc->queues;
1572 	int i;
1573 
1574 
1575 	/* Release all msix queue resources: */
1576 	for (i = 0; i < sc->num_queues; i++, que++) {
1577 		if (que->tag)
1578 			pci_intr_disestablish(pa->pa_pc, que->tag);
1579 		que->tag = NULL;
1580 	}
1581 
1582 	if (sc->tag)
1583 		pci_intr_disestablish(pa->pa_pc, sc->tag);
1584 	sc->tag = NULL;
1585 	if (os->os_membase != 0)
1586 		bus_space_unmap(os->os_memt, os->os_memh, os->os_memsize);
1587 	os->os_membase = 0;
1588 
1589 	return;
1590 }
1591 
1592 /*********************************************************************
1593  *
1594  *  Setup networking device structure and register an interface.
1595  *
1596  **********************************************************************/
1597 void
1598 ixgbe_setup_interface(struct ix_softc *sc)
1599 {
1600 	struct ixgbe_hw *hw = &sc->hw;
1601 	struct ifnet   *ifp = &sc->arpcom.ac_if;
1602 	INIT_DEBUGOUT("ixgbe_setup_interface: begin");
1603 
1604 	strlcpy(ifp->if_xname, sc->dev.dv_xname, IFNAMSIZ);
1605 	ifp->if_baudrate = IF_Gbps(10);
1606 	ifp->if_softc = sc;
1607 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1608 	ifp->if_ioctl = ixgbe_ioctl;
1609 	ifp->if_start = ixgbe_start;
1610 	ifp->if_timer = 0;
1611 	ifp->if_watchdog = ixgbe_watchdog;
1612 	ifp->if_hardmtu = IXGBE_MAX_FRAME_SIZE -
1613 	    ETHER_HDR_LEN - ETHER_CRC_LEN;
1614 	IFQ_SET_MAXLEN(&ifp->if_snd, sc->num_tx_desc - 1);
1615 	IFQ_SET_READY(&ifp->if_snd);
1616 
1617 	m_clsetwms(ifp, MCLBYTES, 4, sc->num_rx_desc);
1618 
1619 	ifp->if_capabilities = IFCAP_VLAN_MTU;
1620 
1621 #if NVLAN > 0
1622 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
1623 #endif
1624 
1625 #ifdef IX_CSUM_OFFLOAD
1626 	ifp->if_capabilities |= IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4 |
1627 	    IFCAP_CSUM_IPv4;
1628 #endif
1629 
1630 	sc->max_frame_size =
1631 	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1632 
1633 	/*
1634 	 * Specify the media types supported by this sc and register
1635 	 * callbacks to update media and link information
1636 	 */
1637 	ifmedia_init(&sc->media, IFM_IMASK, ixgbe_media_change,
1638 		     ixgbe_media_status);
1639 	ifmedia_add(&sc->media, IFM_ETHER | sc->optics |
1640 	    IFM_FDX, 0, NULL);
1641 	if ((hw->device_id == PCI_PRODUCT_INTEL_82598AT) ||
1642 	    (hw->device_id == PCI_PRODUCT_INTEL_82598AT_DUAL)) {
1643 		ifmedia_add(&sc->media,
1644 		    IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
1645 		ifmedia_add(&sc->media,
1646 		    IFM_ETHER | IFM_1000_T, 0, NULL);
1647 	}
1648 	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1649 	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
1650 
1651 	if_attach(ifp);
1652 	ether_ifattach(ifp);
1653 
1654 
1655 	return;
1656 }
1657 
1658 void
1659 ixgbe_config_link(struct ix_softc *sc)
1660 {
1661 	uint32_t	autoneg, err = 0;
1662 	int		sfp, negotiate = FALSE;
1663 
1664 	switch (sc->hw.phy.type) {
1665 	case ixgbe_phy_sfp_avago:
1666 	case ixgbe_phy_sfp_ftl:
1667 	case ixgbe_phy_sfp_intel:
1668 	case ixgbe_phy_sfp_unknown:
1669 	case ixgbe_phy_sfp_passive_tyco:
1670 	case ixgbe_phy_sfp_passive_unknown:
1671 		sfp = 1;
1672 		break;
1673 	default:
1674 		sfp = 0;
1675 		break;
1676 	}
1677 
1678 	if (sfp) {
1679 		if (&sc->hw.phy.multispeed_fiber) {
1680 			sc->hw.mac.ops.setup_sfp(&sc->hw);
1681 			ixgbe_hw0(&sc->hw, enable_tx_laser);
1682 			/* XXX taskqueue_enqueue(sc->tq, &sc->msf_task); */
1683 		} /* else */
1684 			/* XXX taskqueue_enqueue(sc->tq, &sc->mod_task); */
1685 	} else {
1686 		if (sc->hw.mac.ops.check_link)
1687 			err = sc->hw.mac.ops.check_link(&sc->hw, &autoneg,
1688 			    &sc->link_up, FALSE);
1689 			if (err)
1690 				return;
1691 		if (sc->hw.mac.ops.setup_link)
1692 			err = sc->hw.mac.ops.setup_link(&sc->hw, autoneg,
1693 			    negotiate, sc->link_up);
1694 	}
1695 	return;
1696 }
1697 
1698 
1699 /********************************************************************
1700  * Manage DMA'able memory.
1701   *******************************************************************/
1702 int
1703 ixgbe_dma_malloc(struct ix_softc *sc, bus_size_t size,
1704 		struct ixgbe_dma_alloc *dma, int mapflags)
1705 {
1706 	struct ifnet		*ifp = &sc->arpcom.ac_if;
1707 	struct ixgbe_osdep	*os = &sc->osdep;
1708 	int			 r;
1709 
1710 	dma->dma_tag = os->os_pa.pa_dmat;
1711 	r = bus_dmamap_create(dma->dma_tag, size, 1,
1712 	    size, 0, BUS_DMA_NOWAIT, &dma->dma_map);
1713 	if (r != 0) {
1714 		printf("%s: ixgbe_dma_malloc: bus_dma_tag_create failed; "
1715 		       "error %u\n", ifp->if_xname, r);
1716 		goto fail_0;
1717 	}
1718 
1719 	r = bus_dmamem_alloc(dma->dma_tag, size, PAGE_SIZE, 0, &dma->dma_seg,
1720 	    1, &dma->dma_nseg, BUS_DMA_NOWAIT);
1721 	if (r != 0) {
1722 		printf("%s: ixgbe_dma_malloc: bus_dmamem_alloc failed; "
1723 		       "error %u\n", ifp->if_xname, r);
1724 		goto fail_1;
1725 	}
1726 
1727 	r = bus_dmamem_map(dma->dma_tag, &dma->dma_seg, dma->dma_nseg, size,
1728 	    &dma->dma_vaddr, BUS_DMA_NOWAIT);
1729 	if (r != 0) {
1730 		printf("%s: ixgbe_dma_malloc: bus_dmamem_map failed; "
1731 		       "error %u\n", ifp->if_xname, r);
1732 		goto fail_2;
1733 	}
1734 
1735 	r = bus_dmamap_load(dma->dma_tag, dma->dma_map,
1736 	    dma->dma_vaddr, size, NULL,
1737 	    mapflags | BUS_DMA_NOWAIT);
1738 	if (r != 0) {
1739 		printf("%s: ixgbe_dma_malloc: bus_dmamap_load failed; "
1740 		       "error %u\n", ifp->if_xname, r);
1741 		goto fail_3;
1742 	}
1743 
1744 	dma->dma_size = size;
1745 	return (0);
1746 fail_3:
1747 	bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size);
1748 fail_2:
1749 	bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
1750 fail_1:
1751 	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
1752 fail_0:
1753 	dma->dma_map = NULL;
1754 	dma->dma_tag = NULL;
1755 	return (r);
1756 }
1757 
1758 void
1759 ixgbe_dma_free(struct ix_softc *sc, struct ixgbe_dma_alloc *dma)
1760 {
1761 	if (dma->dma_tag == NULL)
1762 		return;
1763 
1764 	if (dma->dma_map != NULL) {
1765 		bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0,
1766 		    dma->dma_map->dm_mapsize,
1767 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1768 		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1769 		bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, dma->dma_size);
1770 		bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
1771 		bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
1772 		dma->dma_map = NULL;
1773 	}
1774 }
1775 
1776 
1777 /*********************************************************************
1778  *
1779  *  Allocate memory for the transmit and receive rings, and then
1780  *  the descriptors associated with each, called only once at attach.
1781  *
1782  **********************************************************************/
1783 int
1784 ixgbe_allocate_queues(struct ix_softc *sc)
1785 {
1786 	struct ifnet	*ifp = &sc->arpcom.ac_if;
1787 	struct ix_queue *que;
1788 	struct tx_ring *txr;
1789 	struct rx_ring *rxr;
1790 	int rsize, tsize;
1791 	int txconf = 0, rxconf = 0, i;
1792 
1793 	/* First allocate the top level queue structs */
1794 	if (!(sc->queues =
1795 	    (struct ix_queue *) malloc(sizeof(struct ix_queue) *
1796 	    sc->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
1797 		printf("%s: Unable to allocate queue memory\n", ifp->if_xname);
1798 		goto fail;
1799 	}
1800 
1801 	/* Then allocate the TX ring struct memory */
1802 	if (!(sc->tx_rings =
1803 	    (struct tx_ring *) malloc(sizeof(struct tx_ring) *
1804 	    sc->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
1805 		printf("%s: Unable to allocate TX ring memory\n", ifp->if_xname);
1806 		goto fail;
1807 	}
1808 
1809 	/* Next allocate the RX */
1810 	if (!(sc->rx_rings =
1811 	    (struct rx_ring *) malloc(sizeof(struct rx_ring) *
1812 	    sc->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
1813 		printf("%s: Unable to allocate RX ring memory\n", ifp->if_xname);
1814 		goto rx_fail;
1815 	}
1816 
1817 	/* For the ring itself */
1818 	tsize = roundup2(sc->num_tx_desc *
1819 	    sizeof(union ixgbe_adv_tx_desc), 4096);
1820 
1821 	/*
1822 	 * Now set up the TX queues, txconf is needed to handle the
1823 	 * possibility that things fail midcourse and we need to
1824 	 * undo memory gracefully
1825 	 */
1826 	for (i = 0; i < sc->num_queues; i++, txconf++) {
1827 		/* Set up some basics */
1828 		txr = &sc->tx_rings[i];
1829 		txr->sc = sc;
1830 		txr->me = i;
1831 
1832 		/* Initialize the TX side lock */
1833 		mtx_init(&txr->tx_mtx, IPL_NET);
1834 
1835 		if (ixgbe_dma_malloc(sc, tsize,
1836 		    &txr->txdma, BUS_DMA_NOWAIT)) {
1837 			printf("%s: Unable to allocate TX Descriptor memory\n",
1838 			    ifp->if_xname);
1839 			goto err_tx_desc;
1840 		}
1841 		txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
1842 		bzero((void *)txr->tx_base, tsize);
1843 	}
1844 
1845 	/*
1846 	 * Next the RX queues...
1847 	 */
1848 	rsize = roundup2(sc->num_rx_desc *
1849 	    sizeof(union ixgbe_adv_rx_desc), 4096);
1850 	for (i = 0; i < sc->num_queues; i++, rxconf++) {
1851 		rxr = &sc->rx_rings[i];
1852 		/* Set up some basics */
1853 		rxr->sc = sc;
1854 		rxr->me = i;
1855 
1856 		/* Initialize the TX side lock */
1857 		mtx_init(&rxr->rx_mtx, IPL_NET);
1858 
1859 		if (ixgbe_dma_malloc(sc, rsize,
1860 			&rxr->rxdma, BUS_DMA_NOWAIT)) {
1861 			printf("%s: Unable to allocate RxDescriptor memory\n",
1862 			    ifp->if_xname);
1863 			goto err_rx_desc;
1864 		}
1865 		rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
1866 		bzero((void *)rxr->rx_base, rsize);
1867 	}
1868 
1869 	/*
1870 	 * Finally set up the queue holding structs
1871 	 */
1872 	for (i = 0; i < sc->num_queues; i++) {
1873 		que = &sc->queues[i];
1874 		que->sc = sc;
1875 		que->txr = &sc->tx_rings[i];
1876 		que->rxr = &sc->rx_rings[i];
1877 	}
1878 
1879 	return (0);
1880 
1881 err_rx_desc:
1882 	for (rxr = sc->rx_rings; rxconf > 0; rxr++, rxconf--)
1883 		ixgbe_dma_free(sc, &rxr->rxdma);
1884 err_tx_desc:
1885 	for (txr = sc->tx_rings; txconf > 0; txr++, txconf--) {
1886 		ixgbe_dma_free(sc, &txr->txdma);
1887 	}
1888 	free(sc->rx_rings, M_DEVBUF);
1889 	sc->rx_rings = NULL;
1890 rx_fail:
1891 	free(sc->tx_rings, M_DEVBUF);
1892 	sc->tx_rings = NULL;
1893 fail:
1894 	return (ENOMEM);
1895 }
1896 
1897 /*********************************************************************
1898  *
1899  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
1900  *  the information needed to transmit a packet on the wire. This is
1901  *  called only once at attach, setup is done every reset.
1902  *
1903  **********************************************************************/
1904 int
1905 ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
1906 {
1907 	struct ix_softc 	*sc;
1908 	struct ixgbe_osdep	*os;
1909 	struct ifnet		*ifp;
1910 	struct ixgbe_tx_buf	*txbuf;
1911 	int			 error, i;
1912 	int			 max_segs;
1913 
1914 	sc = txr->sc;
1915 	os = &sc->osdep;
1916 	ifp = &sc->arpcom.ac_if;
1917 
1918 	if (sc->hw.mac.type == ixgbe_mac_82598EB)
1919 		max_segs = IXGBE_82598_SCATTER;
1920 	else
1921 		max_segs = IXGBE_82599_SCATTER;
1922 
1923 	if (!(txr->tx_buffers =
1924 	    (struct ixgbe_tx_buf *) malloc(sizeof(struct ixgbe_tx_buf) *
1925 	    sc->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
1926 		printf("%s: Unable to allocate tx_buffer memory\n",
1927 		    ifp->if_xname);
1928 		error = ENOMEM;
1929 		goto fail;
1930 	}
1931 	txr->txtag = txr->txdma.dma_tag;
1932 
1933         /* Create the descriptor buffer dma maps */
1934 	for (i = 0; i < sc->num_tx_desc; i++) {
1935 		txbuf = &txr->tx_buffers[i];
1936 		error = bus_dmamap_create(txr->txdma.dma_tag, IXGBE_TSO_SIZE,
1937 			    max_segs, PAGE_SIZE, 0,
1938 			    BUS_DMA_NOWAIT, &txbuf->map);
1939 
1940 		if (error != 0) {
1941 			printf("%s: Unable to create TX DMA map\n",
1942 			    ifp->if_xname);
1943 			goto fail;
1944 		}
1945 	}
1946 
1947 	return 0;
1948 fail:
1949 	return (error);
1950 }
1951 
1952 /*********************************************************************
1953  *
1954  *  Initialize a transmit ring.
1955  *
1956  **********************************************************************/
1957 int
1958 ixgbe_setup_transmit_ring(struct tx_ring *txr)
1959 {
1960 	struct ix_softc		*sc = txr->sc;
1961 	int			 error;
1962 
1963 	/* Now allocate transmit buffers for the ring */
1964 	if ((error = ixgbe_allocate_transmit_buffers(txr)) != 0)
1965 		return (error);
1966 
1967 	/* Clear the old ring contents */
1968 	bzero((void *)txr->tx_base,
1969 	      (sizeof(union ixgbe_adv_tx_desc)) * sc->num_tx_desc);
1970 
1971 	/* Reset indices */
1972 	txr->next_avail_desc = 0;
1973 	txr->next_to_clean = 0;
1974 
1975 	/* Set number of descriptors available */
1976 	txr->tx_avail = sc->num_tx_desc;
1977 
1978 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
1979 	    0, txr->txdma.dma_map->dm_mapsize,
1980 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1981 
1982 	return (0);
1983 }
1984 
1985 /*********************************************************************
1986  *
1987  *  Initialize all transmit rings.
1988  *
1989  **********************************************************************/
1990 int
1991 ixgbe_setup_transmit_structures(struct ix_softc *sc)
1992 {
1993 	struct tx_ring *txr = sc->tx_rings;
1994 	int		i, error;
1995 
1996 	for (i = 0; i < sc->num_queues; i++, txr++) {
1997 		if ((error = ixgbe_setup_transmit_ring(txr)) != 0)
1998 			goto fail;
1999 	}
2000 
2001 	return (0);
2002 fail:
2003 	ixgbe_free_transmit_structures(sc);
2004 	return (error);
2005 }
2006 
2007 /*********************************************************************
2008  *
2009  *  Enable transmit unit.
2010  *
2011  **********************************************************************/
2012 void
2013 ixgbe_initialize_transmit_units(struct ix_softc *sc)
2014 {
2015 	struct ifnet	*ifp = &sc->arpcom.ac_if;
2016 	struct tx_ring	*txr;
2017 	struct ixgbe_hw	*hw = &sc->hw;
2018 	int		 i;
2019 	uint64_t	 tdba;
2020 	uint32_t	 txctrl;
2021 
2022 	/* Setup the Base and Length of the Tx Descriptor Ring */
2023 
2024 	for (i = 0; i < sc->num_queues; i++) {
2025 		txr = &sc->tx_rings[i];
2026 
2027 		/* Setup descriptor base address */
2028 		tdba = txr->txdma.dma_map->dm_segs[0].ds_addr;
2029 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),
2030 		       (tdba & 0x00000000ffffffffULL));
2031 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
2032 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
2033 		    sc->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
2034 
2035 		/* Setup the HW Tx Head and Tail descriptor pointers */
2036 		IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
2037 		IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
2038 
2039 		/* Setup Transmit Descriptor Cmd Settings */
2040 		txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
2041 		txr->queue_status = IXGBE_QUEUE_IDLE;
2042 		txr->watchdog_timer = 0;
2043 
2044 		/* Disable Head Writeback */
2045 		switch (hw->mac.type) {
2046 		case ixgbe_mac_82598EB:
2047 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
2048 			break;
2049 		case ixgbe_mac_82599EB:
2050 		default:
2051 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
2052 			break;
2053 		}
2054 		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
2055 		switch (hw->mac.type) {
2056 		case ixgbe_mac_82598EB:
2057 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
2058 			break;
2059 		case ixgbe_mac_82599EB:
2060 		default:
2061 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), txctrl);
2062 			break;
2063 		}
2064 	}
2065 	ifp->if_timer = 0;
2066 
2067 	if (hw->mac.type == ixgbe_mac_82599EB) {
2068 		uint32_t dmatxctl, rttdcs;
2069 		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2070 		dmatxctl |= IXGBE_DMATXCTL_TE;
2071 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
2072 		/* Disable arbiter to set MTQC */
2073 		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2074 		rttdcs |= IXGBE_RTTDCS_ARBDIS;
2075 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2076 		IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
2077 		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
2078 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2079 	}
2080 
2081 	return;
2082 }
2083 
2084 /*********************************************************************
2085  *
2086  *  Free all transmit rings.
2087  *
2088  **********************************************************************/
2089 void
2090 ixgbe_free_transmit_structures(struct ix_softc *sc)
2091 {
2092 	struct tx_ring *txr = sc->tx_rings;
2093 	int		i;
2094 
2095 	for (i = 0; i < sc->num_queues; i++, txr++) {
2096 		ixgbe_free_transmit_buffers(txr);
2097 	}
2098 }
2099 
2100 /*********************************************************************
2101  *
2102  *  Free transmit ring related data structures.
2103  *
2104  **********************************************************************/
2105 void
2106 ixgbe_free_transmit_buffers(struct tx_ring *txr)
2107 {
2108 	struct ix_softc *sc = txr->sc;
2109 	struct ixgbe_tx_buf *tx_buffer;
2110 	int             i;
2111 
2112 	INIT_DEBUGOUT("free_transmit_ring: begin");
2113 
2114 	if (txr->tx_buffers == NULL)
2115 		return;
2116 
2117 	tx_buffer = txr->tx_buffers;
2118 	for (i = 0; i < sc->num_tx_desc; i++, tx_buffer++) {
2119 		if (tx_buffer->map != NULL && tx_buffer->map->dm_nsegs > 0) {
2120 			bus_dmamap_sync(txr->txdma.dma_tag, tx_buffer->map,
2121 			    0, tx_buffer->map->dm_mapsize,
2122 			    BUS_DMASYNC_POSTWRITE);
2123 			bus_dmamap_unload(txr->txdma.dma_tag,
2124 			    tx_buffer->map);
2125 		}
2126 		if (tx_buffer->m_head != NULL) {
2127 			m_freem(tx_buffer->m_head);
2128 			tx_buffer->m_head = NULL;
2129 		}
2130 		if (tx_buffer->map != NULL) {
2131 			bus_dmamap_destroy(txr->txdma.dma_tag,
2132 			    tx_buffer->map);
2133 			tx_buffer->map = NULL;
2134 		}
2135 	}
2136 
2137 	if (txr->tx_buffers != NULL)
2138 		free(txr->tx_buffers, M_DEVBUF);
2139 	txr->tx_buffers = NULL;
2140 	txr->txtag = NULL;
2141 }
2142 
2143 /*********************************************************************
2144  *
2145  *  Advanced Context Descriptor setup for VLAN or CSUM
2146  *
2147  **********************************************************************/
2148 
2149 int
2150 ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
2151 {
2152 	struct ix_softc *sc = txr->sc;
2153 	struct ifnet	*ifp = &sc->arpcom.ac_if;
2154 	struct ixgbe_adv_tx_context_desc *TXD;
2155 	struct ixgbe_tx_buf        *tx_buffer;
2156 	uint32_t vlan_macip_lens = 0, type_tucmd_mlhl = 0;
2157 	struct ip *ip;
2158 #ifdef notyet
2159 	struct ip6_hdr *ip6;
2160 #endif
2161 	uint8_t ipproto = 0;
2162 	int  ehdrlen, ip_hlen = 0;
2163 	uint16_t etype;
2164 	int offload = TRUE;
2165 	int ctxd = txr->next_avail_desc;
2166 #if NVLAN > 0
2167 	struct ether_vlan_header *eh;
2168 #else
2169 	struct ether_header *eh;
2170 #endif
2171 	uint16_t vtag = 0;
2172 
2173 	if ((ifp->if_capabilities & IFCAP_CSUM_IPv4) == 0)
2174 		offload = FALSE;
2175 
2176 	tx_buffer = &txr->tx_buffers[ctxd];
2177 	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
2178 
2179 	/*
2180 	 * In advanced descriptors the vlan tag must
2181 	 * be placed into the descriptor itself.
2182 	 */
2183 #if NVLAN > 0
2184 	if (mp->m_flags & M_VLANTAG) {
2185 		vtag = htole16(mp->m_pkthdr.ether_vtag);
2186 		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
2187 	} else
2188 #endif
2189 	if (offload == FALSE)
2190 		return FALSE;	/* No need for CTX */
2191 
2192 	/*
2193 	 * Determine where frame payload starts.
2194 	 * Jump over vlan headers if already present,
2195 	 * helpful for QinQ too.
2196 	 */
2197 #if NVLAN > 0
2198 	eh = mtod(mp, struct ether_vlan_header *);
2199 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2200 		etype = ntohs(eh->evl_proto);
2201 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2202 	} else {
2203 		etype = ntohs(eh->evl_encap_proto);
2204 		ehdrlen = ETHER_HDR_LEN;
2205 	}
2206 #else
2207 	eh = mtod(mp, struct ether_header *);
2208 	etype = ntohs(eh->ether_type);
2209 	ehdrlen = ETHER_HDR_LEN;
2210 #endif
2211 
2212 	/* Set the ether header length */
2213 	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
2214 
2215 	switch (etype) {
2216 	case ETHERTYPE_IP:
2217 		ip = (struct ip *)(mp->m_data + ehdrlen);
2218 		ip_hlen = ip->ip_hl << 2;
2219 		if (mp->m_len < ehdrlen + ip_hlen)
2220 			return FALSE; /* failure */
2221 		ipproto = ip->ip_p;
2222 		if (mp->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
2223 			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
2224 		break;
2225 #ifdef notyet
2226 	case ETHERTYPE_IPV6:
2227 		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
2228 		ip_hlen = sizeof(struct ip6_hdr);
2229 		if (mp->m_len < ehdrlen + ip_hlen)
2230 			return FALSE; /* failure */
2231 		ipproto = ip6->ip6_nxt;
2232 		if (mp->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
2233 			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
2234 		break;
2235 #endif
2236 	default:
2237 		offload = FALSE;
2238 		break;
2239 	}
2240 
2241 	vlan_macip_lens |= ip_hlen;
2242 	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
2243 
2244 	switch (ipproto) {
2245 	case IPPROTO_TCP:
2246 		if (mp->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
2247 			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2248 		break;
2249 	case IPPROTO_UDP:
2250 		if (mp->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
2251 			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
2252 		break;
2253 	}
2254 
2255 	/* Now copy bits into descriptor */
2256 	TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
2257 	TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
2258 	TXD->seqnum_seed = htole32(0);
2259 	TXD->mss_l4len_idx = htole32(0);
2260 
2261 	tx_buffer->m_head = NULL;
2262 	tx_buffer->eop_index = -1;
2263 
2264 	/* We've consumed the first desc, adjust counters */
2265 	if (++ctxd == sc->num_tx_desc)
2266 		ctxd = 0;
2267 	txr->next_avail_desc = ctxd;
2268 	--txr->tx_avail;
2269 
2270         return (offload);
2271 }
2272 
2273 #ifdef notyet
2274 /**********************************************************************
2275  *
2276  *  Setup work for hardware segmentation offload (TSO) on
2277  *  scs using advanced tx descriptors
2278  *
2279  **********************************************************************/
2280 int
2281 ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, uint32_t *paylen)
2282 {
2283 	struct ix_softc *sc = txr->sc;
2284 	struct ixgbe_adv_tx_context_desc *TXD;
2285 	struct ixgbe_tx_buf        *tx_buffer;
2286 	uint32_t vlan_macip_lens = 0, type_tucmd_mlhl = 0;
2287 	uint32_t mss_l4len_idx = 0;
2288 	int ctxd, ehdrlen,  hdrlen, ip_hlen, tcp_hlen;
2289 #if NVLAN > 0
2290 	uint16_t vtag = 0;
2291 	struct ether_vlan_header *eh;
2292 #else
2293 	struct ether_header *eh;
2294 #endif
2295 	struct ip *ip;
2296 	struct tcphdr *th;
2297 
2298 	if (((mp->m_pkthdr.csum_flags & CSUM_TSO) == 0) ||
2299 	    (mp->m_pkthdr.len <= IXGBE_TX_BUFFER_SIZE))
2300 	        return FALSE;
2301 
2302 	/*
2303 	 * Determine where frame payload starts.
2304 	 * Jump over vlan headers if already present
2305 	 */
2306 #if NVLAN > 0
2307 	eh = mtod(mp, struct ether_vlan_header *);
2308 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
2309 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2310 	else
2311 		ehdrlen = ETHER_HDR_LEN;
2312 #else
2313 	eh = mtod(mp, struct ether_header *);
2314 	ehdrlen = ETHER_HDR_LEN;
2315 #endif
2316 
2317         /* Ensure we have at least the IP+TCP header in the first mbuf. */
2318         if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
2319 		return FALSE;
2320 
2321 	ctxd = txr->next_avail_desc;
2322 	tx_buffer = &txr->tx_buffers[ctxd];
2323 	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
2324 
2325 	ip = (struct ip *)(mp->m_data + ehdrlen);
2326 	if (ip->ip_p != IPPROTO_TCP)
2327 		return FALSE;   /* 0 */
2328 	ip->ip_len = 0;
2329 	ip->ip_sum = 0;
2330 	ip_hlen = ip->ip_hl << 2;
2331 	th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
2332 	th->th_sum = in_pseudo(ip->ip_src.s_addr,
2333 	    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2334 	tcp_hlen = th->th_off << 2;
2335 	hdrlen = ehdrlen + ip_hlen + tcp_hlen;
2336 	/* This is used in the transmit desc in encap */
2337 	*paylen = mp->m_pkthdr.len - hdrlen;
2338 
2339 #if NVLAN > 0
2340 	/* VLAN MACLEN IPLEN */
2341 	if (mp->m_flags & M_VLANTAG) {
2342 		vtag = htole16(mp->m_pkthdr.ether_vtag);
2343 		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
2344 	}
2345 #endif
2346 
2347 	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
2348 	vlan_macip_lens |= ip_hlen;
2349 	TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
2350 
2351 	/* ADV DTYPE TUCMD */
2352 	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
2353 	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2354 	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
2355 	TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
2356 
2357 
2358 	/* MSS L4LEN IDX */
2359 	mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
2360 	mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
2361 	TXD->mss_l4len_idx = htole32(mss_l4len_idx);
2362 
2363 	TXD->seqnum_seed = htole32(0);
2364 	tx_buffer->m_head = NULL;
2365 
2366 	if (++ctxd == sc->num_tx_desc)
2367 		ctxd = 0;
2368 
2369 	txr->tx_avail--;
2370 	txr->next_avail_desc = ctxd;
2371 	return TRUE;
2372 }
2373 
2374 #else
2375 /* This makes it easy to keep the code common */
2376 int
2377 ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, uint32_t *paylen)
2378 {
2379 	return (FALSE);
2380 }
2381 #endif
2382 
2383 /**********************************************************************
2384  *
2385  *  Examine each tx_buffer in the used queue. If the hardware is done
2386  *  processing the packet then free associated resources. The
2387  *  tx_buffer is put back on the free queue.
2388  *
2389  **********************************************************************/
2390 int
2391 ixgbe_txeof(struct tx_ring *txr)
2392 {
2393 	struct ix_softc			*sc = txr->sc;
2394 	struct ifnet			*ifp = &sc->arpcom.ac_if;
2395 	uint32_t			 first, last, done, processed;
2396 	struct ixgbe_tx_buf		*tx_buffer;
2397 	struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;
2398 
2399 	if (txr->tx_avail == sc->num_tx_desc) {
2400 		txr->queue_status = IXGBE_QUEUE_IDLE;
2401 		return FALSE;
2402 	}
2403 
2404 	processed = 0;
2405 	first = txr->next_to_clean;
2406 	tx_buffer = &txr->tx_buffers[first];
2407 	/* For cleanup we just use legacy struct */
2408 	tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
2409 	last = tx_buffer->eop_index;
2410 	if (last == -1)
2411 		return FALSE;
2412 	eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
2413 
2414 	/*
2415 	 * Get the index of the first descriptor
2416 	 * BEYOND the EOP and call that 'done'.
2417 	 * I do this so the comparison in the
2418 	 * inner while loop below can be simple
2419 	 */
2420 	if (++last == sc->num_tx_desc) last = 0;
2421 	done = last;
2422 
2423         bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2424 	    0, txr->txdma.dma_map->dm_mapsize,
2425             BUS_DMASYNC_POSTREAD);
2426 
2427 	while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
2428 		/* We clean the range of the packet */
2429 		while (first != done) {
2430 			tx_desc->upper.data = 0;
2431 			tx_desc->lower.data = 0;
2432 			tx_desc->buffer_addr = 0;
2433 			++txr->tx_avail;
2434 			++processed;
2435 
2436 			if (tx_buffer->m_head) {
2437 				bus_dmamap_sync(txr->txdma.dma_tag,
2438 				    tx_buffer->map,
2439 				    0, tx_buffer->map->dm_mapsize,
2440 				    BUS_DMASYNC_POSTWRITE);
2441 				bus_dmamap_unload(txr->txdma.dma_tag,
2442 				    tx_buffer->map);
2443 				m_freem(tx_buffer->m_head);
2444 				tx_buffer->m_head = NULL;
2445 			}
2446 			tx_buffer->eop_index = -1;
2447 
2448 			if (++first == sc->num_tx_desc)
2449 				first = 0;
2450 
2451 			tx_buffer = &txr->tx_buffers[first];
2452 			tx_desc = (struct ixgbe_legacy_tx_desc *)
2453 			    &txr->tx_base[first];
2454 		}
2455 		++txr->packets;
2456 		++ifp->if_opackets;
2457 		/* See if there is more work now */
2458 		last = tx_buffer->eop_index;
2459 		if (last != -1) {
2460 			eop_desc =
2461 			    (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
2462 			/* Get next done point */
2463 			if (++last == sc->num_tx_desc) last = 0;
2464 			done = last;
2465 		} else
2466 			break;
2467 	}
2468 
2469 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2470 	    0, txr->txdma.dma_map->dm_mapsize,
2471 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2472 
2473 	txr->next_to_clean = first;
2474 
2475 	/*
2476 	 * If we have enough room, clear IFF_OACTIVE to tell the stack that
2477 	 * it is OK to send packets. If there are no pending descriptors,
2478 	 * clear the timeout. Otherwise, if some descriptors have been freed,
2479 	 * restart the timeout.
2480 	 */
2481 	if (txr->tx_avail > IXGBE_TX_CLEANUP_THRESHOLD) {
2482 		ifp->if_flags &= ~IFF_OACTIVE;
2483 
2484 		/* If all are clean turn off the timer */
2485 		if (txr->tx_avail == sc->num_tx_desc) {
2486 			ifp->if_timer = 0;
2487 			txr->watchdog_timer = 0;
2488 			return FALSE;
2489 		}
2490 		/* Some were cleaned, so reset timer */
2491 		else if (processed) {
2492 			ifp->if_timer = IXGBE_TX_TIMEOUT;
2493 			txr->watchdog_timer = IXGBE_TX_TIMEOUT;
2494 		}
2495 	}
2496 
2497 	return TRUE;
2498 }
2499 
2500 /*********************************************************************
2501  *
2502  *  Get a buffer from system mbuf buffer pool.
2503  *
2504  **********************************************************************/
2505 int
2506 ixgbe_get_buf(struct rx_ring *rxr, int i)
2507 {
2508 	struct ix_softc		*sc = rxr->sc;
2509 	struct ixgbe_rx_buf	*rxbuf;
2510 	struct mbuf		*mp, *mh = NULL;
2511 	int			error;
2512 	union ixgbe_adv_rx_desc	*rxdesc;
2513 	size_t			 dsize = sizeof(union ixgbe_adv_rx_desc);
2514 
2515 	rxbuf = &rxr->rx_buffers[i];
2516 	rxdesc = &rxr->rx_base[i];
2517 	if (rxbuf->m_head != NULL || rxbuf->m_pack) {
2518 		printf("%s: ixgbe_get_buf: slot %d already has an mbuf\n",
2519 		    sc->dev.dv_xname, i);
2520 		return (ENOBUFS);
2521 	}
2522 
2523 	/* needed in any case so prealocate since this one will fail for sure */
2524 	mp = MCLGETI(NULL, M_DONTWAIT, &sc->arpcom.ac_if, sc->rx_mbuf_sz);
2525 	if (!mp) {
2526 		sc->mbuf_packet_failed++;
2527 		return (ENOBUFS);
2528 	}
2529 
2530 	if (rxr->hdr_split == FALSE)
2531 		goto no_split;
2532 
2533 	mh = m_gethdr(M_DONTWAIT, MT_DATA);
2534 	if (mh == NULL) {
2535 		m_freem(mp);
2536 		return (ENOBUFS);
2537 	}
2538 
2539 	mh->m_pkthdr.len = mh->m_len = MHLEN;
2540 	mh->m_len = MHLEN;
2541 	/* always offset header buffers */
2542 	m_adj(mh, ETHER_ALIGN);
2543 
2544 	error = bus_dmamap_load_mbuf(rxr->rxdma.dma_tag, rxbuf->hmap,
2545 	    mh, BUS_DMA_NOWAIT);
2546 	if (error) {
2547 		m_freem(mp);
2548 		m_freem(mh);
2549 		return (error);
2550 	}
2551         bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->hmap,
2552 	    0, rxbuf->hmap->dm_mapsize, BUS_DMASYNC_PREREAD);
2553 	rxbuf->m_head = mh;
2554 
2555 	rxdesc->read.hdr_addr = htole64(rxbuf->hmap->dm_segs[0].ds_addr);
2556 
2557 no_split:
2558 	mp->m_len = mp->m_pkthdr.len = sc->rx_mbuf_sz;
2559 	/* only adjust if this is not a split header */
2560 	if (rxr->hdr_split == FALSE &&
2561 	    sc->max_frame_size <= (sc->rx_mbuf_sz - ETHER_ALIGN))
2562 		m_adj(mp, ETHER_ALIGN);
2563 
2564 	error = bus_dmamap_load_mbuf(rxr->rxdma.dma_tag, rxbuf->pmap,
2565 	    mp, BUS_DMA_NOWAIT);
2566 	if (error) {
2567 		if (mh) {
2568 			bus_dmamap_unload(rxr->rxdma.dma_tag, rxbuf->hmap);
2569 			rxbuf->m_head = NULL;
2570 			m_freem(mh);
2571 		}
2572 		m_freem(mp);
2573 		return (error);
2574 	}
2575 
2576         bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->pmap,
2577 	    0, rxbuf->pmap->dm_mapsize, BUS_DMASYNC_PREREAD);
2578 	rxbuf->m_pack = mp;
2579 
2580 	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2581 	    dsize * i, dsize, BUS_DMASYNC_POSTWRITE);
2582 
2583 	rxdesc->read.pkt_addr = htole64(rxbuf->pmap->dm_segs[0].ds_addr);
2584 
2585 	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2586 	    dsize * i, dsize, BUS_DMASYNC_PREWRITE);
2587 
2588 	rxr->rx_ndescs++;
2589 
2590         return (0);
2591 }
2592 
2593 /*********************************************************************
2594  *
2595  *  Allocate memory for rx_buffer structures. Since we use one
2596  *  rx_buffer per received packet, the maximum number of rx_buffer's
2597  *  that we'll need is equal to the number of receive descriptors
2598  *  that we've allocated.
2599  *
2600  **********************************************************************/
2601 int
2602 ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
2603 {
2604 	struct ix_softc		*sc = rxr->sc;
2605 	struct ifnet		*ifp = &sc->arpcom.ac_if;
2606 	struct ixgbe_rx_buf 	*rxbuf;
2607 	int             	i, bsize, error;
2608 
2609 	bsize = sizeof(struct ixgbe_rx_buf) * sc->num_rx_desc;
2610 	if (!(rxr->rx_buffers = (struct ixgbe_rx_buf *) malloc(bsize,
2611 	    M_DEVBUF, M_NOWAIT | M_ZERO))) {
2612 		printf("%s: Unable to allocate rx_buffer memory\n",
2613 		    ifp->if_xname);
2614 		error = ENOMEM;
2615 		goto fail;
2616 	}
2617 
2618 	rxbuf = rxr->rx_buffers;
2619 	for (i = 0; i < sc->num_rx_desc; i++, rxbuf++) {
2620 		error = bus_dmamap_create(rxr->rxdma.dma_tag, MSIZE, 1,
2621 		    MSIZE, 0, BUS_DMA_NOWAIT, &rxbuf->hmap);
2622 		if (error) {
2623 			printf("%s: Unable to create Head DMA map\n",
2624 			    ifp->if_xname);
2625 			goto fail;
2626 		}
2627 		error = bus_dmamap_create(rxr->rxdma.dma_tag, 16 * 1024, 1,
2628 		    16 * 1024, 0, BUS_DMA_NOWAIT, &rxbuf->pmap);
2629 		if (error) {
2630 			printf("%s: Unable to create Pack DMA map\n",
2631 			    ifp->if_xname);
2632 			goto fail;
2633 		}
2634 	}
2635 	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 0,
2636 	    rxr->rxdma.dma_map->dm_mapsize,
2637 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2638 
2639 	return (0);
2640 
2641 fail:
2642 	return (error);
2643 }
2644 
2645 /*********************************************************************
2646  *
2647  *  Initialize a receive ring and its buffers.
2648  *
2649  **********************************************************************/
2650 int
2651 ixgbe_setup_receive_ring(struct rx_ring *rxr)
2652 {
2653 	struct ix_softc		*sc = rxr->sc;
2654 	int			 rsize, error;
2655 
2656 	rsize = roundup2(sc->num_rx_desc *
2657 	    sizeof(union ixgbe_adv_rx_desc), 4096);
2658 	/* Clear the ring contents */
2659 	bzero((void *)rxr->rx_base, rsize);
2660 
2661 	if ((error = ixgbe_allocate_receive_buffers(rxr)) != 0)
2662 		return (error);
2663 
2664 	/* Setup our descriptor indices */
2665 	rxr->next_to_check = 0;
2666 	rxr->last_desc_filled = sc->num_rx_desc - 1;
2667 	rxr->rx_ndescs = 0;
2668 
2669 	ixgbe_rxfill(rxr);
2670 	if (rxr->rx_ndescs < 1) {
2671 		printf("%s: unable to fill any rx descriptors\n",
2672 		    sc->dev.dv_xname);
2673 		return (ENOBUFS);
2674 	}
2675 
2676 	return (0);
2677 }
2678 
2679 int
2680 ixgbe_rxfill(struct rx_ring *rxr)
2681 {
2682 	struct ix_softc *sc = rxr->sc;
2683 	int		 post = 0;
2684 	int		 i;
2685 
2686 	i = rxr->last_desc_filled;
2687 	while (rxr->rx_ndescs < sc->num_rx_desc) {
2688 		if (++i == sc->num_rx_desc)
2689 			i = 0;
2690 
2691 		if (ixgbe_get_buf(rxr, i) != 0)
2692 			break;
2693 
2694 		rxr->last_desc_filled = i;
2695 		post = 1;
2696 	}
2697 
2698 	return (post);
2699 }
2700 
2701 void
2702 ixgbe_rxrefill(void *xsc)
2703 {
2704 	struct ix_softc *sc = xsc;
2705 	struct ix_queue *que = sc->queues;
2706 	int s;
2707 
2708 	s = splnet();
2709 	if (ixgbe_rxfill(que->rxr)) {
2710 		/* Advance the Rx Queue "Tail Pointer" */
2711 		IXGBE_WRITE_REG(&sc->hw, IXGBE_RDT(que->rxr->me),
2712 		    que->rxr->last_desc_filled);
2713 	} else
2714 		timeout_add(&sc->rx_refill, 1);
2715 	splx(s);
2716 }
2717 
2718 /*********************************************************************
2719  *
2720  *  Initialize all receive rings.
2721  *
2722  **********************************************************************/
2723 int
2724 ixgbe_setup_receive_structures(struct ix_softc *sc)
2725 {
2726 	struct rx_ring *rxr = sc->rx_rings;
2727 	int i;
2728 
2729 	for (i = 0; i < sc->num_queues; i++, rxr++)
2730 		if (ixgbe_setup_receive_ring(rxr))
2731 			goto fail;
2732 
2733 	return (0);
2734 
2735 fail:
2736 	ixgbe_free_receive_structures(sc);
2737 	return (ENOBUFS);
2738 }
2739 
2740 /*********************************************************************
2741  *
2742  *  Enable receive unit.
2743  *
2744  **********************************************************************/
2745 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
2746 
2747 void
2748 ixgbe_initialize_receive_units(struct ix_softc *sc)
2749 {
2750 	struct	rx_ring	*rxr = sc->rx_rings;
2751 	struct ifnet   *ifp = &sc->arpcom.ac_if;
2752 	uint32_t	bufsz, rxctrl, fctrl, srrctl, rxcsum;
2753 	uint32_t	reta, mrqc = 0, hlreg;
2754 	uint32_t	random[10];
2755 	int		i;
2756 
2757 	/*
2758 	 * Make sure receives are disabled while
2759 	 * setting up the descriptor ring
2760 	 */
2761 	rxctrl = IXGBE_READ_REG(&sc->hw, IXGBE_RXCTRL);
2762 	IXGBE_WRITE_REG(&sc->hw, IXGBE_RXCTRL,
2763 	    rxctrl & ~IXGBE_RXCTRL_RXEN);
2764 
2765 	/* Enable broadcasts */
2766 	fctrl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
2767 	fctrl |= IXGBE_FCTRL_BAM;
2768 	fctrl |= IXGBE_FCTRL_DPF;
2769 	fctrl |= IXGBE_FCTRL_PMCF;
2770 	IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, fctrl);
2771 
2772 	/* Set for Jumbo Frames? */
2773 	hlreg = IXGBE_READ_REG(&sc->hw, IXGBE_HLREG0);
2774 	if (ifp->if_mtu > ETHERMTU)
2775 		hlreg |= IXGBE_HLREG0_JUMBOEN;
2776 	else
2777 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
2778 	IXGBE_WRITE_REG(&sc->hw, IXGBE_HLREG0, hlreg);
2779 
2780 	bufsz = sc->rx_mbuf_sz >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2781 
2782 	for (i = 0; i < sc->num_queues; i++, rxr++) {
2783 		uint64_t rdba = rxr->rxdma.dma_map->dm_segs[0].ds_addr;
2784 
2785 		/* Setup the Base and Length of the Rx Descriptor Ring */
2786 		IXGBE_WRITE_REG(&sc->hw, IXGBE_RDBAL(i),
2787 			       (rdba & 0x00000000ffffffffULL));
2788 		IXGBE_WRITE_REG(&sc->hw, IXGBE_RDBAH(i), (rdba >> 32));
2789 		IXGBE_WRITE_REG(&sc->hw, IXGBE_RDLEN(i),
2790 		    sc->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
2791 
2792 		/* Set up the SRRCTL register */
2793 		srrctl = bufsz;
2794 		if (rxr->hdr_split) {
2795 			/* Use a standard mbuf for the header */
2796 			srrctl |= ((IXGBE_RX_HDR <<
2797 			    IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT)
2798 			    & IXGBE_SRRCTL_BSIZEHDR_MASK);
2799 			srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
2800 		} else
2801 			srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2802 		IXGBE_WRITE_REG(&sc->hw, IXGBE_SRRCTL(i), srrctl);
2803 
2804 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
2805 		IXGBE_WRITE_REG(&sc->hw, IXGBE_RDH(i), 0);
2806 		IXGBE_WRITE_REG(&sc->hw, IXGBE_RDT(i), 0);
2807 	}
2808 
2809 	if (sc->hw.mac.type != ixgbe_mac_82598EB) {
2810 		uint32_t psrtype = IXGBE_PSRTYPE_TCPHDR |
2811 		              IXGBE_PSRTYPE_UDPHDR |
2812 			      IXGBE_PSRTYPE_IPV4HDR |
2813 			      IXGBE_PSRTYPE_IPV6HDR;
2814 		IXGBE_WRITE_REG(&sc->hw, IXGBE_PSRTYPE(0), psrtype);
2815 	}
2816 
2817 	rxcsum = IXGBE_READ_REG(&sc->hw, IXGBE_RXCSUM);
2818 
2819 	/* Setup RSS */
2820 	if (sc->num_queues > 1) {
2821 		int j;
2822 		reta = 0;
2823 		/* set up random bits */
2824 		arc4random_buf(&random, sizeof(random));
2825 
2826 		/* Set up the redirection table */
2827 		for (i = 0, j = 0; i < 128; i++, j++) {
2828 			if (j == sc->num_queues)
2829 				j = 0;
2830 			reta = (reta << 8) | (j * 0x11);
2831 			if ((i & 3) == 3)
2832 				IXGBE_WRITE_REG(&sc->hw, IXGBE_RETA(i >> 2), reta);
2833 		}
2834 
2835 		/* Now fill our hash function seeds */
2836 		for (i = 0; i < 10; i++)
2837 			IXGBE_WRITE_REG(&sc->hw, IXGBE_RSSRK(i), random[i]);
2838 
2839 		/* Perform hash on these packet types */
2840 		mrqc = IXGBE_MRQC_RSSEN
2841 		    | IXGBE_MRQC_RSS_FIELD_IPV4
2842 		    | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
2843 		    | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
2844 		    | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
2845 		    | IXGBE_MRQC_RSS_FIELD_IPV6_EX
2846 		    | IXGBE_MRQC_RSS_FIELD_IPV6
2847 		    | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
2848 		    | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
2849 		    | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2850 		IXGBE_WRITE_REG(&sc->hw, IXGBE_MRQC, mrqc);
2851 
2852 		/* RSS and RX IPP Checksum are mutually exclusive */
2853 		rxcsum |= IXGBE_RXCSUM_PCSD;
2854 	}
2855 
2856 	if (ifp->if_capabilities & IFCAP_CSUM_IPv4)
2857 		rxcsum |= IXGBE_RXCSUM_PCSD;
2858 
2859 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
2860 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
2861 
2862 	IXGBE_WRITE_REG(&sc->hw, IXGBE_RXCSUM, rxcsum);
2863 
2864 	return;
2865 }
2866 
2867 /*********************************************************************
2868  *
2869  *  Free all receive rings.
2870  *
2871  **********************************************************************/
2872 void
2873 ixgbe_free_receive_structures(struct ix_softc *sc)
2874 {
2875 	struct rx_ring *rxr = sc->rx_rings;
2876 	int		i;
2877 
2878 	for (i = 0; i < sc->num_queues; i++, rxr++) {
2879 		ixgbe_free_receive_buffers(rxr);
2880 	}
2881 }
2882 
2883 /*********************************************************************
2884  *
2885  *  Free receive ring data structures
2886  *
2887  **********************************************************************/
2888 void
2889 ixgbe_free_receive_buffers(struct rx_ring *rxr)
2890 {
2891 	struct ix_softc		*sc;
2892 	struct ixgbe_rx_buf	*rxbuf;
2893 	int			 i;
2894 
2895 	sc = rxr->sc;
2896 	if (rxr->rx_buffers != NULL) {
2897 		for (i = 0; i < sc->num_rx_desc; i++) {
2898 			rxbuf = &rxr->rx_buffers[i];
2899 			if (rxbuf->m_head != NULL) {
2900 				bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->hmap,
2901 				    0, rxbuf->hmap->dm_mapsize,
2902 				    BUS_DMASYNC_POSTREAD);
2903 				bus_dmamap_unload(rxr->rxdma.dma_tag,
2904 				    rxbuf->hmap);
2905 				m_freem(rxbuf->m_head);
2906 				rxbuf->m_head = NULL;
2907 			}
2908 			if (rxbuf->m_pack != NULL) {
2909 				bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->pmap,
2910 				    0, rxbuf->pmap->dm_mapsize,
2911 				    BUS_DMASYNC_POSTREAD);
2912 				bus_dmamap_unload(rxr->rxdma.dma_tag,
2913 				    rxbuf->pmap);
2914 				m_freem(rxbuf->m_pack);
2915 				rxbuf->m_pack = NULL;
2916 			}
2917 			bus_dmamap_destroy(rxr->rxdma.dma_tag, rxbuf->hmap);
2918 			bus_dmamap_destroy(rxr->rxdma.dma_tag, rxbuf->pmap);
2919 			rxbuf->hmap = NULL;
2920 			rxbuf->pmap = NULL;
2921 		}
2922 		free(rxr->rx_buffers, M_DEVBUF);
2923 		rxr->rx_buffers = NULL;
2924 	}
2925 }
2926 
2927 /*********************************************************************
2928  *
2929  *  This routine executes in interrupt context. It replenishes
2930  *  the mbufs in the descriptor and sends data which has been
2931  *  dma'ed into host memory to upper layer.
2932  *
2933  *  We loop at most count times if count is > 0, or until done if
2934  *  count < 0.
2935  *
2936  *********************************************************************/
2937 int
2938 ixgbe_rxeof(struct ix_queue *que, int count)
2939 {
2940 	struct ix_softc 	*sc = que->sc;
2941 	struct rx_ring		*rxr = que->rxr;
2942 	struct ifnet   		*ifp = &sc->arpcom.ac_if;
2943 	struct mbuf    		*mh, *mp, *sendmp;
2944 	uint8_t		    	 eop = 0;
2945 	uint16_t		 hlen, plen, hdr, vtag;
2946 	uint32_t		 staterr, ptype;
2947 	struct ixgbe_rx_buf	*rxbuf, *nxbuf;
2948 	union ixgbe_adv_rx_desc	*rxdesc;
2949 	size_t			 dsize = sizeof(union ixgbe_adv_rx_desc);
2950 	int			 i, nextp;
2951 
2952 	if (!ISSET(ifp->if_flags, IFF_RUNNING))
2953 		return FALSE;
2954 
2955 	i = rxr->next_to_check;
2956 	while (count != 0 && rxr->rx_ndescs > 0) {
2957 		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2958 		    dsize * i, dsize, BUS_DMASYNC_POSTREAD);
2959 
2960 		rxdesc = &rxr->rx_base[i];
2961 		staterr = letoh32(rxdesc->wb.upper.status_error);
2962 		if (!ISSET(staterr, IXGBE_RXD_STAT_DD)) {
2963 			bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2964 			    dsize * i, dsize,
2965 			    BUS_DMASYNC_PREREAD);
2966 			break;
2967 		}
2968 
2969 		/* Zero out the receive descriptors status  */
2970 		rxdesc->wb.upper.status_error = 0;
2971 		rxbuf = &rxr->rx_buffers[i];
2972 
2973 		/* pull the mbuf off the ring */
2974 		bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->hmap, 0,
2975 		    rxbuf->hmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
2976 		bus_dmamap_unload(rxr->rxdma.dma_tag, rxbuf->hmap);
2977 		bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->pmap, 0,
2978 		    rxbuf->pmap->dm_mapsize, BUS_DMASYNC_POSTREAD);
2979 		bus_dmamap_unload(rxr->rxdma.dma_tag, rxbuf->pmap);
2980 
2981 		mh = rxbuf->m_head;
2982 		mp = rxbuf->m_pack;
2983 		plen = letoh16(rxdesc->wb.upper.length);
2984 		ptype = letoh32(rxdesc->wb.lower.lo_dword.data) &
2985 		    IXGBE_RXDADV_PKTTYPE_MASK;
2986 		hdr = letoh16(rxdesc->wb.lower.lo_dword.hs_rss.hdr_info);
2987 		vtag = letoh16(rxdesc->wb.upper.vlan);
2988 		eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
2989 
2990 		if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
2991 			ifp->if_ierrors++;
2992 			sc->dropped_pkts++;
2993 
2994 			if (rxbuf->fmp) {
2995 				m_freem(rxbuf->fmp);
2996 				rxbuf->fmp = NULL;
2997 			}
2998 
2999 			m_freem(mh);
3000 			m_freem(mp);
3001 			rxbuf->m_head = NULL;
3002 			rxbuf->m_pack = NULL;
3003 			goto next_desc;
3004 		}
3005 
3006 		if (mp == NULL) {
3007 			panic("%s: ixgbe_rxeof: NULL mbuf in slot %d "
3008 			    "(nrx %d, filled %d)", sc->dev.dv_xname,
3009 			    i, rxr->rx_ndescs,
3010 			    rxr->last_desc_filled);
3011 		}
3012 
3013 		/* XXX ixgbe_realign() STRICT_ALIGN */
3014 		/* Currently no HW RSC support of 82599 */
3015 		if (!eop) {
3016 			/*
3017 			 * Figure out the next descriptor of this frame.
3018 			 */
3019 			nextp = i + 1;
3020 			if (nextp == sc->num_rx_desc)
3021 				nextp = 0;
3022 			nxbuf = &rxr->rx_buffers[nextp];
3023 			/* prefetch(nxbuf); */
3024 		}
3025 		/*
3026 		 * The header mbuf is ONLY used when header
3027 		 * split is enabled, otherwise we get normal
3028 		 * behavior, ie, both header and payload
3029 		 * are DMA'd into the payload buffer.
3030 		 *
3031 		 * Rather than using the fmp/lmp global pointers
3032 		 * we now keep the head of a packet chain in the
3033 		 * buffer struct and pass this along from one
3034 		 * descriptor to the next, until we get EOP.
3035 		 */
3036 		if (rxr->hdr_split && (rxbuf->fmp == NULL)) {
3037 			/* This must be an initial descriptor */
3038 			hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
3039 			    IXGBE_RXDADV_HDRBUFLEN_SHIFT;
3040 			if (hlen > IXGBE_RX_HDR)
3041 				hlen = IXGBE_RX_HDR;
3042 			mh->m_len = hlen;
3043 			mh->m_pkthdr.len = mh->m_len;
3044 			rxbuf->m_head = NULL;
3045 			/*
3046 			 * Check the payload length, this could be zero if
3047 			 * its a small packet.
3048 			 */
3049 			if (plen > 0) {
3050 				mp->m_len = plen;
3051 				mp->m_flags &= ~M_PKTHDR;
3052 				mh->m_next = mp;
3053 				mh->m_pkthdr.len += mp->m_len;
3054 				rxbuf->m_pack = NULL;
3055 				rxr->rx_split_packets++;
3056 			} else {
3057 				m_freem(mp);
3058 				rxbuf->m_pack = NULL;
3059 			}
3060 			/* Now create the forward chain. */
3061 			if (eop == 0) {
3062 				/* stash the chain head */
3063 				nxbuf->fmp = mh;
3064 				/* Make forward chain */
3065 				if (plen)
3066 					mp->m_next = nxbuf->m_pack;
3067 				else
3068 					mh->m_next = nxbuf->m_pack;
3069 			} else {
3070 				/* Singlet, prepare to send */
3071 				sendmp = mh;
3072 #if NVLAN > 0
3073 				if (staterr & IXGBE_RXD_STAT_VP) {
3074 					sendmp->m_pkthdr.ether_vtag = vtag;
3075 					sendmp->m_flags |= M_VLANTAG;
3076 				}
3077 #endif
3078 			}
3079 		} else {
3080 			/*
3081 			 * Either no header split, or a
3082 			 * secondary piece of a fragmented
3083 			 * split packet.
3084 			 */
3085 			mp->m_len = plen;
3086 			/*
3087 			 * See if there is a stored head
3088 			 * that determines what we are
3089 			 */
3090 			sendmp = rxbuf->fmp;
3091 			rxbuf->m_pack = rxbuf->fmp = NULL;
3092 
3093 			if (sendmp != NULL) /* secondary frag */
3094 				sendmp->m_pkthdr.len += mp->m_len;
3095 			else {
3096 				 /* first desc of a non-ps chain */
3097 				 sendmp = mp;
3098 				 sendmp->m_pkthdr.len = mp->m_len;
3099 #if NVLAN > 0
3100 				if (staterr & IXGBE_RXD_STAT_VP) {
3101 					sendmp->m_pkthdr.ether_vtag = vtag;
3102 					sendmp->m_flags |= M_VLANTAG;
3103 				}
3104 #endif
3105 			}
3106 			/* Pass the head pointer on */
3107 			if (eop == 0) {
3108 				nxbuf->fmp = sendmp;
3109 				sendmp = NULL;
3110 				mp->m_next = nxbuf->m_pack;
3111 			}
3112 		}
3113 		rxr->rx_ndescs--;
3114 		/* Sending this frame? */
3115 		if (eop) {
3116 			m_cluncount(sendmp, 1);
3117 
3118 			sendmp->m_pkthdr.rcvif = ifp;
3119 			ifp->if_ipackets++;
3120 			rxr->rx_packets++;
3121 			/* capture data for AIM */
3122 			rxr->bytes += sendmp->m_pkthdr.len;
3123 			rxr->rx_bytes += sendmp->m_pkthdr.len;
3124 
3125 			ixgbe_rx_checksum(staterr, sendmp, ptype);
3126 
3127 #if NBPFILTER > 0
3128 			if (ifp->if_bpf)
3129 				bpf_mtap_ether(ifp->if_bpf, sendmp,
3130 				    BPF_DIRECTION_IN);
3131 #endif
3132 
3133 			ether_input_mbuf(ifp, sendmp);
3134 		}
3135 next_desc:
3136 		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
3137 		    dsize * i, dsize,
3138 		    BUS_DMASYNC_PREREAD);
3139 
3140 		/* Advance our pointers to the next descriptor. */
3141 		if (++i == sc->num_rx_desc)
3142 			i = 0;
3143 	}
3144 	rxr->next_to_check = i;
3145 
3146 	if (!(staterr & IXGBE_RXD_STAT_DD))
3147 		return FALSE;
3148 
3149 	return TRUE;
3150 }
3151 
3152 /*********************************************************************
3153  *
3154  *  Verify that the hardware indicated that the checksum is valid.
3155  *  Inform the stack about the status of checksum so that stack
3156  *  doesn't spend time verifying the checksum.
3157  *
3158  *********************************************************************/
3159 void
3160 ixgbe_rx_checksum(uint32_t staterr, struct mbuf * mp, uint32_t ptype)
3161 {
3162 	uint16_t status = (uint16_t) staterr;
3163 	uint8_t  errors = (uint8_t) (staterr >> 24);
3164 
3165 	if (status & IXGBE_RXD_STAT_IPCS) {
3166 		/* Did it pass? */
3167 		if (!(errors & IXGBE_RXD_ERR_IPE)) {
3168 			/* IP Checksum Good */
3169 			mp->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
3170 		} else
3171 			mp->m_pkthdr.csum_flags = 0;
3172 	}
3173 
3174 	if (status & IXGBE_RXD_STAT_L4CS) {
3175 		/* Did it pass? */
3176 		if (!(errors & IXGBE_RXD_ERR_TCPE))
3177 			mp->m_pkthdr.csum_flags |=
3178 				M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
3179 	}
3180 
3181 }
3182 
3183 void
3184 ixgbe_setup_vlan_hw_support(struct ix_softc *sc)
3185 {
3186 	uint32_t	ctrl;
3187 	int		i;
3188 
3189 	/*
3190 	 * A soft reset zero's out the VFTA, so
3191 	 * we need to repopulate it now.
3192 	 */
3193 	for (i = 0; i < IXGBE_VFTA_SIZE; i++)
3194 		if (sc->shadow_vfta[i] != 0)
3195 			IXGBE_WRITE_REG(&sc->hw, IXGBE_VFTA(i),
3196 			    sc->shadow_vfta[i]);
3197 
3198 	ctrl = IXGBE_READ_REG(&sc->hw, IXGBE_VLNCTRL);
3199 #if 0
3200 	/* Enable the Filter Table if enabled */
3201 	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
3202 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
3203 		ctrl |= IXGBE_VLNCTRL_VFE;
3204 	}
3205 #endif
3206 	if (sc->hw.mac.type == ixgbe_mac_82598EB)
3207 		ctrl |= IXGBE_VLNCTRL_VME;
3208 	IXGBE_WRITE_REG(&sc->hw, IXGBE_VLNCTRL, ctrl);
3209 
3210 	/* On 82599 the VLAN enable is per/queue in RXDCTL */
3211 	if (sc->hw.mac.type != ixgbe_mac_82598EB)
3212 		for (i = 0; i < sc->num_queues; i++) {
3213 			ctrl = IXGBE_READ_REG(&sc->hw, IXGBE_RXDCTL(i));
3214 			ctrl |= IXGBE_RXDCTL_VME;
3215 			IXGBE_WRITE_REG(&sc->hw, IXGBE_RXDCTL(i), ctrl);
3216 		}
3217 
3218 }
3219 
3220 void
3221 ixgbe_enable_intr(struct ix_softc *sc)
3222 {
3223 	struct ixgbe_hw *hw = &sc->hw;
3224 	struct ix_queue *que = sc->queues;
3225 	uint32_t mask = IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE;
3226 	int i;
3227 
3228 	/* Enable Fan Failure detection */
3229 	if (hw->device_id == IXGBE_DEV_ID_82598AT)
3230 		    mask |= IXGBE_EIMS_GPI_SDP1;
3231 	else {
3232 		mask |= IXGBE_EIMS_ECC;
3233 		mask |= IXGBE_EIMS_GPI_SDP1;
3234 		mask |= IXGBE_EIMS_GPI_SDP2;
3235 	}
3236 
3237 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3238 
3239 	/* With RSS we use auto clear */
3240 	if (sc->msix) {
3241 		mask = IXGBE_EIMS_ENABLE_MASK;
3242 		/* Dont autoclear Link */
3243 		mask &= ~IXGBE_EIMS_OTHER;
3244 		mask &= ~IXGBE_EIMS_LSC;
3245 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAC, mask);
3246 	}
3247 
3248 	/*
3249 	 * Now enable all queues, this is done separately to
3250 	 * allow for handling the extended (beyond 32) MSIX
3251 	 * vectors that can be used by 82599
3252 	 */
3253 	for (i = 0; i < sc->num_queues; i++, que++)
3254 		ixgbe_enable_queue(sc, que->msix);
3255 
3256 	IXGBE_WRITE_FLUSH(hw);
3257 
3258 	return;
3259 }
3260 
3261 void
3262 ixgbe_disable_intr(struct ix_softc *sc)
3263 {
3264 	if (sc->msix)
3265 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAC, 0);
3266 	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
3267 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, ~0);
3268 	} else {
3269 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, 0xFFFF0000);
3270 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(0), ~0);
3271 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(1), ~0);
3272 	}
3273 	IXGBE_WRITE_FLUSH(&sc->hw);
3274 	return;
3275 }
3276 
3277 uint16_t
3278 ixgbe_read_pci_cfg(struct ixgbe_hw *hw, uint32_t reg)
3279 {
3280 	struct pci_attach_args	*pa;
3281 	uint32_t value;
3282 	int high = 0;
3283 
3284 	if (reg & 0x2) {
3285 		high = 1;
3286 		reg &= ~0x2;
3287 	}
3288 	pa = &((struct ixgbe_osdep *)hw->back)->os_pa;
3289 	value = pci_conf_read(pa->pa_pc, pa->pa_tag, reg);
3290 
3291 	if (high)
3292 		value >>= 16;
3293 
3294 	return (value & 0xffff);
3295 }
3296 
3297 void
3298 ixgbe_write_pci_cfg(struct ixgbe_hw *hw, uint32_t reg, uint16_t value)
3299 {
3300 	struct pci_attach_args	*pa;
3301 	uint32_t rv;
3302 	int high = 0;
3303 
3304 	/* Need to do read/mask/write... because 16 vs 32 bit!!! */
3305 	if (reg & 0x2) {
3306 		high = 1;
3307 		reg &= ~0x2;
3308 	}
3309 	pa = &((struct ixgbe_osdep *)hw->back)->os_pa;
3310 	rv = pci_conf_read(pa->pa_pc, pa->pa_tag, reg);
3311 	if (!high)
3312 		rv = (rv & 0xffff0000) | value;
3313 	else
3314 		rv = (rv & 0xffff) | ((uint32_t)value << 16);
3315 	pci_conf_write(pa->pa_pc, pa->pa_tag, reg, rv);
3316 }
3317 
3318 /*
3319  * Setup the correct IVAR register for a particular MSIX interrupt
3320  *   (yes this is all very magic and confusing :)
3321  *  - entry is the register array entry
3322  *  - vector is the MSIX vector for this queue
3323  *  - type is RX/TX/MISC
3324  */
3325 void
3326 ixgbe_set_ivar(struct ix_softc *sc, uint8_t entry, uint8_t vector, int8_t type)
3327 {
3328 	struct ixgbe_hw *hw = &sc->hw;
3329 	uint32_t ivar, index;
3330 
3331 	vector |= IXGBE_IVAR_ALLOC_VAL;
3332 
3333 	switch (hw->mac.type) {
3334 
3335 	case ixgbe_mac_82598EB:
3336 		if (type == -1)
3337 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3338 		else
3339 			entry += (type * 64);
3340 		index = (entry >> 2) & 0x1F;
3341 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3342 		ivar &= ~(0xFF << (8 * (entry & 0x3)));
3343 		ivar |= (vector << (8 * (entry & 0x3)));
3344 		IXGBE_WRITE_REG(&sc->hw, IXGBE_IVAR(index), ivar);
3345 		break;
3346 
3347 	case ixgbe_mac_82599EB:
3348 		if (type == -1) { /* MISC IVAR */
3349 			index = (entry & 1) * 8;
3350 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3351 			ivar &= ~(0xFF << index);
3352 			ivar |= (vector << index);
3353 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3354 		} else {	/* RX/TX IVARS */
3355 			index = (16 * (entry & 1)) + (8 * type);
3356 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3357 			ivar &= ~(0xFF << index);
3358 			ivar |= (vector << index);
3359 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3360 		}
3361 
3362 	default:
3363 		break;
3364 	}
3365 }
3366 
3367 void
3368 ixgbe_configure_ivars(struct ix_softc *sc)
3369 {
3370 	struct ix_queue *que = sc->queues;
3371 	uint32_t newitr;
3372 	int i;
3373 
3374 #if 0
3375 	if (ixgbe_max_interrupt_rate > 0)
3376 		newitr = (8000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3377 	else
3378 #endif
3379 		newitr = 0;
3380 
3381         for (i = 0; i < sc->num_queues; i++, que++) {
3382 		/* First the RX queue entry */
3383 		ixgbe_set_ivar(sc, i, que->msix, 0);
3384 		/* ... and the TX */
3385 		ixgbe_set_ivar(sc, i, que->msix, 1);
3386 		/* Set an Initial EITR value */
3387 		IXGBE_WRITE_REG(&sc->hw,
3388 		    IXGBE_EITR(que->msix), newitr);
3389 	}
3390 
3391 	/* For the Link interrupt */
3392         ixgbe_set_ivar(sc, 1, sc->linkvec, -1);
3393 }
3394 
3395 /*
3396  * ixgbe_sfp_probe - called in the local timer to
3397  * determine if a port had optics inserted.
3398  */
3399 int
3400 ixgbe_sfp_probe(struct ix_softc *sc)
3401 {
3402 	int result = FALSE;
3403 
3404 	if ((sc->hw.phy.type == ixgbe_phy_nl) &&
3405 	    (sc->hw.phy.sfp_type == ixgbe_sfp_type_not_present)) {
3406 		int32_t  ret = sc->hw.phy.ops.identify_sfp(&sc->hw);
3407 		if (ret)
3408 			goto out;
3409 		ret = sc->hw.phy.ops.reset(&sc->hw);
3410 		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3411 			printf("%s: Unsupported SFP+ module detected!",
3412 			    sc->dev.dv_xname);
3413 			goto out;
3414 		}
3415 		/* We now have supported optics */
3416 		sc->sfp_probe = FALSE;
3417 		/* Set the optics type so system reports correctly */
3418 		ixgbe_setup_optics(sc);
3419 		result = TRUE;
3420 	}
3421 out:
3422 	return (result);
3423 }
3424 
3425 /**********************************************************************
3426  *
3427  *  Update the board statistics counters.
3428  *
3429  **********************************************************************/
3430 void
3431 ixgbe_update_stats_counters(struct ix_softc *sc)
3432 {
3433 	struct ifnet   *ifp = &sc->arpcom.ac_if;
3434 	struct ixgbe_hw *hw = &sc->hw;
3435 	uint32_t  missed_rx = 0, bprc, lxon, lxoff, total;
3436 	int	i;
3437 
3438 	sc->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
3439 
3440 	for (i = 0; i < 8; i++) {
3441 		int mp;
3442 		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
3443 		missed_rx += mp;
3444         	sc->stats.mpc[i] += mp;
3445 		if (hw->mac.type == ixgbe_mac_82598EB)
3446 			sc->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
3447 	}
3448 
3449 	/* Hardware workaround, gprc counts missed packets */
3450 	sc->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
3451 	sc->stats.gprc -= missed_rx;
3452 
3453 	sc->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
3454 	sc->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
3455 	sc->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
3456 
3457 	/*
3458 	 * Workaround: mprc hardware is incorrectly counting
3459 	 * broadcasts, so for now we subtract those.
3460 	 */
3461 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
3462 	sc->stats.bprc += bprc;
3463 	sc->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
3464 	sc->stats.mprc -= bprc;
3465 
3466 	sc->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
3467 	sc->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
3468 	sc->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
3469 	sc->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
3470 	sc->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
3471 	sc->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
3472 	sc->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
3473 	sc->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
3474 
3475 	sc->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
3476 	sc->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
3477 
3478 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
3479 	sc->stats.lxontxc += lxon;
3480 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
3481 	sc->stats.lxofftxc += lxoff;
3482 	total = lxon + lxoff;
3483 
3484 	sc->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
3485 	sc->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
3486 	sc->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
3487 	sc->stats.gptc -= total;
3488 	sc->stats.mptc -= total;
3489 	sc->stats.ptc64 -= total;
3490 	sc->stats.gotc -= total * ETHER_MIN_LEN;
3491 
3492 	sc->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
3493 	sc->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
3494 	sc->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
3495 	sc->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
3496 	sc->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
3497 	sc->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
3498 	sc->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
3499 	sc->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
3500 	sc->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
3501 	sc->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
3502 
3503 #if 0
3504 	/* Fill out the OS statistics structure */
3505 	ifp->if_ipackets = sc->stats.gprc;
3506 	ifp->if_opackets = sc->stats.gptc;
3507 	ifp->if_ibytes = sc->stats.gorc;
3508 	ifp->if_obytes = sc->stats.gotc;
3509 	ifp->if_imcasts = sc->stats.mprc;
3510 #endif
3511 	ifp->if_collisions = 0;
3512 	ifp->if_oerrors = sc->watchdog_events;
3513 	ifp->if_ierrors = missed_rx + sc->stats.crcerrs + sc->stats.rlec;
3514 }
3515 
3516 #ifdef IX_DEBUG
3517 /**********************************************************************
3518  *
3519  *  This routine is called only when ixgbe_display_debug_stats is enabled.
3520  *  This routine provides a way to take a look at important statistics
3521  *  maintained by the driver and hardware.
3522  *
3523  **********************************************************************/
3524 void
3525 ixgbe_print_hw_stats(struct ix_softc * sc)
3526 {
3527 	struct ifnet   *ifp = &sc->arpcom.ac_if;;
3528 
3529 	printf("%s: mbuf alloc failed %lu, mbuf cluster failed %lu, "
3530 	    "missed pkts %llu, rx len errs %llu, crc errs %llu, "
3531 	    "dropped pkts %lu, watchdog timeouts %ld, "
3532 	    "XON rx %llu, XON tx %llu, XOFF rx %llu, XOFF tx %llu, "
3533 	    "total pkts rx %llu, good pkts rx %llu, good pkts tx %llu, "
3534 	    "tso tx %lu\n",
3535 	    ifp->if_xname,
3536 	    sc->mbuf_alloc_failed,
3537 	    sc->mbuf_cluster_failed,
3538 	    (long long)sc->stats.mpc[0],
3539 	    (long long)sc->stats.roc + (long long)sc->stats.ruc,
3540 	    (long long)sc->stats.crcerrs,
3541 	    sc->dropped_pkts,
3542 	    sc->watchdog_events,
3543 	    (long long)sc->stats.lxonrxc,
3544 	    (long long)sc->stats.lxontxc,
3545 	    (long long)sc->stats.lxoffrxc,
3546 	    (long long)sc->stats.lxofftxc,
3547 	    (long long)sc->stats.tpr,
3548 	    (long long)sc->stats.gprc,
3549 	    (long long)sc->stats.gptc,
3550 	    sc->tso_tx);
3551 }
3552 #endif
3553