xref: /openbsd-src/sys/dev/pci/if_ix.c (revision 43003dfe3ad45d1698bed8a37f2b0f5b14f20d4f)
1 /*	$OpenBSD: if_ix.c,v 1.30 2009/08/13 14:24:47 jasper Exp $	*/
2 
3 /******************************************************************************
4 
5   Copyright (c) 2001-2008, Intel Corporation
6   All rights reserved.
7 
8   Redistribution and use in source and binary forms, with or without
9   modification, are permitted provided that the following conditions are met:
10 
11    1. Redistributions of source code must retain the above copyright notice,
12       this list of conditions and the following disclaimer.
13 
14    2. Redistributions in binary form must reproduce the above copyright
15       notice, this list of conditions and the following disclaimer in the
16       documentation and/or other materials provided with the distribution.
17 
18    3. Neither the name of the Intel Corporation nor the names of its
19       contributors may be used to endorse or promote products derived from
20       this software without specific prior written permission.
21 
22   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32   POSSIBILITY OF SUCH DAMAGE.
33 
34 ******************************************************************************/
35 /*$FreeBSD: src/sys/dev/ixgbe/ixgbe.c,v 1.5 2008/05/16 18:46:30 jfv Exp $*/
36 
37 #include <dev/pci/if_ix.h>
38 
39 /*********************************************************************
40  *  Driver version
41  *********************************************************************/
42 
43 #define IXGBE_DRIVER_VERSION	"1.4.4"
44 
45 /*********************************************************************
46  *  PCI Device ID Table
47  *
48  *  Used by probe to select devices to load on
49  *********************************************************************/
50 
51 const struct pci_matchid ixgbe_devices[] = {
52 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AF_DUAL },
53 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AF },
54 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AT_DUAL },
55 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598EB_CX4 },
56 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598EB_CX4_DUAL },
57 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598EB_XF_LR },
58 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AT }
59 };
60 
61 /*********************************************************************
62  *  Function prototypes
63  *********************************************************************/
64 int	ixgbe_probe(struct device *, void *, void *);
65 void	ixgbe_attach(struct device *, struct device *, void *);
66 int	ixgbe_detach(struct device *, int);
67 void	ixgbe_power(int, void *);
68 void	ixgbe_start(struct ifnet *);
69 void	ixgbe_start_locked(struct tx_ring *, struct ifnet *);
70 int	ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
71 void	ixgbe_watchdog(struct ifnet *);
72 void	ixgbe_init(void *);
73 void	ixgbe_stop(void *);
74 void	ixgbe_media_status(struct ifnet *, struct ifmediareq *);
75 int	ixgbe_media_change(struct ifnet *);
76 void	ixgbe_identify_hardware(struct ix_softc *);
77 int	ixgbe_allocate_pci_resources(struct ix_softc *);
78 int	ixgbe_allocate_legacy(struct ix_softc *);
79 int	ixgbe_allocate_queues(struct ix_softc *);
80 void	ixgbe_free_pci_resources(struct ix_softc *);
81 void	ixgbe_local_timer(void *);
82 int	ixgbe_hardware_init(struct ix_softc *);
83 void	ixgbe_setup_interface(struct ix_softc *);
84 
85 int	ixgbe_allocate_transmit_buffers(struct tx_ring *);
86 int	ixgbe_setup_transmit_structures(struct ix_softc *);
87 int	ixgbe_setup_transmit_ring(struct tx_ring *);
88 void	ixgbe_initialize_transmit_units(struct ix_softc *);
89 void	ixgbe_free_transmit_structures(struct ix_softc *);
90 void	ixgbe_free_transmit_buffers(struct tx_ring *);
91 
92 int	ixgbe_allocate_receive_buffers(struct rx_ring *);
93 int	ixgbe_setup_receive_structures(struct ix_softc *);
94 int	ixgbe_setup_receive_ring(struct rx_ring *);
95 void	ixgbe_initialize_receive_units(struct ix_softc *);
96 void	ixgbe_free_receive_structures(struct ix_softc *);
97 void	ixgbe_free_receive_buffers(struct rx_ring *);
98 int	ixgbe_rxfill(struct rx_ring *);
99 
100 void	ixgbe_enable_intr(struct ix_softc *);
101 void	ixgbe_disable_intr(struct ix_softc *);
102 void	ixgbe_update_stats_counters(struct ix_softc *);
103 int	ixgbe_txeof(struct tx_ring *);
104 int	ixgbe_rxeof(struct rx_ring *, int);
105 void	ixgbe_rx_checksum(struct ix_softc *, uint32_t, struct mbuf *);
106 void	ixgbe_set_promisc(struct ix_softc *);
107 void	ixgbe_disable_promisc(struct ix_softc *);
108 void	ixgbe_set_multi(struct ix_softc *);
109 #ifdef IX_DEBUG
110 void	ixgbe_print_hw_stats(struct ix_softc *);
111 #endif
112 void	ixgbe_update_link_status(struct ix_softc *);
113 int	ixgbe_get_buf(struct rx_ring *, int);
114 int	ixgbe_encap(struct tx_ring *, struct mbuf *);
115 void	ixgbe_enable_hw_vlans(struct ix_softc * sc);
116 int	ixgbe_dma_malloc(struct ix_softc *, bus_size_t,
117 		    struct ixgbe_dma_alloc *, int);
118 void	ixgbe_dma_free(struct ix_softc *, struct ixgbe_dma_alloc *);
119 int	ixgbe_tx_ctx_setup(struct tx_ring *, struct mbuf *);
120 int	ixgbe_tso_setup(struct tx_ring *, struct mbuf *, uint32_t *);
121 void	ixgbe_set_ivar(struct ix_softc *, uint16_t, uint8_t);
122 void	ixgbe_configure_ivars(struct ix_softc *);
123 uint8_t	*ixgbe_mc_array_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);
124 
125 /* Legacy (single vector interrupt handler */
126 int	ixgbe_legacy_irq(void *);
127 
128 #ifndef NO_82598_A0_SUPPORT
129 void	desc_flip(void *);
130 #endif
131 
132 /*********************************************************************
133  *  OpenBSD Device Interface Entry Points
134  *********************************************************************/
135 
136 struct cfdriver ix_cd = {
137 	NULL, "ix", DV_IFNET
138 };
139 
140 struct cfattach ix_ca = {
141 	sizeof(struct ix_softc), ixgbe_probe, ixgbe_attach, ixgbe_detach
142 };
143 
144 /* Total number of Interfaces - need for config sanity check */
145 static int ixgbe_total_ports;
146 
147 /*********************************************************************
148  *  Device identification routine
149  *
150  *  ixgbe_probe determines if the driver should be loaded on
151  *  sc based on PCI vendor/device id of the sc.
152  *
153  *  return 0 on success, positive on failure
154  *********************************************************************/
155 
156 int
157 ixgbe_probe(struct device *parent, void *match, void *aux)
158 {
159 	INIT_DEBUGOUT("ixgbe_probe: begin");
160 
161 	return (pci_matchbyid((struct pci_attach_args *)aux, ixgbe_devices,
162 	    sizeof(ixgbe_devices)/sizeof(ixgbe_devices[0])));
163 }
164 
165 /*********************************************************************
166  *  Device initialization routine
167  *
168  *  The attach entry point is called when the driver is being loaded.
169  *  This routine identifies the type of hardware, allocates all resources
170  *  and initializes the hardware.
171  *
172  *  return 0 on success, positive on failure
173  *********************************************************************/
174 
175 void
176 ixgbe_attach(struct device *parent, struct device *self, void *aux)
177 {
178 	struct pci_attach_args	*pa = (struct pci_attach_args *)aux;
179 	struct ix_softc		*sc = (struct ix_softc *)self;
180 	int			 error = 0;
181 	uint32_t			 ctrl_ext;
182 
183 	INIT_DEBUGOUT("ixgbe_attach: begin");
184 
185 	sc->osdep.os_sc = sc;
186 	sc->osdep.os_pa = pa;
187 
188 	/* Core Lock Init*/
189 	mtx_init(&sc->core_mtx, IPL_NET);
190 
191 	/* Set up the timer callout */
192 	timeout_set(&sc->timer, ixgbe_local_timer, sc);
193 
194 	/* Determine hardware revision */
195 	ixgbe_identify_hardware(sc);
196 
197 	/* Indicate to RX setup to use Jumbo Clusters */
198 	sc->bigbufs = FALSE;
199 	sc->num_tx_desc = DEFAULT_TXD;
200 	sc->num_rx_desc = DEFAULT_RXD;
201 	sc->rx_process_limit = 100;	// XXX
202 
203 	/* Do base PCI setup - map BAR0 */
204 	if (ixgbe_allocate_pci_resources(sc))
205 		goto err_out;
206 
207 	/* Allocate our TX/RX Queues */
208 	if (ixgbe_allocate_queues(sc))
209 		goto err_out;
210 
211 	/* Initialize the shared code */
212 	sc->hw.mac.type = ixgbe_mac_82598EB;
213 	if (ixgbe_init_ops_82598(&sc->hw) != 0) {
214 		printf(": failed to init the 82598EB\n");
215 		goto err_late;
216 	}
217 
218 	/* Initialize the hardware */
219 	if (ixgbe_hardware_init(sc)) {
220 		printf(": unable to initialize the hardware\n");
221 		goto err_late;
222 	}
223 
224 	/* XXX sc->msix > 1 && ixgbe_allocate_msix() */
225 	error = ixgbe_allocate_legacy(sc);
226 	if (error)
227 		goto err_late;
228 
229 	/* Setup OS specific network interface */
230 	ixgbe_setup_interface(sc);
231 
232 	/* Initialize statistics */
233 	ixgbe_update_stats_counters(sc);
234 
235 	/* let hardware know driver is loaded */
236 	ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
237 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
238 	IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
239 
240 	sc->powerhook = powerhook_establish(ixgbe_power, sc);
241 
242 	printf(", address %s\n", ether_sprintf(sc->hw.mac.addr));
243 
244 	INIT_DEBUGOUT("ixgbe_attach: end");
245 	return;
246 
247 err_late:
248 	ixgbe_free_transmit_structures(sc);
249 	ixgbe_free_receive_structures(sc);
250 err_out:
251 	ixgbe_free_pci_resources(sc);
252 }
253 
254 /*********************************************************************
255  *  Device removal routine
256  *
257  *  The detach entry point is called when the driver is being removed.
258  *  This routine stops the sc and deallocates all the resources
259  *  that were allocated for driver operation.
260  *
261  *  return 0 on success, positive on failure
262  *********************************************************************/
263 
264 int
265 ixgbe_detach(struct device *self, int flags)
266 {
267 	struct ix_softc *sc = (struct ix_softc *)self;
268 	struct ifnet *ifp = &sc->arpcom.ac_if;
269 	uint32_t	ctrl_ext;
270 
271 	INIT_DEBUGOUT("ixgbe_detach: begin");
272 
273 	ixgbe_stop(sc);
274 
275 	/* let hardware know driver is unloading */
276 	ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
277 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
278 	IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
279 
280 	ether_ifdetach(ifp);
281 	if_detach(ifp);
282 
283 	timeout_del(&sc->timer);
284 	ixgbe_free_pci_resources(sc);
285 
286 	ixgbe_free_transmit_structures(sc);
287 	ixgbe_free_receive_structures(sc);
288 
289 	return (0);
290 }
291 
292 void
293 ixgbe_power(int why, void *arg)
294 {
295 	struct ix_softc *sc = (struct ix_softc *)arg;
296 	struct ifnet *ifp;
297 
298 	if (why == PWR_RESUME) {
299 		ifp = &sc->arpcom.ac_if;
300 		if (ifp->if_flags & IFF_UP)
301 			ixgbe_init(sc);
302 	}
303 }
304 
305 /*********************************************************************
306  *  Transmit entry point
307  *
308  *  ixgbe_start is called by the stack to initiate a transmit.
309  *  The driver will remain in this routine as long as there are
310  *  packets to transmit and transmit resources are available.
311  *  In case resources are not available stack is notified and
312  *  the packet is requeued.
313  **********************************************************************/
314 
315 void
316 ixgbe_start_locked(struct tx_ring *txr, struct ifnet * ifp)
317 {
318 	struct mbuf  		*m_head;
319 	struct ix_softc		*sc = txr->sc;
320 	int			 post = 0;
321 
322 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
323 		return;
324 
325 	if (!sc->link_active)
326 		return;
327 
328 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0,
329 	    txr->txdma.dma_map->dm_mapsize,
330 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
331 
332 	for (;;) {
333 		IFQ_POLL(&ifp->if_snd, m_head);
334 		if (m_head == NULL)
335 			break;
336 
337 		if (ixgbe_encap(txr, m_head)) {
338 			ifp->if_flags |= IFF_OACTIVE;
339 			break;
340 		}
341 
342 		IFQ_DEQUEUE(&ifp->if_snd, m_head);
343 
344 #if NBPFILTER > 0
345 		if (ifp->if_bpf)
346 			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
347 #endif
348 
349 		/* Set timeout in case hardware has problems transmitting */
350 		txr->watchdog_timer = IXGBE_TX_TIMEOUT;
351 		ifp->if_timer = IXGBE_TX_TIMEOUT;
352 
353 		post = 1;
354 	}
355 
356         bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
357 	    0, txr->txdma.dma_map->dm_mapsize,
358             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
359 
360 	/*
361 	 * Advance the Transmit Descriptor Tail (Tdt), this tells the
362 	 * hardware that this frame is available to transmit.
363 	 */
364 	if (post)
365 		IXGBE_WRITE_REG(&sc->hw, IXGBE_TDT(txr->me),
366 		    txr->next_avail_tx_desc);
367 }
368 
369 
370 void
371 ixgbe_start(struct ifnet *ifp)
372 {
373 	struct ix_softc *sc = ifp->if_softc;
374 	struct tx_ring	*txr = sc->tx_rings;
375 	uint32_t queue = 0;
376 
377 #if 0
378 	/*
379 	 * This is really just here for testing
380 	 * TX multiqueue, ultimately what is
381 	 * needed is the flow support in the stack
382 	 * and appropriate logic here to deal with
383 	 * it. -jfv
384 	 */
385 	if (sc->num_tx_queues > 1)
386 		queue = (curcpu % sc->num_tx_queues);
387 #endif
388 
389 	txr = &sc->tx_rings[queue];
390 
391 	if (ifp->if_flags & IFF_RUNNING)
392 		ixgbe_start_locked(txr, ifp);
393 
394 	return;
395 }
396 
397 /*********************************************************************
398  *  Ioctl entry point
399  *
400  *  ixgbe_ioctl is called when the user wants to configure the
401  *  interface.
402  *
403  *  return 0 on success, positive on failure
404  **********************************************************************/
405 
406 int
407 ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
408 {
409 	struct ix_softc	*sc = ifp->if_softc;
410 	struct ifaddr	*ifa = (struct ifaddr *) data;
411 	struct ifreq	*ifr = (struct ifreq *) data;
412 	int		s, error = 0;
413 
414 	s = splnet();
415 
416 	switch (command) {
417 	case SIOCSIFADDR:
418 		IOCTL_DEBUGOUT("ioctl: SIOCxIFADDR (Get/Set Interface Addr)");
419 		ifp->if_flags |= IFF_UP;
420 		if (!(ifp->if_flags & IFF_RUNNING))
421 			ixgbe_init(sc);
422 #ifdef INET
423 		if (ifa->ifa_addr->sa_family == AF_INET)
424 			arp_ifinit(&sc->arpcom, ifa);
425 #endif
426 		break;
427 
428 	case SIOCSIFMTU:
429 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
430 		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ifp->if_hardmtu)
431 			error = EINVAL;
432 		else if (ifp->if_mtu != ifr->ifr_mtu) {
433 			ifp->if_mtu = ifr->ifr_mtu;
434 			sc->max_frame_size =
435 				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
436 			ixgbe_init(sc);
437 		}
438 		break;
439 
440 	case SIOCSIFFLAGS:
441 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
442 		if (ifp->if_flags & IFF_UP) {
443 			if ((ifp->if_flags & IFF_RUNNING)) {
444 				if ((ifp->if_flags ^ sc->if_flags) &
445 				    (IFF_PROMISC | IFF_ALLMULTI)) {
446 					ixgbe_disable_promisc(sc);
447 					ixgbe_set_promisc(sc);
448                                 }
449 			} else
450 				ixgbe_init(sc);
451 		} else
452 			if (ifp->if_flags & IFF_RUNNING)
453 				ixgbe_stop(sc);
454 		sc->if_flags = ifp->if_flags;
455 		break;
456 
457 	case SIOCSIFMEDIA:
458 	case SIOCGIFMEDIA:
459 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
460 		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
461 		break;
462 
463 	default:
464 		error = ether_ioctl(ifp, &sc->arpcom, command, data);
465 	}
466 
467 	if (error == ENETRESET) {
468 		if (ifp->if_flags & IFF_RUNNING) {
469 			ixgbe_disable_intr(sc);
470 			ixgbe_set_multi(sc);
471 			ixgbe_enable_intr(sc);
472 		}
473 		error = 0;
474 	}
475 
476 	splx(s);
477 	return (error);
478 }
479 
480 /*********************************************************************
481  *  Watchdog entry point
482  *
483  *  This routine is called by the local timer
484  *  to detect hardware hangs .
485  *
486  **********************************************************************/
487 
488 void
489 ixgbe_watchdog(struct ifnet * ifp)
490 {
491 	struct ix_softc *sc = (struct ix_softc *)ifp->if_softc;
492 	struct tx_ring *txr = sc->tx_rings;
493 	struct ixgbe_hw *hw = &sc->hw;
494 	int		tx_hang = FALSE;
495 	int		i;
496 
497         /*
498          * The timer is set to 5 every time ixgbe_start() queues a packet.
499          * Then ixgbe_txeof() keeps resetting to 5 as long as it cleans at
500          * least one descriptor.
501          * Finally, anytime all descriptors are clean the timer is
502          * set to 0.
503          */
504 	for (i = 0; i < sc->num_tx_queues; i++, txr++) {
505         	if (txr->watchdog_timer == 0 || --txr->watchdog_timer)
506                 	continue;
507 		else {
508 			tx_hang = TRUE;
509 			break;
510 		}
511 	}
512 	if (tx_hang == FALSE)
513 		return;
514 
515 	/*
516 	 * If we are in this routine because of pause frames, then don't
517 	 * reset the hardware.
518 	 */
519 	if (IXGBE_READ_REG(hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF) {
520 		for (i = 0; i < sc->num_tx_queues; i++, txr++)
521 			txr->watchdog_timer = IXGBE_TX_TIMEOUT;
522 		ifp->if_timer = IXGBE_TX_TIMEOUT;
523 		return;
524 	}
525 
526 
527 	printf("%s: Watchdog timeout -- resetting\n", ifp->if_xname);
528 	for (i = 0; i < sc->num_tx_queues; i++, txr++) {
529 		printf("%s: Queue(%d) tdh = %d, hw tdt = %d\n", ifp->if_xname, i,
530 		    IXGBE_READ_REG(hw, IXGBE_TDH(i)),
531 		    IXGBE_READ_REG(hw, IXGBE_TDT(i)));
532 		printf("%s: TX(%d) desc avail = %d, Next TX to Clean = %d\n", ifp->if_xname,
533 		    i, txr->tx_avail, txr->next_tx_to_clean);
534 	}
535 	ifp->if_flags &= ~IFF_RUNNING;
536 	sc->watchdog_events++;
537 
538 	ixgbe_init(sc);
539 	return;
540 }
541 
542 /*********************************************************************
543  *  Init entry point
544  *
545  *  This routine is used in two ways. It is used by the stack as
546  *  init entry point in network interface structure. It is also used
547  *  by the driver as a hw/sw initialization routine to get to a
548  *  consistent state.
549  *
550  *  return 0 on success, positive on failure
551  **********************************************************************/
552 #define IXGBE_MHADD_MFS_SHIFT 16
553 
554 void
555 ixgbe_init(void *arg)
556 {
557 	struct ix_softc	*sc = (struct ix_softc *)arg;
558 	struct ifnet	*ifp = &sc->arpcom.ac_if;
559 	uint32_t	 txdctl, rxdctl, mhadd, gpie;
560 	int		 i, s;
561 
562 	INIT_DEBUGOUT("ixgbe_init: begin");
563 
564 	s = splnet();
565 
566 	ixgbe_stop(sc);
567 
568 	/* Get the latest mac address, User can use a LAA */
569 	bcopy(sc->arpcom.ac_enaddr, sc->hw.mac.addr,
570 	      IXGBE_ETH_LENGTH_OF_ADDRESS);
571 	ixgbe_hw(&sc->hw, set_rar, 0, sc->hw.mac.addr, 0, 1);
572 	sc->hw.addr_ctrl.rar_used_count = 1;
573 
574 	/* Initialize the hardware */
575 	if (ixgbe_hardware_init(sc)) {
576 		printf("%s: Unable to initialize the hardware\n",
577 		    ifp->if_xname);
578 		splx(s);
579 		return;
580 	}
581 
582 	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
583 		ixgbe_enable_hw_vlans(sc);
584 
585 	/* Prepare transmit descriptors and buffers */
586 	if (ixgbe_setup_transmit_structures(sc)) {
587 		printf("%s: Could not setup transmit structures\n",
588 		    ifp->if_xname);
589 		ixgbe_stop(sc);
590 		splx(s);
591 		return;
592 	}
593 
594 	ixgbe_initialize_transmit_units(sc);
595 
596 	/* Setup Multicast table */
597 	ixgbe_set_multi(sc);
598 
599 	/*
600 	 * If we are resetting MTU smaller than 2K
601 	 * drop to small RX buffers
602 	 */
603 	if (sc->max_frame_size <= MCLBYTES)
604 		sc->bigbufs = FALSE;
605 
606 	/* Prepare receive descriptors and buffers */
607 	if (ixgbe_setup_receive_structures(sc)) {
608 		printf("%s: Could not setup receive structures\n", ifp->if_xname);
609 		ixgbe_stop(sc);
610 		splx(s);
611 		return;
612 	}
613 
614 	/* Configure RX settings */
615 	ixgbe_initialize_receive_units(sc);
616 
617 	gpie = IXGBE_READ_REG(&sc->hw, IXGBE_GPIE);
618 	/* Enable Fan Failure Interrupt */
619 	if (sc->hw.phy.media_type == ixgbe_media_type_copper)
620 		gpie |= IXGBE_SDP1_GPIEN;
621 	if (sc->msix) {
622 		/* Enable Enhanced MSIX mode */
623 		gpie |= IXGBE_GPIE_MSIX_MODE;
624 		gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
625 		    IXGBE_GPIE_OCD;
626 	}
627 	IXGBE_WRITE_REG(&sc->hw, IXGBE_GPIE, gpie);
628 
629 	/* Set MTU size */
630 	if (ifp->if_mtu > ETHERMTU) {
631 		mhadd = IXGBE_READ_REG(&sc->hw, IXGBE_MHADD);
632 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
633 		mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
634 		IXGBE_WRITE_REG(&sc->hw, IXGBE_MHADD, mhadd);
635 	}
636 
637 	/* Now enable all the queues */
638 
639 	for (i = 0; i < sc->num_tx_queues; i++) {
640 		txdctl = IXGBE_READ_REG(&sc->hw, IXGBE_TXDCTL(i));
641 		txdctl |= IXGBE_TXDCTL_ENABLE;
642 		/* Set WTHRESH to 8, burst writeback */
643 		txdctl |= (8 << 16);
644 		IXGBE_WRITE_REG(&sc->hw, IXGBE_TXDCTL(i), txdctl);
645 	}
646 
647 	for (i = 0; i < sc->num_rx_queues; i++) {
648 		rxdctl = IXGBE_READ_REG(&sc->hw, IXGBE_RXDCTL(i));
649 		/* PTHRESH set to 32 */
650 		rxdctl |= 0x0020;
651 		rxdctl |= IXGBE_RXDCTL_ENABLE;
652 		IXGBE_WRITE_REG(&sc->hw, IXGBE_RXDCTL(i), rxdctl);
653 	}
654 
655 	timeout_add_sec(&sc->timer, 1);
656 
657 	/* Set up MSI/X routing */
658 	ixgbe_configure_ivars(sc);
659 
660 	ixgbe_enable_intr(sc);
661 
662 	/* Now inform the stack we're ready */
663 	ifp->if_flags |= IFF_RUNNING;
664 	ifp->if_flags &= ~IFF_OACTIVE;
665 
666 	splx(s);
667 }
668 
669 /*********************************************************************
670  *
671  *  Legacy Interrupt Service routine
672  *
673  **********************************************************************/
674 
675 int
676 ixgbe_legacy_irq(void *arg)
677 {
678 	struct ix_softc	*sc = (struct ix_softc *)arg;
679 	struct ifnet	*ifp = &sc->arpcom.ac_if;
680 	uint32_t	 reg_eicr;
681 	struct tx_ring	*txr = sc->tx_rings;
682 	struct rx_ring	*rxr = sc->rx_rings;
683 	struct ixgbe_hw	*hw = &sc->hw;
684 	int		 claimed = 0, refill = 0;
685 
686 	for (;;) {
687 		reg_eicr = IXGBE_READ_REG(&sc->hw, IXGBE_EICR);
688 		if (reg_eicr == 0)
689 			break;
690 
691 		claimed = 1;
692 		refill = 0;
693 
694 		if (ifp->if_flags & IFF_RUNNING) {
695 			ixgbe_rxeof(rxr, -1);
696 			ixgbe_txeof(txr);
697 			refill = 1;
698 		}
699 
700 		/* Check for fan failure */
701 		if ((hw->phy.media_type == ixgbe_media_type_copper) &&
702 		    (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
703 	                printf("%s: \nCRITICAL: FAN FAILURE!! "
704 			    "REPLACE IMMEDIATELY!!\n", ifp->if_xname);
705 			IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMS,
706 			    IXGBE_EICR_GPI_SDP1);
707 		}
708 
709 		/* Link status change */
710 		if (reg_eicr & IXGBE_EICR_LSC) {
711 			timeout_del(&sc->timer);
712 		        ixgbe_update_link_status(sc);
713 			timeout_add_sec(&sc->timer, 1);
714 		}
715 
716 		if (refill && ixgbe_rxfill(rxr)) {
717 			/* Advance the Rx Queue "Tail Pointer" */
718 			IXGBE_WRITE_REG(&sc->hw, IXGBE_RDT(rxr->me),
719 			    rxr->last_rx_desc_filled);
720 		}
721 	}
722 
723 	if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
724 		ixgbe_start_locked(txr, ifp);
725 
726 	return (claimed);
727 }
728 
729 /*********************************************************************
730  *
731  *  Media Ioctl callback
732  *
733  *  This routine is called whenever the user queries the status of
734  *  the interface using ifconfig.
735  *
736  **********************************************************************/
737 void
738 ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
739 {
740 	struct ix_softc *sc = ifp->if_softc;
741 
742 	ifmr->ifm_active = IFM_ETHER;
743 	ifmr->ifm_status = IFM_AVALID;
744 
745 	INIT_DEBUGOUT("ixgbe_media_status: begin");
746 	ixgbe_update_link_status(sc);
747 
748 	if (LINK_STATE_IS_UP(ifp->if_link_state)) {
749 		ifmr->ifm_status |= IFM_ACTIVE;
750 
751 		switch (sc->link_speed) {
752 		case IXGBE_LINK_SPEED_1GB_FULL:
753 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
754 			break;
755 		case IXGBE_LINK_SPEED_10GB_FULL:
756 			ifmr->ifm_active |= sc->optics | IFM_FDX;
757 			break;
758 		}
759 	}
760 }
761 
762 /*********************************************************************
763  *
764  *  Media Ioctl callback
765  *
766  *  This routine is called when the user changes speed/duplex using
767  *  media/mediopt option with ifconfig.
768  *
769  **********************************************************************/
770 int
771 ixgbe_media_change(struct ifnet * ifp)
772 {
773 	/* ignore */
774 	return (0);
775 }
776 
777 /*********************************************************************
778  *
779  *  This routine maps the mbufs to tx descriptors.
780  *    WARNING: while this code is using an MQ style infrastructure,
781  *    it would NOT work as is with more than 1 queue.
782  *
783  *  return 0 on success, positive on failure
784  **********************************************************************/
785 
786 int
787 ixgbe_encap(struct tx_ring *txr, struct mbuf *m_head)
788 {
789 	struct ix_softc *sc = txr->sc;
790 	uint32_t	olinfo_status = 0, cmd_type_len = 0;
791 	int             i, j, error;
792 	int		first, last = 0;
793 	bus_dmamap_t	map;
794 	struct ixgbe_tx_buf *txbuf, *txbuf_mapped;
795 	union ixgbe_adv_tx_desc *txd = NULL;
796 #ifdef notyet
797 	uint32_t	paylen = 0;
798 #endif
799 
800 	/* Basic descriptor defines */
801         cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
802         cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
803 
804 #if NVLAN > 0
805 	if (m_head->m_flags & M_VLANTAG)
806 		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
807 #endif
808 
809 	/*
810 	 * Force a cleanup if number of TX descriptors
811 	 * available is below the threshold. If it fails
812 	 * to get above, then abort transmit.
813 	 */
814 	if (txr->tx_avail <= IXGBE_TX_CLEANUP_THRESHOLD) {
815 		ixgbe_txeof(txr);
816 		/* Make sure things have improved */
817 		if (txr->tx_avail <= IXGBE_TX_OP_THRESHOLD) {
818 			txr->no_tx_desc_avail++;
819 			return (ENOBUFS);
820 		}
821 	}
822 
823         /*
824          * Important to capture the first descriptor
825          * used because it will contain the index of
826          * the one we tell the hardware to report back
827          */
828         first = txr->next_avail_tx_desc;
829 	txbuf = &txr->tx_buffers[first];
830 	txbuf_mapped = txbuf;
831 	map = txbuf->map;
832 
833 	/*
834 	 * Map the packet for DMA.
835 	 */
836 	error = bus_dmamap_load_mbuf(txr->txdma.dma_tag, map,
837 	    m_head, BUS_DMA_NOWAIT);
838 
839 	if (error == ENOMEM) {
840 		sc->no_tx_dma_setup++;
841 		return (error);
842 	} else if (error != 0) {
843 		sc->no_tx_dma_setup++;
844 		return (error);
845 	}
846 
847 	/* Make certain there are enough descriptors */
848 	if (map->dm_nsegs > txr->tx_avail - 2) {
849 		txr->no_tx_desc_avail++;
850 		error = ENOBUFS;
851 		goto xmit_fail;
852 	}
853 
854 	/*
855 	 * Set the appropriate offload context
856 	 * this becomes the first descriptor of
857 	 * a packet.
858 	 */
859 #ifdef notyet
860 	if (ixgbe_tso_setup(txr, m_head, &paylen)) {
861 		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
862 		olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
863 		olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
864 		olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
865 		++sc->tso_tx;
866 	} else
867 #endif
868 	if (ixgbe_tx_ctx_setup(txr, m_head))
869 		olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
870 
871 	i = txr->next_avail_tx_desc;
872 	for (j = 0; j < map->dm_nsegs; j++) {
873 		txbuf = &txr->tx_buffers[i];
874 		txd = &txr->tx_base[i];
875 
876 		txd->read.buffer_addr = htole64(map->dm_segs[j].ds_addr);
877 		txd->read.cmd_type_len = htole32(txr->txd_cmd |
878 		    cmd_type_len | map->dm_segs[j].ds_len);
879 		txd->read.olinfo_status = htole32(olinfo_status);
880 		last = i; /* Next descriptor that will get completed */
881 
882 		if (++i == sc->num_tx_desc)
883 			i = 0;
884 
885 		txbuf->m_head = NULL;
886 
887 		/*
888 		 * we have to do this inside the loop right now
889 		 * because of the hardware workaround.
890 		 */
891 		if (j == (map->dm_nsegs -1)) /* Last descriptor gets EOP and RS */
892 			txd->read.cmd_type_len |=
893 			    htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
894 #ifndef NO_82598_A0_SUPPORT
895 		if (sc->hw.revision_id == 0)
896 			desc_flip(txd);
897 #endif
898 	}
899 
900 	txr->tx_avail -= map->dm_nsegs;
901 	txr->next_avail_tx_desc = i;
902 
903 	txbuf->m_head = m_head;
904 	txbuf_mapped->map = txbuf->map;
905 	txbuf->map = map;
906 	bus_dmamap_sync(txr->txdma.dma_tag, map, 0, map->dm_mapsize,
907 	    BUS_DMASYNC_PREWRITE);
908 
909         /* Set the index of the descriptor that will be marked done */
910         txbuf = &txr->tx_buffers[first];
911 
912 	++txr->tx_packets;
913 	return (0);
914 
915 xmit_fail:
916 	bus_dmamap_unload(txr->txdma.dma_tag, txbuf->map);
917 	return (error);
918 
919 }
920 
921 void
922 ixgbe_set_promisc(struct ix_softc *sc)
923 {
924 
925 	uint32_t       reg_rctl;
926 	struct ifnet *ifp = &sc->arpcom.ac_if;
927 
928 	reg_rctl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
929 
930 	if (ifp->if_flags & IFF_PROMISC) {
931 		reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
932 		IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, reg_rctl);
933 	} else if (ifp->if_flags & IFF_ALLMULTI) {
934 		reg_rctl |= IXGBE_FCTRL_MPE;
935 		reg_rctl &= ~IXGBE_FCTRL_UPE;
936 		IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, reg_rctl);
937 	}
938 	return;
939 }
940 
941 void
942 ixgbe_disable_promisc(struct ix_softc * sc)
943 {
944 	uint32_t       reg_rctl;
945 
946 	reg_rctl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
947 
948 	reg_rctl &= (~IXGBE_FCTRL_UPE);
949 	reg_rctl &= (~IXGBE_FCTRL_MPE);
950 	IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, reg_rctl);
951 
952 	return;
953 }
954 
955 
956 /*********************************************************************
957  *  Multicast Update
958  *
959  *  This routine is called whenever multicast address list is updated.
960  *
961  **********************************************************************/
962 #define IXGBE_RAR_ENTRIES 16
963 
964 void
965 ixgbe_set_multi(struct ix_softc *sc)
966 {
967 	uint32_t	fctrl;
968 	uint8_t	mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
969 	uint8_t	*update_ptr;
970 	struct ether_multi *enm;
971 	struct ether_multistep step;
972 	int	mcnt = 0;
973 	struct ifnet *ifp = &sc->arpcom.ac_if;
974 
975 	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
976 
977 	fctrl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
978 	fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
979 	if (ifp->if_flags & IFF_PROMISC)
980 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
981 	else if (ifp->if_flags & IFF_ALLMULTI) {
982 		fctrl |= IXGBE_FCTRL_MPE;
983 		fctrl &= ~IXGBE_FCTRL_UPE;
984 	} else
985 		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
986 
987 	IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, fctrl);
988 
989 	ETHER_FIRST_MULTI(step, &sc->arpcom, enm);
990 	while (enm != NULL) {
991 		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
992 			ifp->if_flags |= IFF_ALLMULTI;
993 			mcnt = MAX_NUM_MULTICAST_ADDRESSES;
994 		}
995 		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
996 			break;
997 		bcopy(enm->enm_addrlo,
998 		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
999 		    IXGBE_ETH_LENGTH_OF_ADDRESS);
1000 		mcnt++;
1001 		ETHER_NEXT_MULTI(step, enm);
1002 	}
1003 
1004 	update_ptr = mta;
1005 	ixgbe_hw(&sc->hw, update_mc_addr_list,
1006 	    update_ptr, mcnt, ixgbe_mc_array_itr);
1007 
1008 	return;
1009 }
1010 
1011 /*
1012  * This is an iterator function now needed by the multicast
1013  * shared code. It simply feeds the shared code routine the
1014  * addresses in the array of ixgbe_set_multi() one by one.
1015  */
1016 uint8_t *
1017 ixgbe_mc_array_itr(struct ixgbe_hw *hw, uint8_t **update_ptr, uint32_t *vmdq)
1018 {
1019 	uint8_t *addr = *update_ptr;
1020 	uint8_t *newptr;
1021 	*vmdq = 0;
1022 
1023 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1024 	*update_ptr = newptr;
1025 	return addr;
1026 }
1027 
1028 
1029 /*********************************************************************
1030  *  Timer routine
1031  *
1032  *  This routine checks for link status,updates statistics,
1033  *  and runs the watchdog timer.
1034  *
1035  **********************************************************************/
1036 
1037 void
1038 ixgbe_local_timer(void *arg)
1039 {
1040 	struct ix_softc *sc = arg;
1041 #ifdef IX_DEBUG
1042 	struct ifnet	*ifp = &sc->arpcom.ac_if;
1043 #endif
1044 	int		 s;
1045 
1046 	s = splnet();
1047 
1048 	ixgbe_update_link_status(sc);
1049 	ixgbe_update_stats_counters(sc);
1050 
1051 #ifdef IX_DEBUG
1052 	if ((ifp->if_flags & (IFF_RUNNING|IFF_DEBUG)) ==
1053 	    (IFF_RUNNING|IFF_DEBUG))
1054 		ixgbe_print_hw_stats(sc);
1055 #endif
1056 
1057 	timeout_add_sec(&sc->timer, 1);
1058 
1059 	splx(s);
1060 }
1061 
1062 void
1063 ixgbe_update_link_status(struct ix_softc *sc)
1064 {
1065 	int link_up = FALSE;
1066 	struct ifnet *ifp = &sc->arpcom.ac_if;
1067 	struct tx_ring *txr = sc->tx_rings;
1068 	int		link_state;
1069 	int		i;
1070 
1071 	ixgbe_hw(&sc->hw, check_link, &sc->link_speed, &link_up, 0);
1072 
1073 	link_state = link_up ? LINK_STATE_FULL_DUPLEX : LINK_STATE_DOWN;
1074 
1075 	if (ifp->if_link_state != link_state) {
1076 		sc->link_active = link_up;
1077 		ifp->if_link_state = link_state;
1078 		if_link_state_change(ifp);
1079 	}
1080 
1081 	if (LINK_STATE_IS_UP(ifp->if_link_state)) {
1082 		switch (sc->link_speed) {
1083 		case IXGBE_LINK_SPEED_UNKNOWN:
1084 			ifp->if_baudrate = 0;
1085 			break;
1086 		case IXGBE_LINK_SPEED_100_FULL:
1087 			ifp->if_baudrate = IF_Mbps(100);
1088 			break;
1089 		case IXGBE_LINK_SPEED_1GB_FULL:
1090 			ifp->if_baudrate = IF_Gbps(1);
1091 			break;
1092 		case IXGBE_LINK_SPEED_10GB_FULL:
1093 			ifp->if_baudrate = IF_Gbps(10);
1094 			break;
1095 		}
1096 	} else {
1097 		ifp->if_baudrate = 0;
1098 		ifp->if_timer = 0;
1099 		for (i = 0; i < sc->num_tx_queues; i++)
1100 			txr[i].watchdog_timer = FALSE;
1101 	}
1102 
1103 
1104 	return;
1105 }
1106 
1107 
1108 
1109 /*********************************************************************
1110  *
1111  *  This routine disables all traffic on the sc by issuing a
1112  *  global reset on the MAC and deallocates TX/RX buffers.
1113  *
1114  **********************************************************************/
1115 
1116 void
1117 ixgbe_stop(void *arg)
1118 {
1119 	struct ix_softc *sc = arg;
1120 	struct ifnet   *ifp = &sc->arpcom.ac_if;
1121 
1122 	/* Tell the stack that the interface is no longer active */
1123 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1124 
1125 	INIT_DEBUGOUT("ixgbe_stop: begin\n");
1126 	ixgbe_disable_intr(sc);
1127 
1128 	ixgbe_hw0(&sc->hw, reset_hw);
1129 	sc->hw.adapter_stopped = FALSE;
1130 	ixgbe_hw0(&sc->hw, stop_adapter);
1131 	timeout_del(&sc->timer);
1132 
1133 	/* reprogram the RAR[0] in case user changed it. */
1134 	ixgbe_hw(&sc->hw, set_rar, 0, sc->hw.mac.addr, 0, IXGBE_RAH_AV);
1135 
1136 	ixgbe_free_transmit_structures(sc);
1137 	ixgbe_free_receive_structures(sc);
1138 }
1139 
1140 
1141 /*********************************************************************
1142  *
1143  *  Determine hardware revision.
1144  *
1145  **********************************************************************/
1146 void
1147 ixgbe_identify_hardware(struct ix_softc *sc)
1148 {
1149 	struct ixgbe_osdep	*os = &sc->osdep;
1150 	struct pci_attach_args	*pa = os->os_pa;
1151 	uint32_t		 reg;
1152 
1153 	/* Save off the information about this board */
1154 	sc->hw.vendor_id = PCI_VENDOR(pa->pa_id);
1155 	sc->hw.device_id = PCI_PRODUCT(pa->pa_id);
1156 
1157 	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
1158 	sc->hw.revision_id = PCI_REVISION(reg);
1159 
1160 	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
1161 	sc->hw.subsystem_vendor_id = PCI_VENDOR(reg);
1162 	sc->hw.subsystem_device_id = PCI_PRODUCT(reg);
1163 
1164 	ixgbe_total_ports++;
1165 	switch (sc->hw.device_id) {
1166 	case PCI_PRODUCT_INTEL_82598AF_DUAL:
1167 	case PCI_PRODUCT_INTEL_82598EB_CX4_DUAL:
1168 	case PCI_PRODUCT_INTEL_82598AT_DUAL:
1169 		ixgbe_total_ports++;
1170 		break;
1171 	}
1172 
1173 	switch (sc->hw.device_id) {
1174 	case PCI_PRODUCT_INTEL_82598AF_DUAL:
1175 	case PCI_PRODUCT_INTEL_82598AF:
1176 		sc->optics = IFM_10G_SR;
1177 		break;
1178 	case PCI_PRODUCT_INTEL_82598EB_CX4_DUAL:
1179 	case PCI_PRODUCT_INTEL_82598EB_CX4:
1180 		sc->optics = IFM_10G_CX4;
1181 		break;
1182 	case PCI_PRODUCT_INTEL_82598EB_XF_LR:
1183 		sc->optics = IFM_10G_LR;
1184 		break;
1185 	case PCI_PRODUCT_INTEL_82598AT_DUAL:
1186 	case PCI_PRODUCT_INTEL_82598AT:
1187 		sc->optics = IFM_10G_T;
1188 		break;
1189 	default:
1190 		sc->optics = IFM_AUTO;
1191 		break;
1192 	}
1193 }
1194 
1195 /*********************************************************************
1196  *
1197  *  Setup the Legacy or MSI Interrupt handler
1198  *
1199  **********************************************************************/
1200 int
1201 ixgbe_allocate_legacy(struct ix_softc *sc)
1202 {
1203 	struct ifnet		*ifp = &sc->arpcom.ac_if;
1204 	struct ixgbe_osdep	*os = &sc->osdep;
1205 	struct pci_attach_args	*pa = os->os_pa;
1206 	const char		*intrstr = NULL;
1207 	pci_chipset_tag_t	pc = pa->pa_pc;
1208 	pci_intr_handle_t	ih;
1209 
1210 	/* Legacy RID at 0 */
1211 	if (sc->msix == 0)
1212 		sc->rid[0] = 0;
1213 
1214 	/* We allocate a single interrupt resource */
1215 	if (pci_intr_map(pa, &ih)) {
1216 		printf(": couldn't map interrupt\n");
1217 		return (ENXIO);
1218 	}
1219 
1220 	intrstr = pci_intr_string(pc, ih);
1221 	sc->tag[0] = pci_intr_establish(pc, ih, IPL_NET,
1222 	    ixgbe_legacy_irq, sc, ifp->if_xname);
1223 	if (sc->tag[0] == NULL) {
1224 		printf(": couldn't establish interrupt");
1225 		if (intrstr != NULL)
1226 			printf(" at %s", intrstr);
1227 		printf("\n");
1228 		return (ENXIO);
1229 	}
1230 	printf(": %s", intrstr);
1231 
1232 	return (0);
1233 }
1234 
1235 int
1236 ixgbe_allocate_pci_resources(struct ix_softc *sc)
1237 {
1238 	struct ixgbe_osdep	*os = &sc->osdep;
1239 	struct pci_attach_args	*pa = os->os_pa;
1240 	int			 val, i;
1241 
1242 	val = pci_conf_read(pa->pa_pc, pa->pa_tag, PCIR_BAR(0));
1243 	if (PCI_MAPREG_TYPE(val) != PCI_MAPREG_TYPE_MEM &&
1244 	    PCI_MAPREG_TYPE(val) != PCI_MAPREG_MEM_TYPE_64BIT) {
1245 		printf(": mmba is not mem space\n");
1246 		return (ENXIO);
1247 	}
1248 
1249 	if (pci_mapreg_map(pa, PCIR_BAR(0), PCI_MAPREG_MEM_TYPE(val), 0,
1250 	    &os->os_memt, &os->os_memh, &os->os_membase, &os->os_memsize, 0)) {
1251 		printf(": cannot find mem space\n");
1252 		return (ENXIO);
1253 	}
1254 	sc->hw.hw_addr = (uint8_t *)os->os_membase;
1255 
1256 	/*
1257 	 * Init the resource arrays
1258 	 */
1259 	for (i = 0; i < IXGBE_MSGS; i++) {
1260 		sc->rid[i] = i + 1; /* MSI/X RID starts at 1 */
1261 		sc->tag[i] = NULL;
1262 		sc->res[i] = NULL;
1263 	}
1264 
1265 	/* Legacy defaults */
1266 	sc->num_tx_queues = 1;
1267 	sc->num_rx_queues = 1;
1268 
1269 #ifdef notyet
1270 	/* Now setup MSI or MSI/X */
1271 	sc->msix = ixgbe_setup_msix(sc);
1272 #endif
1273 	sc->hw.back = os;
1274 
1275 	return (0);
1276 }
1277 
1278 void
1279 ixgbe_free_pci_resources(struct ix_softc * sc)
1280 {
1281 	struct ixgbe_osdep	*os = &sc->osdep;
1282 	struct pci_attach_args	*pa = os->os_pa;
1283 
1284 	pci_intr_disestablish(pa->pa_pc, sc->tag[0]);
1285 	sc->tag[0] = NULL;
1286 	if (os->os_membase != NULL)
1287 		bus_space_unmap(os->os_memt, os->os_memh, os->os_memsize);
1288 	os->os_membase = NULL;
1289 
1290 	return;
1291 }
1292 
1293 /*********************************************************************
1294  *
1295  *  Initialize the hardware to a configuration as specified by the
1296  *  sc structure. The controller is reset, the EEPROM is
1297  *  verified, the MAC address is set, then the shared initialization
1298  *  routines are called.
1299  *
1300  **********************************************************************/
1301 int
1302 ixgbe_hardware_init(struct ix_softc *sc)
1303 {
1304 	struct ifnet	*ifp = &sc->arpcom.ac_if;
1305 	uint16_t csum;
1306 
1307 	csum = 0;
1308 	/* Issue a global reset */
1309 	sc->hw.adapter_stopped = FALSE;
1310 	ixgbe_hw0(&sc->hw, stop_adapter);
1311 
1312 	/* Make sure we have a good EEPROM before we read from it */
1313 	if (ixgbe_ee(&sc->hw, validate_checksum, &csum) < 0) {
1314 		printf("%s: The EEPROM Checksum Is Not Valid\n", ifp->if_xname);
1315 		return (EIO);
1316 	}
1317 
1318 	/* Get Hardware Flow Control setting */
1319 	sc->hw.fc.type = ixgbe_fc_full;
1320 	sc->hw.fc.pause_time = IXGBE_FC_PAUSE;
1321 	sc->hw.fc.low_water = IXGBE_FC_LO;
1322 	sc->hw.fc.high_water = IXGBE_FC_HI;
1323 	sc->hw.fc.send_xon = TRUE;
1324 
1325 	if (ixgbe_hw0(&sc->hw, init_hw) != 0) {
1326 		printf("%s: Hardware Initialization Failed", ifp->if_xname);
1327 		return (EIO);
1328 	}
1329 	bcopy(sc->hw.mac.addr, sc->arpcom.ac_enaddr,
1330 	      IXGBE_ETH_LENGTH_OF_ADDRESS);
1331 
1332 	return (0);
1333 }
1334 
1335 /*********************************************************************
1336  *
1337  *  Setup networking device structure and register an interface.
1338  *
1339  **********************************************************************/
1340 void
1341 ixgbe_setup_interface(struct ix_softc *sc)
1342 {
1343 	struct ixgbe_hw *hw = &sc->hw;
1344 	struct ifnet   *ifp = &sc->arpcom.ac_if;
1345 	INIT_DEBUGOUT("ixgbe_setup_interface: begin");
1346 
1347 	strlcpy(ifp->if_xname, sc->dev.dv_xname, IFNAMSIZ);
1348 	ifp->if_baudrate = IF_Gbps(10);
1349 	ifp->if_softc = sc;
1350 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1351 	ifp->if_ioctl = ixgbe_ioctl;
1352 	ifp->if_start = ixgbe_start;
1353 	ifp->if_timer = 0;
1354 	ifp->if_watchdog = ixgbe_watchdog;
1355 	ifp->if_hardmtu = IXGBE_MAX_FRAME_SIZE -
1356 	    ETHER_HDR_LEN - ETHER_CRC_LEN;
1357 	IFQ_SET_MAXLEN(&ifp->if_snd, sc->num_tx_desc - 1);
1358 	IFQ_SET_READY(&ifp->if_snd);
1359 
1360 	m_clsetwms(ifp, MCLBYTES, 4, sc->num_rx_desc);
1361 
1362 	ifp->if_capabilities = IFCAP_VLAN_MTU;
1363 
1364 #if NVLAN > 0
1365 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
1366 #endif
1367 
1368 #ifdef IX_CSUM_OFFLOAD
1369 	ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
1370 				IFCAP_CSUM_UDPv4;
1371 #endif
1372 
1373 	sc->max_frame_size =
1374 	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1375 
1376 	if ((hw->device_id == PCI_PRODUCT_INTEL_82598AT) ||
1377 	    (hw->device_id == PCI_PRODUCT_INTEL_82598AT_DUAL))
1378 		ixgbe_hw(hw, setup_link_speed,
1379 		    IXGBE_LINK_SPEED_10GB_FULL |
1380 		    IXGBE_LINK_SPEED_1GB_FULL, TRUE, TRUE);
1381 	else
1382 		ixgbe_hw(hw, setup_link_speed,
1383 		    IXGBE_LINK_SPEED_10GB_FULL,
1384 		    TRUE, FALSE);
1385 
1386 	/*
1387 	 * Specify the media types supported by this sc and register
1388 	 * callbacks to update media and link information
1389 	 */
1390 	ifmedia_init(&sc->media, IFM_IMASK, ixgbe_media_change,
1391 		     ixgbe_media_status);
1392 	ifmedia_add(&sc->media, IFM_ETHER | sc->optics |
1393 	    IFM_FDX, 0, NULL);
1394 	if ((hw->device_id == PCI_PRODUCT_INTEL_82598AT) ||
1395 	    (hw->device_id == PCI_PRODUCT_INTEL_82598AT_DUAL)) {
1396 		ifmedia_add(&sc->media,
1397 		    IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
1398 		ifmedia_add(&sc->media,
1399 		    IFM_ETHER | IFM_1000_T, 0, NULL);
1400 	}
1401 	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1402 	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
1403 
1404 	if_attach(ifp);
1405 	ether_ifattach(ifp);
1406 
1407 
1408 	return;
1409 }
1410 
1411 int
1412 ixgbe_dma_malloc(struct ix_softc *sc, bus_size_t size,
1413 		struct ixgbe_dma_alloc *dma, int mapflags)
1414 {
1415 	struct ifnet		*ifp = &sc->arpcom.ac_if;
1416 	struct ixgbe_osdep	*os = &sc->osdep;
1417 	int			 r;
1418 
1419 	dma->dma_tag = os->os_pa->pa_dmat;
1420 	r = bus_dmamap_create(dma->dma_tag, size, 1,
1421 	    size, 0, BUS_DMA_NOWAIT, &dma->dma_map);
1422 	if (r != 0) {
1423 		printf("%s: ixgbe_dma_malloc: bus_dma_tag_create failed; "
1424 		       "error %u\n", ifp->if_xname, r);
1425 		goto fail_0;
1426 	}
1427 
1428 	r = bus_dmamem_alloc(dma->dma_tag, size, PAGE_SIZE, 0, &dma->dma_seg,
1429 	    1, &dma->dma_nseg, BUS_DMA_NOWAIT);
1430 	if (r != 0) {
1431 		printf("%s: ixgbe_dma_malloc: bus_dmamem_alloc failed; "
1432 		       "error %u\n", ifp->if_xname, r);
1433 		goto fail_1;
1434 	}
1435 
1436 	r = bus_dmamem_map(dma->dma_tag, &dma->dma_seg, dma->dma_nseg, size,
1437 	    &dma->dma_vaddr, BUS_DMA_NOWAIT);
1438 	if (r != 0) {
1439 		printf("%s: ixgbe_dma_malloc: bus_dmamem_map failed; "
1440 		       "error %u\n", ifp->if_xname, r);
1441 		goto fail_2;
1442 	}
1443 
1444 	r = bus_dmamap_load(dma->dma_tag, dma->dma_map,
1445 	    dma->dma_vaddr, size, NULL,
1446 	    mapflags | BUS_DMA_NOWAIT);
1447 	if (r != 0) {
1448 		printf("%s: ixgbe_dma_malloc: bus_dmamap_load failed; "
1449 		       "error %u\n", ifp->if_xname, r);
1450 		goto fail_3;
1451 	}
1452 
1453 	dma->dma_size = size;
1454 	return (0);
1455 fail_3:
1456 	bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size);
1457 fail_2:
1458 	bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
1459 fail_1:
1460 	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
1461 fail_0:
1462 	dma->dma_map = NULL;
1463 	dma->dma_tag = NULL;
1464 	return (r);
1465 }
1466 
1467 void
1468 ixgbe_dma_free(struct ix_softc *sc, struct ixgbe_dma_alloc *dma)
1469 {
1470 	if (dma->dma_tag == NULL)
1471 		return;
1472 
1473 	if (dma->dma_map != NULL) {
1474 		bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0,
1475 		    dma->dma_map->dm_mapsize,
1476 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1477 		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1478 		bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, dma->dma_size);
1479 		bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
1480 		bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
1481 		dma->dma_map = NULL;
1482 	}
1483 }
1484 
1485 
1486 /*********************************************************************
1487  *
1488  *  Allocate memory for the transmit and receive rings, and then
1489  *  the descriptors associated with each, called only once at attach.
1490  *
1491  **********************************************************************/
1492 int
1493 ixgbe_allocate_queues(struct ix_softc *sc)
1494 {
1495 	struct ifnet	*ifp = &sc->arpcom.ac_if;
1496 	struct tx_ring *txr;
1497 	struct rx_ring *rxr;
1498 	int rsize, tsize, error = IXGBE_SUCCESS;
1499 	int txconf = 0, rxconf = 0, i;
1500 
1501 	/* First allocate the TX ring struct memory */
1502 	if (!(sc->tx_rings =
1503 	    (struct tx_ring *) malloc(sizeof(struct tx_ring) *
1504 	    sc->num_tx_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
1505 		printf("%s: Unable to allocate TX ring memory\n", ifp->if_xname);
1506 		error = ENOMEM;
1507 		goto fail;
1508 	}
1509 	txr = sc->tx_rings;
1510 
1511 	/* Next allocate the RX */
1512 	if (!(sc->rx_rings =
1513 	    (struct rx_ring *) malloc(sizeof(struct rx_ring) *
1514 	    sc->num_rx_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
1515 		printf("%s: Unable to allocate RX ring memory\n", ifp->if_xname);
1516 		error = ENOMEM;
1517 		goto rx_fail;
1518 	}
1519 	rxr = sc->rx_rings;
1520 
1521 	/* For the ring itself */
1522 	tsize = roundup2(sc->num_tx_desc *
1523 	    sizeof(union ixgbe_adv_tx_desc), 4096);
1524 
1525 	/*
1526 	 * Now set up the TX queues, txconf is needed to handle the
1527 	 * possibility that things fail midcourse and we need to
1528 	 * undo memory gracefully
1529 	 */
1530 	for (i = 0; i < sc->num_tx_queues; i++, txconf++) {
1531 		/* Set up some basics */
1532 		txr = &sc->tx_rings[i];
1533 		txr->sc = sc;
1534 		txr->me = i;
1535 
1536 		/* Initialize the TX side lock */
1537 		mtx_init(&txr->tx_mtx, IPL_NET);
1538 
1539 		if (ixgbe_dma_malloc(sc, tsize,
1540 		    &txr->txdma, BUS_DMA_NOWAIT)) {
1541 			printf("%s: Unable to allocate TX Descriptor memory\n",
1542 			    ifp->if_xname);
1543 			error = ENOMEM;
1544 			goto err_tx_desc;
1545 		}
1546 		txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
1547 		bzero((void *)txr->tx_base, tsize);
1548 
1549 		if (ixgbe_dma_malloc(sc, sizeof(uint32_t),
1550 		    &txr->txwbdma, BUS_DMA_NOWAIT)) {
1551 			printf("%s: Unable to allocate TX Write Back memory\n",
1552 			    ifp->if_xname);
1553 			error = ENOMEM;
1554 			goto err_tx_desc;
1555 		}
1556 		txr->tx_hwb = (uint32_t *)txr->txwbdma.dma_vaddr;
1557 		*txr->tx_hwb = 0;
1558 	}
1559 
1560 	/*
1561 	 * Next the RX queues...
1562 	 */
1563 	rsize = roundup2(sc->num_rx_desc *
1564 	    sizeof(union ixgbe_adv_rx_desc), 4096);
1565 	for (i = 0; i < sc->num_rx_queues; i++, rxconf++) {
1566 		rxr = &sc->rx_rings[i];
1567 		/* Set up some basics */
1568 		rxr->sc = sc;
1569 		rxr->me = i;
1570 
1571 		/* Initialize the TX side lock */
1572 		mtx_init(&rxr->rx_mtx, IPL_NET);
1573 
1574 		if (ixgbe_dma_malloc(sc, rsize,
1575 			&rxr->rxdma, BUS_DMA_NOWAIT)) {
1576 			printf("%s: Unable to allocate RxDescriptor memory\n",
1577 			    ifp->if_xname);
1578 			error = ENOMEM;
1579 			goto err_rx_desc;
1580 		}
1581 		rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
1582 		bzero((void *)rxr->rx_base, rsize);
1583 	}
1584 
1585 	return (0);
1586 
1587 err_rx_desc:
1588 	for (rxr = sc->rx_rings; rxconf > 0; rxr++, rxconf--)
1589 		ixgbe_dma_free(sc, &rxr->rxdma);
1590 err_tx_desc:
1591 	for (txr = sc->tx_rings; txconf > 0; txr++, txconf--) {
1592 		ixgbe_dma_free(sc, &txr->txdma);
1593 		ixgbe_dma_free(sc, &txr->txwbdma);
1594 	}
1595 	free(sc->rx_rings, M_DEVBUF);
1596 	sc->rx_rings = NULL;
1597 rx_fail:
1598 	free(sc->tx_rings, M_DEVBUF);
1599 	sc->tx_rings = NULL;
1600 fail:
1601 	return (error);
1602 }
1603 
1604 /*********************************************************************
1605  *
1606  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
1607  *  the information needed to transmit a packet on the wire. This is
1608  *  called only once at attach, setup is done every reset.
1609  *
1610  **********************************************************************/
1611 int
1612 ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
1613 {
1614 	struct ix_softc 	*sc;
1615 	struct ixgbe_osdep	*os;
1616 	struct ifnet		*ifp;
1617 	struct ixgbe_tx_buf	*txbuf;
1618 	int			 error, i;
1619 
1620 	sc = txr->sc;
1621 	os = &sc->osdep;
1622 	ifp = &sc->arpcom.ac_if;
1623 
1624 	if (!(txr->tx_buffers =
1625 	    (struct ixgbe_tx_buf *) malloc(sizeof(struct ixgbe_tx_buf) *
1626 	    sc->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
1627 		printf("%s: Unable to allocate tx_buffer memory\n",
1628 		    ifp->if_xname);
1629 		error = ENOMEM;
1630 		goto fail;
1631 	}
1632 	txr->txtag = txr->txdma.dma_tag;
1633 
1634         /* Create the descriptor buffer dma maps */
1635 	for (i = 0; i < sc->num_tx_desc; i++) {
1636 		txbuf = &txr->tx_buffers[i];
1637 		error = bus_dmamap_create(txr->txdma.dma_tag, IXGBE_TSO_SIZE,
1638 			    IXGBE_MAX_SCATTER, PAGE_SIZE, 0,
1639 			    BUS_DMA_NOWAIT, &txbuf->map);
1640 
1641 		if (error != 0) {
1642 			printf("%s: Unable to create TX DMA map\n",
1643 			    ifp->if_xname);
1644 			goto fail;
1645 		}
1646 	}
1647 
1648 	return 0;
1649 fail:
1650 	return (error);
1651 }
1652 
1653 /*********************************************************************
1654  *
1655  *  Initialize a transmit ring.
1656  *
1657  **********************************************************************/
1658 int
1659 ixgbe_setup_transmit_ring(struct tx_ring *txr)
1660 {
1661 	struct ix_softc		*sc = txr->sc;
1662 	int			 error;
1663 
1664 	/* Now allocate transmit buffers for the ring */
1665 	if ((error = ixgbe_allocate_transmit_buffers(txr)) != 0)
1666 		return (error);
1667 
1668 	/* Clear the old ring contents */
1669 	bzero((void *)txr->tx_base,
1670 	      (sizeof(union ixgbe_adv_tx_desc)) * sc->num_tx_desc);
1671 
1672 	/* Reset indices */
1673 	txr->next_avail_tx_desc = 0;
1674 	txr->next_tx_to_clean = 0;
1675 
1676 	/* Set number of descriptors available */
1677 	txr->tx_avail = sc->num_tx_desc;
1678 
1679 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
1680 	    0, txr->txdma.dma_map->dm_mapsize,
1681 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1682 
1683 	return (0);
1684 }
1685 
1686 /*********************************************************************
1687  *
1688  *  Initialize all transmit rings.
1689  *
1690  **********************************************************************/
1691 int
1692 ixgbe_setup_transmit_structures(struct ix_softc *sc)
1693 {
1694 	struct tx_ring *txr = sc->tx_rings;
1695 	int		i, error;
1696 
1697 	for (i = 0; i < sc->num_tx_queues; i++, txr++) {
1698 		if ((error = ixgbe_setup_transmit_ring(txr)) != 0)
1699 			goto fail;
1700 	}
1701 
1702 	return (0);
1703 fail:
1704 	ixgbe_free_transmit_structures(sc);
1705 	return (error);
1706 }
1707 
1708 /*********************************************************************
1709  *
1710  *  Enable transmit unit.
1711  *
1712  **********************************************************************/
1713 void
1714 ixgbe_initialize_transmit_units(struct ix_softc *sc)
1715 {
1716 	struct ifnet	*ifp = &sc->arpcom.ac_if;
1717 	struct tx_ring	*txr;
1718 	struct ixgbe_hw	*hw = &sc->hw;
1719 	int		 i;
1720 	uint64_t	 tdba, txhwb;
1721 	uint32_t	 txctrl;
1722 
1723 	/* Setup the Base and Length of the Tx Descriptor Ring */
1724 
1725 	for (i = 0; i < sc->num_tx_queues; i++) {
1726 		txr = &sc->tx_rings[i];
1727 
1728 		/* Setup descriptor base address */
1729 		tdba = txr->txdma.dma_map->dm_segs[0].ds_addr;
1730 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),
1731 		       (tdba & 0x00000000ffffffffULL));
1732 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
1733 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
1734 		    sc->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
1735 
1736 		/* Setup for Head WriteBack */
1737 		txhwb = txr->txwbdma.dma_map->dm_segs[0].ds_addr;
1738 		txhwb |= IXGBE_TDWBAL_HEAD_WB_ENABLE;
1739 		IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(i),
1740 		    (txhwb & 0x00000000ffffffffULL));
1741 		IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(i),
1742 		    (txhwb >> 32));
1743 		txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
1744 		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
1745 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
1746 
1747 		/* Setup the HW Tx Head and Tail descriptor pointers */
1748 		IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
1749 		IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
1750 
1751 		/* Setup Transmit Descriptor Cmd Settings */
1752 		txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
1753 
1754 		txr->watchdog_timer = 0;
1755 	}
1756 	ifp->if_timer = 0;
1757 
1758 	return;
1759 }
1760 
1761 /*********************************************************************
1762  *
1763  *  Free all transmit rings.
1764  *
1765  **********************************************************************/
1766 void
1767 ixgbe_free_transmit_structures(struct ix_softc *sc)
1768 {
1769 	struct tx_ring *txr = sc->tx_rings;
1770 	int		i;
1771 
1772 	for (i = 0; i < sc->num_tx_queues; i++, txr++) {
1773 		ixgbe_free_transmit_buffers(txr);
1774 	}
1775 }
1776 
1777 /*********************************************************************
1778  *
1779  *  Free transmit ring related data structures.
1780  *
1781  **********************************************************************/
1782 void
1783 ixgbe_free_transmit_buffers(struct tx_ring *txr)
1784 {
1785 	struct ix_softc *sc = txr->sc;
1786 	struct ixgbe_tx_buf *tx_buffer;
1787 	int             i;
1788 
1789 	INIT_DEBUGOUT("free_transmit_ring: begin");
1790 
1791 	if (txr->tx_buffers == NULL)
1792 		return;
1793 
1794 	tx_buffer = txr->tx_buffers;
1795 	for (i = 0; i < sc->num_tx_desc; i++, tx_buffer++) {
1796 		if (tx_buffer->map != NULL && tx_buffer->map->dm_nsegs > 0) {
1797 			bus_dmamap_sync(txr->txdma.dma_tag, tx_buffer->map,
1798 			    0, tx_buffer->map->dm_mapsize,
1799 			    BUS_DMASYNC_POSTWRITE);
1800 			bus_dmamap_unload(txr->txdma.dma_tag,
1801 			    tx_buffer->map);
1802 		}
1803 		if (tx_buffer->m_head != NULL) {
1804 			m_freem(tx_buffer->m_head);
1805 			tx_buffer->m_head = NULL;
1806 		}
1807 		if (tx_buffer->map != NULL) {
1808 			bus_dmamap_destroy(txr->txdma.dma_tag,
1809 			    tx_buffer->map);
1810 			tx_buffer->map = NULL;
1811 		}
1812 	}
1813 
1814 	if (txr->tx_buffers != NULL) {
1815 		free(txr->tx_buffers, M_DEVBUF);
1816 		txr->tx_buffers = NULL;
1817 	}
1818 	txr->tx_buffers = NULL;
1819 	txr->txtag = NULL;
1820 }
1821 
1822 /*********************************************************************
1823  *
1824  *  Advanced Context Descriptor setup for VLAN or CSUM
1825  *
1826  **********************************************************************/
1827 
1828 int
1829 ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
1830 {
1831 	struct ix_softc *sc = txr->sc;
1832 	struct ifnet	*ifp = &sc->arpcom.ac_if;
1833 	struct ixgbe_adv_tx_context_desc *TXD;
1834 	struct ixgbe_tx_buf        *tx_buffer;
1835 	uint32_t vlan_macip_lens = 0, type_tucmd_mlhl = 0;
1836 	struct ip *ip;
1837 	struct ip6_hdr *ip6;
1838 	uint8_t ipproto = 0;
1839 	int  ehdrlen, ip_hlen = 0;
1840 	uint16_t etype;
1841 	int offload = TRUE;
1842 	int ctxd = txr->next_avail_tx_desc;
1843 #if NVLAN > 0
1844 	struct ether_vlan_header *eh;
1845 #else
1846 	struct ether_header *eh;
1847 #endif
1848 
1849 	if ((ifp->if_capabilities & IFCAP_CSUM_IPv4) == 0)
1850 		offload = FALSE;
1851 
1852 	tx_buffer = &txr->tx_buffers[ctxd];
1853 	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
1854 
1855 	/*
1856 	 * In advanced descriptors the vlan tag must
1857 	 * be placed into the descriptor itself.
1858 	 */
1859 #if NVLAN > 0
1860 	if (mp->m_flags & M_VLANTAG) {
1861 		vlan_macip_lens |=
1862 		    htole16(mp->m_pkthdr.ether_vtag) << IXGBE_ADVTXD_VLAN_SHIFT;
1863 	} else
1864 #endif
1865 	if (offload == FALSE)
1866 		return FALSE;	/* No need for CTX */
1867 
1868 	/*
1869 	 * Determine where frame payload starts.
1870 	 * Jump over vlan headers if already present,
1871 	 * helpful for QinQ too.
1872 	 */
1873 #if NVLAN > 0
1874 	eh = mtod(mp, struct ether_vlan_header *);
1875 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1876 		etype = ntohs(eh->evl_proto);
1877 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1878 	} else {
1879 		etype = ntohs(eh->evl_encap_proto);
1880 		ehdrlen = ETHER_HDR_LEN;
1881 	}
1882 #else
1883 	eh = mtod(mp, struct ether_header *);
1884 	etype = ntohs(eh->ether_type);
1885 	ehdrlen = ETHER_HDR_LEN;
1886 #endif
1887 
1888 	/* Set the ether header length */
1889 	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
1890 	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
1891 
1892 	if (offload == TRUE) {
1893 		switch (etype) {
1894 		case ETHERTYPE_IP:
1895 			ip = (struct ip *)(mp->m_data + ehdrlen);
1896 			ip_hlen = ip->ip_hl << 2;
1897 			if (mp->m_len < ehdrlen + ip_hlen)
1898 				return FALSE; /* failure */
1899 			ipproto = ip->ip_p;
1900 			if (mp->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
1901 				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
1902 			break;
1903 		case ETHERTYPE_IPV6:
1904 			ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
1905 			ip_hlen = sizeof(struct ip6_hdr);
1906 			if (mp->m_len < ehdrlen + ip_hlen)
1907 				return FALSE; /* failure */
1908 			ipproto = ip6->ip6_nxt;
1909 			if (mp->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
1910 				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
1911 			break;
1912 		default:
1913 			offload = FALSE;
1914 			break;
1915 		}
1916 
1917 		vlan_macip_lens |= ip_hlen;
1918 
1919 		switch (ipproto) {
1920 		case IPPROTO_TCP:
1921 			if (mp->m_pkthdr.csum_flags & M_TCPV4_CSUM_OUT)
1922 				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
1923 			break;
1924 		case IPPROTO_UDP:
1925 			if (mp->m_pkthdr.csum_flags & M_UDPV4_CSUM_OUT)
1926 				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
1927 			break;
1928 		default:
1929 			offload = FALSE;
1930 			break;
1931 		}
1932 	}
1933 
1934 	/* Now copy bits into descriptor */
1935 	TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
1936 	TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
1937 	TXD->seqnum_seed = htole32(0);
1938 	TXD->mss_l4len_idx = htole32(0);
1939 
1940 #ifndef NO_82598_A0_SUPPORT
1941 	if (sc->hw.revision_id == 0)
1942 		desc_flip(TXD);
1943 #endif
1944 
1945 	tx_buffer->m_head = NULL;
1946 
1947 	/* We've consumed the first desc, adjust counters */
1948 	if (++ctxd == sc->num_tx_desc)
1949 		ctxd = 0;
1950 	txr->next_avail_tx_desc = ctxd;
1951 	--txr->tx_avail;
1952 
1953         return (offload);
1954 }
1955 
1956 #ifdef notyet
1957 /**********************************************************************
1958  *
1959  *  Setup work for hardware segmentation offload (TSO) on
1960  *  scs using advanced tx descriptors
1961  *
1962  **********************************************************************/
1963 int
1964 ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, uint32_t *paylen)
1965 {
1966 	struct ix_softc *sc = txr->sc;
1967 	struct ixgbe_adv_tx_context_desc *TXD;
1968 	struct ixgbe_tx_buf        *tx_buffer;
1969 	uint32_t vlan_macip_lens = 0, type_tucmd_mlhl = 0;
1970 	uint32_t mss_l4len_idx = 0;
1971 	int ctxd, ehdrlen,  hdrlen, ip_hlen, tcp_hlen;
1972 #if NVLAN > 0
1973 	uint16_t vtag = 0;
1974 	struct ether_vlan_header *eh;
1975 #else
1976 	struct ether_header *eh;
1977 #endif
1978 	struct ip *ip;
1979 	struct tcphdr *th;
1980 
1981 	if (((mp->m_pkthdr.csum_flags & CSUM_TSO) == 0) ||
1982 	    (mp->m_pkthdr.len <= IXGBE_TX_BUFFER_SIZE))
1983 	        return FALSE;
1984 
1985 	/*
1986 	 * Determine where frame payload starts.
1987 	 * Jump over vlan headers if already present
1988 	 */
1989 #if NVLAN > 0
1990 	eh = mtod(mp, struct ether_vlan_header *);
1991 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
1992 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1993 	else
1994 		ehdrlen = ETHER_HDR_LEN;
1995 #else
1996 	eh = mtod(mp, struct ether_header *);
1997 	ehdrlen = ETHER_HDR_LEN;
1998 #endif
1999 
2000         /* Ensure we have at least the IP+TCP header in the first mbuf. */
2001         if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
2002 		return FALSE;
2003 
2004 	ctxd = txr->next_avail_tx_desc;
2005 	tx_buffer = &txr->tx_buffers[ctxd];
2006 	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
2007 
2008 	ip = (struct ip *)(mp->m_data + ehdrlen);
2009 	if (ip->ip_p != IPPROTO_TCP)
2010 		return FALSE;   /* 0 */
2011 	ip->ip_len = 0;
2012 	ip->ip_sum = 0;
2013 	ip_hlen = ip->ip_hl << 2;
2014 	th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
2015 	th->th_sum = in_pseudo(ip->ip_src.s_addr,
2016 	    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2017 	tcp_hlen = th->th_off << 2;
2018 	hdrlen = ehdrlen + ip_hlen + tcp_hlen;
2019 	/* This is used in the transmit desc in encap */
2020 	*paylen = mp->m_pkthdr.len - hdrlen;
2021 
2022 #if NVLAN > 0
2023 	/* VLAN MACLEN IPLEN */
2024 	if (mp->m_flags & M_VLANTAG) {
2025 		vtag = htole16(mp->m_pkthdr.ether_vtag);
2026 		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
2027 	}
2028 #endif
2029 
2030 	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
2031 	vlan_macip_lens |= ip_hlen;
2032 	TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
2033 
2034 	/* ADV DTYPE TUCMD */
2035 	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
2036 	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2037 	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
2038 	TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
2039 
2040 
2041 	/* MSS L4LEN IDX */
2042 	mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
2043 	mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
2044 	TXD->mss_l4len_idx = htole32(mss_l4len_idx);
2045 
2046 	TXD->seqnum_seed = htole32(0);
2047 	tx_buffer->m_head = NULL;
2048 
2049 #ifndef NO_82598_A0_SUPPORT
2050 	if (sc->hw.revision_id == 0)
2051 		desc_flip(TXD);
2052 #endif
2053 
2054 	if (++ctxd == sc->num_tx_desc)
2055 		ctxd = 0;
2056 
2057 	txr->tx_avail--;
2058 	txr->next_avail_tx_desc = ctxd;
2059 	return TRUE;
2060 }
2061 
2062 #else
2063 /* This makes it easy to keep the code common */
2064 int
2065 ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, uint32_t *paylen)
2066 {
2067 	return (FALSE);
2068 }
2069 #endif
2070 
2071 /**********************************************************************
2072  *
2073  *  Examine each tx_buffer in the used queue. If the hardware is done
2074  *  processing the packet then free associated resources. The
2075  *  tx_buffer is put back on the free queue.
2076  *
2077  **********************************************************************/
2078 int
2079 ixgbe_txeof(struct tx_ring *txr)
2080 {
2081 	struct ix_softc			*sc = txr->sc;
2082 	struct ifnet			*ifp = &sc->arpcom.ac_if;
2083 	uint				 first, last, done, num_avail;
2084 	struct ixgbe_tx_buf		*tx_buffer;
2085 	struct ixgbe_legacy_tx_desc *tx_desc;
2086 
2087 	if (txr->tx_avail == sc->num_tx_desc)
2088 		return FALSE;
2089 
2090 	num_avail = txr->tx_avail;
2091 	first = txr->next_tx_to_clean;
2092 
2093 	tx_buffer = &txr->tx_buffers[first];
2094 
2095 	/* For cleanup we just use legacy struct */
2096 	tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
2097 
2098 	/* Get the HWB */
2099         bus_dmamap_sync(txr->txwbdma.dma_tag, txr->txwbdma.dma_map,
2100 	    0, txr->txwbdma.dma_map->dm_mapsize,
2101             BUS_DMASYNC_POSTREAD);
2102         done = *txr->tx_hwb;
2103 
2104         bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2105 	    0, txr->txdma.dma_map->dm_mapsize,
2106             BUS_DMASYNC_POSTREAD);
2107 
2108 	while (TRUE) {
2109 		/* We clean the range til last head write back */
2110 		while (first != done) {
2111 			tx_desc->upper.data = 0;
2112 			tx_desc->lower.data = 0;
2113 			tx_desc->buffer_addr = 0;
2114 			num_avail++;
2115 
2116 			if (tx_buffer->m_head) {
2117 				ifp->if_opackets++;
2118 				bus_dmamap_sync(txr->txdma.dma_tag,
2119 				    tx_buffer->map,
2120 				    0, tx_buffer->map->dm_mapsize,
2121 				    BUS_DMASYNC_POSTWRITE);
2122 				bus_dmamap_unload(txr->txdma.dma_tag,
2123 				    tx_buffer->map);
2124 				m_freem(tx_buffer->m_head);
2125 				tx_buffer->m_head = NULL;
2126 			}
2127 
2128 			if (++first == sc->num_tx_desc)
2129 				first = 0;
2130 
2131 			tx_buffer = &txr->tx_buffers[first];
2132 			tx_desc = (struct ixgbe_legacy_tx_desc *)
2133 			    &txr->tx_base[first];
2134 		}
2135 		/* See if there is more work now */
2136 		last = done;
2137 	        bus_dmamap_sync(txr->txwbdma.dma_tag, txr->txwbdma.dma_map,
2138 		    0, txr->txwbdma.dma_map->dm_mapsize,
2139 	            BUS_DMASYNC_POSTREAD);
2140         	done = *txr->tx_hwb;
2141 		if (last == done)
2142 			break;
2143 	}
2144 
2145 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2146 	    0, txr->txdma.dma_map->dm_mapsize,
2147 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2148 
2149 	txr->next_tx_to_clean = first;
2150 
2151 	/*
2152 	 * If we have enough room, clear IFF_OACTIVE to tell the stack that
2153 	 * it is OK to send packets. If there are no pending descriptors,
2154 	 * clear the timeout. Otherwise, if some descriptors have been freed,
2155 	 * restart the timeout.
2156 	 */
2157 	if (num_avail > IXGBE_TX_CLEANUP_THRESHOLD) {
2158 		ifp->if_flags &= ~IFF_OACTIVE;
2159 
2160 		/* If all are clean turn off the timer */
2161 		if (num_avail == sc->num_tx_desc) {
2162 			ifp->if_timer = 0;
2163 			txr->watchdog_timer = 0;
2164 			txr->tx_avail = num_avail;
2165 			return FALSE;
2166 		}
2167 		/* Some were cleaned, so reset timer */
2168 		else if (num_avail != txr->tx_avail) {
2169 			ifp->if_timer = IXGBE_TX_TIMEOUT;
2170 			txr->watchdog_timer = IXGBE_TX_TIMEOUT;
2171 		}
2172 	}
2173 
2174 	txr->tx_avail = num_avail;
2175 
2176 	return TRUE;
2177 }
2178 
2179 /*********************************************************************
2180  *
2181  *  Get a buffer from system mbuf buffer pool.
2182  *
2183  **********************************************************************/
2184 int
2185 ixgbe_get_buf(struct rx_ring *rxr, int i)
2186 {
2187 	struct ix_softc		*sc = rxr->sc;
2188 	struct mbuf		*m;
2189 	int			error;
2190 	int			size = MCLBYTES;
2191 	struct ixgbe_rx_buf	*rxbuf;
2192 	union ixgbe_adv_rx_desc	*rxdesc;
2193 	size_t			 dsize = sizeof(union ixgbe_adv_rx_desc);
2194 
2195 	rxbuf = &rxr->rx_buffers[i];
2196 	rxdesc = &rxr->rx_base[i];
2197 
2198 	if (rxbuf->m_head != NULL) {
2199 		printf("%s: ixgbe_get_buf: slot %d already has an mbuf\n",
2200 		    sc->dev.dv_xname, i);
2201 		return (ENOBUFS);
2202 	}
2203 
2204 	m = MCLGETI(NULL, M_DONTWAIT, &sc->arpcom.ac_if, size);
2205 	if (!m) {
2206 		sc->mbuf_cluster_failed++;
2207 		return (ENOBUFS);
2208 	}
2209 	m->m_len = m->m_pkthdr.len = size;
2210 	if (sc->max_frame_size <= (size - ETHER_ALIGN))
2211 		m_adj(m, ETHER_ALIGN);
2212 
2213 	error = bus_dmamap_load_mbuf(rxr->rxdma.dma_tag, rxbuf->map,
2214 	    m, BUS_DMA_NOWAIT);
2215 	if (error) {
2216 		m_freem(m);
2217 		return (error);
2218 	}
2219 
2220         bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map,
2221 	    0, rxbuf->map->dm_mapsize, BUS_DMASYNC_PREREAD);
2222 	rxbuf->m_head = m;
2223 
2224 	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2225 	    dsize * i, dsize, BUS_DMASYNC_POSTWRITE);
2226 
2227 	bzero(rxdesc, dsize);
2228 	rxdesc->read.pkt_addr = htole64(rxbuf->map->dm_segs[0].ds_addr);
2229 
2230 #ifndef NO_82598_A0_SUPPORT
2231         /* A0 needs to One's Compliment descriptors */
2232 	if (sc->hw.revision_id == 0) {
2233         	struct dhack {
2234 			uint32_t a1;
2235 			uint32_t a2;
2236 			uint32_t b1;
2237 			uint32_t b2;
2238 		};
2239         	struct dhack *d;
2240 
2241         	d = (struct dhack *)rxdesc;
2242         	d->a1 = ~(d->a1);
2243         	d->a2 = ~(d->a2);
2244 	}
2245 #endif
2246 
2247 	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2248 	    dsize * i, dsize, BUS_DMASYNC_PREWRITE);
2249 
2250 	rxr->rx_ndescs++;
2251 
2252         return (0);
2253 }
2254 
2255 /*********************************************************************
2256  *
2257  *  Allocate memory for rx_buffer structures. Since we use one
2258  *  rx_buffer per received packet, the maximum number of rx_buffer's
2259  *  that we'll need is equal to the number of receive descriptors
2260  *  that we've allocated.
2261  *
2262  **********************************************************************/
2263 int
2264 ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
2265 {
2266 	struct ix_softc		*sc = rxr->sc;
2267 	struct ifnet		*ifp = &sc->arpcom.ac_if;
2268 	struct ixgbe_rx_buf 	*rxbuf;
2269 	int             	i, bsize, error, size = MCLBYTES;
2270 
2271 	bsize = sizeof(struct ixgbe_rx_buf) * sc->num_rx_desc;
2272 	if (!(rxr->rx_buffers = (struct ixgbe_rx_buf *) malloc(bsize,
2273 	    M_DEVBUF, M_NOWAIT | M_ZERO))) {
2274 		printf("%s: Unable to allocate rx_buffer memory\n",
2275 		    ifp->if_xname);
2276 		error = ENOMEM;
2277 		goto fail;
2278 	}
2279 	rxr->rxtag = rxr->rxdma.dma_tag;
2280 
2281 	rxbuf = rxr->rx_buffers;
2282 	for (i = 0; i < sc->num_rx_desc; i++, rxbuf++) {
2283 		error = bus_dmamap_create(rxr->rxdma.dma_tag, size, 1,
2284 		    size, 0, BUS_DMA_NOWAIT, &rxbuf->map);
2285 		if (error) {
2286 			printf("%s: Unable to create Rx DMA map\n",
2287 			    ifp->if_xname);
2288 			goto fail;
2289 		}
2290 		rxbuf->m_head = NULL;
2291 	}
2292 	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 0,
2293 	    rxr->rxdma.dma_map->dm_mapsize,
2294 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2295 
2296 	return (0);
2297 
2298 fail:
2299 	return (error);
2300 }
2301 
2302 /*********************************************************************
2303  *
2304  *  Initialize a receive ring and its buffers.
2305  *
2306  **********************************************************************/
2307 int
2308 ixgbe_setup_receive_ring(struct rx_ring *rxr)
2309 {
2310 	struct ix_softc		*sc = rxr->sc;
2311 	int			 rsize, error;
2312 
2313 	rsize = roundup2(sc->num_rx_desc *
2314 	    sizeof(union ixgbe_adv_rx_desc), 4096);
2315 	/* Clear the ring contents */
2316 	bzero((void *)rxr->rx_base, rsize);
2317 
2318 	if ((error = ixgbe_allocate_receive_buffers(rxr)) != 0)
2319 		return (error);
2320 
2321 	/* Setup our descriptor indices */
2322 	rxr->next_to_check = 0;
2323 	rxr->last_rx_desc_filled = sc->num_rx_desc - 1;
2324 
2325 	ixgbe_rxfill(rxr);
2326 	if (rxr->rx_ndescs < 1) {
2327 		printf("%s: unable to fill any rx descriptors\n",
2328 		    sc->dev.dv_xname);
2329 		return (ENOBUFS);
2330 	}
2331 
2332 	return (0);
2333 }
2334 
2335 int
2336 ixgbe_rxfill(struct rx_ring *rxr)
2337 {
2338 	struct ix_softc *sc = rxr->sc;
2339 	int		 post = 0;
2340 	int		 i;
2341 
2342 	i = rxr->last_rx_desc_filled;
2343 	while (rxr->rx_ndescs < sc->num_rx_desc) {
2344 		if (++i == sc->num_rx_desc)
2345 			i = 0;
2346 
2347 		if (ixgbe_get_buf(rxr, i) != 0)
2348 			break;
2349 
2350 		rxr->last_rx_desc_filled = i;
2351 		post = 1;
2352 	}
2353 
2354 	return (post);
2355 }
2356 
2357 /*********************************************************************
2358  *
2359  *  Initialize all receive rings.
2360  *
2361  **********************************************************************/
2362 int
2363 ixgbe_setup_receive_structures(struct ix_softc *sc)
2364 {
2365 	struct rx_ring *rxr = sc->rx_rings;
2366 	int i;
2367 
2368 	for (i = 0; i < sc->num_rx_queues; i++, rxr++)
2369 		if (ixgbe_setup_receive_ring(rxr))
2370 			goto fail;
2371 
2372 	return (0);
2373 
2374 fail:
2375 	ixgbe_free_receive_structures(sc);
2376 	return (ENOBUFS);
2377 }
2378 
2379 /*********************************************************************
2380  *
2381  *  Enable receive unit.
2382  *
2383  **********************************************************************/
2384 void
2385 ixgbe_initialize_receive_units(struct ix_softc *sc)
2386 {
2387 	struct	rx_ring	*rxr = sc->rx_rings;
2388 	struct ifnet   *ifp = &sc->arpcom.ac_if;
2389 	uint32_t	rxctrl, fctrl, srrctl, rxcsum;
2390 	uint32_t	reta, mrqc, hlreg, linkvec;
2391 	uint32_t	random[10];
2392 	int		i;
2393 
2394 	/*
2395 	 * Make sure receives are disabled while
2396 	 * setting up the descriptor ring
2397 	 */
2398 	rxctrl = IXGBE_READ_REG(&sc->hw, IXGBE_RXCTRL);
2399 	IXGBE_WRITE_REG(&sc->hw, IXGBE_RXCTRL,
2400 	    rxctrl & ~IXGBE_RXCTRL_RXEN);
2401 
2402 	/* Enable broadcasts */
2403 	fctrl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
2404 	fctrl |= IXGBE_FCTRL_BAM;
2405 	IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, fctrl);
2406 
2407 	hlreg = IXGBE_READ_REG(&sc->hw, IXGBE_HLREG0);
2408 	if (ifp->if_mtu > ETHERMTU)
2409 		hlreg |= IXGBE_HLREG0_JUMBOEN;
2410 	else
2411 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
2412 	IXGBE_WRITE_REG(&sc->hw, IXGBE_HLREG0, hlreg);
2413 
2414 	srrctl = IXGBE_READ_REG(&sc->hw, IXGBE_SRRCTL(0));
2415 	srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
2416 	srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
2417 	if (sc->bigbufs)
2418 		srrctl |= 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2419 	else
2420 		srrctl |= 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2421 	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2422 	IXGBE_WRITE_REG(&sc->hw, IXGBE_SRRCTL(0), srrctl);
2423 
2424 	/* Set Queue moderation rate */
2425 	for (i = 0; i < IXGBE_MSGS; i++)
2426 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(i), DEFAULT_ITR);
2427 
2428 	/* Set Link moderation lower */
2429 	linkvec = sc->num_tx_queues + sc->num_rx_queues;
2430 	IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(linkvec), LINK_ITR);
2431 
2432 	for (i = 0; i < sc->num_rx_queues; i++, rxr++) {
2433 		uint64_t rdba = rxr->rxdma.dma_map->dm_segs[0].ds_addr;
2434 		/* Setup the Base and Length of the Rx Descriptor Ring */
2435 		IXGBE_WRITE_REG(&sc->hw, IXGBE_RDBAL(i),
2436 			       (rdba & 0x00000000ffffffffULL));
2437 		IXGBE_WRITE_REG(&sc->hw, IXGBE_RDBAH(i), (rdba >> 32));
2438 		IXGBE_WRITE_REG(&sc->hw, IXGBE_RDLEN(i),
2439 		    sc->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
2440 
2441 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
2442 		IXGBE_WRITE_REG(&sc->hw, IXGBE_RDH(i), 0);
2443 		IXGBE_WRITE_REG(&sc->hw, IXGBE_RDT(i),
2444 		    rxr->last_rx_desc_filled);
2445 	}
2446 
2447 	rxcsum = IXGBE_READ_REG(&sc->hw, IXGBE_RXCSUM);
2448 
2449 	if (sc->num_rx_queues > 1) {
2450 		/* set up random bits */
2451 		arc4random_buf(&random, sizeof(random));
2452 		switch (sc->num_rx_queues) {
2453 			case 8:
2454 			case 4:
2455 				reta = 0x00010203;
2456 				break;
2457 			case 2:
2458 				reta = 0x00010001;
2459 				break;
2460 			default:
2461 				reta = 0x00000000;
2462 		}
2463 
2464 		/* Set up the redirection table */
2465 		for (i = 0; i < 32; i++) {
2466 			IXGBE_WRITE_REG(&sc->hw, IXGBE_RETA(i), reta);
2467 			if (sc->num_rx_queues > 4) {
2468 				++i;
2469 				IXGBE_WRITE_REG(&sc->hw,
2470 				    IXGBE_RETA(i), 0x04050607);
2471 			}
2472 		}
2473 
2474 		/* Now fill our hash function seeds */
2475 		for (i = 0; i < 10; i++)
2476 			IXGBE_WRITE_REG_ARRAY(&sc->hw,
2477 			    IXGBE_RSSRK(0), i, random[i]);
2478 
2479 		mrqc = IXGBE_MRQC_RSSEN
2480 		    /* Perform hash on these packet types */
2481 		    | IXGBE_MRQC_RSS_FIELD_IPV4
2482 		    | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
2483 		    | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
2484 		    | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
2485 		    | IXGBE_MRQC_RSS_FIELD_IPV6_EX
2486 		    | IXGBE_MRQC_RSS_FIELD_IPV6
2487 		    | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
2488 		    | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
2489 		    | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2490 		IXGBE_WRITE_REG(&sc->hw, IXGBE_MRQC, mrqc);
2491 
2492 		/* RSS and RX IPP Checksum are mutually exclusive */
2493 		rxcsum |= IXGBE_RXCSUM_PCSD;
2494 	}
2495 
2496 #if defined(IX_CSUM_OFFLOAD)
2497 	rxcsum |= IXGBE_RXCSUM_PCSD;
2498 #endif
2499 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
2500 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
2501 
2502 	IXGBE_WRITE_REG(&sc->hw, IXGBE_RXCSUM, rxcsum);
2503 
2504 	/* Enable Receive engine */
2505 	rxctrl |= (IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS);
2506 	IXGBE_WRITE_REG(&sc->hw, IXGBE_RXCTRL, rxctrl);
2507 
2508 	return;
2509 }
2510 
2511 /*********************************************************************
2512  *
2513  *  Free all receive rings.
2514  *
2515  **********************************************************************/
2516 void
2517 ixgbe_free_receive_structures(struct ix_softc *sc)
2518 {
2519 	struct rx_ring *rxr = sc->rx_rings;
2520 	int		i;
2521 
2522 	for (i = 0; i < sc->num_rx_queues; i++, rxr++) {
2523 		ixgbe_free_receive_buffers(rxr);
2524 	}
2525 }
2526 
2527 /*********************************************************************
2528  *
2529  *  Free receive ring data structures
2530  *
2531  **********************************************************************/
2532 void
2533 ixgbe_free_receive_buffers(struct rx_ring *rxr)
2534 {
2535 	struct ix_softc		*sc = NULL;
2536 	struct ixgbe_rx_buf	*rxbuf = NULL;
2537 	int			 i;
2538 
2539 	INIT_DEBUGOUT("free_receive_buffers: begin");
2540 	sc = rxr->sc;
2541 	if (rxr->rx_buffers != NULL) {
2542 		rxbuf = rxr->rx_buffers;
2543 		for (i = 0; i < sc->num_rx_desc; i++, rxbuf++) {
2544 			if (rxbuf->m_head != NULL) {
2545 				bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map,
2546 				    0, rxbuf->map->dm_mapsize,
2547 				    BUS_DMASYNC_POSTREAD);
2548 				bus_dmamap_unload(rxr->rxdma.dma_tag, rxbuf->map);
2549 				m_freem(rxbuf->m_head);
2550 				rxbuf->m_head = NULL;
2551 			}
2552 			bus_dmamap_destroy(rxr->rxdma.dma_tag, rxbuf->map);
2553 			rxbuf->map = NULL;
2554 		}
2555 	}
2556 	if (rxr->rx_buffers != NULL) {
2557 		free(rxr->rx_buffers, M_DEVBUF);
2558 		rxr->rx_buffers = NULL;
2559 	}
2560 
2561 	if (rxr->rxtag != NULL)
2562 		rxr->rxtag = NULL;
2563 
2564 	if (rxr->fmp != NULL) {
2565 		m_freem(rxr->fmp);
2566 		rxr->fmp = NULL;
2567 		rxr->lmp = NULL;
2568 	}
2569 }
2570 
2571 /*********************************************************************
2572  *
2573  *  This routine executes in interrupt context. It replenishes
2574  *  the mbufs in the descriptor and sends data which has been
2575  *  dma'ed into host memory to upper layer.
2576  *
2577  *  We loop at most count times if count is > 0, or until done if
2578  *  count < 0.
2579  *
2580  *********************************************************************/
2581 int
2582 ixgbe_rxeof(struct rx_ring *rxr, int count)
2583 {
2584 	struct ix_softc 	*sc = rxr->sc;
2585 	struct ifnet   		*ifp = &sc->arpcom.ac_if;
2586 	struct mbuf    		*m;
2587 	uint8_t			 accept_frame = 0;
2588 	uint8_t		    	 eop = 0;
2589 	uint16_t		 len, desc_len, prev_len_adj;
2590 	uint32_t		 staterr;
2591 	struct ixgbe_rx_buf	*rxbuf;
2592 	union ixgbe_adv_rx_desc	*rxdesc;
2593 	size_t			 dsize = sizeof(union ixgbe_adv_rx_desc);
2594 	int			 i;
2595 
2596 	if (!ISSET(ifp->if_flags, IFF_RUNNING))
2597 		return FALSE;
2598 
2599 	i = rxr->next_to_check;
2600 
2601 	while (count != 0 && rxr->rx_ndescs > 0) {
2602 		m = NULL;
2603 
2604 		rxdesc = &rxr->rx_base[i];
2605 		rxbuf = &rxr->rx_buffers[i];
2606 
2607 		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2608 		    dsize * i, dsize,
2609 		    BUS_DMASYNC_POSTREAD);
2610 
2611 		staterr = letoh32(rxdesc->wb.upper.status_error);
2612 		if (!ISSET(staterr, IXGBE_RXD_STAT_DD)) {
2613 			bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2614 			    dsize * i, dsize,
2615 			    BUS_DMASYNC_PREREAD);
2616 			break;
2617 		}
2618 
2619 		/* pull the mbuf off the ring */
2620 		bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map, 0,
2621 		    rxbuf->map->dm_mapsize,
2622 		    BUS_DMASYNC_POSTREAD);
2623 		bus_dmamap_unload(rxr->rxdma.dma_tag, rxbuf->map);
2624 		m = rxbuf->m_head;
2625 		rxbuf->m_head = NULL;
2626 
2627 		if (m == NULL) {
2628 			panic("%s: ixgbe_rxeof: NULL mbuf in slot %d "
2629 			    "(nrx %d, filled %d)", sc->dev.dv_xname,
2630 			    i, rxr->rx_ndescs,
2631 			    rxr->last_rx_desc_filled);
2632 		}
2633 
2634 		m_cluncount(m, 1);
2635 		rxr->rx_ndescs--;
2636 
2637 		accept_frame = 1;
2638 		prev_len_adj = 0;
2639 		desc_len = letoh16(rxdesc->wb.upper.length);
2640 
2641 		if (staterr & IXGBE_RXD_STAT_EOP) {
2642 			count--;
2643 			eop = 1;
2644 		} else {
2645 			eop = 0;
2646 		}
2647 		len = desc_len;
2648 
2649 		if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)
2650 			accept_frame = 0;
2651 
2652 		if (accept_frame) {
2653 			m->m_len = len;
2654 
2655 			/* XXX ixgbe_realign() STRICT_ALIGN */
2656 
2657 			if (rxr->fmp == NULL) {
2658 				m->m_pkthdr.len = m->m_len;
2659 				rxr->fmp = m; /* Store the first mbuf */
2660 				rxr->lmp = m;
2661 			} else {
2662 				/* Chain mbuf's together */
2663 				m->m_flags &= ~M_PKTHDR;
2664 #if 0
2665 				/*
2666 				 * Adjust length of previous mbuf in chain if
2667 				 * we received less than 4 bytes in the last
2668 				 * descriptor.
2669 				 */
2670 				if (prev_len_adj > 0) {
2671 					rxr->lmp->m_len -= prev_len_adj;
2672 					rxr->fmp->m_pkthdr.len -= prev_len_adj;
2673 				}
2674 #endif
2675 				rxr->lmp->m_next = m;
2676 				rxr->lmp = m;
2677 				rxr->fmp->m_pkthdr.len += m->m_len;
2678 			}
2679 
2680 			if (eop) {
2681 				ifp->if_ipackets++;
2682 
2683 				m = rxr->fmp;
2684 				m->m_pkthdr.rcvif = ifp;
2685 
2686 				rxr->packet_count++;
2687 				rxr->byte_count += rxr->fmp->m_pkthdr.len;
2688 
2689 				ixgbe_rx_checksum(sc, staterr, m);
2690 
2691 #if NVLAN > 0
2692 				if (staterr & IXGBE_RXD_STAT_VP) {
2693 					m->m_pkthdr.ether_vtag =
2694 					    letoh16(rxdesc->wb.upper.vlan);
2695 					m->m_flags |= M_VLANTAG;
2696 				}
2697 #endif
2698 #if NBPFILTER > 0
2699 				if (ifp->if_bpf)
2700 					bpf_mtap_ether(ifp->if_bpf, m,
2701 					    BPF_DIRECTION_IN);
2702 #endif
2703 
2704 				ether_input_mbuf(ifp, m);
2705 
2706 				rxr->fmp = NULL;
2707 				rxr->lmp = NULL;
2708 			}
2709 		} else {
2710 			sc->dropped_pkts++;
2711 
2712 			if (rxr->fmp != NULL) {
2713 				m_freem(rxr->fmp);
2714 				rxr->fmp = NULL;
2715 				rxr->lmp = NULL;
2716 			}
2717 
2718 			m_freem(m);
2719 		}
2720 
2721 		/* Zero out the receive descriptors status  */
2722 		rxdesc->wb.upper.status_error = 0;
2723 
2724 		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2725 		    dsize * i, dsize,
2726 		    BUS_DMASYNC_PREREAD);
2727 
2728 		/* Advance our pointers to the next descriptor. */
2729 		if (++i == sc->num_rx_desc)
2730 			i = 0;
2731 	}
2732 	rxr->next_to_check = i;
2733 
2734 	if (!(staterr & IXGBE_RXD_STAT_DD))
2735 		return FALSE;
2736 
2737 	return TRUE;
2738 }
2739 
2740 /*********************************************************************
2741  *
2742  *  Verify that the hardware indicated that the checksum is valid.
2743  *  Inform the stack about the status of checksum so that stack
2744  *  doesn't spend time verifying the checksum.
2745  *
2746  *********************************************************************/
2747 void
2748 ixgbe_rx_checksum(struct ix_softc *sc,
2749     uint32_t staterr, struct mbuf * mp)
2750 {
2751 	uint16_t status = (uint16_t) staterr;
2752 	uint8_t  errors = (uint8_t) (staterr >> 24);
2753 
2754 	if (status & IXGBE_RXD_STAT_IPCS) {
2755 		/* Did it pass? */
2756 		if (!(errors & IXGBE_RXD_ERR_IPE)) {
2757 			/* IP Checksum Good */
2758 			mp->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
2759 		} else
2760 			mp->m_pkthdr.csum_flags = 0;
2761 	}
2762 
2763 	if (status & IXGBE_RXD_STAT_L4CS) {
2764 		/* Did it pass? */
2765 		if (!(errors & IXGBE_RXD_ERR_TCPE))
2766 			mp->m_pkthdr.csum_flags |=
2767 				M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
2768 	}
2769 
2770 }
2771 
2772 void
2773 ixgbe_enable_hw_vlans(struct ix_softc *sc)
2774 {
2775 	uint32_t	ctrl;
2776 
2777 	ixgbe_disable_intr(sc);
2778 	ctrl = IXGBE_READ_REG(&sc->hw, IXGBE_VLNCTRL);
2779 	ctrl |= IXGBE_VLNCTRL_VME;
2780 	ctrl &= ~IXGBE_VLNCTRL_CFIEN;
2781 	ctrl &= ~IXGBE_VLNCTRL_VFE;
2782 	IXGBE_WRITE_REG(&sc->hw, IXGBE_VLNCTRL, ctrl);
2783 	ixgbe_enable_intr(sc);
2784 }
2785 
2786 void
2787 ixgbe_enable_intr(struct ix_softc *sc)
2788 {
2789 	struct ixgbe_hw *hw = &sc->hw;
2790 	uint32_t mask = IXGBE_EIMS_ENABLE_MASK;
2791 
2792 	/* Enable Fan Failure detection */
2793 	if (hw->phy.media_type == ixgbe_media_type_copper)
2794 		    mask |= IXGBE_EIMS_GPI_SDP1;
2795 	/* With RSS we use auto clear */
2796 	if (sc->msix_mem) {
2797 		/* Dont autoclear Link */
2798 		mask &= ~IXGBE_EIMS_OTHER;
2799 		mask &= ~IXGBE_EIMS_LSC;
2800 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAC,
2801 		    sc->eims_mask | mask);
2802 	}
2803 
2804 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
2805 	IXGBE_WRITE_FLUSH(hw);
2806 
2807 	return;
2808 }
2809 
2810 void
2811 ixgbe_disable_intr(struct ix_softc *sc)
2812 {
2813 	if (sc->msix_mem)
2814 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAC, 0);
2815 	IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, ~0);
2816 	IXGBE_WRITE_FLUSH(&sc->hw);
2817 	return;
2818 }
2819 
2820 uint16_t
2821 ixgbe_read_pci_cfg(struct ixgbe_hw *hw, uint32_t reg)
2822 {
2823 	struct pci_attach_args	*pa;
2824 	uint16_t value;
2825 
2826 	pa = ((struct ixgbe_osdep *)hw->back)->os_pa;
2827 
2828 	/* Should we do read/mask/write...?  16 vs 32 bit!!! */
2829 	value = pci_conf_read(pa->pa_pc, pa->pa_tag, reg) & 0xffff;
2830 
2831 	return (value);
2832 }
2833 
2834 void
2835 ixgbe_set_ivar(struct ix_softc *sc, uint16_t entry, uint8_t vector)
2836 {
2837 	uint32_t ivar, index;
2838 
2839 	vector |= IXGBE_IVAR_ALLOC_VAL;
2840 	index = (entry >> 2) & 0x1F;
2841 	ivar = IXGBE_READ_REG(&sc->hw, IXGBE_IVAR(index));
2842 	ivar &= ~(0xFF << (8 * (entry & 0x3)));
2843 	ivar |= (vector << (8 * (entry & 0x3)));
2844 	IXGBE_WRITE_REG(&sc->hw, IXGBE_IVAR(index), ivar);
2845 }
2846 
2847 void
2848 ixgbe_configure_ivars(struct ix_softc *sc)
2849 {
2850 	struct  tx_ring *txr = sc->tx_rings;
2851 	struct  rx_ring *rxr = sc->rx_rings;
2852 	int		 i;
2853 
2854         for (i = 0; i < sc->num_rx_queues; i++, rxr++) {
2855                 ixgbe_set_ivar(sc, IXGBE_IVAR_RX_QUEUE(i), rxr->msix);
2856 		sc->eims_mask |= rxr->eims;
2857 	}
2858 
2859         for (i = 0; i < sc->num_tx_queues; i++, txr++) {
2860 		ixgbe_set_ivar(sc, IXGBE_IVAR_TX_QUEUE(i), txr->msix);
2861 		sc->eims_mask |= txr->eims;
2862 	}
2863 
2864 	/* For the Link interrupt */
2865         ixgbe_set_ivar(sc, IXGBE_IVAR_OTHER_CAUSES_INDEX,
2866 	    sc->linkvec);
2867 	sc->eims_mask |= IXGBE_IVAR_OTHER_CAUSES_INDEX;
2868 }
2869 
2870 /**********************************************************************
2871  *
2872  *  Update the board statistics counters.
2873  *
2874  **********************************************************************/
2875 void
2876 ixgbe_update_stats_counters(struct ix_softc *sc)
2877 {
2878 	struct ifnet   *ifp = &sc->arpcom.ac_if;;
2879 	struct ixgbe_hw *hw = &sc->hw;
2880 	uint32_t  missed_rx = 0, bprc, lxon, lxoff, total;
2881 	int	i;
2882 
2883 	sc->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
2884 
2885 	for (i = 0; i < 8; i++) {
2886 		int mp;
2887 		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
2888 		missed_rx += mp;
2889         	sc->stats.mpc[i] += mp;
2890 		sc->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
2891 	}
2892 
2893 	/* Hardware workaround, gprc counts missed packets */
2894 	sc->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
2895 	sc->stats.gprc -= missed_rx;
2896 
2897 	sc->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
2898 	sc->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
2899 	sc->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
2900 
2901 	/*
2902 	 * Workaround: mprc hardware is incorrectly counting
2903 	 * broadcasts, so for now we subtract those.
2904 	 */
2905 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
2906 	sc->stats.bprc += bprc;
2907 	sc->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
2908 	sc->stats.mprc -= bprc;
2909 
2910 	sc->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
2911 	sc->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
2912 	sc->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
2913 	sc->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
2914 	sc->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
2915 	sc->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
2916 	sc->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
2917 	sc->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
2918 
2919 	sc->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
2920 	sc->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
2921 
2922 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
2923 	sc->stats.lxontxc += lxon;
2924 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
2925 	sc->stats.lxofftxc += lxoff;
2926 	total = lxon + lxoff;
2927 
2928 	sc->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
2929 	sc->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
2930 	sc->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
2931 	sc->stats.gptc -= total;
2932 	sc->stats.mptc -= total;
2933 	sc->stats.ptc64 -= total;
2934 	sc->stats.gotc -= total * ETHER_MIN_LEN;
2935 
2936 	sc->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
2937 	sc->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
2938 	sc->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
2939 	sc->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
2940 	sc->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
2941 	sc->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
2942 	sc->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
2943 	sc->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
2944 	sc->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
2945 	sc->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
2946 
2947 #if 0
2948 	/* Fill out the OS statistics structure */
2949 	ifp->if_ipackets = sc->stats.gprc;
2950 	ifp->if_opackets = sc->stats.gptc;
2951 	ifp->if_ibytes = sc->stats.gorc;
2952 	ifp->if_obytes = sc->stats.gotc;
2953 	ifp->if_imcasts = sc->stats.mprc;
2954 #endif
2955 	ifp->if_collisions = 0;
2956 	ifp->if_oerrors = sc->watchdog_events;
2957 	ifp->if_ierrors = missed_rx + sc->stats.crcerrs + sc->stats.rlec;
2958 }
2959 
2960 #ifdef IX_DEBUG
2961 /**********************************************************************
2962  *
2963  *  This routine is called only when ixgbe_display_debug_stats is enabled.
2964  *  This routine provides a way to take a look at important statistics
2965  *  maintained by the driver and hardware.
2966  *
2967  **********************************************************************/
2968 void
2969 ixgbe_print_hw_stats(struct ix_softc * sc)
2970 {
2971 	struct ifnet   *ifp = &sc->arpcom.ac_if;;
2972 
2973 	printf("%s: mbuf alloc failed %lu, mbuf cluster failed %lu, "
2974 	    "missed pkts %llu, rx len errs %llu, crc errs %llu, "
2975 	    "dropped pkts %lu, watchdog timeouts %ld, "
2976 	    "XON rx %llu, XON tx %llu, XOFF rx %llu, XOFF tx %llu, "
2977 	    "total pkts rx %llu, good pkts rx %llu, good pkts tx %llu, "
2978 	    "tso tx %lu\n",
2979 	    ifp->if_xname,
2980 	    sc->mbuf_alloc_failed,
2981 	    sc->mbuf_cluster_failed,
2982 	    (long long)sc->stats.mpc[0],
2983 	    (long long)sc->stats.roc + (long long)sc->stats.ruc,
2984 	    (long long)sc->stats.crcerrs,
2985 	    sc->dropped_pkts,
2986 	    sc->watchdog_events,
2987 	    (long long)sc->stats.lxonrxc,
2988 	    (long long)sc->stats.lxontxc,
2989 	    (long long)sc->stats.lxoffrxc,
2990 	    (long long)sc->stats.lxofftxc,
2991 	    (long long)sc->stats.tpr,
2992 	    (long long)sc->stats.gprc,
2993 	    (long long)sc->stats.gptc,
2994 	    sc->tso_tx);
2995 }
2996 #endif
2997 
2998 #ifndef NO_82598_A0_SUPPORT
2999 /*
3000  * A0 Workaround: invert descriptor for hardware
3001  */
3002 void
3003 desc_flip(void *desc)
3004 {
3005         struct dhack {uint32_t a1; uint32_t a2; uint32_t b1; uint32_t b2;};
3006         struct dhack *d;
3007 
3008         d = (struct dhack *)desc;
3009         d->a1 = ~(d->a1);
3010         d->a2 = ~(d->a2);
3011         d->b1 = ~(d->b1);
3012         d->b2 = ~(d->b2);
3013         d->b2 &= 0xFFFFFFF0;
3014         d->b1 &= ~IXGBE_ADVTXD_DCMD_RS;
3015 }
3016 #endif
3017 
3018 
3019 
3020