xref: /openbsd-src/sys/dev/pci/if_ix.c (revision a28daedfc357b214be5c701aa8ba8adb29a7f1c2)
1 /*	$OpenBSD: if_ix.c,v 1.17 2009/04/29 13:18:58 jsg Exp $	*/
2 
3 /******************************************************************************
4 
5   Copyright (c) 2001-2008, Intel Corporation
6   All rights reserved.
7 
8   Redistribution and use in source and binary forms, with or without
9   modification, are permitted provided that the following conditions are met:
10 
11    1. Redistributions of source code must retain the above copyright notice,
12       this list of conditions and the following disclaimer.
13 
14    2. Redistributions in binary form must reproduce the above copyright
15       notice, this list of conditions and the following disclaimer in the
16       documentation and/or other materials provided with the distribution.
17 
18    3. Neither the name of the Intel Corporation nor the names of its
19       contributors may be used to endorse or promote products derived from
20       this software without specific prior written permission.
21 
22   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32   POSSIBILITY OF SUCH DAMAGE.
33 
34 ******************************************************************************/
35 /*$FreeBSD: src/sys/dev/ixgbe/ixgbe.c,v 1.5 2008/05/16 18:46:30 jfv Exp $*/
36 
37 #include <dev/pci/if_ix.h>
38 
39 /*********************************************************************
40  *  Driver version
41  *********************************************************************/
42 
43 #define IXGBE_DRIVER_VERSION	"1.4.4"
44 
45 /*********************************************************************
46  *  PCI Device ID Table
47  *
48  *  Used by probe to select the devices this driver will attach to
49  *********************************************************************/
50 
51 const struct pci_matchid ixgbe_devices[] = {
52 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AF_DUAL },
53 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AF },
54 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AT_DUAL },
55 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598EB_CX4 },
56 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598EB_CX4_DUAL },
57 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598EB_XF_LR },
58 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AT }
59 };
60 
61 /*********************************************************************
62  *  Function prototypes
63  *********************************************************************/
64 int	ixgbe_probe(struct device *, void *, void *);
65 void	ixgbe_attach(struct device *, struct device *, void *);
66 int	ixgbe_detach(struct device *, int);
67 void	ixgbe_power(int, void *);
68 void	ixgbe_shutdown(void *);
69 void	ixgbe_start(struct ifnet *);
70 void	ixgbe_start_locked(struct tx_ring *, struct ifnet *);
71 int	ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
72 void	ixgbe_watchdog(struct ifnet *);
73 void	ixgbe_init(void *);
74 void	ixgbe_stop(void *);
75 void	ixgbe_media_status(struct ifnet *, struct ifmediareq *);
76 int	ixgbe_media_change(struct ifnet *);
77 void	ixgbe_identify_hardware(struct ix_softc *);
78 int	ixgbe_allocate_pci_resources(struct ix_softc *);
79 int	ixgbe_allocate_legacy(struct ix_softc *);
80 int	ixgbe_allocate_queues(struct ix_softc *);
81 void	ixgbe_free_pci_resources(struct ix_softc *);
82 void	ixgbe_local_timer(void *);
83 int	ixgbe_hardware_init(struct ix_softc *);
84 void	ixgbe_setup_interface(struct ix_softc *);
85 
86 int	ixgbe_allocate_transmit_buffers(struct tx_ring *);
87 int	ixgbe_setup_transmit_structures(struct ix_softc *);
88 int	ixgbe_setup_transmit_ring(struct tx_ring *);
89 void	ixgbe_initialize_transmit_units(struct ix_softc *);
90 void	ixgbe_free_transmit_structures(struct ix_softc *);
91 void	ixgbe_free_transmit_buffers(struct tx_ring *);
92 
93 int	ixgbe_allocate_receive_buffers(struct rx_ring *);
94 int	ixgbe_setup_receive_structures(struct ix_softc *);
95 int	ixgbe_setup_receive_ring(struct rx_ring *);
96 void	ixgbe_initialize_receive_units(struct ix_softc *);
97 void	ixgbe_free_receive_structures(struct ix_softc *);
98 void	ixgbe_free_receive_buffers(struct rx_ring *);
99 int	ixgbe_rxfill(struct rx_ring *);
100 
101 void	ixgbe_enable_intr(struct ix_softc *);
102 void	ixgbe_disable_intr(struct ix_softc *);
103 void	ixgbe_update_stats_counters(struct ix_softc *);
104 int	ixgbe_txeof(struct tx_ring *);
105 int	ixgbe_rxeof(struct rx_ring *, int);
106 void	ixgbe_rx_checksum(struct ix_softc *, uint32_t, struct mbuf *);
107 void	ixgbe_set_promisc(struct ix_softc *);
108 void	ixgbe_disable_promisc(struct ix_softc *);
109 void	ixgbe_set_multi(struct ix_softc *);
110 #ifdef IX_DEBUG
111 void	ixgbe_print_hw_stats(struct ix_softc *);
112 #endif
113 void	ixgbe_update_link_status(struct ix_softc *);
114 int	ixgbe_get_buf(struct rx_ring *, int);
115 int	ixgbe_encap(struct tx_ring *, struct mbuf *);
116 void	ixgbe_enable_hw_vlans(struct ix_softc * sc);
117 int	ixgbe_dma_malloc(struct ix_softc *, bus_size_t,
118 		    struct ixgbe_dma_alloc *, int);
119 void	ixgbe_dma_free(struct ix_softc *, struct ixgbe_dma_alloc *);
120 int	ixgbe_tx_ctx_setup(struct tx_ring *, struct mbuf *);
121 int	ixgbe_tso_setup(struct tx_ring *, struct mbuf *, uint32_t *);
122 void	ixgbe_set_ivar(struct ix_softc *, uint16_t, uint8_t);
123 void	ixgbe_configure_ivars(struct ix_softc *);
124 uint8_t	*ixgbe_mc_array_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);
125 
126 /* Legacy (single vector) interrupt handler */
127 int	ixgbe_legacy_irq(void *);
128 
129 #ifndef NO_82598_A0_SUPPORT
130 void	desc_flip(void *);
131 #endif
132 
133 /*********************************************************************
134  *  OpenBSD Device Interface Entry Points
135  *********************************************************************/
136 
137 struct cfdriver ix_cd = {
138 	0, "ix", DV_IFNET
139 };
140 
141 struct cfattach ix_ca = {
142 	sizeof(struct ix_softc), ixgbe_probe, ixgbe_attach, ixgbe_detach
143 };
144 
145 /* Total number of Interfaces - needed for config sanity check */
146 static int ixgbe_total_ports;
147 
148 /*********************************************************************
149  *  Device identification routine
150  *
151  *  ixgbe_probe determines whether the driver should attach to this
152  *  device, based on the device's PCI vendor/device ID.
153  *
154  *  return nonzero on a match, 0 otherwise
155  *********************************************************************/
156 
157 int
158 ixgbe_probe(struct device *parent, void *match, void *aux)
159 {
160 	INIT_DEBUGOUT("ixgbe_probe: begin");
161 
162 	return (pci_matchbyid((struct pci_attach_args *)aux, ixgbe_devices,
163 	    sizeof(ixgbe_devices)/sizeof(ixgbe_devices[0])));
164 }
165 
166 /*********************************************************************
167  *  Device initialization routine
168  *
169  *  The attach entry point is called when the driver is being loaded.
170  *  This routine identifies the type of hardware, allocates all resources
171  *  and initializes the hardware.
172  *
173  *  return 0 on success, positive on failure
174  *********************************************************************/
175 
176 void
177 ixgbe_attach(struct device *parent, struct device *self, void *aux)
178 {
179 	struct pci_attach_args	*pa = (struct pci_attach_args *)aux;
180 	struct ix_softc		*sc = (struct ix_softc *)self;
181 	int			 error = 0;
182 	uint32_t			 ctrl_ext;
183 
184 	INIT_DEBUGOUT("ixgbe_attach: begin");
185 
186 	sc->osdep.os_sc = sc;
187 	sc->osdep.os_pa = pa;
188 
189 	/* Core Lock Init*/
190 	mtx_init(&sc->core_mtx, IPL_NET);
191 
192 	/* Set up the timer callout */
193 	timeout_set(&sc->timer, ixgbe_local_timer, sc);
194 
195 	/* Determine hardware revision */
196 	ixgbe_identify_hardware(sc);
197 
198 	/* Indicate to RX setup whether to use Jumbo Clusters */
199 	sc->bigbufs = FALSE;
200 	sc->num_tx_desc = DEFAULT_TXD;
201 	sc->num_rx_desc = DEFAULT_RXD;
202 	sc->rx_process_limit = 100;	/* XXX */
203 
204 	/* Do base PCI setup - map BAR0 */
205 	if (ixgbe_allocate_pci_resources(sc)) {
206 		printf(": allocation of PCI resources failed\n");
207 		goto err_out;
208 	}
209 
210 	/* Allocate our TX/RX Queues */
211 	if (ixgbe_allocate_queues(sc))
212 		goto err_out;
213 
214 	/* Initialize the shared code */
215 	sc->hw.mac.type = ixgbe_mac_82598EB;
216 	if (ixgbe_init_ops_82598(&sc->hw) != 0) {
217 		printf(": failed to init the 82598EB\n");
218 		goto err_late;
219 	}
220 
221 	/* Initialize the hardware */
222 	if (ixgbe_hardware_init(sc)) {
223 		printf(": unable to initialize the hardware\n");
224 		goto err_late;
225 	}
226 
227 	/* XXX sc->msix > 1 && ixgbe_allocate_msix() */
228 	error = ixgbe_allocate_legacy(sc);
229 	if (error)
230 		goto err_late;
231 
232 	/* Setup OS specific network interface */
233 	ixgbe_setup_interface(sc);
234 
235 	/* Initialize statistics */
236 	ixgbe_update_stats_counters(sc);
237 
238 	/* let hardware know driver is loaded */
239 	ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
240 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
241 	IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
242 
243 	sc->powerhook = powerhook_establish(ixgbe_power, sc);
244 	sc->shutdownhook = shutdownhook_establish(ixgbe_shutdown, sc);
245 
246 	printf(", address %s\n", ether_sprintf(sc->hw.mac.addr));
247 
248 	INIT_DEBUGOUT("ixgbe_attach: end");
249 	return;
250 
251 err_late:
252 	ixgbe_free_transmit_structures(sc);
253 	ixgbe_free_receive_structures(sc);
254 err_out:
255 	ixgbe_free_pci_resources(sc);
256 }
257 
258 /*********************************************************************
259  *  Device removal routine
260  *
261  *  The detach entry point is called when the driver is being removed.
262  *  This routine stops the sc and deallocates all the resources
263  *  that were allocated for driver operation.
264  *
265  *  return 0 on success, positive on failure
266  *********************************************************************/
267 
268 int
269 ixgbe_detach(struct device *self, int flags)
270 {
271 	struct ix_softc *sc = (struct ix_softc *)self;
272 	struct ifnet *ifp = &sc->arpcom.ac_if;
273 	uint32_t	ctrl_ext;
274 
275 	INIT_DEBUGOUT("ixgbe_detach: begin");
276 
277 	ixgbe_stop(sc);
278 
279 	/* let hardware know driver is unloading */
280 	ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
281 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
282 	IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
283 
284 	ether_ifdetach(ifp);
285 	if_detach(ifp);
286 
287 	timeout_del(&sc->timer);
288 	ixgbe_free_pci_resources(sc);
289 
290 	ixgbe_free_transmit_structures(sc);
291 	ixgbe_free_receive_structures(sc);
292 
293 	return (0);
294 }
295 
296 void
297 ixgbe_power(int why, void *arg)
298 {
299 	struct ix_softc *sc = (struct ix_softc *)arg;
300 	struct ifnet *ifp;
301 
302 	if (why == PWR_RESUME) {
303 		ifp = &sc->arpcom.ac_if;
304 		if (ifp->if_flags & IFF_UP)
305 			ixgbe_init(sc);
306 	}
307 }
308 
309 /*********************************************************************
310  *
311  *  Shutdown entry point
312  *
313  **********************************************************************/
314 
315 void
316 ixgbe_shutdown(void *arg)
317 {
318 	struct ix_softc *sc = (struct ix_softc *)arg;
319 
320 	ixgbe_stop(sc);
321 }
322 
323 /*********************************************************************
324  *  Transmit entry point
325  *
326  *  ixgbe_start is called by the stack to initiate a transmit.
327  *  The driver will remain in this routine as long as there are
328  *  packets to transmit and transmit resources are available.
329  *  If resources are not available, the stack is notified and the
330  *  packet stays on the send queue.
331  **********************************************************************/
332 
333 void
334 ixgbe_start_locked(struct tx_ring *txr, struct ifnet * ifp)
335 {
336 	struct mbuf  		*m_head;
337 	struct ix_softc		*sc = txr->sc;
338 	int			 post = 0;
339 
340 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
341 		return;
342 
343 	if (!sc->link_active)
344 		return;
345 
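	/*
	 * Sync the descriptor ring for CPU access before filling in new
	 * descriptors; the matching PREREAD|PREWRITE sync below hands the
	 * ring back to the hardware before the tail register is advanced.
	 */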
346 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0,
347 	    txr->txdma.dma_map->dm_mapsize,
348 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
349 
350 	for (;;) {
351 		IFQ_POLL(&ifp->if_snd, m_head);
352 		if (m_head == NULL)
353 			break;
354 
355 		if (ixgbe_encap(txr, m_head)) {
356 			ifp->if_flags |= IFF_OACTIVE;
357 			break;
358 		}
359 
360 		IFQ_DEQUEUE(&ifp->if_snd, m_head);
361 
362 #if NBPFILTER > 0
363 		if (ifp->if_bpf)
364 			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
365 #endif
366 
367 		/* Set timeout in case hardware has problems transmitting */
368 		txr->watchdog_timer = IXGBE_TX_TIMEOUT;
369 		ifp->if_timer = IXGBE_TX_TIMEOUT;
370 
371 		post = 1;
372 	}
373 
374         bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
375 	    0, txr->txdma.dma_map->dm_mapsize,
376             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
377 
378 	/*
379 	 * Advance the Transmit Descriptor Tail (TDT); this tells the
380 	 * hardware that these frames are available to transmit.
381 	 */
382 	if (post)
383 		IXGBE_WRITE_REG(&sc->hw, IXGBE_TDT(txr->me),
384 		    txr->next_avail_tx_desc);
385 }
386 
387 
388 void
389 ixgbe_start(struct ifnet *ifp)
390 {
391 	struct ix_softc *sc = ifp->if_softc;
392 	struct tx_ring	*txr = sc->tx_rings;
393 	uint32_t queue = 0;
394 
395 #if 0
396 	/*
397 	 * This is really just here for testing
398 	 * TX multiqueue, ultimately what is
399 	 * needed is the flow support in the stack
400 	 * and appropriate logic here to deal with
401 	 * it. -jfv
402 	 */
403 	if (sc->num_tx_queues > 1)
404 		queue = (curcpu % sc->num_tx_queues);
405 #endif
406 
407 	txr = &sc->tx_rings[queue];
408 
409 	if (ifp->if_flags & IFF_RUNNING)
410 		ixgbe_start_locked(txr, ifp);
411 
412 	return;
413 }
414 
415 /*********************************************************************
416  *  Ioctl entry point
417  *
418  *  ixgbe_ioctl is called when the user wants to configure the
419  *  interface.
420  *
421  *  return 0 on success, positive on failure
422  **********************************************************************/
423 
424 int
425 ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
426 {
427 	struct ix_softc	*sc = ifp->if_softc;
428 	struct ifaddr	*ifa = (struct ifaddr *) data;
429 	struct ifreq	*ifr = (struct ifreq *) data;
430 	int		s, error = 0;
431 
432 	s = splnet();
433 
434 	switch (command) {
435 	case SIOCSIFADDR:
436 		IOCTL_DEBUGOUT("ioctl: SIOCxIFADDR (Get/Set Interface Addr)");
437 		ifp->if_flags |= IFF_UP;
438 		if (!(ifp->if_flags & IFF_RUNNING))
439 			ixgbe_init(sc);
440 #ifdef INET
441 		if (ifa->ifa_addr->sa_family == AF_INET)
442 			arp_ifinit(&sc->arpcom, ifa);
443 #endif
444 		break;
445 
446 	case SIOCSIFMTU:
447 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
448 		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ifp->if_hardmtu)
449 			error = EINVAL;
450 		else if (ifp->if_mtu != ifr->ifr_mtu) {
451 			ifp->if_mtu = ifr->ifr_mtu;
452 			sc->max_frame_size =
453 				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
454 			ixgbe_init(sc);
455 		}
456 		break;
457 
458 	case SIOCSIFFLAGS:
459 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
460 		if (ifp->if_flags & IFF_UP) {
461 			if ((ifp->if_flags & IFF_RUNNING)) {
462 				if ((ifp->if_flags ^ sc->if_flags) &
463 				    (IFF_PROMISC | IFF_ALLMULTI)) {
464 					ixgbe_disable_promisc(sc);
465 					ixgbe_set_promisc(sc);
466                                 }
467 			} else
468 				ixgbe_init(sc);
469 		} else
470 			if (ifp->if_flags & IFF_RUNNING)
471 				ixgbe_stop(sc);
472 		sc->if_flags = ifp->if_flags;
473 		break;
474 
475 	case SIOCSIFMEDIA:
476 	case SIOCGIFMEDIA:
477 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
478 		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
479 		break;
480 
481 	default:
482 		error = ether_ioctl(ifp, &sc->arpcom, command, data);
483 	}
484 
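	/*
	 * ether_ioctl() returns ENETRESET when the multicast filter needs
	 * to be reprogrammed; only touch the hardware if the interface is
	 * actually running.
	 */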
485 	if (error == ENETRESET) {
486 		if (ifp->if_flags & IFF_RUNNING) {
487 			ixgbe_disable_intr(sc);
488 			ixgbe_set_multi(sc);
489 			ixgbe_enable_intr(sc);
490 		}
491 		error = 0;
492 	}
493 
494 	splx(s);
495 	return (error);
496 }
497 
498 /*********************************************************************
499  *  Watchdog entry point
500  *
501  *  This routine is called by the local timer
502  *  to detect hardware hangs.
503  *
504  **********************************************************************/
505 
506 void
507 ixgbe_watchdog(struct ifnet * ifp)
508 {
509 	struct ix_softc *sc = (struct ix_softc *)ifp->if_softc;
510 	struct tx_ring *txr = sc->tx_rings;
511 	struct ixgbe_hw *hw = &sc->hw;
512 	int		tx_hang = FALSE;
513 	int		i;
514 
515         /*
516          * The timer is set to 5 every time ixgbe_start() queues a packet.
517          * Then ixgbe_txeof() keeps resetting to 5 as long as it cleans at
518          * least one descriptor.
519          * Finally, anytime all descriptors are clean the timer is
520          * set to 0.
521          */
522 	for (i = 0; i < sc->num_tx_queues; i++, txr++) {
523         	if (txr->watchdog_timer == 0 || --txr->watchdog_timer)
524                 	continue;
525 		else {
526 			tx_hang = TRUE;
527 			break;
528 		}
529 	}
530 	if (tx_hang == FALSE)
531 		return;
532 
533 	/*
534 	 * If we are in this routine because of pause frames, then don't
535 	 * reset the hardware.
536 	 */
537 	if (IXGBE_READ_REG(hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF) {
538 		for (i = 0; i < sc->num_tx_queues; i++, txr++)
539 			txr->watchdog_timer = IXGBE_TX_TIMEOUT;
540 		ifp->if_timer = IXGBE_TX_TIMEOUT;
541 		return;
542 	}
543 
544 
545 	printf("%s: Watchdog timeout -- resetting\n", ifp->if_xname);
546 	for (i = 0; i < sc->num_tx_queues; i++, txr++) {
547 		printf("%s: Queue(%d) tdh = %d, hw tdt = %d\n", ifp->if_xname, i,
548 		    IXGBE_READ_REG(hw, IXGBE_TDH(i)),
549 		    IXGBE_READ_REG(hw, IXGBE_TDT(i)));
550 		printf("%s: TX(%d) desc avail = %d, Next TX to Clean = %d\n", ifp->if_xname,
551 		    i, txr->tx_avail, txr->next_tx_to_clean);
552 	}
553 	ifp->if_flags &= ~IFF_RUNNING;
554 	sc->watchdog_events++;
555 
556 	ixgbe_init(sc);
557 	return;
558 }
559 
560 /*********************************************************************
561  *  Init entry point
562  *
563  *  This routine is used in two ways. It is used by the stack as
564  *  the init entry point in the network interface structure. It is also used
565  *  by the driver as a hw/sw initialization routine to get to a
566  *  consistent state.
567  *
568  *  return 0 on success, positive on failure
569  **********************************************************************/
570 #define IXGBE_MHADD_MFS_SHIFT 16
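/*
 * The MHADD register carries the maximum allowed frame size in its upper
 * 16 bits, which is why ixgbe_init() shifts max_frame_size by
 * IXGBE_MHADD_MFS_SHIFT before programming it for jumbo MTUs.
 */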
571 
572 void
573 ixgbe_init(void *arg)
574 {
575 	struct ix_softc	*sc = (struct ix_softc *)arg;
576 	struct ifnet	*ifp = &sc->arpcom.ac_if;
577 	uint32_t	 txdctl, rxdctl, mhadd, gpie;
578 	int		 i, s;
579 
580 	INIT_DEBUGOUT("ixgbe_init: begin");
581 
582 	s = splnet();
583 
584 	ixgbe_stop(sc);
585 
586 	/* Get the latest mac address, User can use a LAA */
587 	bcopy(sc->arpcom.ac_enaddr, sc->hw.mac.addr,
588 	      IXGBE_ETH_LENGTH_OF_ADDRESS);
589 	ixgbe_hw(&sc->hw, set_rar, 0, sc->hw.mac.addr, 0, 1);
590 	sc->hw.addr_ctrl.rar_used_count = 1;
591 
592 	/* Initialize the hardware */
593 	if (ixgbe_hardware_init(sc)) {
594 		printf("%s: Unable to initialize the hardware\n",
595 		    ifp->if_xname);
596 		splx(s);
597 		return;
598 	}
599 
600 	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
601 		ixgbe_enable_hw_vlans(sc);
602 
603 	/* Prepare transmit descriptors and buffers */
604 	if (ixgbe_setup_transmit_structures(sc)) {
605 		printf("%s: Could not setup transmit structures\n",
606 		    ifp->if_xname);
607 		ixgbe_stop(sc);
608 		splx(s);
609 		return;
610 	}
611 
612 	ixgbe_initialize_transmit_units(sc);
613 
614 	/* Setup Multicast table */
615 	ixgbe_set_multi(sc);
616 
617 	/*
618 	 * If we are resetting to an MTU that fits in a standard
619 	 * 2K cluster, drop back to small RX buffers.
620 	 */
621 	if (sc->max_frame_size <= MCLBYTES)
622 		sc->bigbufs = FALSE;
623 
624 	/* Prepare receive descriptors and buffers */
625 	if (ixgbe_setup_receive_structures(sc)) {
626 		printf("%s: Could not setup receive structures\n", ifp->if_xname);
627 		ixgbe_stop(sc);
628 		splx(s);
629 		return;
630 	}
631 
632 	/* Configure RX settings */
633 	ixgbe_initialize_receive_units(sc);
634 
635 	gpie = IXGBE_READ_REG(&sc->hw, IXGBE_GPIE);
636 	/* Enable Fan Failure Interrupt */
637 	if (sc->hw.phy.media_type == ixgbe_media_type_copper)
638 		gpie |= IXGBE_SDP1_GPIEN;
639 	if (sc->msix) {
640 		/* Enable Enhanced MSIX mode */
641 		gpie |= IXGBE_GPIE_MSIX_MODE;
642 		gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
643 		    IXGBE_GPIE_OCD;
644 	}
645 	IXGBE_WRITE_REG(&sc->hw, IXGBE_GPIE, gpie);
646 
647 	/* Set MTU size */
648 	if (ifp->if_mtu > ETHERMTU) {
649 		mhadd = IXGBE_READ_REG(&sc->hw, IXGBE_MHADD);
650 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
651 		mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
652 		IXGBE_WRITE_REG(&sc->hw, IXGBE_MHADD, mhadd);
653 	}
654 
655 	/* Now enable all the queues */
656 
657 	for (i = 0; i < sc->num_tx_queues; i++) {
658 		txdctl = IXGBE_READ_REG(&sc->hw, IXGBE_TXDCTL(i));
659 		txdctl |= IXGBE_TXDCTL_ENABLE;
660 		/* Set WTHRESH to 8, burst writeback */
661 		txdctl |= (8 << 16);
662 		IXGBE_WRITE_REG(&sc->hw, IXGBE_TXDCTL(i), txdctl);
663 	}
664 
665 	for (i = 0; i < sc->num_rx_queues; i++) {
666 		rxdctl = IXGBE_READ_REG(&sc->hw, IXGBE_RXDCTL(i));
667 		/* PTHRESH set to 32 */
668 		rxdctl |= 0x0020;
669 		rxdctl |= IXGBE_RXDCTL_ENABLE;
670 		IXGBE_WRITE_REG(&sc->hw, IXGBE_RXDCTL(i), rxdctl);
671 	}
672 
673 	timeout_add_sec(&sc->timer, 1);
674 
675 	/* Set up MSI/X routing */
676 	ixgbe_configure_ivars(sc);
677 
678 	ixgbe_enable_intr(sc);
679 
680 	/* Now inform the stack we're ready */
681 	ifp->if_flags |= IFF_RUNNING;
682 	ifp->if_flags &= ~IFF_OACTIVE;
683 
684 	splx(s);
685 }
686 
687 /*********************************************************************
688  *
689  *  Legacy Interrupt Service routine
690  *
691  **********************************************************************/
692 
693 int
694 ixgbe_legacy_irq(void *arg)
695 {
696 	struct ix_softc	*sc = (struct ix_softc *)arg;
697 	struct ifnet	*ifp = &sc->arpcom.ac_if;
698 	uint32_t	 reg_eicr;
699 	struct tx_ring	*txr = sc->tx_rings;
700 	struct rx_ring	*rxr = sc->rx_rings;
701 	struct ixgbe_hw	*hw = &sc->hw;
702 	int		 claimed = 0, refill = 0;
703 
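	/*
	 * In legacy (non-MSI-X) mode EICR is clear-on-read, so keep
	 * reading it until no cause bits remain; each pass services the
	 * RX/TX rings and any fan-failure or link-state causes that were
	 * latched since the last read.
	 */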
704 	for (;;) {
705 		reg_eicr = IXGBE_READ_REG(&sc->hw, IXGBE_EICR);
706 		if (reg_eicr == 0)
707 			break;
708 
709 		claimed = 1;
710 		refill = 0;
711 
712 		if (ifp->if_flags & IFF_RUNNING) {
713 			ixgbe_rxeof(rxr, -1);
714 			ixgbe_txeof(txr);
715 			refill = 1;
716 		}
717 
718 		/* Check for fan failure */
719 		if ((hw->phy.media_type == ixgbe_media_type_copper) &&
720 		    (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
721 			printf("%s: CRITICAL: FAN FAILURE!! "
722 			    "REPLACE IMMEDIATELY!!\n", ifp->if_xname);
723 			IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMS,
724 			    IXGBE_EICR_GPI_SDP1);
725 		}
726 
727 		/* Link status change */
728 		if (reg_eicr & IXGBE_EICR_LSC) {
729 			timeout_del(&sc->timer);
730 		        ixgbe_update_link_status(sc);
731 			timeout_add_sec(&sc->timer, 1);
732 		}
733 
734 		if (refill && ixgbe_rxfill(rxr)) {
735 			/* Advance the Rx Queue "Tail Pointer" */
736 			IXGBE_WRITE_REG(&sc->hw, IXGBE_RDT(rxr->me),
737 			    rxr->last_rx_desc_filled);
738 		}
739 	}
740 
741 	if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
742 		ixgbe_start_locked(txr, ifp);
743 
744 	return (claimed);
745 }
746 
747 /*********************************************************************
748  *
749  *  Media Ioctl callback
750  *
751  *  This routine is called whenever the user queries the status of
752  *  the interface using ifconfig.
753  *
754  **********************************************************************/
755 void
756 ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
757 {
758 	struct ix_softc *sc = ifp->if_softc;
759 
760 	INIT_DEBUGOUT("ixgbe_media_status: begin");
761 	ixgbe_update_link_status(sc);
762 
763 	ifmr->ifm_status = IFM_AVALID;
764 	ifmr->ifm_active = IFM_ETHER;
765 
766 	if (!sc->link_active) {
767 		ifmr->ifm_active |= IFM_NONE;
768 		return;
769 	}
770 
771 	ifmr->ifm_status |= IFM_ACTIVE;
772 
773 	switch (sc->link_speed) {
774 	case IXGBE_LINK_SPEED_1GB_FULL:
775 		ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
776 		break;
777 	case IXGBE_LINK_SPEED_10GB_FULL:
778 		ifmr->ifm_active |= sc->optics | IFM_FDX;
779 		break;
780 	}
781 }
782 
783 /*********************************************************************
784  *
785  *  Media Ioctl callback
786  *
787  *  This routine is called when the user changes speed/duplex using
788  *  media/mediaopt option with ifconfig.
789  *
790  **********************************************************************/
791 int
792 ixgbe_media_change(struct ifnet * ifp)
793 {
794 	struct ix_softc *sc = ifp->if_softc;
795 	struct ifmedia *ifm = &sc->media;
796 
797 	INIT_DEBUGOUT("ixgbe_media_change: begin");
798 
799 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
800 		return (EINVAL);
801 
802         switch (IFM_SUBTYPE(ifm->ifm_media)) {
803         case IFM_AUTO:
804                 sc->hw.mac.autoneg = TRUE;
805                 sc->hw.phy.autoneg_advertised =
806 		    IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_10GB_FULL;
807                 break;
808         default:
809                 printf("%s: Only auto media type\n", ifp->if_xname);
810 		return (EINVAL);
811         }
812 
813 	return (0);
814 }
815 
816 /*********************************************************************
817  *
818  *  This routine maps the mbufs to tx descriptors.
819  *    WARNING: while this code is using an MQ style infrastructure,
820  *    it would NOT work as is with more than 1 queue.
821  *
822  *  return 0 on success, positive on failure
823  **********************************************************************/
824 
825 int
826 ixgbe_encap(struct tx_ring *txr, struct mbuf *m_head)
827 {
828 	struct ix_softc *sc = txr->sc;
829 	uint32_t	olinfo_status = 0, cmd_type_len = 0;
830 	int             i, j, error;
831 	int		first, last = 0;
832 	bus_dmamap_t	map;
833 	struct ixgbe_tx_buf *txbuf, *txbuf_mapped;
834 	union ixgbe_adv_tx_desc *txd = NULL;
835 #ifdef notyet
836 	uint32_t	paylen = 0;
837 #endif
838 
839 	/* Basic descriptor defines */
840         cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
841         cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
842 
843 #if NVLAN > 0
844 	if (m_head->m_flags & M_VLANTAG)
845 		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
846 #endif
847 
848 	/*
849 	 * Force a cleanup if number of TX descriptors
850 	 * available is below the threshold. If it fails
851 	 * to get above, then abort transmit.
852 	 */
853 	if (txr->tx_avail <= IXGBE_TX_CLEANUP_THRESHOLD) {
854 		ixgbe_txeof(txr);
855 		/* Make sure things have improved */
856 		if (txr->tx_avail <= IXGBE_TX_OP_THRESHOLD) {
857 			txr->no_tx_desc_avail++;
858 			return (ENOBUFS);
859 		}
860 	}
861 
862         /*
863          * Important to capture the first descriptor
864          * used because it will contain the index of
865          * the one we tell the hardware to report back
866          */
867         first = txr->next_avail_tx_desc;
868 	txbuf = &txr->tx_buffers[first];
869 	txbuf_mapped = txbuf;
870 	map = txbuf->map;
871 
872 	/*
873 	 * Map the packet for DMA.
874 	 */
875 	error = bus_dmamap_load_mbuf(txr->txdma.dma_tag, map,
876 	    m_head, BUS_DMA_NOWAIT);
877 
878 	if (error == ENOMEM) {
879 		sc->no_tx_dma_setup++;
880 		return (error);
881 	} else if (error != 0) {
882 		sc->no_tx_dma_setup++;
883 		return (error);
884 	}
885 
886 	/* Make certain there are enough descriptors */
887 	if (map->dm_nsegs > txr->tx_avail - 2) {
888 		txr->no_tx_desc_avail++;
889 		error = ENOBUFS;
890 		goto xmit_fail;
891 	}
892 
893 	/*
894 	 * Set the appropriate offload context;
895 	 * this becomes the first descriptor of
896 	 * a packet.
897 	 */
898 #ifdef notyet
899 	if (ixgbe_tso_setup(txr, m_head, &paylen)) {
900 		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
901 		olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
902 		olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
903 		olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
904 		++sc->tso_tx;
905 	} else
906 #endif
907 	if (ixgbe_tx_ctx_setup(txr, m_head))
908 		olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
909 
910 	i = txr->next_avail_tx_desc;
911 	for (j = 0; j < map->dm_nsegs; j++) {
912 		txbuf = &txr->tx_buffers[i];
913 		txd = &txr->tx_base[i];
914 
915 		txd->read.buffer_addr = htole64(map->dm_segs[j].ds_addr);
916 		txd->read.cmd_type_len = htole32(txr->txd_cmd |
917 		    cmd_type_len | map->dm_segs[j].ds_len);
918 		txd->read.olinfo_status = htole32(olinfo_status);
919 		last = i; /* Next descriptor that will get completed */
920 
921 		if (++i == sc->num_tx_desc)
922 			i = 0;
923 
924 		txbuf->m_head = NULL;
925 
926 		/*
927 		 * we have to do this inside the loop right now
928 		 * because of the hardware workaround.
929 		 */
930 		if (j == (map->dm_nsegs -1)) /* Last descriptor gets EOP and RS */
931 			txd->read.cmd_type_len |=
932 			    htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
933 #ifndef NO_82598_A0_SUPPORT
934 		if (sc->hw.revision_id == 0)
935 			desc_flip(txd);
936 #endif
937 	}
938 
939 	txr->tx_avail -= map->dm_nsegs;
940 	txr->next_avail_tx_desc = i;
941 
942 	txbuf->m_head = m_head;
943 	txbuf->map = map;
944 	bus_dmamap_sync(txr->txdma.dma_tag, map, 0, map->dm_mapsize,
945 	    BUS_DMASYNC_PREWRITE);
946 
947         /* Set the index of the descriptor that will be marked done */
948         txbuf = &txr->tx_buffers[first];
949 
950 	++txr->tx_packets;
951 	return (0);
952 
953 xmit_fail:
954 	bus_dmamap_unload(txr->txdma.dma_tag, txbuf->map);
955 	return (error);
956 
957 }
958 
959 void
960 ixgbe_set_promisc(struct ix_softc *sc)
961 {
962 
963 	uint32_t       reg_rctl;
964 	struct ifnet *ifp = &sc->arpcom.ac_if;
965 
966 	reg_rctl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
967 
968 	if (ifp->if_flags & IFF_PROMISC) {
969 		reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
970 		IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, reg_rctl);
971 	} else if (ifp->if_flags & IFF_ALLMULTI) {
972 		reg_rctl |= IXGBE_FCTRL_MPE;
973 		reg_rctl &= ~IXGBE_FCTRL_UPE;
974 		IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, reg_rctl);
975 	}
976 	return;
977 }
978 
979 void
980 ixgbe_disable_promisc(struct ix_softc * sc)
981 {
982 	uint32_t       reg_rctl;
983 
984 	reg_rctl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
985 
986 	reg_rctl &= (~IXGBE_FCTRL_UPE);
987 	reg_rctl &= (~IXGBE_FCTRL_MPE);
988 	IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, reg_rctl);
989 
990 	return;
991 }
992 
993 
994 /*********************************************************************
995  *  Multicast Update
996  *
997  *  This routine is called whenever multicast address list is updated.
998  *
999  **********************************************************************/
1000 #define IXGBE_RAR_ENTRIES 16
1001 
1002 void
1003 ixgbe_set_multi(struct ix_softc *sc)
1004 {
1005 	uint32_t	fctrl;
1006 	uint8_t	mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
1007 	uint8_t	*update_ptr;
1008 	struct ether_multi *enm;
1009 	struct ether_multistep step;
1010 	int	mcnt = 0;
1011 	struct ifnet *ifp = &sc->arpcom.ac_if;
1012 
1013 	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
1014 
1015 	fctrl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
1016 	fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1017 	if (ifp->if_flags & IFF_PROMISC)
1018 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1019 	else if (ifp->if_flags & IFF_ALLMULTI) {
1020 		fctrl |= IXGBE_FCTRL_MPE;
1021 		fctrl &= ~IXGBE_FCTRL_UPE;
1022 	} else
1023 		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1024 
1025 	IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, fctrl);
1026 
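	/*
	 * Walk the multicast list; a range entry (addrlo != addrhi) or
	 * more than MAX_NUM_MULTICAST_ADDRESSES entries falls back to
	 * ALLMULTI instead of programming individual filter entries.
	 */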
1027 	ETHER_FIRST_MULTI(step, &sc->arpcom, enm);
1028 	while (enm != NULL) {
1029 		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1030 			ifp->if_flags |= IFF_ALLMULTI;
1031 			mcnt = MAX_NUM_MULTICAST_ADDRESSES;
1032 		}
1033 		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1034 			break;
1035 		bcopy(enm->enm_addrlo,
1036 		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1037 		    IXGBE_ETH_LENGTH_OF_ADDRESS);
1038 		mcnt++;
1039 		ETHER_NEXT_MULTI(step, enm);
1040 	}
1041 
1042 	update_ptr = mta;
1043 	ixgbe_hw(&sc->hw, update_mc_addr_list,
1044 	    update_ptr, mcnt, ixgbe_mc_array_itr);
1045 
1046 	return;
1047 }
1048 
1049 /*
1050  * This is an iterator function needed by the shared multicast code.
1051  * It feeds the shared code routine the addresses from the array
1052  * built by ixgbe_set_multi(), one at a time.
1053  */
1054 uint8_t *
1055 ixgbe_mc_array_itr(struct ixgbe_hw *hw, uint8_t **update_ptr, uint32_t *vmdq)
1056 {
1057 	uint8_t *addr = *update_ptr;
1058 	uint8_t *newptr;
1059 	*vmdq = 0;
1060 
1061 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1062 	*update_ptr = newptr;
1063 	return addr;
1064 }
1065 
1066 
1067 /*********************************************************************
1068  *  Timer routine
1069  *
1070  *  This routine checks for link status, updates statistics,
1071  *  and runs the watchdog timer.
1072  *
1073  **********************************************************************/
1074 
1075 void
1076 ixgbe_local_timer(void *arg)
1077 {
1078 	struct ix_softc *sc = arg;
1079 #ifdef IX_DEBUG
1080 	struct ifnet	*ifp = &sc->arpcom.ac_if;
1081 #endif
1082 	int		 s;
1083 
1084 	s = splnet();
1085 
1086 	ixgbe_update_link_status(sc);
1087 	ixgbe_update_stats_counters(sc);
1088 
1089 #ifdef IX_DEBUG
1090 	if ((ifp->if_flags & (IFF_RUNNING|IFF_DEBUG)) ==
1091 	    (IFF_RUNNING|IFF_DEBUG))
1092 		ixgbe_print_hw_stats(sc);
1093 #endif
1094 
1095 	timeout_add_sec(&sc->timer, 1);
1096 
1097 	splx(s);
1098 }
1099 
1100 void
1101 ixgbe_update_link_status(struct ix_softc *sc)
1102 {
1103 	int link_up = FALSE;
1104 	struct ifnet *ifp = &sc->arpcom.ac_if;
1105 	struct tx_ring *txr = sc->tx_rings;
1106 	int		i;
1107 
1108 	ixgbe_hw(&sc->hw, check_link, &sc->link_speed, &link_up, 0);
1109 
1110 	switch (sc->link_speed) {
1111 	case IXGBE_LINK_SPEED_UNKNOWN:
1112 		ifp->if_baudrate = 0;
1113 		break;
1114 	case IXGBE_LINK_SPEED_100_FULL:
1115 		ifp->if_baudrate = IF_Mbps(100);
1116 		break;
1117 	case IXGBE_LINK_SPEED_1GB_FULL:
1118 		ifp->if_baudrate = IF_Gbps(1);
1119 		break;
1120 	case IXGBE_LINK_SPEED_10GB_FULL:
1121 		ifp->if_baudrate = IF_Gbps(10);
1122 		break;
1123 	}
1124 
1125 	if (link_up){
1126 		if (sc->link_active == FALSE) {
1127 			sc->link_active = TRUE;
1128 			ifp->if_link_state = LINK_STATE_FULL_DUPLEX;
1129 			if_link_state_change(ifp);
1130 		}
1131 	} else { /* Link down */
1132 		if (sc->link_active == TRUE) {
1133 			ifp->if_baudrate = 0;
1134 			ifp->if_link_state = LINK_STATE_DOWN;
1135 			if_link_state_change(ifp);
1136 			sc->link_active = FALSE;
1137 			for (i = 0; i < sc->num_tx_queues;
1138 			    i++, txr++)
1139 				txr->watchdog_timer = FALSE;
1140 			ifp->if_timer = 0;
1141 		}
1142 	}
1143 
1144 	return;
1145 }
1146 
1147 
1148 
1149 /*********************************************************************
1150  *
1151  *  This routine disables all traffic on the sc by issuing a
1152  *  global reset on the MAC and deallocates TX/RX buffers.
1153  *
1154  **********************************************************************/
1155 
1156 void
1157 ixgbe_stop(void *arg)
1158 {
1159 	struct ix_softc *sc = arg;
1160 	struct ifnet   *ifp = &sc->arpcom.ac_if;
1161 
1162 	/* Tell the stack that the interface is no longer active */
1163 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1164 
1165 	INIT_DEBUGOUT("ixgbe_stop: begin\n");
1166 	ixgbe_disable_intr(sc);
1167 
1168 	ixgbe_hw(&sc->hw, reset_hw);
1169 	sc->hw.adapter_stopped = FALSE;
1170 	ixgbe_hw(&sc->hw, stop_adapter);
1171 	timeout_del(&sc->timer);
1172 
1173 	/* reprogram the RAR[0] in case user changed it. */
1174 	ixgbe_hw(&sc->hw, set_rar, 0, sc->hw.mac.addr, 0, IXGBE_RAH_AV);
1175 
1176 	ixgbe_free_transmit_structures(sc);
1177 	ixgbe_free_receive_structures(sc);
1178 }
1179 
1180 
1181 /*********************************************************************
1182  *
1183  *  Determine hardware revision.
1184  *
1185  **********************************************************************/
1186 void
1187 ixgbe_identify_hardware(struct ix_softc *sc)
1188 {
1189 	struct ixgbe_osdep	*os = &sc->osdep;
1190 	struct pci_attach_args	*pa = os->os_pa;
1191 	uint32_t		 reg;
1192 
1193 	/* Save off the information about this board */
1194 	sc->hw.vendor_id = PCI_VENDOR(pa->pa_id);
1195 	sc->hw.device_id = PCI_PRODUCT(pa->pa_id);
1196 
1197 	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
1198 	sc->hw.revision_id = PCI_REVISION(reg);
1199 
1200 	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
1201 	sc->hw.subsystem_vendor_id = PCI_VENDOR(reg);
1202 	sc->hw.subsystem_device_id = PCI_PRODUCT(reg);
1203 
1204 	ixgbe_total_ports++;
1205 	switch (sc->hw.device_id) {
1206 	case PCI_PRODUCT_INTEL_82598AF_DUAL:
1207 	case PCI_PRODUCT_INTEL_82598EB_CX4_DUAL:
1208 	case PCI_PRODUCT_INTEL_82598AT_DUAL:
1209 		ixgbe_total_ports++;
1210 		break;
1211 	}
1212 
1213 	switch (sc->hw.device_id) {
1214 	case PCI_PRODUCT_INTEL_82598AF_DUAL:
1215 	case PCI_PRODUCT_INTEL_82598AF:
1216 		sc->optics = IFM_10G_SR;
1217 		break;
1218 	case PCI_PRODUCT_INTEL_82598EB_CX4_DUAL:
1219 	case PCI_PRODUCT_INTEL_82598EB_CX4:
1220 		sc->optics = IFM_10G_CX4;
1221 		break;
1222 	case PCI_PRODUCT_INTEL_82598EB_XF_LR:
1223 		sc->optics = IFM_10G_LR;
1224 		break;
1225 	case PCI_PRODUCT_INTEL_82598AT_DUAL:
1226 	case PCI_PRODUCT_INTEL_82598AT:
1227 		sc->optics = IFM_10G_T;
1228 		break;
1229 	default:
1230 		sc->optics = IFM_AUTO;
1231 		break;
1232 	}
1233 }
1234 
1235 /*********************************************************************
1236  *
1237  *  Setup the Legacy or MSI Interrupt handler
1238  *
1239  **********************************************************************/
1240 int
1241 ixgbe_allocate_legacy(struct ix_softc *sc)
1242 {
1243 	struct ifnet		*ifp = &sc->arpcom.ac_if;
1244 	struct ixgbe_osdep	*os = &sc->osdep;
1245 	struct pci_attach_args	*pa = os->os_pa;
1246 	const char		*intrstr = NULL;
1247 	pci_chipset_tag_t	pc = pa->pa_pc;
1248 	pci_intr_handle_t	ih;
1249 
1250 	/* Legacy RID at 0 */
1251 	if (sc->msix == 0)
1252 		sc->rid[0] = 0;
1253 
1254 	/* We allocate a single interrupt resource */
1255 	if (pci_intr_map(pa, &ih)) {
1256 		printf(": couldn't map interrupt\n");
1257 		return (ENXIO);
1258 	}
1259 
1260 	intrstr = pci_intr_string(pc, ih);
1261 	sc->tag[0] = pci_intr_establish(pc, ih, IPL_NET,
1262 	    ixgbe_legacy_irq, sc, ifp->if_xname);
1263 	if (sc->tag[0] == NULL) {
1264 		printf(": couldn't establish interrupt");
1265 		if (intrstr != NULL)
1266 			printf(" at %s", intrstr);
1267 		printf("\n");
1268 		return (ENXIO);
1269 	}
1270 	printf(": %s", intrstr);
1271 
1272 	return (0);
1273 }
1274 
1275 int
1276 ixgbe_allocate_pci_resources(struct ix_softc *sc)
1277 {
1278 	struct ixgbe_osdep	*os = &sc->osdep;
1279 	struct pci_attach_args	*pa = os->os_pa;
1280 	int			 val, i;
1281 
1282 	val = pci_conf_read(pa->pa_pc, pa->pa_tag, PCIR_BAR(0));
1283 	if (PCI_MAPREG_TYPE(val) != PCI_MAPREG_TYPE_MEM &&
1284 	    PCI_MAPREG_TYPE(val) != PCI_MAPREG_MEM_TYPE_64BIT) {
1285 		printf(": mmba is not mem space\n");
1286 		return (ENXIO);
1287 	}
1288 
1289 	if (pci_mapreg_map(pa, PCIR_BAR(0), PCI_MAPREG_MEM_TYPE(val), 0,
1290 	    &os->os_memt, &os->os_memh, &os->os_membase, &os->os_memsize, 0)) {
1291 		printf(": cannot find mem space\n");
1292 		return (ENXIO);
1293 	}
1294 	sc->hw.hw_addr = (uint8_t *)os->os_membase;
1295 
1296 	/*
1297 	 * Init the resource arrays
1298 	 */
1299 	for (i = 0; i < IXGBE_MSGS; i++) {
1300 		sc->rid[i] = i + 1; /* MSI/X RID starts at 1 */
1301 		sc->tag[i] = NULL;
1302 		sc->res[i] = NULL;
1303 	}
1304 
1305 	/* Legacy defaults */
1306 	sc->num_tx_queues = 1;
1307 	sc->num_rx_queues = 1;
1308 
1309 #ifdef notyet
1310 	/* Now setup MSI or MSI/X */
1311 	sc->msix = ixgbe_setup_msix(sc);
1312 #endif
1313 	sc->hw.back = os;
1314 
1315 	return (0);
1316 }
1317 
1318 void
1319 ixgbe_free_pci_resources(struct ix_softc * sc)
1320 {
1321 	struct ixgbe_osdep	*os = &sc->osdep;
1322 	struct pci_attach_args	*pa = os->os_pa;
1323 
1324 	pci_intr_disestablish(pa->pa_pc, sc->tag[0]);
1325 	sc->tag[0] = NULL;
1326 	if (os->os_membase != NULL)
1327 		bus_space_unmap(os->os_memt, os->os_memh, os->os_memsize);
1328 	os->os_membase = NULL;
1329 
1330 	return;
1331 }
1332 
1333 /*********************************************************************
1334  *
1335  *  Initialize the hardware to a configuration as specified by the
1336  *  sc structure. The controller is reset, the EEPROM is
1337  *  verified, the MAC address is set, then the shared initialization
1338  *  routines are called.
1339  *
1340  **********************************************************************/
1341 int
1342 ixgbe_hardware_init(struct ix_softc *sc)
1343 {
1344 	struct ifnet	*ifp = &sc->arpcom.ac_if;
1345 	uint16_t csum;
1346 
1347 	csum = 0;
1348 	/* Issue a global reset */
1349 	sc->hw.adapter_stopped = FALSE;
1350 	ixgbe_hw(&sc->hw, stop_adapter);
1351 
1352 	/* Make sure we have a good EEPROM before we read from it */
1353 	if (ixgbe_ee(&sc->hw, validate_checksum, &csum) < 0) {
1354 		printf("%s: The EEPROM Checksum Is Not Valid\n", ifp->if_xname);
1355 		return (EIO);
1356 	}
1357 
1358 	/* Get Hardware Flow Control setting */
1359 	sc->hw.fc.type = ixgbe_fc_full;
1360 	sc->hw.fc.pause_time = IXGBE_FC_PAUSE;
1361 	sc->hw.fc.low_water = IXGBE_FC_LO;
1362 	sc->hw.fc.high_water = IXGBE_FC_HI;
1363 	sc->hw.fc.send_xon = TRUE;
1364 
1365 	if (ixgbe_hw(&sc->hw, init_hw) != 0) {
1366 		printf("%s: Hardware Initialization Failed", ifp->if_xname);
1367 		return (EIO);
1368 	}
1369 	bcopy(sc->hw.mac.addr, sc->arpcom.ac_enaddr,
1370 	      IXGBE_ETH_LENGTH_OF_ADDRESS);
1371 
1372 	return (0);
1373 }
1374 
1375 /*********************************************************************
1376  *
1377  *  Setup networking device structure and register an interface.
1378  *
1379  **********************************************************************/
1380 void
1381 ixgbe_setup_interface(struct ix_softc *sc)
1382 {
1383 	struct ixgbe_hw *hw = &sc->hw;
1384 	struct ifnet   *ifp = &sc->arpcom.ac_if;
1385 	INIT_DEBUGOUT("ixgbe_setup_interface: begin");
1386 
1387 	strlcpy(ifp->if_xname, sc->dev.dv_xname, IFNAMSIZ);
1388 	ifp->if_baudrate = IF_Gbps(10);
1389 	ifp->if_softc = sc;
1390 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1391 	ifp->if_ioctl = ixgbe_ioctl;
1392 	ifp->if_start = ixgbe_start;
1393 	ifp->if_timer = 0;
1394 	ifp->if_watchdog = ixgbe_watchdog;
1395 	ifp->if_hardmtu = IXGBE_MAX_FRAME_SIZE -
1396 	    ETHER_HDR_LEN - ETHER_CRC_LEN;
1397 	IFQ_SET_MAXLEN(&ifp->if_snd, sc->num_tx_desc - 1);
1398 	IFQ_SET_READY(&ifp->if_snd);
1399 
1400 	m_clsetwms(ifp, MCLBYTES, 4, sc->num_rx_desc);
1401 
1402 	ifp->if_capabilities = IFCAP_VLAN_MTU;
1403 
1404 #if NVLAN > 0
1405 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
1406 #endif
1407 
1408 #ifdef IX_CSUM_OFFLOAD
1409 	ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
1410 				IFCAP_CSUM_UDPv4;
1411 #endif
1412 
1413 	sc->max_frame_size =
1414 	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1415 
1416 	if ((hw->device_id == PCI_PRODUCT_INTEL_82598AT) ||
1417 	    (hw->device_id == PCI_PRODUCT_INTEL_82598AT_DUAL))
1418 		ixgbe_hw(hw, setup_link_speed,
1419 		    IXGBE_LINK_SPEED_10GB_FULL |
1420 		    IXGBE_LINK_SPEED_1GB_FULL, TRUE, TRUE);
1421 	else
1422 		ixgbe_hw(hw, setup_link_speed,
1423 		    IXGBE_LINK_SPEED_10GB_FULL,
1424 		    TRUE, FALSE);
1425 
1426 	/*
1427 	 * Specify the media types supported by this sc and register
1428 	 * callbacks to update media and link information
1429 	 */
1430 	ifmedia_init(&sc->media, IFM_IMASK, ixgbe_media_change,
1431 		     ixgbe_media_status);
1432 	ifmedia_add(&sc->media, IFM_ETHER | sc->optics |
1433 	    IFM_FDX, 0, NULL);
1434 	if ((hw->device_id == PCI_PRODUCT_INTEL_82598AT) ||
1435 	    (hw->device_id == PCI_PRODUCT_INTEL_82598AT_DUAL)) {
1436 		ifmedia_add(&sc->media,
1437 		    IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
1438 		ifmedia_add(&sc->media,
1439 		    IFM_ETHER | IFM_1000_T, 0, NULL);
1440 	}
1441 	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1442 	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
1443 
1444 	if_attach(ifp);
1445 	ether_ifattach(ifp);
1446 
1447 
1448 	return;
1449 }
1450 
1451 int
1452 ixgbe_dma_malloc(struct ix_softc *sc, bus_size_t size,
1453 		struct ixgbe_dma_alloc *dma, int mapflags)
1454 {
1455 	struct ifnet		*ifp = &sc->arpcom.ac_if;
1456 	struct ixgbe_osdep	*os = &sc->osdep;
1457 	int			 r;
1458 
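	/*
	 * Standard four-step bus_dma(9) sequence: create a map, allocate
	 * DMA-safe memory, map it into kernel virtual address space, then
	 * load it into the map; the fail_* labels unwind in reverse order.
	 */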
1459 	dma->dma_tag = os->os_pa->pa_dmat;
1460 	r = bus_dmamap_create(dma->dma_tag, size, 1,
1461 	    size, 0, BUS_DMA_NOWAIT, &dma->dma_map);
1462 	if (r != 0) {
1463 		printf("%s: ixgbe_dma_malloc: bus_dmamap_create failed; "
1464 		       "error %u\n", ifp->if_xname, r);
1465 		goto fail_0;
1466 	}
1467 
1468 	r = bus_dmamem_alloc(dma->dma_tag, size, PAGE_SIZE, 0, &dma->dma_seg,
1469 	    1, &dma->dma_nseg, BUS_DMA_NOWAIT);
1470 	if (r != 0) {
1471 		printf("%s: ixgbe_dma_malloc: bus_dmamem_alloc failed; "
1472 		       "error %u\n", ifp->if_xname, r);
1473 		goto fail_1;
1474 	}
1475 
1476 	r = bus_dmamem_map(dma->dma_tag, &dma->dma_seg, dma->dma_nseg, size,
1477 	    &dma->dma_vaddr, BUS_DMA_NOWAIT);
1478 	if (r != 0) {
1479 		printf("%s: ixgbe_dma_malloc: bus_dmamem_map failed; "
1480 		       "error %u\n", ifp->if_xname, r);
1481 		goto fail_2;
1482 	}
1483 
1484 	r = bus_dmamap_load(dma->dma_tag, dma->dma_map,
1485 	    dma->dma_vaddr, size, NULL,
1486 	    mapflags | BUS_DMA_NOWAIT);
1487 	if (r != 0) {
1488 		printf("%s: ixgbe_dma_malloc: bus_dmamap_load failed; "
1489 		       "error %u\n", ifp->if_xname, r);
1490 		goto fail_3;
1491 	}
1492 
1493 	dma->dma_size = size;
1494 	return (0);
1495 fail_3:
1496 	bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size);
1497 fail_2:
1498 	bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
1499 fail_1:
1500 	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
1501 fail_0:
1502 	dma->dma_map = NULL;
1503 	dma->dma_tag = NULL;
1504 	return (r);
1505 }
1506 
1507 void
1508 ixgbe_dma_free(struct ix_softc *sc, struct ixgbe_dma_alloc *dma)
1509 {
1510 	if (dma->dma_tag == NULL)
1511 		return;
1512 
1513 	if (dma->dma_map != NULL) {
1514 		bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0,
1515 		    dma->dma_map->dm_mapsize,
1516 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1517 		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1518 		bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, dma->dma_size);
1519 		bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
1520 		bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
1521 		dma->dma_map = NULL;
1522 	}
1523 }
1524 
1525 
1526 /*********************************************************************
1527  *
1528  *  Allocate memory for the transmit and receive rings, and then
1529  *  the descriptors associated with each; called only once at attach.
1530  *
1531  **********************************************************************/
1532 int
1533 ixgbe_allocate_queues(struct ix_softc *sc)
1534 {
1535 	struct ifnet	*ifp = &sc->arpcom.ac_if;
1536 	struct tx_ring *txr;
1537 	struct rx_ring *rxr;
1538 	int rsize, tsize, error = IXGBE_SUCCESS;
1539 	int txconf = 0, rxconf = 0, i;
1540 
1541 	/* First allocate the TX ring struct memory */
1542 	if (!(sc->tx_rings =
1543 	    (struct tx_ring *) malloc(sizeof(struct tx_ring) *
1544 	    sc->num_tx_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
1545 		printf("%s: Unable to allocate TX ring memory\n", ifp->if_xname);
1546 		error = ENOMEM;
1547 		goto fail;
1548 	}
1549 	txr = sc->tx_rings;
1550 
1551 	/* Next allocate the RX */
1552 	if (!(sc->rx_rings =
1553 	    (struct rx_ring *) malloc(sizeof(struct rx_ring) *
1554 	    sc->num_rx_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
1555 		printf("%s: Unable to allocate RX ring memory\n", ifp->if_xname);
1556 		error = ENOMEM;
1557 		goto rx_fail;
1558 	}
1559 	rxr = sc->rx_rings;
1560 
1561 	/* For the ring itself */
1562 	tsize = roundup2(sc->num_tx_desc *
1563 	    sizeof(union ixgbe_adv_tx_desc), 4096);
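	/*
	 * Round the ring size up to a 4KB multiple so the descriptor base
	 * and ring length meet the hardware's alignment requirements and
	 * the DMA allocation stays page sized.
	 */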
1564 
1565 	/*
1566 	 * Now set up the TX queues, txconf is needed to handle the
1567 	 * possibility that things fail midcourse and we need to
1568 	 * undo memory gracefully
1569 	 */
1570 	for (i = 0; i < sc->num_tx_queues; i++, txconf++) {
1571 		/* Set up some basics */
1572 		txr = &sc->tx_rings[i];
1573 		txr->sc = sc;
1574 		txr->me = i;
1575 
1576 		/* Initialize the TX side lock */
1577 		mtx_init(&txr->tx_mtx, IPL_NET);
1578 
1579 		if (ixgbe_dma_malloc(sc, tsize,
1580 		    &txr->txdma, BUS_DMA_NOWAIT)) {
1581 			printf("%s: Unable to allocate TX Descriptor memory\n",
1582 			    ifp->if_xname);
1583 			error = ENOMEM;
1584 			goto err_tx_desc;
1585 		}
1586 		txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
1587 		bzero((void *)txr->tx_base, tsize);
1588 
1589 		if (ixgbe_dma_malloc(sc, sizeof(uint32_t),
1590 		    &txr->txwbdma, BUS_DMA_NOWAIT)) {
1591 			printf("%s: Unable to allocate TX Write Back memory\n",
1592 			    ifp->if_xname);
1593 			error = ENOMEM;
1594 			goto err_tx_desc;
1595 		}
1596 		txr->tx_hwb = (uint32_t *)txr->txwbdma.dma_vaddr;
1597 		*txr->tx_hwb = 0;
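		/*
		 * tx_hwb is the transmit head write-back area: a single
		 * uint32_t per ring that the hardware updates with the last
		 * consumed descriptor index once TDWBAL/TDWBAH are programmed
		 * in ixgbe_initialize_transmit_units().
		 */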
1598 	}
1599 
1600 	/*
1601 	 * Next the RX queues...
1602 	 */
1603 	rsize = roundup2(sc->num_rx_desc *
1604 	    sizeof(union ixgbe_adv_rx_desc), 4096);
1605 	for (i = 0; i < sc->num_rx_queues; i++, rxconf++) {
1606 		rxr = &sc->rx_rings[i];
1607 		/* Set up some basics */
1608 		rxr->sc = sc;
1609 		rxr->me = i;
1610 
1611 		/* Initialize the RX side lock */
1612 		mtx_init(&rxr->rx_mtx, IPL_NET);
1613 
1614 		if (ixgbe_dma_malloc(sc, rsize,
1615 			&rxr->rxdma, BUS_DMA_NOWAIT)) {
1616 			printf("%s: Unable to allocate RxDescriptor memory\n",
1617 			    ifp->if_xname);
1618 			error = ENOMEM;
1619 			goto err_rx_desc;
1620 		}
1621 		rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
1622 		bzero((void *)rxr->rx_base, rsize);
1623 	}
1624 
1625 	return (0);
1626 
1627 err_rx_desc:
1628 	for (rxr = sc->rx_rings; rxconf > 0; rxr++, rxconf--)
1629 		ixgbe_dma_free(sc, &rxr->rxdma);
1630 err_tx_desc:
1631 	for (txr = sc->tx_rings; txconf > 0; txr++, txconf--) {
1632 		ixgbe_dma_free(sc, &txr->txdma);
1633 		ixgbe_dma_free(sc, &txr->txwbdma);
1634 	}
1635 	free(sc->rx_rings, M_DEVBUF);
1636 	sc->rx_rings = NULL;
1637 rx_fail:
1638 	free(sc->tx_rings, M_DEVBUF);
1639 	sc->tx_rings = NULL;
1640 fail:
1641 	return (error);
1642 }
1643 
1644 /*********************************************************************
1645  *
1646  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
1647  *  the information needed to transmit a packet on the wire. This is
1648  *  called only once at attach; setup is done on every reset.
1649  *
1650  **********************************************************************/
1651 int
1652 ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
1653 {
1654 	struct ix_softc 	*sc;
1655 	struct ixgbe_osdep	*os;
1656 	struct ifnet		*ifp;
1657 	struct ixgbe_tx_buf	*txbuf;
1658 	int			 error, i;
1659 
1660 	sc = txr->sc;
1661 	os = &sc->osdep;
1662 	ifp = &sc->arpcom.ac_if;
1663 
1664 	if (!(txr->tx_buffers =
1665 	    (struct ixgbe_tx_buf *) malloc(sizeof(struct ixgbe_tx_buf) *
1666 	    sc->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
1667 		printf("%s: Unable to allocate tx_buffer memory\n",
1668 		    ifp->if_xname);
1669 		error = ENOMEM;
1670 		goto fail;
1671 	}
1672 	txr->txtag = txr->txdma.dma_tag;
1673 
1674         /* Create the descriptor buffer dma maps */
1675 	for (i = 0; i < sc->num_tx_desc; i++) {
1676 		txbuf = &txr->tx_buffers[i];
1677 		error = bus_dmamap_create(txr->txdma.dma_tag, IXGBE_TSO_SIZE,
1678 			    IXGBE_MAX_SCATTER, PAGE_SIZE, 0,
1679 			    BUS_DMA_NOWAIT, &txbuf->map);
1680 
1681 		if (error != 0) {
1682 			printf("%s: Unable to create TX DMA map\n",
1683 			    ifp->if_xname);
1684 			goto fail;
1685 		}
1686 	}
1687 
1688 	return 0;
1689 fail:
1690 	return (error);
1691 }
1692 
1693 /*********************************************************************
1694  *
1695  *  Initialize a transmit ring.
1696  *
1697  **********************************************************************/
1698 int
1699 ixgbe_setup_transmit_ring(struct tx_ring *txr)
1700 {
1701 	struct ix_softc		*sc = txr->sc;
1702 	int			 error;
1703 
1704 	/* Now allocate transmit buffers for the ring */
1705 	if ((error = ixgbe_allocate_transmit_buffers(txr)) != 0)
1706 		return (error);
1707 
1708 	/* Clear the old ring contents */
1709 	bzero((void *)txr->tx_base,
1710 	      (sizeof(union ixgbe_adv_tx_desc)) * sc->num_tx_desc);
1711 
1712 	/* Reset indices */
1713 	txr->next_avail_tx_desc = 0;
1714 	txr->next_tx_to_clean = 0;
1715 
1716 	/* Set number of descriptors available */
1717 	txr->tx_avail = sc->num_tx_desc;
1718 
1719 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
1720 	    0, txr->txdma.dma_map->dm_mapsize,
1721 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1722 
1723 	return (0);
1724 }
1725 
1726 /*********************************************************************
1727  *
1728  *  Initialize all transmit rings.
1729  *
1730  **********************************************************************/
1731 int
1732 ixgbe_setup_transmit_structures(struct ix_softc *sc)
1733 {
1734 	struct tx_ring *txr = sc->tx_rings;
1735 	int		i, error;
1736 
1737 	for (i = 0; i < sc->num_tx_queues; i++, txr++) {
1738 		if ((error = ixgbe_setup_transmit_ring(txr)) != 0)
1739 			goto fail;
1740 	}
1741 
1742 	return (0);
1743 fail:
1744 	ixgbe_free_transmit_structures(sc);
1745 	return (error);
1746 }
1747 
1748 /*********************************************************************
1749  *
1750  *  Enable transmit unit.
1751  *
1752  **********************************************************************/
1753 void
1754 ixgbe_initialize_transmit_units(struct ix_softc *sc)
1755 {
1756 	struct ifnet	*ifp = &sc->arpcom.ac_if;
1757 	struct tx_ring	*txr;
1758 	struct ixgbe_hw	*hw = &sc->hw;
1759 	int		 i;
1760 	uint64_t	 tdba, txhwb;
1761 	uint32_t	 txctrl;
1762 
1763 	/* Setup the Base and Length of the Tx Descriptor Ring */
1764 
1765 	for (i = 0; i < sc->num_tx_queues; i++) {
1766 		txr = &sc->tx_rings[i];
1767 
1768 		/* Setup descriptor base address */
1769 		tdba = txr->txdma.dma_map->dm_segs[0].ds_addr;
1770 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),
1771 		       (tdba & 0x00000000ffffffffULL));
1772 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
1773 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
1774 		    sc->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
1775 
1776 		/* Setup for Head WriteBack */
1777 		txhwb = txr->txwbdma.dma_map->dm_segs[0].ds_addr;
1778 		txhwb |= IXGBE_TDWBAL_HEAD_WB_ENABLE;
1779 		IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(i),
1780 		    (txhwb & 0x00000000ffffffffULL));
1781 		IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(i),
1782 		    (txhwb >> 32));
1783 		txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
1784 		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
1785 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
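		/*
		 * Relaxed ordering must be off for the head write-back DMA
		 * so the head update is not seen before the descriptors it
		 * covers; hence TX_WB_RO_EN is cleared above.
		 */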
1786 
1787 		/* Setup the HW Tx Head and Tail descriptor pointers */
1788 		IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
1789 		IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
1790 
1791 		/* Setup Transmit Descriptor Cmd Settings */
1792 		txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
1793 
1794 		txr->watchdog_timer = 0;
1795 	}
1796 	ifp->if_timer = 0;
1797 
1798 	return;
1799 }
1800 
1801 /*********************************************************************
1802  *
1803  *  Free all transmit rings.
1804  *
1805  **********************************************************************/
1806 void
1807 ixgbe_free_transmit_structures(struct ix_softc *sc)
1808 {
1809 	struct tx_ring *txr = sc->tx_rings;
1810 	int		i;
1811 
1812 	for (i = 0; i < sc->num_tx_queues; i++, txr++) {
1813 		ixgbe_free_transmit_buffers(txr);
1814 	}
1815 }
1816 
1817 /*********************************************************************
1818  *
1819  *  Free transmit ring related data structures.
1820  *
1821  **********************************************************************/
1822 void
1823 ixgbe_free_transmit_buffers(struct tx_ring *txr)
1824 {
1825 	struct ix_softc *sc = txr->sc;
1826 	struct ixgbe_tx_buf *tx_buffer;
1827 	int             i;
1828 
1829 	INIT_DEBUGOUT("free_transmit_ring: begin");
1830 
1831 	if (txr->tx_buffers == NULL)
1832 		return;
1833 
1834 	tx_buffer = txr->tx_buffers;
1835 	for (i = 0; i < sc->num_tx_desc; i++, tx_buffer++) {
1836 		if (tx_buffer->map != NULL && tx_buffer->map->dm_nsegs > 0) {
1837 			bus_dmamap_sync(txr->txdma.dma_tag, tx_buffer->map,
1838 			    0, tx_buffer->map->dm_mapsize,
1839 			    BUS_DMASYNC_POSTWRITE);
1840 			bus_dmamap_unload(txr->txdma.dma_tag,
1841 			    tx_buffer->map);
1842 		}
1843 		if (tx_buffer->m_head != NULL) {
1844 			m_freem(tx_buffer->m_head);
1845 			tx_buffer->m_head = NULL;
1846 		}
1847 		if (tx_buffer->map != NULL) {
1848 			bus_dmamap_destroy(txr->txdma.dma_tag,
1849 			    tx_buffer->map);
1850 			tx_buffer->map = NULL;
1851 		}
1852 	}
1853 
1854 	if (txr->tx_buffers != NULL) {
1855 		free(txr->tx_buffers, M_DEVBUF);
1856 		txr->tx_buffers = NULL;
1857 	}
1859 	txr->txtag = NULL;
1860 }
1861 
1862 /*********************************************************************
1863  *
1864  *  Advanced Context Descriptor setup for VLAN or CSUM
1865  *
1866  **********************************************************************/
1867 
1868 int
1869 ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
1870 {
1871 	struct ix_softc *sc = txr->sc;
1872 	struct ifnet	*ifp = &sc->arpcom.ac_if;
1873 	struct ixgbe_adv_tx_context_desc *TXD;
1874 	struct ixgbe_tx_buf        *tx_buffer;
1875 	uint32_t vlan_macip_lens = 0, type_tucmd_mlhl = 0;
1876 	struct ip *ip;
1877 	struct ip6_hdr *ip6;
1878 	uint8_t ipproto = 0;
1879 	int  ehdrlen, ip_hlen = 0;
1880 	uint16_t etype;
1881 	int offload = TRUE;
1882 	int ctxd = txr->next_avail_tx_desc;
1883 #if NVLAN > 0
1884 	struct ether_vlan_header *eh;
1885 #else
1886 	struct ether_header *eh;
1887 #endif
1888 
1889 	if ((ifp->if_capabilities & IFCAP_CSUM_IPv4) == 0)
1890 		offload = FALSE;
1891 
1892 	tx_buffer = &txr->tx_buffers[ctxd];
1893 	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
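	/*
	 * A context descriptor occupies a normal slot in the ring; it
	 * carries the header lengths and checksum type that the hardware
	 * applies to the data descriptors queued after it.
	 */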
1894 
1895 	/*
1896 	 * In advanced descriptors the vlan tag must
1897 	 * be placed into the descriptor itself.
1898 	 */
1899 #if NVLAN > 0
1900 	if (mp->m_flags & M_VLANTAG) {
1901 		vlan_macip_lens |=
1902 		    htole16(mp->m_pkthdr.ether_vtag) << IXGBE_ADVTXD_VLAN_SHIFT;
1903 	} else
1904 #endif
1905 	if (offload == FALSE)
1906 		return FALSE;	/* No need for CTX */
1907 
1908 	/*
1909 	 * Determine where frame payload starts.
1910 	 * Jump over vlan headers if already present,
1911 	 * helpful for QinQ too.
1912 	 */
1913 #if NVLAN > 0
1914 	eh = mtod(mp, struct ether_vlan_header *);
1915 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1916 		etype = ntohs(eh->evl_proto);
1917 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1918 	} else {
1919 		etype = ntohs(eh->evl_encap_proto);
1920 		ehdrlen = ETHER_HDR_LEN;
1921 	}
1922 #else
1923 	eh = mtod(mp, struct ether_header *);
1924 	etype = ntohs(eh->ether_type);
1925 	ehdrlen = ETHER_HDR_LEN;
1926 #endif
1927 
1928 	/* Set the ether header length */
1929 	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
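	/* DEXT + DTYP_CTXT mark this entry as an advanced context descriptor */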
1930 	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
1931 
1932 	if (offload == TRUE) {
1933 		switch (etype) {
1934 		case ETHERTYPE_IP:
1935 			ip = (struct ip *)(mp->m_data + ehdrlen);
1936 			ip_hlen = ip->ip_hl << 2;
1937 			if (mp->m_len < ehdrlen + ip_hlen)
1938 				return FALSE; /* failure */
1939 			ipproto = ip->ip_p;
1940 			if (mp->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
1941 				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
1942 			break;
1943 		case ETHERTYPE_IPV6:
1944 			ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
1945 			ip_hlen = sizeof(struct ip6_hdr);
1946 			if (mp->m_len < ehdrlen + ip_hlen)
1947 				return FALSE; /* failure */
1948 			ipproto = ip6->ip6_nxt;
1949 			if (mp->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
1950 				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
1951 			break;
1952 		default:
1953 			offload = FALSE;
1954 			break;
1955 		}
1956 
1957 		vlan_macip_lens |= ip_hlen;
1958 
1959 		switch (ipproto) {
1960 		case IPPROTO_TCP:
1961 			if (mp->m_pkthdr.csum_flags & M_TCPV4_CSUM_OUT)
1962 				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
1963 			break;
1964 		case IPPROTO_UDP:
1965 			if (mp->m_pkthdr.csum_flags & M_UDPV4_CSUM_OUT)
1966 				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
1967 			break;
1968 		default:
1969 			offload = FALSE;
1970 			break;
1971 		}
1972 	}
1973 
1974 	/* Now copy bits into descriptor */
1975 	TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
1976 	TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
1977 	TXD->seqnum_seed = htole32(0);
1978 	TXD->mss_l4len_idx = htole32(0);
1979 
1980 #ifndef NO_82598_A0_SUPPORT
1981 	if (sc->hw.revision_id == 0)
1982 		desc_flip(TXD);
1983 #endif
1984 
1985 	tx_buffer->m_head = NULL;
1986 
1987 	/* We've consumed the first desc, adjust counters */
1988 	if (++ctxd == sc->num_tx_desc)
1989 		ctxd = 0;
1990 	txr->next_avail_tx_desc = ctxd;
1991 	--txr->tx_avail;
1992 
1993 	return (offload);
1994 }
1995 
1996 #ifdef notyet
1997 /**********************************************************************
1998  *
1999  *  Setup work for hardware segmentation offload (TSO) on
2000  *  adapters using advanced tx descriptors
2001  *
2002  **********************************************************************/
2003 int
2004 ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, uint32_t *paylen)
2005 {
2006 	struct ix_softc *sc = txr->sc;
2007 	struct ixgbe_adv_tx_context_desc *TXD;
2008 	struct ixgbe_tx_buf        *tx_buffer;
2009 	uint32_t vlan_macip_lens = 0, type_tucmd_mlhl = 0;
2010 	uint32_t mss_l4len_idx = 0;
2011 	int ctxd, ehdrlen,  hdrlen, ip_hlen, tcp_hlen;
2012 #if NVLAN > 0
2013 	uint16_t vtag = 0;
2014 	struct ether_vlan_header *eh;
2015 #else
2016 	struct ether_header *eh;
2017 #endif
2018 	struct ip *ip;
2019 	struct tcphdr *th;
2020 
2021 	if (((mp->m_pkthdr.csum_flags & CSUM_TSO) == 0) ||
2022 	    (mp->m_pkthdr.len <= IXGBE_TX_BUFFER_SIZE))
2023 	        return FALSE;
2024 
2025 	/*
2026 	 * Determine where frame payload starts.
2027 	 * Jump over vlan headers if already present
2028 	 */
2029 #if NVLAN > 0
2030 	eh = mtod(mp, struct ether_vlan_header *);
2031 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
2032 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2033 	else
2034 		ehdrlen = ETHER_HDR_LEN;
2035 #else
2036 	eh = mtod(mp, struct ether_header *);
2037 	ehdrlen = ETHER_HDR_LEN;
2038 #endif
2039 
2040         /* Ensure we have at least the IP+TCP header in the first mbuf. */
2041         if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
2042 		return FALSE;
2043 
2044 	ctxd = txr->next_avail_tx_desc;
2045 	tx_buffer = &txr->tx_buffers[ctxd];
2046 	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
2047 
2048 	ip = (struct ip *)(mp->m_data + ehdrlen);
2049 	if (ip->ip_p != IPPROTO_TCP)
2050 		return FALSE;   /* 0 */
2051 	ip->ip_len = 0;
2052 	ip->ip_sum = 0;
2053 	ip_hlen = ip->ip_hl << 2;
2054 	th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
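	/*
	 * Seed the TCP checksum with the pseudo-header sum; the hardware
	 * completes the checksum for each segment it generates.
	 */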
2055 	th->th_sum = in_pseudo(ip->ip_src.s_addr,
2056 	    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2057 	tcp_hlen = th->th_off << 2;
2058 	hdrlen = ehdrlen + ip_hlen + tcp_hlen;
2059 	/* This is used in the transmit desc in encap */
2060 	*paylen = mp->m_pkthdr.len - hdrlen;
2061 
2062 #if NVLAN > 0
2063 	/* VLAN MACLEN IPLEN */
2064 	if (mp->m_flags & M_VLANTAG) {
2065 		vtag = htole16(mp->m_pkthdr.ether_vtag);
2066 		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
2067 	}
2068 #endif
2069 
2070 	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
2071 	vlan_macip_lens |= ip_hlen;
2072 	TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
2073 
2074 	/* ADV DTYPE TUCMD */
2075 	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
2076 	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2077 	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
2078 	TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
2079 
2080 
2081 	/* MSS L4LEN IDX */
2082 	mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
2083 	mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
2084 	TXD->mss_l4len_idx = htole32(mss_l4len_idx);
2085 
2086 	TXD->seqnum_seed = htole32(0);
2087 	tx_buffer->m_head = NULL;
2088 
2089 #ifndef NO_82598_A0_SUPPORT
2090 	if (sc->hw.revision_id == 0)
2091 		desc_flip(TXD);
2092 #endif
2093 
2094 	if (++ctxd == sc->num_tx_desc)
2095 		ctxd = 0;
2096 
2097 	txr->tx_avail--;
2098 	txr->next_avail_tx_desc = ctxd;
2099 	return TRUE;
2100 }
2101 
2102 #else
2103 /* This makes it easy to keep the code common */
2104 int
2105 ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, uint32_t *paylen)
2106 {
2107 	return (FALSE);
2108 }
2109 #endif
2110 
2111 /**********************************************************************
2112  *
2113  *  Examine each tx_buffer in the used queue. If the hardware is done
2114  *  processing the packet, then free the associated resources. The
2115  *  tx_buffer is put back on the free queue.
2116  *
2117  **********************************************************************/
2118 int
2119 ixgbe_txeof(struct tx_ring *txr)
2120 {
2121 	struct ix_softc			*sc = txr->sc;
2122 	struct ifnet			*ifp = &sc->arpcom.ac_if;
2123 	uint				 first, last, done, num_avail;
2124 	struct ixgbe_tx_buf		*tx_buffer;
2125 	struct ixgbe_legacy_tx_desc *tx_desc;
2126 
2127 	if (txr->tx_avail == sc->num_tx_desc)
2128 		return FALSE;
2129 
2130 	num_avail = txr->tx_avail;
2131 	first = txr->next_tx_to_clean;
2132 
2133 	tx_buffer = &txr->tx_buffers[first];
2134 
2135 	/* For cleanup we just use legacy struct */
2136 	tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
2137 
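	/*
	 * With head write-back the hardware does not set DD bits in the
	 * descriptors; it copies its current head index into *tx_hwb, so
	 * cleanup walks from next_tx_to_clean up to that index.
	 */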
2138 	/* Get the head write-back index */
2139 	bus_dmamap_sync(txr->txwbdma.dma_tag, txr->txwbdma.dma_map,
2140 	    0, txr->txwbdma.dma_map->dm_mapsize,
2141 	    BUS_DMASYNC_POSTREAD);
2142 	done = *txr->tx_hwb;
2143 
2144 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2145 	    0, txr->txdma.dma_map->dm_mapsize,
2146 	    BUS_DMASYNC_POSTREAD);
2147 
2148 	while (TRUE) {
2149 		/* Clean the range up to the last head write-back */
2150 		while (first != done) {
2151 			tx_desc->upper.data = 0;
2152 			tx_desc->lower.data = 0;
2153 			tx_desc->buffer_addr = 0;
2154 			num_avail++;
2155 
2156 			if (tx_buffer->m_head) {
2157 				ifp->if_opackets++;
2158 				bus_dmamap_sync(txr->txdma.dma_tag,
2159 				    tx_buffer->map,
2160 				    0, tx_buffer->map->dm_mapsize,
2161 				    BUS_DMASYNC_POSTWRITE);
2162 				bus_dmamap_unload(txr->txdma.dma_tag,
2163 				    tx_buffer->map);
2164 				m_freem(tx_buffer->m_head);
2165 				tx_buffer->m_head = NULL;
2166 			}
2167 
2168 			if (++first == sc->num_tx_desc)
2169 				first = 0;
2170 
2171 			tx_buffer = &txr->tx_buffers[first];
2172 			tx_desc = (struct ixgbe_legacy_tx_desc *)
2173 			    &txr->tx_base[first];
2174 		}
2175 		/* See if there is more work now */
2176 		last = done;
2177 		bus_dmamap_sync(txr->txwbdma.dma_tag, txr->txwbdma.dma_map,
2178 		    0, txr->txwbdma.dma_map->dm_mapsize,
2179 		    BUS_DMASYNC_POSTREAD);
2180 		done = *txr->tx_hwb;
2181 		if (last == done)
2182 			break;
2183 	}
2184 
2185 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2186 	    0, txr->txdma.dma_map->dm_mapsize,
2187 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2188 
2189 	txr->next_tx_to_clean = first;
2190 
2191 	/*
2192 	 * If we have enough room, clear IFF_OACTIVE to tell the stack that
2193 	 * it is OK to send packets. If there are no pending descriptors,
2194 	 * clear the timeout. Otherwise, if some descriptors have been freed,
2195 	 * restart the timeout.
2196 	 */
2197 	if (num_avail > IXGBE_TX_CLEANUP_THRESHOLD) {
2198 		ifp->if_flags &= ~IFF_OACTIVE;
2199 
2200 		/* If all are clean turn off the timer */
2201 		if (num_avail == sc->num_tx_desc) {
2202 			ifp->if_timer = 0;
2203 			txr->watchdog_timer = 0;
2204 			txr->tx_avail = num_avail;
2205 			return FALSE;
2206 		}
2207 		/* Some were cleaned, so reset timer */
2208 		else if (num_avail != txr->tx_avail) {
2209 			ifp->if_timer = IXGBE_TX_TIMEOUT;
2210 			txr->watchdog_timer = IXGBE_TX_TIMEOUT;
2211 		}
2212 	}
2213 
2214 	txr->tx_avail = num_avail;
2215 
2216 	return TRUE;
2217 }
2218 
2219 /*********************************************************************
2220  *
2221  *  Get a buffer from system mbuf buffer pool.
2222  *  Get a buffer from the system mbuf pool.
2223  **********************************************************************/
2224 int
2225 ixgbe_get_buf(struct rx_ring *rxr, int i)
2226 {
2227 	struct ix_softc		*sc = rxr->sc;
2228 	struct mbuf		*m;
2229 	int			error;
2230 	int			size = MCLBYTES;
2231 	struct ixgbe_rx_buf	*rxbuf;
2232 	union ixgbe_adv_rx_desc	*rxdesc;
2233 	size_t			 dsize = sizeof(union ixgbe_adv_rx_desc);
2234 
2235 	rxbuf = &rxr->rx_buffers[i];
2236 	rxdesc = &rxr->rx_base[i];
2237 
2238 	if (rxbuf->m_head != NULL) {
2239 		printf("%s: ixgbe_get_buf: slot %d already has an mbuf\n",
2240 		    sc->dev.dv_xname, i);
2241 		return (ENOBUFS);
2242 	}
2243 
2244 	MGETHDR(m, M_DONTWAIT, MT_DATA);
2245 	if (m == NULL) {
2246 		sc->mbuf_alloc_failed++;
2247 		return (ENOBUFS);
2248 	}
2249 	MCLGETI(m, M_DONTWAIT, &sc->arpcom.ac_if, size);
2250 	if ((m->m_flags & M_EXT) == 0) {
2251 		m_freem(m);
2252 		sc->mbuf_cluster_failed++;
2253 		return (ENOBUFS);
2254 	}
2255 	m->m_len = m->m_pkthdr.len = size;
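	/*
	 * If the frame fits with room to spare, shift the payload by
	 * ETHER_ALIGN (2 bytes) so the IP header following the 14-byte
	 * Ethernet header ends up 32-bit aligned.
	 */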
2256 	if (sc->max_frame_size <= (size - ETHER_ALIGN))
2257 		m_adj(m, ETHER_ALIGN);
2258 
2259 	error = bus_dmamap_load_mbuf(rxr->rxdma.dma_tag, rxbuf->map,
2260 	    m, BUS_DMA_NOWAIT);
2261 	if (error) {
2262 		m_freem(m);
2263 		return (error);
2264 	}
2265 
2266 	bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map,
2267 	    0, rxbuf->map->dm_mapsize, BUS_DMASYNC_PREREAD);
2268 	rxbuf->m_head = m;
2269 
2270 	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2271 	    dsize * i, dsize, BUS_DMASYNC_POSTWRITE);
2272 
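	/*
	 * Sync only this descriptor: POSTWRITE before the CPU rewrites it,
	 * then the PREWRITE below publishes the new buffer address to the
	 * hardware.
	 */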
2273 	bzero(rxdesc, dsize);
2274 	rxdesc->read.pkt_addr = htole64(rxbuf->map->dm_segs[0].ds_addr);
2275 
2276 #ifndef NO_82598_A0_SUPPORT
2277 	/* A0 silicon requires one's complement descriptors */
2278 	if (sc->hw.revision_id == 0) {
2279 		struct dhack {
2280 			uint32_t a1;
2281 			uint32_t a2;
2282 			uint32_t b1;
2283 			uint32_t b2;
2284 		};
2285 		struct dhack *d;
2286 
2287 		d = (struct dhack *)rxdesc;
2288 		d->a1 = ~(d->a1);
2289 		d->a2 = ~(d->a2);
2290 	}
2291 #endif
2292 
2293 	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2294 	    dsize * i, dsize, BUS_DMASYNC_PREWRITE);
2295 
2296 	rxr->rx_ndescs++;
2297 
2298 	return (0);
2299 }
2300 
2301 /*********************************************************************
2302  *
2303  *  Allocate memory for rx_buffer structures. Since we use one
2304  *  rx_buffer per received packet, the maximum number of rx_buffer's
2305  *  rx_buffer per received packet, the maximum number of rx_buffers
2306  *  that we've allocated.
2307  *
2308  **********************************************************************/
2309 int
2310 ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
2311 {
2312 	struct ix_softc		*sc = rxr->sc;
2313 	struct ifnet		*ifp = &sc->arpcom.ac_if;
2314 	struct ixgbe_rx_buf 	*rxbuf;
2315 	int             	i, bsize, error, size = MCLBYTES;
2316 
2317 	bsize = sizeof(struct ixgbe_rx_buf) * sc->num_rx_desc;
2318 	if (!(rxr->rx_buffers = (struct ixgbe_rx_buf *) malloc(bsize,
2319 	    M_DEVBUF, M_NOWAIT | M_ZERO))) {
2320 		printf("%s: Unable to allocate rx_buffer memory\n",
2321 		    ifp->if_xname);
2322 		error = ENOMEM;
2323 		goto fail;
2324 	}
2325 	rxr->rxtag = rxr->rxdma.dma_tag;
2326 
2327 	rxbuf = rxr->rx_buffers;
2328 	for (i = 0; i < sc->num_rx_desc; i++, rxbuf++) {
2329 		error = bus_dmamap_create(rxr->rxdma.dma_tag, size, 1,
2330 		    size, 0, BUS_DMA_NOWAIT, &rxbuf->map);
2331 		if (error) {
2332 			printf("%s: Unable to create Rx DMA map\n",
2333 			    ifp->if_xname);
2334 			goto fail;
2335 		}
2336 		rxbuf->m_head = NULL;
2337 	}
2338 	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 0,
2339 	    rxr->rxdma.dma_map->dm_mapsize,
2340 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2341 
2342 	return (0);
2343 
2344 fail:
2345 	return (error);
2346 }
2347 
2348 /*********************************************************************
2349  *
2350  *  Initialize a receive ring and its buffers.
2351  *
2352  **********************************************************************/
2353 int
2354 ixgbe_setup_receive_ring(struct rx_ring *rxr)
2355 {
2356 	struct ix_softc		*sc = rxr->sc;
2357 	int			 rsize, error;
2358 
2359 	rsize = roundup2(sc->num_rx_desc *
2360 	    sizeof(union ixgbe_adv_rx_desc), 4096);
2361 	/* Clear the ring contents */
2362 	bzero((void *)rxr->rx_base, rsize);
2363 
2364 	if ((error = ixgbe_allocate_receive_buffers(rxr)) != 0)
2365 		return (error);
2366 
2367 	/* Setup our descriptor indices */
2368 	rxr->next_to_check = 0;
2369 	rxr->last_rx_desc_filled = sc->num_rx_desc - 1;
2370 
2371 	ixgbe_rxfill(rxr);
2372 	if (rxr->rx_ndescs < 1) {
2373 		printf("%s: unable to fill any rx descriptors\n",
2374 		    sc->dev.dv_xname);
2375 		return (ENOBUFS);
2376 	}
2377 
2378 	return (0);
2379 }
2380 
2381 int
2382 ixgbe_rxfill(struct rx_ring *rxr)
2383 {
2384 	struct ix_softc *sc = rxr->sc;
2385 	int		 post = 0;
2386 	int		 i;
2387 
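	/*
	 * Refill starts one slot past the last descriptor handed to the
	 * hardware and stops at the first allocation failure; the return
	 * value tells the caller whether any slot was filled.
	 */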
2388 	i = rxr->last_rx_desc_filled;
2389 	while (rxr->rx_ndescs < sc->num_rx_desc) {
2390 		if (++i == sc->num_rx_desc)
2391 			i = 0;
2392 
2393 		if (ixgbe_get_buf(rxr, i) != 0)
2394 			break;
2395 
2396 		rxr->last_rx_desc_filled = i;
2397 		post = 1;
2398 	}
2399 
2400 	return (post);
2401 }
2402 
2403 /*********************************************************************
2404  *
2405  *  Initialize all receive rings.
2406  *
2407  **********************************************************************/
2408 int
2409 ixgbe_setup_receive_structures(struct ix_softc *sc)
2410 {
2411 	struct rx_ring *rxr = sc->rx_rings;
2412 	int i;
2413 
2414 	for (i = 0; i < sc->num_rx_queues; i++, rxr++)
2415 		if (ixgbe_setup_receive_ring(rxr))
2416 			goto fail;
2417 
2418 	return (0);
2419 
2420 fail:
2421 	ixgbe_free_receive_structures(sc);
2422 	return (ENOBUFS);
2423 }
2424 
2425 /*********************************************************************
2426  *
2427  *  Enable receive unit.
2428  *
2429  **********************************************************************/
2430 void
2431 ixgbe_initialize_receive_units(struct ix_softc *sc)
2432 {
2433 	struct	rx_ring	*rxr = sc->rx_rings;
2434 	struct ifnet   *ifp = &sc->arpcom.ac_if;
2435 	uint32_t	rxctrl, fctrl, srrctl, rxcsum;
2436 	uint32_t	reta, mrqc, hlreg, linkvec;
2437 	uint32_t	random[10];
2438 	int		i;
2439 
2440 	/*
2441 	 * Make sure receives are disabled while
2442 	 * setting up the descriptor ring
2443 	 */
2444 	rxctrl = IXGBE_READ_REG(&sc->hw, IXGBE_RXCTRL);
2445 	IXGBE_WRITE_REG(&sc->hw, IXGBE_RXCTRL,
2446 	    rxctrl & ~IXGBE_RXCTRL_RXEN);
2447 
2448 	/* Enable broadcasts */
2449 	fctrl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
2450 	fctrl |= IXGBE_FCTRL_BAM;
2451 	IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, fctrl);
2452 
2453 	hlreg = IXGBE_READ_REG(&sc->hw, IXGBE_HLREG0);
2454 	if (ifp->if_mtu > ETHERMTU)
2455 		hlreg |= IXGBE_HLREG0_JUMBOEN;
2456 	else
2457 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
2458 	IXGBE_WRITE_REG(&sc->hw, IXGBE_HLREG0, hlreg);
2459 
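	/*
	 * SRRCTL selects the receive buffer size (2 KB or 4 KB clusters
	 * here) and the advanced one-buffer descriptor format.
	 */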
2460 	srrctl = IXGBE_READ_REG(&sc->hw, IXGBE_SRRCTL(0));
2461 	srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
2462 	srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
2463 	if (sc->bigbufs)
2464 		srrctl |= 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2465 	else
2466 		srrctl |= 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2467 	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2468 	IXGBE_WRITE_REG(&sc->hw, IXGBE_SRRCTL(0), srrctl);
2469 
2470 	/* Set Queue moderation rate */
2471 	for (i = 0; i < IXGBE_MSGS; i++)
2472 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(i), DEFAULT_ITR);
2473 
2474 	/* Set Link moderation lower */
2475 	linkvec = sc->num_tx_queues + sc->num_rx_queues;
2476 	IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(linkvec), LINK_ITR);
2477 
2478 	for (i = 0; i < sc->num_rx_queues; i++, rxr++) {
2479 		uint64_t rdba = rxr->rxdma.dma_map->dm_segs[0].ds_addr;
2480 		/* Setup the Base and Length of the Rx Descriptor Ring */
2481 		IXGBE_WRITE_REG(&sc->hw, IXGBE_RDBAL(i),
2482 			       (rdba & 0x00000000ffffffffULL));
2483 		IXGBE_WRITE_REG(&sc->hw, IXGBE_RDBAH(i), (rdba >> 32));
2484 		IXGBE_WRITE_REG(&sc->hw, IXGBE_RDLEN(i),
2485 		    sc->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
2486 
2487 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
2488 		IXGBE_WRITE_REG(&sc->hw, IXGBE_RDH(i), 0);
2489 		IXGBE_WRITE_REG(&sc->hw, IXGBE_RDT(i),
2490 		    rxr->last_rx_desc_filled);
2491 	}
2492 
2493 	rxcsum = IXGBE_READ_REG(&sc->hw, IXGBE_RXCSUM);
2494 
2495 	if (sc->num_rx_queues > 1) {
2496 		/* set up random bits */
2497 		arc4random_buf(&random, sizeof(random));
2498 		switch (sc->num_rx_queues) {
2499 		case 8:
2500 		case 4:
2501 			reta = 0x00010203;
2502 			break;
2503 		case 2:
2504 			reta = 0x00010001;
2505 			break;
2506 		default:
2507 			reta = 0x00000000;
2508 		}
2509 
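		/*
		 * Each 32-bit RETA register holds four byte-wide queue
		 * indices; the patterns above spread incoming flows across
		 * the active receive queues.
		 */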
2510 		/* Set up the redirection table */
2511 		for (i = 0; i < 32; i++) {
2512 			IXGBE_WRITE_REG(&sc->hw, IXGBE_RETA(i), reta);
2513 			if (sc->num_rx_queues > 4) {
2514 				++i;
2515 				IXGBE_WRITE_REG(&sc->hw,
2516 				    IXGBE_RETA(i), 0x04050607);
2517 			}
2518 		}
2519 
2520 		/* Now fill our hash function seeds */
2521 		for (i = 0; i < 10; i++)
2522 			IXGBE_WRITE_REG_ARRAY(&sc->hw,
2523 			    IXGBE_RSSRK(0), i, random[i]);
2524 
2525 		mrqc = IXGBE_MRQC_RSSEN
2526 		    /* Perform hash on these packet types */
2527 		    | IXGBE_MRQC_RSS_FIELD_IPV4
2528 		    | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
2529 		    | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
2530 		    | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
2531 		    | IXGBE_MRQC_RSS_FIELD_IPV6_EX
2532 		    | IXGBE_MRQC_RSS_FIELD_IPV6
2533 		    | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
2534 		    | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
2535 		    | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2536 		IXGBE_WRITE_REG(&sc->hw, IXGBE_MRQC, mrqc);
2537 
2538 		/* RSS and RX IPP Checksum are mutually exclusive */
2539 		rxcsum |= IXGBE_RXCSUM_PCSD;
2540 	}
2541 
2542 #if defined(IX_CSUM_OFFLOAD)
2543 	rxcsum |= IXGBE_RXCSUM_PCSD;
2544 #endif
2545 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
2546 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
2547 
2548 	IXGBE_WRITE_REG(&sc->hw, IXGBE_RXCSUM, rxcsum);
2549 
2550 	/* Enable Receive engine */
2551 	rxctrl |= (IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS);
2552 	IXGBE_WRITE_REG(&sc->hw, IXGBE_RXCTRL, rxctrl);
2553 
2554 	return;
2555 }
2556 
2557 /*********************************************************************
2558  *
2559  *  Free all receive rings.
2560  *
2561  **********************************************************************/
2562 void
2563 ixgbe_free_receive_structures(struct ix_softc *sc)
2564 {
2565 	struct rx_ring *rxr = sc->rx_rings;
2566 	int		i;
2567 
2568 	for (i = 0; i < sc->num_rx_queues; i++, rxr++) {
2569 		ixgbe_free_receive_buffers(rxr);
2570 	}
2571 }
2572 
2573 /*********************************************************************
2574  *
2575  *  Free receive ring data structures
2576  *
2577  **********************************************************************/
2578 void
2579 ixgbe_free_receive_buffers(struct rx_ring *rxr)
2580 {
2581 	struct ix_softc		*sc = NULL;
2582 	struct ixgbe_rx_buf	*rxbuf = NULL;
2583 	int			 i;
2584 
2585 	INIT_DEBUGOUT("free_receive_buffers: begin");
2586 	sc = rxr->sc;
2587 	if (rxr->rx_buffers != NULL) {
2588 		rxbuf = rxr->rx_buffers;
2589 		for (i = 0; i < sc->num_rx_desc; i++, rxbuf++) {
2590 			if (rxbuf->m_head != NULL) {
2591 				bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map,
2592 				    0, rxbuf->map->dm_mapsize,
2593 				    BUS_DMASYNC_POSTREAD);
2594 				bus_dmamap_unload(rxr->rxdma.dma_tag, rxbuf->map);
2595 				m_freem(rxbuf->m_head);
2596 				rxbuf->m_head = NULL;
2597 			}
2598 			bus_dmamap_destroy(rxr->rxdma.dma_tag, rxbuf->map);
2599 			rxbuf->map = NULL;
2600 		}
2601 	}
2602 	if (rxr->rx_buffers != NULL) {
2603 		free(rxr->rx_buffers, M_DEVBUF);
2604 		rxr->rx_buffers = NULL;
2605 	}
2606 
2607 	if (rxr->rxtag != NULL)
2608 		rxr->rxtag = NULL;
2609 
2610 	if (rxr->fmp != NULL) {
2611 		m_freem(rxr->fmp);
2612 		rxr->fmp = NULL;
2613 		rxr->lmp = NULL;
2614 	}
2615 }
2616 
2617 /*********************************************************************
2618  *
2619  *  This routine executes in interrupt context. It replenishes
2620  *  the mbufs in the descriptor ring and passes data that has been
2621  *  DMA'ed into host memory up to the network stack.
2622  *
2623  *  We loop at most count times if count > 0, or until all descriptors
2624  *  are consumed if count < 0.
2625  *
2626  *********************************************************************/
2627 int
2628 ixgbe_rxeof(struct rx_ring *rxr, int count)
2629 {
2630 	struct ix_softc 	*sc = rxr->sc;
2631 	struct ifnet   		*ifp = &sc->arpcom.ac_if;
2632 	struct mbuf    		*m;
2633 	uint8_t			 accept_frame = 0;
2634 	uint8_t			 eop = 0;
2635 	uint16_t		 len, desc_len, prev_len_adj;
2636 	uint32_t		 staterr = 0;
2637 	struct ixgbe_rx_buf	*rxbuf;
2638 	union ixgbe_adv_rx_desc	*rxdesc;
2639 	size_t			 dsize = sizeof(union ixgbe_adv_rx_desc);
2640 	int			 i;
2641 
2642 	if (!ISSET(ifp->if_flags, IFF_RUNNING))
2643 		return FALSE;
2644 
2645 	i = rxr->next_to_check;
2646 
2647 	while (count != 0 && rxr->rx_ndescs > 0) {
2648 		m = NULL;
2649 
2650 		rxdesc = &rxr->rx_base[i];
2651 		rxbuf = &rxr->rx_buffers[i];
2652 
2653 		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2654 		    dsize * i, dsize,
2655 		    BUS_DMASYNC_POSTREAD);
2656 
2657 		staterr = letoh32(rxdesc->wb.upper.status_error);
2658 		if (!ISSET(staterr, IXGBE_RXD_STAT_DD)) {
2659 			bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2660 			    dsize * i, dsize,
2661 			    BUS_DMASYNC_PREREAD);
2662 			break;
2663 		}
2664 
2665 		/* pull the mbuf off the ring */
2666 		bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map, 0,
2667 		    rxbuf->map->dm_mapsize,
2668 		    BUS_DMASYNC_POSTREAD);
2669 		bus_dmamap_unload(rxr->rxdma.dma_tag, rxbuf->map);
2670 		m = rxbuf->m_head;
2671 		rxbuf->m_head = NULL;
2672 
2673 		if (m == NULL) {
2674 			panic("%s: ixgbe_rxeof: NULL mbuf in slot %d "
2675 			    "(nrx %d, filled %d)", sc->dev.dv_xname,
2676 			    i, rxr->rx_ndescs,
2677 			    rxr->last_rx_desc_filled);
2678 		}
2679 
2680 		m_cluncount(m, 1);
2681 		rxr->rx_ndescs--;
2682 
2683 		accept_frame = 1;
2684 		prev_len_adj = 0;
2685 		desc_len = letoh16(rxdesc->wb.upper.length);
2686 
2687 		if (staterr & IXGBE_RXD_STAT_EOP) {
2688 			count--;
2689 			eop = 1;
2690 		} else {
2691 			eop = 0;
2692 		}
2693 		len = desc_len;
2694 
2695 		if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)
2696 			accept_frame = 0;
2697 
2698 		if (accept_frame) {
2699 			m->m_len = len;
2700 
2701 			/* XXX ixgbe_realign() STRICT_ALIGN */
2702 
2703 			if (rxr->fmp == NULL) {
2704 				m->m_pkthdr.len = m->m_len;
2705 				rxr->fmp = m; /* Store the first mbuf */
2706 				rxr->lmp = m;
2707 			} else {
2708 				/* Chain mbuf's together */
2709 				m->m_flags &= ~M_PKTHDR;
2710 #if 0
2711 				/*
2712 				 * Adjust length of previous mbuf in chain if
2713 				 * we received less than 4 bytes in the last
2714 				 * descriptor.
2715 				 */
2716 				if (prev_len_adj > 0) {
2717 					rxr->lmp->m_len -= prev_len_adj;
2718 					rxr->fmp->m_pkthdr.len -= prev_len_adj;
2719 				}
2720 #endif
2721 				rxr->lmp->m_next = m;
2722 				rxr->lmp = m;
2723 				rxr->fmp->m_pkthdr.len += m->m_len;
2724 			}
2725 
2726 			if (eop) {
2727 				ifp->if_ipackets++;
2728 
2729 				m = rxr->fmp;
2730 				m->m_pkthdr.rcvif = ifp;
2731 
2732 				rxr->packet_count++;
2733 				rxr->byte_count += rxr->fmp->m_pkthdr.len;
2734 
2735 				ixgbe_rx_checksum(sc, staterr, m);
2736 
2737 #if NVLAN > 0
2738 				if (staterr & IXGBE_RXD_STAT_VP) {
2739 					m->m_pkthdr.ether_vtag =
2740 					    letoh16(rxdesc->wb.upper.vlan);
2741 					m->m_flags |= M_VLANTAG;
2742 				}
2743 #endif
2744 #if NBPFILTER > 0
2745 				if (ifp->if_bpf)
2746 					bpf_mtap_ether(ifp->if_bpf, m,
2747 					    BPF_DIRECTION_IN);
2748 #endif
2749 
2750 				ether_input_mbuf(ifp, m);
2751 
2752 				rxr->fmp = NULL;
2753 				rxr->lmp = NULL;
2754 			}
2755 		} else {
2756 			sc->dropped_pkts++;
2757 
2758 			if (rxr->fmp != NULL) {
2759 				m_freem(rxr->fmp);
2760 				rxr->fmp = NULL;
2761 				rxr->lmp = NULL;
2762 			}
2763 
2764 			m_freem(m);
2765 		}
2766 
2767 		/* Zero out the receive descriptor's status */
2768 		rxdesc->wb.upper.status_error = 0;
2769 
2770 		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2771 		    dsize * i, dsize,
2772 		    BUS_DMASYNC_PREREAD);
2773 
2774 		/* Advance our pointers to the next descriptor. */
2775 		if (++i == sc->num_rx_desc)
2776 			i = 0;
2777 	}
2778 	rxr->next_to_check = i;
2779 
2780 	if (!(staterr & IXGBE_RXD_STAT_DD))
2781 		return FALSE;
2782 
2783 	return TRUE;
2784 }
2785 
2786 /*********************************************************************
2787  *
2788  *  Verify that the hardware indicated that the checksum is valid.
2789  *  Inform the stack about the status of the checksum so that the
2790  *  stack doesn't spend time verifying it again.
2791  *
2792  *********************************************************************/
2793 void
2794 ixgbe_rx_checksum(struct ix_softc *sc,
2795     uint32_t staterr, struct mbuf * mp)
2796 {
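	/* Status lives in the low word of staterr, the error bits in the top byte */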
2797 	uint16_t status = (uint16_t) staterr;
2798 	uint8_t  errors = (uint8_t) (staterr >> 24);
2799 
2800 	if (status & IXGBE_RXD_STAT_IPCS) {
2801 		/* Did it pass? */
2802 		if (!(errors & IXGBE_RXD_ERR_IPE)) {
2803 			/* IP Checksum Good */
2804 			mp->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
2805 		} else
2806 			mp->m_pkthdr.csum_flags = 0;
2807 	}
2808 
2809 	if (status & IXGBE_RXD_STAT_L4CS) {
2810 		/* Did it pass? */
2811 		if (!(errors & IXGBE_RXD_ERR_TCPE))
2812 			mp->m_pkthdr.csum_flags |=
2813 				M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
2814 	}
2815 
2816 }
2817 
2818 void
2819 ixgbe_enable_hw_vlans(struct ix_softc *sc)
2820 {
2821 	uint32_t	ctrl;
2822 
2823 	ixgbe_disable_intr(sc);
2824 	ctrl = IXGBE_READ_REG(&sc->hw, IXGBE_VLNCTRL);
2825 	ctrl |= IXGBE_VLNCTRL_VME;
2826 	ctrl &= ~IXGBE_VLNCTRL_CFIEN;
2827 	ctrl &= ~IXGBE_VLNCTRL_VFE;
2828 	IXGBE_WRITE_REG(&sc->hw, IXGBE_VLNCTRL, ctrl);
2829 	ixgbe_enable_intr(sc);
2830 }
2831 
2832 void
2833 ixgbe_enable_intr(struct ix_softc *sc)
2834 {
2835 	struct ixgbe_hw *hw = &sc->hw;
2836 	uint32_t mask = IXGBE_EIMS_ENABLE_MASK;
2837 
2838 	/* Enable Fan Failure detection */
2839 	if (hw->phy.media_type == ixgbe_media_type_copper)
2840 		    mask |= IXGBE_EIMS_GPI_SDP1;
2841 	/* With RSS we use auto clear */
2842 	if (sc->msix_mem) {
2843 		/* Don't autoclear Link */
2844 		mask &= ~IXGBE_EIMS_OTHER;
2845 		mask &= ~IXGBE_EIMS_LSC;
2846 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAC,
2847 		    sc->eims_mask | mask);
2848 	}
2849 
2850 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
2851 	IXGBE_WRITE_FLUSH(hw);
2852 
2853 	return;
2854 }
2855 
2856 void
2857 ixgbe_disable_intr(struct ix_softc *sc)
2858 {
2859 	if (sc->msix_mem)
2860 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAC, 0);
2861 	IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, ~0);
2862 	IXGBE_WRITE_FLUSH(&sc->hw);
2863 	return;
2864 }
2865 
2866 uint16_t
2867 ixgbe_read_pci_cfg(struct ixgbe_hw *hw, uint32_t reg)
2868 {
2869 	struct pci_attach_args	*pa;
2870 	uint16_t value;
2871 
2872 	pa = ((struct ixgbe_osdep *)hw->back)->os_pa;
2873 
2874 	/* Should we do read/mask/write...?  16 vs 32 bit!!! */
2875 	value = pci_conf_read(pa->pa_pc, pa->pa_tag, reg) & 0xffff;
2876 
2877 	return (value);
2878 }
2879 
2880 void
2881 ixgbe_set_ivar(struct ix_softc *sc, uint16_t entry, uint8_t vector)
2882 {
2883 	uint32_t ivar, index;
2884 
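	/*
	 * Each IVAR register maps four interrupt causes; every cause gets
	 * an 8-bit field holding the MSI-X vector plus the valid bit.
	 */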
2885 	vector |= IXGBE_IVAR_ALLOC_VAL;
2886 	index = (entry >> 2) & 0x1F;
2887 	ivar = IXGBE_READ_REG(&sc->hw, IXGBE_IVAR(index));
2888 	ivar &= ~(0xFF << (8 * (entry & 0x3)));
2889 	ivar |= (vector << (8 * (entry & 0x3)));
2890 	IXGBE_WRITE_REG(&sc->hw, IXGBE_IVAR(index), ivar);
2891 }
2892 
2893 void
2894 ixgbe_configure_ivars(struct ix_softc *sc)
2895 {
2896 	struct  tx_ring *txr = sc->tx_rings;
2897 	struct  rx_ring *rxr = sc->rx_rings;
2898 	int		 i;
2899 
2900 	for (i = 0; i < sc->num_rx_queues; i++, rxr++) {
2901 		ixgbe_set_ivar(sc, IXGBE_IVAR_RX_QUEUE(i), rxr->msix);
2902 		sc->eims_mask |= rxr->eims;
2903 	}
2904 
2905 	for (i = 0; i < sc->num_tx_queues; i++, txr++) {
2906 		ixgbe_set_ivar(sc, IXGBE_IVAR_TX_QUEUE(i), txr->msix);
2907 		sc->eims_mask |= txr->eims;
2908 	}
2909 
2910 	/* For the Link interrupt */
2911 	ixgbe_set_ivar(sc, IXGBE_IVAR_OTHER_CAUSES_INDEX,
2912 	    sc->linkvec);
2913 	sc->eims_mask |= IXGBE_IVAR_OTHER_CAUSES_INDEX;
2914 }
2915 
2916 /**********************************************************************
2917  *
2918  *  Update the board statistics counters.
2919  *
2920  **********************************************************************/
2921 void
2922 ixgbe_update_stats_counters(struct ix_softc *sc)
2923 {
2924 	struct ifnet   *ifp = &sc->arpcom.ac_if;
2925 	struct ixgbe_hw *hw = &sc->hw;
2926 	uint32_t  missed_rx = 0, bprc, lxon, lxoff, total;
2927 	int	i;
2928 
2929 	sc->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
2930 
2931 	for (i = 0; i < 8; i++) {
2932 		int mp;
2933 		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
2934 		missed_rx += mp;
2935         	sc->stats.mpc[i] += mp;
2936 		sc->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
2937 	}
2938 
2939 	/* Hardware workaround, gprc counts missed packets */
2940 	sc->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
2941 	sc->stats.gprc -= missed_rx;
2942 
2943 	sc->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
2944 	sc->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
2945 	sc->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
2946 
2947 	/*
2948 	 * Workaround: mprc hardware is incorrectly counting
2949 	 * broadcasts, so for now we subtract those.
2950 	 */
2951 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
2952 	sc->stats.bprc += bprc;
2953 	sc->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
2954 	sc->stats.mprc -= bprc;
2955 
2956 	sc->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
2957 	sc->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
2958 	sc->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
2959 	sc->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
2960 	sc->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
2961 	sc->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
2962 	sc->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
2963 	sc->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
2964 
2965 	sc->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
2966 	sc->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
2967 
2968 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
2969 	sc->stats.lxontxc += lxon;
2970 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
2971 	sc->stats.lxofftxc += lxoff;
2972 	total = lxon + lxoff;
2973 
2974 	sc->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
2975 	sc->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
2976 	sc->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
2977 	sc->stats.gptc -= total;
2978 	sc->stats.mptc -= total;
2979 	sc->stats.ptc64 -= total;
2980 	sc->stats.gotc -= total * ETHER_MIN_LEN;
2981 
2982 	sc->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
2983 	sc->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
2984 	sc->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
2985 	sc->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
2986 	sc->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
2987 	sc->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
2988 	sc->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
2989 	sc->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
2990 	sc->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
2991 	sc->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
2992 
2993 #if 0
2994 	/* Fill out the OS statistics structure */
2995 	ifp->if_ipackets = sc->stats.gprc;
2996 	ifp->if_opackets = sc->stats.gptc;
2997 	ifp->if_ibytes = sc->stats.gorc;
2998 	ifp->if_obytes = sc->stats.gotc;
2999 	ifp->if_imcasts = sc->stats.mprc;
3000 #endif
3001 	ifp->if_collisions = 0;
3002 	ifp->if_oerrors = sc->watchdog_events;
3003 	ifp->if_ierrors = missed_rx + sc->stats.crcerrs + sc->stats.rlec;
3004 }
3005 
3006 #ifdef IX_DEBUG
3007 /**********************************************************************
3008  *
3009  *  This routine is called only when ixgbe_display_debug_stats is enabled.
3010  *  This routine provides a way to take a look at important statistics
3011  *  maintained by the driver and hardware.
3012  *
3013  **********************************************************************/
3014 void
3015 ixgbe_print_hw_stats(struct ix_softc * sc)
3016 {
3017 	struct ifnet   *ifp = &sc->arpcom.ac_if;
3018 
3019 	printf("%s: mbuf alloc failed %lu, mbuf cluster failed %lu, "
3020 	    "missed pkts %llu, rx len errs %llu, crc errs %llu, "
3021 	    "dropped pkts %lu, watchdog timeouts %ld, "
3022 	    "XON rx %llu, XON tx %llu, XOFF rx %llu, XOFF tx %llu, "
3023 	    "total pkts rx %llu, good pkts rx %llu, good pkts tx %llu, "
3024 	    "tso tx %lu\n",
3025 	    ifp->if_xname,
3026 	    sc->mbuf_alloc_failed,
3027 	    sc->mbuf_cluster_failed,
3028 	    (long long)sc->stats.mpc[0],
3029 	    (long long)sc->stats.roc + (long long)sc->stats.ruc,
3030 	    (long long)sc->stats.crcerrs,
3031 	    sc->dropped_pkts,
3032 	    sc->watchdog_events,
3033 	    (long long)sc->stats.lxonrxc,
3034 	    (long long)sc->stats.lxontxc,
3035 	    (long long)sc->stats.lxoffrxc,
3036 	    (long long)sc->stats.lxofftxc,
3037 	    (long long)sc->stats.tpr,
3038 	    (long long)sc->stats.gprc,
3039 	    (long long)sc->stats.gptc,
3040 	    sc->tso_tx);
3041 }
3042 #endif
3043 
3044 #ifndef NO_82598_A0_SUPPORT
3045 /*
3046  * A0 Workaround: invert descriptor for hardware
3047  */
3048 void
3049 desc_flip(void *desc)
3050 {
3051 	struct dhack {uint32_t a1; uint32_t a2; uint32_t b1; uint32_t b2;};
3052 	struct dhack *d;
3053 
3054 	d = (struct dhack *)desc;
3055 	d->a1 = ~(d->a1);
3056 	d->a2 = ~(d->a2);
3057 	d->b1 = ~(d->b1);
3058 	d->b2 = ~(d->b2);
3059 	d->b2 &= 0xFFFFFFF0;
3060 	d->b1 &= ~IXGBE_ADVTXD_DCMD_RS;
3061 }
3062 #endif
3063 