xref: /openbsd-src/sys/dev/pci/if_ix.c (revision 2b0358df1d88d06ef4139321dd05bd5e05d91eaf)
1 /*	$OpenBSD: if_ix.c,v 1.15 2008/11/28 02:44:18 brad Exp $	*/
2 
3 /******************************************************************************
4 
5   Copyright (c) 2001-2008, Intel Corporation
6   All rights reserved.
7 
8   Redistribution and use in source and binary forms, with or without
9   modification, are permitted provided that the following conditions are met:
10 
11    1. Redistributions of source code must retain the above copyright notice,
12       this list of conditions and the following disclaimer.
13 
14    2. Redistributions in binary form must reproduce the above copyright
15       notice, this list of conditions and the following disclaimer in the
16       documentation and/or other materials provided with the distribution.
17 
18    3. Neither the name of the Intel Corporation nor the names of its
19       contributors may be used to endorse or promote products derived from
20       this software without specific prior written permission.
21 
22   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32   POSSIBILITY OF SUCH DAMAGE.
33 
34 ******************************************************************************/
35 /*$FreeBSD: src/sys/dev/ixgbe/ixgbe.c,v 1.5 2008/05/16 18:46:30 jfv Exp $*/
36 
37 #include <dev/pci/if_ix.h>
38 
39 /*********************************************************************
40  *  Driver version
41  *********************************************************************/
42 
43 #define IXGBE_DRIVER_VERSION	"1.4.4"
44 
45 /*********************************************************************
46  *  PCI Device ID Table
47  *
48  *  Used by probe to select devices to load on
49  *********************************************************************/
50 
51 const struct pci_matchid ixgbe_devices[] = {
52 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AF_DUAL },
53 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AF },
54 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AT_DUAL },
55 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598EB_CX4 },
56 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598EB_CX4_DUAL },
57 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598EB_XF_LR },
58 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AT }
59 };
60 
61 /*********************************************************************
62  *  Function prototypes
63  *********************************************************************/
64 int	ixgbe_probe(struct device *, void *, void *);
65 void	ixgbe_attach(struct device *, struct device *, void *);
66 int	ixgbe_detach(struct device *, int);
67 void	ixgbe_power(int, void *);
68 void	ixgbe_shutdown(void *);
69 void	ixgbe_start(struct ifnet *);
70 void	ixgbe_start_locked(struct tx_ring *, struct ifnet *);
71 int	ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
72 void	ixgbe_watchdog(struct ifnet *);
73 void	ixgbe_init(void *);
74 void	ixgbe_stop(void *);
75 void	ixgbe_media_status(struct ifnet *, struct ifmediareq *);
76 int	ixgbe_media_change(struct ifnet *);
77 void	ixgbe_identify_hardware(struct ix_softc *);
78 int	ixgbe_allocate_pci_resources(struct ix_softc *);
79 int	ixgbe_allocate_legacy(struct ix_softc *);
80 int	ixgbe_allocate_queues(struct ix_softc *);
81 void	ixgbe_free_pci_resources(struct ix_softc *);
82 void	ixgbe_local_timer(void *);
83 int	ixgbe_hardware_init(struct ix_softc *);
84 void	ixgbe_setup_interface(struct ix_softc *);
85 
86 int	ixgbe_allocate_transmit_buffers(struct tx_ring *);
87 int	ixgbe_setup_transmit_structures(struct ix_softc *);
88 void	ixgbe_setup_transmit_ring(struct tx_ring *);
89 void	ixgbe_initialize_transmit_units(struct ix_softc *);
90 void	ixgbe_free_transmit_structures(struct ix_softc *);
91 void	ixgbe_free_transmit_buffers(struct tx_ring *);
92 
93 int	ixgbe_allocate_receive_buffers(struct rx_ring *);
94 int	ixgbe_setup_receive_structures(struct ix_softc *);
95 int	ixgbe_setup_receive_ring(struct rx_ring *);
96 void	ixgbe_initialize_receive_units(struct ix_softc *);
97 void	ixgbe_free_receive_structures(struct ix_softc *);
98 void	ixgbe_free_receive_buffers(struct rx_ring *);
99 
100 void	ixgbe_enable_intr(struct ix_softc *);
101 void	ixgbe_disable_intr(struct ix_softc *);
102 void	ixgbe_update_stats_counters(struct ix_softc *);
103 int	ixgbe_txeof(struct tx_ring *);
104 int	ixgbe_rxeof(struct rx_ring *, int);
105 void	ixgbe_rx_checksum(struct ix_softc *, uint32_t, struct mbuf *);
106 void	ixgbe_set_promisc(struct ix_softc *);
107 void	ixgbe_disable_promisc(struct ix_softc *);
108 void	ixgbe_set_multi(struct ix_softc *);
109 #ifdef IX_DEBUG
110 void	ixgbe_print_hw_stats(struct ix_softc *);
111 #endif
112 void	ixgbe_update_link_status(struct ix_softc *);
113 int	ixgbe_get_buf(struct rx_ring *, int, struct mbuf *);
114 int	ixgbe_encap(struct tx_ring *, struct mbuf *);
115 void	ixgbe_enable_hw_vlans(struct ix_softc * sc);
116 int	ixgbe_dma_malloc(struct ix_softc *, bus_size_t,
117 		    struct ixgbe_dma_alloc *, int);
118 void	ixgbe_dma_free(struct ix_softc *, struct ixgbe_dma_alloc *);
119 #ifdef IX_CSUM_OFFLOAD
120 int ixgbe_tx_ctx_setup(struct tx_ring *, struct mbuf *);
121 int ixgbe_tso_setup(struct tx_ring *, struct mbuf *, uint32_t *);
122 #endif
123 void	ixgbe_set_ivar(struct ix_softc *, uint16_t, uint8_t);
124 void	ixgbe_configure_ivars(struct ix_softc *);
125 uint8_t	*ixgbe_mc_array_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);
126 
127 /* Legacy (single vector) interrupt handler */
128 int	ixgbe_legacy_irq(void *);
129 
130 #ifndef NO_82598_A0_SUPPORT
131 void	desc_flip(void *);
132 #endif
133 
134 /*********************************************************************
135  *  OpenBSD Device Interface Entry Points
136  *********************************************************************/
137 
138 struct cfdriver ix_cd = {
139 	0, "ix", DV_IFNET
140 };
141 
142 struct cfattach ix_ca = {
143 	sizeof(struct ix_softc), ixgbe_probe, ixgbe_attach, ixgbe_detach
144 };
145 
146 /* Total number of Interfaces - need for config sanity check */
147 static int ixgbe_total_ports;
148 
149 /*********************************************************************
150  *  Device identification routine
151  *
152  *  ixgbe_probe determines whether the driver should attach to the
153  *  device, based on its PCI vendor and device ID.
154  *
155  *  return 0 on success, positive on failure
156  *********************************************************************/
157 
158 int
159 ixgbe_probe(struct device *parent, void *match, void *aux)
160 {
161 	INIT_DEBUGOUT("ixgbe_probe: begin");
162 
163 	return (pci_matchbyid((struct pci_attach_args *)aux, ixgbe_devices,
164 	    sizeof(ixgbe_devices)/sizeof(ixgbe_devices[0])));
165 }
166 
167 /*********************************************************************
168  *  Device initialization routine
169  *
170  *  The attach entry point is called when the driver is being loaded.
171  *  This routine identifies the type of hardware, allocates all resources
172  *  and initializes the hardware.
173  *
174  *  return 0 on success, positive on failure
175  *********************************************************************/
176 
177 void
178 ixgbe_attach(struct device *parent, struct device *self, void *aux)
179 {
180 	struct pci_attach_args	*pa = (struct pci_attach_args *)aux;
181 	struct ix_softc		*sc = (struct ix_softc *)self;
182 	int			 error = 0;
183 	uint32_t			 ctrl_ext;
184 
185 	INIT_DEBUGOUT("ixgbe_attach: begin");
186 
187 	sc->osdep.os_sc = sc;
188 	sc->osdep.os_pa = pa;
189 
190 	/* Core Lock Init*/
191 	mtx_init(&sc->core_mtx, IPL_NET);
192 
193 	/* Set up the timer callout */
194 	timeout_set(&sc->timer, ixgbe_local_timer, sc);
195 
196 	/* Determine hardware revision */
197 	ixgbe_identify_hardware(sc);
198 
199 	/* Default to standard (non-jumbo) RX clusters */
200 	sc->bigbufs = FALSE;
201 	sc->num_tx_desc = DEFAULT_TXD;
202 	sc->num_rx_desc = DEFAULT_RXD;
203 	sc->rx_process_limit = 100;	/* XXX */
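	/*
	 * Note: the legacy interrupt handler calls ixgbe_rxeof() with a
	 * count of -1, so rx_process_limit is not applied on that path.
	 */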
204 
205 	/* Do base PCI setup - map BAR0 */
206 	if (ixgbe_allocate_pci_resources(sc)) {
207 		printf(": allocation of PCI resources failed\n");
208 		goto err_out;
209 	}
210 
211 	/* Allocate our TX/RX Queues */
212 	if (ixgbe_allocate_queues(sc))
213 		goto err_out;
214 
215 	/* Initialize the shared code */
216 	sc->hw.mac.type = ixgbe_mac_82598EB;
217 	if (ixgbe_init_ops_82598(&sc->hw) != 0) {
218 		printf(": failed to init the 82598EB\n");
219 		goto err_late;
220 	}
221 
222 	/* Initialize the hardware */
223 	if (ixgbe_hardware_init(sc)) {
224 		printf(": unable to initialize the hardware\n");
225 		goto err_late;
226 	}
227 
228 	/* XXX sc->msix > 1 && ixgbe_allocate_msix() */
229 	error = ixgbe_allocate_legacy(sc);
230 	if (error)
231 		goto err_late;
232 
233 	/* Setup OS specific network interface */
234 	ixgbe_setup_interface(sc);
235 
236 	/* Initialize statistics */
237 	ixgbe_update_stats_counters(sc);
238 
239 	/* let hardware know driver is loaded */
240 	ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
241 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
242 	IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
243 
244 	sc->powerhook = powerhook_establish(ixgbe_power, sc);
245 	sc->shutdownhook = shutdownhook_establish(ixgbe_shutdown, sc);
246 
247 	printf(", address %s\n", ether_sprintf(sc->hw.mac.addr));
248 
249 	INIT_DEBUGOUT("ixgbe_attach: end");
250 	return;
251 
252 err_late:
253 	ixgbe_free_transmit_structures(sc);
254 	ixgbe_free_receive_structures(sc);
255 err_out:
256 	ixgbe_free_pci_resources(sc);
257 }
258 
259 /*********************************************************************
260  *  Device removal routine
261  *
262  *  The detach entry point is called when the driver is being removed.
263  *  This routine stops the sc and deallocates all the resources
264  *  that were allocated for driver operation.
265  *
266  *  return 0 on success, positive on failure
267  *********************************************************************/
268 
269 int
270 ixgbe_detach(struct device *self, int flags)
271 {
272 	struct ix_softc *sc = (struct ix_softc *)self;
273 	struct ifnet *ifp = &sc->arpcom.ac_if;
274 	uint32_t	ctrl_ext;
275 
276 	INIT_DEBUGOUT("ixgbe_detach: begin");
277 
278 	ixgbe_stop(sc);
279 
280 	/* let hardware know driver is unloading */
281 	ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
282 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
283 	IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
284 
285 	ether_ifdetach(ifp);
286 	if_detach(ifp);
287 
288 	timeout_del(&sc->timer);
289 	ixgbe_free_pci_resources(sc);
290 
291 	ixgbe_free_transmit_structures(sc);
292 	ixgbe_free_receive_structures(sc);
293 
294 	return (0);
295 }
296 
297 void
298 ixgbe_power(int why, void *arg)
299 {
300 	struct ix_softc *sc = (struct ix_softc *)arg;
301 	struct ifnet *ifp;
302 
303 	if (why == PWR_RESUME) {
304 		ifp = &sc->arpcom.ac_if;
305 		if (ifp->if_flags & IFF_UP)
306 			ixgbe_init(sc);
307 	}
308 }
309 
310 /*********************************************************************
311  *
312  *  Shutdown entry point
313  *
314  **********************************************************************/
315 
316 void
317 ixgbe_shutdown(void *arg)
318 {
319 	struct ix_softc *sc = (struct ix_softc *)arg;
320 
321 	ixgbe_stop(sc);
322 }
323 
324 /*********************************************************************
325  *  Transmit entry point
326  *
327  *  ixgbe_start is called by the stack to initiate a transmit.
328  *  The driver will remain in this routine as long as there are
329  *  packets to transmit and transmit resources are available.
330  *  In case resources are not available, the stack is notified and
331  *  the packet is requeued.
332  **********************************************************************/
333 
334 void
335 ixgbe_start_locked(struct tx_ring *txr, struct ifnet * ifp)
336 {
337 	struct mbuf  		*m_head;
338 	struct ix_softc		*sc = txr->sc;
339 	int			 post = 0;
340 
341 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
342 		return;
343 
344 	if (!sc->link_active)
345 		return;
346 
347 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0,
348 	    txr->txdma.dma_map->dm_mapsize,
349 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
350 
351 	for (;;) {
352 		IFQ_POLL(&ifp->if_snd, m_head);
353 		if (m_head == NULL)
354 			break;
355 
356 		if (ixgbe_encap(txr, m_head)) {
357 			ifp->if_flags |= IFF_OACTIVE;
358 			break;
359 		}
360 
361 		IFQ_DEQUEUE(&ifp->if_snd, m_head);
362 
363 #if NBPFILTER > 0
364 		if (ifp->if_bpf)
365 			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
366 #endif
367 
368 		/* Set timeout in case hardware has problems transmitting */
369 		txr->watchdog_timer = IXGBE_TX_TIMEOUT;
370 		ifp->if_timer = IXGBE_TX_TIMEOUT;
371 
372 		post = 1;
373 	}
374 
375 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
376 	    0, txr->txdma.dma_map->dm_mapsize,
377 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
378 
379 	/*
380 	 * Advance the Transmit Descriptor Tail (Tdt), this tells the
381 	 * hardware that this frame is available to transmit.
382 	 */
383 	if (post)
384 		IXGBE_WRITE_REG(&sc->hw, IXGBE_TDT(txr->me),
385 		    txr->next_avail_tx_desc);
386 }
387 
388 
389 void
390 ixgbe_start(struct ifnet *ifp)
391 {
392 	struct ix_softc *sc = ifp->if_softc;
393 	struct tx_ring	*txr = sc->tx_rings;
394 	uint32_t queue = 0;
395 
396 #if 0
397 	/*
398 	 * This is really just here for testing
399 	 * TX multiqueue, ultimately what is
400 	 * needed is the flow support in the stack
401 	 * and appropriate logic here to deal with
402 	 * it. -jfv
403 	 */
404 	if (sc->num_tx_queues > 1)
405 		queue = (curcpu % sc->num_tx_queues);
406 #endif
407 
408 	txr = &sc->tx_rings[queue];
409 
410 	if (ifp->if_flags & IFF_RUNNING)
411 		ixgbe_start_locked(txr, ifp);
412 
413 	return;
414 }
415 
416 /*********************************************************************
417  *  Ioctl entry point
418  *
419  *  ixgbe_ioctl is called when the user wants to configure the
420  *  interface.
421  *
422  *  return 0 on success, positive on failure
423  **********************************************************************/
424 
425 int
426 ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
427 {
428 	struct ix_softc	*sc = ifp->if_softc;
429 	struct ifaddr	*ifa = (struct ifaddr *) data;
430 	struct ifreq	*ifr = (struct ifreq *) data;
431 	int		s, error = 0;
432 
433 	s = splnet();
434 
435 	switch (command) {
436 	case SIOCSIFADDR:
437 		IOCTL_DEBUGOUT("ioctl: SIOCxIFADDR (Get/Set Interface Addr)");
438 		ifp->if_flags |= IFF_UP;
439 		if (!(ifp->if_flags & IFF_RUNNING))
440 			ixgbe_init(sc);
441 #ifdef INET
442 		if (ifa->ifa_addr->sa_family == AF_INET)
443 			arp_ifinit(&sc->arpcom, ifa);
444 #endif
445 		break;
446 
447 	case SIOCSIFMTU:
448 		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
449 		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ifp->if_hardmtu)
450 			error = EINVAL;
451 		else if (ifp->if_mtu != ifr->ifr_mtu) {
452 			ifp->if_mtu = ifr->ifr_mtu;
453 			sc->max_frame_size =
454 				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
455 			ixgbe_init(sc);
456 		}
457 		break;
458 
459 	case SIOCSIFFLAGS:
460 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
461 		if (ifp->if_flags & IFF_UP) {
462 			if ((ifp->if_flags & IFF_RUNNING)) {
463 				if ((ifp->if_flags ^ sc->if_flags) &
464 				    (IFF_PROMISC | IFF_ALLMULTI)) {
465 					ixgbe_disable_promisc(sc);
466 					ixgbe_set_promisc(sc);
467 				}
468 			} else
469 				ixgbe_init(sc);
470 		} else
471 			if (ifp->if_flags & IFF_RUNNING)
472 				ixgbe_stop(sc);
473 		sc->if_flags = ifp->if_flags;
474 		break;
475 
476 	case SIOCSIFMEDIA:
477 	case SIOCGIFMEDIA:
478 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
479 		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
480 		break;
481 
482 	default:
483 		error = ether_ioctl(ifp, &sc->arpcom, command, data);
484 	}
485 
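	/*
	 * ENETRESET from ether_ioctl() means the multicast list changed;
	 * reprogram the receive filter without doing a full re-init.
	 */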
486 	if (error == ENETRESET) {
487 		if (ifp->if_flags & IFF_RUNNING) {
488 			ixgbe_disable_intr(sc);
489 			ixgbe_set_multi(sc);
490 			ixgbe_enable_intr(sc);
491 		}
492 		error = 0;
493 	}
494 
495 	splx(s);
496 	return (error);
497 }
498 
499 /*********************************************************************
500  *  Watchdog entry point
501  *
502  *  This routine is called by the stack when the interface watchdog
503  *  timer expires; it checks the transmit rings for hardware hangs.
504  *
505  **********************************************************************/
506 
507 void
508 ixgbe_watchdog(struct ifnet * ifp)
509 {
510 	struct ix_softc *sc = (struct ix_softc *)ifp->if_softc;
511 	struct tx_ring *txr = sc->tx_rings;
512 	struct ixgbe_hw *hw = &sc->hw;
513 	int		tx_hang = FALSE;
514 	int		i;
515 
516         /*
517          * The timer is set to 5 every time ixgbe_start() queues a packet.
518          * Then ixgbe_txeof() keeps resetting to 5 as long as it cleans at
519          * least one descriptor.
520          * Finally, anytime all descriptors are clean the timer is
521          * set to 0.
522          */
523 	for (i = 0; i < sc->num_tx_queues; i++, txr++) {
524 		if (txr->watchdog_timer == 0 || --txr->watchdog_timer)
525 			continue;
526 		else {
527 			tx_hang = TRUE;
528 			break;
529 		}
530 	}
531 	if (tx_hang == FALSE)
532 		return;
533 
534 	/*
535 	 * If we are in this routine because of pause frames, then don't
536 	 * reset the hardware.
537 	 */
538 	if (IXGBE_READ_REG(hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF) {
539 		for (i = 0, txr = sc->tx_rings; i < sc->num_tx_queues; i++, txr++)
540 			txr->watchdog_timer = IXGBE_TX_TIMEOUT;
541 		ifp->if_timer = IXGBE_TX_TIMEOUT;
542 		return;
543 	}
544 
545 
546 	printf("%s: Watchdog timeout -- resetting\n", ifp->if_xname);
547 	for (i = 0, txr = sc->tx_rings; i < sc->num_tx_queues; i++, txr++) {
548 		printf("%s: Queue(%d) tdh = %d, hw tdt = %d\n", ifp->if_xname, i,
549 		    IXGBE_READ_REG(hw, IXGBE_TDH(i)),
550 		    IXGBE_READ_REG(hw, IXGBE_TDT(i)));
551 		printf("%s: TX(%d) desc avail = %d, Next TX to Clean = %d\n", ifp->if_xname,
552 		    i, txr->tx_avail, txr->next_tx_to_clean);
553 	}
554 	ifp->if_flags &= ~IFF_RUNNING;
555 	sc->watchdog_events++;
556 
557 	ixgbe_init(sc);
558 	return;
559 }
560 
561 /*********************************************************************
562  *  Init entry point
563  *
564  *  This routine is used in two ways. It is used by the stack as
565  *  the init entry point in the network interface structure. It is also used
566  *  by the driver as a hw/sw initialization routine to get to a
567  *  consistent state.
568  *
569  *  return 0 on success, positive on failure
570  **********************************************************************/
571 #define IXGBE_MHADD_MFS_SHIFT 16
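/*
 * The MHADD register holds the maximum frame size in its upper 16 bits;
 * ixgbe_init() uses this shift when programming it for jumbo MTUs.
 */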
572 
573 void
574 ixgbe_init(void *arg)
575 {
576 	struct ix_softc	*sc = (struct ix_softc *)arg;
577 	struct ifnet	*ifp = &sc->arpcom.ac_if;
578 	uint32_t	 txdctl, rxdctl, mhadd, gpie;
579 	int		 i, s;
580 
581 	INIT_DEBUGOUT("ixgbe_init: begin");
582 
583 	s = splnet();
584 
585 	ixgbe_stop(sc);
586 
587 	/* Get the latest mac address, User can use a LAA */
588 	bcopy(sc->arpcom.ac_enaddr, sc->hw.mac.addr,
589 	      IXGBE_ETH_LENGTH_OF_ADDRESS);
590 	ixgbe_hw(&sc->hw, set_rar, 0, sc->hw.mac.addr, 0, 1);
591 	sc->hw.addr_ctrl.rar_used_count = 1;
592 
593 	/* Initialize the hardware */
594 	if (ixgbe_hardware_init(sc)) {
595 		printf("%s: Unable to initialize the hardware\n",
596 		    ifp->if_xname);
597 		splx(s);
598 		return;
599 	}
600 
601 	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
602 		ixgbe_enable_hw_vlans(sc);
603 
604 	/* Prepare transmit descriptors and buffers */
605 	if (ixgbe_setup_transmit_structures(sc)) {
606 		printf("%s: Could not setup transmit structures\n",
607 		    ifp->if_xname);
608 		ixgbe_stop(sc);
609 		splx(s);
610 		return;
611 	}
612 
613 	ixgbe_initialize_transmit_units(sc);
614 
615 	/* Setup Multicast table */
616 	ixgbe_set_multi(sc);
617 
618 	/*
619 	 * If we are resetting MTU smaller than 2K
620 	 * drop to small RX buffers
621 	 */
622 	if (sc->max_frame_size <= MCLBYTES)
623 		sc->bigbufs = FALSE;
624 
625 	/* Prepare receive descriptors and buffers */
626 	if (ixgbe_setup_receive_structures(sc)) {
627 		printf("%s: Could not setup receive structures\n", ifp->if_xname);
628 		ixgbe_stop(sc);
629 		splx(s);
630 		return;
631 	}
632 
633 	/* Configure RX settings */
634 	ixgbe_initialize_receive_units(sc);
635 
636 	gpie = IXGBE_READ_REG(&sc->hw, IXGBE_GPIE);
637 	/* Enable Fan Failure Interrupt */
638 	if (sc->hw.phy.media_type == ixgbe_media_type_copper)
639 		gpie |= IXGBE_SDP1_GPIEN;
640 	if (sc->msix) {
641 		/* Enable Enhanced MSIX mode */
642 		gpie |= IXGBE_GPIE_MSIX_MODE;
643 		gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
644 		    IXGBE_GPIE_OCD;
645 	}
646 	IXGBE_WRITE_REG(&sc->hw, IXGBE_GPIE, gpie);
647 
648 	/* Set MTU size */
649 	if (ifp->if_mtu > ETHERMTU) {
650 		mhadd = IXGBE_READ_REG(&sc->hw, IXGBE_MHADD);
651 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
652 		mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
653 		IXGBE_WRITE_REG(&sc->hw, IXGBE_MHADD, mhadd);
654 	}
655 
656 	/* Now enable all the queues */
657 
658 	for (i = 0; i < sc->num_tx_queues; i++) {
659 		txdctl = IXGBE_READ_REG(&sc->hw, IXGBE_TXDCTL(i));
660 		txdctl |= IXGBE_TXDCTL_ENABLE;
661 		/* Set WTHRESH to 8, burst writeback */
662 		txdctl |= (8 << 16);
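		/* (WTHRESH lives in bits 22:16 of TXDCTL, hence the shift) */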
663 		IXGBE_WRITE_REG(&sc->hw, IXGBE_TXDCTL(i), txdctl);
664 	}
665 
666 	for (i = 0; i < sc->num_rx_queues; i++) {
667 		rxdctl = IXGBE_READ_REG(&sc->hw, IXGBE_RXDCTL(i));
668 		/* PTHRESH set to 32 */
669 		rxdctl |= 0x0020;
670 		rxdctl |= IXGBE_RXDCTL_ENABLE;
671 		IXGBE_WRITE_REG(&sc->hw, IXGBE_RXDCTL(i), rxdctl);
672 	}
673 
674 	timeout_add_sec(&sc->timer, 1);
675 
676 	/* Set up MSI/X routing */
677 	ixgbe_configure_ivars(sc);
678 
679 	ixgbe_enable_intr(sc);
680 
681 	/* Now inform the stack we're ready */
682 	ifp->if_flags |= IFF_RUNNING;
683 	ifp->if_flags &= ~IFF_OACTIVE;
684 
685 	splx(s);
686 }
687 
688 /*********************************************************************
689  *
690  *  Legacy Interrupt Service routine
691  *
692  **********************************************************************/
693 
694 int
695 ixgbe_legacy_irq(void *arg)
696 {
697 	struct ix_softc	*sc = (struct ix_softc *)arg;
698 	struct ifnet	*ifp = &sc->arpcom.ac_if;
699 	uint32_t	 reg_eicr;
700 	struct tx_ring	*txr = sc->tx_rings;
701 	struct rx_ring	*rxr = sc->rx_rings;
702 	struct ixgbe_hw	*hw = &sc->hw;
703 	int		 claimed = 0;
704 
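	/*
	 * In legacy/MSI mode reading EICR clears the asserted cause bits,
	 * so keep servicing causes until the register reads back zero.
	 */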
705 	for (;;) {
706 		reg_eicr = IXGBE_READ_REG(&sc->hw, IXGBE_EICR);
707 		if (reg_eicr == 0)
708 			break;
709 
710 		claimed = 1;
711 
712 		if (ifp->if_flags & IFF_RUNNING) {
713 			ixgbe_rxeof(rxr, -1);
714 			ixgbe_txeof(txr);
715 		}
716 
717 		/* Check for fan failure */
718 		if ((hw->phy.media_type == ixgbe_media_type_copper) &&
719 		    (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
720 			printf("%s: CRITICAL: FAN FAILURE!! "
721 			    "REPLACE IMMEDIATELY!!\n", ifp->if_xname);
722 			IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMS,
723 			    IXGBE_EICR_GPI_SDP1);
724 		}
725 
726 		/* Link status change */
727 		if (reg_eicr & IXGBE_EICR_LSC) {
728 			timeout_del(&sc->timer);
729 		        ixgbe_update_link_status(sc);
730 			timeout_add_sec(&sc->timer, 1);
731 		}
732 	}
733 
734 	if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
735 		ixgbe_start_locked(txr, ifp);
736 
737 	return (claimed);
738 }
739 
740 /*********************************************************************
741  *
742  *  Media Ioctl callback
743  *
744  *  This routine is called whenever the user queries the status of
745  *  the interface using ifconfig.
746  *
747  **********************************************************************/
748 void
749 ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
750 {
751 	struct ix_softc *sc = ifp->if_softc;
752 
753 	INIT_DEBUGOUT("ixgbe_media_status: begin");
754 	ixgbe_update_link_status(sc);
755 
756 	ifmr->ifm_status = IFM_AVALID;
757 	ifmr->ifm_active = IFM_ETHER;
758 
759 	if (!sc->link_active) {
760 		ifmr->ifm_active |= IFM_NONE;
761 		return;
762 	}
763 
764 	ifmr->ifm_status |= IFM_ACTIVE;
765 
766 	switch (sc->link_speed) {
767 	case IXGBE_LINK_SPEED_1GB_FULL:
768 		ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
769 		break;
770 	case IXGBE_LINK_SPEED_10GB_FULL:
771 		ifmr->ifm_active |= sc->optics | IFM_FDX;
772 		break;
773 	}
774 }
775 
776 /*********************************************************************
777  *
778  *  Media Ioctl callback
779  *
780  *  This routine is called when the user changes speed/duplex using
781  *  media/mediaopt options with ifconfig.
782  *
783  **********************************************************************/
784 int
785 ixgbe_media_change(struct ifnet * ifp)
786 {
787 	struct ix_softc *sc = ifp->if_softc;
788 	struct ifmedia *ifm = &sc->media;
789 
790 	INIT_DEBUGOUT("ixgbe_media_change: begin");
791 
792 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
793 		return (EINVAL);
794 
795         switch (IFM_SUBTYPE(ifm->ifm_media)) {
796         case IFM_AUTO:
797                 sc->hw.mac.autoneg = TRUE;
798                 sc->hw.phy.autoneg_advertised =
799 		    IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_10GB_FULL;
800                 break;
801         default:
802                 printf("%s: Only auto media type\n", ifp->if_xname);
803 		return (EINVAL);
804         }
805 
806 	return (0);
807 }
808 
809 /*********************************************************************
810  *
811  *  This routine maps the mbufs to tx descriptors.
812  *    WARNING: while this code is using an MQ style infrastructure,
813  *    it would NOT work as is with more than 1 queue.
814  *
815  *  return 0 on success, positive on failure
816  **********************************************************************/
817 
818 int
819 ixgbe_encap(struct tx_ring *txr, struct mbuf *m_head)
820 {
821 	struct ix_softc *sc = txr->sc;
822 	uint32_t	olinfo_status = 0, cmd_type_len = 0;
823 	int             i, j, error;
824 	int		first, last = 0;
825 	bus_dmamap_t	map;
826 	struct ixgbe_tx_buf *txbuf, *txbuf_mapped;
827 	union ixgbe_adv_tx_desc *txd = NULL;
828 #ifdef IX_CSUM_OFFLOAD
829 	uint32_t	paylen = 0;
830 #endif
831 
832 	/* Basic descriptor defines */
833         cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
834         cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
835 
836 #if NVLAN > 0
837 	if (m_head->m_flags & M_VLANTAG)
838 		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
839 #endif
840 
841 	/*
842 	 * Force a cleanup if number of TX descriptors
843 	 * available is below the threshold. If it fails
844 	 * to get above, then abort transmit.
845 	 */
846 	if (txr->tx_avail <= IXGBE_TX_CLEANUP_THRESHOLD) {
847 		ixgbe_txeof(txr);
848 		/* Make sure things have improved */
849 		if (txr->tx_avail <= IXGBE_TX_OP_THRESHOLD) {
850 			txr->no_tx_desc_avail++;
851 			return (ENOBUFS);
852 		}
853 	}
854 
855         /*
856          * Important to capture the first descriptor
857          * used because it will contain the index of
858          * the one we tell the hardware to report back
859          */
860         first = txr->next_avail_tx_desc;
861 	txbuf = &txr->tx_buffers[first];
862 	txbuf_mapped = txbuf;
863 	map = txbuf->map;
864 
865 	/*
866 	 * Map the packet for DMA.
867 	 */
868 	error = bus_dmamap_load_mbuf(txr->txtag, map,
869 	    m_head, BUS_DMA_NOWAIT);
870 
871 	if (error == ENOMEM) {
872 		sc->no_tx_dma_setup++;
873 		return (error);
874 	} else if (error != 0) {
875 		sc->no_tx_dma_setup++;
876 		return (error);
877 	}
878 
879 	/* Make certain there are enough descriptors */
880 	if (map->dm_nsegs > txr->tx_avail - 2) {
881 		txr->no_tx_desc_avail++;
882 		error = ENOBUFS;
883 		goto xmit_fail;
884 	}
885 
886 #ifdef IX_CSUM_OFFLOAD
887 	/*
888 	 * Set the appropriate offload context
889 	 * this becomes the first descriptor of
890 	 * a packet.
891 	 */
892 	if (ixgbe_tso_setup(txr, m_head, &paylen)) {
893 		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
894 		olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
895 		olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
896 		olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
897 		++sc->tso_tx;
898 	} else if (ixgbe_tx_ctx_setup(txr, m_head))
899 		olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
900 #endif
901 
902 	i = txr->next_avail_tx_desc;
903 	for (j = 0; j < map->dm_nsegs; j++) {
904 		txbuf = &txr->tx_buffers[i];
905 		txd = &txr->tx_base[i];
906 
907 		txd->read.buffer_addr = htole64(map->dm_segs[j].ds_addr);
908 		txd->read.cmd_type_len = htole32(txr->txd_cmd |
909 		    cmd_type_len | map->dm_segs[j].ds_len);
910 		txd->read.olinfo_status = htole32(olinfo_status);
911 		last = i; /* Next descriptor that will get completed */
912 
913 		if (++i == sc->num_tx_desc)
914 			i = 0;
915 
916 		txbuf->m_head = NULL;
917 
918 		/*
919 		 * we have to do this inside the loop right now
920 		 * because of the hardware workaround.
921 		 */
922 		if (j == (map->dm_nsegs -1)) /* Last descriptor gets EOP and RS */
923 			txd->read.cmd_type_len |=
924 			    htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
925 #ifndef NO_82598_A0_SUPPORT
926 		if (sc->hw.revision_id == 0)
927 			desc_flip(txd);
928 #endif
929 	}
930 
931 	txr->tx_avail -= map->dm_nsegs;
932 	txr->next_avail_tx_desc = i;
933 
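	/*
	 * The mbuf and its map are attached to the buffer slot of the last
	 * segment, so ixgbe_txeof() frees them once the final descriptor of
	 * this packet has been completed by the hardware.
	 */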
934 	txbuf->m_head = m_head;
935 	txbuf->map = map;
936 	bus_dmamap_sync(txr->txtag, map, 0, map->dm_mapsize,
937 	    BUS_DMASYNC_PREWRITE);
938 
939         /* Set the index of the descriptor that will be marked done */
940         txbuf = &txr->tx_buffers[first];
941 
942 	++txr->tx_packets;
943 	return (0);
944 
945 xmit_fail:
946 	bus_dmamap_unload(txr->txtag, txbuf->map);
947 	return (error);
948 
949 }
950 
951 void
952 ixgbe_set_promisc(struct ix_softc *sc)
953 {
954 
955 	uint32_t       reg_rctl;
956 	struct ifnet *ifp = &sc->arpcom.ac_if;
957 
958 	reg_rctl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
959 
960 	if (ifp->if_flags & IFF_PROMISC) {
961 		reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
962 		IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, reg_rctl);
963 	} else if (ifp->if_flags & IFF_ALLMULTI) {
964 		reg_rctl |= IXGBE_FCTRL_MPE;
965 		reg_rctl &= ~IXGBE_FCTRL_UPE;
966 		IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, reg_rctl);
967 	}
968 	return;
969 }
970 
971 void
972 ixgbe_disable_promisc(struct ix_softc * sc)
973 {
974 	uint32_t       reg_rctl;
975 
976 	reg_rctl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
977 
978 	reg_rctl &= (~IXGBE_FCTRL_UPE);
979 	reg_rctl &= (~IXGBE_FCTRL_MPE);
980 	IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, reg_rctl);
981 
982 	return;
983 }
984 
985 
986 /*********************************************************************
987  *  Multicast Update
988  *
989  *  This routine is called whenever multicast address list is updated.
990  *
991  **********************************************************************/
992 #define IXGBE_RAR_ENTRIES 16
993 
994 void
995 ixgbe_set_multi(struct ix_softc *sc)
996 {
997 	uint32_t	fctrl;
998 	uint8_t	mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
999 	uint8_t	*update_ptr;
1000 	struct ether_multi *enm;
1001 	struct ether_multistep step;
1002 	int	mcnt = 0;
1003 	struct ifnet *ifp = &sc->arpcom.ac_if;
1004 
1005 	IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
1006 
1007 	fctrl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
1008 	fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1009 	if (ifp->if_flags & IFF_PROMISC)
1010 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1011 	else if (ifp->if_flags & IFF_ALLMULTI) {
1012 		fctrl |= IXGBE_FCTRL_MPE;
1013 		fctrl &= ~IXGBE_FCTRL_UPE;
1014 	} else
1015 		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1016 
1017 	IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, fctrl);
1018 
1019 	ETHER_FIRST_MULTI(step, &sc->arpcom, enm);
1020 	while (enm != NULL) {
1021 		if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1022 			ifp->if_flags |= IFF_ALLMULTI;
1023 			mcnt = MAX_NUM_MULTICAST_ADDRESSES;
1024 		}
1025 		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
1026 			break;
1027 		bcopy(enm->enm_addrlo,
1028 		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1029 		    IXGBE_ETH_LENGTH_OF_ADDRESS);
1030 		mcnt++;
1031 		ETHER_NEXT_MULTI(step, enm);
1032 	}
1033 
1034 	update_ptr = mta;
1035 	ixgbe_hw(&sc->hw, update_mc_addr_list,
1036 	    update_ptr, mcnt, ixgbe_mc_array_itr);
1037 
1038 	return;
1039 }
1040 
1041 /*
1042  * This is an iterator function needed by the shared multicast code.
1043  * It feeds the addresses collected in ixgbe_set_multi() to the shared
1044  * code routine one at a time.
1045  */
1046 uint8_t *
1047 ixgbe_mc_array_itr(struct ixgbe_hw *hw, uint8_t **update_ptr, uint32_t *vmdq)
1048 {
1049 	uint8_t *addr = *update_ptr;
1050 	uint8_t *newptr;
1051 	*vmdq = 0;
1052 
1053 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1054 	*update_ptr = newptr;
1055 	return addr;
1056 }
1057 
1058 
1059 /*********************************************************************
1060  *  Timer routine
1061  *
1062  *  This routine checks for link status, updates statistics,
1063  *  and runs the watchdog timer.
1064  *
1065  **********************************************************************/
1066 
1067 void
1068 ixgbe_local_timer(void *arg)
1069 {
1070 	struct ix_softc *sc = arg;
1071 #ifdef IX_DEBUG
1072 	struct ifnet	*ifp = &sc->arpcom.ac_if;
1073 #endif
1074 	int		 s;
1075 
1076 	s = splnet();
1077 
1078 	ixgbe_update_link_status(sc);
1079 	ixgbe_update_stats_counters(sc);
1080 
1081 #ifdef IX_DEBUG
1082 	if ((ifp->if_flags & (IFF_RUNNING|IFF_DEBUG)) ==
1083 	    (IFF_RUNNING|IFF_DEBUG))
1084 		ixgbe_print_hw_stats(sc);
1085 #endif
1086 
1087 	timeout_add_sec(&sc->timer, 1);
1088 
1089 	splx(s);
1090 }
1091 
1092 void
1093 ixgbe_update_link_status(struct ix_softc *sc)
1094 {
1095 	int link_up = FALSE;
1096 	struct ifnet *ifp = &sc->arpcom.ac_if;
1097 	struct tx_ring *txr = sc->tx_rings;
1098 	int		i;
1099 
1100 	ixgbe_hw(&sc->hw, check_link, &sc->link_speed, &link_up, 0);
1101 
1102 	switch (sc->link_speed) {
1103 	case IXGBE_LINK_SPEED_UNKNOWN:
1104 		ifp->if_baudrate = 0;
1105 		break;
1106 	case IXGBE_LINK_SPEED_100_FULL:
1107 		ifp->if_baudrate = IF_Mbps(100);
1108 		break;
1109 	case IXGBE_LINK_SPEED_1GB_FULL:
1110 		ifp->if_baudrate = IF_Gbps(1);
1111 		break;
1112 	case IXGBE_LINK_SPEED_10GB_FULL:
1113 		ifp->if_baudrate = IF_Gbps(10);
1114 		break;
1115 	}
1116 
1117 	if (link_up) {
1118 		if (sc->link_active == FALSE) {
1119 			sc->link_active = TRUE;
1120 			ifp->if_link_state = LINK_STATE_FULL_DUPLEX;
1121 			if_link_state_change(ifp);
1122 		}
1123 	} else { /* Link down */
1124 		if (sc->link_active == TRUE) {
1125 			ifp->if_baudrate = 0;
1126 			ifp->if_link_state = LINK_STATE_DOWN;
1127 			if_link_state_change(ifp);
1128 			sc->link_active = FALSE;
1129 			for (i = 0; i < sc->num_tx_queues;
1130 			    i++, txr++)
1131 				txr->watchdog_timer = FALSE;
1132 			ifp->if_timer = 0;
1133 		}
1134 	}
1135 
1136 	return;
1137 }
1138 
1139 
1140 
1141 /*********************************************************************
1142  *
1143  *  This routine disables all traffic on the sc by issuing a
1144  *  global reset on the MAC and deallocates TX/RX buffers.
1145  *
1146  **********************************************************************/
1147 
1148 void
1149 ixgbe_stop(void *arg)
1150 {
1151 	struct ix_softc *sc = arg;
1152 	struct ifnet   *ifp = &sc->arpcom.ac_if;
1153 
1154 	INIT_DEBUGOUT("ixgbe_stop: begin\n");
1155 	ixgbe_disable_intr(sc);
1156 
1157 	/* Tell the stack that the interface is no longer active */
1158 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1159 
1160 	ixgbe_hw(&sc->hw, reset_hw);
1161 	sc->hw.adapter_stopped = FALSE;
1162 	ixgbe_hw(&sc->hw, stop_adapter);
1163 	timeout_del(&sc->timer);
1164 
1165 	/* reprogram the RAR[0] in case user changed it. */
1166 	ixgbe_hw(&sc->hw, set_rar, 0, sc->hw.mac.addr, 0, IXGBE_RAH_AV);
1167 }
1168 
1169 
1170 /*********************************************************************
1171  *
1172  *  Determine hardware revision.
1173  *
1174  **********************************************************************/
1175 void
1176 ixgbe_identify_hardware(struct ix_softc *sc)
1177 {
1178 	struct ixgbe_osdep	*os = &sc->osdep;
1179 	struct pci_attach_args	*pa = os->os_pa;
1180 	uint32_t		 reg;
1181 
1182 	/* Save off the information about this board */
1183 	sc->hw.vendor_id = PCI_VENDOR(pa->pa_id);
1184 	sc->hw.device_id = PCI_PRODUCT(pa->pa_id);
1185 
1186 	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
1187 	sc->hw.revision_id = PCI_REVISION(reg);
1188 
1189 	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
1190 	sc->hw.subsystem_vendor_id = PCI_VENDOR(reg);
1191 	sc->hw.subsystem_device_id = PCI_PRODUCT(reg);
1192 
1193 	ixgbe_total_ports++;
1194 	switch (sc->hw.device_id) {
1195 	case PCI_PRODUCT_INTEL_82598AF_DUAL:
1196 	case PCI_PRODUCT_INTEL_82598EB_CX4_DUAL:
1197 	case PCI_PRODUCT_INTEL_82598AT_DUAL:
1198 		ixgbe_total_ports++;
1199 		break;
1200 	}
1201 
1202 	switch (sc->hw.device_id) {
1203 	case PCI_PRODUCT_INTEL_82598AF_DUAL:
1204 	case PCI_PRODUCT_INTEL_82598AF:
1205 		sc->optics = IFM_10G_SR;
1206 		break;
1207 	case PCI_PRODUCT_INTEL_82598EB_CX4_DUAL:
1208 	case PCI_PRODUCT_INTEL_82598EB_CX4:
1209 		sc->optics = IFM_10G_CX4;
1210 		break;
1211 	case PCI_PRODUCT_INTEL_82598EB_XF_LR:
1212 		sc->optics = IFM_10G_LR;
1213 		break;
1214 	case PCI_PRODUCT_INTEL_82598AT_DUAL:
1215 	case PCI_PRODUCT_INTEL_82598AT:
1216 		sc->optics = IFM_10G_T;
1217 		break;
1218 	default:
1219 		sc->optics = IFM_AUTO;
1220 		break;
1221 	}
1222 }
1223 
1224 /*********************************************************************
1225  *
1226  *  Setup the Legacy or MSI Interrupt handler
1227  *
1228  **********************************************************************/
1229 int
1230 ixgbe_allocate_legacy(struct ix_softc *sc)
1231 {
1232 	struct ifnet		*ifp = &sc->arpcom.ac_if;
1233 	struct ixgbe_osdep	*os = &sc->osdep;
1234 	struct pci_attach_args	*pa = os->os_pa;
1235 	const char		*intrstr = NULL;
1236 	pci_chipset_tag_t	pc = pa->pa_pc;
1237 	pci_intr_handle_t	ih;
1238 
1239 	/* Legacy RID at 0 */
1240 	if (sc->msix == 0)
1241 		sc->rid[0] = 0;
1242 
1243 	/* We allocate a single interrupt resource */
1244 	if (pci_intr_map(pa, &ih)) {
1245 		printf(": couldn't map interrupt\n");
1246 		return (ENXIO);
1247 	}
1248 
1249 	intrstr = pci_intr_string(pc, ih);
1250 	sc->tag[0] = pci_intr_establish(pc, ih, IPL_NET,
1251 	    ixgbe_legacy_irq, sc, ifp->if_xname);
1252 	if (sc->tag[0] == NULL) {
1253 		printf(": couldn't establish interrupt");
1254 		if (intrstr != NULL)
1255 			printf(" at %s", intrstr);
1256 		printf("\n");
1257 		return (ENXIO);
1258 	}
1259 	printf(": %s", intrstr);
1260 
1261 	return (0);
1262 }
1263 
1264 int
1265 ixgbe_allocate_pci_resources(struct ix_softc *sc)
1266 {
1267 	struct ixgbe_osdep	*os = &sc->osdep;
1268 	struct pci_attach_args	*pa = os->os_pa;
1269 	int			 val, i;
1270 
1271 	val = pci_conf_read(pa->pa_pc, pa->pa_tag, PCIR_BAR(0));
1272 	if (PCI_MAPREG_TYPE(val) != PCI_MAPREG_TYPE_MEM &&
1273 	    PCI_MAPREG_TYPE(val) != PCI_MAPREG_MEM_TYPE_64BIT) {
1274 		printf(": mmba is not mem space\n");
1275 		return (ENXIO);
1276 	}
1277 
1278 	if (pci_mapreg_map(pa, PCIR_BAR(0), PCI_MAPREG_MEM_TYPE(val), 0,
1279 	    &os->os_memt, &os->os_memh, &os->os_membase, &os->os_memsize, 0)) {
1280 		printf(": cannot find mem space\n");
1281 		return (ENXIO);
1282 	}
1283 	sc->hw.hw_addr = (uint8_t *)os->os_membase;
1284 
1285 	/*
1286 	 * Init the resource arrays
1287 	 */
1288 	for (i = 0; i < IXGBE_MSGS; i++) {
1289 		sc->rid[i] = i + 1; /* MSI/X RID starts at 1 */
1290 		sc->tag[i] = NULL;
1291 		sc->res[i] = NULL;
1292 	}
1293 
1294 	/* Legacy defaults */
1295 	sc->num_tx_queues = 1;
1296 	sc->num_rx_queues = 1;
1297 
1298 #ifdef notyet
1299 	/* Now setup MSI or MSI/X */
1300 	sc->msix = ixgbe_setup_msix(sc);
1301 #endif
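	/*
	 * With MSI-X setup disabled, sc->msix stays 0 and ixgbe_attach()
	 * falls back to the legacy/MSI path in ixgbe_allocate_legacy().
	 */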
1302 	sc->hw.back = os;
1303 
1304 	return (0);
1305 }
1306 
1307 void
1308 ixgbe_free_pci_resources(struct ix_softc * sc)
1309 {
1310 	struct ixgbe_osdep	*os = &sc->osdep;
1311 	struct pci_attach_args	*pa = os->os_pa;
1312 
1313 	pci_intr_disestablish(pa->pa_pc, sc->tag[0]);
1314 	sc->tag[0] = NULL;
1315 	if (os->os_membase != NULL)
1316 		bus_space_unmap(os->os_memt, os->os_memh, os->os_memsize);
1317 	os->os_membase = NULL;
1318 
1319 	return;
1320 }
1321 
1322 /*********************************************************************
1323  *
1324  *  Initialize the hardware to a configuration as specified by the
1325  *  sc structure. The controller is reset, the EEPROM is
1326  *  verified, the MAC address is set, then the shared initialization
1327  *  routines are called.
1328  *
1329  **********************************************************************/
1330 int
1331 ixgbe_hardware_init(struct ix_softc *sc)
1332 {
1333 	struct ifnet	*ifp = &sc->arpcom.ac_if;
1334 	uint16_t csum;
1335 
1336 	csum = 0;
1337 	/* Issue a global reset */
1338 	sc->hw.adapter_stopped = FALSE;
1339 	ixgbe_hw(&sc->hw, stop_adapter);
1340 
1341 	/* Make sure we have a good EEPROM before we read from it */
1342 	if (ixgbe_ee(&sc->hw, validate_checksum, &csum) < 0) {
1343 		printf("%s: The EEPROM Checksum Is Not Valid\n", ifp->if_xname);
1344 		return (EIO);
1345 	}
1346 
1347 	/* Set default hardware flow control parameters */
1348 	sc->hw.fc.type = ixgbe_fc_full;
1349 	sc->hw.fc.pause_time = IXGBE_FC_PAUSE;
1350 	sc->hw.fc.low_water = IXGBE_FC_LO;
1351 	sc->hw.fc.high_water = IXGBE_FC_HI;
1352 	sc->hw.fc.send_xon = TRUE;
1353 
1354 	if (ixgbe_hw(&sc->hw, init_hw) != 0) {
1355 		printf("%s: Hardware Initialization Failed\n", ifp->if_xname);
1356 		return (EIO);
1357 	}
1358 	bcopy(sc->hw.mac.addr, sc->arpcom.ac_enaddr,
1359 	      IXGBE_ETH_LENGTH_OF_ADDRESS);
1360 
1361 	return (0);
1362 }
1363 
1364 /*********************************************************************
1365  *
1366  *  Setup networking device structure and register an interface.
1367  *
1368  **********************************************************************/
1369 void
1370 ixgbe_setup_interface(struct ix_softc *sc)
1371 {
1372 	struct ixgbe_hw *hw = &sc->hw;
1373 	struct ifnet   *ifp = &sc->arpcom.ac_if;
1374 	INIT_DEBUGOUT("ixgbe_setup_interface: begin");
1375 
1376 	strlcpy(ifp->if_xname, sc->dev.dv_xname, IFNAMSIZ);
1377 	ifp->if_baudrate = IF_Gbps(10);
1378 	ifp->if_softc = sc;
1379 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1380 	ifp->if_ioctl = ixgbe_ioctl;
1381 	ifp->if_start = ixgbe_start;
1382 	ifp->if_timer = 0;
1383 	ifp->if_watchdog = ixgbe_watchdog;
1384 	ifp->if_hardmtu = IXGBE_MAX_FRAME_SIZE -
1385 	    ETHER_HDR_LEN - ETHER_CRC_LEN;
1386 	IFQ_SET_MAXLEN(&ifp->if_snd, sc->num_tx_desc - 1);
1387 	IFQ_SET_READY(&ifp->if_snd);
1388 
1389 	ifp->if_capabilities = IFCAP_VLAN_MTU;
1390 
1391 #ifdef IX_VLAN_HWTAGGING
1392 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
1393 #endif
1394 
1395 #ifdef IX_CSUM_OFFLOAD
1396 	ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 |
1397 				IFCAP_CSUM_UDPv4;
1398 #endif
1399 
1400 	sc->max_frame_size =
1401 	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1402 
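	/*
	 * The 82598AT copper parts can autonegotiate, so both 1G and 10G
	 * are advertised; the fiber and CX4 parts are set up for 10G only.
	 */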
1403 	if ((hw->device_id == PCI_PRODUCT_INTEL_82598AT) ||
1404 	    (hw->device_id == PCI_PRODUCT_INTEL_82598AT_DUAL))
1405 		ixgbe_hw(hw, setup_link_speed,
1406 		    IXGBE_LINK_SPEED_10GB_FULL |
1407 		    IXGBE_LINK_SPEED_1GB_FULL, TRUE, TRUE);
1408 	else
1409 		ixgbe_hw(hw, setup_link_speed,
1410 		    IXGBE_LINK_SPEED_10GB_FULL,
1411 		    TRUE, FALSE);
1412 
1413 	/*
1414 	 * Specify the media types supported by this sc and register
1415 	 * callbacks to update media and link information
1416 	 */
1417 	ifmedia_init(&sc->media, IFM_IMASK, ixgbe_media_change,
1418 		     ixgbe_media_status);
1419 	ifmedia_add(&sc->media, IFM_ETHER | sc->optics |
1420 	    IFM_FDX, 0, NULL);
1421 	if ((hw->device_id == PCI_PRODUCT_INTEL_82598AT) ||
1422 	    (hw->device_id == PCI_PRODUCT_INTEL_82598AT_DUAL)) {
1423 		ifmedia_add(&sc->media,
1424 		    IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
1425 		ifmedia_add(&sc->media,
1426 		    IFM_ETHER | IFM_1000_T, 0, NULL);
1427 	}
1428 	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1429 	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
1430 
1431 	if_attach(ifp);
1432 	ether_ifattach(ifp);
1433 
1434 
1435 	return;
1436 }
1437 
1438 int
1439 ixgbe_dma_malloc(struct ix_softc *sc, bus_size_t size,
1440 		struct ixgbe_dma_alloc *dma, int mapflags)
1441 {
1442 	struct ifnet		*ifp = &sc->arpcom.ac_if;
1443 	struct ixgbe_osdep	*os = &sc->osdep;
1444 	int			 r;
1445 
1446 	dma->dma_tag = os->os_pa->pa_dmat;
1447 	r = bus_dmamap_create(dma->dma_tag, size, 1, size, 0,
1448 	    BUS_DMA_NOWAIT, &dma->dma_map);
1449 	if (r != 0) {
1450 		printf("%s: ixgbe_dma_malloc: bus_dmamap_create failed; "
1451 		       "error %u\n", ifp->if_xname, r);
1452 		goto fail_0;
1453 	}
1454 
1455 	r = bus_dmamem_alloc(dma->dma_tag, size, PAGE_SIZE, 0,
1456 	    &dma->dma_seg, 1, &dma->dma_nseg, BUS_DMA_NOWAIT);
1457 	if (r != 0) {
1458 		printf("%s: ixgbe_dma_malloc: bus_dmamem_alloc failed; "
1459 		       "error %u\n", ifp->if_xname, r);
1460 		goto fail_1;
1461 	}
1462 	r = bus_dmamem_map(dma->dma_tag, &dma->dma_seg, dma->dma_nseg, size,
1463 	    &dma->dma_vaddr, BUS_DMA_NOWAIT);
1464 	if (r != 0) {
1465 		printf("%s: ixgbe_dma_malloc: bus_dmamem_map failed; "
1466 		       "error %u\n", ifp->if_xname, r);
1467 		goto fail_2;
1468 	}
1469 	r = bus_dmamap_load(dma->dma_tag, dma->dma_map,
1470 	    dma->dma_vaddr, size, NULL,
1471 	    mapflags | BUS_DMA_NOWAIT);
1472 	if (r != 0) {
1473 		printf("%s: ixgbe_dma_malloc: bus_dmamap_load failed; "
1474 		       "error %u\n", ifp->if_xname, r);
1475 		goto fail_3;
1476 	}
1477 	dma->dma_size = size;
1478 	return (0);
1479 fail_3:
1480 	bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size);
1481 fail_2:
1482 	bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
1483 fail_1:
1484 	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
1485 fail_0:
1486 	dma->dma_map = NULL;
1487 	dma->dma_tag = NULL;
1488 	return (r);
1489 }
1490 
1491 void
1492 ixgbe_dma_free(struct ix_softc *sc, struct ixgbe_dma_alloc *dma)
1493 {
1494 	if (dma->dma_tag == NULL)
1495 		return;
1496 
1497 	if (dma->dma_map != NULL) {
1498 		bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0,
1499 		    dma->dma_map->dm_mapsize,
1500 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1501 		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1502 		bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, dma->dma_size);
1503 		bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
1504 		bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
1505 	}
1506 }
1507 
1508 
1509 /*********************************************************************
1510  *
1511  *  Allocate memory for the transmit and receive rings, and then
1512  *  the descriptors associated with each, called only once at attach.
1513  *
1514  **********************************************************************/
1515 int
1516 ixgbe_allocate_queues(struct ix_softc *sc)
1517 {
1518 	struct ifnet	*ifp = &sc->arpcom.ac_if;
1519 	struct tx_ring *txr;
1520 	struct rx_ring *rxr;
1521 	int rsize, tsize, error = IXGBE_SUCCESS;
1522 	int txconf = 0, rxconf = 0, i;
1523 
1524 	/* First allocate the TX ring struct memory */
1525 	if (!(sc->tx_rings =
1526 	    (struct tx_ring *) malloc(sizeof(struct tx_ring) *
1527 	    sc->num_tx_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
1528 		printf("%s: Unable to allocate TX ring memory\n", ifp->if_xname);
1529 		error = ENOMEM;
1530 		goto fail;
1531 	}
1532 	txr = sc->tx_rings;
1533 
1534 	/* Next allocate the RX */
1535 	if (!(sc->rx_rings =
1536 	    (struct rx_ring *) malloc(sizeof(struct rx_ring) *
1537 	    sc->num_rx_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
1538 		printf("%s: Unable to allocate RX ring memory\n", ifp->if_xname);
1539 		error = ENOMEM;
1540 		goto rx_fail;
1541 	}
1542 	rxr = sc->rx_rings;
1543 
1544 	/* For the ring itself */
1545 	tsize = roundup2(sc->num_tx_desc *
1546 	    sizeof(union ixgbe_adv_tx_desc), 4096);
1547 
1548 	/*
1549 	 * Now set up the TX queues, txconf is needed to handle the
1550 	 * possibility that things fail midcourse and we need to
1551 	 * undo memory gracefully
1552 	 */
1553 	for (i = 0; i < sc->num_tx_queues; i++, txconf++) {
1554 		/* Set up some basics */
1555 		txr = &sc->tx_rings[i];
1556 		txr->sc = sc;
1557 		txr->me = i;
1558 
1559 		/* Initialize the TX side lock */
1560 		mtx_init(&txr->tx_mtx, IPL_NET);
1561 
1562 		if (ixgbe_dma_malloc(sc, tsize,
1563 		    &txr->txdma, BUS_DMA_NOWAIT)) {
1564 			printf("%s: Unable to allocate TX Descriptor memory\n",
1565 			    ifp->if_xname);
1566 			error = ENOMEM;
1567 			goto err_tx_desc;
1568 		}
1569 		txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
1570 		bzero((void *)txr->tx_base, tsize);
1571 
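		/*
		 * The head write-back area is a single uint32_t the hardware
		 * DMAs the transmit head index into; it is programmed into
		 * TDWBAL/TDWBAH by ixgbe_initialize_transmit_units().
		 */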
1572 		if (ixgbe_dma_malloc(sc, sizeof(uint32_t),
1573 		    &txr->txwbdma, BUS_DMA_NOWAIT)) {
1574 			printf("%s: Unable to allocate TX Write Back memory\n",
1575 			    ifp->if_xname);
1576 			error = ENOMEM;
1577 			goto err_tx_desc;
1578 		}
1579 		txr->tx_hwb = (uint32_t *)txr->txwbdma.dma_vaddr;
1580 		*txr->tx_hwb = 0;
1581 
1582         	/* Now allocate transmit buffers for the ring */
1583         	if (ixgbe_allocate_transmit_buffers(txr)) {
1584 			printf("%s: Critical Failure setting up transmit buffers\n",
1585 			    ifp->if_xname);
1586 			error = ENOMEM;
1587 			goto err_tx_desc;
1588         	}
1589 
1590 	}
1591 
1592 	/*
1593 	 * Next the RX queues...
1594 	 */
1595 	rsize = roundup2(sc->num_rx_desc *
1596 	    sizeof(union ixgbe_adv_rx_desc), 4096);
1597 	for (i = 0; i < sc->num_rx_queues; i++, rxconf++) {
1598 		rxr = &sc->rx_rings[i];
1599 		/* Set up some basics */
1600 		rxr->sc = sc;
1601 		rxr->me = i;
1602 
1603 		/* Initialize the RX side lock */
1604 		mtx_init(&rxr->rx_mtx, IPL_NET);
1605 
1606 		if (ixgbe_dma_malloc(sc, rsize,
1607 			&rxr->rxdma, BUS_DMA_NOWAIT)) {
1608 			printf("%s: Unable to allocate RxDescriptor memory\n",
1609 			    ifp->if_xname);
1610 			error = ENOMEM;
1611 			goto err_rx_desc;
1612 		}
1613 		rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
1614 		bzero((void *)rxr->rx_base, rsize);
1615 
1616 		/* Allocate receive buffers for the ring */
1617 		if (ixgbe_allocate_receive_buffers(rxr)) {
1618 			printf("%s: Critical Failure setting up receive buffers\n",
1619 			    ifp->if_xname);
1620 			error = ENOMEM;
1621 			goto err_rx_desc;
1622 		}
1623 	}
1624 
1625 	return (0);
1626 
1627 err_rx_desc:
1628 	for (rxr = sc->rx_rings; rxconf > 0; rxr++, rxconf--)
1629 		ixgbe_dma_free(sc, &rxr->rxdma);
1630 err_tx_desc:
1631 	for (txr = sc->tx_rings; txconf > 0; txr++, txconf--) {
1632 		ixgbe_dma_free(sc, &txr->txdma);
1633 		ixgbe_dma_free(sc, &txr->txwbdma);
1634 	}
1635 	free(sc->rx_rings, M_DEVBUF);
1636 rx_fail:
1637 	free(sc->tx_rings, M_DEVBUF);
1638 fail:
1639 	return (error);
1640 }
1641 
1642 /*********************************************************************
1643  *
1644  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
1645  *  the information needed to transmit a packet on the wire. This is
1646  *  called only once at attach; setup is done on every reset.
1647  *
1648  **********************************************************************/
1649 int
1650 ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
1651 {
1652 	struct ix_softc *sc = txr->sc;
1653 	struct ixgbe_osdep *os = &sc->osdep;
1654 	struct ifnet	*ifp = &sc->arpcom.ac_if;
1655 	struct ixgbe_tx_buf *txbuf;
1656 	int error, i;
1657 
1658 	txr->txtag = os->os_pa->pa_dmat;
1659 	if (!(txr->tx_buffers =
1660 	    (struct ixgbe_tx_buf *) malloc(sizeof(struct ixgbe_tx_buf) *
1661 	    sc->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
1662 		printf("%s: Unable to allocate tx_buffer memory\n", ifp->if_xname);
1663 		error = ENOMEM;
1664 		goto fail;
1665 	}
1666 
1667         /* Create the descriptor buffer dma maps */
1668 	txbuf = txr->tx_buffers;
1669 	for (i = 0; i < sc->num_tx_desc; i++, txbuf++) {
1670 		error = bus_dmamap_create(txr->txtag, IXGBE_TSO_SIZE,
1671 			    IXGBE_MAX_SCATTER, PAGE_SIZE, 0,
1672 			    BUS_DMA_NOWAIT, &txbuf->map);
1673 		if (error != 0) {
1674 			printf("%s: Unable to create TX DMA map\n", ifp->if_xname);
1675 			goto fail;
1676 		}
1677 	}
1678 
1679 	return 0;
1680 fail:
1681 	/* We free all, it handles case where we are in the middle */
1682 	ixgbe_free_transmit_structures(sc);
1683 	return (error);
1684 }
1685 
1686 /*********************************************************************
1687  *
1688  *  Initialize a transmit ring.
1689  *
1690  **********************************************************************/
1691 void
1692 ixgbe_setup_transmit_ring(struct tx_ring *txr)
1693 {
1694 	struct ix_softc *sc = txr->sc;
1695 	struct ixgbe_tx_buf *txbuf;
1696 	int i;
1697 
1698 	/* Clear the old ring contents */
1699 	bzero((void *)txr->tx_base,
1700 	      (sizeof(union ixgbe_adv_tx_desc)) * sc->num_tx_desc);
1701 	/* Reset indices */
1702 	txr->next_avail_tx_desc = 0;
1703 	txr->next_tx_to_clean = 0;
1704 
1705 	/* Free any existing tx buffers. */
1706         txbuf = txr->tx_buffers;
1707 	for (i = 0; i < sc->num_tx_desc; i++, txbuf++) {
1708 		if (txbuf->m_head != NULL) {
1709 			bus_dmamap_sync(txr->txtag, txbuf->map,
1710 			    0, txbuf->map->dm_mapsize,
1711 			    BUS_DMASYNC_POSTWRITE);
1712 			bus_dmamap_unload(txr->txtag, txbuf->map);
1713 			m_freem(txbuf->m_head);
1714 		}
1715 		txbuf->m_head = NULL;
1716         }
1717 
1718 	/* Set number of descriptors available */
1719 	txr->tx_avail = sc->num_tx_desc;
1720 
1721 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
1722 	    0, txr->txdma.dma_map->dm_mapsize,
1723 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1724 
1725 }
1726 
1727 /*********************************************************************
1728  *
1729  *  Initialize all transmit rings.
1730  *
1731  **********************************************************************/
1732 int
1733 ixgbe_setup_transmit_structures(struct ix_softc *sc)
1734 {
1735 	struct tx_ring *txr = sc->tx_rings;
1736 	int	i;
1737 
1738 	for (i = 0; i < sc->num_tx_queues; i++, txr++)
1739 		ixgbe_setup_transmit_ring(txr);
1740 
1741 	return (0);
1742 }
1743 
1744 /*********************************************************************
1745  *
1746  *  Enable transmit unit.
1747  *
1748  **********************************************************************/
1749 void
1750 ixgbe_initialize_transmit_units(struct ix_softc *sc)
1751 {
1752 	struct ifnet	*ifp = &sc->arpcom.ac_if;
1753 	struct tx_ring	*txr;
1754 	struct ixgbe_hw	*hw = &sc->hw;
1755 	int		 i;
1756 	uint64_t	 tdba, txhwb;
1757 	uint32_t	 txctrl;
1758 
1759 	/* Setup the Base and Length of the Tx Descriptor Ring */
1760 
1761 	for (i = 0; i < sc->num_tx_queues; i++) {
1762 		txr = &sc->tx_rings[i];
1763 
1764 		/* Setup descriptor base address */
1765 		tdba = txr->txdma.dma_map->dm_segs[0].ds_addr;
1766 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),
1767 		       (tdba & 0x00000000ffffffffULL));
1768 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
1769 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
1770 		    sc->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
1771 
1772 		/* Setup for Head WriteBack */
1773 		txhwb = txr->txwbdma.dma_map->dm_segs[0].ds_addr;
1774 		txhwb |= IXGBE_TDWBAL_HEAD_WB_ENABLE;
1775 		IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(i),
1776 		    (txhwb & 0x00000000ffffffffULL));
1777 		IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(i),
1778 		    (txhwb >> 32));
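		/*
		 * Head write-back requires relaxed ordering to be disabled
		 * on its write, so software never sees a head value ahead
		 * of the descriptors it covers.
		 */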
1779 		txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
1780 		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
1781 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
1782 
1783 		/* Setup the HW Tx Head and Tail descriptor pointers */
1784 		IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
1785 		IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
1786 
1787 		/* Setup Transmit Descriptor Cmd Settings */
1788 		txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
1789 
1790 		txr->watchdog_timer = 0;
1791 	}
1792 	ifp->if_timer = 0;
1793 
1794 	return;
1795 }
1796 
1797 /*********************************************************************
1798  *
1799  *  Free all transmit rings.
1800  *
1801  **********************************************************************/
1802 void
1803 ixgbe_free_transmit_structures(struct ix_softc *sc)
1804 {
1805 	struct tx_ring *txr = sc->tx_rings;
1806 	int		i;
1807 
1808 	for (i = 0; i < sc->num_tx_queues; i++, txr++) {
1809 		ixgbe_free_transmit_buffers(txr);
1810 		ixgbe_dma_free(sc, &txr->txdma);
1811 		ixgbe_dma_free(sc, &txr->txwbdma);
1812 	}
1813 	free(sc->tx_rings, M_DEVBUF);
1814 }
1815 
1816 /*********************************************************************
1817  *
1818  *  Free transmit ring related data structures.
1819  *
1820  **********************************************************************/
1821 void
1822 ixgbe_free_transmit_buffers(struct tx_ring *txr)
1823 {
1824 	struct ix_softc *sc = txr->sc;
1825 	struct ixgbe_tx_buf *tx_buffer;
1826 	int             i;
1827 
1828 	INIT_DEBUGOUT("free_transmit_ring: begin");
1829 
1830 	if (txr->tx_buffers == NULL)
1831 		return;
1832 
1833 	tx_buffer = txr->tx_buffers;
1834 	for (i = 0; i < sc->num_tx_desc; i++, tx_buffer++) {
1835 		if (tx_buffer->m_head != NULL) {
1836 			bus_dmamap_sync(txr->txtag, tx_buffer->map,
1837 			    0, tx_buffer->map->dm_mapsize,
1838 			    BUS_DMASYNC_POSTWRITE);
1839 			bus_dmamap_unload(txr->txtag,
1840 			    tx_buffer->map);
1841 			m_freem(tx_buffer->m_head);
1842 			if (tx_buffer->map != NULL) {
1843 				bus_dmamap_destroy(txr->txtag,
1844 				    tx_buffer->map);
1845 			}
1846 		} else if (tx_buffer->map != NULL) {
1847 			bus_dmamap_unload(txr->txtag,
1848 			    tx_buffer->map);
1849 			bus_dmamap_destroy(txr->txtag,
1850 			    tx_buffer->map);
1851 		}
1852 		tx_buffer->m_head = NULL;
1853 		tx_buffer->map = NULL;
1854 	}
1855 
1856 	if (txr->tx_buffers != NULL)
1857 		free(txr->tx_buffers, M_DEVBUF);
1858 	txr->tx_buffers = NULL;
1859 	txr->txtag = NULL;
1860 }
1861 
1862 #ifdef IX_CSUM_OFFLOAD
1863 /*********************************************************************
1864  *
1865  *  Advanced Context Descriptor setup for VLAN or CSUM
1866  *
1867  **********************************************************************/
1868 
1869 int
1870 ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
1871 {
1872 	struct ix_softc *sc = txr->sc;
1873 	struct ifnet	*ifp = &sc->arpcom.ac_if;
1874 	struct ixgbe_adv_tx_context_desc *TXD;
1875 	struct ixgbe_tx_buf        *tx_buffer;
1876 	uint32_t vlan_macip_lens = 0, type_tucmd_mlhl = 0;
1877 	struct ip *ip;
1878 	struct ip6_hdr *ip6;
1879 	int  ehdrlen, ip_hlen = 0;
1880 	uint16_t etype;
1881 	uint8_t ipproto = 0;
1882 	int offload = TRUE;
1883 	int ctxd = txr->next_avail_tx_desc;
1884 #if NVLAN > 0
1885 	struct ether_vlan_header *eh;
1886 #else
1887 	struct ether_header *eh;
1888 #endif
1889 
1890 	if ((ifp->if_capabilities & IFCAP_CSUM_IPv4) == 0)
1891 		offload = FALSE;
1892 
1893 	tx_buffer = &txr->tx_buffers[ctxd];
1894 	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
1895 
1896 	/*
1897 	 * In advanced descriptors the vlan tag must
1898 	 * be placed into the descriptor itself.
1899 	 */
1900 #if NVLAN > 0
1901 	if (mp->m_flags & M_VLANTAG) {
1902 		vlan_macip_lens |=
1903 		    htole16(mp->m_pkthdr.ether_vtag) << IXGBE_ADVTXD_VLAN_SHIFT;
1904 	} else
1905 #endif
1906 	if (offload == FALSE)
1907 		return FALSE;	/* No need for CTX */
1908 
1909 	/*
1910 	 * Determine where frame payload starts.
1911 	 * Jump over vlan headers if already present,
1912 	 * helpful for QinQ too.
1913 	 */
1914 #if NVLAN > 0
1915 	eh = mtod(mp, struct ether_vlan_header *);
1916 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
1917 		etype = ntohs(eh->evl_proto);
1918 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1919 	} else {
1920 		etype = ntohs(eh->evl_encap_proto);
1921 		ehdrlen = ETHER_HDR_LEN;
1922 	}
1923 #else
1924 	eh = mtod(mp, struct ether_header *);
1925 	etype = ntohs(eh->ether_type);
1926 	ehdrlen = ETHER_HDR_LEN;
1927 #endif
1928 
1929 	/* Set the ether header length */
1930 	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
1931 
1932 	switch (etype) {
1933 		case ETHERTYPE_IP:
1934 			ip = (struct ip *)(mp->m_data + ehdrlen);
1935 			ip_hlen = ip->ip_hl << 2;
1936 			if (mp->m_len < ehdrlen + ip_hlen)
1937 				return FALSE; /* failure */
1938 			ipproto = ip->ip_p;
1939 			if (mp->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
1940 				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
1941 			break;
1942 		case ETHERTYPE_IPV6:
1943 			ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
1944 			ip_hlen = sizeof(struct ip6_hdr);
1945 			if (mp->m_len < ehdrlen + ip_hlen)
1946 				return FALSE; /* failure */
1947 			ipproto = ip6->ip6_nxt;
1948 			if (mp->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
1949 				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
1950 			break;
1951 		default:
1952 			offload = FALSE;
1953 			break;
1954 	}
1955 
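	/*
	 * DEXT + DTYP_CTXT below mark this ring slot as an advanced context
	 * descriptor: it carries no packet data, only the header lengths and
	 * checksum directives that the following data descriptors refer to.
	 */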
1956 	vlan_macip_lens |= ip_hlen;
1957 	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
1958 
1959 	switch (ipproto) {
1960 	case IPPROTO_TCP:
1961 		if (mp->m_pkthdr.csum_flags & M_TCPV4_CSUM_OUT)
1962 			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
1963 		break;
1964 	case IPPROTO_UDP:
1965 		if (mp->m_pkthdr.csum_flags & M_UDPV4_CSUM_OUT)
1966 			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
1967 		break;
1968 	default:
1969 		offload = FALSE;
1970 		break;
1971 	}
1972 
1973 	/* Now copy bits into descriptor */
1974 	TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
1975 	TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
1976 	TXD->seqnum_seed = htole32(0);
1977 	TXD->mss_l4len_idx = htole32(0);
1978 
1979 #ifndef NO_82598_A0_SUPPORT
1980 	if (sc->hw.revision_id == 0)
1981 		desc_flip(TXD);
1982 #endif
1983 
1984 	tx_buffer->m_head = NULL;
1985 
1986 	/* We've consumed the first desc, adjust counters */
1987 	if (++ctxd == sc->num_tx_desc)
1988 		ctxd = 0;
1989 	txr->next_avail_tx_desc = ctxd;
1990 	--txr->tx_avail;
1991 
1992 	return (offload);
1993 }
1994 
1995 #ifdef notyet
1996 /**********************************************************************
1997  *
1998  *  Setup work for hardware segmentation offload (TSO) on
1999  *  adapters using advanced tx descriptors
2000  *
2001  **********************************************************************/
2002 int
2003 ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, uint32_t *paylen)
2004 {
2005 	struct ix_softc *sc = txr->sc;
2006 	struct ixgbe_adv_tx_context_desc *TXD;
2007 	struct ixgbe_tx_buf        *tx_buffer;
2008 	uint32_t vlan_macip_lens = 0, type_tucmd_mlhl = 0;
2009 	uint32_t mss_l4len_idx = 0;
2010 	int ctxd, ehdrlen,  hdrlen, ip_hlen, tcp_hlen;
2011 #if NVLAN > 0
2012 	uint16_t vtag = 0;
2013 	struct ether_vlan_header *eh;
2014 #else
2015 	struct ether_header *eh;
2016 #endif
2017 	struct ip *ip;
2018 	struct tcphdr *th;
2019 
2020 	if (((mp->m_pkthdr.csum_flags & CSUM_TSO) == 0) ||
2021 	    (mp->m_pkthdr.len <= IXGBE_TX_BUFFER_SIZE))
2022 	        return FALSE;
2023 
2024 	/*
2025 	 * Determine where frame payload starts.
2026 	 * Jump over vlan headers if already present
2027 	 */
2028 #if NVLAN > 0
2029 	eh = mtod(mp, struct ether_vlan_header *);
2030 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
2031 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2032 	else
2033 		ehdrlen = ETHER_HDR_LEN;
2034 #else
2035 	eh = mtod(mp, struct ether_header *);
2036 	ehdrlen = ETHER_HDR_LEN;
2037 #endif
2038 
2039         /* Ensure we have at least the IP+TCP header in the first mbuf. */
2040         if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
2041 		return FALSE;
2042 
2043 	ctxd = txr->next_avail_tx_desc;
2044 	tx_buffer = &txr->tx_buffers[ctxd];
2045 	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
2046 
2047 	ip = (struct ip *)(mp->m_data + ehdrlen);
2048 	if (ip->ip_p != IPPROTO_TCP)
2049 		return FALSE;   /* 0 */
2050 	ip->ip_len = 0;
2051 	ip->ip_sum = 0;
2052 	ip_hlen = ip->ip_hl << 2;
2053 	th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
2054 	th->th_sum = in_pseudo(ip->ip_src.s_addr,
2055 	    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2056 	tcp_hlen = th->th_off << 2;
2057 	hdrlen = ehdrlen + ip_hlen + tcp_hlen;
2058 	/* This is used in the transmit desc in encap */
2059 	*paylen = mp->m_pkthdr.len - hdrlen;
2060 
2061 #if NVLAN > 0
2062 	/* VLAN MACLEN IPLEN */
2063 	if (mp->m_flags & M_VLANTAG) {
2064 		vtag = htole16(mp->m_pkthdr.ether_vtag);
2065 		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
2066 	}
2067 #endif
2068 
2069 	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
2070 	vlan_macip_lens |= ip_hlen;
2071 	TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
2072 
2073 	/* ADV DTYPE TUCMD */
2074 	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
2075 	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2076 	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
2077 	TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
2078 
2079 
2080 	/* MSS L4LEN IDX */
2081 	mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
2082 	mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
2083 	TXD->mss_l4len_idx = htole32(mss_l4len_idx);
2084 
2085 	TXD->seqnum_seed = htole32(0);
2086 	tx_buffer->m_head = NULL;
2087 
2088 #ifndef NO_82598_A0_SUPPORT
2089 	if (sc->hw.revision_id == 0)
2090 		desc_flip(TXD);
2091 #endif
2092 
2093 	if (++ctxd == sc->num_tx_desc)
2094 		ctxd = 0;
2095 
2096 	txr->tx_avail--;
2097 	txr->next_avail_tx_desc = ctxd;
2098 	return TRUE;
2099 }
2100 
2101 #else	/* For 6.2 RELEASE */
2102 /* This makes it easy to keep the code common */
2103 int
2104 ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, uint32_t *paylen)
2105 {
2106 	return (FALSE);
2107 }
2108 #endif
2109 #endif
2110 
2111 /**********************************************************************
2112  *
2113  *  Examine each tx_buffer in the used queue. If the hardware is done
2114  *  processing the packet then free associated resources. The
2115  *  tx_buffer is put back on the free queue.
2116  *
2117  **********************************************************************/
2118 int
2119 ixgbe_txeof(struct tx_ring *txr)
2120 {
2121 	struct ix_softc			*sc = txr->sc;
2122 	struct ifnet			*ifp = &sc->arpcom.ac_if;
2123 	uint				 first, last, done, num_avail;
2124 	struct ixgbe_tx_buf		*tx_buffer;
2125 	struct ixgbe_legacy_tx_desc *tx_desc;
2126 
2127 	if (txr->tx_avail == sc->num_tx_desc)
2128 		return FALSE;
2129 
2130 	num_avail = txr->tx_avail;
2131 	first = txr->next_tx_to_clean;
2132 
2133 	tx_buffer = &txr->tx_buffers[first];
2134 
2135 	/* For cleanup we just use legacy struct */
2136 	tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
2137 
2138 	/* Get the HWB */
2139 	bus_dmamap_sync(txr->txwbdma.dma_tag, txr->txwbdma.dma_map,
2140 	    0, txr->txwbdma.dma_map->dm_mapsize,
2141 	    BUS_DMASYNC_POSTREAD);
2142 	done = *txr->tx_hwb;
2143 
2144 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2145 	    0, txr->txdma.dma_map->dm_mapsize,
2146 	    BUS_DMASYNC_POSTREAD);
2147 
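	/*
	 * *txr->tx_hwb holds the head index the hardware wrote back: every
	 * descriptor from next_tx_to_clean up to (but not including) that
	 * index has completed and its mbuf can be reclaimed.
	 */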
2148 	while (TRUE) {
2149 		/* We clean the range up to the last head write-back */
2150 		while (first != done) {
2151 			tx_desc->upper.data = 0;
2152 			tx_desc->lower.data = 0;
2153 			tx_desc->buffer_addr = 0;
2154 			num_avail++;
2155 
2156 			if (tx_buffer->m_head) {
2157 				ifp->if_opackets++;
2158 				bus_dmamap_sync(txr->txtag,
2159 				    tx_buffer->map,
2160 				    0, tx_buffer->map->dm_mapsize,
2161 				    BUS_DMASYNC_POSTWRITE);
2162 				bus_dmamap_unload(txr->txtag,
2163 				    tx_buffer->map);
2164 				m_freem(tx_buffer->m_head);
2165 				tx_buffer->m_head = NULL;
2166 			}
2167 
2168 			if (++first == sc->num_tx_desc)
2169 				first = 0;
2170 
2171 			tx_buffer = &txr->tx_buffers[first];
2172 			tx_desc = (struct ixgbe_legacy_tx_desc *)
2173 			    &txr->tx_base[first];
2174 		}
2175 		/* See if there is more work now */
2176 		last = done;
2177 		bus_dmamap_sync(txr->txwbdma.dma_tag, txr->txwbdma.dma_map,
2178 		    0, txr->txwbdma.dma_map->dm_mapsize,
2179 		    BUS_DMASYNC_POSTREAD);
2180 		done = *txr->tx_hwb;
2181 		if (last == done)
2182 			break;
2183 	}
2184 
2185 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2186 	    0, txr->txdma.dma_map->dm_mapsize,
2187 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2188 
2189 	txr->next_tx_to_clean = first;
2190 
2191 	/*
2192 	 * If we have enough room, clear IFF_OACTIVE to tell the stack that
2193 	 * it is OK to send packets. If there are no pending descriptors,
2194 	 * clear the timeout. Otherwise, if some descriptors have been freed,
2195 	 * restart the timeout.
2196 	 */
2197 	if (num_avail > IXGBE_TX_CLEANUP_THRESHOLD) {
2198 		ifp->if_flags &= ~IFF_OACTIVE;
2199 
2200 		/* If all are clean turn off the timer */
2201 		if (num_avail == sc->num_tx_desc) {
2202 			ifp->if_timer = 0;
2203 			txr->watchdog_timer = 0;
2204 			txr->tx_avail = num_avail;
2205 			return FALSE;
2206 		}
2207 		/* Some were cleaned, so reset timer */
2208 		else if (num_avail != txr->tx_avail) {
2209 			ifp->if_timer = IXGBE_TX_TIMEOUT;
2210 			txr->watchdog_timer = IXGBE_TX_TIMEOUT;
2211 		}
2212 	}
2213 
2214 	txr->tx_avail = num_avail;
2215 
2216 	return TRUE;
2217 }
2218 
2219 /*********************************************************************
2220  *
2221  *  Get a buffer from system mbuf buffer pool.
2222  *
2223  **********************************************************************/
2224 int
2225 ixgbe_get_buf(struct rx_ring *rxr, int i, struct mbuf *nmp)
2226 {
2227 	struct ix_softc	*sc = rxr->sc;
2228 	struct mbuf	*mp = nmp;
2229 	bus_dmamap_t	map;
2230 	int		error, old, s = 0;
2231 	int		size = MCLBYTES;
2232 	struct ixgbe_rx_buf	*rxbuf;
2233 
2234 #ifdef notyet
2235 	/* Are we going to Jumbo clusters? */
2236 	if (sc->bigbufs) {
2237 		size = MJUMPAGESIZE;
2238 		s = 1;
2239 	};
2240 
2241 	mp = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, size);
2242 	if (mp == NULL) {
2243 		sc->mbuf_alloc_failed++;
2244 		return (ENOBUFS);
2245 	}
2246 #endif
2247 
2248 	if (mp == NULL) {
2249 		MGETHDR(mp, M_DONTWAIT, MT_DATA);
2250 		if (mp == NULL) {
2251 			sc->mbuf_alloc_failed++;
2252 			return (ENOBUFS);
2253 		}
2254 		MCLGET(mp, M_DONTWAIT);
2255 		if ((mp->m_flags & M_EXT) == 0) {
2256 			m_freem(mp);
2257 			sc->mbuf_cluster_failed++;
2258 			return (ENOBUFS);
2259 		}
2260 		mp->m_len = mp->m_pkthdr.len = size;
2261 	} else {
2262 		mp->m_len = mp->m_pkthdr.len = size;
2263 		mp->m_data = mp->m_ext.ext_buf;
2264 		mp->m_next = NULL;
2265 	}
2266 
2267 	if (sc->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
2268 		m_adj(mp, ETHER_ALIGN);
2269 
2270 	/*
2271 	 * Using memory from the mbuf cluster pool, invoke the bus_dma
2272 	 * machinery to arrange the memory mapping.
2273 	 */
2274 	error = bus_dmamap_load_mbuf(rxr->rxtag[s], rxr->spare_map[s],
2275 	    mp, BUS_DMA_NOWAIT);
2276 	if (error) {
2277 		m_freem(mp);
2278 		return (error);
2279 	}
2280 
2281 	/* Now check our target buffer for existing mapping */
2282 	rxbuf = &rxr->rx_buffers[i];
2283 	old = rxbuf->bigbuf;
2284 	if (rxbuf->m_head != NULL)
2285 		bus_dmamap_unload(rxr->rxtag[old], rxbuf->map[old]);
2286 
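	/*
	 * Swap the freshly loaded spare map into this ring slot and keep the
	 * slot's old map as the new spare, then record the cluster and
	 * program its physical address into the receive descriptor.
	 */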
2287 	map = rxbuf->map[old];
2288 	rxbuf->map[s] = rxr->spare_map[s];
2289 	rxr->spare_map[old] = map;
2290 	rxbuf->m_head = mp;
2291 	rxbuf->bigbuf = s;
2292 
2293 	rxr->rx_base[i].read.pkt_addr =
2294 	    htole64(rxbuf->map[s]->dm_segs[0].ds_addr);
2295 
2296 	bus_dmamap_sync(rxr->rxtag[s], rxbuf->map[s],
2297 	    0, rxbuf->map[s]->dm_mapsize, BUS_DMASYNC_PREREAD);
2298 
2299 #ifndef NO_82598_A0_SUPPORT
2300 	/* A0 silicon needs the descriptors one's complemented */
2301 	if (sc->hw.revision_id == 0) {
2302 		struct dhack {uint32_t a1; uint32_t a2; uint32_t b1; uint32_t b2;};
2303 		struct dhack *d;
2304 
2305 		d = (struct dhack *)&rxr->rx_base[i];
2306 		d->a1 = ~(d->a1);
2307 		d->a2 = ~(d->a2);
2308 	}
2309 #endif
2310 
2311 	return (0);
2312 }
2313 
2314 /*********************************************************************
2315  *
2316  *  Allocate memory for rx_buffer structures. Since we use one
2317  *  rx_buffer per received packet, the maximum number of rx_buffers
2318  *  that we'll need is equal to the number of receive descriptors
2319  *  that we've allocated.
2320  *
2321  **********************************************************************/
2322 int
2323 ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
2324 {
2325 	struct ix_softc		*sc = rxr->sc;
2326 	struct ifnet		*ifp = &sc->arpcom.ac_if;
2327 	struct ixgbe_osdep	*os = &sc->osdep;
2328 	struct ixgbe_rx_buf 	*rxbuf;
2329 	int             	i, bsize, error;
2330 
2331 	bsize = sizeof(struct ixgbe_rx_buf) * sc->num_rx_desc;
2332 	if (!(rxr->rx_buffers =
2333 	    (struct ixgbe_rx_buf *) malloc(bsize,
2334 	    M_DEVBUF, M_NOWAIT | M_ZERO))) {
2335 		printf("%s: Unable to allocate rx_buffer memory\n", ifp->if_xname);
2336 		error = ENOMEM;
2337 		goto fail;
2338 	}
2339 	rxr->rxtag[0] = rxr->rxtag[1] = os->os_pa->pa_dmat;
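	/*
	 * Two DMA tags/maps are kept per receive buffer: index 0 sized for a
	 * standard MCLBYTES cluster and index 1 for a jumbo (MJUMPAGESIZE)
	 * cluster; rxbuf->bigbuf records which of the two is loaded.
	 */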
2340 
2341 	/* Create the spare maps (used by getbuf) */
2342         error = bus_dmamap_create(rxr->rxtag[0],
2343 	    MCLBYTES, 1, MCLBYTES, 0, BUS_DMA_NOWAIT, &rxr->spare_map[0]);
2344 	if (error) {
2345 		printf("%s: %s: bus_dmamap_create failed: %d\n", ifp->if_xname,
2346 		    __func__, error);
2347 		goto fail;
2348 	}
2349         error = bus_dmamap_create(rxr->rxtag[1],
2350 	    MJUMPAGESIZE, 1, MJUMPAGESIZE, 0, BUS_DMA_NOWAIT, &rxr->spare_map[1]);
2351 	if (error) {
2352 		printf("%s: %s: bus_dmamap_create failed: %d\n", ifp->if_xname,
2353 		    __func__, error);
2354 		goto fail;
2355 	}
2356 
2357 	for (i = 0; i < sc->num_rx_desc; i++, rxbuf++) {
2358 		rxbuf = &rxr->rx_buffers[i];
2359 		error = bus_dmamap_create(rxr->rxtag[0], MCLBYTES, 1, MCLBYTES,
2360 		    0, BUS_DMA_NOWAIT, &rxbuf->map[0]);
2361 		if (error) {
2362 			printf("%s: Unable to create Small RX DMA map\n", ifp->if_xname);
2363 			goto fail;
2364 		}
2365 		error = bus_dmamap_create(rxr->rxtag[1], MJUMPAGESIZE, 1, MJUMPAGESIZE,
2366 		    0, BUS_DMA_NOWAIT, &rxbuf->map[1]);
2367 		if (error) {
2368 			printf("%s: Unable to create Large RX DMA map\n", ifp->if_xname);
2369 			goto fail;
2370 		}
2371 	}
2372 
2373 	return (0);
2374 
2375 fail:
2376 	/* Frees all, but can handle partial completion */
2377 	ixgbe_free_receive_structures(sc);
2378 	return (error);
2379 }
2380 
2381 /*********************************************************************
2382  *
2383  *  Initialize a receive ring and its buffers.
2384  *
2385  **********************************************************************/
2386 int
2387 ixgbe_setup_receive_ring(struct rx_ring *rxr)
2388 {
2389 	struct ix_softc		*sc = rxr->sc;
2390 	struct ixgbe_rx_buf	*rxbuf;
2391 	int			j, rsize, s = 0, i;
2392 
2393 	rsize = roundup2(sc->num_rx_desc *
2394 	    sizeof(union ixgbe_adv_rx_desc), 4096);
2395 	/* Clear the ring contents */
2396 	bzero((void *)rxr->rx_base, rsize);
2397 
2398 	/*
2399 	** Free any current RX buffers: the size of the buffer
2400 	** that is loaded is indicated by the rxbuf's
2401 	** bigbuf value.
2402 	*/
2403 	for (i = 0; i < sc->num_rx_desc; i++) {
2404 		rxbuf = &rxr->rx_buffers[i];
2405 		s = rxbuf->bigbuf;
2406 		if (rxbuf->m_head != NULL) {
2407 			bus_dmamap_sync(rxr->rxtag[s], rxbuf->map[s],
2408 			    0, rxbuf->map[s]->dm_mapsize,
2409 			    BUS_DMASYNC_POSTREAD);
2410 			bus_dmamap_unload(rxr->rxtag[s], rxbuf->map[s]);
2411 			m_freem(rxbuf->m_head);
2412 			rxbuf->m_head = NULL;
2413 		}
2414 	}
2415 
2416 	for (j = 0; j < sc->num_rx_desc; j++) {
2417 		if (ixgbe_get_buf(rxr, j, NULL) == ENOBUFS) {
2418 			rxr->rx_buffers[j].m_head = NULL;
2419 			rxr->rx_base[j].read.pkt_addr = 0;
2420 			/* If we fail, some buffers may have changed size */
2421 			s = sc->bigbufs;
2422 			goto fail;
2423 		}
2424 	}
2425 
2426 	/* Setup our descriptor indices */
2427 	rxr->next_to_check = 0;
2428 	rxr->last_cleaned = 0;
2429 
2430 	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2431 	    0, rxr->rxdma.dma_map->dm_mapsize,
2432 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2433 
2434 	return (0);
2435 fail:
2436 	/*
2437 	 * We need to clean up any buffers allocated so far
2438 	 * 'j' is the failing index, decrement it to get the
2439 	 * last success.
2440 	 */
2441 	for (--j; j >= 0; j--) {
2442 		rxbuf = &rxr->rx_buffers[j];
2443 		if (rxbuf->m_head != NULL) {
2444 			bus_dmamap_sync(rxr->rxtag[s], rxbuf->map[s],
2445 			    0, rxbuf->map[s]->dm_mapsize,
2446 			    BUS_DMASYNC_POSTREAD);
2447 			bus_dmamap_unload(rxr->rxtag[s], rxbuf->map[s]);
2448 			m_freem(rxbuf->m_head);
2449 			rxbuf->m_head = NULL;
2450 		}
2451 	}
2452 	return (ENOBUFS);
2453 }
2454 
2455 /*********************************************************************
2456  *
2457  *  Initialize all receive rings.
2458  *
2459  **********************************************************************/
2460 int
2461 ixgbe_setup_receive_structures(struct ix_softc *sc)
2462 {
2463 	struct rx_ring *rxr = sc->rx_rings;
2464 	int i, j, s;
2465 
2466 	for (i = 0; i < sc->num_rx_queues; i++, rxr++)
2467 		if (ixgbe_setup_receive_ring(rxr))
2468 			goto fail;
2469 
2470 	return (0);
2471 fail:
2472 	/*
2473 	 * Free RX buffers allocated so far, we will only handle
2474 	 * the rings that completed, the failing case will have
2475 	 * cleaned up for itself. The value of 'i' will be the
2476 	 * failed ring so we must pre-decrement it.
2477 	 */
2478 	rxr = sc->rx_rings;
2479 	for (--i; i >= 0; i--, rxr++) {
2480 		for (j = 0; j < sc->num_rx_desc; j++) {
2481 			struct ixgbe_rx_buf *rxbuf;
2482 			rxbuf = &rxr->rx_buffers[j];
2483 			s = rxbuf->bigbuf;
2484 			if (rxbuf->m_head != NULL) {
2485 				bus_dmamap_sync(rxr->rxtag[s], rxbuf->map[s],
2486 				    0, rxbuf->map[s]->dm_mapsize, BUS_DMASYNC_POSTREAD);
2487 				bus_dmamap_unload(rxr->rxtag[s], rxbuf->map[s]);
2488 				m_freem(rxbuf->m_head);
2489 				rxbuf->m_head = NULL;
2490 			}
2491 		}
2492 	}
2493 
2494 	return (ENOBUFS);
2495 }
2496 
2497 /*********************************************************************
2498  *
2499  *  Enable receive unit.
2500  *
2501  **********************************************************************/
2502 void
2503 ixgbe_initialize_receive_units(struct ix_softc *sc)
2504 {
2505 	struct	rx_ring	*rxr = sc->rx_rings;
2506 	struct ifnet   *ifp = &sc->arpcom.ac_if;
2507 	uint32_t	rxctrl, fctrl, srrctl, rxcsum;
2508 	uint32_t	reta, mrqc, hlreg, linkvec;
2509 	uint32_t	random[10];
2510 	int		i;
2511 
2512 	/*
2513 	 * Make sure receives are disabled while
2514 	 * setting up the descriptor ring
2515 	 */
2516 	rxctrl = IXGBE_READ_REG(&sc->hw, IXGBE_RXCTRL);
2517 	IXGBE_WRITE_REG(&sc->hw, IXGBE_RXCTRL,
2518 	    rxctrl & ~IXGBE_RXCTRL_RXEN);
2519 
2520 	/* Enable broadcasts */
2521 	fctrl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
2522 	fctrl |= IXGBE_FCTRL_BAM;
2523 	IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, fctrl);
2524 
2525 	hlreg = IXGBE_READ_REG(&sc->hw, IXGBE_HLREG0);
2526 	if (ifp->if_mtu > ETHERMTU)
2527 		hlreg |= IXGBE_HLREG0_JUMBOEN;
2528 	else
2529 		hlreg &= ~IXGBE_HLREG0_JUMBOEN;
2530 	IXGBE_WRITE_REG(&sc->hw, IXGBE_HLREG0, hlreg);
2531 
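	/*
	 * SRRCTL selects the per-queue receive buffer size (2K clusters, or
	 * 4K when jumbo buffers are in use) and the advanced one-buffer
	 * descriptor format consumed by ixgbe_rxeof().
	 */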
2532 	srrctl = IXGBE_READ_REG(&sc->hw, IXGBE_SRRCTL(0));
2533 	srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
2534 	srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
2535 	if (sc->bigbufs)
2536 		srrctl |= 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2537 	else
2538 		srrctl |= 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2539 	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2540 	IXGBE_WRITE_REG(&sc->hw, IXGBE_SRRCTL(0), srrctl);
2541 
2542 	/* Set Queue moderation rate */
2543 	for (i = 0; i < IXGBE_MSGS; i++)
2544 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(i), DEFAULT_ITR);
2545 
2546 	/* Set Link moderation lower */
2547 	linkvec = sc->num_tx_queues + sc->num_rx_queues;
2548 	IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(linkvec), LINK_ITR);
2549 
2550 	for (i = 0; i < sc->num_rx_queues; i++, rxr++) {
2551 		uint64_t rdba = rxr->rxdma.dma_map->dm_segs[0].ds_addr;
2552 		/* Setup the Base and Length of the Rx Descriptor Ring */
2553 		IXGBE_WRITE_REG(&sc->hw, IXGBE_RDBAL(i),
2554 			       (rdba & 0x00000000ffffffffULL));
2555 		IXGBE_WRITE_REG(&sc->hw, IXGBE_RDBAH(i), (rdba >> 32));
2556 		IXGBE_WRITE_REG(&sc->hw, IXGBE_RDLEN(i),
2557 		    sc->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
2558 
2559 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
2560 		IXGBE_WRITE_REG(&sc->hw, IXGBE_RDH(i), 0);
2561 		IXGBE_WRITE_REG(&sc->hw, IXGBE_RDT(i),
2562 		    sc->num_rx_desc - 1);
2563 	}
2564 
2565 	rxcsum = IXGBE_READ_REG(&sc->hw, IXGBE_RXCSUM);
2566 
2567 	if (sc->num_rx_queues > 1) {
2568 		/* set up random bits */
2569 		arc4random_buf(&random, sizeof(random));
2570 		switch (sc->num_rx_queues) {
2571 			case 8:
2572 			case 4:
2573 				reta = 0x00010203;
2574 				break;
2575 			case 2:
2576 				reta = 0x00010001;
2577 				break;
2578 			default:
2579 				reta = 0x00000000;
2580 		}
2581 
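		/*
		 * The 32 RETA registers below each hold four one-byte queue
		 * indices (128 entries in all); the RSS hash, keyed with the
		 * random RSSRK material, picks an entry and thus the
		 * destination queue for each received packet.
		 */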
2582 		/* Set up the redirection table */
2583 		for (i = 0; i < 32; i++) {
2584 			IXGBE_WRITE_REG(&sc->hw, IXGBE_RETA(i), reta);
2585 			if (sc->num_rx_queues > 4) {
2586 				++i;
2587 				IXGBE_WRITE_REG(&sc->hw,
2588 				    IXGBE_RETA(i), 0x04050607);
2589 			}
2590 		}
2591 
2592 		/* Now fill our hash function seeds */
2593 		for (i = 0; i < 10; i++)
2594 			IXGBE_WRITE_REG_ARRAY(&sc->hw,
2595 			    IXGBE_RSSRK(0), i, random[i]);
2596 
2597 		mrqc = IXGBE_MRQC_RSSEN
2598 		    /* Perform hash on these packet types */
2599 		    | IXGBE_MRQC_RSS_FIELD_IPV4
2600 		    | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
2601 		    | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
2602 		    | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
2603 		    | IXGBE_MRQC_RSS_FIELD_IPV6_EX
2604 		    | IXGBE_MRQC_RSS_FIELD_IPV6
2605 		    | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
2606 		    | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
2607 		    | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2608 		IXGBE_WRITE_REG(&sc->hw, IXGBE_MRQC, mrqc);
2609 
2610 		/* RSS and RX IPP Checksum are mutually exclusive */
2611 		rxcsum |= IXGBE_RXCSUM_PCSD;
2612 	}
2613 
2614 #if defined(IX_CSUM_OFFLOAD)
2615 	rxcsum |= IXGBE_RXCSUM_PCSD;
2616 #endif
2617 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
2618 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
2619 
2620 	IXGBE_WRITE_REG(&sc->hw, IXGBE_RXCSUM, rxcsum);
2621 
2622 	/* Enable Receive engine */
2623 	rxctrl |= (IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS);
2624 	IXGBE_WRITE_REG(&sc->hw, IXGBE_RXCTRL, rxctrl);
2625 
2626 	return;
2627 }
2628 
2629 /*********************************************************************
2630  *
2631  *  Free all receive rings.
2632  *
2633  **********************************************************************/
2634 void
2635 ixgbe_free_receive_structures(struct ix_softc *sc)
2636 {
2637 	struct rx_ring *rxr = sc->rx_rings;
2638 	int		i;
2639 
2640 	for (i = 0; i < sc->num_rx_queues; i++, rxr++) {
2641 		ixgbe_free_receive_buffers(rxr);
2642 		/* Free the ring memory as well */
2643 		ixgbe_dma_free(sc, &rxr->rxdma);
2644 	}
2645 
2646 	free(sc->rx_rings, M_DEVBUF);
2647 }
2648 
2649 /*********************************************************************
2650  *
2651  *  Free receive ring data structures
2652  *
2653  **********************************************************************/
2654 void
2655 ixgbe_free_receive_buffers(struct rx_ring *rxr)
2656 {
2657 	struct ix_softc		*sc = NULL;
2658 	struct ixgbe_rx_buf	*rxbuf = NULL;
2659 	int			 i, s;
2660 
2661 	INIT_DEBUGOUT("free_receive_buffers: begin");
2662 	sc = rxr->sc;
2663 	if (rxr->rx_buffers != NULL) {
2664 		rxbuf = &rxr->rx_buffers[0];
2665 		for (i = 0; i < sc->num_rx_desc; i++) {
2666 			int s = rxbuf->bigbuf;
2667 			if (rxbuf->map != NULL) {
2668 				bus_dmamap_unload(rxr->rxtag[s], rxbuf->map[s]);
2669 				bus_dmamap_destroy(rxr->rxtag[s], rxbuf->map[s]);
2670 			}
2671 			if (rxbuf->m_head != NULL) {
2672 				m_freem(rxbuf->m_head);
2673 			}
2674 			rxbuf->m_head = NULL;
2675 			++rxbuf;
2676 		}
2677 	}
2678 	if (rxr->rx_buffers != NULL) {
2679 		free(rxr->rx_buffers, M_DEVBUF);
2680 		rxr->rx_buffers = NULL;
2681 	}
2682 	for (s = 0; s < 2; s++) {
2683 		if (rxr->rxtag[s] != NULL)
2684 			rxr->rxtag[s] = NULL;
2685 	}
2686 	return;
2687 }
2688 
2689 /*********************************************************************
2690  *
2691  *  This routine executes in interrupt context. It replenishes
2692  *  the mbufs in the descriptor ring and passes data which has been
2693  *  DMA'ed into host memory up to the network stack.
2694  *
2695  *  We loop at most count times if count is > 0, or until done if
2696  *  count < 0.
2697  *
2698  *********************************************************************/
2699 int
2700 ixgbe_rxeof(struct rx_ring *rxr, int count)
2701 {
2702 	struct ix_softc 	*sc = rxr->sc;
2703 	struct ifnet   		*ifp = &sc->arpcom.ac_if;
2704 	struct mbuf    		*mp;
2705 	int             	 len, i, eop = 0;
2706 	uint8_t		 accept_frame = 0;
2707 	uint32_t		 staterr;
2708 	union ixgbe_adv_rx_desc	*cur;
2709 
2710 	i = rxr->next_to_check;
2711 	cur = &rxr->rx_base[i];
2712 
2713 	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 0,
2714 	    rxr->rxdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
2715 
2716 	staterr = cur->wb.upper.status_error;
2717 	if (!(staterr & IXGBE_RXD_STAT_DD))
2718 		return FALSE;
2719 
2720 	while ((staterr & IXGBE_RXD_STAT_DD) && (count != 0) &&
2721 	    (ifp->if_flags & IFF_RUNNING)) {
2722 		struct mbuf *m = NULL;
2723 		int s;
2724 
2725 		mp = rxr->rx_buffers[i].m_head;
2726 		s = rxr->rx_buffers[i].bigbuf;
2727 		bus_dmamap_sync(rxr->rxtag[s], rxr->rx_buffers[i].map[s],
2728 		    0, rxr->rx_buffers[i].map[s]->dm_mapsize,
2729 		    BUS_DMASYNC_POSTREAD);
2730 		bus_dmamap_unload(rxr->rxtag[s],
2731 		    rxr->rx_buffers[i].map[s]);
2732 
2733 		accept_frame = 1;
2734 		if (staterr & IXGBE_RXD_STAT_EOP) {
2735 			count--;
2736 			eop = 1;
2737 		} else {
2738 			eop = 0;
2739 		}
2740 		len = cur->wb.upper.length;
2741 
2742 		if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)
2743 			accept_frame = 0;
2744 
2745 		if (accept_frame) {
2746 			/* Get a fresh buffer */
2747 			if (ixgbe_get_buf(rxr, i, NULL) != 0) {
2748 				ifp->if_iqdrops++;
2749 				goto discard;
2750 			}
2751 
2752 			/* Assign correct length to the current fragment */
2753 			mp->m_len = len;
2754 
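			/*
			 * A frame larger than a single cluster arrives as a
			 * chain of descriptors; fragments are linked onto
			 * fmp/lmp until the descriptor flagged EOP completes
			 * the packet.
			 */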
2755 			if (rxr->fmp == NULL) {
2756 				mp->m_pkthdr.len = len;
2757 				rxr->fmp = mp; /* Store the first mbuf */
2758 				rxr->lmp = mp;
2759 			} else {
2760 				/* Chain mbuf's together */
2761 				mp->m_flags &= ~M_PKTHDR;
2762 				rxr->lmp->m_next = mp;
2763 				rxr->lmp = rxr->lmp->m_next;
2764 				rxr->fmp->m_pkthdr.len += len;
2765 			}
2766 
2767 			if (eop) {
2768 				rxr->fmp->m_pkthdr.rcvif = ifp;
2769 				ifp->if_ipackets++;
2770 				rxr->packet_count++;
2771 				rxr->byte_count += rxr->fmp->m_pkthdr.len;
2772 				ixgbe_rx_checksum(sc, staterr, rxr->fmp);
2773 
2774 #if NVLAN > 0
2775 				if (staterr & IXGBE_RXD_STAT_VP) {
2776 					rxr->fmp->m_pkthdr.ether_vtag =
2777 					    letoh16(cur->wb.upper.vlan);
2778 					rxr->fmp->m_flags |= M_VLANTAG;
2779 				}
2780 #endif
2781 
2782 				m = rxr->fmp;
2783 				rxr->fmp = NULL;
2784 				rxr->lmp = NULL;
2785 			}
2786 		} else {
2787 discard:
2788 			ixgbe_get_buf(rxr, i, mp);
2789 			if (rxr->fmp != NULL) {
2790 				m_freem(rxr->fmp);
2791 				rxr->fmp = NULL;
2792 				rxr->lmp = NULL;
2793 			}
2794 			m = NULL;
2795 		}
2796 
2797 		/* Zero out the receive descriptor's status */
2798 		cur->wb.upper.status_error = 0;
2799 		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2800 		    0, rxr->rxdma.dma_map->dm_mapsize,
2801 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2802 
2803 		rxr->last_cleaned = i; /* for updating tail */
2804 
2805 		if (++i == sc->num_rx_desc)
2806 			i = 0;
2807 
2808 		/* Now send up to the stack */
2809 		if (m != NULL) {
2810 			rxr->next_to_check = i;
2811 #if NBPFILTER > 0
2812 			if (ifp->if_bpf)
2813 				bpf_mtap_ether(ifp->if_bpf, m,
2814 				    BPF_DIRECTION_IN);
2815 #endif
2816 			ether_input_mbuf(ifp, m);
2817 			i = rxr->next_to_check;
2818 		}
2819 		/* Get next descriptor */
2820 		cur = &rxr->rx_base[i];
2821 		staterr = cur->wb.upper.status_error;
2822 	}
2823 	rxr->next_to_check = i;
2824 
2825 	/* Advance the hardware's receive queue "Tail Pointer" */
2826 	IXGBE_WRITE_REG(&sc->hw, IXGBE_RDT(rxr->me), rxr->last_cleaned);
2827 
2828 	if (!(staterr & IXGBE_RXD_STAT_DD))
2829 		return FALSE;
2830 
2831 	return TRUE;
2832 }
2833 
2834 /*********************************************************************
2835  *
2836  *  Verify that the hardware indicated that the checksum is valid.
2837  *  Inform the stack about the status of checksum so that stack
2838  *  doesn't spend time verifying the checksum.
2839  *
2840  *********************************************************************/
2841 void
2842 ixgbe_rx_checksum(struct ix_softc *sc,
2843     uint32_t staterr, struct mbuf * mp)
2844 {
2845 	uint16_t status = (uint16_t) staterr;
2846 	uint8_t  errors = (uint8_t) (staterr >> 24);
2847 
2848 	if (status & IXGBE_RXD_STAT_IPCS) {
2849 		/* Did it pass? */
2850 		if (!(errors & IXGBE_RXD_ERR_IPE)) {
2851 			/* IP Checksum Good */
2852 			mp->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
2853 		} else
2854 			mp->m_pkthdr.csum_flags = 0;
2855 	}
2856 
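	/*
	 * The hardware reports a single L4 checksum status bit, so both the
	 * TCP and UDP "checksum ok" flags are set and the stack relies on
	 * the protocol of the packet to interpret them.
	 */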
2857 	if (status & IXGBE_RXD_STAT_L4CS) {
2858 		/* Did it pass? */
2859 		if (!(errors & IXGBE_RXD_ERR_TCPE))
2860 			mp->m_pkthdr.csum_flags |=
2861 				M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
2862 	}
2863 
2864 }
2865 
2866 void
2867 ixgbe_enable_hw_vlans(struct ix_softc *sc)
2868 {
2869 	uint32_t	ctrl;
2870 
2871 	ixgbe_disable_intr(sc);
2872 	ctrl = IXGBE_READ_REG(&sc->hw, IXGBE_VLNCTRL);
2873 	ctrl |= IXGBE_VLNCTRL_VME;
2874 	ctrl &= ~IXGBE_VLNCTRL_CFIEN;
2875 	IXGBE_WRITE_REG(&sc->hw, IXGBE_VLNCTRL, ctrl);
2876 	ixgbe_enable_intr(sc);
2877 }
2878 
2879 void
2880 ixgbe_enable_intr(struct ix_softc *sc)
2881 {
2882 	struct ixgbe_hw *hw = &sc->hw;
2883 	uint32_t mask = IXGBE_EIMS_ENABLE_MASK;
2884 
2885 	/* Enable Fan Failure detection */
2886 	if (hw->phy.media_type == ixgbe_media_type_copper)
2887 		    mask |= IXGBE_EIMS_GPI_SDP1;
2888 	/* With RSS we use auto clear */
2889 	if (sc->msix_mem) {
2890 		/* Don't autoclear Link */
2891 		mask &= ~IXGBE_EIMS_OTHER;
2892 		mask &= ~IXGBE_EIMS_LSC;
2893 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAC,
2894 		    sc->eims_mask | mask);
2895 	}
2896 
2897 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
2898 	IXGBE_WRITE_FLUSH(hw);
2899 
2900 	return;
2901 }
2902 
2903 void
2904 ixgbe_disable_intr(struct ix_softc *sc)
2905 {
2906 	if (sc->msix_mem)
2907 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAC, 0);
2908 	IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, ~0);
2909 	IXGBE_WRITE_FLUSH(&sc->hw);
2910 	return;
2911 }
2912 
2913 uint16_t
2914 ixgbe_read_pci_cfg(struct ixgbe_hw *hw, uint32_t reg)
2915 {
2916 	struct pci_attach_args	*pa;
2917 	uint16_t value;
2918 
2919 	pa = ((struct ixgbe_osdep *)hw->back)->os_pa;
2920 
2921 	/* Should we do read/mask/write...?  16 vs 32 bit!!! */
2922 	value = pci_conf_read(pa->pa_pc, pa->pa_tag, reg) & 0xffff;
2923 
2924 	return (value);
2925 }
2926 
2927 void
2928 ixgbe_set_ivar(struct ix_softc *sc, uint16_t entry, uint8_t vector)
2929 {
2930 	uint32_t ivar, index;
2931 
2932 	vector |= IXGBE_IVAR_ALLOC_VAL;
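	/*
	 * Each 32-bit IVAR register packs four cause-to-vector entries: the
	 * upper bits of 'entry' select the register, its low two bits pick
	 * the byte lane, and IXGBE_IVAR_ALLOC_VAL marks the mapping valid.
	 */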
2933 	index = (entry >> 2) & 0x1F;
2934 	ivar = IXGBE_READ_REG(&sc->hw, IXGBE_IVAR(index));
2935 	ivar &= ~(0xFF << (8 * (entry & 0x3)));
2936 	ivar |= (vector << (8 * (entry & 0x3)));
2937 	IXGBE_WRITE_REG(&sc->hw, IXGBE_IVAR(index), ivar);
2938 }
2939 
2940 void
2941 ixgbe_configure_ivars(struct ix_softc *sc)
2942 {
2943 	struct  tx_ring *txr = sc->tx_rings;
2944 	struct  rx_ring *rxr = sc->rx_rings;
2945 	int		 i;
2946 
2947 	for (i = 0; i < sc->num_rx_queues; i++, rxr++) {
2948 		ixgbe_set_ivar(sc, IXGBE_IVAR_RX_QUEUE(i), rxr->msix);
2949 		sc->eims_mask |= rxr->eims;
2950 	}
2951 
2952 	for (i = 0; i < sc->num_tx_queues; i++, txr++) {
2953 		ixgbe_set_ivar(sc, IXGBE_IVAR_TX_QUEUE(i), txr->msix);
2954 		sc->eims_mask |= txr->eims;
2955 	}
2956 
2957 	/* For the Link interrupt */
2958 	ixgbe_set_ivar(sc, IXGBE_IVAR_OTHER_CAUSES_INDEX,
2959 	    sc->linkvec);
2960 	sc->eims_mask |= IXGBE_IVAR_OTHER_CAUSES_INDEX;
2961 }
2962 
2963 /**********************************************************************
2964  *
2965  *  Update the board statistics counters.
2966  *
2967  **********************************************************************/
2968 void
2969 ixgbe_update_stats_counters(struct ix_softc *sc)
2970 {
2971 	struct ifnet   *ifp = &sc->arpcom.ac_if;
2972 	struct ixgbe_hw *hw = &sc->hw;
2973 	uint32_t  missed_rx = 0, bprc, lxon, lxoff, total;
2974 	int	i;
2975 
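	/*
	 * These hardware counters are clear-on-read, so each call folds the
	 * delta since the previous read into the softc's running totals.
	 */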
2976 	sc->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
2977 
2978 	for (i = 0; i < 8; i++) {
2979 		int mp;
2980 		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
2981 		missed_rx += mp;
2982         	sc->stats.mpc[i] += mp;
2983 		sc->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
2984 	}
2985 
2986 	/* Hardware workaround, gprc counts missed packets */
2987 	sc->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
2988 	sc->stats.gprc -= missed_rx;
2989 
2990 	sc->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
2991 	sc->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
2992 	sc->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
2993 
2994 	/*
2995 	 * Workaround: mprc hardware is incorrectly counting
2996 	 * broadcasts, so for now we subtract those.
2997 	 */
2998 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
2999 	sc->stats.bprc += bprc;
3000 	sc->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
3001 	sc->stats.mprc -= bprc;
3002 
3003 	sc->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
3004 	sc->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
3005 	sc->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
3006 	sc->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
3007 	sc->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
3008 	sc->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
3009 	sc->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
3010 	sc->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
3011 
3012 	sc->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
3013 	sc->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
3014 
3015 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
3016 	sc->stats.lxontxc += lxon;
3017 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
3018 	sc->stats.lxofftxc += lxoff;
3019 	total = lxon + lxoff;
3020 
3021 	sc->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
3022 	sc->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
3023 	sc->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
3024 	sc->stats.gptc -= total;
3025 	sc->stats.mptc -= total;
3026 	sc->stats.ptc64 -= total;
3027 	sc->stats.gotc -= total * ETHER_MIN_LEN;
3028 
3029 	sc->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
3030 	sc->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
3031 	sc->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
3032 	sc->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
3033 	sc->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
3034 	sc->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
3035 	sc->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
3036 	sc->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
3037 	sc->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
3038 	sc->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
3039 
3040 #if 0
3041 	/* Fill out the OS statistics structure */
3042 	ifp->if_ipackets = sc->stats.gprc;
3043 	ifp->if_opackets = sc->stats.gptc;
3044 	ifp->if_ibytes = sc->stats.gorc;
3045 	ifp->if_obytes = sc->stats.gotc;
3046 	ifp->if_imcasts = sc->stats.mprc;
3047 #endif
3048 	ifp->if_collisions = 0;
3049 	ifp->if_oerrors = sc->watchdog_events;
3050 	ifp->if_ierrors = missed_rx + sc->stats.crcerrs + sc->stats.rlec;
3051 }
3052 
3053 #ifdef IX_DEBUG
3054 /**********************************************************************
3055  *
3056  *  This routine is called only when ixgbe_display_debug_stats is enabled.
3057  *  This routine provides a way to take a look at important statistics
3058  *  maintained by the driver and hardware.
3059  *
3060  **********************************************************************/
3061 void
3062 ixgbe_print_hw_stats(struct ix_softc * sc)
3063 {
3064 	struct ifnet   *ifp = &sc->arpcom.ac_if;
3065 
3066 	printf("%s: mbuf alloc failed %lu, mbuf cluster failed %lu, "
3067 	    "missed pkts %llu, rx len errs %llu, crc errs %llu, "
3068 	    "dropped pkts %lu, watchdog timeouts %ld, "
3069 	    "XON rx %llu, XON tx %llu, XOFF rx %llu, XOFF tx %llu, "
3070 	    "total pkts rx %llu, good pkts rx %llu, good pkts tx %llu, "
3071 	    "tso tx %lu\n",
3072 	    ifp->if_xname,
3073 	    sc->mbuf_alloc_failed,
3074 	    sc->mbuf_cluster_failed,
3075 	    (long long)sc->stats.mpc[0],
3076 	    (long long)sc->stats.roc + (long long)sc->stats.ruc,
3077 	    (long long)sc->stats.crcerrs,
3078 	    sc->dropped_pkts,
3079 	    sc->watchdog_events,
3080 	    (long long)sc->stats.lxonrxc,
3081 	    (long long)sc->stats.lxontxc,
3082 	    (long long)sc->stats.lxoffrxc,
3083 	    (long long)sc->stats.lxofftxc,
3084 	    (long long)sc->stats.tpr,
3085 	    (long long)sc->stats.gprc,
3086 	    (long long)sc->stats.gptc,
3087 	    sc->tso_tx);
3088 }
3089 #endif
3090 
3091 #ifndef NO_82598_A0_SUPPORT
3092 /*
3093  * A0 Workaround: invert descriptor for hardware
3094  */
3095 void
3096 desc_flip(void *desc)
3097 {
3098         struct dhack {uint32_t a1; uint32_t a2; uint32_t b1; uint32_t b2;};
3099         struct dhack *d;
3100 
3101         d = (struct dhack *)desc;
3102         d->a1 = ~(d->a1);
3103         d->a2 = ~(d->a2);
3104         d->b1 = ~(d->b1);
3105         d->b2 = ~(d->b2);
3106         d->b2 &= 0xFFFFFFF0;
3107         d->b1 &= ~IXGBE_ADVTXD_DCMD_RS;
3108 }
3109 #endif
3110 
3111 
3112 
3113